#include "mem/physical.hh"

#include <sys/mman.h>
#include <sys/types.h>
#include <zlib.h>

#include <climits>
#include <string>

#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"

#if defined(__APPLE__) || defined(__FreeBSD__)
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#endif
PhysicalMemory::PhysicalMemory(const std::string& _name,
                               const std::vector<AbstractMemory*>& _memories,
                               bool mmap_using_noreserve) :
    _name(_name), rangeCache(addrMap.end()), size(0),
    mmapUsingNoReserve(mmap_using_noreserve)
{
    if (mmap_using_noreserve)
        warn("Not reserving swap space. May cause SIGSEGV on actual usage\n");
    // add the memories from the system to the address map as
    // appropriate
    for (const auto& m : _memories) {
        // only add the memory if it is part of the global address map
        if (m->isInAddrMap()) {
            memories.push_back(m);

            // calculate the total size once and for all
            size += m->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
                     "Memory address range for %s is overlapping\n",
                     m->name());
        } else {
            // this memory needs a backing store, but should not be part of
            // the global address map
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    m->name());

            // sanity check
            fatal_if(m->getAddrRange().interleaved(),
                     "Memory %s that is not in the global address map cannot "
                     "be interleaved\n", m->name());

            // create the backing store for this memory on its own; memories
            // outside the address map are allowed to overlap
            std::vector<AbstractMemory*> unmapped_mems{m};
            createBackingStore(m->getAddrRange(), unmapped_mems,
                               m->isConfReported(), m->isInAddrMap(),
                               m->isKvmMap());
        }
    }
    // iterate over the increasing addresses and chunks of contiguous
    // space to be mapped to backing store, create it and inform the
    // memories
    std::vector<AddrRange> intlv_ranges;
    std::vector<AbstractMemory*> curr_memories;
    for (const auto& r : addrMap) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r.second->isNull()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    AddrRange merged_range(intlv_ranges);

                    AbstractMemory *f = curr_memories.front();
                    for (const auto& c : curr_memories)
                        if (f->isConfReported() != c->isConfReported() ||
                            f->isInAddrMap() != c->isInAddrMap() ||
                            f->isKvmMap() != c->isKvmMap())
                            fatal("Inconsistent flags in an interleaved "
                                  "range\n");

                    createBackingStore(merged_range, curr_memories,
                                       f->isConfReported(), f->isInAddrMap(),
                                       f->isKvmMap());

                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r.first);
                curr_memories.push_back(r.second);
            } else {
                std::vector<AbstractMemory*> single_memory{r.second};
                createBackingStore(r.first, single_memory,
                                   r.second->isConfReported(),
                                   r.second->isInAddrMap(),
                                   r.second->isKvmMap());
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);

        AbstractMemory *f = curr_memories.front();
        for (const auto& c : curr_memories)
            if (f->isConfReported() != c->isConfReported() ||
                f->isInAddrMap() != c->isInAddrMap() ||
                f->isKvmMap() != c->isKvmMap())
                fatal("Inconsistent flags in an interleaved "
                      "range\n");

        createBackingStore(merged_range, curr_memories,
                           f->isConfReported(), f->isInAddrMap(),
                           f->isKvmMap());
    }
}
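
// Note (added commentary, not part of the upstream source): the constructor
// groups the address map into backing stores. Every non-interleaved range gets
// a store of its own, while consecutive interleaved ranges that merge into one
// contiguous range (e.g. the channels of a single DRAM) share a single store,
// since interleaving only decides which controller answers a request, not
// where the bytes actually live.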

void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const std::vector<AbstractMemory*>& _memories,
                                   bool conf_table_reported,
                                   bool in_addr_map, bool kvm_map)
{
    panic_if(range.interleaved(),
             "Cannot create backing store for interleaved range %s\n",
             range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());
    int map_flags = MAP_ANON | MAP_PRIVATE;

    // to be able to simulate very large memories, the user can opt to
    // pass noreserve to mmap
    if (mmapUsingNoReserve) {
        map_flags |= MAP_NORESERVE;
    }

    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately when we are done
    backingStore.emplace_back(range, pmem,
                              conf_table_reported, in_addr_map, kvm_map);
    // point the memories to their backing store
    for (const auto& m : _memories) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                m->name());
        m->setBackingStore(pmem);
    }
}
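
// Note (added commentary): MAP_ANON | MAP_PRIVATE gives a zero-initialised,
// copy-on-write anonymous mapping, so host pages are only allocated when the
// simulated memory is actually touched. Adding MAP_NORESERVE skips the swap
// reservation, which lets very large simulated memories be created on hosts
// with less RAM/swap, at the cost of a possible SIGSEGV if the host cannot
// supply a page later on (hence the warning in the constructor).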

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (auto& s : backingStore)
        munmap((char*)s.pmem, s.range.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    // see if the address is within the last matched range
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        return true;
    } else {
        // lookup in the interval tree
        const auto& r = addrMap.find(addr);
        if (r == addrMap.end()) {
            // not in the cache, and not in the tree
            return false;
        }
        // the range is in the tree, update the cache
        rangeCache = r;
        return true;
    }
}
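
// Note (added commentary): rangeCache is a single-entry cache of the last
// matching interval-tree entry, exploiting the locality of back-to-back
// accesses to the same range; it is presumably declared mutable in
// physical.hh so this const method can update it.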

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely
    // to be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    std::vector<AddrRange> intlv_ranges;
    for (const auto& r : addrMap) {
        if (r.second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r.first);
            } else {
                // keep the current range
                ranges.push_back(r.first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        rangeCache->second->access(pkt);
    } else {
        // do not update the cache here, as we typically call
        // isMemAddr before calling access
        const auto& m = addrMap.find(addr);
        assert(m != addrMap.end());
        m->second->access(pkt);
    }
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        rangeCache->second->functionalAccess(pkt);
    } else {
        // do not update the cache here, as we typically call
        // isMemAddr before calling functionalAccess
        const auto& m = addrMap.find(addr);
        assert(m != addrMap.end());
        m->second->functionalAccess(pkt);
    }
}

void
PhysicalMemory::serialize(CheckpointOut &cp) const
{
    // serialize all the locked addresses and their context ids
    std::vector<Addr> lal_addr;
    std::vector<ContextID> lal_cid;

    for (auto& m : memories) {
        const auto& locked_addrs = m->getLockedAddrList();
        for (const auto& l : locked_addrs) {
            lal_addr.push_back(l.addr);
            lal_cid.push_back(l.contextId);
        }
    }

    SERIALIZE_CONTAINER(lal_addr);
    SERIALIZE_CONTAINER(lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (auto& s : backingStore) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
        serializeStore(cp, store_id++, s.range, s.pmem);
    }
}

void
PhysicalMemory::serializeStore(CheckpointOut &cp, unsigned int store_id,
                               AddrRange range, uint8_t* pmem) const
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    std::string filename =
        name() + ".store" + std::to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    std::string filepath = CheckpointIn::dir() + "/" + filename.c_str();
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite fails if (int)len < 0 (gzwrite returns int)
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

void
PhysicalMemory::unserialize(CheckpointIn &cp)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    std::vector<Addr> lal_addr;
    std::vector<ContextID> lal_cid;
    UNSERIALIZE_CONTAINER(lal_addr);
    UNSERIALIZE_CONTAINER(lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        const auto& m = addrMap.find(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", i));
        unserializeStore(cp);
    }
}

void
PhysicalMemory::unserializeStore(CheckpointIn &cp)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    std::string filename;
    UNSERIALIZE_SCALAR(filename);
    std::string filepath = cp.cptDir + "/" + filename;

    // open the compressed memory file
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // we've already got the backing store mapped
    uint8_t* pmem = backingStore[store_id].pmem;
    AddrRange range = backingStore[store_id].range;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // only copy bytes that are non-zero, so we don't give
            // the VM system hell
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}