physical.cc
/*
 * Copyright (c) 2012, 2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */

#include "mem/physical.hh"

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"

#if defined(__APPLE__) || defined(__FreeBSD__)
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#endif

using namespace std;

PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories,
                               bool mmap_using_noreserve) :
    _name(_name), rangeCache(addrMap.end()), size(0),
    mmapUsingNoReserve(mmap_using_noreserve)
{
    if (mmap_using_noreserve)
        warn("Not reserving swap space. May cause SIGSEGV on actual usage\n");

    // add the memories from the system to the address map as
    // appropriate
    for (const auto& m : _memories) {
        // only add the memory if it is part of the global address map
        if (m->isInAddrMap()) {
            memories.push_back(m);

            // calculate the total size once and for all
            size += m->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
                     "Memory address range for %s is overlapping\n",
                     m->name());
        } else {
            // this type of memory is used e.g. as reference memory by
            // Ruby, and it also needs a backing store, but should
            // not be part of the global address map
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    m->name());

            // sanity check
            fatal_if(m->getAddrRange().interleaved(),
                     "Memory %s that is not in the global address map cannot "
                     "be interleaved\n", m->name());

            // simply do it independently, also note that this kind of
            // memory is allowed to overlap in the logical address
            // map
            vector<AbstractMemory*> unmapped_mems{m};
            createBackingStore(m->getAddrRange(), unmapped_mems,
                               m->isConfReported(), m->isInAddrMap(),
                               m->isKvmMap());
        }
    }

    // iterate over the increasing addresses and chunks of contiguous
    // space to be mapped to backing store, create it and inform the
    // memories
    vector<AddrRange> intlv_ranges;
    vector<AbstractMemory*> curr_memories;
    for (const auto& r : addrMap) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r.second->isNull()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    AddrRange merged_range(intlv_ranges);

                    AbstractMemory *f = curr_memories.front();
                    for (const auto& c : curr_memories)
                        if (f->isConfReported() != c->isConfReported() ||
                            f->isInAddrMap() != c->isInAddrMap() ||
                            f->isKvmMap() != c->isKvmMap())
                            fatal("Inconsistent flags in an interleaved "
                                  "range\n");

                    createBackingStore(merged_range, curr_memories,
                                       f->isConfReported(), f->isInAddrMap(),
                                       f->isKvmMap());

                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r.first);
                curr_memories.push_back(r.second);
            } else {
                vector<AbstractMemory*> single_memory{r.second};
                createBackingStore(r.first, single_memory,
                                   r.second->isConfReported(),
                                   r.second->isInAddrMap(),
                                   r.second->isKvmMap());
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);

        AbstractMemory *f = curr_memories.front();
        for (const auto& c : curr_memories)
            if (f->isConfReported() != c->isConfReported() ||
                f->isInAddrMap() != c->isInAddrMap() ||
                f->isKvmMap() != c->isKvmMap())
                fatal("Inconsistent flags in an interleaved "
                      "range\n");

        createBackingStore(merged_range, curr_memories,
                           f->isConfReported(), f->isInAddrMap(),
                           f->isKvmMap());
    }
}
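
// Note on the merging above (illustrative, hypothetical values): if four
// memory controllers each export the same interleaved span, e.g. [0, 4GB)
// selected by two address bits, the AddrRange(vector<AddrRange>) constructor
// collapses the four constituent ranges into a single contiguous [0, 4GB)
// range, which is then backed by one shared mmap'd region handed to all
// four controllers by createBackingStore() below.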

void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories,
                                   bool conf_table_reported,
                                   bool in_addr_map, bool kvm_map)
{
    panic_if(range.interleaved(),
             "Cannot create backing store for interleaved range %s\n",
             range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());
    int map_flags = MAP_ANON | MAP_PRIVATE;

    // to be able to simulate very large memories, the user can opt to
    // pass noreserve to mmap
    if (mmapUsingNoReserve) {
        map_flags |= MAP_NORESERVE;
    }

    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.emplace_back(range, pmem,
                              conf_table_reported, in_addr_map, kvm_map);

    // point the memories to their backing store
    for (const auto& m : _memories) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                m->name());
        m->setBackingStore(pmem);
    }
}
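
// Background (general mmap semantics, not gem5-specific): an anonymous
// MAP_PRIVATE mapping is zero-initialized and demand-paged, so host memory
// is only committed for pages the simulation actually touches. MAP_NORESERVE
// additionally skips swap-space reservation, which is what makes very large
// simulated memories feasible, at the risk of a SIGSEGV if the host cannot
// later provide a page (hence the warning in the constructor).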

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (auto& s : backingStore)
        munmap((char*)s.pmem, s.range.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    // see if the address is within the last matched range
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        return true;
    } else {
        // lookup in the interval tree
        const auto& r = addrMap.find(addr);
        if (r == addrMap.end()) {
            // not in the cache, and not in the tree
            return false;
        }
        // the range is in the tree, update the cache
        rangeCache = r;
        return true;
    }
}
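
// Note: rangeCache is a single-entry cache over the interval tree. Since
// consecutive accesses tend to fall in the same memory range, isMemAddr(),
// access() and functionalAccess() below usually skip the addrMap lookup
// entirely; only a cache miss pays for the tree search.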

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely to
    // be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    vector<AddrRange> intlv_ranges;
    for (const auto& r : addrMap) {
        if (r.second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r.first);
            } else {
                // keep the current range
                ranges.push_back(r.first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        rangeCache->second->access(pkt);
    } else {
        // do not update the cache here, as we typically call
        // isMemAddr before calling access
        const auto& m = addrMap.find(addr);
        assert(m != addrMap.end());
        m->second->access(pkt);
    }
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        rangeCache->second->functionalAccess(pkt);
    } else {
        // do not update the cache here, as we typically call
        // isMemAddr before calling functionalAccess
        const auto& m = addrMap.find(addr);
        assert(m != addrMap.end());
        m->second->functionalAccess(pkt);
    }
}

void
PhysicalMemory::serialize(CheckpointOut &cp) const
{
    // serialize all the locked addresses and their context ids
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;

    for (auto& m : memories) {
        const list<LockedAddr>& locked_addrs = m->getLockedAddrList();
        for (const auto& l : locked_addrs) {
            lal_addr.push_back(l.addr);
            lal_cid.push_back(l.contextId);
        }
    }

    SERIALIZE_CONTAINER(lal_addr);
    SERIALIZE_CONTAINER(lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (auto& s : backingStore) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
        serializeStore(cp, store_id++, s.range, s.pmem);
    }
}
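
// Checkpoint layout produced above: the locked-address state is stored as two
// parallel containers (lal_addr[i] pairs with lal_cid[i]), and each backing
// store gets its own "storeN" section written by serializeStore() below, so
// the stores can later be restored one by one in unserialize().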

void
PhysicalMemory::serializeStore(CheckpointOut &cp, unsigned int store_id,
                               AddrRange range, uint8_t* pmem) const
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = name() + ".store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    string filepath = CheckpointIn::dir() + "/" + filename.c_str();
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite fails if (int)len < 0 (gzwrite returns int)
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
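
// For example (hypothetical names): a PhysicalMemory named "system.physmem"
// serializing store_id 0 writes a gzip-compressed image of that backing store
// to "<checkpoint-dir>/system.physmem.store0.pmem", and records the filename
// and range_size in the checkpoint so unserializeStore() can validate and
// reload it.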

void
PhysicalMemory::unserialize(CheckpointIn &cp)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;
    UNSERIALIZE_CONTAINER(lal_addr);
    UNSERIALIZE_CONTAINER(lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        const auto& m = addrMap.find(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", i));
        unserializeStore(cp);
    }
}

void
PhysicalMemory::unserializeStore(CheckpointIn &cp)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp.cptDir + "/" + filename;

    // open the compressed memory file
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // we've already got the actual backing store mapped
    uint8_t* pmem = backingStore[store_id].pmem;
    AddrRange range = backingStore[store_id].range;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // Only copy bytes that are non-zero, so we don't give
            // the VM system hell
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
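
// Restore strategy used above: the image is read back in chunk_size-byte
// pieces, and only non-zero long-sized words are written into the mmap'd
// backing store. Pages that were never touched during the original run thus
// stay untouched on restore as well, so the host does not have to commit
// memory for them.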