gem5
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
page_table.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2014 Advanced Micro Devices, Inc.
3  * Copyright (c) 2003 The Regents of The University of Michigan
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are
8  * met: redistributions of source code must retain the above copyright
9  * notice, this list of conditions and the following disclaimer;
10  * redistributions in binary form must reproduce the above copyright
11  * notice, this list of conditions and the following disclaimer in the
12  * documentation and/or other materials provided with the distribution;
13  * neither the name of the copyright holders nor the names of its
14  * contributors may be used to endorse or promote products derived from
15  * this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  * Authors: Steve Reinhardt
30  * Ron Dreslinski
31  * Ali Saidi
32  */
33 
38 #include "mem/page_table.hh"
39 
40 #include <string>
41 
42 #include "base/trace.hh"
43 #include "config/the_isa.hh"
44 #include "debug/MMU.hh"
45 #include "sim/faults.hh"
46 #include "sim/serialize.hh"
47 
48 using namespace std;
49 using namespace TheISA;
50 
// Construct a functional (non-hardware-walked) page table; all state
// (name, owning pid, page size) is held by PageTableBase.
FuncPageTable::FuncPageTable(const std::string &__name,
                             uint64_t _pid, Addr _pageSize)
        : PageTableBase(__name, _pid, _pageSize)
{
}
56 
58 {
59 }
60 
61 void
62 FuncPageTable::map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags)
63 {
64  bool clobber = flags & Clobber;
65  // starting address must be page aligned
66  assert(pageOffset(vaddr) == 0);
67 
68  DPRINTF(MMU, "Allocating Page: %#x-%#x\n", vaddr, vaddr+ size);
69 
70  for (; size > 0; size -= pageSize, vaddr += pageSize, paddr += pageSize) {
71  if (!clobber && (pTable.find(vaddr) != pTable.end())) {
72  // already mapped
73  fatal("FuncPageTable::allocate: addr 0x%x already mapped", vaddr);
74  }
75 
76  pTable[vaddr] = TheISA::TlbEntry(pid, vaddr, paddr,
77  flags & Uncacheable,
78  flags & ReadOnly);
79  eraseCacheEntry(vaddr);
80  updateCache(vaddr, pTable[vaddr]);
81  }
82 }
83 
84 void
85 FuncPageTable::remap(Addr vaddr, int64_t size, Addr new_vaddr)
86 {
87  assert(pageOffset(vaddr) == 0);
88  assert(pageOffset(new_vaddr) == 0);
89 
90  DPRINTF(MMU, "moving pages from vaddr %08p to %08p, size = %d\n", vaddr,
91  new_vaddr, size);
92 
93  for (; size > 0;
94  size -= pageSize, vaddr += pageSize, new_vaddr += pageSize)
95  {
96  assert(pTable.find(vaddr) != pTable.end());
97 
98  pTable[new_vaddr] = pTable[vaddr];
99  pTable.erase(vaddr);
100  eraseCacheEntry(vaddr);
101  pTable[new_vaddr].updateVaddr(new_vaddr);
102  updateCache(new_vaddr, pTable[new_vaddr]);
103  }
104 }
105 
106 void
108 {
109  for (auto &iter : pTable)
110  addr_maps->push_back(make_pair(iter.first, iter.second.pageStart()));
111 }
112 
113 void
115 {
116  assert(pageOffset(vaddr) == 0);
117 
118  DPRINTF(MMU, "Unmapping page: %#x-%#x\n", vaddr, vaddr+ size);
119 
120  for (; size > 0; size -= pageSize, vaddr += pageSize) {
121  assert(pTable.find(vaddr) != pTable.end());
122  pTable.erase(vaddr);
123  eraseCacheEntry(vaddr);
124  }
125 
126 }
127 
128 bool
130 {
131  // starting address must be page aligned
132  assert(pageOffset(vaddr) == 0);
133 
134  for (; size > 0; size -= pageSize, vaddr += pageSize) {
135  if (pTable.find(vaddr) != pTable.end()) {
136  return false;
137  }
138  }
139 
140  return true;
141 }
142 
143 bool
145 {
146  Addr page_addr = pageAlign(vaddr);
147 
148  if (pTableCache[0].valid && pTableCache[0].vaddr == page_addr) {
149  entry = pTableCache[0].entry;
150  return true;
151  }
152  if (pTableCache[1].valid && pTableCache[1].vaddr == page_addr) {
153  entry = pTableCache[1].entry;
154  return true;
155  }
156  if (pTableCache[2].valid && pTableCache[2].vaddr == page_addr) {
157  entry = pTableCache[2].entry;
158  return true;
159  }
160 
161  PTableItr iter = pTable.find(page_addr);
162 
163  if (iter == pTable.end()) {
164  return false;
165  }
166 
167  updateCache(page_addr, iter->second);
168  entry = iter->second;
169  return true;
170 }
171 
172 bool
174 {
175  TheISA::TlbEntry entry;
176  if (!lookup(vaddr, entry)) {
177  DPRINTF(MMU, "Couldn't Translate: %#x\n", vaddr);
178  return false;
179  }
180  paddr = pageOffset(vaddr) + entry.pageStart();
181  DPRINTF(MMU, "Translating: %#x->%#x\n", vaddr, paddr);
182  return true;
183 }
184 
185 Fault
187 {
188  Addr paddr;
189  assert(pageAlign(req->getVaddr() + req->getSize() - 1)
190  == pageAlign(req->getVaddr()));
191  if (!translate(req->getVaddr(), paddr)) {
192  return Fault(new GenericPageTableFault(req->getVaddr()));
193  }
194  req->setPaddr(paddr);
195  if ((paddr & (pageSize - 1)) + req->getSize() > pageSize) {
196  panic("Request spans page boundaries!\n");
197  return NoFault;
198  }
199  return NoFault;
200 }
201 
202 void
204 {
205  paramOut(cp, "ptable.size", pTable.size());
206 
208  for (auto &pte : pTable) {
209  ScopedCheckpointSection sec(cp, csprintf("Entry%d", count++));
210 
211  paramOut(cp, "vaddr", pte.first);
212  pte.second.serialize(cp);
213  }
214  assert(count == pTable.size());
215 }
216 
217 void
219 {
220  int count;
221  paramIn(cp, "ptable.size", count);
222 
223  for (int i = 0; i < count; ++i) {
224  ScopedCheckpointSection sec(cp, csprintf("Entry%d", i));
225 
226  std::unique_ptr<TheISA::TlbEntry> entry;
227  Addr vaddr;
228 
229  paramIn(cp, "vaddr", vaddr);
230  entry.reset(new TheISA::TlbEntry());
231  entry->unserialize(cp);
232 
233  pTable[vaddr] = *entry;
234  }
235 }
236 
count
Definition: misc.hh:704
#define DPRINTF(x,...)
Definition: trace.hh:212
decltype(nullptr) constexpr NoFault
Definition: types.hh:189
Bitfield< 7 > i
Definition: miscregs.hh:1378
void updateCache(Addr vaddr, TheISA::TlbEntry entry)
Update the page table cache.
Definition: page_table.hh:167
#define panic(...)
Definition: misc.hh:153
bool isUnmapped(Addr vaddr, int64_t size) override
Check if any pages in a region are already allocated.
Definition: page_table.cc:129
Addr pageAlign(Addr a)
Definition: page_table.hh:109
TheISA::TlbEntry entry
Definition: page_table.hh:63
FuncPageTable(const std::string &__name, uint64_t _pid, Addr _pageSize=TheISA::PageBytes)
Definition: page_table.cc:51
Addr pageOffset(Addr a)
Definition: page_table.hh:110
unsigned int size_type
Definition: types.hh:56
const Addr pageSize
Definition: page_table.hh:68
STL vector class.
Definition: stl.hh:40
void map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags=0) override
Maps a virtual memory region to a physical memory region.
Definition: page_table.cc:62
PTable::iterator PTableItr
Definition: page_table.hh:209
void remap(Addr vaddr, int64_t size, Addr new_vaddr) override
Definition: page_table.cc:85
std::string csprintf(const char *format, const Args &...args)
Definition: cprintf.hh:161
bool translate(Addr vaddr, Addr &paddr)
Translate function.
Definition: page_table.cc:173
void serialize(CheckpointOut &cp) const override
Serialize an object.
Definition: page_table.cc:203
void paramOut(CheckpointOut &cp, const string &name, ExtMachInst const &machInst)
Definition: types.cc:40
#define fatal(...)
Definition: misc.hh:163
TlbEntry(Addr asn, Addr _vaddr, Addr _paddr, bool uncacheable, bool read_only)
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:142
Declaration of base class for page table.
Definition: page_table.hh:57
void unserialize(CheckpointIn &cp) override
Unserialize an object.
Definition: page_table.cc:218
int size()
Definition: pagetable.hh:146
void unmap(Addr vaddr, int64_t size) override
Definition: page_table.cc:114
Declarations of a non-full system Page Table.
void getMappings(std::vector< std::pair< Addr, Addr >> *addr_maps) override
Definition: page_table.cc:107
std::ostream CheckpointOut
Definition: serialize.hh:67
Addr getVaddr() const
Definition: request.hh:616
bool lookup(Addr vaddr, TheISA::TlbEntry &entry) override
Lookup function.
Definition: page_table.cc:144
virtual bool lookup(Addr vaddr, TheISA::TlbEntry &entry)=0
Lookup function.
void paramIn(CheckpointIn &cp, const string &name, ExtMachInst &machInst)
Definition: types.cc:71
void eraseCacheEntry(Addr vaddr)
Erase an entry from the page table cache.
Definition: page_table.hh:186
struct cacheElement pTableCache[3]
Definition: page_table.hh:66
unsigned getSize() const
Definition: request.hh:552
Scoped checkpoint section helper class.
Definition: serialize.hh:240
void setPaddr(Addr paddr)
Set just the physical address.
Definition: request.hh:487
std::shared_ptr< FaultBase > Fault
Definition: types.hh:184
const uint64_t pid
Definition: page_table.hh:71

Generated on Fri Jun 9 2017 13:03:49 for gem5 by doxygen 1.8.6