#include "arch/isa_traits.hh"
#include "arch/tlb.hh"
#include "base/trace.hh"
#include "config/the_isa.hh"
#include "debug/MMU.hh"
#include "mem/multi_level_page_table.hh"

using namespace TheISA;
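// Construct the page table object. The number of levels and the size of
// each level come from the ISA-specific PageTableLayout, expressed as
// base-2 logarithms of the number of entries per level.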
template <class ISAOps>
MultiLevelPageTable<ISAOps>::MultiLevelPageTable(const std::string &__name,
                                                 uint64_t _pid, System *_sys)
    : PageTableBase(__name, _pid), system(_sys),
      logLevelSize(PageTableLayout),
      numLevels(logLevelSize.size())
{
}
template <class ISAOps>
MultiLevelPageTable<ISAOps>::~MultiLevelPageTable()
{
}
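// initState() runs once the initial thread context is available: it reads
// the page table base pointer from the context, then allocates and zeroes
// the physical pages backing the top (root) level of the table.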
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::initState(ThreadContext *tc)
{
    basePtr = pTableISAOps.getBasePtr(tc);
    if (basePtr == 0) basePtr++;
    DPRINTF(MMU, "basePtr: %d\n", basePtr);

    system->pagePtr = basePtr;

    /* set up the first (root) level of the page table */
    uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
                            logLevelSize[numLevels - 1];
    assert(log_req_size >= PageShift);
    uint64_t npages = 1 << (log_req_size - PageShift);

    Addr paddr = system->allocPhysPages(npages);

    PortProxy &p = system->physProxy;
    p.memsetBlob(paddr, 0, npages << PageShift);
}
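// walk() translates a virtual address into the physical address of its
// last-level page table entry, descending from the root level. When
// 'allocate' is true, missing intermediate levels are allocated and zeroed
// on demand; otherwise the walk fails as soon as an entry is absent.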
template <class ISAOps>
bool
MultiLevelPageTable<ISAOps>::walk(Addr vaddr, bool allocate, Addr &PTE_addr)
{
    std::vector<uint64_t> offsets = pTableISAOps.getOffsets(vaddr);

    Addr level_base = basePtr;
    for (int i = numLevels - 1; i > 0; i--) {

        Addr entry_addr = (level_base << PageShift) +
                          offsets[i] * sizeof(PageTableEntry);

        PortProxy &p = system->physProxy;
        PageTableEntry entry = p.read<PageTableEntry>(entry_addr);

        Addr next_entry_pnum = pTableISAOps.getPnum(entry);
        if (next_entry_pnum == 0) {

            if (!allocate) return false;

            uint64_t log_req_size = floorLog2(sizeof(PageTableEntry)) +
                                    logLevelSize[i - 1];
            assert(log_req_size >= PageShift);
            uint64_t npages = 1 << (log_req_size - PageShift);

            DPRINTF(MMU, "Allocating %d pages needed for entry in level %d\n",
                    npages, i - 1);

            /* allocate and zero the backing storage for the next level */
            Addr next_entry_paddr = system->allocPhysPages(npages);
            p.memsetBlob(next_entry_paddr, 0, npages << PageShift);

            next_entry_pnum = next_entry_paddr >> PageShift;
            pTableISAOps.setPnum(entry, next_entry_pnum);
            pTableISAOps.setPTEFields(entry);
            p.write<PageTableEntry>(entry_addr, entry);
        }
        DPRINTF(MMU, "Level %d base: %d offset: %d entry: %d\n",
                i, level_base, offsets[i], next_entry_pnum);
        level_base = next_entry_pnum;
    }
    PTE_addr = (level_base << PageShift) +
               offsets[0] * sizeof(PageTableEntry);
    DPRINTF(MMU, "Returning PTE_addr: %x\n", PTE_addr);
    return true;
}
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::map(Addr vaddr, Addr paddr,
                                 int64_t size, uint64_t flags)
{
    bool clobber = flags & Clobber;
    // starting address must be page aligned
    assert(pageOffset(vaddr) == 0);

    DPRINTF(MMU, "Allocating Page: %#x-%#x\n", vaddr, vaddr + size);

    PortProxy &p = system->physProxy;

    for (; size > 0; size -= pageSize, vaddr += pageSize, paddr += pageSize) {
        Addr PTE_addr;
        if (walk(vaddr, true, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            Addr entry_paddr = pTableISAOps.getPnum(PTE);
            if (!clobber && entry_paddr != 0) {
                fatal("addr 0x%x already mapped to %x", vaddr, entry_paddr);
            }
            pTableISAOps.setPnum(PTE, paddr >> PageShift);
            uint64_t PTE_flags = 0;
            if (flags & NotPresent)
                PTE_flags |= TheISA::PTE_NotPresent;
            if (flags & Uncacheable)
                PTE_flags |= TheISA::PTE_Uncacheable;
            if (flags & ReadOnly)
                PTE_flags |= TheISA::PTE_ReadOnly;
            pTableISAOps.setPTEFields(PTE, PTE_flags);
            p.write<PageTableEntry>(PTE_addr, PTE);
            DPRINTF(MMU, "New mapping: %#x-%#x\n", vaddr, paddr);

            eraseCacheEntry(vaddr);
            updateCache(vaddr, TlbEntry(pid, vaddr, paddr,
                                        flags & Uncacheable,
                                        flags & ReadOnly));
        }
    }
}
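// remap() moves existing translations from one page-aligned virtual range
// to another: the old last-level PTE is cleared, a PTE is allocated for
// the destination address, and the same physical page number is written
// into it.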
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::remap(Addr vaddr, int64_t size, Addr new_vaddr)
{
    assert(pageOffset(vaddr) == 0);
    assert(pageOffset(new_vaddr) == 0);

    DPRINTF(MMU, "moving pages from vaddr %08p to %08p, size = %d\n", vaddr,
            new_vaddr, size);

    PortProxy &p = system->physProxy;

    for (; size > 0;
         size -= pageSize, vaddr += pageSize, new_vaddr += pageSize)
    {
        Addr PTE_addr;
        if (walk(vaddr, false, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            Addr paddr = pTableISAOps.getPnum(PTE);

            if (paddr == 0) {
                fatal("Page fault while remapping");
            } else {
                /* unmap the old vaddr */
                pTableISAOps.setPnum(PTE, 0);
                p.write<PageTableEntry>(PTE_addr, PTE);

                /* map new_vaddr to the same physical page */
                Addr new_PTE_addr;
                walk(new_vaddr, true, new_PTE_addr);
                PageTableEntry new_PTE = p.read<PageTableEntry>(new_PTE_addr);

                pTableISAOps.setPnum(new_PTE, paddr >> PageShift);
                pTableISAOps.setPTEFields(new_PTE);
                p.write<PageTableEntry>(new_PTE_addr, new_PTE);
                DPRINTF(MMU, "Remapping: %#x-%#x\n", vaddr, new_PTE_addr);
            }

            eraseCacheEntry(vaddr);
            updateCache(new_vaddr, TlbEntry(pid, new_vaddr, paddr,
                                            pTableISAOps.isUncacheable(PTE),
                                            pTableISAOps.isReadOnly(PTE)));
        } else {
            fatal("Page fault while remapping");
        }
    }
}
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::unmap(Addr vaddr, int64_t size)
{
    assert(pageOffset(vaddr) == 0);

    DPRINTF(MMU, "Unmapping page: %#x-%#x\n", vaddr, vaddr + size);

    PortProxy &p = system->physProxy;

    for (; size > 0; size -= pageSize, vaddr += pageSize) {
        Addr PTE_addr;
        if (walk(vaddr, false, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            Addr paddr = pTableISAOps.getPnum(PTE);
            if (paddr == 0) {
                fatal("PageTable::allocate: address 0x%x not mapped", vaddr);
            } else {
                pTableISAOps.setPnum(PTE, 0);
                p.write<PageTableEntry>(PTE_addr, PTE);
                DPRINTF(MMU, "Unmapping: %#x\n", vaddr);
            }
            eraseCacheEntry(vaddr);
        } else {
            fatal("Page fault while unmapping");
        }
    }
}
template <class ISAOps>
bool
MultiLevelPageTable<ISAOps>::isUnmapped(Addr vaddr, int64_t size)
{
    assert(pageOffset(vaddr) == 0);
    PortProxy &p = system->physProxy;

    for (; size > 0; size -= pageSize, vaddr += pageSize) {
        Addr PTE_addr;
        if (walk(vaddr, false, PTE_addr)) {
            PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
            if (pTableISAOps.getPnum(PTE) != 0)
                return false;
        }
    }
    return true;
}
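// lookup() first consults a small three-entry software cache of recent
// translations; on a miss it walks the table, builds a TlbEntry from the
// last-level PTE, and caches the result.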
template <class ISAOps>
bool
MultiLevelPageTable<ISAOps>::lookup(Addr vaddr, TlbEntry &entry)
{
    Addr page_addr = pageAlign(vaddr);

    if (pTableCache[0].valid && pTableCache[0].vaddr == page_addr) {
        entry = pTableCache[0].entry;
        return true;
    }
    if (pTableCache[1].valid && pTableCache[1].vaddr == page_addr) {
        entry = pTableCache[1].entry;
        return true;
    }
    if (pTableCache[2].valid && pTableCache[2].vaddr == page_addr) {
        entry = pTableCache[2].entry;
        return true;
    }

    DPRINTF(MMU, "lookup page_addr: %#x\n", page_addr);
    Addr PTE_addr;
    if (walk(page_addr, false, PTE_addr)) {
        PortProxy &p = system->physProxy;
        PageTableEntry PTE = p.read<PageTableEntry>(PTE_addr);
        Addr pnum = pTableISAOps.getPnum(PTE);
        if (pnum == 0)
            return false;

        entry = TlbEntry(pid, vaddr, pnum << PageShift,
                         pTableISAOps.isUncacheable(PTE),
                         pTableISAOps.isReadOnly(PTE));
        updateCache(page_addr, entry);
    } else {
        return false;
    }
    return true;
}
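// Checkpointing: the table itself lives in simulated physical memory,
// which is serialized with the rest of system memory, so only the base
// pointer needs to be saved and restored here.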
template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::serialize(CheckpointOut &cp) const
{
    paramOut(cp, "ptable.pointer", basePtr);
}

template <class ISAOps>
void
MultiLevelPageTable<ISAOps>::unserialize(CheckpointIn &cp)
{
    paramIn(cp, "ptable.pointer", basePtr);
}