tlb.cc
/*
 * Copyright (c) 2007-2008 The Hewlett-Packard Development Company
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Gabe Black
 */

#include "arch/x86/tlb.hh"

#include <cstring>
#include <memory>

#include "arch/generic/mmapped_ipr.hh"
#include "arch/x86/faults.hh"
#include "arch/x86/insts/microldstop.hh"
#include "arch/x86/pagetable_walker.hh"
#include "arch/x86/regs/misc.hh"
#include "arch/x86/regs/msr.hh"
#include "arch/x86/x86_traits.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"
#include "mem/page_table.hh"
#include "mem/request.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

namespace X86ISA {

TLB::TLB(const Params *p)
    : BaseTLB(p), configAddress(0), size(p->size),
      tlb(size), lruSeq(0)
{
    if (!size)
        fatal("TLBs must have a non-zero size.\n");

    for (int x = 0; x < size; x++) {
        tlb[x].trieHandle = NULL;
        freeList.push_back(&tlb[x]);
    }

    walker = p->walker;
    walker->setTLB(this);
}

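// Replacement is approximate LRU: each entry carries a lruSeq stamp taken
// from nextSeq() when it is inserted and again on lookups that update LRU
// state, so the entry with the smallest stamp is the least recently used
// and is the one the linear scan below evicts.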
void
TLB::evictLRU()
{
    // Find the entry with the lowest (and hence least recently updated)
    // sequence number.

    unsigned lru = 0;
    for (unsigned i = 1; i < size; i++) {
        if (tlb[i].lruSeq < tlb[lru].lruSeq)
            lru = i;
    }

    assert(tlb[lru].trieHandle);
    trie.remove(tlb[lru].trieHandle);
    tlb[lru].trieHandle = NULL;
    freeList.push_back(&tlb[lru]);
}

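// Entries are kept in a trie keyed on the virtual page number with a key
// width of MaxBits - logBytes, so larger pages use shorter keys and a single
// entry matches any address that falls inside the page.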
TlbEntry *
TLB::insert(Addr vpn, TlbEntry &entry)
{
    // If somebody beat us to it, just use that existing entry.
    TlbEntry *newEntry = trie.lookup(vpn);
    if (newEntry) {
        assert(newEntry->vaddr == vpn);
        return newEntry;
    }

    if (freeList.empty())
        evictLRU();

    newEntry = freeList.front();
    freeList.pop_front();

    *newEntry = entry;
    newEntry->lruSeq = nextSeq();
    newEntry->vaddr = vpn;
    newEntry->trieHandle =
        trie.insert(vpn, TlbEntryTrie::MaxBits - entry.logBytes, newEntry);
    return newEntry;
}

TlbEntry *
TLB::lookup(Addr va, bool update_lru)
{
    TlbEntry *entry = trie.lookup(va);
    if (entry && update_lru)
        entry->lruSeq = nextSeq();
    return entry;
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "Invalidating all entries.\n");
    for (unsigned i = 0; i < size; i++) {
        if (tlb[i].trieHandle) {
            trie.remove(tlb[i].trieHandle);
            tlb[i].trieHandle = NULL;
            freeList.push_back(&tlb[i]);
        }
    }
}

void
TLB::setConfigAddress(uint32_t addr)
{
    configAddress = addr;
}

void
TLB::flushNonGlobal()
{
    DPRINTF(TLB, "Invalidating all non-global entries.\n");
    for (unsigned i = 0; i < size; i++) {
        if (tlb[i].trieHandle && !tlb[i].global) {
            trie.remove(tlb[i].trieHandle);
            tlb[i].trieHandle = NULL;
            freeList.push_back(&tlb[i]);
        }
    }
}

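// The address space number argument is ignored: entries are not tagged with
// an ASN here, so only the single entry covering this virtual address (if
// any) is removed.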
void
TLB::demapPage(Addr va, uint64_t asn)
{
    TlbEntry *entry = trie.lookup(va);
    if (entry) {
        trie.remove(entry->trieHandle);
        entry->trieHandle = NULL;
        freeList.push_back(entry);
    }
}

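// Translate accesses to the "internal" (non-memory) address spaces reached
// through the pseudo segment SEGMENT_REG_MS: MSRs, the (unimplemented) CPUID
// space, and x86 I/O ports, including the 0xCF8/0xCFC PCI configuration
// mechanism.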
Fault
TLB::translateInt(RequestPtr req, ThreadContext *tc)
{
    DPRINTF(TLB, "Address references internal memory.\n");
    Addr vaddr = req->getVaddr();
    Addr prefix = (vaddr >> 3) & IntAddrPrefixMask;
    if (prefix == IntAddrPrefixCPUID) {
        panic("CPUID memory space not yet implemented!\n");
    } else if (prefix == IntAddrPrefixMSR) {
        vaddr = (vaddr >> 3) & ~IntAddrPrefixMask;
        req->setFlags(Request::MMAPPED_IPR);

        MiscRegIndex regNum;
        if (!msrAddrToIndex(regNum, vaddr))
            return std::make_shared<GeneralProtection>(0);

        // The index is multiplied by the size of a MiscReg so that
        // any memory dependence calculations will not see these as
        // overlapping.
        req->setPaddr((Addr)regNum * sizeof(MiscReg));
        return NoFault;
    } else if (prefix == IntAddrPrefixIO) {
        // TODO If CPL > IOPL or in virtual mode, check the I/O permission
        // bitmap in the TSS.

        Addr IOPort = vaddr & ~IntAddrPrefixMask;
        // Make sure the address fits in the expected 16-bit IO address
        // space.
        assert(!(IOPort & ~0xFFFF));
        if (IOPort == 0xCF8 && req->getSize() == 4) {
            req->setFlags(Request::MMAPPED_IPR);
            req->setPaddr(MISCREG_PCI_CONFIG_ADDRESS * sizeof(MiscReg));
        } else if ((IOPort & ~mask(2)) == 0xCFC) {
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
            Addr configAddress =
                tc->readMiscRegNoEffect(MISCREG_PCI_CONFIG_ADDRESS);
            if (bits(configAddress, 31, 31)) {
                req->setPaddr(PhysAddrPrefixPciConfig |
                        mbits(configAddress, 30, 2) |
                        (IOPort & mask(2)));
            } else {
                req->setPaddr(PhysAddrPrefixIO | IOPort);
            }
        } else {
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
            req->setPaddr(PhysAddrPrefixIO | IOPort);
        }
        return NoFault;
    } else {
        panic("Access to unrecognized internal address space %#x.\n",
                prefix);
    }
}

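// Post-translation fixups on the physical address: accesses falling in the
// magic m5op range (0xFFFF0000-0xFFFFFFFF) are redirected to the generic IPR
// pseudo-instruction interface, and in full-system mode accesses to the
// local APIC page are remapped to the per-context APIC address and forced to
// be uncacheable.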
Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    Addr paddr = req->getPaddr();

    AddrRange m5opRange(0xFFFF0000, 0xFFFFFFFF);

    if (m5opRange.contains(paddr)) {
        req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR |
                      Request::STRICT_ORDER);
        req->setPaddr(GenericISA::iprAddressPseudoInst((paddr >> 8) & 0xFF,
                                                       paddr & 0xFF));
    } else if (FullSystem) {
        // Check for an access to the local APIC
        LocalApicBase localApicBase =
            tc->readMiscRegNoEffect(MISCREG_APIC_BASE);
        AddrRange apicRange(localApicBase.base * PageBytes,
                            (localApicBase.base + 1) * PageBytes - 1);

        if (apicRange.contains(paddr)) {
            // The Intel developer's manuals say the below restrictions apply,
            // but the Linux kernel, because of a compiler optimization,
            // breaks them.
            /*
            // Check alignment
            if (paddr & ((32/8) - 1))
                return new GeneralProtection(0);
            // Check access size
            if (req->getSize() != (32/8))
                return new GeneralProtection(0);
            */
            // Force the access to be uncacheable.
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
            req->setPaddr(x86LocalAPICAddress(tc->contextId(),
                                              paddr - apicRange.start()));
        }
    }

    return NoFault;
}

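// The main translation routine. Requests through the pseudo segment
// SEGMENT_REG_MS go to translateInt() instead. Otherwise, outside 64-bit
// mode, segment limit and access-type checks come first. With paging enabled
// a TLB lookup follows; on a miss, full-system mode starts the hardware page
// table walker while syscall-emulation mode consults (and, if necessary,
// grows the stack in) the process's functional page table. Finally the
// paging protection bits are checked and the physical address is formed.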
Fault
TLB::translate(RequestPtr req, ThreadContext *tc, Translation *translation,
        Mode mode, bool &delayedResponse, bool timing)
{
    Request::Flags flags = req->getFlags();
    int seg = flags & SegmentFlagMask;
    bool storeCheck = flags & (StoreCheck << FlagShift);

    delayedResponse = false;

    // If this is true, we're dealing with a request to a non-memory address
    // space.
    if (seg == SEGMENT_REG_MS) {
        return translateInt(req, tc);
    }

    Addr vaddr = req->getVaddr();
    DPRINTF(TLB, "Translating vaddr %#x.\n", vaddr);

    HandyM5Reg m5Reg = tc->readMiscRegNoEffect(MISCREG_M5_REG);

    // If protected mode has been enabled...
    if (m5Reg.prot) {
        DPRINTF(TLB, "In protected mode.\n");
        // If we're not in 64-bit mode, do protection/limit checks
        if (m5Reg.mode != LongMode) {
            DPRINTF(TLB, "Not in long mode. Checking segment protection.\n");
            // Check for a NULL segment selector.
            if (!(seg == SEGMENT_REG_TSG || seg == SYS_SEGMENT_REG_IDTR ||
                  seg == SEGMENT_REG_HS || seg == SEGMENT_REG_LS)
                && !tc->readMiscRegNoEffect(MISCREG_SEG_SEL(seg)))
                return std::make_shared<GeneralProtection>(0);
            bool expandDown = false;
            SegAttr attr = tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(seg));
            if (seg >= SEGMENT_REG_ES && seg <= SEGMENT_REG_HS) {
                if (!attr.writable && (mode == Write || storeCheck))
                    return std::make_shared<GeneralProtection>(0);
                if (!attr.readable && mode == Read)
                    return std::make_shared<GeneralProtection>(0);
                expandDown = attr.expandDown;

            }
            Addr base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(seg));
            Addr limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(seg));
            bool sizeOverride = (flags & (AddrSizeFlagBit << FlagShift));
            unsigned logSize = sizeOverride ? (unsigned)m5Reg.altAddr
                                            : (unsigned)m5Reg.defAddr;
            int size = (1 << logSize) * 8;
            Addr offset = bits(vaddr - base, size - 1, 0);
            Addr endOffset = offset + req->getSize() - 1;
            if (expandDown) {
                DPRINTF(TLB, "Checking an expand down segment.\n");
                warn_once("Expand down segments are untested.\n");
                if (offset <= limit || endOffset <= limit)
                    return std::make_shared<GeneralProtection>(0);
            } else {
                if (offset > limit || endOffset > limit)
                    return std::make_shared<GeneralProtection>(0);
            }
        }
        if (m5Reg.submode != SixtyFourBitMode ||
                (flags & (AddrSizeFlagBit << FlagShift)))
            vaddr &= mask(32);
        // If paging is enabled, do the translation.
        if (m5Reg.paging) {
            DPRINTF(TLB, "Paging enabled.\n");
            // The vaddr already has the segment base applied.
            TlbEntry *entry = lookup(vaddr);
            if (!entry) {
                if (FullSystem) {
                    Fault fault = walker->start(tc, translation, req, mode);
                    if (timing || fault != NoFault) {
                        // This gets ignored in atomic mode.
                        delayedResponse = true;
                        return fault;
                    }
                    entry = lookup(vaddr);
                    assert(entry);
                } else {
                    DPRINTF(TLB, "Handling a TLB miss for "
                            "address %#x at pc %#x.\n",
                            vaddr, tc->instAddr());

                    Process *p = tc->getProcessPtr();
                    TlbEntry newEntry;
                    bool success = p->pTable->lookup(vaddr, newEntry);
                    if (!success && mode != Execute) {
                        // Check if we just need to grow the stack.
                        if (p->fixupStackFault(vaddr)) {
                            // If we did, lookup the entry for the new page.
                            success = p->pTable->lookup(vaddr, newEntry);
                        }
                    }
                    if (!success) {
                        return std::make_shared<PageFault>(vaddr, true, mode,
                                                           true, false);
                    } else {
                        Addr alignedVaddr = p->pTable->pageAlign(vaddr);
                        DPRINTF(TLB, "Mapping %#x to %#x\n", alignedVaddr,
                                newEntry.pageStart());
                        entry = insert(alignedVaddr, newEntry);
                    }
                    DPRINTF(TLB, "Miss was serviced.\n");
                }
            }

            DPRINTF(TLB, "Entry found with paddr %#x, "
                    "doing protection checks.\n", entry->paddr);
            // Do paging protection checks.
            bool inUser = (m5Reg.cpl == 3 &&
                    !(flags & (CPL0FlagBit << FlagShift)));
            CR0 cr0 = tc->readMiscRegNoEffect(MISCREG_CR0);
            bool badWrite = (!entry->writable && (inUser || cr0.wp));
            if ((inUser && !entry->user) || (mode == Write && badWrite)) {
                // The page must have been present to get into the TLB in
                // the first place. We'll assume the reserved bits are
                // fine even though we're not checking them.
                return std::make_shared<PageFault>(vaddr, true, mode, inUser,
                                                   false);
            }
            if (storeCheck && badWrite) {
                // This would fault if this were a write, so return a page
                // fault that reflects that happening.
                return std::make_shared<PageFault>(vaddr, true, Write, inUser,
                                                   false);
            }

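            // Form the physical address: the low logBytes bits of the
            // virtual address are the in-page offset and the rest comes from
            // the entry's frame address. For example, with a 4 KiB page
            // logBytes is 12, so mask(12) == 0xfff selects the page offset.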
            Addr paddr = entry->paddr | (vaddr & mask(entry->logBytes));
            DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, paddr);
            req->setPaddr(paddr);
            if (entry->uncacheable)
                req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        } else {
            // Use the address which already has segmentation applied.
            DPRINTF(TLB, "Paging disabled.\n");
            DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, vaddr);
            req->setPaddr(vaddr);
        }
    } else {
        // Real mode
        DPRINTF(TLB, "In real mode.\n");
        DPRINTF(TLB, "Translated %#x -> %#x.\n", vaddr, vaddr);
        req->setPaddr(vaddr);
    }

    return finalizePhysical(req, tc, mode);
}

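// In atomic mode any delayed response is simply discarded, since the walk
// completes inline. In timing mode a delayed response means the walker has
// taken ownership of the request and is expected to complete the translation
// itself, so finish() is not called here.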
Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
{
    bool delayedResponse;
    return TLB::translate(req, tc, NULL, mode, delayedResponse, false);
}

void
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
        Translation *translation, Mode mode)
{
    bool delayedResponse;
    assert(translation);
    Fault fault =
        TLB::translate(req, tc, translation, mode, delayedResponse, true);
    if (!delayedResponse)
        translation->finish(fault, req, tc, mode);
}

Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode)
{
    panic("Not implemented\n");
    return NoFault;
}

Walker *
TLB::getWalker()
{
    return walker;
}

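// Checkpointing: only entries currently in use are written out, as numbered
// "Entry%d" sections, along with the count of valid entries and the LRU
// sequence counter; unserialize() below rebuilds the trie by pulling entries
// back off the free list.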
void
TLB::serialize(CheckpointOut &cp) const
{
    // Only store the entries in use.
    uint32_t _size = size - freeList.size();
    SERIALIZE_SCALAR(_size);
    SERIALIZE_SCALAR(lruSeq);

    uint32_t _count = 0;
    for (uint32_t x = 0; x < size; x++) {
        if (tlb[x].trieHandle != NULL)
            tlb[x].serializeSection(cp, csprintf("Entry%d", _count++));
    }
}

void
TLB::unserialize(CheckpointIn &cp)
{
    // Do not allow restoring into a smaller TLB.
    uint32_t _size;
    UNSERIALIZE_SCALAR(_size);
    if (_size > size) {
        fatal("TLB size less than the one in checkpoint!");
    }

    UNSERIALIZE_SCALAR(lruSeq);

    for (uint32_t x = 0; x < _size; x++) {
        TlbEntry *newEntry = freeList.front();
        freeList.pop_front();

        newEntry->unserializeSection(cp, csprintf("Entry%d", x));
        newEntry->trieHandle = trie.insert(newEntry->vaddr,
            TlbEntryTrie::MaxBits - newEntry->logBytes, newEntry);
    }
}

BaseMasterPort *
TLB::getMasterPort()
{
    return &walker->getMasterPort("port");
}

} // namespace X86ISA

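// SimObject construction hook: the generated X86TLBParams object (built from
// the Python X86TLB description) calls create() to build the C++ TLB,
// passing itself as the Params pointer consumed by the constructor above.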
X86ISA::TLB *
X86TLBParams::create()
{
    return new X86ISA::TLB(this);
}