tlb.cc
1 /*
2  * Copyright (c) 2001-2005 The Regents of The University of Michigan
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are
7  * met: redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer;
9  * redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution;
12  * neither the name of the copyright holders nor the names of its
13  * contributors may be used to endorse or promote products derived from
14  * this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * Authors: Nathan Binkert
29  * Steve Reinhardt
30  * Andrew Schultz
31  */
32 
33 #include "arch/alpha/tlb.hh"
34 
35 #include <algorithm>
36 #include <memory>
37 #include <string>
38 #include <vector>
39 
40 #include "arch/alpha/faults.hh"
41 #include "arch/alpha/pagetable.hh"
43 #include "base/inifile.hh"
44 #include "base/str.hh"
45 #include "base/trace.hh"
46 #include "cpu/thread_context.hh"
47 #include "debug/TLB.hh"
48 #include "sim/full_system.hh"
49 
50 using namespace std;
51 
52 namespace AlphaISA {
53 
55 //
56 // Alpha TLB
57 //
58 
59 #ifdef DEBUG
60 bool uncacheBit39 = false;
61 bool uncacheBit40 = false;
62 #endif
63 
64 #define MODE2MASK(X) (1 << (X))
65 
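Editor's note: MODE2MASK turns an Alpha processor-mode number into a single permission bit; later in this file it is used to test the xre/xwe fields of a TlbEntry. A minimal, self-contained sketch of that check follows. The kernel/executive/supervisor/user numbering is the standard Alpha current-mode encoding, and the variable names here are illustrative, not taken from gem5.

#include <cassert>
#include <cstdint>

#define MODE2MASK(X) (1 << (X))

// Assumed Alpha current-mode (CM) encoding: 0=kernel, 1=executive,
// 2=supervisor, 3=user.
enum mode_type { mode_kernel = 0, mode_executive = 1,
                 mode_supervisor = 2, mode_user = 3 };

int main()
{
    // xre holds one "read enable" bit per mode, as TlbEntry::xre does below.
    uint8_t xre = MODE2MASK(mode_kernel) | MODE2MASK(mode_user);

    assert(xre & MODE2MASK(mode_user));          // user-mode read allowed
    assert(!(xre & MODE2MASK(mode_executive)));  // executive-mode read would fault
    return 0;
}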
66 TLB::TLB(const Params *p)
67  : BaseTLB(p), table(p->size), nlu(0)
68 {
69  flushCache();
70 }
71 
72 TLB::~TLB()
73 {
74 }
75 
76 void
77 TLB::regStats()
78 {
79  BaseTLB::regStats();
80 
81  fetch_hits
82  .name(name() + ".fetch_hits")
83  .desc("ITB hits");
84  fetch_misses
85  .name(name() + ".fetch_misses")
86  .desc("ITB misses");
87  fetch_acv
88  .name(name() + ".fetch_acv")
89  .desc("ITB acv");
90  fetch_accesses
91  .name(name() + ".fetch_accesses")
92  .desc("ITB accesses");
93 
95 
96  read_hits
97  .name(name() + ".read_hits")
98  .desc("DTB read hits")
99  ;
100 
101  read_misses
102  .name(name() + ".read_misses")
103  .desc("DTB read misses")
104  ;
105 
106  read_acv
107  .name(name() + ".read_acv")
108  .desc("DTB read access violations")
109  ;
110 
111  read_accesses
112  .name(name() + ".read_accesses")
113  .desc("DTB read accesses")
114  ;
115 
116  write_hits
117  .name(name() + ".write_hits")
118  .desc("DTB write hits")
119  ;
120 
121  write_misses
122  .name(name() + ".write_misses")
123  .desc("DTB write misses")
124  ;
125 
126  write_acv
127  .name(name() + ".write_acv")
128  .desc("DTB write access violations")
129  ;
130 
131  write_accesses
132  .name(name() + ".write_accesses")
133  .desc("DTB write accesses")
134  ;
135 
136  data_hits
137  .name(name() + ".data_hits")
138  .desc("DTB hits")
139  ;
140 
141  data_misses
142  .name(name() + ".data_misses")
143  .desc("DTB misses")
144  ;
145 
146  data_acv
147  .name(name() + ".data_acv")
148  .desc("DTB access violations")
149  ;
150 
151  data_accesses
152  .name(name() + ".data_accesses")
153  .desc("DTB accesses")
154  ;
155 
156  data_hits = read_hits + write_hits;
157  data_misses = read_misses + write_misses;
158  data_acv = read_acv + write_acv;
159  data_accesses = read_accesses + write_accesses;
160 }
161 
162 // look up an entry in the TLB
163 TlbEntry *
164 TLB::lookup(Addr vpn, uint8_t asn)
165 {
166  // assume not found...
167  TlbEntry *retval = NULL;
168 
169  if (EntryCache[0]) {
170  if (vpn == EntryCache[0]->tag &&
171  (EntryCache[0]->asma || EntryCache[0]->asn == asn))
172  retval = EntryCache[0];
173  else if (EntryCache[1]) {
174  if (vpn == EntryCache[1]->tag &&
175  (EntryCache[1]->asma || EntryCache[1]->asn == asn))
176  retval = EntryCache[1];
177  else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
178  (EntryCache[2]->asma || EntryCache[2]->asn == asn))
179  retval = EntryCache[2];
180  }
181  }
182 
183  if (retval == NULL) {
184  PageTable::const_iterator i = lookupTable.find(vpn);
185  if (i != lookupTable.end()) {
186  while (i->first == vpn) {
187  int index = i->second;
188  TlbEntry *entry = &table[index];
189  assert(entry->valid);
190  if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
191  retval = updateCache(entry);
192  break;
193  }
194 
195  ++i;
196  }
197  }
198  }
199 
200  DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
201  retval ? "hit" : "miss", retval ? retval->ppn : 0);
202  return retval;
203 }
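Editor's note: lookup() above walks a multimap from virtual page number to an index in the entry table, fronted by a small three-entry cache; several table entries can share a VPN as long as their ASNs differ, which is why the loop keeps advancing while i->first == vpn. Below is a stripped-down, self-contained sketch of that multimap walk; MiniEntry, miniLookup and the literal values are hypothetical and not part of gem5.

#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

struct MiniEntry { uint64_t tag; uint8_t asn; bool asma; uint64_t ppn; };

MiniEntry *
miniLookup(std::vector<MiniEntry> &table,
           const std::multimap<uint64_t, int> &byVpn,
           uint64_t vpn, uint8_t asn)
{
    // Visit every table index filed under this VPN until one matches the ASN
    // (or is marked address-space-match-all), mirroring the loop in lookup().
    for (auto i = byVpn.find(vpn); i != byVpn.end() && i->first == vpn; ++i) {
        MiniEntry &e = table[i->second];
        if (e.tag == vpn && (e.asma || e.asn == asn))
            return &e;                      // hit
    }
    return nullptr;                         // miss
}

int main()
{
    std::vector<MiniEntry> table = {{0x10, 1, false, 0xaa},
                                    {0x10, 2, false, 0xbb}};
    std::multimap<uint64_t, int> byVpn = {{0x10, 0}, {0x10, 1}};

    MiniEntry *e = miniLookup(table, byVpn, 0x10, 2);
    std::printf("ppn %#lx\n", e ? (unsigned long)e->ppn : 0ul);  // ppn 0xbb
    return 0;
}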
204 
205 Fault
206 TLB::checkCacheability(RequestPtr &req, bool itb)
207 {
208  // in Alpha, cacheability is controlled by upper-level bits of the
209  // physical address
210 
211  /*
212  * We support having the uncacheable bit in either bit 39 or bit
213  * 40. The Turbolaser platform (and EV5) support having the bit
214  * in 39, but Tsunami (which Linux assumes uses an EV6) generates
215  * accesses with the bit in 40. So we must check for both, but we
216  * have debug flags to catch a weird case where both are used,
217  * which shouldn't happen.
218  */
219 
220 
221  if (req->getPaddr() & PAddrUncachedBit43) {
222  // IPR memory space not implemented
223  if (PAddrIprSpace(req->getPaddr())) {
224  return std::make_shared<UnimpFault>(
225  "IPR memory space not implemented!");
226  } else {
227  // mark request as uncacheable
228  req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
229 
230  // Clear bits 42:35 of the physical address (10-2 in
231  // Tsunami manual)
232  req->setPaddr(req->getPaddr() & PAddrUncachedMask);
233  }
234  // We shouldn't be able to read from an uncachable address in Alpha as
235  // we don't have a ROM and we don't want to try to fetch from a device
236  // register as we destroy any data that is clear-on-read.
237  if (req->isUncacheable() && itb)
238  return std::make_shared<UnimpFault>(
239  "CPU trying to fetch from uncached I/O");
240 
241  }
242  return NoFault;
243 }
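Editor's note: a short, self-contained illustration of the address manipulation above. The constant values are restated here as assumptions about what ev5.hh defines (bit 43 selects the uncached space; the mask keeps bit 43 and bits 34:0); only the logic is taken from checkCacheability().

#include <cstdint>
#include <cstdio>

// Assumed values mirroring PAddrUncachedBit43 / PAddrUncachedMask in ev5.hh.
static const uint64_t kUncachedBit43 = 0x80000000000ULL;  // bit 43
static const uint64_t kUncachedMask  = 0x807ffffffffULL;  // clears bits 42:35

int main()
{
    uint64_t paddr = 0x90000001234ULL;   // bit 43 and bit 40 set, offset 0x1234
    if (paddr & kUncachedBit43) {
        // checkCacheability() additionally marks the request uncacheable here.
        paddr &= kUncachedMask;          // clear bits 42:35 (Tsunami manual 10-2)
    }
    std::printf("%#llx\n", (unsigned long long)paddr);   // 0x80000001234
    return 0;
}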
244 
245 
246 // insert a new TLB entry
247 void
248 TLB::insert(Addr addr, TlbEntry &entry)
249 {
250  flushCache();
251  VAddr vaddr = addr;
252  if (table[nlu].valid) {
253  Addr oldvpn = table[nlu].tag;
254  PageTable::iterator i = lookupTable.find(oldvpn);
255 
256  if (i == lookupTable.end())
257  panic("TLB entry not found in lookupTable");
258 
259  int index;
260  while ((index = i->second) != nlu) {
261  if (table[index].tag != oldvpn)
262  panic("TLB entry not found in lookupTable");
263 
264  ++i;
265  }
266 
267  DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);
268 
269  lookupTable.erase(i);
270  }
271 
272  DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);
273 
274  table[nlu] = entry;
275  table[nlu].tag = vaddr.vpn();
276  table[nlu].valid = true;
277 
278  lookupTable.insert(make_pair(vaddr.vpn(), nlu));
279  nextnlu();
280 }
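Editor's note: insert() always overwrites slot nlu ("next line to use") and then calls nextnlu(), i.e. replacement is round-robin rather than LRU. nextnlu() itself is defined in tlb.hh and not shown in this listing; the sketch below is only a plausible stand-in for such a wrapping cursor, with hypothetical names.

#include <cstddef>
#include <cstdio>

// Hypothetical round-robin victim selector: hand out slots 0..size-1 in order
// and wrap around, the way nlu/nextnlu() appear to be used by insert() above.
struct RoundRobin {
    std::size_t nlu = 0;
    std::size_t size;
    explicit RoundRobin(std::size_t s) : size(s) {}

    std::size_t nextVictim()
    {
        std::size_t victim = nlu;
        if (++nlu >= size)
            nlu = 0;
        return victim;
    }
};

int main()
{
    RoundRobin rr(3);
    for (int i = 0; i < 5; i++)
        std::printf("%zu ", rr.nextVictim());   // prints: 0 1 2 0 1
    std::printf("\n");
    return 0;
}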
281 
282 void
283 TLB::flushAll()
284 {
285  DPRINTF(TLB, "flushAll\n");
286  std::fill(table.begin(), table.end(), TlbEntry());
287  flushCache();
288  lookupTable.clear();
289  nlu = 0;
290 }
291 
292 void
293 TLB::flushProcesses()
294 {
295  flushCache();
296  PageTable::iterator i = lookupTable.begin();
297  PageTable::iterator end = lookupTable.end();
298  while (i != end) {
299  int index = i->second;
300  TlbEntry *entry = &table[index];
301  assert(entry->valid);
302 
303  // we can't increment i after we erase it, so save a copy and
304  // increment it to get the next entry now
305  PageTable::iterator cur = i;
306  ++i;
307 
308  if (!entry->asma) {
309  DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index,
310  entry->tag, entry->ppn);
311  entry->valid = false;
312  lookupTable.erase(cur);
313  }
314  }
315 }
316 
317 void
318 TLB::flushAddr(Addr addr, uint8_t asn)
319 {
320  flushCache();
321  VAddr vaddr = addr;
322 
323  PageTable::iterator i = lookupTable.find(vaddr.vpn());
324  if (i == lookupTable.end())
325  return;
326 
327  while (i != lookupTable.end() && i->first == vaddr.vpn()) {
328  int index = i->second;
329  TlbEntry *entry = &table[index];
330  assert(entry->valid);
331 
332  if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
333  DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
334  entry->ppn);
335 
336  // invalidate this entry
337  entry->valid = false;
338 
339  lookupTable.erase(i++);
340  } else {
341  ++i;
342  }
343  }
344 }
345 
346 
347 void
348 TLB::serialize(CheckpointOut &cp) const
349 {
350  const unsigned size(table.size());
351  SERIALIZE_SCALAR(size);
352  SERIALIZE_SCALAR(nlu);
353 
354  for (int i = 0; i < size; i++)
355  table[i].serializeSection(cp, csprintf("Entry%d", i));
356 }
357 
358 void
359 TLB::unserialize(CheckpointIn &cp)
360 {
361  unsigned size(0);
362  UNSERIALIZE_SCALAR(size);
363  UNSERIALIZE_SCALAR(nlu);
364 
365  table.resize(size);
366  for (int i = 0; i < size; i++) {
367  table[i].unserializeSection(cp, csprintf("Entry%d", i));
368  if (table[i].valid) {
369  lookupTable.insert(make_pair(table[i].tag, i));
370  }
371  }
372 }
373 
374 Fault
375 TLB::translateInst(RequestPtr req, ThreadContext *tc)
376 {
377  //If this is a pal pc, then set PHYSICAL
378  if (FullSystem && PcPAL(req->getPC()))
379  req->setFlags(Request::PHYSICAL);
380 
381  if (PcPAL(req->getPC())) {
382  // strip off PAL PC marker (lsb is 1)
383  req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
384  fetch_hits++;
385  return NoFault;
386  }
387 
388  if (req->getFlags() & Request::PHYSICAL) {
389  req->setPaddr(req->getVaddr());
390  } else {
391  // verify that this is a good virtual address
392  if (!validVirtualAddress(req->getVaddr())) {
393  fetch_acv++;
394  return std::make_shared<ItbAcvFault>(req->getVaddr());
395  }
396 
397 
398  // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
399  // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
400  if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
401  // only valid in kernel mode
402  if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) !=
403  mode_kernel) {
404  fetch_acv++;
405  return std::make_shared<ItbAcvFault>(req->getVaddr());
406  }
407 
408  req->setPaddr(req->getVaddr() & PAddrImplMask);
409 
410  // sign extend the physical address properly
411  if (req->getPaddr() & PAddrUncachedBit40)
412  req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
413  else
414  req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
415  } else {
416  // not a physical address: need to look up pte
417  int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
418  TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(),
419  asn);
420 
421  if (!entry) {
422  fetch_misses++;
423  return std::make_shared<ItbPageFault>(req->getVaddr());
424  }
425 
426  req->setPaddr((entry->ppn << PageShift) +
427  (VAddr(req->getVaddr()).offset()
428  & ~3));
429 
430  // check permissions for this access
431  if (!(entry->xre &
432  (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
433  // instruction access fault
434  fetch_acv++;
435  return std::make_shared<ItbAcvFault>(req->getVaddr());
436  }
437 
438  fetch_hits++;
439  }
440  }
441 
442  // check that the physical address is ok (catch bad physical addresses)
443  if (req->getPaddr() & ~PAddrImplMask) {
444  return std::make_shared<MachineCheckFault>();
445  }
446 
447  return checkCacheability(req, true);
448 
449 }
450 
451 Fault
452 TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
453 {
454  mode_type mode =
455  (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));
456 
460  if (req->getVaddr() & (req->getSize() - 1)) {
461  DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
462  req->getSize());
463  uint64_t flags = write ? MM_STAT_WR_MASK : 0;
464  return std::make_shared<DtbAlignmentFault>(req->getVaddr(),
465  req->getFlags(),
466  flags);
467  }
468 
469  if (PcPAL(req->getPC())) {
470  mode = (req->getFlags() & AlphaRequestFlags::ALTMODE) ?
471  (mode_type)ALT_MODE_AM(
472  tc->readMiscRegNoEffect(IPR_ALT_MODE))
473  : mode_kernel;
474  }
475 
476  if (req->getFlags() & Request::PHYSICAL) {
477  req->setPaddr(req->getVaddr());
478  } else {
479  // verify that this is a good virtual address
480  if (!validVirtualAddress(req->getVaddr())) {
481  if (write) { write_acv++; } else { read_acv++; }
482  uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
483  MM_STAT_BAD_VA_MASK |
484  MM_STAT_ACV_MASK;
485  return std::make_shared<DtbPageFault>(req->getVaddr(),
486  req->getFlags(),
487  flags);
488  }
489 
490  // Check for "superpage" mapping
491  if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
492  // only valid in kernel mode
493  if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
494  mode_kernel) {
495  if (write) { write_acv++; } else { read_acv++; }
496  uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
497  MM_STAT_ACV_MASK);
498 
499  return std::make_shared<DtbAcvFault>(req->getVaddr(),
500  req->getFlags(),
501  flags);
502  }
503 
504  req->setPaddr(req->getVaddr() & PAddrImplMask);
505 
506  // sign extend the physical address properly
507  if (req->getPaddr() & PAddrUncachedBit40)
508  req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
509  else
510  req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
511  } else {
512  if (write)
513  write_accesses++;
514  else
515  read_accesses++;
516 
517  uint8_t asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
518 
519  // not a physical address: need to look up pte
520  TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);
521 
522  if (!entry) {
523  // page fault
524  if (write) { write_misses++; } else { read_misses++; }
525  uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
526  MM_STAT_DTB_MISS_MASK;
527  return (req->getFlags() & AlphaRequestFlags::VPTE) ?
528  (Fault)(std::make_shared<PDtbMissFault>(req->getVaddr(),
529  req->getFlags(),
530  flags)) :
531  (Fault)(std::make_shared<NDtbMissFault>(req->getVaddr(),
532  req->getFlags(),
533  flags));
534  }
535 
536  req->setPaddr((entry->ppn << PageShift) +
537  VAddr(req->getVaddr()).offset());
538 
539  if (write) {
540  if (!(entry->xwe & MODE2MASK(mode))) {
541  // declare the instruction access fault
542  write_acv++;
543  uint64_t flags = MM_STAT_WR_MASK |
544  MM_STAT_ACV_MASK |
545  (entry->fonw ? MM_STAT_FONW_MASK : 0);
546  return std::make_shared<DtbPageFault>(req->getVaddr(),
547  req->getFlags(),
548  flags);
549  }
550  if (entry->fonw) {
551  write_acv++;
552  uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK;
553  return std::make_shared<DtbPageFault>(req->getVaddr(),
554  req->getFlags(),
555  flags);
556  }
557  } else {
558  if (!(entry->xre & MODE2MASK(mode))) {
559  read_acv++;
560  uint64_t flags = MM_STAT_ACV_MASK |
561  (entry->fonr ? MM_STAT_FONR_MASK : 0);
562  return std::make_shared<DtbAcvFault>(req->getVaddr(),
563  req->getFlags(),
564  flags);
565  }
566  if (entry->fonr) {
567  read_acv++;
568  uint64_t flags = MM_STAT_FONR_MASK;
569  return std::make_shared<DtbPageFault>(req->getVaddr(),
570  req->getFlags(),
571  flags);
572  }
573  }
574  }
575 
576  if (write)
577  write_hits++;
578  else
579  read_hits++;
580  }
581 
582  // check that the physical address is ok (catch bad physical addresses)
583  if (req->getPaddr() & ~PAddrImplMask) {
584  return std::make_shared<MachineCheckFault>();
585  }
586 
587  return checkCacheability(req);
588 }
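Editor's note: both translateInst() and translateData() above map the EV6 superpage region (VA<47:41> == 0x7e) straight to a physical address and then "sign extend" it based on bit 40. Below is a worked sketch of just that arithmetic, assuming a 44-bit implemented physical address space (PAddrImplMask) as on Tsunami; the constants are restated here as assumptions, not read from ev5.hh.

#include <cstdint>
#include <cstdio>

// Assumed constants mirroring ev5.hh: 44 implemented physical address bits.
static const uint64_t kPAddrImplMask      = (1ULL << 44) - 1;  // bits 43:0
static const uint64_t kPAddrUncachedBit40 = 1ULL << 40;

// Mirrors only the superpage masking/sign-extension steps above.
static uint64_t superpageToPhys(uint64_t vaddr)
{
    uint64_t paddr = vaddr & kPAddrImplMask;
    if (paddr & kPAddrUncachedBit40)
        paddr |= 0xf0000000000ULL;   // keep the access in uncached/IO space
    else
        paddr &= 0xffffffffffULL;    // drop the kseg tag bits above bit 39
    return paddr;
}

int main()
{
    // Cached kseg access: 0xfffffc0000005678 -> 0x5678
    std::printf("%#llx\n", (unsigned long long)superpageToPhys(0xfffffc0000005678ULL));
    // With bit 40 set the result stays in the uncached region: -> 0xf0000001234
    std::printf("%#llx\n", (unsigned long long)superpageToPhys(0xfffffd0000001234ULL));
    return 0;
}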
589 
590 TlbEntry &
591 TLB::index(bool advance)
592 {
593  TlbEntry *entry = &table[nlu];
594 
595  if (advance)
596  nextnlu();
597 
598  return *entry;
599 }
600 
601 Fault
602 TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
603 {
604  if (mode == Execute)
605  return translateInst(req, tc);
606  else
607  return translateData(req, tc, mode == Write);
608 }
609 
610 void
611 TLB::translateTiming(RequestPtr req, ThreadContext *tc,
612  Translation *translation, Mode mode)
613 {
614  assert(translation);
615  translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
616 }
617 
618 Fault
619 TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode)
620 {
621  panic("Not implemented\n");
622  return NoFault;
623 }
624 
625 Fault
626 TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
627 {
628  return NoFault;
629 }
630 
631 } // namespace AlphaISA
632 
633 AlphaISA::TLB *
634 AlphaTLBParams::create()
635 {
636  return new AlphaISA::TLB(this);
637 }