gem5
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
tlb.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010-2013, 2016-2017 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 2001-2005 The Regents of The University of Michigan
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions are
19  * met: redistributions of source code must retain the above copyright
20  * notice, this list of conditions and the following disclaimer;
21  * redistributions in binary form must reproduce the above copyright
22  * notice, this list of conditions and the following disclaimer in the
23  * documentation and/or other materials provided with the distribution;
24  * neither the name of the copyright holders nor the names of its
25  * contributors may be used to endorse or promote products derived from
26  * this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  *
40  * Authors: Ali Saidi
41  * Nathan Binkert
42  * Steve Reinhardt
43  */
44 
45 #include "arch/arm/tlb.hh"
46 
47 #include <memory>
48 #include <string>
49 #include <vector>
50 
51 #include "arch/arm/faults.hh"
52 #include "arch/arm/pagetable.hh"
54 #include "arch/arm/stage2_mmu.hh"
55 #include "arch/arm/system.hh"
56 #include "arch/arm/table_walker.hh"
57 #include "arch/arm/utility.hh"
59 #include "base/inifile.hh"
60 #include "base/str.hh"
61 #include "base/trace.hh"
62 #include "cpu/base.hh"
63 #include "cpu/thread_context.hh"
64 #include "debug/Checkpoint.hh"
65 #include "debug/TLB.hh"
66 #include "debug/TLBVerbose.hh"
67 #include "mem/page_table.hh"
68 #include "mem/request.hh"
69 #include "params/ArmTLB.hh"
70 #include "sim/full_system.hh"
71 #include "sim/process.hh"
72 
// NOTE(review): file-scope `using namespace std;` is discouraged in modern
// C++, but it matches the gem5 style of this era; kept for consistency.
73 using namespace std;
74 using namespace ArmISA;
75 
// TLB::TLB — build an ARM TLB of p->size entries kept in an MRU-ordered
// array, wire the table walker's back-pointer to this TLB, and cache the
// platform's m5op address range when the system is an ArmSystem.
// NOTE(review): the extraction this text came from dropped original source
// lines 90-92 inside this body (the cached system-level properties hinted
// at by the comment below) — recover them from the pristine tlb.cc.
76 TLB::TLB(const ArmTLBParams *p)
77  : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
78  isStage2(p->is_stage2), stage2Req(false), _attr(0),
79  directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
80  stage2Mmu(NULL), test(nullptr), rangeMRU(1),
81  aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
82  isHyp(false), asid(0), vmid(0), dacr(0),
83  miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
84 {
85  const ArmSystem *sys = dynamic_cast<const ArmSystem *>(p->sys);
86 
87  tableWalker->setTlb(this);
88 
89  // Cache system-level properties
93 
94  if (sys)
95  m5opRange = sys->m5opRange();
96 }
97 
// Destructor (signature line dropped by the extraction — presumably
// `TLB::~TLB()`, TODO confirm): releases the entry array allocated with
// `new TlbEntry[p->size]` in the constructor.
99 {
100  delete[] table;
101 }
102 
// Initialization hook (signature line dropped by the extraction).
// NOTE(review): the body of the `if` was also dropped — for a stage-1 TLB
// with a stage-2 MMU present it presumably fetches the stage-2 TLB pointer
// from stage2Mmu; verify against the original tlb.cc.
103 void
105 {
106  if (stage2Mmu && !isStage2)
108 }
109 
// Record the stage-2 MMU (signature line dropped by the extraction;
// parameters are evidently a Stage2MMU* `m` and a MasterID `master_id`)
// and forward both to the table walker.
110 void
112 {
113  stage2Mmu = m;
114  tableWalker->setMMU(m, master_id);
115 }
116 
// Functional VA->PA translation helper (signature line dropped by the
// extraction; takes a ThreadContext*, a virtual address `va`, and an
// output physical address `pa`). Returns true on a TLB hit, false on a
// miss; never walks the page tables.
117 bool
119 {
120  updateMiscReg(tc);
121 
122  if (directToStage2) {
123  assert(stage2Tlb);
// When stage-1 translation is bypassed, delegate directly to stage 2.
124  return stage2Tlb->translateFunctional(tc, va, pa);
125  }
126 
// functional=true so the lookup does not perturb the MRU ordering.
127  TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
128  aarch64 ? aarch64EL : EL1);
129  if (!e)
130  return false;
131  pa = e->pAddr(va);
132  return true;
133 }
134 
// Post-translation hook on the physical address (signature line dropped
// by the extraction). If the PA falls in the m5op pseudo-op range, a
// pseudo-instruction flag is attached to the request; the line that
// constructs it (orig. 141-142) was dropped — the surviving arguments
// decode the function/sub-function from PA bits [15:8] and [7:0].
135 Fault
137 {
138  const Addr paddr = req->getPaddr();
139 
140  if (m5opRange.contains(paddr)) {
143  (paddr >> 8) & 0xFF,
144  paddr & 0xFF));
145  }
146 
147  return NoFault;
148 }
149 
150 TlbEntry*
151 TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
152  bool functional, bool ignore_asn, uint8_t target_el)
153 {
154 
155  TlbEntry *retval = NULL;
156 
157  // Maintaining LRU array
158  int x = 0;
159  while (retval == NULL && x < size) {
160  if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
161  target_el)) ||
162  (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
163  // We only move the hit entry ahead when the position is higher
164  // than rangeMRU
165  if (x > rangeMRU && !functional) {
166  TlbEntry tmp_entry = table[x];
167  for (int i = x; i > 0; i--)
168  table[i] = table[i - 1];
169  table[0] = tmp_entry;
170  retval = &table[0];
171  } else {
172  retval = &table[x];
173  }
174  break;
175  }
176  ++x;
177  }
178 
179  DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
180  "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
181  "el: %d\n",
182  va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
183  retval ? retval->pfn : 0, retval ? retval->size : 0,
184  retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
185  retval ? retval->ns : 0, retval ? retval->nstid : 0,
186  retval ? retval->global : 0, retval ? retval->asid : 0,
187  retval ? retval->el : 0);
188 
189  return retval;
190 }
191 
192 // insert a new TLB entry
// (Signature line dropped by the extraction; the parameter is evidently a
// TlbEntry& `entry`.) The LRU entry at table[size-1] is evicted, every
// entry shifts down one slot, and the new entry lands in the MRU slot 0.
193 void
195 {
196  DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
197  " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
198  " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
199  entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
200  entry.global, entry.valid, entry.nonCacheable, entry.xn,
201  entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
202  entry.isHyp);
203 
// Trace the victim only when a valid entry is actually being evicted.
204  if (table[size - 1].valid)
205  DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
206  "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
207  table[size-1].vpn << table[size-1].N, table[size-1].asid,
208  table[size-1].vmid, table[size-1].pfn << table[size-1].N,
209  table[size-1].size, table[size-1].ap, table[size-1].ns,
210  table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
211  table[size-1].el);
212 
213  //inserting to MRU position and evicting the LRU one
214 
215  for (int i = size - 1; i > 0; --i)
216  table[i] = table[i-1];
217  table[0] = entry;
218 
219  inserts++;
// Notify PMU probe listeners of the refill.
220  ppRefills->notify(1);
221 }
222 
// Debug helper (signature line dropped by the extraction): dump every
// valid entry in the TLB via the TLB debug trace flag.
223 void
225 {
226  int x = 0;
227  TlbEntry *te;
228  DPRINTF(TLB, "Current TLB contents:\n");
229  while (x < size) {
230  te = &table[x];
231  if (te->valid)
232  DPRINTF(TLB, " * %s\n", te->print());
233  ++x;
234  }
235 }
236 
237 void
238 TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
239 {
240  DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
241  (secure_lookup ? "secure" : "non-secure"));
242  int x = 0;
243  TlbEntry *te;
244  while (x < size) {
245  te = &table[x];
246  if (te->valid && secure_lookup == !te->nstid &&
247  (te->vmid == vmid || secure_lookup) &&
248  checkELMatch(target_el, te->el, ignore_el)) {
249 
250  DPRINTF(TLB, " - %s\n", te->print());
251  te->valid = false;
252  flushedEntries++;
253  }
254  ++x;
255  }
256 
257  flushTlb++;
258 
259  // If there's a second stage TLB (and we're not it) then flush it as well
260  // if we're currently in hyp mode
261  if (!isStage2 && isHyp) {
262  stage2Tlb->flushAllSecurity(secure_lookup, true);
263  }
264 }
265 
266 void
267 TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
268 {
269  DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
270  (hyp ? "hyp" : "non-hyp"));
271  int x = 0;
272  TlbEntry *te;
273  while (x < size) {
274  te = &table[x];
275  if (te->valid && te->nstid && te->isHyp == hyp &&
276  checkELMatch(target_el, te->el, ignore_el)) {
277 
278  DPRINTF(TLB, " - %s\n", te->print());
279  flushedEntries++;
280  te->valid = false;
281  }
282  ++x;
283  }
284 
285  flushTlb++;
286 
287  // If there's a second stage TLB (and we're not it) then flush it as well
288  if (!isStage2 && !hyp) {
289  stage2Tlb->flushAllNs(false, true);
290  }
291 }
292 
293 void
294 TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
295 {
296  DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
297  "(%s lookup)\n", mva, asn, (secure_lookup ?
298  "secure" : "non-secure"));
299  _flushMva(mva, asn, secure_lookup, false, false, target_el);
300  flushTlbMvaAsid++;
301 }
302 
303 void
304 TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
305 {
306  DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
307  (secure_lookup ? "secure" : "non-secure"));
308 
309  int x = 0 ;
310  TlbEntry *te;
311 
312  while (x < size) {
313  te = &table[x];
314  if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
315  (te->vmid == vmid || secure_lookup) &&
316  checkELMatch(target_el, te->el, false)) {
317 
318  te->valid = false;
319  DPRINTF(TLB, " - %s\n", te->print());
320  flushedEntries++;
321  }
322  ++x;
323  }
324  flushTlbAsid++;
325 }
326 
327 void
328 TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
329 {
330  DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
331  (secure_lookup ? "secure" : "non-secure"));
332  _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
333  flushTlbMva++;
334 }
335 
336 void
337 TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
338  bool ignore_asn, uint8_t target_el)
339 {
340  TlbEntry *te;
341  // D5.7.2: Sign-extend address to 64 bits
342  mva = sext<56>(mva);
343  te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
344  target_el);
345  while (te != NULL) {
346  if (secure_lookup == !te->nstid) {
347  DPRINTF(TLB, " - %s\n", te->print());
348  te->valid = false;
349  flushedEntries++;
350  }
351  te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
352  target_el);
353  }
354 }
355 
356 void
357 TLB::flushIpaVmid(Addr ipa, bool secure_lookup, bool hyp, uint8_t target_el)
358 {
359  assert(!isStage2);
360  stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, hyp, true, target_el);
361 }
362 
363 bool
364 TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
365 {
366  bool elMatch = true;
367  if (!ignore_el) {
368  if (target_el == 2 || target_el == 3) {
369  elMatch = (tentry_el == target_el);
370  } else {
371  elMatch = (tentry_el == 0) || (tentry_el == 1);
372  }
373  }
374  return elMatch;
375 }
376 
// Drain-resume hook (signature line dropped by the extraction):
// invalidates the cached misc-register state so it is re-read on the
// next translation.
377 void
379 {
380  // We might have unserialized something or switched CPUs, so make
381  // sure to re-read the misc regs.
382  miscRegValid = false;
383 }
384 
// CPU-switch handover (signature line dropped by the extraction; the
// parameter is evidently a BaseTLB* `_otlb`): copy the sticky state from
// the old TLB. NOTE(review): the extraction also dropped orig. lines 393
// and 401 (one more copied field and the stage-2 MMU sync call) — recover
// them from the pristine tlb.cc.
385 void
387 {
388  TLB *otlb = dynamic_cast<TLB*>(_otlb);
389  /* Make sure we actually have a valid type */
390  if (otlb) {
391  _attr = otlb->_attr;
392  haveLPAE = otlb->haveLPAE;
394  stage2Req = otlb->stage2Req;
395 
396  /* Sync the stage2 MMU if they exist in both
397  * the old CPU and the new
398  */
399  if (!isStage2 &&
400  stage2Tlb && otlb->stage2Tlb) {
402  }
403  } else {
404  panic("Incompatible TLB type!");
405  }
406 }
407 
// Checkpoint serialization (signature line dropped by the extraction).
// NOTE(review): orig. lines 413-416 (scalar serialization of fields such
// as _attr) were also dropped — recover from the pristine tlb.cc.
408 void
410 {
411  DPRINTF(Checkpoint, "Serializing Arm TLB\n");
412 
417 
// The entry count is written first so unserialize can bound its loop.
418  int num_entries = size;
419  SERIALIZE_SCALAR(num_entries);
420  for (int i = 0; i < size; i++)
421  table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
422 }
423 
// Checkpoint restore (signature line dropped by the extraction).
// NOTE(review): orig. lines 429-432 (the matching scalar unserialization)
// were dropped — recover from the pristine tlb.cc.
424 void
426 {
427  DPRINTF(Checkpoint, "Unserializing Arm TLB\n");
428 
433 
434  int num_entries;
435  UNSERIALIZE_SCALAR(num_entries);
// Restore at most `size` entries even if the checkpoint holds more.
436  for (int i = 0; i < min(size, num_entries); i++)
437  table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
438 }
439 
// Statistics registration (signature line dropped by the extraction).
// NOTE(review): the extraction dropped the stat-variable name line in
// front of several .name()/.desc() chains (orig. 454, 469, 479, 484, 509,
// 514, 519, 524, 529, 534, 539, 544) and the formula lines orig. 549-552
// and 554 — recover them from the pristine tlb.cc.
440 void
442 {
444  instHits
445  .name(name() + ".inst_hits")
446  .desc("ITB inst hits")
447  ;
448 
449  instMisses
450  .name(name() + ".inst_misses")
451  .desc("ITB inst misses")
452  ;
453 
455  .name(name() + ".inst_accesses")
456  .desc("ITB inst accesses")
457  ;
458 
459  readHits
460  .name(name() + ".read_hits")
461  .desc("DTB read hits")
462  ;
463 
464  readMisses
465  .name(name() + ".read_misses")
466  .desc("DTB read misses")
467  ;
468 
470  .name(name() + ".read_accesses")
471  .desc("DTB read accesses")
472  ;
473 
474  writeHits
475  .name(name() + ".write_hits")
476  .desc("DTB write hits")
477  ;
478 
480  .name(name() + ".write_misses")
481  .desc("DTB write misses")
482  ;
483 
485  .name(name() + ".write_accesses")
486  .desc("DTB write accesses")
487  ;
488 
489  hits
490  .name(name() + ".hits")
491  .desc("DTB hits")
492  ;
493 
494  misses
495  .name(name() + ".misses")
496  .desc("DTB misses")
497  ;
498 
499  accesses
500  .name(name() + ".accesses")
501  .desc("DTB accesses")
502  ;
503 
504  flushTlb
505  .name(name() + ".flush_tlb")
506  .desc("Number of times complete TLB was flushed")
507  ;
508 
510  .name(name() + ".flush_tlb_mva")
511  .desc("Number of times TLB was flushed by MVA")
512  ;
513 
515  .name(name() + ".flush_tlb_mva_asid")
516  .desc("Number of times TLB was flushed by MVA & ASID")
517  ;
518 
520  .name(name() + ".flush_tlb_asid")
521  .desc("Number of times TLB was flushed by ASID")
522  ;
523 
525  .name(name() + ".flush_entries")
526  .desc("Number of entries that have been flushed from TLB")
527  ;
528 
530  .name(name() + ".align_faults")
531  .desc("Number of TLB faults due to alignment restrictions")
532  ;
533 
535  .name(name() + ".prefetch_faults")
536  .desc("Number of TLB faults due to prefetch")
537  ;
538 
540  .name(name() + ".domain_faults")
541  .desc("Number of TLB faults due to domain restrictions")
542  ;
543 
545  .name(name() + ".perms_faults")
546  .desc("Number of TLB faults due to permissions restrictions")
547  ;
548 
// Derived stat: total misses is the sum of the per-mode miss counters.
553  misses = readMisses + writeMisses + instMisses;
555 }
556 
// Probe-point registration (signature line dropped by the extraction):
// creates the PMU probe notified by insert() on each TLB refill.
557 void
559 {
560  ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
561 }
562 
// Syscall-emulation-mode translation (first signature line dropped by the
// extraction). Checks alignment, then maps VA->PA through the process
// page table; returns a page-table fault on a missing mapping.
// NOTE(review): the extraction dropped orig. lines 586-588 (the remaining
// DataAbort constructor arguments) — recover from the pristine tlb.cc.
563 Fault
565  Translation *translation, bool &delay, bool timing)
566 {
567  updateMiscReg(tc);
568  Addr vaddr_tainted = req->getVaddr();
569  Addr vaddr = 0;
// AArch64 VAs may carry tag bits that must be stripped before use.
570  if (aarch64)
571  vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
572  else
573  vaddr = vaddr_tainted;
574  Request::Flags flags = req->getFlags();
575 
576  bool is_fetch = (mode == Execute);
577  bool is_write = (mode == Write);
578 
579  if (!is_fetch) {
580  assert(flags & MustBeOne);
581  if (sctlr.a || !(flags & AllowUnaligned)) {
582  if (vaddr & mask(flags & AlignmentMask)) {
583  // LPAE is always disabled in SE mode
584  return std::make_shared<DataAbort>(
585  vaddr_tainted,
589  }
590  }
591  }
592 
593  Addr paddr;
594  Process *p = tc->getProcessPtr();
595 
596  if (!p->pTable->translate(vaddr, paddr))
597  return std::make_shared<GenericPageTableFault>(vaddr_tainted);
598  req->setPaddr(paddr);
599 
600  return finalizePhysical(req, tc, mode);
601 }
602 
// AArch32 permission check (signature line dropped by the extraction;
// evidently takes a TlbEntry* `te`, the request, and the access mode).
// Checks, in order: HCR.PTW device-walk restriction, alignment to
// device/strongly-ordered memory, prefetch to non-cacheable memory,
// short-descriptor domain permissions, and finally AP/HAP/XN access
// permissions. NOTE(review): several fault-constructor argument lines
// were dropped by the extraction (orig. 604, 612-614, 621, 624, 635-636,
// 648, 666, 671, 760, 768) — recover from the pristine tlb.cc.
603 Fault
605 {
606  Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
607  Request::Flags flags = req->getFlags();
608  bool is_fetch = (mode == Execute);
609  bool is_write = (mode == Write);
610  bool is_priv = isPriv && !(flags & UserMode);
611 
612  // Get the translation type from the actuall table entry
615 
616  // If this is the second stage of translation and the request is for a
617  // stage 1 page table walk then we need to check the HCR.PTW bit. This
618  // allows us to generate a fault if the request targets an area marked
619  // as a device or strongly ordered.
620  if (isStage2 && req->isPTWalk() && hcr.ptw &&
622  return std::make_shared<DataAbort>(
623  vaddr, te->domain, is_write,
625  isStage2, tranMethod);
626  }
627 
628  // Generate an alignment fault for unaligned data accesses to device or
629  // strongly ordered memory
630  if (!is_fetch) {
631  if (te->mtype != TlbEntry::MemoryType::Normal) {
632  if (vaddr & mask(flags & AlignmentMask)) {
633  alignFaults++;
634  return std::make_shared<DataAbort>(
637  tranMethod);
638  }
639  }
640  }
641 
642  if (te->nonCacheable) {
643  // Prevent prefetching from I/O devices.
644  if (req->isPrefetch()) {
645  // Here we can safely use the fault status for the short
646  // desc. format in all cases
647  return std::make_shared<PrefetchAbort>(
649  isStage2, tranMethod);
650  }
651  }
652 
// Short-descriptor format: consult the DACR two-bit field for the
// entry's domain before any access-permission check.
653  if (!te->longDescFormat) {
654  switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
655  case 0:
656  domainFaults++;
657  DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
658  " domain: %#x write:%d\n", dacr,
659  static_cast<uint8_t>(te->domain), is_write);
660  if (is_fetch) {
661  // Use PC value instead of vaddr because vaddr might
662  // be aligned to cache line and should not be the
663  // address reported in FAR
664  return std::make_shared<PrefetchAbort>(
665  req->getPC(),
667  isStage2, tranMethod);
668  } else
669  return std::make_shared<DataAbort>(
670  vaddr, te->domain, is_write,
672  isStage2, tranMethod);
673  case 1:
674  // Continue with permissions check
675  break;
676  case 2:
677  panic("UNPRED domain\n");
678  case 3:
// Domain "manager": all accesses allowed, no permission check.
679  return NoFault;
680  }
681  }
682 
683  // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
684  uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
685  uint8_t hap = te->hap;
686 
687  if (sctlr.afe == 1 || te->longDescFormat)
688  ap |= 1;
689 
690  bool abt;
691  bool isWritable = true;
692  // If this is a stage 2 access (eg for reading stage 1 page table entries)
693  // then don't perform the AP permissions check, we stil do the HAP check
694  // below.
695  if (isStage2) {
696  abt = false;
697  } else {
698  switch (ap) {
699  case 0:
700  DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
701  (int)sctlr.rs);
702  if (!sctlr.xp) {
703  switch ((int)sctlr.rs) {
704  case 2:
705  abt = is_write;
706  break;
707  case 1:
708  abt = is_write || !is_priv;
709  break;
710  case 0:
711  case 3:
712  default:
713  abt = true;
714  break;
715  }
716  } else {
717  abt = true;
718  }
719  break;
720  case 1:
721  abt = !is_priv;
722  break;
723  case 2:
724  abt = !is_priv && is_write;
725  isWritable = is_priv;
726  break;
727  case 3:
728  abt = false;
729  break;
730  case 4:
731  panic("UNPRED premissions\n");
732  case 5:
733  abt = !is_priv || is_write;
734  isWritable = false;
735  break;
736  case 6:
737  case 7:
738  abt = is_write;
739  isWritable = false;
740  break;
741  default:
742  panic("Unknown permissions %#x\n", ap);
743  }
744  }
745 
// HAP is the stage-2 hypervisor access-permission pair: bit 1 gates
// writes, bit 0 gates reads.
746  bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
747  bool xn = te->xn || (isWritable && sctlr.wxn) ||
748  (ap == 3 && sctlr.uwxn && is_priv);
749  if (is_fetch && (abt || xn ||
750  (te->longDescFormat && te->pxn && is_priv) ||
751  (isSecure && te->ns && scr.sif))) {
752  permsFaults++;
753  DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
754  "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
755  ap, is_priv, is_write, te->ns, scr.sif,sctlr.afe);
756  // Use PC value instead of vaddr because vaddr might be aligned to
757  // cache line and should not be the address reported in FAR
758  return std::make_shared<PrefetchAbort>(
759  req->getPC(),
761  isStage2, tranMethod);
762  } else if (abt | hapAbt) {
763  permsFaults++;
764  DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
765  " write:%d\n", ap, is_priv, is_write);
766  return std::make_shared<DataAbort>(
767  vaddr, te->domain, is_write,
769  isStage2 | !abt, tranMethod);
770  }
771  return NoFault;
772 }
773 
774 
// AArch64 permission check (first signature line dropped by the
// extraction). Performs the HCR.PTW device-walk check, the alignment
// check for device memory, the prefetch-to-non-cacheable check, then
// evaluates the grant decision per exception level from the AP/XN/PXN
// (stage 1) or HAP (stage 2) bits. NOTE(review): several
// fault-constructor argument lines were dropped by the extraction
// (orig. 789, 796, 799-800, 811-813, 825-826, 959-960, 967-968) —
// recover from the pristine tlb.cc.
775 Fault
777  ThreadContext *tc)
778 {
779  assert(aarch64);
780 
781  Addr vaddr_tainted = req->getVaddr();
782  Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
783 
784  Request::Flags flags = req->getFlags();
785  bool is_fetch = (mode == Execute);
786  bool is_write = (mode == Write);
787  bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);
788 
790 
791  // If this is the second stage of translation and the request is for a
792  // stage 1 page table walk then we need to check the HCR.PTW bit. This
793  // allows us to generate a fault if the request targets an area marked
794  // as a device or strongly ordered.
795  if (isStage2 && req->isPTWalk() && hcr.ptw &&
797  return std::make_shared<DataAbort>(
798  vaddr_tainted, te->domain, is_write,
801  }
802 
803  // Generate an alignment fault for unaligned accesses to device or
804  // strongly ordered memory
805  if (!is_fetch) {
806  if (te->mtype != TlbEntry::MemoryType::Normal) {
807  if (vaddr & mask(flags & AlignmentMask)) {
808  alignFaults++;
809  return std::make_shared<DataAbort>(
810  vaddr_tainted,
814  }
815  }
816  }
817 
818  if (te->nonCacheable) {
819  // Prevent prefetching from I/O devices.
820  if (req->isPrefetch()) {
821  // Here we can safely use the fault status for the short
822  // desc. format in all cases
823  return std::make_shared<PrefetchAbort>(
824  vaddr_tainted,
827  }
828  }
829 
830  uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
831  bool grant = false;
832 
833  uint8_t xn = te->xn;
834  uint8_t pxn = te->pxn;
835  bool r = !is_write && !is_fetch;
836  bool w = is_write;
837  bool x = is_fetch;
838  DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
839  "w:%d, x:%d\n", ap, xn, pxn, r, w, x);
840 
841  if (isStage2) {
842  assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2);
843  // In stage 2 we use the hypervisor access permission bits.
844  // The following permissions are described in ARM DDI 0487A.f
845  // D4-1802
846  uint8_t hap = 0x3 & te->hap;
847  if (is_fetch) {
848  // sctlr.wxn overrides the xn bit
849  grant = !sctlr.wxn && !xn;
850  } else if (is_write) {
851  grant = hap & 0x2;
852  } else { // is_read
853  grant = hap & 0x1;
854  }
855  } else {
856  switch (aarch64EL) {
857  case EL0:
858  {
// The EL0 decision table is indexed by {AP[1:0], XN, PXN}.
859  uint8_t perm = (ap << 2) | (xn << 1) | pxn;
860  switch (perm) {
861  case 0:
862  case 1:
863  case 8:
864  case 9:
865  grant = x;
866  break;
867  case 4:
868  case 5:
869  grant = r || w || (x && !sctlr.wxn);
870  break;
871  case 6:
872  case 7:
873  grant = r || w;
874  break;
875  case 12:
876  case 13:
877  grant = r || x;
878  break;
879  case 14:
880  case 15:
881  grant = r;
882  break;
883  default:
884  grant = false;
885  }
886  }
887  break;
888  case EL1:
889  {
890  uint8_t perm = (ap << 2) | (xn << 1) | pxn;
891  switch (perm) {
892  case 0:
893  case 2:
894  grant = r || w || (x && !sctlr.wxn);
895  break;
896  case 1:
897  case 3:
898  case 4:
899  case 5:
900  case 6:
901  case 7:
902  // regions that are writeable at EL0 should not be
903  // executable at EL1
904  grant = r || w;
905  break;
906  case 8:
907  case 10:
908  case 12:
909  case 14:
910  grant = r || x;
911  break;
912  case 9:
913  case 11:
914  case 13:
915  case 15:
916  grant = r;
917  break;
918  default:
919  grant = false;
920  }
921  }
922  break;
923  case EL2:
924  case EL3:
925  {
// EL2/EL3 only use AP[1] and XN: {AP[1], XN}.
926  uint8_t perm = (ap & 0x2) | xn;
927  switch (perm) {
928  case 0:
929  grant = r || w || (x && !sctlr.wxn) ;
930  break;
931  case 1:
932  grant = r || w;
933  break;
934  case 2:
935  grant = r || x;
936  break;
937  case 3:
938  grant = r;
939  break;
940  default:
941  grant = false;
942  }
943  }
944  break;
945  }
946  }
947 
948  if (!grant) {
949  if (is_fetch) {
950  permsFaults++;
951  DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
952  "AP:%d priv:%d write:%d ns:%d sif:%d "
953  "sctlr.afe: %d\n",
954  ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
955  // Use PC value instead of vaddr because vaddr might be aligned to
956  // cache line and should not be the address reported in FAR
957  return std::make_shared<PrefetchAbort>(
958  req->getPC(),
961  } else {
962  permsFaults++;
963  DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
964  "priv:%d write:%d\n", ap, is_priv, is_write);
965  return std::make_shared<DataAbort>(
966  vaddr_tainted, te->domain, is_write,
969  }
970  }
971 
972  return NoFault;
973 }
974 
// Full-system translation (first signature line dropped by the
// extraction). Handles alignment checks, the MMU-off fast path (PA=VA
// with synthesized memory attributes), and otherwise obtains a table
// entry (possibly via a table walk), applies its attributes to the
// request, and finalizes the physical address. NOTE(review): the
// extraction dropped a number of lines from this body (orig. 976, 997,
// 1010, 1019-1020, 1037, 1040, 1050, 1056, 1069, 1095, 1100-1101, 1110,
// 1116-1117) — recover them from the pristine tlb.cc.
975 Fault
977  Translation *translation, bool &delay, bool timing,
978  TLB::ArmTranslationType tranType, bool functional)
979 {
980  // No such thing as a functional timing access
981  assert(!(timing && functional));
982 
983  updateMiscReg(tc, tranType);
984 
985  Addr vaddr_tainted = req->getVaddr();
986  Addr vaddr = 0;
987  if (aarch64)
988  vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
989  else
990  vaddr = vaddr_tainted;
991  Request::Flags flags = req->getFlags();
992 
993  bool is_fetch = (mode == Execute);
994  bool is_write = (mode == Write);
995  bool long_desc_format = aarch64 || longDescFormatInUse(tc);
996  ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
998 
999  req->setAsid(asid);
1000 
1001  DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
1002  isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);
1003 
1004  DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
1005  "flags %#lx tranType 0x%x\n", vaddr_tainted, mode, isStage2,
1006  scr, sctlr, flags, tranType);
1007 
1008  if ((req->isInstFetch() && (!sctlr.i)) ||
1009  ((!req->isInstFetch()) && (!sctlr.c))){
1011  }
1012  if (!is_fetch) {
1013  assert(flags & MustBeOne);
1014  if (sctlr.a || !(flags & AllowUnaligned)) {
1015  if (vaddr & mask(flags & AlignmentMask)) {
1016  alignFaults++;
1017  return std::make_shared<DataAbort>(
1018  vaddr_tainted,
1021  tranMethod);
1022  }
1023  }
1024  }
1025 
1026  // If guest MMU is off or hcr.vm=0 go straight to stage2
1027  if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
1028 
// MMU off: the physical address is the (purified) virtual address.
1029  req->setPaddr(vaddr);
1030  // When the MMU is off the security attribute corresponds to the
1031  // security state of the processor
1032  if (isSecure)
1033  req->setFlags(Request::SECURE);
1034 
1035  // @todo: double check this (ARM ARM issue C B3.2.1)
1036  if (long_desc_format || sctlr.tre == 0) {
1038  } else {
1039  if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
1041  }
1042 
1043  // Set memory attributes
1044  TlbEntry temp_te;
1045  temp_te.ns = !isSecure;
1046  if (isStage2 || hcr.dc == 0 || isSecure ||
1047  (isHyp && !(tranType & S1CTran))) {
1048 
1049  temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
1051  temp_te.innerAttrs = 0x0;
1052  temp_te.outerAttrs = 0x0;
1053  temp_te.shareable = true;
1054  temp_te.outerShareable = true;
1055  } else {
1057  temp_te.innerAttrs = 0x3;
1058  temp_te.outerAttrs = 0x3;
1059  temp_te.shareable = false;
1060  temp_te.outerShareable = false;
1061  }
1062  temp_te.setAttributes(long_desc_format);
1063  DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
1064  "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
1065  temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
1066  isStage2);
1067  setAttr(temp_te.attributes);
1068 
1070  }
1071 
1072  DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
1073  isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
1074  // Translation enabled
1075 
1076  TlbEntry *te = NULL;
1077  TlbEntry mergeTe;
1078  Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
1079  functional, &mergeTe);
1080  // only proceed if we have a valid table entry
1081  if ((te == NULL) && (fault == NoFault)) delay = true;
1082 
1083  // If we have the table entry transfer some of the attributes to the
1084  // request that triggered the translation
1085  if (te != NULL) {
1086  // Set memory attributes
1087  DPRINTF(TLBVerbose,
1088  "Setting memory attributes: shareable: %d, innerAttrs: %d, "
1089  "outerAttrs: %d, mtype: %d, isStage2: %d\n",
1090  te->shareable, te->innerAttrs, te->outerAttrs,
1091  static_cast<uint8_t>(te->mtype), isStage2);
1092  setAttr(te->attributes);
1093 
1094  if (te->nonCacheable)
1096 
1097  // Require requests to be ordered if the request goes to
1098  // strongly ordered or device memory (i.e., anything other
1099  // than normal memory requires strict order).
1102 
1103  Addr pa = te->pAddr(vaddr);
1104  req->setPaddr(pa);
1105 
1106  if (isSecure && !te->ns) {
1107  req->setFlags(Request::SECURE);
1108  }
1109  if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
1111  // Unaligned accesses to Device memory should always cause an
1112  // abort regardless of sctlr.a
1113  alignFaults++;
1114  return std::make_shared<DataAbort>(
1115  vaddr_tainted,
1118  tranMethod);
1119  }
1120 
1121  // Check for a trickbox generated address fault
1122  if (fault == NoFault)
1123  fault = testTranslation(req, mode, te->domain);
1124  }
1125 
1126  if (fault == NoFault) {
1127  // Generate Illegal Inst Set State fault if IL bit is set in CPSR
1128  if (aarch64 && is_fetch && cpsr.il == 1) {
1129  return std::make_shared<IllegalInstSetStateFault>();
1130  }
1131 
1132  // Don't try to finalize a physical address unless the
1133  // translation has completed (i.e., there is a table entry).
1134  return te ? finalizePhysical(req, tc, mode) : NoFault;
1135  } else {
1136  return fault;
1137  }
1138 }
1139 
// Atomic-mode translation entry point (first signature line dropped by
// the extraction): forwards to stage 2 when bypassing stage 1, otherwise
// dispatches to the FS or SE translator with timing=false.
1140 Fault
1142  TLB::ArmTranslationType tranType)
1143 {
1144  updateMiscReg(tc, tranType);
1145 
1146  if (directToStage2) {
1147  assert(stage2Tlb);
1148  return stage2Tlb->translateAtomic(req, tc, mode, tranType);
1149  }
1150 
1151  bool delay = false;
1152  Fault fault;
1153  if (FullSystem)
1154  fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
1155  else
1156  fault = translateSe(req, tc, mode, NULL, delay, false);
// Atomic translations must complete immediately.
1157  assert(!delay);
1158  return fault;
1159 }
1160 
// Functional translation entry point (first signature line dropped by the
// extraction): like translateAtomic, but the FS path is invoked with
// functional=true so it never perturbs TLB state.
1161 Fault
1163  TLB::ArmTranslationType tranType)
1164 {
1165  updateMiscReg(tc, tranType);
1166 
1167  if (directToStage2) {
1168  assert(stage2Tlb);
1169  return stage2Tlb->translateFunctional(req, tc, mode, tranType);
1170  }
1171 
1172  bool delay = false;
1173  Fault fault;
1174  if (FullSystem)
1175  fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
1176  else
1177  fault = translateSe(req, tc, mode, NULL, delay, false);
1178  assert(!delay);
1179  return fault;
1180 }
1181 
// Timing-mode translation entry point (first signature line dropped by
// the extraction): forwards to stage 2 when bypassing stage 1, otherwise
// runs translateComplete, which reports via the Translation callback.
1182 Fault
1184  Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
1185 {
1186  updateMiscReg(tc, tranType);
1187 
1188  if (directToStage2) {
1189  assert(stage2Tlb);
1190  return stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
1191  }
1192 
// Timing mode always needs a callback object to deliver the result.
1193  assert(translation);
1194 
1195  return translateComplete(req, tc, translation, mode, tranType, isStage2);
1196 }
1197 
// Shared completion path for timing translations (first signature line
// dropped by the extraction). Runs the FS/SE translator with timing=true
// and signals the Translation object (finish or markDelayed) unless a
// nested stage-2 walk is still in flight.
1198 Fault
1200  Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
1201  bool callFromS2)
1202 {
1203  bool delay = false;
1204  Fault fault;
1205  if (FullSystem)
1206  fault = translateFs(req, tc, mode, translation, delay, true, tranType);
1207  else
1208  fault = translateSe(req, tc, mode, translation, delay, true);
1209  DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay, fault !=
1210  NoFault);
1211  // If we have a translation, and we're not in the middle of doing a stage
1212  // 2 translation tell the translation that we've either finished or its
1213  // going to take a while. By not doing this when we're in the middle of a
1214  // stage 2 translation we prevent marking the translation as delayed twice,
1215  // one when the translation starts and again when the stage 1 translation
1216  // completes.
1217  if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) {
1218  if (!delay)
1219  translation->finish(fault, req, tc, mode);
1220  else
1221  translation->markDelayed();
1222  }
1223  return fault;
1224 }
1225 
1228 {
1229  return &stage2Mmu->getPort();
1230 }
1231 
1232 void
1234 {
1235  // check if the regs have changed, or the translation mode is different.
1236  // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle
1237  // one type of translation anyway
1238  if (miscRegValid && miscRegContext == tc->contextId() &&
1239  ((tranType == curTranType) || isStage2)) {
1240  return;
1241  }
1242 
1243  DPRINTF(TLBVerbose, "TLB variables changed!\n");
1244  cpsr = tc->readMiscReg(MISCREG_CPSR);
1245 
1246  // Dependencies: SCR/SCR_EL3, CPSR
1247  isSecure = inSecureState(tc) &&
1248  !(tranType & HypMode) && !(tranType & S1S2NsTran);
1249 
1250  const OperatingMode op_mode = (OperatingMode) (uint8_t)cpsr.mode;
1251  aarch64 = opModeIs64(op_mode) ||
1252  (opModeToEL(op_mode) == EL0 && ELIs64(tc, EL1));
1253 
1254  if (aarch64) { // AArch64
1255  // determine EL we need to translate in
1256  switch (tranType) {
1257  case S1E0Tran:
1258  case S12E0Tran:
1259  aarch64EL = EL0;
1260  break;
1261  case S1E1Tran:
1262  case S12E1Tran:
1263  aarch64EL = EL1;
1264  break;
1265  case S1E2Tran:
1266  aarch64EL = EL2;
1267  break;
1268  case S1E3Tran:
1269  aarch64EL = EL3;
1270  break;
1271  case NormalTran:
1272  case S1CTran:
1273  case S1S2NsTran:
1274  case HypMode:
1275  aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
1276  break;
1277  }
1278 
1279  switch (aarch64EL) {
1280  case EL0:
1281  case EL1:
1282  {
1285  uint64_t ttbr_asid = ttbcr.a1 ?
1288  asid = bits(ttbr_asid,
1289  (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1290  }
1291  break;
1292  case EL2:
1295  asid = -1;
1296  break;
1297  case EL3:
1300  asid = -1;
1301  break;
1302  }
1305  isPriv = aarch64EL != EL0;
1306  if (haveVirtualization) {
1307  vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
1308  isHyp = tranType & HypMode;
1309  isHyp &= (tranType & S1S2NsTran) == 0;
1310  isHyp &= (tranType & S1CTran) == 0;
1311  // Work out if we should skip the first stage of translation and go
1312  // directly to stage 2. This value is cached so we don't have to
1313  // compute it for every translation.
1314  stage2Req = isStage2 ||
1315  (hcr.vm && !isHyp && !isSecure &&
1316  !(tranType & S1CTran) && (aarch64EL < EL2) &&
1317  !(tranType & S1E1Tran)); // <--- FIX THIS HACK
1318  directToStage2 = !isStage2 && stage2Req && !sctlr.m;
1319  } else {
1320  vmid = 0;
1321  isHyp = false;
1322  directToStage2 = false;
1323  stage2Req = false;
1324  }
1325  } else { // AArch32
1327  !isSecure));
1329  !isSecure));
1330  scr = tc->readMiscReg(MISCREG_SCR);
1331  isPriv = cpsr.mode != MODE_USER;
1332  if (longDescFormatInUse(tc)) {
1333  uint64_t ttbr_asid = tc->readMiscReg(
1335  : MISCREG_TTBR0,
1336  tc, !isSecure));
1337  asid = bits(ttbr_asid, 55, 48);
1338  } else { // Short-descriptor translation table format in use
1339  CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
1341  asid = context_id.asid;
1342  }
1344  !isSecure));
1346  !isSecure));
1348  !isSecure));
1349  hcr = tc->readMiscReg(MISCREG_HCR);
1350 
1351  if (haveVirtualization) {
1352  vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
1353  isHyp = cpsr.mode == MODE_HYP;
1354  isHyp |= tranType & HypMode;
1355  isHyp &= (tranType & S1S2NsTran) == 0;
1356  isHyp &= (tranType & S1CTran) == 0;
1357  if (isHyp) {
1359  }
1360  // Work out if we should skip the first stage of translation and go
1361  // directly to stage 2. This value is cached so we don't have to
1362  // compute it for every translation.
1363  stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
1364  !(tranType & S1CTran);
1365  directToStage2 = stage2Req && !sctlr.m;
1366  } else {
1367  vmid = 0;
1368  stage2Req = false;
1369  isHyp = false;
1370  directToStage2 = false;
1371  }
1372  }
1373  miscRegValid = true;
1374  miscRegContext = tc->contextId();
1375  curTranType = tranType;
1376 }
1377 
1378 Fault
1380  Translation *translation, bool timing, bool functional,
1381  bool is_secure, TLB::ArmTranslationType tranType)
1382 {
1383  bool is_fetch = (mode == Execute);
1384  bool is_write = (mode == Write);
1385 
1386  Addr vaddr_tainted = req->getVaddr();
1387  Addr vaddr = 0;
1388  ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
1389  if (aarch64) {
1390  vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
1391  } else {
1392  vaddr = vaddr_tainted;
1393  }
1394  *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1395  if (*te == NULL) {
1396  if (req->isPrefetch()) {
1397  // if the request is a prefetch don't attempt to fill the TLB or go
1398  // any further with the memory access (here we can safely use the
1399  // fault status for the short desc. format in all cases)
1400  prefetchFaults++;
1401  return std::make_shared<PrefetchAbort>(
1402  vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
1403  }
1404 
1405  if (is_fetch)
1406  instMisses++;
1407  else if (is_write)
1408  writeMisses++;
1409  else
1410  readMisses++;
1411 
1412  // start translation table walk, pass variables rather than
1413  // re-retreaving in table walker for speed
1414  DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1415  vaddr_tainted, asid, vmid);
1416  Fault fault;
1417  fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
1418  translation, timing, functional, is_secure,
1419  tranType, stage2Req);
1420  // for timing mode, return and wait for table walk,
1421  if (timing || fault != NoFault) {
1422  return fault;
1423  }
1424 
1425  *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1426  if (!*te)
1427  printTlb();
1428  assert(*te);
1429  } else {
1430  if (is_fetch)
1431  instHits++;
1432  else if (is_write)
1433  writeHits++;
1434  else
1435  readHits++;
1436  }
1437  return NoFault;
1438 }
1439 
1440 Fault
1442  Translation *translation, bool timing, bool functional,
1443  TlbEntry *mergeTe)
1444 {
1445  Fault fault;
1446 
1447  if (isStage2) {
1448  // We are already in the stage 2 TLB. Grab the table entry for stage
1449  // 2 only. We are here because stage 1 translation is disabled.
1450  TlbEntry *s2Te = NULL;
1451  // Get the stage 2 table entry
1452  fault = getTE(&s2Te, req, tc, mode, translation, timing, functional,
1454  // Check permissions of stage 2
1455  if ((s2Te != NULL) && (fault = NoFault)) {
1456  if(aarch64)
1457  fault = checkPermissions64(s2Te, req, mode, tc);
1458  else
1459  fault = checkPermissions(s2Te, req, mode);
1460  }
1461  *te = s2Te;
1462  return fault;
1463  }
1464 
1465  TlbEntry *s1Te = NULL;
1466 
1467  Addr vaddr_tainted = req->getVaddr();
1468 
1469  // Get the stage 1 table entry
1470  fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
1472  // only proceed if we have a valid table entry
1473  if ((s1Te != NULL) && (fault == NoFault)) {
1474  // Check stage 1 permissions before checking stage 2
1475  if (aarch64)
1476  fault = checkPermissions64(s1Te, req, mode, tc);
1477  else
1478  fault = checkPermissions(s1Te, req, mode);
1479  if (stage2Req & (fault == NoFault)) {
1480  Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
1481  req, translation, mode, timing, functional, curTranType);
1482  fault = s2Lookup->getTe(tc, mergeTe);
1483  if (s2Lookup->isComplete()) {
1484  *te = mergeTe;
1485  // We've finished with the lookup so delete it
1486  delete s2Lookup;
1487  } else {
1488  // The lookup hasn't completed, so we can't delete it now. We
1489  // get round this by asking the object to self delete when the
1490  // translation is complete.
1491  s2Lookup->setSelfDelete();
1492  }
1493  } else {
1494  // This case deals with an S1 hit (or bypass), followed by
1495  // an S2 hit-but-perms issue
1496  if (isStage2) {
1497  DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
1498  vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault);
1499  if (fault != NoFault) {
1500  ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
1501  armFault->annotate(ArmFault::S1PTW, false);
1502  armFault->annotate(ArmFault::OVA, vaddr_tainted);
1503  }
1504  }
1505  *te = s1Te;
1506  }
1507  }
1508  return fault;
1509 }
1510 
1511 void
1513 {
1514  if (!_ti) {
1515  test = nullptr;
1516  } else {
1517  TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
1518  fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
1519  test = ti;
1520  }
1521 }
1522 
1523 Fault
1525 {
1526  if (!test || !req->hasSize() || req->getSize() == 0) {
1527  return NoFault;
1528  } else {
1529  return test->translationCheck(req, isPriv, mode, domain);
1530  }
1531 }
1532 
1533 Fault
1535  TlbEntry::DomainType domain, LookupLevel lookup_level)
1536 {
1537  if (!test) {
1538  return NoFault;
1539  } else {
1540  return test->walkCheck(pa, size, va, is_secure, isPriv, mode,
1541  domain, lookup_level);
1542  }
1543 }
1544 
1545 
1546 ArmISA::TLB *
1547 ArmTLBParams::create()
1548 {
1549  return new ArmISA::TLB(this);
1550 }
uint8_t innerAttrs
Definition: pagetable.hh:116
#define DPRINTF(x,...)
Definition: trace.hh:212
Stats::Formula hits
Definition: tlb.hh:178
int size
Definition: tlb.hh:144
ProbePoints::PMUUPtr ppRefills
PMU probe for TLB refills.
Definition: tlb.hh:183
ExceptionLevel aarch64EL
Definition: tlb.hh:374
bool aarch64
Definition: tlb.hh:373
AddrRange m5opRange
Definition: tlb.hh:396
Bitfield< 15 > te
Definition: mt_constants.hh:62
void flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
Remove any entries that match both a va and asn.
Definition: tlb.cc:294
Fault finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
Definition: tlb.cc:136
decltype(nullptr) constexpr NoFault
Definition: types.hh:189
bool checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
Definition: tlb.cc:364
bool isHyp
Definition: tlb.hh:379
Fault translateFs(RequestPtr req, ThreadContext *tc, Mode mode, Translation *translation, bool &delay, bool timing, ArmTranslationType tranType, bool functional=false)
Definition: tlb.cc:976
virtual ~TLB()
Definition: tlb.cc:98
Bitfield< 7 > i
Definition: miscregs.hh:1378
bool directToStage2
Definition: tlb.hh:148
TLB * stage2Tlb
Definition: tlb.hh:152
virtual Fault translationCheck(RequestPtr req, bool is_priv, BaseTLB::Mode mode, TlbEntry::DomainType domain)=0
Check if a TLB translation should be forced to fail.
Bitfield< 0 > m
Definition: miscregs.hh:1577
#define panic(...)
Definition: misc.hh:153
void setAsid(int asid)
Accessor function for asid.
Definition: request.hh:650
TTBCR ttbcr
Definition: tlb.hh:380
void unserializeSection(CheckpointIn &cp, const char *name)
Unserialize an a child object.
Definition: serialize.cc:585
Declaration of a request, the overall memory request consisting of the parts of the request that are ...
void flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el=false)
Reset the entire TLB.
Definition: tlb.cc:238
virtual void markDelayed()=0
Signal that the translation has been delayed due to a hw page table walk.
bool hasSize() const
Accessor for size.
Definition: request.hh:546
ip6_addr_t addr
Definition: inet.hh:335
bool stage2Req
Definition: tlb.hh:146
bool isSecure
Definition: tlb.hh:378
DmaPort & getPort()
Get the port that ultimately belongs to the stage-two MMU, but is used by the two table walkers...
Definition: stage2_mmu.hh:113
OperatingMode
Definition: types.hh:569
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:146
virtual Process * getProcessPtr()=0
void unserialize(CheckpointIn &cp) override
Unserialize an object.
Definition: tlb.cc:425
Addr getPC() const
Accessor function for pc.
Definition: request.hh:715
bool contains(const Addr &a) const
Determine if the range contains an address.
Definition: addr_range.hh:328
void regStats() override
Register statistics for this object.
Definition: tlb.cc:441
Bitfield< 30 > ti
Stats::Scalar prefetchFaults
Definition: tlb.hh:171
bool global
Definition: pagetable.hh:120
MemoryType mtype
Definition: pagetable.hh:122
Bitfield< 4, 0 > mode
Definition: miscregs.hh:1385
virtual void regStats()
Register statistics for this object.
Definition: sim_object.cc:105
bool isPrefetch() const
Definition: request.hh:770
bool haveVirtualization() const
void drainResume() override
Resume execution after a successful drain.
Definition: tlb.cc:378
ThreadContext is the external interface to all thread state for anything outside of the CPU...
bool isPTWalk() const
Definition: request.hh:778
ExceptionLevel
Definition: types.hh:562
Fault testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode, TlbEntry::DomainType domain, LookupLevel lookup_level)
Definition: tlb.cc:1534
Stats::Formula writeAccesses
Definition: tlb.hh:176
bool isPriv
Definition: tlb.hh:377
TableWalker * tableWalker
Definition: tlb.hh:151
void _flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp, bool ignore_asn, uint8_t target_el)
Remove any entries that match both a va and asn.
Definition: tlb.cc:337
Stats::Scalar flushedEntries
Definition: tlb.hh:169
bool isComplete() const
Stats::Scalar readMisses
Definition: tlb.hh:161
bool ELIs64(ThreadContext *tc, ExceptionLevel el)
Definition: utility.cc:228
Definition: tlb.hh:53
Addr iprAddressPseudoInst(uint8_t func, uint8_t subfunc)
Generate a generic IPR address that emulates a pseudo inst.
Definition: mmapped_ipr.hh:84
Stats::Formula misses
Definition: tlb.hh:179
#define UNSERIALIZE_SCALAR(scalar)
Definition: serialize.hh:145
Bitfield< 0 > ns
Definition: miscregs.hh:1521
bool miscRegValid
Definition: tlb.hh:387
Fault translateComplete(RequestPtr req, ThreadContext *tc, Translation *translation, Mode mode, ArmTranslationType tranType, bool callFromS2)
Definition: tlb.cc:1199
Fault getTe(ThreadContext *tc, TlbEntry *destTe)
Bitfield< 3, 2 > el
Definition: miscregs.hh:1384
HCR hcr
Definition: tlb.hh:385
Stats::Scalar permsFaults
Definition: tlb.hh:173
std::string csprintf(const char *format, const Args &...args)
Definition: cprintf.hh:161
int rangeMRU
Definition: tlb.hh:185
Stats::Scalar writeMisses
Definition: tlb.hh:163
void flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
Remove all entries that match the va regardless of asn.
Definition: tlb.cc:328
bool haveVirtualization
Definition: tlb.hh:393
Stats::Scalar instMisses
Definition: tlb.hh:159
bool translate(Addr vaddr, Addr &paddr)
Translate function.
Definition: page_table.cc:173
Fault translateTiming(RequestPtr req, ThreadContext *tc, Translation *translation, Mode mode, ArmTranslationType tranType=NormalTran)
Definition: tlb.cc:1183
virtual void annotate(AnnotationIDs id, uint64_t val)
Definition: faults.hh:189
uint16_t asid
Definition: tlb.hh:381
void printTlb() const
Definition: tlb.cc:224
void setMMU(Stage2MMU *m, MasterID master_id)
Definition: table_walker.cc:99
The request is to an uncacheable address.
Definition: request.hh:114
Addr pAddr(Addr va) const
Definition: pagetable.hh:224
BaseMasterPort * getMasterPort() override
Get the table walker master port.
Definition: tlb.cc:1227
void serializeSection(CheckpointOut &cp, const char *name) const
Serialize an object into a new section.
Definition: serialize.cc:578
Fault getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode, Translation *translation, bool timing, bool functional, bool is_secure, ArmTranslationType tranType)
Definition: tlb.cc:1379
PRRR prrr
Definition: tlb.hh:383
Stats::Scalar inserts
Definition: tlb.hh:164
static ExceptionLevel opModeToEL(OperatingMode mode)
Definition: types.hh:663
TlbTestInterface * test
Definition: tlb.hh:155
Bitfield< 39, 12 > pa
Definition: miscregs.hh:1829
Addr getPaddr() const
Definition: request.hh:519
bool haveLargeAsid64
Definition: tlb.hh:394
Stats::Scalar domainFaults
Definition: tlb.hh:172
void updateMiscReg(ThreadContext *tc, ArmTranslationType tranType=NormalTran)
Definition: tlb.cc:1233
uint32_t dacr
Definition: tlb.hh:386
const AddrRange & m5opRange() const
Range used by memory-mapped m5 pseudo-ops if enabled.
Definition: system.hh:229
TlbEntry * lookup(Addr vpn, uint16_t asn, uint8_t vmid, bool hyp, bool secure, bool functional, bool ignore_asn, uint8_t target_el)
Lookup an entry in the TLB.
Definition: tlb.cc:151
bool isInstFetch() const
Definition: request.hh:769
uint64_t attributes
Definition: pagetable.hh:106
Stage2MMU * stage2Mmu
Definition: tlb.hh:153
virtual Fault walkCheck(Addr pa, Addr size, Addr va, bool is_secure, Addr is_priv, BaseTLB::Mode mode, TlbEntry::DomainType domain, LookupLevel lookup_level)=0
Check if a page table walker access should be forced to fail.
Fault checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode, ThreadContext *tc)
Definition: tlb.cc:776
bool translateFunctional(ThreadContext *tc, Addr vaddr, Addr &paddr)
Do a functional lookup on the TLB (for debugging) and don't modify any internal state.
Definition: tlb.cc:118
ArmTranslationType
Definition: tlb.hh:124
Bitfield< 0 > w
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:142
Declaration of IniFile object.
uint8_t outerAttrs
Definition: pagetable.hh:117
uint16_t MasterID
Definition: request.hh:85
bool haveLPAE
Definition: tlb.hh:392
Stats::Scalar writeHits
Definition: tlb.hh:162
Bitfield< 8 > va
Definition: miscregs.hh:1473
Stats::Scalar flushTlbMva
Definition: tlb.hh:166
Bitfield< 34 > aarch64
Definition: types.hh:86
void init() override
setup all the back pointers
Definition: tlb.cc:104
PageTableBase * pTable
Definition: process.hh:178
Stats::Scalar instHits
Definition: tlb.hh:158
Stats::Formula accesses
Definition: tlb.hh:180
#define SERIALIZE_SCALAR(scalar)
Definition: serialize.hh:143
virtual MiscReg readMiscReg(int misc_reg)=0
static const int NumArgumentRegs M5_VAR_USED
Definition: process.cc:83
Flags getFlags()
Accessor for flags.
Definition: request.hh:584
Mode
Definition: tlb.hh:61
Stats::Scalar flushTlbMvaAsid
Definition: tlb.hh:167
Bitfield< 9 > e
Definition: miscregs.hh:1376
std::string print() const
Definition: pagetable.hh:279
Derived & name(const std::string &name)
Set the name and marks this stat to print at the end of simulation.
Definition: statistics.hh:254
uint64_t _attr
Definition: tlb.hh:147
ProbePointArg generates a point for the class of Arg.
Stats::Formula readAccesses
Definition: tlb.hh:175
int size()
Definition: pagetable.hh:146
virtual const std::string name() const
Definition: sim_object.hh:117
Stats::Scalar alignFaults
Definition: tlb.hh:170
Declarations of a non-full system Page Table.
bool longDescFormatInUse(ThreadContext *tc)
Definition: utility.cc:192
void flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
Remove any entries that match the asn.
Definition: tlb.cc:304
bool haveVirtualization() const
Returns true if this system implements the virtualization Extensions.
Definition: system.hh:173
Bitfield< 7, 4 > domain
Definition: miscregs.hh:1605
std::ostream CheckpointOut
Definition: serialize.hh:67
Stats::Scalar readHits
Definition: tlb.hh:160
void insert(Addr vaddr, TlbEntry &pte)
Definition: tlb.cc:194
ISA-generic helper functions for memory mapped IPR accesses.
void regProbePoints() override
Register probe points for this object.
Definition: tlb.cc:558
Fault walk(RequestPtr req, ThreadContext *tc, uint16_t asid, uint8_t _vmid, bool _isHyp, TLB::Mode mode, TLB::Translation *_trans, bool timing, bool functional, bool secure, TLB::ArmTranslationType tranType, bool _stage2Req)
void setMMU(Stage2MMU *m, MasterID master_id)
Definition: tlb.cc:111
Addr getVaddr() const
Definition: request.hh:616
A BaseMasterPort is a protocol-agnostic master port, responsible only for the structural connection t...
Definition: port.hh:115
virtual int contextId() const =0
ProbeManager * getProbeManager()
Get the probe manager for this object.
Definition: sim_object.cc:131
DomainType domain
Definition: pagetable.hh:120
NMRR nmrr
Definition: tlb.hh:384
Fault getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode, Translation *translation, bool timing, bool functional, TlbEntry *mergeTe)
Definition: tlb.cc:1441
The request is required to be strictly ordered by CPU models and is non-speculative.
Definition: request.hh:124
Stats::Formula instAccesses
Definition: tlb.hh:177
void serialize(CheckpointOut &cp) const override
Serialize an object.
Definition: tlb.cc:409
Fault translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode, ArmTranslationType tranType=NormalTran)
Definition: tlb.cc:1141
The request targets the secure memory space.
Definition: request.hh:181
SCTLR sctlr
Definition: tlb.hh:375
void setTlb(TLB *_tlb)
Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el, TTBCR tcr)
Removes the tag from tagged addresses if that mode is enabled.
Definition: utility.cc:279
Fault checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
Definition: tlb.cc:604
Bitfield< 3, 0 > mask
Definition: types.hh:64
Derived & desc(const std::string &_desc)
Set the description and marks this stat to print at the end of simulation.
Definition: statistics.hh:287
void flushIpaVmid(Addr ipa, bool secure_lookup, bool hyp, uint8_t target_el)
Invalidate all entries in the stage 2 TLB that match the given ipa and the current VMID...
Definition: tlb.cc:357
CPSR cpsr
Definition: tlb.hh:372
Stats::Scalar flushTlbAsid
Definition: tlb.hh:168
Fault testTranslation(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
Definition: tlb.cc:1524
bool inSecureState(ThreadContext *tc)
Definition: utility.cc:176
int flattenMiscRegNsBanked(MiscRegIndex reg, ThreadContext *tc)
Definition: miscregs.cc:2045
fatal_if(p->js_features.size() > 16,"Too many job slot feature registers specified (%i)\n", p->js_features.size())
T bits(T val, int first, int last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it...
Definition: bitfield.hh:67
This request is to a memory mapped register.
Definition: request.hh:126
unsigned getSize() const
Definition: request.hh:552
The request should be handled by the generic IPR code (only valid together with MMAPPED_IPR) ...
Definition: request.hh:178
void takeOverFrom(BaseTLB *otlb) override
Take over from an old tlb context.
Definition: tlb.cc:386
void flushAllNs(bool hyp, uint8_t target_el, bool ignore_el=false)
Remove all entries in the non secure world, depending on whether they were allocated in hyp mode or n...
Definition: tlb.cc:267
Fault translateSe(RequestPtr req, ThreadContext *tc, Mode mode, Translation *translation, bool &delay, bool timing)
Definition: tlb.cc:564
void setPaddr(Addr paddr)
Set just the physical address.
Definition: request.hh:487
ArmTranslationType curTranType
Definition: tlb.hh:389
LookupLevel lookupLevel
Definition: pagetable.hh:108
bool haveLargeAsid64() const
Bitfield< 0 > p
LookupLevel
Definition: pagetable.hh:77
Bitfield< 1 > x
Definition: types.hh:105
void setAttr(uint64_t attr)
Accessor functions for memory attributes for last accessed TLB entry.
Definition: tlb.hh:319
virtual void finish(const Fault &fault, RequestPtr req, ThreadContext *tc, Mode mode)=0
void setFlags(Flags flags)
Note that unlike other accessors, this function sets specific flags (ORs them in); it does not assign...
Definition: request.hh:595
Bitfield< 29, 6 > pfn
std::shared_ptr< FaultBase > Fault
Definition: types.hh:184
TlbEntry * table
Definition: tlb.hh:143
Abstract superclass for simulation objects.
Definition: sim_object.hh:94
Stats::Scalar flushTlb
Definition: tlb.hh:165
uint8_t vmid
Definition: tlb.hh:382
bool hasPaddr() const
Accessor for paddr.
Definition: request.hh:513
void setAttributes(bool lpae)
Definition: pagetable.hh:272
void setTestInterface(SimObject *ti)
Definition: tlb.cc:1512
TLB * stage2Tlb() const
Definition: stage2_mmu.hh:122
bool isStage2
Definition: tlb.hh:145
ContextID miscRegContext
Definition: tlb.hh:388
bool haveLPAE() const
SCR scr
Definition: tlb.hh:376

Generated on Fri Jun 9 2017 13:03:36 for gem5 by doxygen 1.8.6