tlb.cc
1 /*
2  * Copyright (c) 2001-2005 The Regents of The University of Michigan
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are
7  * met: redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer;
9  * redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution;
12  * neither the name of the copyright holders nor the names of its
13  * contributors may be used to endorse or promote products derived from
14  * this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * Authors: Ali Saidi
29  */
30 
31 #include "arch/sparc/tlb.hh"
32 
33 #include <cstring>
34 
35 #include "arch/sparc/asi.hh"
36 #include "arch/sparc/faults.hh"
37 #include "arch/sparc/registers.hh"
38 #include "base/bitfield.hh"
39 #include "base/trace.hh"
40 #include "cpu/base.hh"
41 #include "cpu/thread_context.hh"
42 #include "debug/IPR.hh"
43 #include "debug/TLB.hh"
44 #include "mem/packet_access.hh"
45 #include "mem/request.hh"
46 #include "sim/full_system.hh"
47 #include "sim/system.hh"
48 
49 /* @todo remove some of the magic constants. -- ali
50  * */
51 namespace SparcISA {
52 
53 TLB::TLB(const Params *p)
54  : BaseTLB(p), size(p->size), usedEntries(0), lastReplaced(0),
55  cacheState(0), cacheValid(false)
56 {
57  // To make this work you'll have to change the hypervisor and OS
58  if (size > 64)
59  fatal("SPARC T1 TLB registers don't support more than 64 TLB entries");
60 
61  tlb = new TlbEntry[size];
62  std::memset(tlb, 0, sizeof(TlbEntry) * size);
63 
64  for (int x = 0; x < size; x++)
65  freeList.push_back(&tlb[x]);
66 
67  c0_tsb_ps0 = 0;
68  c0_tsb_ps1 = 0;
69  c0_config = 0;
70  cx_tsb_ps0 = 0;
71  cx_tsb_ps1 = 0;
72  cx_config = 0;
73  sfsr = 0;
74  tag_access = 0;
75  sfar = 0;
76  cacheEntry[0] = NULL;
77  cacheEntry[1] = NULL;
78 }
79 
80 void
81 TLB::clearUsedBits()
82 {
83  MapIter i;
84  for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
85  TlbEntry *t = i->second;
86  if (!t->pte.locked()) {
87  t->used = false;
88  usedEntries--;
89  }
90  }
91 }
92 
93 
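// Note on the replacement policy implemented by insert() below: conflicting
// entries are invalidated first, then a slot is chosen either explicitly
// (entry != -1), from the free list, or by a round-robin sweep starting at
// lastReplaced that skips locked entries; if every entry is locked the last
// slot is overwritten. The per-entry used bits approximate LRU and are
// cleared in bulk by clearUsedBits() once all entries have been referenced.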
94 void
95 TLB::insert(Addr va, int partition_id, int context_id, bool real,
96  const PageTableEntry& PTE, int entry)
97 {
98  MapIter i;
99  TlbEntry *new_entry = NULL;
100 // TlbRange tr;
101  int x;
102 
103  cacheValid = false;
104  va &= ~(PTE.size()-1);
105  /* tr.va = va;
106  tr.size = PTE.size() - 1;
107  tr.contextId = context_id;
108  tr.partitionId = partition_id;
109  tr.real = real;
110 */
111 
112  DPRINTF(TLB,
113  "TLB: Inserting Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
114  va, PTE.paddr(), partition_id, context_id, (int)real, entry);
115 
116  // Demap any entry that conflicts
117  for (x = 0; x < size; x++) {
118  if (tlb[x].range.real == real &&
119  tlb[x].range.partitionId == partition_id &&
120  tlb[x].range.va < va + PTE.size() - 1 &&
121  tlb[x].range.va + tlb[x].range.size >= va &&
122  (real || tlb[x].range.contextId == context_id ))
123  {
124  if (tlb[x].valid) {
125  freeList.push_front(&tlb[x]);
126  DPRINTF(TLB, "TLB: Conflicting entry %#X , deleting it\n", x);
127 
128  tlb[x].valid = false;
129  if (tlb[x].used) {
130  tlb[x].used = false;
131  usedEntries--;
132  }
133  lookupTable.erase(tlb[x].range);
134  }
135  }
136  }
137 
138  if (entry != -1) {
139  assert(entry < size && entry >= 0);
140  new_entry = &tlb[entry];
141  } else {
142  if (!freeList.empty()) {
143  new_entry = freeList.front();
144  } else {
145  x = lastReplaced;
146  do {
147  ++x;
148  if (x == size)
149  x = 0;
150  if (x == lastReplaced)
151  goto insertAllLocked;
152  } while (tlb[x].pte.locked());
153  lastReplaced = x;
154  new_entry = &tlb[x];
155  }
156  }
157 
158 insertAllLocked:
159  // Use the last entry if they're all locked
160  if (!new_entry) {
161  new_entry = &tlb[size-1];
162  }
163 
164  freeList.remove(new_entry);
165  if (new_entry->valid && new_entry->used)
166  usedEntries--;
167  if (new_entry->valid)
168  lookupTable.erase(new_entry->range);
169 
170 
171  assert(PTE.valid());
172  new_entry->range.va = va;
173  new_entry->range.size = PTE.size() - 1;
174  new_entry->range.partitionId = partition_id;
175  new_entry->range.contextId = context_id;
176  new_entry->range.real = real;
177  new_entry->pte = PTE;
178  new_entry->used = true;
179  new_entry->valid = true;
180  usedEntries++;
181 
182  i = lookupTable.insert(new_entry->range, new_entry);
183  assert(i != lookupTable.end());
184 
185  // If all entries have their used bit set, clear it on them all,
186  // but the one we just inserted
187  if (usedEntries == size) {
188  clearUsedBits();
189  new_entry->used = true;
190  usedEntries++;
191  }
192 }
193 
194 
195 TlbEntry*
196 TLB::lookup(Addr va, int partition_id, bool real, int context_id,
197  bool update_used)
198 {
199  MapIter i;
200  TlbRange tr;
201  TlbEntry *t;
202 
203  DPRINTF(TLB, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
204  va, partition_id, context_id, real);
205  // Assemble full address structure
206  tr.va = va;
207  tr.size = 1;
208  tr.contextId = context_id;
209  tr.partitionId = partition_id;
210  tr.real = real;
211 
212  // Try to find the entry
213  i = lookupTable.find(tr);
214  if (i == lookupTable.end()) {
215  DPRINTF(TLB, "TLB: No valid entry found\n");
216  return NULL;
217  }
218 
219  // Mark the entry's used bit and clear other used bits if needed
220  t = i->second;
221  DPRINTF(TLB, "TLB: Valid entry found pa: %#x size: %#x\n", t->pte.paddr(),
222  t->pte.size());
223 
224  // Update the used bits only if this is a real access (not a fake
225  // one from vtophys())
226  if (!t->used && update_used) {
227  t->used = true;
228  usedEntries++;
229  if (usedEntries == size) {
230  clearUsedBits();
231  t->used = true;
232  usedEntries++;
233  }
234  }
235 
236  return t;
237 }
238 
239 void
240 TLB::dumpAll()
241 {
242  MapIter i;
243  for (int x = 0; x < size; x++) {
244  if (tlb[x].valid) {
245  DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
246  x, tlb[x].range.partitionId, tlb[x].range.contextId,
247  tlb[x].range.real ? 'R' : ' ', tlb[x].range.size,
248  tlb[x].range.va, tlb[x].pte.paddr(), tlb[x].pte());
249  }
250  }
251 }
252 
253 void
254 TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
255 {
256  TlbRange tr;
257  MapIter i;
258 
259  DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
260  va, partition_id, context_id, real);
261 
262  cacheValid = false;
263 
264  // Assemble full address structure
265  tr.va = va;
266  tr.size = 1;
267  tr.contextId = context_id;
268  tr.partitionId = partition_id;
269  tr.real = real;
270 
271  // Demap any entry that conflicts
272  i = lookupTable.find(tr);
273  if (i != lookupTable.end()) {
274  DPRINTF(IPR, "TLB: Demapped page\n");
275  i->second->valid = false;
276  if (i->second->used) {
277  i->second->used = false;
278  usedEntries--;
279  }
280  freeList.push_front(i->second);
281  lookupTable.erase(i);
282  }
283 }
284 
285 void
286 TLB::demapContext(int partition_id, int context_id)
287 {
288  DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
289  partition_id, context_id);
290  cacheValid = false;
291  for (int x = 0; x < size; x++) {
292  if (tlb[x].range.contextId == context_id &&
293  tlb[x].range.partitionId == partition_id) {
294  if (tlb[x].valid) {
295  freeList.push_front(&tlb[x]);
296  }
297  tlb[x].valid = false;
298  if (tlb[x].used) {
299  tlb[x].used = false;
300  usedEntries--;
301  }
302  lookupTable.erase(tlb[x].range);
303  }
304  }
305 }
306 
307 void
308 TLB::demapAll(int partition_id)
309 {
310  DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
311  cacheValid = false;
312  for (int x = 0; x < size; x++) {
313  if (tlb[x].valid && !tlb[x].pte.locked() &&
314  tlb[x].range.partitionId == partition_id) {
315  freeList.push_front(&tlb[x]);
316  tlb[x].valid = false;
317  if (tlb[x].used) {
318  tlb[x].used = false;
319  usedEntries--;
320  }
321  lookupTable.erase(tlb[x].range);
322  }
323  }
324 }
325 
326 void
327 TLB::flushAll()
328 {
329  cacheValid = false;
330  lookupTable.clear();
331 
332  for (int x = 0; x < size; x++) {
333  if (tlb[x].valid)
334  freeList.push_back(&tlb[x]);
335  tlb[x].valid = false;
336  tlb[x].used = false;
337  }
338  usedEntries = 0;
339 }
340 
341 uint64_t
342 TLB::TteRead(int entry)
343 {
344  if (entry >= size)
345  panic("entry: %d\n", entry);
346 
347  assert(entry < size);
348  if (tlb[entry].valid)
349  return tlb[entry].pte();
350  else
351  return (uint64_t)-1ll;
352 }
353 
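// TagRead() below rebuilds the tag value for an entry from its stored range:
// the context id and VA occupy the low bits, the partition id is placed at
// bit 61, the real bit at bit 60, and the complemented page-size field at
// bit 56.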
354 uint64_t
355 TLB::TagRead(int entry)
356 {
357  assert(entry < size);
358  uint64_t tag;
359  if (!tlb[entry].valid)
360  return (uint64_t)-1ll;
361 
362  tag = tlb[entry].range.contextId;
363  tag |= tlb[entry].range.va;
364  tag |= (uint64_t)tlb[entry].range.partitionId << 61;
365  tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
366  tag |= (uint64_t)~tlb[entry].pte._size() << 56;
367  return tag;
368 }
369 
370 bool
371 TLB::validVirtualAddress(Addr va, bool am)
372 {
373  if (am)
374  return true;
375  if (va >= StartVAddrHole && va <= EndVAddrHole)
376  return false;
377  return true;
378 }
379 
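// writeSfsr() packs the fault status register: bit 0 marks it valid, bit 1
// is set as an overwrite indicator when a previous fault was still pending,
// bit 2 records a write, the context type starts at bit 4, bit 6 holds the
// side-effect flag, the fault type starts at bit 7 and the ASI at bit 16.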
380 void
381 TLB::writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
382 {
383  if (sfsr & 0x1)
384  sfsr = 0x3;
385  else
386  sfsr = 1;
387 
388  if (write)
389  sfsr |= 1 << 2;
390  sfsr |= ct << 4;
391  if (se)
392  sfsr |= 1 << 6;
393  sfsr |= ft << 7;
394  sfsr |= asi << 16;
395 }
396 
397 void
398 TLB::writeTagAccess(Addr va, int context)
399 {
400  DPRINTF(TLB, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
401  va, context, mbits(va, 63,13) | mbits(context,12,0));
402 
403  tag_access = mbits(va, 63,13) | mbits(context,12,0);
404 }
405 
406 void
407 TLB::writeSfsr(Addr a, bool write, ContextType ct,
408  bool se, FaultTypes ft, int asi)
409 {
410  DPRINTF(TLB, "TLB: Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
411  a, (int)write, ct, ft, asi);
412  TLB::writeSfsr(write, ct, se, ft, asi);
413  sfar = a;
414 }
415 
416 Fault
417 TLB::translateInst(RequestPtr req, ThreadContext *tc)
418 {
419  uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
420 
421  Addr vaddr = req->getVaddr();
422  TlbEntry *e;
423 
424  assert(req->getArchFlags() == ASI_IMPLICIT);
425 
426  DPRINTF(TLB, "TLB: ITB Request to translate va=%#x size=%d\n",
427  vaddr, req->getSize());
428 
429  // Be fast if we can!
430  if (cacheValid && cacheState == tlbdata) {
431  if (cacheEntry[0]) {
432  if (cacheEntry[0]->range.va < vaddr + sizeof(MachInst) &&
433  cacheEntry[0]->range.va + cacheEntry[0]->range.size >= vaddr) {
434  req->setPaddr(cacheEntry[0]->pte.translate(vaddr));
435  return NoFault;
436  }
437  } else {
438  req->setPaddr(vaddr & PAddrImplMask);
439  return NoFault;
440  }
441  }
442 
443  bool hpriv = bits(tlbdata,0,0);
444  bool red = bits(tlbdata,1,1);
445  bool priv = bits(tlbdata,2,2);
446  bool addr_mask = bits(tlbdata,3,3);
447  bool lsu_im = bits(tlbdata,4,4);
448 
449  int part_id = bits(tlbdata,15,8);
450  int tl = bits(tlbdata,18,16);
451  int pri_context = bits(tlbdata,47,32);
452  int context;
453  ContextType ct;
454  int asi;
455  bool real = false;
456 
457  DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
458  priv, hpriv, red, lsu_im, part_id);
459 
460  if (tl > 0) {
461  asi = ASI_N;
462  ct = Nucleus;
463  context = 0;
464  } else {
465  asi = ASI_P;
466  ct = Primary;
467  context = pri_context;
468  }
469 
470  if ( hpriv || red ) {
471  cacheValid = true;
472  cacheState = tlbdata;
473  cacheEntry[0] = NULL;
474  req->setPaddr(vaddr & PAddrImplMask);
475  return NoFault;
476  }
477 
478  // If the access is unaligned, trap
479  if (vaddr & 0x3) {
480  writeSfsr(false, ct, false, OtherFault, asi);
481  return std::make_shared<MemAddressNotAligned>();
482  }
483 
484  if (addr_mask)
485  vaddr = vaddr & VAddrAMask;
486 
487  if (!validVirtualAddress(vaddr, addr_mask)) {
488  writeSfsr(false, ct, false, VaOutOfRange, asi);
489  return std::make_shared<InstructionAccessException>();
490  }
491 
492  if (!lsu_im) {
493  e = lookup(vaddr, part_id, true);
494  real = true;
495  context = 0;
496  } else {
497  e = lookup(vaddr, part_id, false, context);
498  }
499 
500  if (e == NULL || !e->valid) {
501  writeTagAccess(vaddr, context);
502  if (real) {
503  return std::make_shared<InstructionRealTranslationMiss>();
504  } else {
505  if (FullSystem)
506  return std::make_shared<FastInstructionAccessMMUMiss>();
507  else
508  return std::make_shared<FastInstructionAccessMMUMiss>(
509  req->getVaddr());
510  }
511  }
512 
513  // we're not privileged but accessing a privileged page
514  if (!priv && e->pte.priv()) {
515  writeTagAccess(vaddr, context);
516  writeSfsr(false, ct, false, PrivViolation, asi);
517  return std::make_shared<InstructionAccessException>();
518  }
519 
520  // cache translation data for the next translation
521  cacheValid = true;
522  cacheState = tlbdata;
523  cacheEntry[0] = e;
524 
525  req->setPaddr(e->pte.translate(vaddr));
526  DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
527  return NoFault;
528 }
529 
530 Fault
531 TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
532 {
533  /*
534  * @todo this could really use some profiling and fixing to make
535  * it faster!
536  */
537  uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
538  Addr vaddr = req->getVaddr();
539  Addr size = req->getSize();
540  ASI asi;
541  asi = (ASI)req->getArchFlags();
542  bool implicit = false;
543  bool hpriv = bits(tlbdata,0,0);
544  bool unaligned = vaddr & (size - 1);
545 
546  DPRINTF(TLB, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
547  vaddr, size, asi);
548 
549  if (lookupTable.size() != 64 - freeList.size())
550  panic("Lookup table size: %d free list size: %d\n", lookupTable.size(),
551  freeList.size());
552  if (asi == ASI_IMPLICIT)
553  implicit = true;
554 
555  // Only use the fast path here if there doesn't need to be an unaligned
556  // trap later
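// The cacheEntry/cacheAsi pair consulted below acts as a tiny translation
// cache in front of the lookup table: it is only used while cacheState still
// matches the current MMU state word (tlbdata), and cacheValid is dropped on
// every insert/demap so a stale mapping cannot be reused.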
557  if (!unaligned) {
558  if (hpriv && implicit) {
559  req->setPaddr(vaddr & PAddrImplMask);
560  return NoFault;
561  }
562 
563  // Be fast if we can!
564  if (cacheValid && cacheState == tlbdata) {
565 
566 
567 
568  if (cacheEntry[0]) {
569  TlbEntry *ce = cacheEntry[0];
570  Addr ce_va = ce->range.va;
571  if (cacheAsi[0] == asi &&
572  ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
573  (!write || ce->pte.writable())) {
574  req->setPaddr(ce->pte.translate(vaddr));
575  if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1) {
576  req->setFlags(
577  Request::UNCACHEABLE | Request::STRICT_ORDER);
578  }
579  DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
580  return NoFault;
581  } // if matched
582  } // if cache entry valid
583  if (cacheEntry[1]) {
584  TlbEntry *ce = cacheEntry[1];
585  Addr ce_va = ce->range.va;
586  if (cacheAsi[1] == asi &&
587  ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
588  (!write || ce->pte.writable())) {
589  req->setPaddr(ce->pte.translate(vaddr));
590  if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1) {
591  req->setFlags(
592  Request::UNCACHEABLE | Request::STRICT_ORDER);
593  }
594  DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
595  return NoFault;
596  } // if matched
597  } // if cache entry valid
598  }
599  }
600 
601  bool red = bits(tlbdata,1,1);
602  bool priv = bits(tlbdata,2,2);
603  bool addr_mask = bits(tlbdata,3,3);
604  bool lsu_dm = bits(tlbdata,5,5);
605 
606  int part_id = bits(tlbdata,15,8);
607  int tl = bits(tlbdata,18,16);
608  int pri_context = bits(tlbdata,47,32);
609  int sec_context = bits(tlbdata,63,48);
610 
611  bool real = false;
612  ContextType ct = Primary;
613  int context = 0;
614 
615  TlbEntry *e;
616 
617  DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
618  priv, hpriv, red, lsu_dm, part_id);
619 
620  if (implicit) {
621  if (tl > 0) {
622  asi = ASI_N;
623  ct = Nucleus;
624  context = 0;
625  } else {
626  asi = ASI_P;
627  ct = Primary;
628  context = pri_context;
629  }
630  } else {
631  // We need to check for priv level/asi priv
632  if (!priv && !hpriv && !asiIsUnPriv(asi)) {
633  // It appears that context should be Nucleus in these cases?
634  writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
635  return std::make_shared<PrivilegedAction>();
636  }
637 
638  if (!hpriv && asiIsHPriv(asi)) {
639  writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
640  return std::make_shared<DataAccessException>();
641  }
642 
643  if (asiIsPrimary(asi)) {
644  context = pri_context;
645  ct = Primary;
646  } else if (asiIsSecondary(asi)) {
647  context = sec_context;
648  ct = Secondary;
649  } else if (asiIsNucleus(asi)) {
650  ct = Nucleus;
651  context = 0;
652  } else { // ????
653  ct = Primary;
654  context = pri_context;
655  }
656  }
657 
658  if (!implicit && asi != ASI_P && asi != ASI_S) {
659  if (asiIsLittle(asi))
660  panic("Little Endian ASIs not supported\n");
661 
662  //XXX It's unclear from looking at the documentation how a no fault
663  // load differs from a regular one, other than what happens concerning
664  // nfo and e bits in the TTE
665 // if (asiIsNoFault(asi))
666 // panic("No Fault ASIs not supported\n");
667 
668  if (asiIsPartialStore(asi))
669  panic("Partial Store ASIs not supported\n");
670 
671  if (asiIsCmt(asi))
672  panic("Cmt ASI registers not implemented\n");
673 
674  if (asiIsInterrupt(asi))
675  goto handleIntRegAccess;
676  if (asiIsMmu(asi))
677  goto handleMmuRegAccess;
678  if (asiIsScratchPad(asi))
679  goto handleScratchRegAccess;
680  if (asiIsQueue(asi))
681  goto handleQueueRegAccess;
682  if (asiIsSparcError(asi))
683  goto handleSparcErrorRegAccess;
684 
685  if (!asiIsReal(asi) && !asiIsNucleus(asi) && !asiIsAsIfUser(asi) &&
686  !asiIsTwin(asi) && !asiIsBlock(asi) && !asiIsNoFault(asi))
687  panic("Accessing ASI %#X. Should we?\n", asi);
688  }
689 
690  // If the access is unaligned, trap
691  if (unaligned) {
692  writeSfsr(vaddr, false, ct, false, OtherFault, asi);
693  return std::make_shared<MemAddressNotAligned>();
694  }
695 
696  if (addr_mask)
697  vaddr = vaddr & VAddrAMask;
698 
699  if (!validVirtualAddress(vaddr, addr_mask)) {
700  writeSfsr(vaddr, false, ct, true, VaOutOfRange, asi);
701  return std::make_shared<DataAccessException>();
702  }
703 
704  if ((!lsu_dm && !hpriv && !red) || asiIsReal(asi)) {
705  real = true;
706  context = 0;
707  }
708 
709  if (hpriv && (implicit || (!asiIsAsIfUser(asi) && !asiIsReal(asi)))) {
710  req->setPaddr(vaddr & PAddrImplMask);
711  return NoFault;
712  }
713 
714  e = lookup(vaddr, part_id, real, context);
715 
716  if (e == NULL || !e->valid) {
717  writeTagAccess(vaddr, context);
718  DPRINTF(TLB, "TLB: DTB Failed to find matching TLB entry\n");
719  if (real) {
720  return std::make_shared<DataRealTranslationMiss>();
721  } else {
722  if (FullSystem)
723  return std::make_shared<FastDataAccessMMUMiss>();
724  else
725  return std::make_shared<FastDataAccessMMUMiss>(
726  req->getVaddr());
727  }
728 
729  }
730 
731  if (!priv && e->pte.priv()) {
732  writeTagAccess(vaddr, context);
733  writeSfsr(vaddr, write, ct, e->pte.sideffect(), PrivViolation, asi);
734  return std::make_shared<DataAccessException>();
735  }
736 
737  if (write && !e->pte.writable()) {
738  writeTagAccess(vaddr, context);
739  writeSfsr(vaddr, write, ct, e->pte.sideffect(), OtherFault, asi);
740  return std::make_shared<FastDataAccessProtection>();
741  }
742 
743  if (e->pte.nofault() && !asiIsNoFault(asi)) {
744  writeTagAccess(vaddr, context);
745  writeSfsr(vaddr, write, ct, e->pte.sideffect(), LoadFromNfo, asi);
746  return std::make_shared<DataAccessException>();
747  }
748 
749  if (e->pte.sideffect() && asiIsNoFault(asi)) {
750  writeTagAccess(vaddr, context);
751  writeSfsr(vaddr, write, ct, e->pte.sideffect(), SideEffect, asi);
752  return std::make_shared<DataAccessException>();
753  }
754 
755  if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
756  req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
757 
758  // cache translation data for the next translation
759  cacheState = tlbdata;
760  if (!cacheValid) {
761  cacheEntry[1] = NULL;
762  cacheEntry[0] = NULL;
763  }
764 
765  if (cacheEntry[0] != e && cacheEntry[1] != e) {
766  cacheEntry[1] = cacheEntry[0];
767  cacheEntry[0] = e;
768  cacheAsi[1] = cacheAsi[0];
769  cacheAsi[0] = asi;
770  if (implicit)
771  cacheAsi[0] = (ASI)0;
772  }
773  cacheValid = true;
774  req->setPaddr(e->pte.translate(vaddr));
775  DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
776  return NoFault;
777 
779 handleIntRegAccess:
780  if (!hpriv) {
781  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
782  if (priv)
783  return std::make_shared<DataAccessException>();
784  else
785  return std::make_shared<PrivilegedAction>();
786  }
787 
788  if ((asi == ASI_SWVR_UDB_INTR_W && !write) ||
789  (asi == ASI_SWVR_UDB_INTR_R && write)) {
790  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
791  return std::make_shared<DataAccessException>();
792  }
793 
794  goto regAccessOk;
795 
796 
797 handleScratchRegAccess:
798  if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
799  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
800  return std::make_shared<DataAccessException>();
801  }
802  goto regAccessOk;
803 
804 handleQueueRegAccess:
805  if (!priv && !hpriv) {
806  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
807  return std::make_shared<PrivilegedAction>();
808  }
809  if ((!hpriv && vaddr & 0xF) || vaddr > 0x3f8 || vaddr < 0x3c0) {
810  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
811  return std::make_shared<DataAccessException>();
812  }
813  goto regAccessOk;
814 
815 handleSparcErrorRegAccess:
816  if (!hpriv) {
817  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
818  if (priv)
819  return std::make_shared<DataAccessException>();
820  else
821  return std::make_shared<PrivilegedAction>();
822  }
823  goto regAccessOk;
824 
825 
826 regAccessOk:
827 handleMmuRegAccess:
828  DPRINTF(TLB, "TLB: DTB Translating MM IPR access\n");
829  req->setFlags(Request::MMAPPED_IPR);
830  req->setPaddr(req->getVaddr());
831  return NoFault;
832 };
833 
834 Fault
835 TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
836 {
837  if (mode == Execute)
838  return translateInst(req, tc);
839  else
840  return translateData(req, tc, mode == Write);
841 }
842 
843 void
844 TLB::translateTiming(RequestPtr req, ThreadContext *tc,
845  Translation *translation, Mode mode)
846 {
847  assert(translation);
848  translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
849 }
850 
851 Fault
852 TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode)
853 {
854  panic("Not implemented\n");
855  return NoFault;
856 }
857 
858 Fault
859 TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
860 {
861  return NoFault;
862 }
863 
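// doMmuRegRead()/doMmuRegWrite() below implement the memory-mapped ASI
// register space for the data MMU; registers that belong to the instruction
// MMU are reached through tc->getITBPtr(), so a single DTB instance services
// both sets of TSB base, config and tag access registers.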
864 Cycles
865 TLB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
866 {
867  Addr va = pkt->getAddr();
868  ASI asi = (ASI)pkt->req->getArchFlags();
869  uint64_t temp;
870 
871  DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
872  (uint32_t)pkt->req->getArchFlags(), pkt->getAddr());
873 
874  TLB *itb = tc->getITBPtr();
875 
876  switch (asi) {
877  case ASI_LSU_CONTROL_REG:
878  assert(va == 0);
879  pkt->set(tc->readMiscReg(MISCREG_MMU_LSU_CTRL));
880  break;
881  case ASI_MMU:
882  switch (va) {
883  case 0x8:
884  pkt->set(tc->readMiscReg(MISCREG_MMU_P_CONTEXT));
885  break;
886  case 0x10:
887  pkt->set(tc->readMiscReg(MISCREG_MMU_S_CONTEXT));
888  break;
889  default:
890  goto doMmuReadError;
891  }
892  break;
893  case ASI_QUEUE:
894  pkt->set(tc->readMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
895  (va >> 4) - 0x3c));
896  break;
897  case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
898  assert(va == 0);
899  pkt->set(c0_tsb_ps0);
900  break;
901  case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
902  assert(va == 0);
903  pkt->set(c0_tsb_ps1);
904  break;
905  case ASI_DMMU_CTXT_ZERO_CONFIG:
906  assert(va == 0);
907  pkt->set(c0_config);
908  break;
909  case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
910  assert(va == 0);
911  pkt->set(itb->c0_tsb_ps0);
912  break;
913  case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
914  assert(va == 0);
915  pkt->set(itb->c0_tsb_ps1);
916  break;
917  case ASI_IMMU_CTXT_ZERO_CONFIG:
918  assert(va == 0);
919  pkt->set(itb->c0_config);
920  break;
921  case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
922  assert(va == 0);
923  pkt->set(cx_tsb_ps0);
924  break;
925  case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
926  assert(va == 0);
927  pkt->set(cx_tsb_ps1);
928  break;
929  case ASI_DMMU_CTXT_NONZERO_CONFIG:
930  assert(va == 0);
931  pkt->set(cx_config);
932  break;
933  case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
934  assert(va == 0);
935  pkt->set(itb->cx_tsb_ps0);
936  break;
937  case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
938  assert(va == 0);
939  pkt->set(itb->cx_tsb_ps1);
940  break;
941  case ASI_IMMU_CTXT_NONZERO_CONFIG:
942  assert(va == 0);
943  pkt->set(itb->cx_config);
944  break;
945  case ASI_SPARC_ERROR_STATUS_REG:
946  pkt->set((uint64_t)0);
947  break;
948  case ASI_HYP_SCRATCHPAD:
949  case ASI_SCRATCHPAD:
950  pkt->set(tc->readMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3)));
951  break;
952  case ASI_IMMU:
953  switch (va) {
954  case 0x0:
955  temp = itb->tag_access;
956  pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
957  break;
958  case 0x18:
959  pkt->set(itb->sfsr);
960  break;
961  case 0x30:
962  pkt->set(itb->tag_access);
963  break;
964  default:
965  goto doMmuReadError;
966  }
967  break;
968  case ASI_DMMU:
969  switch (va) {
970  case 0x0:
971  temp = tag_access;
972  pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
973  break;
974  case 0x18:
975  pkt->set(sfsr);
976  break;
977  case 0x20:
978  pkt->set(sfar);
979  break;
980  case 0x30:
981  pkt->set(tag_access);
982  break;
983  case 0x80:
984  pkt->set(tc->readMiscReg(MISCREG_MMU_PART_ID));
985  break;
986  default:
987  goto doMmuReadError;
988  }
989  break;
990  case ASI_DMMU_TSB_PS0_PTR_REG:
991  pkt->set(MakeTsbPtr(Ps0,
992  tag_access,
993  c0_tsb_ps0,
994  c0_config,
995  cx_tsb_ps0,
996  cx_config));
997  break;
998  case ASI_DMMU_TSB_PS1_PTR_REG:
999  pkt->set(MakeTsbPtr(Ps1,
1000  tag_access,
1001  c0_tsb_ps1,
1002  c0_config,
1003  cx_tsb_ps1,
1004  cx_config));
1005  break;
1006  case ASI_IMMU_TSB_PS0_PTR_REG:
1007  pkt->set(MakeTsbPtr(Ps0,
1008  itb->tag_access,
1009  itb->c0_tsb_ps0,
1010  itb->c0_config,
1011  itb->cx_tsb_ps0,
1012  itb->cx_config));
1013  break;
1014  case ASI_IMMU_TSB_PS1_PTR_REG:
1015  pkt->set(MakeTsbPtr(Ps1,
1016  itb->tag_access,
1017  itb->c0_tsb_ps1,
1018  itb->c0_config,
1019  itb->cx_tsb_ps1,
1020  itb->cx_config));
1021  break;
1022  case ASI_SWVR_INTR_RECEIVE:
1023  {
1024  SparcISA::Interrupts * interrupts =
1025  dynamic_cast<SparcISA::Interrupts *>(
1026  tc->getCpuPtr()->getInterruptController(0));
1027  pkt->set(interrupts->get_vec(IT_INT_VEC));
1028  }
1029  break;
1030  case ASI_SWVR_UDB_INTR_R:
1031  {
1032  SparcISA::Interrupts * interrupts =
1033  dynamic_cast<SparcISA::Interrupts *>(
1034  tc->getCpuPtr()->getInterruptController(0));
1035  temp = findMsbSet(interrupts->get_vec(IT_INT_VEC));
1036  tc->getCpuPtr()->clearInterrupt(0, IT_INT_VEC, temp);
1037  pkt->set(temp);
1038  }
1039  break;
1040  default:
1041 doMmuReadError:
1042  panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
1043  (uint32_t)asi, va);
1044  }
1045  pkt->makeAtomicResponse();
1046  return Cycles(1);
1047 }
1048 
1049 Cycles
1050 TLB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
1051 {
1052  uint64_t data = pkt->get<uint64_t>();
1053  Addr va = pkt->getAddr();
1054  ASI asi = (ASI)pkt->req->getArchFlags();
1055 
1056  Addr ta_insert;
1057  Addr va_insert;
1058  Addr ct_insert;
1059  int part_insert;
1060  int entry_insert = -1;
1061  bool real_insert;
1062  bool ignore;
1063  int part_id;
1064  int ctx_id;
1065  PageTableEntry pte;
1066 
1067  DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
1068  (uint32_t)asi, va, data);
1069 
1070  TLB *itb = tc->getITBPtr();
1071 
1072  switch (asi) {
1073  case ASI_LSU_CONTROL_REG:
1074  assert(va == 0);
1075  tc->setMiscReg(MISCREG_MMU_LSU_CTRL, data);
1076  break;
1077  case ASI_MMU:
1078  switch (va) {
1079  case 0x8:
1080  tc->setMiscReg(MISCREG_MMU_P_CONTEXT, data);
1081  break;
1082  case 0x10:
1083  tc->setMiscReg(MISCREG_MMU_S_CONTEXT, data);
1084  break;
1085  default:
1086  goto doMmuWriteError;
1087  }
1088  break;
1089  case ASI_QUEUE:
1090  assert(mbits(data,13,6) == data);
1091  tc->setMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
1092  (va >> 4) - 0x3c, data);
1093  break;
1094  case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
1095  assert(va == 0);
1096  c0_tsb_ps0 = data;
1097  break;
1098  case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
1099  assert(va == 0);
1100  c0_tsb_ps1 = data;
1101  break;
1102  case ASI_DMMU_CTXT_ZERO_CONFIG:
1103  assert(va == 0);
1104  c0_config = data;
1105  break;
1106  case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
1107  assert(va == 0);
1108  itb->c0_tsb_ps0 = data;
1109  break;
1110  case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
1111  assert(va == 0);
1112  itb->c0_tsb_ps1 = data;
1113  break;
1114  case ASI_IMMU_CTXT_ZERO_CONFIG:
1115  assert(va == 0);
1116  itb->c0_config = data;
1117  break;
1118  case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
1119  assert(va == 0);
1120  cx_tsb_ps0 = data;
1121  break;
1122  case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
1123  assert(va == 0);
1124  cx_tsb_ps1 = data;
1125  break;
1126  case ASI_DMMU_CTXT_NONZERO_CONFIG:
1127  assert(va == 0);
1128  cx_config = data;
1129  break;
1130  case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
1131  assert(va == 0);
1132  itb->cx_tsb_ps0 = data;
1133  break;
1134  case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
1135  assert(va == 0);
1136  itb->cx_tsb_ps1 = data;
1137  break;
1138  case ASI_IMMU_CTXT_NONZERO_CONFIG:
1139  assert(va == 0);
1140  itb->cx_config = data;
1141  break;
1142  case ASI_SPARC_ERROR_EN_REG:
1143  case ASI_SPARC_ERROR_STATUS_REG:
1144  inform("Ignoring write to SPARC ERROR register\n");
1145  break;
1146  case ASI_HYP_SCRATCHPAD:
1147  case ASI_SCRATCHPAD:
1148  tc->setMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3), data);
1149  break;
1150  case ASI_IMMU:
1151  switch (va) {
1152  case 0x18:
1153  itb->sfsr = data;
1154  break;
1155  case 0x30:
1156  sext<59>(bits(data, 59,0));
1157  itb->tag_access = data;
1158  break;
1159  default:
1160  goto doMmuWriteError;
1161  }
1162  break;
1163  case ASI_ITLB_DATA_ACCESS_REG:
1164  entry_insert = bits(va, 8,3);
1165  case ASI_ITLB_DATA_IN_REG:
1166  assert(entry_insert != -1 || mbits(va,10,9) == va);
1167  ta_insert = itb->tag_access;
1168  va_insert = mbits(ta_insert, 63,13);
1169  ct_insert = mbits(ta_insert, 12,0);
1170  part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1171  real_insert = bits(va, 9,9);
1172  pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1173  PageTableEntry::sun4u);
1174  tc->getITBPtr()->insert(va_insert, part_insert, ct_insert, real_insert,
1175  pte, entry_insert);
1176  break;
1177  case ASI_DTLB_DATA_ACCESS_REG:
1178  entry_insert = bits(va, 8,3);
1179  case ASI_DTLB_DATA_IN_REG:
1180  assert(entry_insert != -1 || mbits(va,10,9) == va);
1181  ta_insert = tag_access;
1182  va_insert = mbits(ta_insert, 63,13);
1183  ct_insert = mbits(ta_insert, 12,0);
1184  part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1185  real_insert = bits(va, 9,9);
1186  pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1187  PageTableEntry::sun4u);
1188  insert(va_insert, part_insert, ct_insert, real_insert, pte,
1189  entry_insert);
1190  break;
1191  case ASI_IMMU_DEMAP:
1192  ignore = false;
1193  ctx_id = -1;
1194  part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
1195  switch (bits(va,5,4)) {
1196  case 0:
1197  ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1198  break;
1199  case 1:
1200  ignore = true;
1201  break;
1202  case 3:
1203  ctx_id = 0;
1204  break;
1205  default:
1206  ignore = true;
1207  }
1208 
1209  switch (bits(va,7,6)) {
1210  case 0: // demap page
1211  if (!ignore)
1212  tc->getITBPtr()->demapPage(mbits(va,63,13), part_id,
1213  bits(va,9,9), ctx_id);
1214  break;
1215  case 1: // demap context
1216  if (!ignore)
1217  tc->getITBPtr()->demapContext(part_id, ctx_id);
1218  break;
1219  case 2:
1220  tc->getITBPtr()->demapAll(part_id);
1221  break;
1222  default:
1223  panic("Invalid type for IMMU demap\n");
1224  }
1225  break;
1226  case ASI_DMMU:
1227  switch (va) {
1228  case 0x18:
1229  sfsr = data;
1230  break;
1231  case 0x30:
1232  sext<59>(bits(data, 59,0));
1233  tag_access = data;
1234  break;
1235  case 0x80:
1236  tc->setMiscReg(MISCREG_MMU_PART_ID, data);
1237  break;
1238  default:
1239  goto doMmuWriteError;
1240  }
1241  break;
1242  case ASI_DMMU_DEMAP:
1243  ignore = false;
1244  ctx_id = -1;
1245  part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
1246  switch (bits(va,5,4)) {
1247  case 0:
1248  ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1249  break;
1250  case 1:
1251  ctx_id = tc->readMiscReg(MISCREG_MMU_S_CONTEXT);
1252  break;
1253  case 3:
1254  ctx_id = 0;
1255  break;
1256  default:
1257  ignore = true;
1258  }
1259 
1260  switch (bits(va,7,6)) {
1261  case 0: // demap page
1262  if (!ignore)
1263  demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
1264  break;
1265  case 1: // demap context
1266  if (!ignore)
1267  demapContext(part_id, ctx_id);
1268  break;
1269  case 2:
1270  demapAll(part_id);
1271  break;
1272  default:
1273  panic("Invalid type for DMMU demap\n");
1274  }
1275  break;
1276  case ASI_SWVR_INTR_RECEIVE:
1277  {
1278  int msb;
1279  // clear all the interrupts that aren't set in the write
1280  SparcISA::Interrupts * interrupts =
1281  dynamic_cast<SparcISA::Interrupts *>(
1282  tc->getCpuPtr()->getInterruptController(0));
1283  while (interrupts->get_vec(IT_INT_VEC) & data) {
1284  msb = findMsbSet(interrupts->get_vec(IT_INT_VEC) & data);
1285  tc->getCpuPtr()->clearInterrupt(0, IT_INT_VEC, msb);
1286  }
1287  }
1288  break;
1289  case ASI_SWVR_UDB_INTR_W:
1290  tc->getSystemPtr()->threadContexts[bits(data,12,8)]->getCpuPtr()->
1291  postInterrupt(0, bits(data, 5, 0), 0);
1292  break;
1293  default:
1294 doMmuWriteError:
1295  panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
1296  (uint32_t)pkt->req->getArchFlags(), pkt->getAddr(), data);
1297  }
1298  pkt->makeAtomicResponse();
1299  return Cycles(1);
1300 }
1301 
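// GetTsbPtr() below fills four TSB pointers for a given tag access value:
// data TSB PS0/PS1 followed by the instruction TSB PS0/PS1 taken from the
// ITB, each formed by MakeTsbPtr().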
1302 void
1303 TLB::GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs)
1304 {
1305  uint64_t tag_access = mbits(addr,63,13) | mbits(ctx,12,0);
1306  TLB * itb = tc->getITBPtr();
1307  ptrs[0] = MakeTsbPtr(Ps0, tag_access,
1308  c0_tsb_ps0,
1309  c0_config,
1310  cx_tsb_ps0,
1311  cx_config);
1312  ptrs[1] = MakeTsbPtr(Ps1, tag_access,
1313  c0_tsb_ps1,
1314  c0_config,
1315  cx_tsb_ps1,
1316  cx_config);
1317  ptrs[2] = MakeTsbPtr(Ps0, tag_access,
1318  itb->c0_tsb_ps0,
1319  itb->c0_config,
1320  itb->cx_tsb_ps0,
1321  itb->cx_config);
1322  ptrs[3] = MakeTsbPtr(Ps1, tag_access,
1323  itb->c0_tsb_ps1,
1324  itb->c0_config,
1325  itb->cx_tsb_ps1,
1326  itb->cx_config);
1327 }
1328 
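// MakeTsbPtr() selects the context-zero or context-nonzero TSB base and
// config depending on whether the context field of tag_access is zero, then
// builds the pointer from the TSB base, the split bit (for PS1), the TSB
// size field and the configured page size.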
1329 uint64_t
1330 TLB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
1331  uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
1332 {
1333  uint64_t tsb;
1334  uint64_t config;
1335 
1336  if (bits(tag_access, 12,0) == 0) {
1337  tsb = c0_tsb;
1338  config = c0_config;
1339  } else {
1340  tsb = cX_tsb;
1341  config = cX_config;
1342  }
1343 
1344  uint64_t ptr = mbits(tsb,63,13);
1345  bool split = bits(tsb,12,12);
1346  int tsb_size = bits(tsb,3,0);
1347  int page_size = (ps == Ps0) ? bits(config, 2,0) : bits(config,10,8);
1348 
1349  if (ps == Ps1 && split)
1350  ptr |= ULL(1) << (13 + tsb_size);
1351  ptr |= (tag_access >> (9 + page_size * 3)) & mask(12+tsb_size, 4);
1352 
1353  return ptr;
1354 }
1355 
1356 void
1357 TLB::serialize(CheckpointOut &cp) const
1358 {
1359  SERIALIZE_SCALAR(size);
1360  SERIALIZE_SCALAR(usedEntries);
1361  SERIALIZE_SCALAR(lastReplaced);
1362 
1363  // convert the pointer based free list into an index based one
1364  std::vector<int> free_list;
1365  for (const TlbEntry *entry : freeList)
1366  free_list.push_back(entry - tlb);
1367 
1368  SERIALIZE_CONTAINER(free_list);
1369 
1378 
1379  for (int x = 0; x < size; x++) {
1380  ScopedCheckpointSection sec(cp, csprintf("PTE%d", x));
1381  tlb[x].serialize(cp);
1382  }
1384 }
1385 
1386 void
1387 TLB::unserialize(CheckpointIn &cp)
1388 {
1389  int oldSize;
1390 
1391  paramIn(cp, "size", oldSize);
1392  if (oldSize != size)
1393  panic("Don't support unserializing different sized TLBs\n");
1394  UNSERIALIZE_SCALAR(usedEntries);
1395  UNSERIALIZE_SCALAR(lastReplaced);
1396 
1397  std::vector<int> free_list;
1398  UNSERIALIZE_CONTAINER(free_list);
1399  freeList.clear();
1400  for (int idx : free_list)
1401  freeList.push_back(&tlb[idx]);
1402 
1411 
1412  lookupTable.clear();
1413  for (int x = 0; x < size; x++) {
1414  ScopedCheckpointSection sec(cp, csprintf("PTE%d", x));
1415  tlb[x].unserialize(cp);
1416  if (tlb[x].valid)
1417  lookupTable.insert(tlb[x].range, &tlb[x]);
1418 
1419  }
1421 }
1422 
1423 } // namespace SparcISA
1424 
1425 SparcISA::TLB *
1426 SparcTLBParams::create()
1427 {
1428  return new SparcISA::TLB(this);
1429 }