gem5
cache.cc
1 /*
2  * Copyright (c) 2010-2016 ARM Limited
3  * All rights reserved.
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 2002-2005 The Regents of The University of Michigan
15  * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
16  * All rights reserved.
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions are
20  * met: redistributions of source code must retain the above copyright
21  * notice, this list of conditions and the following disclaimer;
22  * redistributions in binary form must reproduce the above copyright
23  * notice, this list of conditions and the following disclaimer in the
24  * documentation and/or other materials provided with the distribution;
25  * neither the name of the copyright holders nor the names of its
26  * contributors may be used to endorse or promote products derived from
27  * this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  *
41  * Authors: Erik Hallnor
42  * Dave Greene
43  * Nathan Binkert
44  * Steve Reinhardt
45  * Ron Dreslinski
46  * Andreas Sandberg
47  */
48 
49 /**
50  * @file
51  * Cache definitions.
52  */
53 
54 #include "mem/cache/cache.hh"
55 
56 #include "base/misc.hh"
57 #include "base/types.hh"
58 #include "debug/Cache.hh"
59 #include "debug/CachePort.hh"
60 #include "debug/CacheTags.hh"
61 #include "debug/CacheVerbose.hh"
62 #include "mem/cache/blk.hh"
63 #include "mem/cache/mshr.hh"
64 #include "mem/cache/prefetch/base.hh"
65 #include "sim/sim_exit.hh"
66 
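// The CacheParams structure used below is generated from the Cache
// SimObject's Python parameters, so tags, prefetcher, prefetch_on_access,
// clusivity and writeback_clean are normally set from the configuration
// scripts rather than in C++.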
67 Cache::Cache(const CacheParams *p)
68  : BaseCache(p, p->system->cacheLineSize()),
69  tags(p->tags),
70  prefetcher(p->prefetcher),
71  doFastWrites(true),
72  prefetchOnAccess(p->prefetch_on_access),
73  clusivity(p->clusivity),
74  writebackClean(p->writeback_clean),
75  tempBlockWriteback(nullptr),
76  writebackTempBlockAtomicEvent(this, false,
77  EventBase::Delayed_Writeback_Pri)
78 {
79  tempBlock = new CacheBlk();
80  tempBlock->data = new uint8_t[blkSize];
81 
82  cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
83  "CpuSidePort");
84  memSidePort = new MemSidePort(p->name + ".mem_side", this,
85  "MemSidePort");
86 
87  tags->setCache(this);
88  if (prefetcher)
89  prefetcher->setCache(this);
90 }
91 
92 Cache::~Cache()
93 {
94  delete [] tempBlock->data;
95  delete tempBlock;
96 
97  delete cpuSidePort;
98  delete memSidePort;
99 }
100 
101 void
102 Cache::regStats()
103 {
104  BaseCache::regStats();
105 }
106 
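// Handle an atomic swap or compare-and-swap (SwapReq) against the data of a
// resident block: the packet's write value is exchanged with the current
// block contents, and for conditional swaps the write only happens when the
// block data matches the packet's condition value.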
107 void
108 Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
109 {
110  assert(pkt->isRequest());
111 
112  uint64_t overwrite_val;
113  bool overwrite_mem;
114  uint64_t condition_val64;
115  uint32_t condition_val32;
116 
117  int offset = tags->extractBlkOffset(pkt->getAddr());
118  uint8_t *blk_data = blk->data + offset;
119 
120  assert(sizeof(uint64_t) >= pkt->getSize());
121 
122  overwrite_mem = true;
123  // keep a copy of our possible write value, and copy what is at the
124  // memory address into the packet
125  pkt->writeData((uint8_t *)&overwrite_val);
126  pkt->setData(blk_data);
127 
128  if (pkt->req->isCondSwap()) {
129  if (pkt->getSize() == sizeof(uint64_t)) {
130  condition_val64 = pkt->req->getExtraData();
131  overwrite_mem = !std::memcmp(&condition_val64, blk_data,
132  sizeof(uint64_t));
133  } else if (pkt->getSize() == sizeof(uint32_t)) {
134  condition_val32 = (uint32_t)pkt->req->getExtraData();
135  overwrite_mem = !std::memcmp(&condition_val32, blk_data,
136  sizeof(uint32_t));
137  } else
138  panic("Invalid size for conditional read/write\n");
139  }
140 
141  if (overwrite_mem) {
142  std::memcpy(blk_data, &overwrite_val, pkt->getSize());
143  blk->status |= BlkDirty;
144  }
145 }
146 
147 
148 void
149 Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
150  bool deferred_response, bool pending_downgrade)
151 {
152  assert(pkt->isRequest());
153 
154  assert(blk && blk->isValid());
155  // Occasionally this is not true... if we are a lower-level cache
156  // satisfying a string of Read and ReadEx requests from
157  // upper-level caches, a Read will mark the block as shared but we
158  // can satisfy a following ReadEx anyway since we can rely on the
159  // Read requester(s) to have buffered the ReadEx snoop and to
160  // invalidate their blocks after receiving them.
161  // assert(!pkt->needsWritable() || blk->isWritable());
162  assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
163 
164  // Check RMW operations first since both isRead() and
165  // isWrite() will be true for them
166  if (pkt->cmd == MemCmd::SwapReq) {
167  cmpAndSwap(blk, pkt);
168  } else if (pkt->isWrite()) {
169  // we have the block in a writable state and can go ahead,
170  // note that the line may also be considered writable in
171  // downstream caches along the path to memory, but always
172  // Exclusive, and never Modified
173  assert(blk->isWritable());
174  // Write or WriteLine at the first cache with block in writable state
175  if (blk->checkWrite(pkt)) {
176  pkt->writeDataToBlock(blk->data, blkSize);
177  }
178  // Always mark the line as dirty (and thus transition to the
179  // Modified state) even if we are a failed StoreCond so we
180  // supply data to any snoops that have appended themselves to
181  // this cache before knowing the store will fail.
182  blk->status |= BlkDirty;
183  DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
184  } else if (pkt->isRead()) {
185  if (pkt->isLLSC()) {
186  blk->trackLoadLocked(pkt);
187  }
188 
189  // all read responses have a data payload
190  assert(pkt->hasRespData());
191  pkt->setDataFromBlock(blk->data, blkSize);
192 
193  // determine if this read is from a (coherent) cache or not
194  if (pkt->fromCache()) {
195  assert(pkt->getSize() == blkSize);
196  // special handling for coherent block requests from
197  // upper-level caches
198  if (pkt->needsWritable()) {
199  // sanity check
200  assert(pkt->cmd == MemCmd::ReadExReq ||
201  pkt->cmd == MemCmd::SCUpgradeFailReq);
202  assert(!pkt->hasSharers());
203 
204  // if we have a dirty copy, make sure the recipient
205  // keeps it marked dirty (in the modified state)
206  if (blk->isDirty()) {
207  pkt->setCacheResponding();
208  blk->status &= ~BlkDirty;
209  }
210  } else if (blk->isWritable() && !pending_downgrade &&
211  !pkt->hasSharers() &&
212  pkt->cmd != MemCmd::ReadCleanReq) {
213  // we can give the requester a writable copy on a read
214  // request if:
215  // - we have a writable copy at this level (& below)
216  // - we don't have a pending snoop from below
217  // signaling another read request
218  // - no other cache above has a copy (otherwise it
219  // would have set hasSharers flag when
220  // snooping the packet)
221  // - the read has not explicitly asked for a clean
222  // copy of the line
223  if (blk->isDirty()) {
224  // special considerations if we're owner:
225  if (!deferred_response) {
226  // respond with the line in Modified state
227  // (cacheResponding set, hasSharers not set)
228  pkt->setCacheResponding();
229 
230  // if this cache is mostly inclusive, we
231  // keep the block in the Exclusive state,
232  // and pass it upwards as Modified
233  // (writable and dirty), hence we have
234  // multiple caches, all on the same path
235  // towards memory, all considering the
236  // same block writable, but only one
237  // considering it Modified
238 
239  // we get away with multiple caches (on
240  // the same path to memory) considering
241  // the block writeable as we always enter
242  // the cache hierarchy through a cache,
243  // and first snoop upwards in all other
244  // branches
245  blk->status &= ~BlkDirty;
246  } else {
247  // if we're responding after our own miss,
248  // there's a window where the recipient didn't
249  // know it was getting ownership and may not
250  // have responded to snoops correctly, so we
251  // have to respond with a shared line
252  pkt->setHasSharers();
253  }
254  }
255  } else {
256  // otherwise only respond with a shared copy
257  pkt->setHasSharers();
258  }
259  }
260  } else if (pkt->isUpgrade()) {
261  // sanity check
262  assert(!pkt->hasSharers());
263 
264  if (blk->isDirty()) {
265  // we were in the Owned state, and a cache above us that
266  // has the line in Shared state needs to be made aware
267  // that the data it already has is in fact dirty
268  pkt->setCacheResponding();
269  blk->status &= ~BlkDirty;
270  }
271  } else {
272  assert(pkt->isInvalidate());
273  invalidateBlock(blk);
274  DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
275  pkt->print());
276  }
277 }
278 
279 /////////////////////////////////////////////////////
280 //
281 // Access path: requests coming in from the CPU side
282 //
283 /////////////////////////////////////////////////////
284 
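// access() is the common lookup path used by both the timing and the atomic
// request handlers below. It returns true if the request can be satisfied at
// this level, updates lat with the lookup latency, and appends any evictions
// it causes to the writebacks list. A minimal sketch of how the callers
// drive it (see recvTimingReq() and recvAtomic() below):
//
//     PacketList writebacks;
//     Cycles lat = lookupLatency;
//     CacheBlk *blk = nullptr;
//     bool satisfied = access(pkt, blk, lat, writebacks);
//     doWritebacks(writebacks, forward_time);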
285 bool
286 Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
287  PacketList &writebacks)
288 {
289  // sanity check
290  assert(pkt->isRequest());
291 
292  chatty_assert(!(isReadOnly && pkt->isWrite()),
293  "Should never see a write in a read-only cache %s\n",
294  name());
295 
296  DPRINTF(CacheVerbose, "%s for %s\n", __func__, pkt->print());
297 
298  if (pkt->req->isUncacheable()) {
299  DPRINTF(Cache, "uncacheable: %s\n", pkt->print());
300 
301  // flush and invalidate any existing block
302  CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
303  if (old_blk && old_blk->isValid()) {
304  if (old_blk->isDirty() || writebackClean)
305  writebacks.push_back(writebackBlk(old_blk));
306  else
307  writebacks.push_back(cleanEvictBlk(old_blk));
308  invalidateBlock(old_blk);
309  }
310 
311  blk = nullptr;
312  // lookupLatency is the latency in case the request is uncacheable.
313  lat = lookupLatency;
314  return false;
315  }
316 
317  // Note that lat is passed by reference here; accessBlock() can
318  // modify its value.
319  blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat);
320 
321  DPRINTF(Cache, "%s %s\n", pkt->print(),
322  blk ? "hit " + blk->print() : "miss");
323 
324 
325  if (pkt->isEviction()) {
326  // We check for presence of block in above caches before issuing
327  // Writeback or CleanEvict to the write buffer. Therefore the only
328  // possible case is a CleanEvict packet coming from above and
329  // encountering a Writeback generated in this cache that is still
330  // waiting in the write buffer. Cases of upper level peer caches
331  // generating CleanEvict and Writeback or simply CleanEvict and
332  // CleanEvict almost simultaneously will be caught by snoops sent out
333  // by crossbar.
334  WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
335  pkt->isSecure());
336  if (wb_entry) {
337  assert(wb_entry->getNumTargets() == 1);
338  PacketPtr wbPkt = wb_entry->getTarget()->pkt;
339  assert(wbPkt->isWriteback());
340 
341  if (pkt->isCleanEviction()) {
342  // The CleanEvict and WritebackClean snoop into other
343  // peer caches of the same level while traversing the
344  // crossbar. If a copy of the block is found, the
345  // packet is deleted in the crossbar. Hence, none of
346  // the other upper level caches connected to this
347  // cache have the block, so we can clear the
348  // BLOCK_CACHED flag in the Writeback if set and
349  // discard the CleanEvict by returning true.
350  wbPkt->clearBlockCached();
351  return true;
352  } else {
353  assert(pkt->cmd == MemCmd::WritebackDirty);
354  // Dirty writeback from above trumps our clean
355  // writeback... discard here
356  // Note: markInService will remove entry from writeback buffer.
357  markInService(wb_entry);
358  delete wbPkt;
359  }
360  }
361  }
362 
363  // Writeback handling is a special case. We can write the block into
364  // the cache without having a writeable copy (or any copy at all).
365  if (pkt->isWriteback()) {
366  assert(blkSize == pkt->getSize());
367 
368  // we could get a clean writeback while we have
369  // outstanding accesses to a block, do the simple thing for
370  // now and drop the clean writeback so that we do not upset
371  // any ordering/decisions about ownership already taken
372  if (pkt->cmd == MemCmd::WritebackClean &&
373  mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
374  DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
375  "dropping\n", pkt->getAddr());
376  return true;
377  }
378 
379  if (blk == nullptr) {
380  // need to do a replacement
381  blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
382  if (blk == nullptr) {
383  // no replaceable block available: give up, fwd to next level.
384  incMissCount(pkt);
385  return false;
386  }
387  tags->insertBlock(pkt, blk);
388 
389  blk->status = (BlkValid | BlkReadable);
390  if (pkt->isSecure()) {
391  blk->status |= BlkSecure;
392  }
393  }
394  // only mark the block dirty if we got a writeback command,
395  // and leave it as is for a clean writeback
396  if (pkt->cmd == MemCmd::WritebackDirty) {
397  blk->status |= BlkDirty;
398  }
399  // if the packet does not have sharers, it is passing
400  // writable, and we got the writeback in Modified or Exclusive
401  // state, if not we are in the Owned or Shared state
402  if (!pkt->hasSharers()) {
403  blk->status |= BlkWritable;
404  }
405  // nothing else to do; writeback doesn't expect response
406  assert(!pkt->needsResponse());
407  std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
408  DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
409  incHitCount(pkt);
410  return true;
411  } else if (pkt->cmd == MemCmd::CleanEvict) {
412  if (blk != nullptr) {
413  // Found the block in the tags, need to stop CleanEvict from
414  // propagating further down the hierarchy. Returning true will
415  // treat the CleanEvict like a satisfied write request and delete
416  // it.
417  return true;
418  }
419  // We didn't find the block here, propagate the CleanEvict further
420  // down the memory hierarchy. Returning false will treat the CleanEvict
421  // like a Writeback which could not find a replaceable block so has to
422  // go to next level.
423  return false;
424  } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
425  blk->isReadable())) {
426  // OK to satisfy access
427  incHitCount(pkt);
428  satisfyRequest(pkt, blk);
429  maintainClusivity(pkt->fromCache(), blk);
430 
431  return true;
432  }
433 
434  // Can't satisfy access normally... either no block (blk == nullptr)
435  // or have block but need writable
436 
437  incMissCount(pkt);
438 
439  if (blk == nullptr && pkt->isLLSC() && pkt->isWrite()) {
440  // complete miss on store conditional... just give up now
441  pkt->req->setExtraData(0);
442  return true;
443  }
444 
445  return false;
446 }
447 
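// maintainClusivity() enforces the mostly-exclusive policy: if this cache
// has just supplied a block to a cache above it, and the local copy is
// clean, the block is dropped at this level so that capacity is not wasted
// on lines the upper level now holds. Dirty blocks and mostly-inclusive
// configurations are left untouched.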
448 void
449 Cache::maintainClusivity(bool from_cache, CacheBlk *blk)
450 {
451  if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
452  clusivity == Enums::mostly_excl) {
453  // if we have responded to a cache, and our block is still
454  // valid, but not dirty, and this cache is mostly exclusive
455  // with respect to the cache above, drop the block
456  invalidateBlock(blk);
457  }
458 }
459 
460 void
461 Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
462 {
463  while (!writebacks.empty()) {
464  PacketPtr wbPkt = writebacks.front();
465  // We use forwardLatency here because we are copying writebacks to
466  // write buffer. Call isCachedAbove for both Writebacks and
467  // CleanEvicts. If isCachedAbove returns true we set BLOCK_CACHED flag
468  // in Writebacks and discard CleanEvicts.
469  if (isCachedAbove(wbPkt)) {
470  if (wbPkt->cmd == MemCmd::CleanEvict) {
471  // Delete CleanEvict because cached copies exist above. The
472  // packet destructor will delete the request object because
473  // this is a non-snoop request packet which does not require a
474  // response.
475  delete wbPkt;
476  } else if (wbPkt->cmd == MemCmd::WritebackClean) {
477  // clean writeback, do not send since the block is
478  // still cached above
479  assert(writebackClean);
480  delete wbPkt;
481  } else {
482  assert(wbPkt->cmd == MemCmd::WritebackDirty);
483  // Set BLOCK_CACHED flag in Writeback and send below, so that
484  // the Writeback does not reset the bit corresponding to this
485  // address in the snoop filter below.
486  wbPkt->setBlockCached();
487  allocateWriteBuffer(wbPkt, forward_time);
488  }
489  } else {
490  // If the block is not cached above, send packet below. Both
491  // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
492  // reset the bit corresponding to this address in the snoop filter
493  // below.
494  allocateWriteBuffer(wbPkt, forward_time);
495  }
496  writebacks.pop_front();
497  }
498 }
499 
500 void
501 Cache::doWritebacksAtomic(PacketList& writebacks)
502 {
503  while (!writebacks.empty()) {
504  PacketPtr wbPkt = writebacks.front();
505  // Call isCachedAbove for both Writebacks and CleanEvicts. If
506  // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
507  // and discard CleanEvicts.
508  if (isCachedAbove(wbPkt, false)) {
509  if (wbPkt->cmd == MemCmd::WritebackDirty) {
510  // Set BLOCK_CACHED flag in Writeback and send below,
511  // so that the Writeback does not reset the bit
512  // corresponding to this address in the snoop filter
513  // below. We can discard CleanEvicts because cached
514  // copies exist above. Atomic mode isCachedAbove
515  // modifies packet to set BLOCK_CACHED flag
516  memSidePort->sendAtomic(wbPkt);
517  }
518  } else {
519  // If the block is not cached above, send packet below. Both
520  // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
521  // reset the bit corresponding to this address in the snoop filter
522  // below.
523  memSidePort->sendAtomic(wbPkt);
524  }
525  writebacks.pop_front();
526  // In case of CleanEvicts, the packet destructor will delete the
527  // request object because this is a non-snoop request packet which
528  // does not require a response.
529  delete wbPkt;
530  }
531 }
532 
533 
534 void
535 Cache::recvTimingSnoopResp(PacketPtr pkt)
536 {
537  DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());
538 
539  assert(pkt->isResponse());
540  assert(!system->bypassCaches());
541 
542  // determine if the response is from a snoop request we created
543  // (in which case it should be in the outstandingSnoop), or if we
544  // merely forwarded someone else's snoop request
545  const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
546  outstandingSnoop.end();
547 
548  if (!forwardAsSnoop) {
549  // the packet came from this cache, so sink it here and do not
550  // forward it
551  assert(pkt->cmd == MemCmd::HardPFResp);
552 
553  outstandingSnoop.erase(pkt->req);
554 
555  DPRINTF(Cache, "Got prefetch response from above for addr "
556  "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
557  recvTimingResp(pkt);
558  return;
559  }
560 
561  // forwardLatency is set here because there is a response from an
562  // upper level cache.
563  // To pay the delay that occurs if the packet comes from the bus,
564  // we charge also headerDelay.
565  Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
566  // Reset the timing of the packet.
567  pkt->headerDelay = pkt->payloadDelay = 0;
568  memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time);
569 }
570 
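// Whole-line writes are promoted to WriteLineReq so that the line can be
// allocated without first fetching its old contents; see the WriteLineReq
// handling in createMissPacket() and recvTimingResp().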
571 void
572 Cache::promoteWholeLineWrites(PacketPtr pkt)
573 {
574  // Cache line clearing instructions
575  if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
576  (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
577  pkt->cmd = MemCmd::WriteLineReq;
578  DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
579  }
580 }
581 
582 bool
583 Cache::recvTimingReq(PacketPtr pkt)
584 {
585  DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());
586 
587  assert(pkt->isRequest());
588 
589  // Just forward the packet if caches are disabled.
590  if (system->bypassCaches()) {
591  // @todo This should really enqueue the packet rather
592  bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
593  assert(success);
594  return true;
595  }
596 
597  promoteWholeLineWrites(pkt);
598 
599  if (pkt->cacheResponding()) {
600  // a cache above us (but not where the packet came from) is
601  // responding to the request, in other words it has the line
602  // in Modified or Owned state
603  DPRINTF(Cache, "Cache above responding to %s: not responding\n",
604  pkt->print());
605 
606  // if the packet needs the block to be writable, and the cache
607  // that has promised to respond (setting the cache responding
608  // flag) is not providing writable (it is in Owned rather than
609  // the Modified state), we know that there may be other Shared
610  // copies in the system; go out and invalidate them all
611  assert(pkt->needsWritable() && !pkt->responderHadWritable());
612 
613  // an upstream cache that had the line in Owned state
614  // (dirty, but not writable), is responding and thus
615  // transferring the dirty line from one branch of the
616  // cache hierarchy to another
617 
618  // send out an express snoop and invalidate all other
619  // copies (snooping a packet that needs writable is the
620  // same as an invalidation), thus turning the Owned line
621  // into a Modified line, note that we don't invalidate the
622  // block in the current cache or any other cache on the
623  // path to memory
624 
625  // create a downstream express snoop with cleared packet
626  // flags, there is no need to allocate any data as the
627  // packet is merely used to co-ordinate state transitions
628  Packet *snoop_pkt = new Packet(pkt, true, false);
629 
630  // also reset the bus time that the original packet has
631  // not yet paid for
632  snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;
633 
634  // make this an instantaneous express snoop, and let the
635  // other caches in the system know that another cache
636  // is responding, because we have found the authoritative
637  // copy (Modified or Owned) that will supply the right
638  // data
639  snoop_pkt->setExpressSnoop();
640  snoop_pkt->setCacheResponding();
641 
642  // this express snoop travels towards the memory, and at
643  // every crossbar it is snooped upwards thus reaching
644  // every cache in the system
645  bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt);
646  // express snoops always succeed
647  assert(success);
648 
649  // main memory will delete the snoop packet
650 
651  // queue for deletion, as opposed to immediate deletion, as
652  // the sending cache is still relying on the packet
653  pendingDelete.reset(pkt);
654 
655  // no need to take any further action in this particular cache
656  // as an upstream cache has already committed to responding,
657  // and we have already sent out any express snoops in the
658  // section above to ensure all other copies in the system are
659  // invalidated
660  return true;
661  }
662 
663  // anything that is merely forwarded pays for the forward latency and
664  // the delay provided by the crossbar
665  Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
666 
667  // We use lookupLatency here because it is used to specify the latency
668  // to access.
669  Cycles lat = lookupLatency;
670  CacheBlk *blk = nullptr;
671  bool satisfied = false;
672  {
673  PacketList writebacks;
674  // Note that lat is passed by reference here. The function
675  // access() calls accessBlock() which can modify lat value.
676  satisfied = access(pkt, blk, lat, writebacks);
677 
678  // copy writebacks to write buffer here to ensure they logically
679  // precede anything happening below
680  doWritebacks(writebacks, forward_time);
681  }
682 
683  // Here we charge the headerDelay that takes into account the latencies
684  // of the bus, if the packet comes from it.
685  // The latency charged is just lat, that is the value of lookupLatency
686  // as possibly modified by the access() function.
687  // In case of a hit we are neglecting response latency.
688  // In case of a miss we are neglecting forward latency.
689  Tick request_time = clockEdge(lat) + pkt->headerDelay;
690  // Here we reset the timing of the packet.
691  pkt->headerDelay = pkt->payloadDelay = 0;
692 
693  // track time of availability of next prefetch, if any
694  Tick next_pf_time = MaxTick;
695 
696  bool needsResponse = pkt->needsResponse();
697 
698  if (satisfied) {
699  // should never be satisfying an uncacheable access as we
700  // flush and invalidate any existing block as part of the
701  // lookup
702  assert(!pkt->req->isUncacheable());
703 
704  // hit (for all other request types)
705 
706  if (prefetcher && (prefetchOnAccess ||
707  (blk && blk->wasPrefetched()))) {
708  if (blk)
709  blk->status &= ~BlkHWPrefetched;
710 
711  // Don't notify on SWPrefetch
712  if (!pkt->cmd.isSWPrefetch())
713  next_pf_time = prefetcher->notify(pkt);
714  }
715 
716  if (needsResponse) {
717  pkt->makeTimingResponse();
718  // @todo: Make someone pay for this
719  pkt->headerDelay = pkt->payloadDelay = 0;
720 
721  // In this case we are considering request_time that takes
722  // into account the delay of the xbar, if any, and just
723  // lat, neglecting responseLatency, modelling hit latency
724  // just as lookupLatency or the value of lat overridden
725  // by access(), which calls the accessBlock() function.
726  cpuSidePort->schedTimingResp(pkt, request_time, true);
727  } else {
728  DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
729  pkt->print());
730 
731  // queue the packet for deletion, as the sending cache is
732  // still relying on it; if the block is found in access(),
733  // CleanEvict and Writeback messages will be deleted
734  // here as well
735  pendingDelete.reset(pkt);
736  }
737  } else {
738  // miss
739 
740  Addr blk_addr = pkt->getBlockAddr(blkSize);
741 
742  // ignore any existing MSHR if we are dealing with an
743  // uncacheable request
744  MSHR *mshr = pkt->req->isUncacheable() ? nullptr :
745  mshrQueue.findMatch(blk_addr, pkt->isSecure());
746 
747  // Software prefetch handling:
748  // To keep the core from waiting on data it won't look at
749  // anyway, send back a response with dummy data. Miss handling
750  // will continue asynchronously. Unfortunately, the core will
751  // insist upon freeing original Packet/Request, so we have to
752  // create a new pair with a different lifecycle. Note that this
753  // processing happens before any MSHR munging on the behalf of
754  // this request because this new Request will be the one stored
755  // into the MSHRs, not the original.
756  if (pkt->cmd.isSWPrefetch()) {
757  assert(needsResponse);
758  assert(pkt->req->hasPaddr());
759  assert(!pkt->req->isUncacheable());
760 
761  // There's no reason to add a prefetch as an additional target
762  // to an existing MSHR. If an outstanding request is already
763  // in progress, there is nothing for the prefetch to do.
764  // If this is the case, we don't even create a request at all.
765  PacketPtr pf = nullptr;
766 
767  if (!mshr) {
768  // copy the request and create a new SoftPFReq packet
769  RequestPtr req = new Request(pkt->req->getPaddr(),
770  pkt->req->getSize(),
771  pkt->req->getFlags(),
772  pkt->req->masterId());
773  pf = new Packet(req, pkt->cmd);
774  pf->allocate();
775  assert(pf->getAddr() == pkt->getAddr());
776  assert(pf->getSize() == pkt->getSize());
777  }
778 
779  pkt->makeTimingResponse();
780 
781  // request_time is used here, taking into account lat and the delay
782  // charged if the packet comes from the xbar.
783  cpuSidePort->schedTimingResp(pkt, request_time, true);
784 
785  // If an outstanding request is in progress (we found an
786  // MSHR) this is set to null
787  pkt = pf;
788  }
789 
790  if (mshr) {
791  /// MSHR hit
792  /// @note writebacks will be checked in getNextMSHR()
793  /// for any conflicting requests to the same block
794 
795  //@todo remove hw_pf here
796 
797  // Coalesce unless it was a software prefetch (see above).
798  if (pkt) {
799  assert(!pkt->isWriteback());
800  // CleanEvicts corresponding to blocks which have
801  // outstanding requests in MSHRs are simply sunk here
802  if (pkt->cmd == MemCmd::CleanEvict) {
803  pendingDelete.reset(pkt);
804  } else {
805  DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
806  pkt->print());
807 
808  assert(pkt->req->masterId() < system->maxMasters());
809  mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
810  // We use forward_time here because it is the same
811  // considering new targets. We have multiple
812  // requests for the same address here. It
813  // specifies the latency to allocate an internal
814  // buffer and to schedule an event to the queued
815  // port and also takes into account the additional
816  // delay of the xbar.
817  mshr->allocateTarget(pkt, forward_time, order++,
818  allocOnFill(pkt->cmd));
819  if (mshr->getNumTargets() == numTarget) {
820  noTargetMSHR = mshr;
821  setBlocked(Blocked_NoTargets);
822  // need to be careful with this... if this mshr isn't
823  // ready yet (i.e. time > curTick()), we don't want to
824  // move it ahead of mshrs that are ready
825  // mshrQueue.moveToFront(mshr);
826  }
827  }
828  // We should call the prefetcher regardless of whether the request
829  // is satisfied or not, and regardless of whether the request is in
830  // the MSHR or not. The request could be a ReadReq hit, but still not
831  // satisfied (potentially because of a prior write to the same
832  // cache line). So, even when not satisfied and with an MSHR
833  // already allocated for this block, we need to let the prefetcher
834  // know about the request.
835  if (prefetcher) {
836  // Don't notify on SWPrefetch
837  if (!pkt->cmd.isSWPrefetch())
838  next_pf_time = prefetcher->notify(pkt);
839  }
840  }
841  } else {
842  // no MSHR
843  assert(pkt->req->masterId() < system->maxMasters());
844  if (pkt->req->isUncacheable()) {
845  mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;
846  } else {
847  mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
848  }
849 
850  if (pkt->isEviction() ||
851  (pkt->req->isUncacheable() && pkt->isWrite())) {
852  // We use forward_time here because there is an
853  // uncached memory write, forwarded to WriteBuffer.
854  allocateWriteBuffer(pkt, forward_time);
855  } else {
856  if (blk && blk->isValid()) {
857  // should have flushed and have no valid block
858  assert(!pkt->req->isUncacheable());
859 
860  // If we have a write miss to a valid block, we
861  // need to mark the block non-readable. Otherwise
862  // if we allow reads while there's an outstanding
863  // write miss, the read could return stale data
864  // out of the cache block... a more aggressive
865  // system could detect the overlap (if any) and
866  // forward data out of the MSHRs, but we don't do
867  // that yet. Note that we do need to leave the
868  // block valid so that it stays in the cache, in
869  // case we get an upgrade response (and hence no
870  // new data) when the write miss completes.
871  // As long as CPUs do proper store/load forwarding
872  // internally, and have a sufficiently weak memory
873  // model, this is probably unnecessary, but at some
874  // point it must have seemed like we needed it...
875  assert(pkt->needsWritable());
876  assert(!blk->isWritable());
877  blk->status &= ~BlkReadable;
878  }
879  // Here we are using forward_time, modelling the latency of
880  // a miss (outbound) just as forwardLatency, neglecting the
881  // lookupLatency component.
882  allocateMissBuffer(pkt, forward_time);
883  }
884 
885  if (prefetcher) {
886  // Don't notify on SWPrefetch
887  if (!pkt->cmd.isSWPrefetch())
888  next_pf_time = prefetcher->notify(pkt);
889  }
890  }
891  }
892 
893  if (next_pf_time != MaxTick)
894  schedMemSideSendEvent(next_pf_time);
895 
896  return true;
897 }
898 
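// createMissPacket() translates a CPU-side request that missed into the
// command sent towards memory: WriteLineReq becomes InvalidateReq, a valid
// but read-only copy that needs to become writable is upgraded
// (UpgradeReq/SCUpgradeReq), and a plain miss becomes ReadExReq,
// ReadSharedReq or ReadCleanReq depending on whether a writable copy is
// needed and whether this cache is read-only. Uncacheable requests and
// upgrades that missed completely are forwarded unchanged (nullptr).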
899 PacketPtr
900 Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
901  bool needsWritable) const
902 {
903  // should never see evictions here
904  assert(!cpu_pkt->isEviction());
905 
906  bool blkValid = blk && blk->isValid();
907 
908  if (cpu_pkt->req->isUncacheable() ||
909  (!blkValid && cpu_pkt->isUpgrade()) ||
910  cpu_pkt->cmd == MemCmd::InvalidateReq) {
911  // uncacheable requests and upgrades from upper-level caches
912  // that missed completely just go through as is
913  return nullptr;
914  }
915 
916  assert(cpu_pkt->needsResponse());
917 
918  MemCmd cmd;
919  // @TODO make useUpgrades a parameter.
920  // Note that ownership protocols require upgrade, otherwise a
921  // write miss on a shared owned block will generate a ReadExcl,
922  // which will clobber the owned copy.
923  const bool useUpgrades = true;
924  if (cpu_pkt->cmd == MemCmd::WriteLineReq) {
925  assert(!blkValid || !blk->isWritable());
926  // forward as invalidate to all other caches, this gives us
927  // the line in Exclusive state, and invalidates all other
928  // copies
929  cmd = MemCmd::InvalidateReq;
930  } else if (blkValid && useUpgrades) {
931  // only reason to be here is that blk is read only and we need
932  // it to be writable
933  assert(needsWritable);
934  assert(!blk->isWritable());
935  cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
936  } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
937  cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
938  // Even though this SC will fail, we still need to send out the
939  // request and get the data to supply it to other snoopers in the case
940  // where the determination that the StoreCond fails is delayed due to
941  // all caches not being on the same local bus.
942  cmd = MemCmd::SCUpgradeFailReq;
943  } else {
944  // block is invalid
945  cmd = needsWritable ? MemCmd::ReadExReq :
946  (isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
947  }
948  PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
949 
950  // if there are upstream caches that have already marked the
951  // packet as having sharers (not passing writable), pass that info
952  // downstream
953  if (cpu_pkt->hasSharers() && !needsWritable) {
954  // note that cpu_pkt may have spent a considerable time in the
955  // MSHR queue and that the information could possibly be out
956  // of date, however, there is no harm in conservatively
957  // assuming the block has sharers
958  pkt->setHasSharers();
959  DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
960  __func__, cpu_pkt->print(), pkt->print());
961  }
962 
963  // the packet should be block aligned
964  assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));
965 
966  pkt->allocate();
967  DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
968  cpu_pkt->print());
969  return pkt;
970 }
971 
972 
973 Tick
974 Cache::recvAtomic(PacketPtr pkt)
975 {
976  // We are in atomic mode so we pay just for lookupLatency here.
977  Cycles lat = lookupLatency;
978 
979  // Forward the request if the system is in cache bypass mode.
980  if (system->bypassCaches())
981  return ticksToCycles(memSidePort->sendAtomic(pkt));
982 
983  promoteWholeLineWrites(pkt);
984 
985  // follow the same flow as in recvTimingReq, and check if a cache
986  // above us is responding
987  if (pkt->cacheResponding()) {
988  DPRINTF(Cache, "Cache above responding to %s: not responding\n",
989  pkt->print());
990 
991  // if a cache is responding, and it had the line in Owned
992  // rather than Modified state, we need to invalidate any
993  // copies that are not on the same path to memory
994  assert(pkt->needsWritable() && !pkt->responderHadWritable());
995  lat += ticksToCycles(memSidePort->sendAtomic(pkt));
996 
997  return lat * clockPeriod();
998  }
999 
1000  // should assert here that there are no outstanding MSHRs or
1001  // writebacks... that would mean that someone used an atomic
1002  // access in timing mode
1003 
1004  CacheBlk *blk = nullptr;
1005  PacketList writebacks;
1006  bool satisfied = access(pkt, blk, lat, writebacks);
1007 
1008  // handle writebacks resulting from the access here to ensure they
1009  // logically precede anything happening below
1010  doWritebacksAtomic(writebacks);
1011 
1012  if (!satisfied) {
1013  // MISS
1014 
1015  // deal with the packets that go through the write path of
1016  // the cache, i.e. any evictions and uncacheable writes
1017  if (pkt->isEviction() ||
1018  (pkt->req->isUncacheable() && pkt->isWrite())) {
1019  lat += ticksToCycles(memSidePort->sendAtomic(pkt));
1020  return lat * clockPeriod();
1021  }
1022  // only misses left
1023 
1024  PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable());
1025 
1026  bool is_forward = (bus_pkt == nullptr);
1027 
1028  if (is_forward) {
1029  // just forwarding the same request to the next level
1030  // no local cache operation involved
1031  bus_pkt = pkt;
1032  }
1033 
1034  DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
1035  bus_pkt->print());
1036 
1037 #if TRACING_ON
1038  CacheBlk::State old_state = blk ? blk->status : 0;
1039 #endif
1040 
1041  lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));
1042 
1043  bool is_invalidate = bus_pkt->isInvalidate();
1044 
1045  // We are now dealing with the response handling
1046  DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
1047  bus_pkt->print(), old_state);
1048 
1049  // If packet was a forward, the response (if any) is already
1050  // in place in the bus_pkt == pkt structure, so we don't need
1051  // to do anything. Otherwise, use the separate bus_pkt to
1052  // generate response to pkt and then delete it.
1053  if (!is_forward) {
1054  if (pkt->needsResponse()) {
1055  assert(bus_pkt->isResponse());
1056  if (bus_pkt->isError()) {
1057  pkt->makeAtomicResponse();
1058  pkt->copyError(bus_pkt);
1059  } else if (pkt->cmd == MemCmd::WriteLineReq) {
1060  // note the use of pkt, not bus_pkt here.
1061 
1062  // write-line request to the cache that promoted
1063  // the write to a whole line
1064  blk = handleFill(pkt, blk, writebacks,
1065  allocOnFill(pkt->cmd));
1066  assert(blk != NULL);
1067  is_invalidate = false;
1068  satisfyRequest(pkt, blk);
1069  } else if (bus_pkt->isRead() ||
1070  bus_pkt->cmd == MemCmd::UpgradeResp) {
1071  // we're updating cache state to allow us to
1072  // satisfy the upstream request from the cache
1073  blk = handleFill(bus_pkt, blk, writebacks,
1074  allocOnFill(pkt->cmd));
1075  satisfyRequest(pkt, blk);
1076  maintainClusivity(pkt->fromCache(), blk);
1077  } else {
1078  // we're satisfying the upstream request without
1079  // modifying cache state, e.g., a write-through
1080  pkt->makeAtomicResponse();
1081  }
1082  }
1083  delete bus_pkt;
1084  }
1085 
1086  if (is_invalidate && blk && blk->isValid()) {
1087  invalidateBlock(blk);
1088  }
1089  }
1090 
1091  // Note that we don't invoke the prefetcher at all in atomic mode.
1092  // It's not clear how to do it properly, particularly for
1093  // prefetchers that aggressively generate prefetch candidates and
1094  // rely on bandwidth contention to throttle them; these will tend
1095  // to pollute the cache in atomic mode since there is no bandwidth
1096  // contention. If we ever do want to enable prefetching in atomic
1097  // mode, though, this is the place to do it... see timingAccess()
1098  // for an example (though we'd want to issue the prefetch(es)
1099  // immediately rather than calling requestMemSideBus() as we do
1100  // there).
1101 
1102  // do any writebacks resulting from the response handling
1103  doWritebacksAtomic(writebacks);
1104 
1105  // if we used the temp block, check to see if it is valid and if so
1106  // clear it out, but only do so after the call to recvAtomic is
1107  // finished so that any downstream observers (such as a snoop
1108  // filter), first see the fill, and only then see the eviction
1109  if (blk == tempBlock && tempBlock->isValid()) {
1110  // the atomic CPU calls recvAtomic for fetch and load/store
1111  // sequentially, and we may already have a tempBlock
1112  // writeback from the fetch that we have not yet sent
1113  if (tempBlockWriteback) {
1114  // if that is the case, write the previous one back, and
1115  // do not schedule any new event
1116  writebackTempBlockAtomic();
1117  } else {
1118  // the writeback/clean eviction happens after the call to
1119  // recvAtomic has finished (but before any successive
1120  // calls), so that the response handling from the fill is
1121  // allowed to happen first
1122  schedule(writebackTempBlockAtomicEvent, curTick());
1123  }
1124 
1125  tempBlockWriteback = (blk->isDirty() || writebackClean) ?
1126  writebackBlk(blk) : cleanEvictBlk(blk);
1127  invalidateBlock(blk);
1128  }
1129 
1130  if (pkt->needsResponse()) {
1131  pkt->makeAtomicResponse();
1132  }
1133 
1134  return lat * clockPeriod();
1135 }
1136 
1137 
1138 void
1139 Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide)
1140 {
1141  if (system->bypassCaches()) {
1142  // Packets from the memory side are snoop requests and
1143  // shouldn't happen in bypass mode.
1144  assert(fromCpuSide);
1145 
1146  // The cache should be flushed if we are in cache bypass mode,
1147  // so we don't need to check if we need to update anything.
1148  memSidePort->sendFunctional(pkt);
1149  return;
1150  }
1151 
1152  Addr blk_addr = pkt->getBlockAddr(blkSize);
1153  bool is_secure = pkt->isSecure();
1154  CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
1155  MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
1156 
1157  pkt->pushLabel(name());
1158 
1159  CacheBlkPrintWrapper cbpw(blk);
1160 
1161  // Note that just because an L2/L3 has valid data doesn't mean an
1162  // L1 doesn't have a more up-to-date modified copy that still
1163  // needs to be found. As a result we always update the request if
1164  // we have it, but only declare it satisfied if we are the owner.
1165 
1166  // see if we have data at all (owned or otherwise)
1167  bool have_data = blk && blk->isValid()
1168  && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
1169  blk->data);
1170 
1171  // data we have is dirty if marked as such or if we have an
1172  // in-service MSHR that is pending a modified line
1173  bool have_dirty =
1174  have_data && (blk->isDirty() ||
1175  (mshr && mshr->inService && mshr->isPendingModified()));
1176 
1177  bool done = have_dirty
1178  || cpuSidePort->checkFunctional(pkt)
1179  || mshrQueue.checkFunctional(pkt, blk_addr)
1180  || writeBuffer.checkFunctional(pkt, blk_addr)
1181  || memSidePort->checkFunctional(pkt);
1182 
1183  DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
1184  (blk && blk->isValid()) ? "valid " : "",
1185  have_data ? "data " : "", done ? "done " : "");
1186 
1187  // We're leaving the cache, so pop cache->name() label
1188  pkt->popLabel();
1189 
1190  if (done) {
1191  pkt->makeResponse();
1192  } else {
1193  // if it came as a request from the CPU side then make sure it
1194  // continues towards the memory side
1195  if (fromCpuSide) {
1196  memSidePort->sendFunctional(pkt);
1197  } else if (cpuSidePort->isSnooping()) {
1198  // if it came from the memory side, it must be a snoop request
1199  // and we should only forward it if we are forwarding snoops
1200  cpuSidePort->sendFunctionalSnoop(pkt);
1201  }
1202  }
1203 }
1204 
1205 
1206 /////////////////////////////////////////////////////
1207 //
1208 // Response handling: responses from the memory side
1209 //
1210 /////////////////////////////////////////////////////
1211 
1212 
1213 void
1214 Cache::handleUncacheableWriteResp(PacketPtr pkt)
1215 {
1216  Tick completion_time = clockEdge(responseLatency) +
1217  pkt->headerDelay + pkt->payloadDelay;
1218 
1219  // Reset the bus additional time as it is now accounted for
1220  pkt->headerDelay = pkt->payloadDelay = 0;
1221 
1222  cpuSidePort->schedTimingResp(pkt, completion_time, true);
1223 }
1224 
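// recvTimingResp() handles responses arriving from the memory side:
// uncacheable writes are completed directly, everything else is matched to
// its MSHR, optionally fills a block via handleFill(), and then each
// serviceable target recorded in the MSHR is answered in turn (CPU requests,
// hardware prefetches, or deferred snoops).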
1225 void
1226 Cache::recvTimingResp(PacketPtr pkt)
1227 {
1228  assert(pkt->isResponse());
1229 
1230  // all header delay should be paid for by the crossbar, unless
1231  // this is a prefetch response from above
1232  panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
1233  "%s saw a non-zero packet delay\n", name());
1234 
1235  bool is_error = pkt->isError();
1236 
1237  if (is_error) {
1238  DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
1239  pkt->print());
1240  }
1241 
1242  DPRINTF(Cache, "%s: Handling response %s\n", __func__,
1243  pkt->print());
1244 
1245  // if this is a write, we should be looking at an uncacheable
1246  // write
1247  if (pkt->isWrite()) {
1248  assert(pkt->req->isUncacheable());
1249  handleUncacheableWriteResp(pkt);
1250  return;
1251  }
1252 
1253  // we have dealt with any (uncacheable) writes above, from here on
1254  // we know we are dealing with an MSHR due to a miss or a prefetch
1255  MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
1256  assert(mshr);
1257 
1258  if (mshr == noTargetMSHR) {
1259  // we always clear at least one target
1260  clearBlocked(Blocked_NoTargets);
1261  noTargetMSHR = nullptr;
1262  }
1263 
1264  // Initial target is used just for stats
1265  MSHR::Target *initial_tgt = mshr->getTarget();
1266  int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
1267  Tick miss_latency = curTick() - initial_tgt->recvTime;
1268 
1269  if (pkt->req->isUncacheable()) {
1270  assert(pkt->req->masterId() < system->maxMasters());
1271  mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
1272  miss_latency;
1273  } else {
1274  assert(pkt->req->masterId() < system->maxMasters());
1275  mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
1276  miss_latency;
1277  }
1278 
1279  bool wasFull = mshrQueue.isFull();
1280 
1280 
1281  PacketList writebacks;
1282 
1283  Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1284 
1285  // upgrade deferred targets if the response has no sharers, and is
1286  // thus passing writable
1287  if (!pkt->hasSharers()) {
1288  mshr->promoteWritable();
1289  }
1290 
1291  bool is_fill = !mshr->isForward &&
1292  (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
1293 
1294  CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1295 
1296  if (is_fill && !is_error) {
1297  DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
1298  pkt->getAddr());
1299 
1300  blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill());
1301  assert(blk != nullptr);
1302  }
1303 
1304  // allow invalidation responses originating from write-line
1305  // requests to be discarded
1306  bool is_invalidate = pkt->isInvalidate();
1307 
1308  // First offset for critical word first calculations
1309  int initial_offset = initial_tgt->pkt->getOffset(blkSize);
1310 
1311  bool from_cache = false;
1312  MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
1313  for (auto &target: targets) {
1314  Packet *tgt_pkt = target.pkt;
1315  switch (target.source) {
1316  case MSHR::Target::FromCPU:
1317  Tick completion_time;
1318  // Here we charge on completion_time the delay of the xbar if the
1319  // packet comes from it, charged on headerDelay.
1320  completion_time = pkt->headerDelay;
1321 
1322  // Software prefetch handling for cache closest to core
1323  if (tgt_pkt->cmd.isSWPrefetch()) {
1324  // a software prefetch would have already been ack'd
1325  // immediately with dummy data so the core would be able to
1326  // retire it. This request completes right here, so we
1327  // deallocate it.
1328  delete tgt_pkt->req;
1329  delete tgt_pkt;
1330  break; // skip response
1331  }
1332 
1333  // keep track of whether we have responded to another
1334  // cache
1335  from_cache = from_cache || tgt_pkt->fromCache();
1336 
1337  // unlike the other packet flows, where data is found in other
1338  // caches or memory and brought back, write-line requests always
1339  // have the data right away, so the above check for "is fill?"
1340  // cannot actually be determined until examining the stored MSHR
1341  // state. We "catch up" with that logic here, which is duplicated
1342  // from above.
1343  if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
1344  assert(!is_error);
1345  // we got the block in a writable state, so promote
1346  // any deferred targets if possible
1347  mshr->promoteWritable();
1348  // NB: we use the original packet here and not the response!
1349  blk = handleFill(tgt_pkt, blk, writebacks,
1350  targets.allocOnFill);
1351  assert(blk != nullptr);
1352 
1353  // treat as a fill, and discard the invalidation
1354  // response
1355  is_fill = true;
1356  is_invalidate = false;
1357  }
1358 
1359  if (is_fill) {
1360  satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());
1361 
1362  // How many bytes past the first request is this one
1363  int transfer_offset =
1364  tgt_pkt->getOffset(blkSize) - initial_offset;
1365  if (transfer_offset < 0) {
1366  transfer_offset += blkSize;
1367  }
1368 
1369  // If this is not the critical word (non-zero offset), also charge payloadDelay.
1370  // responseLatency is the latency of the return path
1371  // from lower level caches/memory to an upper level cache or
1372  // the core.
1373  completion_time += clockEdge(responseLatency) +
1374  (transfer_offset ? pkt->payloadDelay : 0);
1375 
1376  assert(!tgt_pkt->req->isUncacheable());
1377 
1378  assert(tgt_pkt->req->masterId() < system->maxMasters());
1379  missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
1380  completion_time - target.recvTime;
1381  } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
1382  // failed StoreCond upgrade
1383  assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
1384  tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
1385  tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
1386  // responseLatency is the latency of the return path
1387  // from lower level caches/memory to an upper level cache or
1388  // the core.
1389  completion_time += clockEdge(responseLatency) +
1390  pkt->payloadDelay;
1391  tgt_pkt->req->setExtraData(0);
1392  } else {
1393  // We are about to send a response to a cache above
1394  // that asked for an invalidation; we need to
1395  // invalidate our copy immediately as the most
1396  // up-to-date copy of the block will now be in the
1397  // cache above. It will also prevent this cache from
1398  // responding (if the block was previously dirty) to
1399  // snoops as they should snoop the caches above where
1400  // they will get the response from.
1401  if (is_invalidate && blk && blk->isValid()) {
1402  invalidateBlock(blk);
1403  }
1404  // not a cache fill, just forwarding response
1405  // responseLatency is the latency of the return path
1406  // from lower level caches/memory to the core.
1407  completion_time += clockEdge(responseLatency) +
1408  pkt->payloadDelay;
1409  if (pkt->isRead() && !is_error) {
1410  // sanity check
1411  assert(pkt->getAddr() == tgt_pkt->getAddr());
1412  assert(pkt->getSize() >= tgt_pkt->getSize());
1413 
1414  tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
1415  }
1416  }
1417  tgt_pkt->makeTimingResponse();
1418  // if this packet is an error copy that to the new packet
1419  if (is_error)
1420  tgt_pkt->copyError(pkt);
1421  if (tgt_pkt->cmd == MemCmd::ReadResp &&
1422  (is_invalidate || mshr->hasPostInvalidate())) {
1423  // If intermediate cache got ReadRespWithInvalidate,
1424  // propagate that. Response should not have
1425  // isInvalidate() set otherwise.
1426  tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
1427  DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
1428  tgt_pkt->print());
1429  }
1430  // Reset the bus additional time as it is now accounted for
1431  tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
1432  cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true);
1433  break;
1434 
1435  case MSHR::Target::FromPrefetcher:
1436  assert(tgt_pkt->cmd == MemCmd::HardPFReq);
1437  if (blk)
1438  blk->status |= BlkHWPrefetched;
1439  delete tgt_pkt->req;
1440  delete tgt_pkt;
1441  break;
1442 
1443  case MSHR::Target::FromSnoop:
1444  // I don't believe that a snoop can be in an error state
1445  assert(!is_error);
1446  // response to snoop request
1447  DPRINTF(Cache, "processing deferred snoop...\n");
1448  // If the response is invalidating, a snooping target can
1449  // be satisfied if it is also invalidating. If the response is not
1450  // only invalidating, but more specifically an InvalidateResp, the
1451  // MSHR was created due to an InvalidateReq and a cache above is
1452  // waiting to satisfy a WriteLineReq. In this case even a
1453  // non-invalidating snoop is added as a target here since this is
1454  // the ordering point. When the InvalidateResp reaches this cache,
1455  // the snooping target will snoop further the cache above with the
1456  // WriteLineReq.
1457  assert(!(is_invalidate &&
1458  pkt->cmd != MemCmd::InvalidateResp &&
1459  !mshr->hasPostInvalidate()));
1460  handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
1461  break;
1462 
1463  default:
1464  panic("Illegal target->source enum %d\n", target.source);
1465  }
1466  }
1467 
1468  maintainClusivity(from_cache, blk);
1469 
1470  if (blk && blk->isValid()) {
1471  // an invalidate response stemming from a write line request
1472  // should not invalidate the block, so check if the
1473  // invalidation should be discarded
1474  if (is_invalidate || mshr->hasPostInvalidate()) {
1475  invalidateBlock(blk);
1476  } else if (mshr->hasPostDowngrade()) {
1477  blk->status &= ~BlkWritable;
1478  }
1479  }
1480 
1481  if (mshr->promoteDeferredTargets()) {
1482  // avoid later read getting stale data while write miss is
1483  // outstanding.. see comment in timingAccess()
1484  if (blk) {
1485  blk->status &= ~BlkReadable;
1486  }
1487  mshrQueue.markPending(mshr);
1488  schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
1489  } else {
1490  mshrQueue.deallocate(mshr);
1491  if (wasFull && !mshrQueue.isFull()) {
1492  clearBlocked(Blocked_NoMSHRs);
1493  }
1494 
1495  // Request the bus for a prefetch if this deallocation freed enough
1496  // MSHRs for a prefetch to take place
1497  if (prefetcher && mshrQueue.canPrefetch()) {
1498  Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
1499  clockEdge());
1500  if (next_pf_time != MaxTick)
1501  schedMemSideSendEvent(next_pf_time);
1502  }
1503  }
1504  // reset the xbar additional timing as it is now accounted for
1505  pkt->headerDelay = pkt->payloadDelay = 0;
1506 
1507  // copy writebacks to write buffer
1508  doWritebacks(writebacks, forward_time);
1509 
1510  // if we used the temp block, check to see if it is valid and then clear it out
1511  if (blk == tempBlock && tempBlock->isValid()) {
1512  // We use forwardLatency here because we are copying
1513  // Writebacks/CleanEvicts to write buffer. It specifies the latency to
1514  // allocate an internal buffer and to schedule an event to the
1515  // queued port.
1516  if (blk->isDirty() || writebackClean) {
1517  PacketPtr wbPkt = writebackBlk(blk);
1518  allocateWriteBuffer(wbPkt, forward_time);
1519  // Set BLOCK_CACHED flag if cached above.
1520  if (isCachedAbove(wbPkt))
1521  wbPkt->setBlockCached();
1522  } else {
1523  PacketPtr wcPkt = cleanEvictBlk(blk);
1524  // Check to see if block is cached above. If not allocate
1525  // write buffer
1526  if (isCachedAbove(wcPkt))
1527  delete wcPkt;
1528  else
1529  allocateWriteBuffer(wcPkt, forward_time);
1530  }
1531  invalidateBlock(blk);
1532  }
1533 
1534  DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
1535  delete pkt;
1536 }
1537 
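// writebackBlk() packages an evicted block as a WritebackDirty or
// WritebackClean packet. A writable (Modified/Exclusive) copy is passed on
// without sharers, otherwise hasSharers marks that the block was Owned; in
// both cases the local copy is downgraded before the packet is handed to the
// write buffer.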
1538 PacketPtr
1539 Cache::writebackBlk(CacheBlk *blk)
1540 {
1541  chatty_assert(!isReadOnly || writebackClean,
1542  "Writeback from read-only cache");
1543  assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
1544 
1545  writebacks[Request::wbMasterId]++;
1546 
1547  Request *req = new Request(tags->regenerateBlkAddr(blk->tag, blk->set),
1548  blkSize, 0, Request::wbMasterId);
1549  if (blk->isSecure())
1550  req->setFlags(Request::SECURE);
1551 
1552  req->taskId(blk->task_id);
1553  blk->task_id = ContextSwitchTaskId::Unknown;
1554  blk->tickInserted = curTick();
1555 
1556  PacketPtr pkt =
1557  new Packet(req, blk->isDirty() ?
1558  MemCmd::WritebackDirty : MemCmd::WritebackClean, blkSize);
1559 
1560  DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
1561  pkt->print(), blk->isWritable(), blk->isDirty());
1562 
1563  if (blk->isWritable()) {
1564  // not asserting shared means we pass the block in modified
1565  // state, mark our own block non-writeable
1566  blk->status &= ~BlkWritable;
1567  } else {
1568  // we are in the Owned state, tell the receiver
1569  pkt->setHasSharers();
1570  }
1571 
1572  // make sure the block is not marked dirty
1573  blk->status &= ~BlkDirty;
1574 
1575  pkt->allocate();
1576  std::memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);
1577 
1578  return pkt;
1579 }
1580 
1581 PacketPtr
1582 Cache::cleanEvictBlk(CacheBlk *blk)
1583 {
1584  assert(!writebackClean);
1585  assert(blk && blk->isValid() && !blk->isDirty());
1586  // Creating a zero sized write, a message to the snoop filter
1587  Request *req =
1588  new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
1589  Request::wbMasterId);
1590  if (blk->isSecure())
1591  req->setFlags(Request::SECURE);
1592 
1593  req->taskId(blk->task_id);
1594  blk->task_id = ContextSwitchTaskId::Unknown;
1594  blk->task_id = ContextSwitchTaskId::Unknown;
1595  blk->tickInserted = curTick();
1596 
1597  PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
1598  pkt->allocate();
1599  DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());
1600 
1601  return pkt;
1602 }
1603 
1604 void
1605 Cache::memWriteback()
1606 {
1607  CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
1608  tags->forEachBlk(visitor);
1609 }
1610 
1611 void
1612 Cache::memInvalidate()
1613 {
1614  CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor);
1615  tags->forEachBlk(visitor);
1616 }
1617 
1618 bool
1619 Cache::isDirty() const
1620 {
1621  CacheBlkIsDirtyVisitor visitor;
1622  tags->forEachBlk(visitor);
1623 
1624  return visitor.isDirty();
1625 }
1626 
1627 bool
1628 Cache::writebackVisitor(CacheBlk &blk)
1629 {
1630  if (blk.isDirty()) {
1631  assert(blk.isValid());
1632 
1633  Request request(tags->regenerateBlkAddr(blk.tag, blk.set),
1634  blkSize, 0, Request::funcMasterId);
1635  request.taskId(blk.task_id);
1636  if (blk.isSecure()) {
1637  request.setFlags(Request::SECURE);
1638  }
1639 
1640  Packet packet(&request, MemCmd::WriteReq);
1641  packet.dataStatic(blk.data);
1642 
1643  memSidePort->sendFunctional(&packet);
1644 
1645  blk.status &= ~BlkDirty;
1646  }
1647 
1648  return true;
1649 }
1650 
1651 bool
1652 Cache::invalidateVisitor(CacheBlk &blk)
1653 {
1654 
1655  if (blk.isDirty())
1656  warn_once("Invalidating dirty cache lines. Expect things to break.\n");
1657 
1658  if (blk.isValid()) {
1659  assert(!blk.isDirty());
1660  invalidateBlock(&blk);
1661  }
1662 
1663  return true;
1664 }
1665 
1666 CacheBlk*
1667 Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
1668 {
1669  CacheBlk *blk = tags->findVictim(addr);
1670 
1671  // It is valid to return nullptr if there is no victim
1672  if (!blk)
1673  return nullptr;
1674 
1675  if (blk->isValid()) {
1676  Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1677  MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
1678  if (repl_mshr) {
1679  // must be an outstanding upgrade request
1680  // on a block we're about to replace...
1681  assert(!blk->isWritable() || blk->isDirty());
1682  assert(repl_mshr->needsWritable());
1683  // too hard to replace block with transient state
1684  // allocation failed, block not inserted
1685  return nullptr;
1686  } else {
1687  DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
1688  "(%s): %s\n", repl_addr, blk->isSecure() ? "s" : "ns",
1689  addr, is_secure ? "s" : "ns",
1690  blk->isDirty() ? "writeback" : "clean");
1691 
1692  if (blk->wasPrefetched()) {
1693  unusedPrefetches++;
1694  }
1695  // Will send up Writeback/CleanEvict snoops via isCachedAbove
1696  // when pushing this writeback list into the write buffer.
1697  if (blk->isDirty() || writebackClean) {
1698  // Save writeback packet for handling by caller
1699  writebacks.push_back(writebackBlk(blk));
1700  } else {
1701  writebacks.push_back(cleanEvictBlk(blk));
1702  }
1703  }
1704  }
1705 
1706  return blk;
1707 }
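// Note on allocateBlock (summary of the code above): the victim
// provided by the tags may be nullptr (no victim available), and a
// victim with an outstanding upgrade MSHR is never evicted; in that
// case the allocation simply fails and the caller falls back to the
// temporary block. A valid victim is queued for eviction as
// WritebackDirty/WritebackClean or as a CleanEvict, depending on its
// dirty bit and the writebackClean setting.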
1708 
1709 void
1710 Cache::invalidateBlock(CacheBlk *blk)
1711 {
1712  if (blk != tempBlock)
1713  tags->invalidate(blk);
1714  blk->invalidate();
1715 }
1716 
1717 // Note that the reason we return a list of writebacks rather than
1718 // inserting them directly in the write buffer is that this function
1719 // is called by both atomic and timing-mode accesses, and in atomic
1720 // mode we don't mess with the write buffer (we just perform the
1721 // writebacks atomically once the original request is complete).
1722 CacheBlk*
1723 Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
1724  bool allocate)
1725 {
1726  assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq);
1727  Addr addr = pkt->getAddr();
1728  bool is_secure = pkt->isSecure();
1729 #if TRACING_ON
1730  CacheBlk::State old_state = blk ? blk->status : 0;
1731 #endif
1732 
1733  // When handling a fill, we should have no writes to this line.
1734  assert(addr == pkt->getBlockAddr(blkSize));
1735  assert(!writeBuffer.findMatch(addr, is_secure));
1736 
1737  if (blk == nullptr) {
1738  // better have read new data...
1739  assert(pkt->hasData());
1740 
1741  // only read responses and write-line requests have data;
1742  // note that we don't write the data here for write-line - that
1743  // happens in the subsequent call to satisfyRequest
1744  assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq);
1745 
1746  // need to do a replacement if allocating, otherwise we stick
1747  // with the temporary storage
1748  blk = allocate ? allocateBlock(addr, is_secure, writebacks) : nullptr;
1749 
1750  if (blk == nullptr) {
1751  // No replaceable block or a mostly exclusive
1752  // cache... just use temporary storage to complete the
1753  // current request and then get rid of it
1754  assert(!tempBlock->isValid());
1755  blk = tempBlock;
1756  tempBlock->set = tags->extractSet(addr);
1757  tempBlock->tag = tags->extractTag(addr);
1758  // @todo: set security state as well...
1759  DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
1760  is_secure ? "s" : "ns");
1761  } else {
1762  tags->insertBlock(pkt, blk);
1763  }
1764 
1765  // we should never be overwriting a valid block
1766  assert(!blk->isValid());
1767  } else {
1768  // existing block... probably an upgrade
1769  assert(blk->tag == tags->extractTag(addr));
1770  // either we're getting new data or the block should already be valid
1771  assert(pkt->hasData() || blk->isValid());
1772  // don't clear block status... if block is already dirty we
1773  // don't want to lose that
1774  }
1775 
1776  if (is_secure)
1777  blk->status |= BlkSecure;
1778  blk->status |= BlkValid | BlkReadable;
1779 
1780  // sanity check for whole-line writes, which should always be
1781  // marked as writable as part of the fill, and then later marked
1782  // dirty as part of satisfyRequest
1783  if (pkt->cmd == MemCmd::WriteLineReq) {
1784  assert(!pkt->hasSharers());
1785  }
1786 
1787  // here we deal with setting the appropriate state of the line,
1788  // and we start by looking at the hasSharers flag, and ignore the
1789  // cacheResponding flag (normally signalling dirty data) if the
1790  // packet has sharers, thus the line is never allocated as Owned
1791  // (dirty but not writable), and always ends up being either
1792  // Shared, Exclusive or Modified, see Packet::setCacheResponding
1793  // for more details
1794  if (!pkt->hasSharers()) {
1795  // we could get a writable line from memory (rather than a
1796  // cache) even in a read-only cache, note that we set this bit
1797  // even for a read-only cache, possibly revisit this decision
1798  blk->status |= BlkWritable;
1799 
1800  // check if we got this via cache-to-cache transfer (i.e., from a
1801  // cache that had the block in Modified or Owned state)
1802  if (pkt->cacheResponding()) {
1803  // we got the block in Modified state, and invalidated the
1804  // owners copy
1805  blk->status |= BlkDirty;
1806 
1807  chatty_assert(!isReadOnly, "Should never see dirty snoop response "
1808  "in read-only cache %s\n", name());
1809  }
1810  }
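// In summary, the filled line always ends up in one of three stable
// states and is never allocated as Owned:
//
//   hasSharers  cacheResponding  resulting state
//   ----------  ---------------  ----------------------------------
//   set         -                Shared    (valid, not writable)
//   clear       clear            Exclusive (valid, writable, clean)
//   clear       set              Modified  (valid, writable, dirty)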
1811 
1812  DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
1813  addr, is_secure ? "s" : "ns", old_state, blk->print());
1814 
1815  // if we got new data, copy it in (checking for a read response
1816  // and a response that has data is the same in the end)
1817  if (pkt->isRead()) {
1818  // sanity checks
1819  assert(pkt->hasData());
1820  assert(pkt->getSize() == blkSize);
1821 
1822  std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
1823  }
1824  // We pay for fillLatency here.
1825  blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
1826  pkt->payloadDelay;
1827 
1828  return blk;
1829 }
1830 
1831 
1832 /////////////////////////////////////////////////////
1833 //
1834 // Snoop path: requests coming in from the memory side
1835 //
1836 /////////////////////////////////////////////////////
1837 
1838 void
1839 Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
1840  bool already_copied, bool pending_inval)
1841 {
1842  // sanity check
1843  assert(req_pkt->isRequest());
1844  assert(req_pkt->needsResponse());
1845 
1846  DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
1847  // timing-mode snoop responses require a new packet, unless we
1848  // already made a copy...
1849  PacketPtr pkt = req_pkt;
1850  if (!already_copied)
1851  // do not clear flags, and allocate space for data if the
1852  // packet needs it (the only packets that carry data are read
1853  // responses)
1854  pkt = new Packet(req_pkt, false, req_pkt->isRead());
1855 
1856  assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
1857  pkt->hasSharers());
1858  pkt->makeTimingResponse();
1859  if (pkt->isRead()) {
1860  pkt->setDataFromBlock(blk_data, blkSize);
1861  }
1862  if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1863  // Assume we defer a response to a read from a far-away cache
1864  // A, then later defer a ReadExcl from a cache B on the same
1865  // bus as us. We'll assert cacheResponding in both cases, but
1866  // in the latter case cacheResponding will keep the
1867  // invalidation from reaching cache A. This special response
1868  // tells cache A that it gets the block to satisfy its read,
1869  // but must immediately invalidate it.
1870  pkt->cmd = MemCmd::ReadRespWithInvalidate;
1871  }
1872  // Here we consider forward_time, paying for just forward latency and
1873  // also charging the delay provided by the xbar.
1874  // forward_time is used as send_time in next allocateWriteBuffer().
1875  Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1876  // Here we reset the timing of the packet.
1877  pkt->headerDelay = pkt->payloadDelay = 0;
1878  DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
1879  pkt->print(), forward_time);
1880  memSidePort->schedTimingSnoopResp(pkt, forward_time, true);
1881 }
1882 
1883 uint32_t
1884 Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
1885  bool is_deferred, bool pending_inval)
1886 {
1887  DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
1888  // deferred snoops can only happen in timing mode
1889  assert(!(is_deferred && !is_timing));
1890  // pending_inval only makes sense on deferred snoops
1891  assert(!(pending_inval && !is_deferred));
1892  assert(pkt->isRequest());
1893 
1894  // the packet may get modified if we or a forwarded snooper
1895  // responds in atomic mode, so remember a few things about the
1896  // original packet up front
1897  bool invalidate = pkt->isInvalidate();
1898  bool M5_VAR_USED needs_writable = pkt->needsWritable();
1899 
1900  // at the moment we could get an uncacheable write which does not
1901  // have the invalidate flag, and we need a suitable way of dealing
1902  // with this case
1903  panic_if(invalidate && pkt->req->isUncacheable(),
1904  "%s got an invalidating uncacheable snoop request %s",
1905  name(), pkt->print());
1906 
1907  uint32_t snoop_delay = 0;
1908 
1909  if (forwardSnoops) {
1910  // first propagate snoop upward to see if anyone above us wants to
1911  // handle it. save & restore packet src since it will get
1912  // rewritten to be relative to cpu-side bus (if any)
1913  bool alreadyResponded = pkt->cacheResponding();
1914  if (is_timing) {
1915  // copy the packet so that we can clear any flags before
1916  // forwarding it upwards, we also allocate data (passing
1917  // the pointer along in case of static data), in case
1918  // there is a snoop hit in upper levels
1919  Packet snoopPkt(pkt, true, true);
1920  snoopPkt.setExpressSnoop();
1921  // the snoop packet does not need to wait any additional
1922  // time
1923  snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
1924  cpuSidePort->sendTimingSnoopReq(&snoopPkt);
1925 
1926  // add the header delay (including crossbar and snoop
1927  // delays) of the upward snoop to the snoop delay for this
1928  // cache
1929  snoop_delay += snoopPkt.headerDelay;
1930 
1931  if (snoopPkt.cacheResponding()) {
1932  // cache-to-cache response from some upper cache
1933  assert(!alreadyResponded);
1934  pkt->setCacheResponding();
1935  }
1936  // upstream cache has the block, or has an outstanding
1937  // MSHR, pass the flag on
1938  if (snoopPkt.hasSharers()) {
1939  pkt->setHasSharers();
1940  }
1941  // If this request is a prefetch or clean evict and an upper level
1942  // signals block present, make sure to propagate the block
1943  // presence to the requester.
1944  if (snoopPkt.isBlockCached()) {
1945  pkt->setBlockCached();
1946  }
1947  } else {
1948  cpuSidePort->sendAtomicSnoop(pkt);
1949  if (!alreadyResponded && pkt->cacheResponding()) {
1950  // cache-to-cache response from some upper cache:
1951  // forward response to original requester
1952  assert(pkt->isResponse());
1953  }
1954  }
1955  }
1956 
1957  if (!blk || !blk->isValid()) {
1958  DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
1959  pkt->print());
1960  if (is_deferred) {
1961  // we no longer have the block, and will not respond, but a
1962  // packet was allocated in MSHR::handleSnoop and we have
1963  // to delete it
1964  assert(pkt->needsResponse());
1965 
1966  // we have passed the block to a cache upstream, that
1967  // cache should be responding
1968  assert(pkt->cacheResponding());
1969 
1970  delete pkt;
1971  }
1972  return snoop_delay;
1973  } else {
1974  DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
1975  pkt->print(), blk->print());
1976  }
1977 
1978  chatty_assert(!(isReadOnly && blk->isDirty()),
1979  "Should never have a dirty block in a read-only cache %s\n",
1980  name());
1981 
1982  // We may end up modifying both the block state and the packet (if
1983  // we respond in atomic mode), so just figure out what to do now
1984  // and then do it later. We respond to all snoops that need
1985  // responses provided we have the block in dirty state. The
1986  // invalidation itself is taken care of below.
1987  bool respond = blk->isDirty() && pkt->needsResponse();
1988  bool have_writable = blk->isWritable();
1989 
1990  // Invalidate any prefetches from below that would strip write permissions
1991  // MemCmd::HardPFReq is only observed by upstream caches. After missing
1992  // above and in its own cache, a new MemCmd::ReadReq is created that
1993  // downstream caches observe.
1994  if (pkt->mustCheckAbove()) {
1995  DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
1996  "from lower cache\n", pkt->getAddr(), pkt->print());
1997  pkt->setBlockCached();
1998  return snoop_delay;
1999  }
2000 
2001  if (pkt->isRead() && !invalidate) {
2002  // reading without requiring the line in a writable state
2003  assert(!needs_writable);
2004  pkt->setHasSharers();
2005 
2006  // if the requesting packet is uncacheable, retain the line in
2007  // the current state, otherwise unset the writable flag,
2008  // which means we go from Modified to Owned (and will respond
2009  // below), remain in Owned (and will respond below), from
2010  // Exclusive to Shared, or remain in Shared
2011  if (!pkt->req->isUncacheable())
2012  blk->status &= ~BlkWritable;
2013  }
2014 
2015  if (respond) {
2016  // prevent anyone else from responding, cache as well as
2017  // memory, and also prevent any memory from even seeing the
2018  // request
2019  pkt->setCacheResponding();
2020  if (have_writable) {
2021  // inform the cache hierarchy that this cache had the line
2022  // in the Modified state so that we avoid unnecessary
2023  // invalidations (see Packet::setResponderHadWritable)
2024  pkt->setResponderHadWritable();
2025 
2026  // in the case of an uncacheable request there is no point
2027  // in setting the responderHadWritable flag, but since the
2028  // recipient does not care there is no harm in doing so
2029  } else {
2030  // if the packet has needsWritable set we invalidate our
2031  // copy below and all other copies will be invalidated
2032  // through express snoops, and if needsWritable is not set
2033  // we already called setHasSharers above
2034  }
2035 
2036  // if we are returning a writable and dirty (Modified) line,
2037  // we should be invalidating the line
2038  panic_if(!invalidate && !pkt->hasSharers(),
2039  "%s is passing a Modified line through %s, "
2040  "but keeping the block", name(), pkt->print());
2041 
2042  if (is_timing) {
2043  doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
2044  } else {
2045  pkt->makeAtomicResponse();
2046  // packets such as upgrades do not actually have any data
2047  // payload
2048  if (pkt->hasData())
2049  pkt->setDataFromBlock(blk->data, blkSize);
2050  }
2051  }
2052 
2053  if (!respond && is_deferred) {
2054  assert(pkt->needsResponse());
2055 
2056  // if we copied the deferred packet with the intention to
2057  // respond, but are not responding, then a cache above us must
2058  // be, and we can use this as the indication of whether this
2059  // is a packet where we created a copy of the request or not
2060  if (!pkt->cacheResponding()) {
2061  delete pkt->req;
2062  }
2063 
2064  delete pkt;
2065  }
2066 
2067  // Do this last in case it deallocates block data or something
2068  // like that
2069  if (invalidate) {
2070  invalidateBlock(blk);
2071  }
2072 
2073  DPRINTF(Cache, "new state is %s\n", blk->print());
2074 
2075  return snoop_delay;
2076 }
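// Note on handleSnoop (summary of the code above): the snoop is first
// forwarded upstream (copying back the cacheResponding, hasSharers and
// isBlockCached flags), then this cache responds itself only if the
// block is dirty and the snoop needs a response, downgrades its copy
// on a non-invalidating read, and finally invalidates the block if the
// snoop demanded it. The return value is the extra header delay
// accumulated by the upward snoop.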
2077 
2078 
2079 void
2080 Cache::recvTimingSnoopReq(PacketPtr pkt)
2081 {
2082  DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
2083 
2084  // Snoops shouldn't happen when bypassing caches
2085  assert(!system->bypassCaches());
2086 
2087  // no need to snoop requests that are not in range
2088  if (!inRange(pkt->getAddr())) {
2089  return;
2090  }
2091 
2092  bool is_secure = pkt->isSecure();
2093  CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
2094 
2095  Addr blk_addr = pkt->getBlockAddr(blkSize);
2096  MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
2097 
2098  // Update the latency cost of the snoop so that the crossbar can
2099  // account for it. Do not overwrite what other neighbouring caches
2100  // have already done, rather take the maximum. The update is
2101  // tentative, for cases where we return before an upward snoop
2102  // happens below.
2103  pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
2104  lookupLatency * clockPeriod());
2105 
2106  // Inform request(Prefetch, CleanEvict or Writeback) from below of
2107  // MSHR hit, set setBlockCached.
2108  if (mshr && pkt->mustCheckAbove()) {
2109  DPRINTF(Cache, "Setting block cached for %s from lower cache on "
2110  "mshr hit\n", pkt->print());
2111  pkt->setBlockCached();
2112  return;
2113  }
2114 
2115  // Let the MSHR itself track the snoop and decide whether we want
2116  // to go ahead and do the regular cache snoop
2117  if (mshr && mshr->handleSnoop(pkt, order++)) {
2118  DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
2119  "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
2120  mshr->print());
2121 
2122  if (mshr->getNumTargets() > numTarget)
2123  warn("allocating bonus target for snoop"); //handle later
2124  return;
2125  }
2126 
2127  //We also need to check the writeback buffers and handle those
2128  WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
2129  if (wb_entry) {
2130  DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
2131  pkt->getAddr(), is_secure ? "s" : "ns");
2132  // Expect to see only Writebacks and/or CleanEvicts here, both of
2133  // which should not be generated for uncacheable data.
2134  assert(!wb_entry->isUncacheable());
2135  // There should only be a single request responsible for generating
2136  // Writebacks/CleanEvicts.
2137  assert(wb_entry->getNumTargets() == 1);
2138  PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
2139  assert(wb_pkt->isEviction());
2140 
2141  if (pkt->isEviction()) {
2142  // if the block is found in the write queue, set the BLOCK_CACHED
2143  // flag for Writeback/CleanEvict snoop. On return the snoop will
2144  // propagate the BLOCK_CACHED flag in Writeback packets and prevent
2145  // any CleanEvicts from travelling down the memory hierarchy.
2146  pkt->setBlockCached();
2147  DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
2148  "hit\n", __func__, pkt->print());
2149  return;
2150  }
2151 
2152  // conceptually writebacks are no different to other blocks in
2153  // this cache, so the behaviour is modelled after handleSnoop,
2154  // the difference being that instead of querying the block
2155  // state to determine if it is dirty and writable, we use the
2156  // command and fields of the writeback packet
2157  bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
2158  pkt->needsResponse();
2159  bool have_writable = !wb_pkt->hasSharers();
2160  bool invalidate = pkt->isInvalidate();
2161 
2162  if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
2163  assert(!pkt->needsWritable());
2164  pkt->setHasSharers();
2165  wb_pkt->setHasSharers();
2166  }
2167 
2168  if (respond) {
2169  pkt->setCacheResponding();
2170 
2171  if (have_writable) {
2172  pkt->setResponderHadWritable();
2173  }
2174 
2175  doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
2176  false, false);
2177  }
2178 
2179  if (invalidate) {
2180  // Invalidation trumps our writeback... discard here
2181  // Note: markInService will remove entry from writeback buffer.
2182  markInService(wb_entry);
2183  delete wb_pkt;
2184  }
2185  }
2186 
2187  // If this was a shared writeback, there may still be
2188  // other shared copies above that require invalidation.
2189  // We could be more selective and return here if the
2190  // request is non-exclusive or if the writeback is
2191  // exclusive.
2192  uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);
2193 
2194  // Override what we did when we first saw the snoop, as we now
2195  // also have the cost of the upwards snoops to account for
2196  pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
2197  lookupLatency * clockPeriod());
2198 }
2199 
2200 bool
2201 Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
2202 {
2203  // Express snoop responses from master to slave, e.g., from L1 to L2
2204  cache->recvTimingSnoopResp(pkt);
2205  return true;
2206 }
2207 
2208 Tick
2209 Cache::recvAtomicSnoop(PacketPtr pkt)
2210 {
2211  // Snoops shouldn't happen when bypassing caches
2212  assert(!system->bypassCaches());
2213 
2214  // no need to snoop requests that are not in range.
2215  if (!inRange(pkt->getAddr())) {
2216  return 0;
2217  }
2218 
2219  CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
2220  uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
2221  return snoop_delay + lookupLatency * clockPeriod();
2222 }
2223 
2224 
2225 QueueEntry*
2226 Cache::getNextQueueEntry()
2227 {
2228  // Check both MSHR queue and write buffer for potential requests,
2229  // note that null does not mean there is no request, it could
2230  // simply be that it is not ready
2231  MSHR *miss_mshr = mshrQueue.getNext();
2232  WriteQueueEntry *wq_entry = writeBuffer.getNext();
2233 
2234  // If we got a write buffer request ready, first priority is a
2235  // full write buffer, otherwise we favour the miss requests
2236  if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
2237  // need to search MSHR queue for conflicting earlier miss.
2238  MSHR *conflict_mshr =
2239  mshrQueue.findPending(wq_entry->blkAddr,
2240  wq_entry->isSecure);
2241 
2242  if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
2243  // Service misses in order until conflict is cleared.
2244  return conflict_mshr;
2245 
2246  // @todo Note that we ignore the ready time of the conflict here
2247  }
2248 
2249  // No conflicts; issue write
2250  return wq_entry;
2251  } else if (miss_mshr) {
2252  // need to check for conflicting earlier writeback
2253  WriteQueueEntry *conflict_mshr =
2254  writeBuffer.findPending(miss_mshr->blkAddr,
2255  miss_mshr->isSecure);
2256  if (conflict_mshr) {
2257  // not sure why we don't check order here... it was in the
2258  // original code but commented out.
2259 
2260  // The only way this happens is if we are
2261  // doing a write and we didn't have permissions
2262  // then subsequently saw a writeback (owned got evicted)
2263  // We need to make sure to perform the writeback first
2264  // To preserve the dirty data, then we can issue the write
2265 
2266  // should we return wq_entry here instead? I.e. do we
2267  // have to flush writes in order? I don't think so... not
2268  // for Alpha anyway. Maybe for x86?
2269  return conflict_mshr;
2270 
2271  // @todo Note that we ignore the ready time of the conflict here
2272  }
2273 
2274  // No conflicts; issue read
2275  return miss_mshr;
2276  }
2277 
2278  // fall through... no pending requests. Try a prefetch.
2279  assert(!miss_mshr && !wq_entry);
2280  if (prefetcher && mshrQueue.canPrefetch()) {
2281  // If we have a miss queue slot, we can try a prefetch
2282  PacketPtr pkt = prefetcher->getPacket();
2283  if (pkt) {
2284  Addr pf_addr = pkt->getBlockAddr(blkSize);
2285  if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
2286  !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
2287  !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
2288  // Update statistic on number of prefetches issued
2289  // (hwpf_mshr_misses)
2290  assert(pkt->req->masterId() < system->maxMasters());
2291  mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
2292 
2293  // allocate an MSHR and return it, note
2294  // that we send the packet straight away, so do not
2295  // schedule the send
2296  return allocateMissBuffer(pkt, curTick(), false);
2297  } else {
2298  // free the request and packet
2299  delete pkt->req;
2300  delete pkt;
2301  }
2302  }
2303  }
2304 
2305  return nullptr;
2306 }
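// Note on getNextQueueEntry arbitration (summary of the code above):
// a ready write-buffer entry is preferred when the write buffer is
// full or no MSHR is ready, unless an older conflicting MSHR must be
// serviced first; a ready MSHR otherwise wins, unless a conflicting
// writeback to the same block has to drain first to preserve the
// dirty data; only when neither queue has a ready entry is a prefetch
// issued, and only if the prefetched address misses in the tags, the
// MSHRs and the write buffer.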
2307 
2308 bool
2309 Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
2310 {
2311  if (!forwardSnoops)
2312  return false;
2313  // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
2314  // Writeback snoops into upper level caches to check for copies of the
2315  // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
2316  // packet, the cache can inform the crossbar below of presence or absence
2317  // of the block.
2318  if (is_timing) {
2319  Packet snoop_pkt(pkt, true, false);
2320  snoop_pkt.setExpressSnoop();
2321  // Assert that packet is either Writeback or CleanEvict and not a
2322  // prefetch request because prefetch requests need an MSHR and may
2323  // generate a snoop response.
2324  assert(pkt->isEviction());
2325  snoop_pkt.senderState = nullptr;
2326  cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2327  // Writeback/CleanEvict snoops do not generate a snoop response.
2328  assert(!(snoop_pkt.cacheResponding()));
2329  return snoop_pkt.isBlockCached();
2330  } else {
2331  cpuSidePort->sendAtomicSnoop(pkt);
2332  return pkt->isBlockCached();
2333  }
2334 }
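// Note on isCachedAbove (summary of the code above): in timing mode
// an express-snoop copy of the eviction packet is sent up through the
// CPU-side port and the BLOCK_CACHED flag of that copy is returned;
// in atomic mode the original packet is snooped upwards directly and
// its own flag is consulted.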
2335 
2336 Tick
2337 Cache::nextQueueReadyTime() const
2338 {
2339  Tick nextReady = std::min(mshrQueue.nextReadyTime(),
2340  writeBuffer.nextReadyTime());
2341 
2342  // Don't signal prefetch ready time if no MSHRs available
2343  // Will signal once enough MSHRs are deallocated
2344  if (prefetcher && mshrQueue.canPrefetch()) {
2345  nextReady = std::min(nextReady,
2346  prefetcher->nextPrefetchReadyTime());
2347  }
2348 
2349  return nextReady;
2350 }
2351 
2352 bool
2353 Cache::sendMSHRQueuePacket(MSHR* mshr)
2354 {
2355  assert(mshr);
2356 
2357  // use request from 1st target
2358  PacketPtr tgt_pkt = mshr->getTarget()->pkt;
2359 
2360  DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
2361 
2362  CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
2363 
2364  if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
2365  // we should never have hardware prefetches to allocated
2366  // blocks
2367  assert(blk == nullptr);
2368 
2369  // We need to check the caches above us to verify that
2370  // they don't have a copy of this block in the dirty state
2371  // at the moment. Without this check we could get a stale
2372  // copy from memory that might get used in place of the
2373  // dirty one.
2374  Packet snoop_pkt(tgt_pkt, true, false);
2375  snoop_pkt.setExpressSnoop();
2376  // We are sending this packet upwards, but if it hits we will
2377  // get a snoop response that we end up treating just like a
2378  // normal response, hence it needs the MSHR as its sender
2379  // state
2380  snoop_pkt.senderState = mshr;
2381  cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2382 
2383  // Check to see if the prefetch was squashed by an upper cache
2384  // (to prevent us from grabbing the line), or if a writeback
2385  // arrived between the time the prefetch was placed in the
2386  // MSHRs and when it was selected to be sent.
2387 
2388 
2389  // It is important to check cacheResponding before
2390  // prefetchSquashed. If another cache has committed to
2391  // responding, it will be sending a dirty response which will
2392  // arrive at the MSHR allocated for this request. Checking the
2393  // prefetchSquash first may result in the MSHR being
2394  // prematurely deallocated.
2395  if (snoop_pkt.cacheResponding()) {
2396  auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
2397  assert(r.second);
2398 
2399  // if we are getting a snoop response with no sharers it
2400  // will be allocated as Modified
2401  bool pending_modified_resp = !snoop_pkt.hasSharers();
2402  markInService(mshr, pending_modified_resp);
2403 
2404  DPRINTF(Cache, "Upward snoop of prefetch for addr"
2405  " %#x (%s) hit\n",
2406  tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
2407  return false;
2408  }
2409 
2410  if (snoop_pkt.isBlockCached()) {
2411  DPRINTF(Cache, "Block present, prefetch squashed by cache. "
2412  "Deallocating mshr target %#x.\n",
2413  mshr->blkAddr);
2414 
2415  // Deallocate the mshr target
2416  if (mshrQueue.forceDeallocateTarget(mshr)) {
2417  // Clear the blocked state if this deallocation freed an
2418  // MSHR when all had previously been utilized
2419  clearBlocked(Blocked_NoMSHRs);
2420  }
2421  return false;
2422  }
2423  }
2424 
2425  // either a prefetch that is not present upstream, or a normal
2426  // MSHR request, proceed to get the packet to send downstream
2427  PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable());
2428 
2429  mshr->isForward = (pkt == nullptr);
2430 
2431  if (mshr->isForward) {
2432  // not a cache block request, but a response is expected
2433  // make copy of current packet to forward, keep current
2434  // copy for response handling
2435  pkt = new Packet(tgt_pkt, false, true);
2436  assert(!pkt->isWrite());
2437  }
2438 
2439  // play it safe and append (rather than set) the sender state,
2440  // as forwarded packets may already have existing state
2441  pkt->pushSenderState(mshr);
2442 
2443  if (!memSidePort->sendTimingReq(pkt)) {
2444  // we are awaiting a retry, but we
2445  // delete the packet and will be creating a new packet
2446  // when we get the opportunity
2447  delete pkt;
2448 
2449  // note that we have now masked any requestBus and
2450  // schedSendEvent (we will wait for a retry before
2451  // doing anything), and this is so even if we do not
2452  // care about this packet and might override it before
2453  // it gets retried
2454  return true;
2455  } else {
2456  // As part of the call to sendTimingReq the packet is
2457  // forwarded to all neighbouring caches (and any caches
2458  // above them) as a snoop. Thus at this point we know if
2459  // any of the neighbouring caches are responding, and if
2460  // so, we know it is dirty, and we can determine if it is
2461  // being passed as Modified, making our MSHR the ordering
2462  // point
2463  bool pending_modified_resp = !pkt->hasSharers() &&
2464  pkt->cacheResponding();
2465  markInService(mshr, pending_modified_resp);
2466  return false;
2467  }
2468 }
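// Note on sendMSHRQueuePacket (summary of the code above): a HardPFReq
// is first snooped upstream; if an upper cache commits to responding,
// the MSHR is kept and marked in service so the dirty response can
// land in it, and if an upper cache already holds the block the
// prefetch target is deallocated instead. Any other MSHR is turned
// into a downstream packet (or forwarded as-is for non-block
// requests), and the return value indicates whether the port must
// wait for a retry.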
2469 
2470 bool
2471 Cache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
2472 {
2473  assert(wq_entry);
2474 
2475  // always a single target for write queue entries
2476  PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
2477 
2478  DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());
2479 
2480  // forward as is, both for evictions and uncacheable writes
2481  if (!memSidePort->sendTimingReq(tgt_pkt)) {
2482  // note that we have now masked any requestBus and
2483  // schedSendEvent (we will wait for a retry before
2484  // doing anything), and this is so even if we do not
2485  // care about this packet and might override it before
2486  // it gets retried
2487  return true;
2488  } else {
2489  markInService(wq_entry);
2490  return false;
2491  }
2492 }
2493 
2494 void
2495 Cache::serialize(CheckpointOut &cp) const
2496 {
2497  bool dirty(isDirty());
2498 
2499  if (dirty) {
2500  warn("*** The cache still contains dirty data. ***\n");
2501  warn(" Make sure to drain the system using the correct flags.\n");
2502  warn(" This checkpoint will not restore correctly and dirty data "
2503  " in the cache will be lost!\n");
2504  }
2505 
2506  // Since we don't checkpoint the data in the cache, any dirty data
2507  // will be lost when restoring from a checkpoint of a system that
2508  // wasn't drained properly. Flag the checkpoint as invalid if the
2509  // cache contains dirty data.
2510  bool bad_checkpoint(dirty);
2511  SERIALIZE_SCALAR(bad_checkpoint);
2512 }
2513 
2514 void
2515 Cache::unserialize(CheckpointIn &cp)
2516 {
2517  bool bad_checkpoint;
2518  UNSERIALIZE_SCALAR(bad_checkpoint);
2519  if (bad_checkpoint) {
2520  fatal("Restoring from checkpoints with dirty caches is not supported "
2521  "in the classic memory system. Please remove any caches or "
2522  " drain them properly before taking checkpoints.\n");
2523  }
2524 }
2525 
2526 ///////////////
2527 //
2528 // CpuSidePort
2529 //
2530 ///////////////
2531 
2532 AddrRangeList
2533 Cache::CpuSidePort::getAddrRanges() const
2534 {
2535  return cache->getAddrRanges();
2536 }
2537 
2538 bool
2539 Cache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2540 {
2541  assert(!cache->system->bypassCaches());
2542 
2543  bool success = false;
2544 
2545  // always let express snoop packets through, even if blocked
2546  if (pkt->isExpressSnoop()) {
2547  // do not change the current retry state
2548  bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt);
2549  assert(bypass_success);
2550  return true;
2551  } else if (blocked || mustSendRetry) {
2552  // either already committed to send a retry, or blocked
2553  success = false;
2554  } else {
2555  // pass it on to the cache, and let the cache decide if we
2556  // have to retry or not
2557  success = cache->recvTimingReq(pkt);
2558  }
2559 
2560  // remember if we have to retry
2561  mustSendRetry = !success;
2562  return success;
2563 }
2564 
2565 Tick
2566 Cache::CpuSidePort::recvAtomic(PacketPtr pkt)
2567 {
2568  return cache->recvAtomic(pkt);
2569 }
2570 
2571 void
2572 Cache::CpuSidePort::recvFunctional(PacketPtr pkt)
2573 {
2574  // functional request
2575  cache->functionalAccess(pkt, true);
2576 }
2577 
2578 Cache::
2579 CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache,
2580  const std::string &_label)
2581  : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
2582 {
2583 }
2584 
2585 Cache*
2586 CacheParams::create()
2587 {
2588  assert(tags);
2589 
2590  return new Cache(this);
2591 }
2592 ///////////////
2593 //
2594 // MemSidePort
2595 //
2596 ///////////////
2597 
2598 bool
2599 Cache::MemSidePort::recvTimingResp(PacketPtr pkt)
2600 {
2601  cache->recvTimingResp(pkt);
2602  return true;
2603 }
2604 
2605 // Express snooping requests to memside port
2606 void
2607 Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2608 {
2609  // handle snooping requests
2610  cache->recvTimingSnoopReq(pkt);
2611 }
2612 
2613 Tick
2614 Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2615 {
2616  return cache->recvAtomicSnoop(pkt);
2617 }
2618 
2619 void
2620 Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2621 {
2622  // functional snoop (note that in contrast to atomic we don't have
2623  // a specific functionalSnoop method, as they have the same
2624  // behaviour regardless)
2625  cache->functionalAccess(pkt, false);
2626 }
2627 
2628 void
2629 Cache::CacheReqPacketQueue::sendDeferredPacket()
2630 {
2631  // sanity check
2632  assert(!waitingOnRetry);
2633 
2634  // there should never be any deferred request packets in the
2635  // queue, instead we rely on the cache to provide the packets
2636  // from the MSHR queue or write queue
2637  assert(deferredPacketReadyTime() == MaxTick);
2638 
2639  // check for request packets (requests & writebacks)
2640  QueueEntry* entry = cache.getNextQueueEntry();
2641 
2642  if (!entry) {
2643  // can happen if e.g. we attempt a writeback and fail, but
2644  // before the retry, the writeback is eliminated because
2645  // we snoop another cache's ReadEx.
2646  } else {
2647  // let our snoop responses go first if there are responses to
2648  // the same addresses
2649  if (checkConflictingSnoop(entry->blkAddr)) {
2650  return;
2651  }
2652  waitingOnRetry = entry->sendPacket(cache);
2653  }
2654 
2655  // if we succeeded and are not waiting for a retry, schedule the
2656  // next send considering when the next queue is ready, note that
2657  // snoop responses have their own packet queue and thus schedule
2658  // their own events
2659  if (!waitingOnRetry) {
2660  schedSendEvent(cache.nextQueueReadyTime());
2661  }
2662 }
2663 
2664 Cache::
2665 MemSidePort::MemSidePort(const std::string &_name, Cache *_cache,
2666  const std::string &_label)
2667  : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
2668  _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2669  _snoopRespQueue(*_cache, *this, _label), cache(_cache)
2670 {
2671 }
Entry * findMatch(Addr blk_addr, bool is_secure) const
Find the first WriteQueueEntry that matches the provided address.
Definition: queue.hh:156
Miss Status and Handling Register (MSHR) declaration.
#define chatty_assert(cond,...)
The chatty assert macro will function like a normal assert, but will allow the specification of addit...
Definition: misc.hh:259
bool isSecure() const
Definition: packet.hh:661
#define DPRINTF(x,...)
Definition: trace.hh:212
This master id is used for functional requests that don't come from a particular device.
Definition: request.hh:201
bool isDirty() const
Check to see if a block has been written.
Definition: blk.hh:222
virtual void regStats()
Register stats for this object.
Definition: base.cc:178
virtual void recvTimingSnoopReq(PacketPtr pkt)
Definition: cache.cc:2607
bool forwardSnoops
Do we forward snoops from mem side port through to cpu side port?
Definition: base.hh:294
bool isLLSC() const
Definition: packet.hh:527
bool isUncacheable() const
Accessor functions for flags.
Definition: request.hh:767
virtual void recvFunctional(PacketPtr pkt)
Definition: cache.cc:2572
Target * getTarget()
Returns a reference to the first target.
bool isSecure
True if the entry targets the secure memory space.
Definition: queue_entry.hh:92
void setResponderHadWritable()
On responding to a snoop request (which only happens for Modified or Owned lines), make sure that we can transform an Owned response to a Modified one.
Definition: packet.hh:612
QueueEntry * getNextQueueEntry()
Return the next queue entry to service, either a pending miss from the MSHR queue, a buffered write from the write buffer, or something from the prefetcher.
Definition: cache.cc:2226
void setHasSharers()
On fills, the hasSharers flag is used by the caches in combination with the cacheResponding flag...
Definition: packet.hh:584
bool inService
True if the entry has been sent downstream.
Definition: queue_entry.hh:80
State status
The current status of this block.
Definition: blk.hh:100
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:83
bool isBlockCached() const
Definition: packet.hh:624
Entry * findPending(Addr blk_addr, bool is_secure) const
Find any pending requests that overlap the given request.
Definition: queue.hh:192
int getNumTargets() const
Returns the current number of allocated targets.
Definition: packet.hh:73
Tick whenReady
Which curTick() will this block be accessable.
Definition: blk.hh:103
bool isDirty() const
Does the array contain a dirty line?
Definition: cache.hh:623
#define panic(...)
Definition: misc.hh:153
WriteQueue writeBuffer
Write/writeback buffer.
Definition: base.hh:194
Addr tag
Data block tag value.
Definition: blk.hh:86
bool needsWritable() const
The pending* and post* flags are only valid if inService is true.
Definition: mshr.hh:236
void doWritebacksAtomic(PacketList &writebacks)
Send writebacks down the memory hierarchy in atomic mode.
Definition: cache.cc:501
Cycles ticksToCycles(Tick t) const
uint32_t snoopDelay
Keep track of the extra delay incurred by snooping upwards before sending a request down the memory s...
Definition: packet.hh:348
valid, readable
Definition: blk.hh:62
void makeTimingResponse()
Definition: packet.hh:863
MSHR * noTargetMSHR
Pointer to the MSHR that has no targets.
Definition: base.hh:317
int getNumTargets() const
Returns the current number of allocated targets.
Definition: mshr.hh:312
bool forceDeallocateTarget(MSHR *mshr)
Deallocate top target, possibly freeing the MSHR.
Definition: mshr_queue.cc:107
void setExtraData(uint64_t extraData)
Accessor function for store conditional return value.
Definition: request.hh:680
const Cycles lookupLatency
The latency of tag lookup of a cache.
Definition: base.hh:265
Stats::Scalar unusedPrefetches
The number of times a HW-prefetched block is evicted w/o reference.
Definition: base.hh:393
void memWriteback() override
Write back dirty blocks in the cache using functional accesses.
Definition: cache.cc:1605
virtual void invalidate(CacheBlk *blk)=0
void invalidateBlock(CacheBlk *blk)
Invalidate a cache block.
Definition: cache.cc:1710
bool isFull() const
Definition: queue.hh:140
bool isExpressSnoop() const
Definition: packet.hh:601
Tick sendAtomicSnoop(PacketPtr pkt)
Send an atomic snoop request packet, where the data is moved and the state is updated in zero time...
Definition: port.cc:237
bool bypassCaches() const
Should caches be bypassed?
Definition: system.hh:165
void functionalAccess(PacketPtr pkt, bool fromCpuSide)
Performs the access specified by the request.
Definition: cache.cc:1139
Addr getBlockAddr(unsigned int blk_size) const
Definition: packet.hh:656
bool hasData() const
Definition: packet.hh:521
uint64_t getExtraData() const
Accessor function for store conditional return value.
Definition: request.hh:672
ip6_addr_t addr
Definition: inet.hh:335
bool isWrite() const
Definition: packet.hh:503
void memInvalidate() override
Invalidates all blocks in the cache.
Definition: cache.cc:1612
PacketPtr writebackBlk(CacheBlk *blk)
Create a writeback request for the given block.
Definition: cache.cc:1539
System * system
System we are currently operating in.
Definition: base.hh:329
panic_if(!root,"Invalid expression\n")
const bool isReadOnly
Is this cache read only, for example the instruction cache, or table-walker cache.
Definition: base.hh:302
CpuSidePort(const std::string &_name, Cache *_cache, const std::string &_label)
Definition: cache.cc:2579
BasePrefetcher * prefetcher
Prefetcher.
Definition: cache.hh:192
CacheSlavePort * cpuSidePort
Definition: base.hh:185
virtual bool recvTimingReq(PacketPtr pkt)
Definition: cache.cc:2539
const PacketPtr pkt
Pending request packet.
Definition: mshr.hh:127
void sendFunctionalSnoop(PacketPtr pkt)
Send a functional snoop request packet, where the data is instantly updated everywhere in the memory ...
Definition: port.cc:244
#define warn_once(...)
Definition: misc.hh:226
bool inRange(Addr addr) const
Determine if an address is in the ranges covered by this cache.
Definition: base.cc:167
Simple class to provide virtual print() method on cache blocks without allocating a vtable pointer fo...
Definition: blk.hh:381
MSHR * allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send=true)
Definition: base.hh:489
Bitfield< 23, 0 > offset
Definition: types.hh:149
bool fromCache() const
Definition: packet.hh:519
bool responderHadWritable() const
Definition: packet.hh:618
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the slave port by calling its corresponding receive function...
Definition: port.cc:180
virtual Tick notify(const PacketPtr &pkt)=0
Notify prefetcher of cache access (may be any access or just misses, depending on cache parameters...
void setCache(BaseCache *_cache)
Set the parent cache back pointer.
Definition: base.cc:70
virtual CacheBlk * accessBlock(Addr addr, bool is_secure, Cycles &lat)=0
bool isWriteback() const
Definition: packet.hh:520
int cmdToIndex() const
Return the index of this command.
Definition: packet.hh:500
const Cycles fillLatency
The latency to fill a cache block.
Definition: base.hh:281
This master id is used for writeback requests by the caches.
Definition: request.hh:196
virtual void insertBlock(PacketPtr pkt, CacheBlk *blk)=0
void promoteWritable()
Definition: mshr.cc:518
Cache * cache
Definition: cache.hh:87
bool isWritable() const
Checks the write permissions of this block.
Definition: blk.hh:181
Counter order
Order number assigned to disambiguate writes and misses.
Definition: queue_entry.hh:83
void print(std::ostream &os, int verbosity=0, const std::string &prefix="") const
Prints the contents of this MSHR for debugging.
Definition: mshr.cc:564
void pushLabel(const std::string &lbl)
Push label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1160
void writebackTempBlockAtomic()
Send the outstanding tempBlock writeback.
Definition: cache.hh:248
bool isRequest() const
Definition: packet.hh:505
T * getPtr()
get a pointer to the data ptr.
Definition: packet.hh:959
bool mustCheckAbove() const
Does the request need to check for cached copies of the same block in the memory hierarchy above...
Definition: packet.hh:1130
bool isCachedAbove(PacketPtr pkt, bool is_timing=true) const
Send up a snoop request and find cached copies.
Definition: cache.cc:2309
void writeData(uint8_t *p) const
Copy data from the packet to the provided block pointer, which is aligned to the given block size...
Definition: packet.hh:1052
void markPending(MSHR *mshr)
Mark an in service entry as pending, used to resend a request.
Definition: mshr_queue.cc:94
bool isCondSwap() const
Definition: request.hh:775
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition: packet.hh:909
Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS]
Total cycle latency of each MSHR miss, per command and thread.
Definition: base.hh:425
virtual void sendDeferredPacket()
Override the normal sendDeferredPacket and do not only consider the transmit list (used for responses...
Definition: cache.cc:2629
bool checkFunctional(PacketPtr pkt, Addr blk_addr)
Definition: queue.hh:173
const Cycles responseLatency
The latency of sending reponse to its upper level cache/core on a linefill.
Definition: base.hh:288
Tick tickInserted
Definition: blk.hh:120
virtual AddrRangeList getAddrRanges() const
Definition: cache.cc:2533
bool invalidateVisitor(CacheBlk &blk)
Cache block visitor that invalidates all blocks in the cache.
Definition: cache.cc:1652
#define warn(...)
Definition: misc.hh:219
bool isSnooping() const
Find out if the peer master port is snooping or not.
Definition: port.hh:405
void deallocate(Entry *entry)
Removes the given entry from the queue.
Definition: queue.hh:225
bool sendMSHRQueuePacket(MSHR *mshr)
Take an MSHR, turn it into a suitable downstream packet, and send it out.
Definition: cache.cc:2353
A template-policy based cache.
Definition: cache.hh:74
#define UNSERIALIZE_SCALAR(scalar)
Definition: serialize.hh:145
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
system
Definition: isa.cc:226
const Tick MaxTick
Definition: types.hh:65
const Enums::Clusivity clusivity
Clusivity with respect to the upstream cache, determining if we fill into both this cache and the cac...
Definition: cache.hh:217
A cache master port is used for the memory-side port of the cache, and in addition to the basic timin...
Definition: base.hh:112
Cache block visitor that determines if there are dirty blocks in a cache.
Definition: cache.hh:603
Tick curTick()
The current simulated tick.
Definition: core.hh:47
A Basic Cache block.
Definition: blk.hh:79
virtual CacheBlk * findVictim(Addr addr)=0
bool checkWrite(PacketPtr pkt)
Handle interaction of load-locked operations and stores.
Definition: blk.hh:332
virtual Tick recvAtomic(PacketPtr pkt)
Definition: cache.cc:2566
void trackLoadLocked(PacketPtr pkt)
Track the fact that a local locked was issued to the block.
Definition: blk.hh:250
void allocateTarget(PacketPtr target, Tick when, Counter order, bool alloc_on_fill)
Add a request to the list of targets.
Definition: mshr.cc:303
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Definition: packet.hh:340
Stats::Vector writebacks
Number of blocks written back per thread.
Definition: base.hh:396
void serialize(CheckpointOut &cp) const override
serialize the state of the caches We currently don't support checkpointing cache state, so this panics.
Definition: cache.cc:2495
Target * getTarget()
Returns a reference to the first target.
Definition: mshr.hh:339
bool hasRespData() const
Definition: packet.hh:522
void setData(const uint8_t *p)
Copy data into the packet from the provided pointer.
Definition: packet.hh:1024
The memory-side port extends the base cache master port with access functions for functional...
Definition: cache.hh:160
block holds data from the secure memory space
Definition: blk.hh:72
void makeAtomicResponse()
Definition: packet.hh:857
PacketPtr cleanEvictBlk(CacheBlk *blk)
Create a CleanEvict request for the given block.
Definition: cache.cc:1582
bool hasSharers() const
Definition: packet.hh:585
uint64_t Tick
Tick count type.
Definition: types.hh:63
const bool prefetchOnAccess
Notify the prefetcher on every access, not just misses.
Definition: cache.hh:210
EventWrapper< Cache,&Cache::writebackTempBlockAtomic > writebackTempBlockAtomicEvent
An event to writeback the tempBlock after recvAtomic finishes.
Definition: cache.hh:261
virtual int extractSet(Addr addr) const =0
void incMissCount(PacketPtr pkt)
Definition: base.hh:590
bool checkFunctional(PacketPtr pkt)
Check the list of buffered packets against the supplied functional request.
Definition: qport.hh:162
void popLabel()
Pop label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1170
CacheMasterPort * memSidePort
Definition: base.hh:186
unsigned State
block state: OR of CacheBlkStatusBit
Definition: blk.hh:97
Miss Status and handling Register.
Definition: mshr.hh:63
void unserialize(CheckpointIn &cp) override
Unserialize an object.
Definition: cache.cc:2515
CacheBlk * handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks, bool allocate)
Populates a cache block and handles all outstanding requests for the satisfied fill request...
Definition: cache.cc:1723
void satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool deferred_response=false, bool pending_downgrade=false)
Perform any necessary updates to the block and perform any data exchange between the packet and the b...
Definition: cache.cc:149
write permission
Definition: blk.hh:64
bool sendWriteQueuePacket(WriteQueueEntry *wq_entry)
Similar to sendMSHR, but for a write-queue entry instead.
Definition: cache.cc:2471
Addr getPaddr() const
Definition: request.hh:519
void handleUncacheableWriteResp(PacketPtr pkt)
Handling the special case of uncacheable write responses to make recvTimingResp less cluttered...
Definition: cache.cc:1214
const bool writebackClean
Determine if clean lines should be written back or not.
Definition: cache.hh:226
#define fatal(...)
Definition: misc.hh:163
const RequestPtr req
A pointer to the original request.
Definition: packet.hh:304
void allocateWriteBuffer(PacketPtr pkt, Tick time)
Definition: base.hh:507
A basic cache interface.
Definition: base.hh:79
virtual void recvFunctionalSnoop(PacketPtr pkt)
Definition: cache.cc:2620
Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS]
Total number of cycles per thread/command spent waiting for a miss.
Definition: base.hh:357
virtual bool recvTimingSnoopResp(PacketPtr pkt)
Definition: cache.cc:2201
bool needsResponse() const
Definition: packet.hh:516
void recvTimingResp(PacketPtr pkt)
Handles a response (cache line fill/write ack) from the bus.
Definition: cache.cc:1226
bool isUpgrade() const
Definition: packet.hh:504
Tick recvAtomicSnoop(PacketPtr pkt)
Snoop for the provided request in the cache and return the estimated time taken.
Definition: cache.cc:2209
void doWritebacks(PacketList &writebacks, Tick forward_time)
Insert writebacks into the write buffer.
Definition: cache.cc:461
STL list class.
Definition: stl.hh:54
void promoteWholeLineWrites(PacketPtr pkt)
Turn line-sized writes into WriteInvalidate transactions.
Definition: cache.cc:572
Definitions of a simple cache block class.
bool isRead() const
Definition: packet.hh:502
void invalidate()
Invalidate the block and clear all state.
Definition: blk.hh:211
virtual ~Cache()
Non-default destructor is needed to deallocate memory.
Definition: cache.cc:92
bool needsWritable() const
Definition: packet.hh:507
uint8_t blocked
Bit vector of the blocking reasons for the access path.
Definition: base.hh:308
Defines global host-dependent types: Counter, Tick, and (indirectly) {int,uint}{8,16,32,64}_t.
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:142
void schedMemSideSendEvent(Tick time)
Schedule a send event for the memory-side port.
Definition: base.hh:581
bool cacheResponding() const
Definition: packet.hh:558
bool isCleanEviction() const
Is this packet a clean eviction, including both actual clean evict packets, but also clean writebacks...
Definition: packet.hh:1140
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
Definition: packet.hh:358
Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS]
Number of misses that miss in the MSHRs, per command and thread.
Definition: base.hh:406
void recvTimingSnoopReq(PacketPtr pkt)
Snoops bus transactions to maintain coherence.
Definition: cache.cc:2080
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
const unsigned blkSize
Block size of this cache.
Definition: base.hh:259
virtual std::string print() const =0
Print all tags used.
A Packet is used to encapsulate a transfer between two objects in the memory system (e...
Definition: packet.hh:245
Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS]
Number of misses that hit in the MSHRs per command and thread.
Definition: base.hh:399
void recvTimingSnoopResp(PacketPtr pkt)
Handle a snoop response.
Definition: cache.cc:535
void setDataFromBlock(const uint8_t *blk_data, int blkSize)
Copy data into the packet from the provided block pointer, which is aligned to the given block size...
Definition: packet.hh:1042
uint64_t order
Increasing order number assigned to each incoming request.
Definition: base.hh:311
void cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
Handle doing the Compare and Swap function for SPARC.
Definition: cache.cc:108
bool isSWPrefetch() const
Definition: packet.hh:212
MasterID maxMasters()
Get the number of masters registered in the system.
Definition: system.hh:344
#define SERIALIZE_SCALAR(scalar)
Definition: serialize.hh:143
Tick nextReadyTime() const
Definition: queue.hh:214
bool checkFunctional(PacketPtr other)
Check a functional request against a memory value stored in another packet (i.e.
Definition: packet.hh:1115
static const int NumArgumentRegs M5_VAR_USED
Definition: process.cc:83
Flags getFlags()
Accessor for flags.
Definition: request.hh:584
virtual bool recvTimingResp(PacketPtr pkt)
Definition: cache.cc:2599
A queue entry base class, to be used by both the MSHRs and write-queue entries.
Definition: queue_entry.hh:60
MasterID masterId() const
Accesssor for the requestor id.
Definition: request.hh:624
void makeResponse()
Take a request packet and modify it in place to be suitable for returning as a response to that reque...
Definition: packet.hh:845
Tick clockPeriod() const
bool canPrefetch() const
Returns true if sufficient mshrs for prefetch.
Definition: mshr_queue.hh:142
read permission (yes, block can be valid but not readable)
Definition: blk.hh:66
void markInService(MSHR *mshr, bool pending_modified_resp)
Mark a request as in service (sent downstream in the memory system), effectively making this MSHR the...
Definition: base.hh:200
bool access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat, PacketList &writebacks)
Does all the processing necessary to perform the provided request.
Definition: cache.cc:286
Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS]
Total cycle latency of each MSHR miss, per command and thread.
Definition: base.hh:418
virtual const std::string name() const
Definition: sim_object.hh:117
Tick nextQueueReadyTime() const
Find next request ready time from among possible sources.
Definition: cache.cc:2337
Addr blkAddr
Block aligned address.
Definition: queue_entry.hh:86
std::string print() const
Pretty-print a tag, and interpret state bits to readable form including mapping to a MOESI state...
Definition: blk.hh:286
A cache slave port is used for the CPU-side port of the cache, and it is basically a simple timing po...
Definition: base.hh:151
bool hasPostDowngrade() const
Definition: mshr.hh:246
std::ostream CheckpointOut
Definition: serialize.hh:67
bool handleSnoop(PacketPtr target, Counter order)
Definition: mshr.cc:345
bool isError() const
Definition: packet.hh:528
PacketPtr tempBlockWriteback
Writebacks from the tempBlock, resulting on the response path in atomic mode, must happen after the c...
Definition: cache.hh:241
Cache(const CacheParams *p)
Instantiates a basic cache object.
Definition: cache.cc:67
void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data, bool already_copied, bool pending_inval)
Definition: cache.cc:1839
virtual Tick recvAtomicSnoop(PacketPtr pkt)
Definition: cache.cc:2614
void setBlocked(BlockedCause cause)
Marks the access path of the cache as blocked for the given cause.
Definition: base.hh:543
SenderState * senderState
This packet's sender state.
Definition: packet.hh:454
PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk, bool needsWritable) const
Create an appropriate downstream bus request packet for the given parameters.
Definition: cache.cc:900
Tick recvAtomic(PacketPtr pkt)
Performs the access specified by the request.
Definition: cache.cc:974
MemCmd cmd
The command field of the packet.
Definition: packet.hh:301
TargetList extractServiceableTargets(PacketPtr pkt)
Extracts the subset of the targets that can be serviced given a received response.
Definition: mshr.cc:458
dirty (modified)
Definition: blk.hh:68
virtual CacheBlk * findBlock(Addr addr, bool is_secure) const =0
Find a block using the memory address.
virtual bool sendPacket(Cache &cache)=0
Send this queue entry as a downstream packet, with the exact behaviour depending on the specific entr...
bool isForward
True if the entry is just a simple forward from an upper level.
Definition: mshr.hh:113
Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS]
Number of uncacheable accesses, per command and thread.
Definition: base.hh:413
int extractBlkOffset(Addr addr) const
Calculate the block offset of an address.
Definition: base.hh:206
virtual void forEachBlk(CacheBlkVisitor &visitor)=0
uint32_t handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, bool is_deferred, bool pending_inval)
Perform an upward snoop if needed, and update the block state (possibly invalidating the block)...
Definition: cache.cc:1884
void setCache(BaseCache *_cache)
Definition: base.cc:67
bool promoteDeferredTargets()
Definition: mshr.cc:491
const bool doFastWrites
This cache should allocate a block on a line-sized write miss.
Definition: cache.hh:200
void sendTimingSnoopReq(PacketPtr pkt)
Attempt to send a timing snoop request packet to the master port by calling its corresponding receive...
Definition: port.cc:258
void schedule(Event &event, Tick when)
Definition: eventq.hh:728
bool isDirty() const override
Determine if there are any dirty blocks in the cache.
Definition: cache.cc:1619
Entry * getNext() const
Returns the WriteQueueEntry at the head of the readyList.
Definition: queue.hh:206
const int numTarget
The number of targets for each MSHR.
Definition: base.hh:291
uint32_t task_id
Task Id associated with this block.
Definition: blk.hh:83
const Cycles forwardLatency
This is the forward latency of the cache.
Definition: base.hh:278
const PacketPtr pkt
Pending request packet.
void setBlockCached()
Definition: packet.hh:623
void schedTimingResp(PacketPtr pkt, Tick when, bool force_order=false)
Schedule the sending of a timing response.
Definition: qport.hh:91
The request targets the secure memory space.
Definition: request.hh:181
bool recvTimingReq(PacketPtr pkt)
Performs the access specified by the request.
Definition: cache.cc:583
void pushSenderState(SenderState *sender_state)
Push a new sender state to the packet and make the current sender state the predecessor of the new on...
Definition: packet.cc:329
virtual Addr extractTag(Addr addr) const =0
bool writebackVisitor(CacheBlk &blk)
Cache block visitor that writes back dirty cache blocks using functional writes.
Definition: cache.cc:1628
bool allocOnFill(MemCmd cmd) const override
Determine whether we should allocate on a fill or not.
Definition: cache.hh:339
CacheBlk * allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
Find a block frame for a new block at address addr, targeting the given security space and assuming that the block is not currently in the cache.
Definition: cache.cc:1667
std::unordered_set< RequestPtr > outstandingSnoop
Store the outstanding requests that we are expecting snoop responses from so we can determine which s...
Definition: cache.hh:268
SenderState * popSenderState()
Pop the top of the state stack and return a pointer to it.
Definition: packet.cc:337
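To illustrate how the sender-state stack is meant to be used, a hedged sketch follows: the ExampleState struct and both function names are invented for illustration, and only pushSenderState/popSenderState and Packet::SenderState come from the packet API referenced here.

    #include <cassert>
    #include "base/types.hh"
    #include "mem/packet.hh"

    // Hypothetical per-hop bookkeeping carried by a packet in flight.
    struct ExampleState : public Packet::SenderState
    {
        Tick issued;
        explicit ExampleState(Tick t) : issued(t) {}
    };

    void
    rememberIssueTime(PacketPtr pkt, Tick now)
    {
        // The current senderState becomes the predecessor of the new one.
        pkt->pushSenderState(new ExampleState(now));
    }

    Tick
    recoverIssueTime(PacketPtr pkt)
    {
        // Pop our state back off when the matching response returns.
        ExampleState *s = dynamic_cast<ExampleState *>(pkt->popSenderState());
        assert(s);
        Tick t = s->issued;
        delete s;
        return t;
    }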
void regStats() override
Register stats for this object.
Definition: cache.cc:102
const Tick recvTime
Time when request was received (for stats)
Definition: mshr.hh:124
virtual Addr regenerateBlkAddr(Addr tag, unsigned set) const =0
bool isEviction() const
Definition: packet.hh:518
int set
The set and way this block belongs to.
Definition: blk.hh:109
void copyError(Packet *pkt)
Definition: packet.hh:637
const T * getConstPtr() const
Definition: packet.hh:967
Common base class for Event and GlobalEvent, so they can share flag and priority definitions and acce...
Definition: eventq.hh:92
void setExpressSnoop()
The express snoop flag is used for two purposes.
Definition: packet.hh:600
The CPU-side port extends the base cache slave port with access functions for functional, atomic and timing requests.
Definition: cache.hh:82
unsigned getSize() const
Definition: packet.hh:649
bool isReadable() const
Checks the read permissions of this block.
Definition: blk.hh:193
bool isUncacheable() const
Definition: queue_entry.hh:99
Bitfield< 2 > pf
Definition: misc.hh:551
virtual Tick nextPrefetchReadyTime() const =0
bool isValid() const
Checks that a block is valid.
Definition: blk.hh:203
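As a loose illustration of how these block-state checks combine on an access path (the function is hypothetical; the CacheBlk accessors are the ones documented on this page):

    #include "mem/cache/blk.hh"

    // Hypothetical guard: can this block satisfy a read in the given space?
    bool
    canServiceRead(const CacheBlk *blk, bool is_secure)
    {
        if (!blk || !blk->isValid())
            return false;              // no matching, valid block
        if (blk->isSecure() != is_secure)
            return false;              // secure and non-secure must not mix
        return blk->isReadable();      // valid, but possibly not yet readable
    }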
bool isInvalidate() const
Definition: packet.hh:517
unsigned getSize() const
Definition: request.hh:552
Write queue entry.
Tick sendAtomic(PacketPtr pkt)
Send an atomic request packet, where the data is moved and the state is updated in zero time...
Definition: port.cc:166
CacheBlk * tempBlock
Temporary cache block for occasional transitory use.
Definition: cache.hh:195
Miss and writeback queue declarations.
Bitfield< 0 > p
Describes a cache based on template policies.
void setFlags(Flags flags)
Note that unlike other accessors, this function sets specific flags (ORs them in); it does not assign...
Definition: request.hh:595
bool wasPrefetched() const
Check if this block was the result of a hardware prefetch, yet to be touched.
Definition: blk.hh:232
void incHitCount(PacketPtr pkt)
Definition: base.hh:601
uint8_t * data
Contains a copy of the data in this block for easy access.
Definition: blk.hh:94
virtual PacketPtr getPacket()=0
block was a hardware prefetch yet unaccessed
Definition: blk.hh:70
uint32_t taskId() const
Definition: request.hh:630
BaseTags * tags
Tag and data Storage.
Definition: cache.hh:189
bool hasPostInvalidate() const
Definition: mshr.hh:242
void allocate()
Allocate memory for the packet.
Definition: packet.hh:1082
Wrap a method and present it as a cache block visitor.
Definition: cache.hh:579
void clearBlocked(BlockedCause cause)
Marks the cache as unblocked for the given cause.
Definition: base.hh:562
bool isPendingModified() const
Definition: mshr.hh:238
bool checkFunctional(PacketPtr pkt)
Check the list of buffered packets against the supplied functional request.
Definition: qport.hh:96
bool hasPaddr() const
Accessor for paddr.
Definition: request.hh:513
Addr getOffset(unsigned int blk_size) const
Definition: packet.hh:651
void sendFunctional(PacketPtr pkt)
Send a functional request packet, where the data is instantly updated everywhere in the memory system...
Definition: port.cc:173
void maintainClusivity(bool from_cache, CacheBlk *blk)
Maintain the clusivity of this cache by potentially invalidating a block.
Definition: cache.cc:449
bool isResponse() const
Definition: packet.hh:506
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:102
MSHRQueue mshrQueue
Miss status registers.
Definition: base.hh:191
Addr getAddr() const
Definition: packet.hh:639
MemSidePort(const std::string &_name, Cache *_cache, const std::string &_label)
Definition: cache.cc:2665
bool allocOnFill() const
Definition: mshr.hh:252
void clearBlockCached()
Definition: packet.hh:625
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
Definition: cache.hh:232
void writeDataToBlock(uint8_t *blk_data, int blkSize) const
Copy data from the packet to the memory at the provided pointer.
Definition: packet.hh:1061
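Conversely, a hedged sketch of the write direction (helper name invented): the packet's payload is copied into the block-aligned data array, after which the caller would mark the block dirty.

    #include "mem/cache/blk.hh"
    #include "mem/packet.hh"

    // Hypothetical helper: apply a write packet's payload to a cache block.
    void
    writePacketIntoBlock(PacketPtr pkt, CacheBlk *blk, unsigned blk_size)
    {
        // Only the bytes covered by this request are written; the offset
        // within the aligned block comes from the packet's address and size.
        pkt->writeDataToBlock(blk->data, blk_size);
    }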
bool isSecure() const
Check if this block holds data from the secure memory space.
Definition: blk.hh:241
void setCacheResponding()
Snoop flags.
Definition: packet.hh:552
void schedTimingSnoopResp(PacketPtr pkt, Tick when, bool force_order=false)
Schedule the sending of a timing snoop response.
Definition: qport.hh:156

Generated on Fri Jun 9 2017 13:03:48 for gem5 by doxygen 1.8.6