#ifndef __CPU_O3_LSQ_UNIT_IMPL_HH__
#define __CPU_O3_LSQ_UNIT_IMPL_HH__

#include "arch/locked_mem.hh"
#include "config/the_isa.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "debug/O3PipeView.hh"

    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)

assert(!lsqPtr->cpu->switchedOut());

lsqPtr->writeback(inst, pkt);

delete pkt->senderState;

return "Store writeback";
DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);

DPRINTF(IEW, "[sn:%lli]: Response from first half of earlier "
        "blocked split load received. Ignoring.\n", inst->seqNum);
assert(!cpu->switchedOut());

if (!inst->isSquashed()) {

if (inst->isStore()) {

cpu->ppDataAccessComplete->notify(std::make_pair(inst, pkt));

template <class Impl>

        LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
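// Each queue reserves one extra slot (the sentinel) so the circular
// head/tail indices can distinguish a full queue from an empty one.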
LQEntries = maxLQEntries + 1;
SQEntries = maxSQEntries + 1;

assert(LQEntries <= 256);
assert(SQEntries <= 256);

loadQueue.resize(LQEntries);
storeQueue.resize(SQEntries);

depCheckShift = params->LSQDepCheckShift;
checkLoads = params->LSQCheckLoads;
cacheStorePorts = params->cacheStorePorts;
needsTSO = params->needsTSO;

loads = stores = storesToWB = 0;

loadHead = loadTail = 0;

storeHead = storeWBIdx = storeTail = 0;

memDepViolator = NULL;
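// Clear the block-offset bits so snooped addresses can be compared with
// outstanding loads at cache-block granularity.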
cacheBlockMask = ~(cpu->cacheLineSize() - 1);
if (Impl::MaxThreads == 1) {
    return iewStage->name() + ".lsq";
} else {
    return iewStage->name() + ".lsq.thread" + std::to_string(lsqID);
}
    .name(name() + ".forwLoads")
    .desc("Number of loads that had data forwarded from stores");

    .name(name() + ".invAddrLoads")
    .desc("Number of loads ignored due to an invalid address");

    .name(name() + ".squashedLoads")
    .desc("Number of loads squashed");

    .name(name() + ".ignoredResponses")
    .desc("Number of memory responses ignored because the instruction is squashed");

    .name(name() + ".memOrderViolation")
    .desc("Number of memory ordering violations");

    .name(name() + ".squashedStores")
    .desc("Number of stores squashed");

    .name(name() + ".invAddrSwpfs")
    .desc("Number of software prefetches ignored due to an invalid address");

    .name(name() + ".blockedLoads")
    .desc("Number of blocked loads due to partial load-store forwarding");

    .name(name() + ".rescheduledLoads")
    .desc("Number of loads that were rescheduled");

    .name(name() + ".cacheBlocked")
    .desc("Number of times an access to memory failed due to the cache being blocked");
dcachePort = dcache_port;

for (int i = 0; i < loadQueue.size(); ++i)
    assert(!loadQueue[i]);

assert(storesToWB == 0);
unsigned size_plus_sentinel = size + 1;
assert(size_plus_sentinel >= LQEntries);

if (size_plus_sentinel > LQEntries) {
    while (size_plus_sentinel > loadQueue.size()) {
        DynInstPtr dummy;
        loadQueue.push_back(dummy);
        LQEntries++;
    }
} else {
    LQEntries = size_plus_sentinel;
}

assert(LQEntries <= 256);
unsigned size_plus_sentinel = size + 1;
if (size_plus_sentinel > SQEntries) {
    while (size_plus_sentinel > storeQueue.size()) {
        SQEntry dummy;
        storeQueue.push_back(dummy);
        SQEntries++;
    }
} else {
    SQEntries = size_plus_sentinel;
}

assert(SQEntries <= 256);
template <class Impl>

assert(inst->isMemRef());

assert(inst->isLoad() || inst->isStore());

if (inst->isLoad()) {

template <class Impl>

assert((loadTail + 1) % LQEntries != loadHead);
assert(loads < LQEntries);

        load_inst->pcState(), loadTail, load_inst->seqNum);

load_inst->lqIdx = loadTail;

    load_inst->sqIdx = -1;

    load_inst->sqIdx = storeTail;

loadQueue[loadTail] = load_inst;
template <class Impl>

assert((storeTail + 1) % SQEntries != storeHead);
assert(stores < SQEntries);

        store_inst->pcState(), storeTail, store_inst->seqNum);

store_inst->sqIdx = storeTail;
store_inst->lqIdx = loadTail;

storeQueue[storeTail] = SQEntry(store_inst);

incrStIdx(storeTail);

template <class Impl>
typename Impl::DynInstPtr

memDepViolator = NULL;

template <class Impl>

DPRINTF(LSQUnit, "LQ size: %d, #loads occupied: %d\n", LQEntries, loads);
return LQEntries - loads - 1;

template <class Impl>

DPRINTF(LSQUnit, "SQ size: %d, #stores occupied: %d\n", SQEntries, stores);
return SQEntries - stores - 1;
template <class Impl>

int load_idx = loadHead;

for (int x = 0; x < cpu->numContexts(); x++) {
    bool no_squash = cpu->thread[x]->noSquashFromTC;
    cpu->thread[x]->noSquashFromTC = true;

    cpu->thread[x]->noSquashFromTC = no_squash;

Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;
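// Compare the block-aligned snoop address against both physical addresses
// of the load; a split access can touch two cache blocks.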
Addr load_addr_low = ld_inst->physEffAddrLow & cacheBlockMask;
Addr load_addr_high = ld_inst->physEffAddrHigh & cacheBlockMask;

if (ld_inst->effAddrValid() && (load_addr_low == invalidate_addr
                                || load_addr_high == invalidate_addr)

if (load_idx == loadTail)

bool force_squash = false;
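// Scan the rest of the load queue for loads to the invalidated block.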
while (load_idx != loadTail) {

    if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) {

    Addr load_addr_low = ld_inst->physEffAddrLow & cacheBlockMask;
    Addr load_addr_high = ld_inst->physEffAddrHigh & cacheBlockMask;

    DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
            ld_inst->seqNum, load_addr_low, invalidate_addr);

    if ((load_addr_low == invalidate_addr
         || load_addr_high == invalidate_addr) || force_squash) {

        if (ld_inst->possibleLoadViolation() || force_squash) {

                    pkt->getAddr(), ld_inst->seqNum);

            ld_inst->fault = std::make_shared<ReExec>();

                    pkt->getAddr(), ld_inst->seqNum);

            ld_inst->hitExternalSnoop(true);
template <class Impl>

Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;
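// Effective addresses are compared at a granularity of (1 << depCheckShift)
// bytes; inst_eff_addr1/2 are the first and last chunks this access touches.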
while (load_idx != loadTail) {

    if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) {

    Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
    Addr ld_eff_addr2 =
        (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;
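    // Two accesses overlap when neither one ends before the other begins.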
    if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
        if (inst->isLoad()) {

            if (ld_inst->hitExternalSnoop()) {
                if (!memDepViolator ||
                    ld_inst->seqNum < memDepViolator->seqNum) {

                            "and [sn:%lli] at address %#x\n",
                            inst->seqNum, ld_inst->seqNum, ld_eff_addr1);

                    memDepViolator = ld_inst;

                    ++lsqMemOrderViolation;

                    return std::make_shared<GenericISA::M5PanicFault>(
                        "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);

            ld_inst->possibleLoadViolation(true);

                    " between instructions [sn:%lli] and [sn:%lli]\n",
                    inst_eff_addr1, inst->seqNum, ld_inst->seqNum);

            if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)

                    "[sn:%lli] at address %#x\n",
                    inst->seqNum, ld_inst->seqNum, ld_eff_addr1);

            memDepViolator = ld_inst;

            ++lsqMemOrderViolation;

            return std::make_shared<GenericISA::M5PanicFault>(
                "Detected fault with "
                "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
template <class Impl>

using namespace TheISA;

        inst->pcState(), inst->seqNum);

assert(!inst->isSquashed());

load_fault = inst->initiateAcc();

if (inst->isTranslationDelayed() &&

if (load_fault != NoFault || !inst->readPredicate()) {

    if (!inst->readPredicate())
        inst->forwardOldRegs();

            (load_fault != NoFault ? "fault" : "predication"));

    if (!(inst->hasRequest() && inst->strictlyOrdered()) ||
        inst->isAtCommit()) {

        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();

assert(inst->effAddrValid());
int load_idx = inst->lqIdx;

return checkViolations(load_idx, inst);
template <class Impl>

using namespace TheISA;

int store_idx = store_inst->sqIdx;

        store_inst->pcState(), store_inst->seqNum);

assert(!store_inst->isSquashed());

int load_idx = store_inst->lqIdx;

Fault store_fault = store_inst->initiateAcc();

if (store_inst->isTranslationDelayed() &&

if (!store_inst->readPredicate())
    store_inst->forwardOldRegs();

if (storeQueue[store_idx].size == 0) {

        store_inst->pcState(), store_inst->seqNum);

} else if (!store_inst->readPredicate()) {
    DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",

assert(store_fault == NoFault);

if (store_inst->isStoreConditional()) {

    storeQueue[store_idx].canWB = true;

return checkViolations(load_idx, store_inst);
template <class Impl>

assert(loadQueue[loadHead]);

        loadQueue[loadHead]->pcState());

loadQueue[loadHead] = NULL;

template <class Impl>

assert(loads == 0 || loadQueue[loadHead]);

while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {

template <class Impl>

assert(stores == 0 || storeQueue[storeHead].inst);

int store_idx = storeHead;

while (store_idx != storeTail) {
    assert(storeQueue[store_idx].inst);

    if (!storeQueue[store_idx].canWB) {
        if (storeQueue[store_idx].inst->seqNum > youngest_inst) {

                storeQueue[store_idx].inst->pcState(),
                storeQueue[store_idx].inst->seqNum);

        storeQueue[store_idx].canWB = true;

    incrStIdx(store_idx);
template <class Impl>

assert(pendingPkt != NULL);

if (sendStore(pendingPkt)) {
    storePostSend(pendingPkt);

hasPendingPkt = false;

template <class Impl>

writebackPendingStore();
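// Keep issuing writebacks while committed stores are waiting, store ports
// remain this cycle, and (under TSO) no other store is still in flight.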
while (storesToWB > 0 &&
       storeWBIdx != storeTail &&
       storeQueue[storeWBIdx].inst &&
       storeQueue[storeWBIdx].canWB &&
       ((!needsTSO) || (!storeInFlight)) &&
       usedStorePorts < cacheStorePorts) {

    if (isStoreBlocked) {

    if (storeQueue[storeWBIdx].size == 0) {
        completeStore(storeWBIdx);

        incrStIdx(storeWBIdx);

    if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
        incrStIdx(storeWBIdx);

    assert(storeQueue[storeWBIdx].req);
    assert(!storeQueue[storeWBIdx].committed);

        assert(storeQueue[storeWBIdx].sreqLow);
        assert(storeQueue[storeWBIdx].sreqHigh);
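    // Gather the store's instruction, request(s), and data, then build the
    // packet(s) that will carry the store to memory.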
    DynInstPtr inst = storeQueue[storeWBIdx].inst;

    Request *req = storeQueue[storeWBIdx].req;
    RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
    RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

    storeQueue[storeWBIdx].committed = true;

    assert(!inst->memData);
    inst->memData = new uint8_t[req->getSize()];

    if (storeQueue[storeWBIdx].isAllZeros)
        memset(inst->memData, 0, req->getSize());
    else
        memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());
    state->idx = storeWBIdx;

    data_pkt->dataStatic(inst->memData);
    data_pkt->senderState = state;

        data_pkt->dataStatic(inst->memData);

        data_pkt->senderState = state;

            "to Addr:%#x, data:%#x [sn:%lli]\n",
            storeWBIdx, inst->pcState(),
            req->getPaddr(), (int)*(inst->memData),

    if (inst->isStoreConditional()) {
        assert(!storeQueue[storeWBIdx].isSplit);

            inst->recordResult(false);

            inst->recordResult(true);

                "Instantly completing it.\n",

            cpu->schedule(wb, curTick() + 1);

            inst->reqToVerify->setExtraData(0);
            inst->completeAcc(data_pkt);

            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
    assert(!inst->isStoreConditional());

        completeStore(storeWBIdx);
        incrStIdx(storeWBIdx);
    } else if (!sendStore(data_pkt)) {
        DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], will"

        assert(snd_data_pkt);

        if (usedStorePorts < cacheStorePorts) {

            if (sendStore(snd_data_pkt)) {
                storePostSend(snd_data_pkt);

                DPRINTF(IEW, "D-Cache became blocked when writing"
                        " [sn:%lli] second packet, will retry later\n",

        assert(pendingPkt == NULL);
        pendingPkt = snd_data_pkt;
        hasPendingPkt = true;

        storePostSend(data_pkt);

assert(stores >= 0 && storesToWB >= 0);
template <class Impl>

        "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

int load_idx = loadTail;
decrLdIdx(load_idx);
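// Start at the youngest load and walk toward the head, squashing every
// load younger than squashed_num.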
while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {

            loadQueue[load_idx]->pcState(),
            loadQueue[load_idx]->seqNum);

    if (isStalled() && load_idx == stallingLoadIdx) {

        stallingStoreIsn = 0;
        stallingLoadIdx = 0;

    loadQueue[load_idx]->setSquashed();
    loadQueue[load_idx] = NULL;

    loadTail = load_idx;

    decrLdIdx(load_idx);

if (memDepViolator && squashed_num < memDepViolator->seqNum) {
    memDepViolator = NULL;
int store_idx = storeTail;
decrStIdx(store_idx);

while (stores != 0 &&
       storeQueue[store_idx].inst->seqNum > squashed_num) {

    if (storeQueue[store_idx].canWB) {

            "idx:%i [sn:%lli]\n",
            storeQueue[store_idx].inst->pcState(),
            store_idx, storeQueue[store_idx].inst->seqNum);

        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        panic("Is stalled should have been cleared by stalling load!\n");

        stallingStoreIsn = 0;

    storeQueue[store_idx].inst->setSquashed();
    storeQueue[store_idx].inst = NULL;
    storeQueue[store_idx].canWB = 0;

    delete storeQueue[store_idx].req;

        delete storeQueue[store_idx].sreqLow;
        delete storeQueue[store_idx].sreqHigh;

        storeQueue[store_idx].sreqLow = NULL;
        storeQueue[store_idx].sreqHigh = NULL;

    storeQueue[store_idx].req = NULL;

    storeTail = store_idx;

    decrStIdx(store_idx);
    ++lsqSquashedStores;
template <class Impl>

    storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {

        stallingStoreIsn, stallingLoadIdx);

    stallingStoreIsn = 0;
    iewStage->replayMemInst(loadQueue[stallingLoadIdx]);

if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {

    storeQueue[storeWBIdx].inst->setCompleted();

    cpu->checker->verify(storeQueue[storeWBIdx].inst);

    storeInFlight = true;

incrStIdx(storeWBIdx);
template <class Impl>

iewStage->wakeCPU();

if (inst->isSquashed()) {
    assert(!inst->isStore());
    ++lsqIgnoredResponses;

if (!inst->isExecuted()) {
    inst->setExecuted();

    inst->completeAcc(pkt);

    assert(dynamic_cast<ReExec*>(inst->fault.get()) != nullptr);

            "due to pending fault.\n", inst->seqNum);

iewStage->instToCommit(inst);

iewStage->activityThisCycle();

iewStage->checkMisprediction(inst);
template <class Impl>

assert(storeQueue[store_idx].inst);
storeQueue[store_idx].completed = true;

cpu->activityThisCycle();
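// If the completed store is at the head of the queue, retire it together
// with any consecutive stores behind it that have also completed.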
if (store_idx == storeHead) {

        incrStIdx(storeHead);

    } while (storeQueue[storeHead].completed &&
             storeHead != storeTail);

    iewStage->updateLSQNextCycle = true;

DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
        storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

if (DTRACE(O3PipeView)) {
    storeQueue[store_idx].inst->storeTick =
        curTick() - storeQueue[store_idx].inst->fetchTick;

    storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {

        stallingStoreIsn, stallingLoadIdx);

    stallingStoreIsn = 0;
    iewStage->replayMemInst(loadQueue[stallingLoadIdx]);

storeQueue[store_idx].inst->setCompleted();

    storeInFlight = false;

cpu->checker->verify(storeQueue[store_idx].inst);
template <class Impl>
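// Attempt to send the store packet to the D-cache; if the port is blocked,
// remember the packet so it can be resent when the port issues a retry.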
if (!dcachePort->sendTimingReq(data_pkt)) {

    isStoreBlocked = true;

    assert(retryPkt == NULL);
    retryPkt = data_pkt;

template <class Impl>

if (isStoreBlocked) {

    assert(retryPkt != NULL);

    if (dcachePort->sendTimingReq(retryPkt)) {

        storePostSend(retryPkt);

        isStoreBlocked = false;
template <class Impl>
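// The index helpers below advance or rewind an index around the circular
// load/store queues, wrapping at SQEntries/LQEntries.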
if (++store_idx >= SQEntries)

template <class Impl>

if (--store_idx < 0)
    store_idx += SQEntries;

template <class Impl>

if (++load_idx >= LQEntries)

template <class Impl>

    load_idx += LQEntries;
template <class Impl>

cprintf("Load store queue: Dumping instructions.\n");
cprintf("Load queue size: %i\n", loads);

int load_idx = loadHead;

while (load_idx != loadTail && loadQueue[load_idx]) {

    cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

    incrLdIdx(load_idx);

cprintf("Store queue size: %i\n", stores);

int store_idx = storeHead;

while (store_idx != storeTail && storeQueue[store_idx].inst) {
    const DynInstPtr &inst(storeQueue[store_idx].inst);
    cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

    incrStIdx(store_idx);

#endif // __CPU_O3_LSQ_UNIT_IMPL_HH__