38 #include "debug/GPUMem.hh"
39 #include "debug/GPUReg.hh"
47 computeUnit(nullptr), gmQueueSize(p->global_mem_queue_size),
48 outOfOrderDataDelivery(p->out_of_order_data_delivery), inflightStores(0),
67 bool accessVrf =
true;
72 if ((m) && (m->isLoad() || m->isAtomicRet())) {
81 accessVrf && m->statusBitVector ==
VectorMask(0) &&
94 if (m->isStore() || m->isAtomic()) {
99 if (m->isLoad() || m->isAtomic()) {
115 if (mp->isLoad() || mp->isAtomic()) {
121 }
else if (mp->isStore()) {
142 std::make_pair(mp,
false)));
147 DPRINTF(GPUMem,
"CU%d: WF[%d][%d] Popping 0 mem_op = \n",
165 if (mem_req->second.second) {
166 return mem_req->second.first;
177 if (gpuDynInst->isLoad() || gpuDynInst->isAtomic()) {
180 }
else if (gpuDynInst->isStore()) {
186 if (gpuDynInst->isLoad() || gpuDynInst->isAtomic()) {
189 }
else if (gpuDynInst->isStore()) {
215 if (gpuDynInst->isLoad() || gpuDynInst->isAtomic()) {
228 mem_req->second.second =
true;
236 .
name(
name() +
".load_vrf_bank_conflict_cycles")
237 .
desc(
"total number of cycles GM data are delayed before updating "
std::queue< GPUDynInstPtr > gmIssuedRequests
void handleResponse(GPUDynInstPtr gpuDynInst)
This method handles responses sent to this GM pipeline by the compute unit (CU).
void completeRequest(GPUDynInstPtr gpuDynInst)
Once a memory request is finished, we remove it from the buffer.
GPUDynInstPtr getNextReadyResp()
Find the next ready response to service.
std::bitset< std::numeric_limits< unsigned long long >::digits > VectorMask
std::map< uint64_t, std::pair< GPUDynInstPtr, bool > > gmOrderedRespBuffer
GlobalMemPipeline(const ComputeUnitParams *params)
std::queue< GPUDynInstPtr > gmReturnedStores
ComputeUnit * computeUnit
std::shared_ptr< GPUDynInst > GPUDynInstPtr
bool isGMLdRespFIFOWrRdy() const
Stats::Scalar loadVrfBankConflictCycles
ComputeUnit * computeUnit
const std::string & name() const
uint32_t outstandingReqsRdGm
bool outOfOrderDataDelivery
uint32_t outstandingReqsWrGm
Derived & name(const std::string &name)
Sets the name and marks this stat to print at the end of simulation.
virtual const std::string name() const
void ScheduleAdd(uint32_t *val, Tick when, int x)
void issueRequest(GPUDynInstPtr gpuDynInst)
Issues a request to the pipeline, i.e., enqueues it in the request buffer.
std::vector< VectorRegisterFile * > vrf
std::queue< GPUDynInstPtr > gmReturnedLoads
void init(ComputeUnit *cu)
Derived & desc(const std::string &_desc)
Sets the description and marks this stat to print at the end of simulation.
std::vector< WaitClass > wfWait
bool isGMStRespFIFOWrRdy() const