36 #ifndef __GLOBAL_MEMORY_PIPELINE_HH__
37 #define __GLOBAL_MEMORY_PIPELINE_HH__
43 #include "params/ComputeUnit.hh"
174 #endif // __GLOBAL_MEMORY_PIPELINE_HH__
bool isGMReqFIFOWrRdy(uint32_t pendReqs=0) const
std::queue< GPUDynInstPtr > gmIssuedRequests
void handleResponse(GPUDynInstPtr gpuDynInst)
This method handles responses sent to this GM pipeline by the CU.
void incLoadVRFBankConflictCycles(int num_cycles)
void completeRequest(GPUDynInstPtr gpuDynInst)
Once a memory request is finished, we remove it from the buffer.
GPUDynInstPtr getNextReadyResp()
Finds the next ready response to service.
std::map< uint64_t, std::pair< GPUDynInstPtr, bool > > gmOrderedRespBuffer
GlobalMemPipeline(const ComputeUnitParams *params)
std::queue< GPUDynInstPtr > & getGMLdRespFIFO()
std::queue< GPUDynInstPtr > gmReturnedStores
std::queue< GPUDynInstPtr > & getGMStRespFIFO()
This is a simple scalar statistic, like a counter.
ComputeUnit * computeUnit
std::shared_ptr< GPUDynInst > GPUDynInstPtr
bool isGMLdRespFIFOWrRdy() const
Stats::Scalar loadVrfBankConflictCycles
const std::string & name() const
bool outOfOrderDataDelivery
void issueRequest(GPUDynInstPtr gpuDynInst)
Issues a request to the pipeline, i.e., enqueues it in the request buffer.
std::queue< GPUDynInstPtr > gmReturnedLoads
void init(ComputeUnit *cu)
bool isGMStRespFIFOWrRdy() const