38 #include "debug/GPUMem.hh"
46 n_reg(0), useContinuation(false),
47 statusBitVector(0), _staticInst(static_inst), _seqNum(instSeqNum)
147 Enums::StorageClassType
157 DPRINTF(GPUMem,
"CU%d: WF[%d][%d]: mempacket status bitvector=%#x\n",
514 assert(number_pages_touched);
523 .insert(ComputeUnit::pageDataStruct::value_type(it.first,
524 std::make_pair(1, it.second)));
528 ret.first->second.first++;
529 ret.first->second.second += it.second;
bool isRelaxedOrder() const
bool isPrivateSeg() const
bool isUnconditionalJump() const
virtual void completeAcc(GPUDynInstPtr gpuDynInst)
bool isSystemCoherent() const
bool isSystemScope() const
std::vector< int > tlbHitLevel
bool isAtomicNoRet() const
bool isAcquireRelease() const
Stats::Scalar dynamicGMemInstrCnt
bool isWorkitemScope() const
int getOperandSize(int operandIdx)
virtual bool isCondRegister(int operandIndex)=0
const std::string & disassemble() const
Stats::Scalar dynamicLMemInstrCnt
bool isAtomicExch() const
GPUDynInst(ComputeUnit *_cu, Wavefront *_wf, GPUStaticInst *static_inst, uint64_t instSeqNum)
Enums::StorageClassType executedAs()
bool isKernArgSeg() const
bool isWorkgroupScope() const
bool isWavefrontScope() const
std::shared_ptr< GPUDynInst > GPUDynInstPtr
virtual bool isDstOperand(int operandIndex)=0
virtual bool isScalarRegister(int operandIndex)=0
virtual int numDstRegOperands()=0
Stats::Distribution pageDivergenceDist
bool isCondRegister(int operandIdx)
bool isAtomicNoRet() const
bool isWavefrontScope() const
bool isSystemCoherent() const
bool isPrivateSeg() const
bool isVectorRegister(int operandIdx)
Enums::StorageClassType executed_as
virtual int getRegisterIndex(int operandIndex, GPUDynInstPtr gpuDynInst)=0
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
bool isScalarRegister(int operandIdx)
bool isSrcOperand(int operandIdx)
bool isWorkitemScope() const
virtual bool isSrcOperand(int operandIndex)=0
bool isALU() const
Accessor methods for the attributes of the underlying GPU static instruction.
bool isGloballyCoherent() const
bool isDeviceScope() const
virtual int getNumOperands()=0
void completeAcc(GPUDynInstPtr gpuDynInst)
GPUStaticInst * _staticInst
int getRegisterIndex(int operandIdx, GPUDynInstPtr gpuDynInst)
bool isAtomicExch() const
bool isUnconditionalJump() const
void execute(GPUDynInstPtr gpuDynInst)
bool isReadOnlySeg() const
bool isKernArgSeg() const
const std::string & disassemble()
std::map< Addr, int > pagesTouched
ComputeUnit * computeUnit()
virtual int numSrcRegOperands()=0
bool isRelaxedOrder() const
virtual bool isVectorRegister(int operandIndex)=0
bool isDeviceScope() const
bool isGloballyCoherent() const
Coherence domain of a memory instruction.
virtual void execute(GPUDynInstPtr gpuDynInst)=0
bool isDstOperand(int operandIdx)
pageDataStruct pageAccesses
virtual void initiateAcc(GPUDynInstPtr gpuDynInst)
bool isWorkgroupScope() const
virtual int getOperandSize(int operandIndex)=0
bool isSystemScope() const
void initiateAcc(GPUDynInstPtr gpuDynInst)
bool isReadOnlySeg() const
void sample(const U &v, int n=1)
Add a value to the distribution n times.
bool isAcquireRelease() const