gem5
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
global_memory_pipeline.hh
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2014-2015 Advanced Micro Devices, Inc.
3  * All rights reserved.
4  *
5  * For use for simulation and test purposes only
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  * this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright notice,
14  * this list of conditions and the following disclaimer in the documentation
15  * and/or other materials provided with the distribution.
16  *
17  * 3. Neither the name of the copyright holder nor the names of its contributors
18  * may be used to endorse or promote products derived from this software
19  * without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Author: John Kalamatianos, Sooraj Puthoor
34  */
35 
36 #ifndef __GLOBAL_MEMORY_PIPELINE_HH__
37 #define __GLOBAL_MEMORY_PIPELINE_HH__
38 
39 #include <queue>
40 #include <string>
41 
42 #include "gpu-compute/misc.hh"
43 #include "params/ComputeUnit.hh"
44 #include "sim/stats.hh"
45 
46 /**
47  * @file global_memory_pipeline.hh
48  *
49  * The global memory pipeline issues newly created global memory packets
50  * from the pipeline to the DTLB. This stage's exec() method issues
51  * the packet to the DTLB if there is space available in the return fifo.
52  * This stage also retires previously issued loads and stores that have
53  * returned from the memory sub-system.
54  */
55 
56 class ComputeUnit;
57 
59 {
60  public:
61  GlobalMemPipeline(const ComputeUnitParams *params);
62  void init(ComputeUnit *cu);
63  void exec();
64 
65  std::queue<GPUDynInstPtr> &getGMStRespFIFO() { return gmReturnedStores; }
66  std::queue<GPUDynInstPtr> &getGMLdRespFIFO() { return gmReturnedLoads; }
67 
76 
82  void completeRequest(GPUDynInstPtr gpuDynInst);
83 
88  void issueRequest(GPUDynInstPtr gpuDynInst);
89 
97  void handleResponse(GPUDynInstPtr gpuDynInst);
98 
99  bool
101  {
102  return gmReturnedLoads.size() < gmQueueSize;
103  }
104 
105  bool
107  {
108  return gmReturnedStores.size() < gmQueueSize;
109  }
110 
111  bool
112  isGMReqFIFOWrRdy(uint32_t pendReqs=0) const
113  {
114  return (gmIssuedRequests.size() + pendReqs) < gmQueueSize;
115  }
116 
117  const std::string &name() const { return _name; }
118  void regStats();
119 
120  void
122  {
123  loadVrfBankConflictCycles += num_cycles;
124  }
125 
126  private:
128  std::string _name;
131 
132  // number of cycles of delaying the update of a VGPR that is the
133  // target of a load instruction (or the load component of an atomic)
134  // The delay is due to VRF bank conflicts
136  // Counters to track the inflight loads and stores
137  // so that we can provide the proper backpressure
138  // on the number of inflight memory operations.
141 
142  // The size of global memory.
144 
145  /*
146  * this buffer holds the memory responses when in-order data
147  * delivery is used - the responses are ordered by their unique
148  * sequence number, which is monotonically increasing. when a
149  * memory request returns its "done" flag is set to true. during
150  * each tick the GM pipeline will check if the oldest request
151  * is finished, and if so it will be removed from the queue.
152  *
153  * key: memory instruction's sequence ID
154  *
155  * value: pair holding the instruction pointer and a bool that
156  * is used to indicate whether or not the request has
157  * completed
158  */
159  std::map<uint64_t, std::pair<GPUDynInstPtr, bool>> gmOrderedRespBuffer;
160 
161  // Global Memory Request FIFO: all global memory requests
162  // are issued to this FIFO from the memory pipelines
163  std::queue<GPUDynInstPtr> gmIssuedRequests;
164 
165  // Global Store Response FIFO: all responses of global memory
166  // stores are sent to this FIFO from TCP
167  std::queue<GPUDynInstPtr> gmReturnedStores;
168 
169  // Global Load Response FIFO: all responses of global memory
170  // loads are sent to this FIFO from TCP
171  std::queue<GPUDynInstPtr> gmReturnedLoads;
172 };
173 
174 #endif // __GLOBAL_MEMORY_PIPELINE_HH__
bool isGMReqFIFOWrRdy(uint32_t pendReqs=0) const
std::queue< GPUDynInstPtr > gmIssuedRequests
void handleResponse(GPUDynInstPtr gpuDynInst)
this method handles responses sent to this GM pipeline by the CU.
void incLoadVRFBankConflictCycles(int num_cycles)
void completeRequest(GPUDynInstPtr gpuDynInst)
once a memory request is finished we remove it from the buffer.
GPUDynInstPtr getNextReadyResp()
find the next ready response to service.
std::map< uint64_t, std::pair< GPUDynInstPtr, bool > > gmOrderedRespBuffer
GlobalMemPipeline(const ComputeUnitParams *params)
std::queue< GPUDynInstPtr > & getGMLdRespFIFO()
std::queue< GPUDynInstPtr > gmReturnedStores
std::queue< GPUDynInstPtr > & getGMStRespFIFO()
This is a simple scalar statistic, like a counter.
Definition: statistics.hh:2475
std::shared_ptr< GPUDynInst > GPUDynInstPtr
Definition: misc.hh:48
bool isGMLdRespFIFOWrRdy() const
Stats::Scalar loadVrfBankConflictCycles
const std::string & name() const
void issueRequest(GPUDynInstPtr gpuDynInst)
issues a request to the pipeline - i.e., enqueue it in the request buffer.
std::queue< GPUDynInstPtr > gmReturnedLoads
void init(ComputeUnit *cu)
bool isGMStRespFIFOWrRdy() const

Generated on Fri Jun 9 2017 13:03:47 for gem5 by doxygen 1.8.6