gem5
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
dram_ctrl.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010-2017 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 2013 Amin Farmahini-Farahani
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions are
19  * met: redistributions of source code must retain the above copyright
20  * notice, this list of conditions and the following disclaimer;
21  * redistributions in binary form must reproduce the above copyright
22  * notice, this list of conditions and the following disclaimer in the
23  * documentation and/or other materials provided with the distribution;
24  * neither the name of the copyright holders nor the names of its
25  * contributors may be used to endorse or promote products derived from
26  * this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  *
40  * Authors: Andreas Hansson
41  * Ani Udipi
42  * Neha Agarwal
43  * Omar Naji
44  * Wendy Elsasser
45  */
46 
47 #include "mem/dram_ctrl.hh"
48 
49 #include "base/bitfield.hh"
50 #include "base/trace.hh"
51 #include "debug/DRAM.hh"
52 #include "debug/DRAMPower.hh"
53 #include "debug/DRAMState.hh"
54 #include "debug/Drain.hh"
55 #include "sim/system.hh"
56 
57 using namespace std;
58 using namespace Data;
59 
60 DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
61  AbstractMemory(p),
62  port(name() + ".port", *this), isTimingMode(false),
63  retryRdReq(false), retryWrReq(false),
64  busState(READ),
65  busStateNext(READ),
66  nextReqEvent(this), respondEvent(this),
67  deviceSize(p->device_size),
68  deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
69  deviceRowBufferSize(p->device_rowbuffer_size),
70  devicesPerRank(p->devices_per_rank),
71  burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
72  rowBufferSize(devicesPerRank * deviceRowBufferSize),
73  columnsPerRowBuffer(rowBufferSize / burstSize),
74  columnsPerStripe(range.interleaved() ? range.granularity() / burstSize : 1),
75  ranksPerChannel(p->ranks_per_channel),
76  bankGroupsPerRank(p->bank_groups_per_rank),
77  bankGroupArch(p->bank_groups_per_rank > 0),
78  banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
79  readBufferSize(p->read_buffer_size),
80  writeBufferSize(p->write_buffer_size),
81  writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
82  writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
83  minWritesPerSwitch(p->min_writes_per_switch),
84  writesThisTime(0), readsThisTime(0),
85  tCK(p->tCK), tWTR(p->tWTR), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST),
86  tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
87  tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
88  tRRD_L(p->tRRD_L), tXAW(p->tXAW), tXP(p->tXP), tXS(p->tXS),
89  activationLimit(p->activation_limit),
90  memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
91  pageMgmt(p->page_policy),
92  maxAccessesPerRow(p->max_accesses_per_row),
93  frontendLatency(p->static_frontend_latency),
94  backendLatency(p->static_backend_latency),
95  busBusyUntil(0), prevArrival(0),
96  nextReqTime(0), activeRank(0), timeStampOffset(0)
97 {
98  // sanity check the ranks since we rely on bit slicing for the
99  // address decoding
100  fatal_if(!isPowerOf2(ranksPerChannel), "DRAM rank count of %d is not "
101  "allowed, must be a power of two\n", ranksPerChannel);
102 
103  fatal_if(!isPowerOf2(burstSize), "DRAM burst size %d is not allowed, "
104  "must be a power of two\n", burstSize);
105 
106  for (int i = 0; i < ranksPerChannel; i++) {
107  Rank* rank = new Rank(*this, p);
108  ranks.push_back(rank);
109 
110  rank->actTicks.resize(activationLimit, 0);
111  rank->banks.resize(banksPerRank);
112  rank->rank = i;
113 
114  for (int b = 0; b < banksPerRank; b++) {
115  rank->banks[b].bank = b;
116  // GDDR addressing of banks to BG is linear.
117  // Here we assume that all DRAM generations address bank groups as
118  // follows:
119  if (bankGroupArch) {
120  // Simply assign lower bits to bank group in order to
121  // rotate across bank groups as banks are incremented
122  // e.g. with 4 banks per bank group and 16 banks total:
123  // banks 0,4,8,12 are in bank group 0
124  // banks 1,5,9,13 are in bank group 1
125  // banks 2,6,10,14 are in bank group 2
126  // banks 3,7,11,15 are in bank group 3
127  rank->banks[b].bankgr = b % bankGroupsPerRank;
128  } else {
129  // No bank groups; simply assign to bank number
130  rank->banks[b].bankgr = b;
131  }
132  }
133  }
134 
135  // perform a basic check of the write thresholds
136  if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
137  fatal("Write buffer low threshold %d must be smaller than the "
138  "high threshold %d\n", p->write_low_thresh_perc,
139  p->write_high_thresh_perc);
140 
141  // determine the rows per bank by looking at the total capacity
142  uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());
143 
144  // determine the dram actual capacity from the DRAM config in Mbytes
145  uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank *
146  ranksPerChannel;
147 
148  // if actual DRAM size does not match memory capacity in system warn!
149  if (deviceCapacity != capacity / (1024 * 1024))
150  warn("DRAM device capacity (%d Mbytes) does not match the "
151  "address range assigned (%d Mbytes)\n", deviceCapacity,
152  capacity / (1024 * 1024));
153 
154  DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
156 
157  DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
159 
161 
162  // some basic sanity checks
163  if (tREFI <= tRP || tREFI <= tRFC) {
164  fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
165  tREFI, tRP, tRFC);
166  }
167 
168  // basic bank group architecture checks ->
169  if (bankGroupArch) {
170  // must have at least one bank per bank group
172  fatal("banks per rank (%d) must be equal to or larger than "
173  "banks groups per rank (%d)\n",
175  }
176  // must have same number of banks in each bank group
177  if ((banksPerRank % bankGroupsPerRank) != 0) {
178  fatal("Banks per rank (%d) must be evenly divisible by bank groups "
179  "per rank (%d) for equal banks per bank group\n",
180  banksPerRank, bankGroupsPerRank);
181  }
182  // tCCD_L should be greater than minimal, back-to-back burst delay
183  if (tCCD_L <= tBURST) {
184  fatal("tCCD_L (%d) should be larger than tBURST (%d) when "
185  "bank groups per rank (%d) is greater than 1\n",
186  tCCD_L, tBURST, bankGroupsPerRank);
187  }
188  // tRRD_L is greater than minimal, same bank group ACT-to-ACT delay
189  // some datasheets might specify it equal to tRRD
190  if (tRRD_L < tRRD) {
191  fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
192  "bank groups per rank (%d) is greater than 1\n",
193  tRRD_L, tRRD, bankGroupsPerRank);
194  }
195  }
196 
197 }
198 
199 void
201 {
203 
204  if (!port.isConnected()) {
205  fatal("DRAMCtrl %s is unconnected!\n", name());
206  } else {
208  }
209 
210  // a bit of sanity checks on the interleaving, save it for here to
211  // ensure that the system pointer is initialised
212  if (range.interleaved()) {
213  if (channels != range.stripes())
214  fatal("%s has %d interleaved address stripes but %d channel(s)\n",
215  name(), range.stripes(), channels);
216 
217  if (addrMapping == Enums::RoRaBaChCo) {
218  if (rowBufferSize != range.granularity()) {
219  fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
220  "address map\n", name());
221  }
222  } else if (addrMapping == Enums::RoRaBaCoCh ||
223  addrMapping == Enums::RoCoRaBaCh) {
224  // for the interleavings with channel bits in the bottom,
225  // if the system uses a channel striping granularity that
226  // is larger than the DRAM burst size, then map the
227  // sequential accesses within a stripe to a number of
228  // columns in the DRAM, effectively placing some of the
229  // lower-order column bits as the least-significant bits
230  // of the address (above the ones denoting the burst size)
231  assert(columnsPerStripe >= 1);
232 
233  // channel striping has to be done at a granularity that
234  // is equal or larger to a cache line
235  if (system()->cacheLineSize() > range.granularity()) {
236  fatal("Channel interleaving of %s must be at least as large "
237  "as the cache line size\n", name());
238  }
239 
240  // ...and equal or smaller than the row-buffer size
241  if (rowBufferSize < range.granularity()) {
242  fatal("Channel interleaving of %s must be at most as large "
243  "as the row-buffer size\n", name());
244  }
245  // this is essentially the check above, so just to be sure
247  }
248  }
249 }
250 
251 void
253 {
254  // remember the memory system mode of operation
256 
257  if (isTimingMode) {
258  // timestamp offset should be in clock cycles for DRAMPower
260 
261  // update the start tick for the precharge accounting to the
262  // current tick
263  for (auto r : ranks) {
264  r->startup(curTick() + tREFI - tRP);
265  }
266 
267  // shift the bus busy time sufficiently far ahead that we never
268  // have to worry about negative values when computing the time for
269  // the next request, this will add an insignificant bubble at the
270  // start of simulation
271  busBusyUntil = curTick() + tRP + tRCD + tCL;
272  }
273 }
274 
275 Tick
277 {
278  DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());
279 
280  panic_if(pkt->cacheResponding(), "Should not see packets where cache "
281  "is responding");
282 
283  // do the actual memory access and turn the packet into a response
284  access(pkt);
285 
286  Tick latency = 0;
287  if (pkt->hasData()) {
288  // this value is not supposed to be accurate, just enough to
289  // keep things going, mimic a closed page
290  latency = tRP + tRCD + tCL;
291  }
292  return latency;
293 }
294 
295 bool
296 DRAMCtrl::readQueueFull(unsigned int neededEntries) const
297 {
298  DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
299  readBufferSize, readQueue.size() + respQueue.size(),
300  neededEntries);
301 
302  return
303  (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
304 }
305 
306 bool
307 DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
308 {
309  DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
310  writeBufferSize, writeQueue.size(), neededEntries);
311  return (writeQueue.size() + neededEntries) > writeBufferSize;
312 }
313 
315 DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
316  bool isRead)
317 {
318  // decode the address based on the address mapping scheme, with
319  // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
320  // channel, respectively
321  uint8_t rank;
322  uint8_t bank;
323  // use a 64-bit unsigned during the computations as the row is
324  // always the top bits, and check before creating the DRAMPacket
325  uint64_t row;
326 
327  // truncate the address to a DRAM burst, which makes it unique to
328  // a specific column, row, bank, rank and channel
329  Addr addr = dramPktAddr / burstSize;
330 
331  // we have removed the lowest order address bits that denote the
332  // position within the column
333  if (addrMapping == Enums::RoRaBaChCo) {
334  // the lowest order bits denote the column to ensure that
335  // sequential cache lines occupy the same row
336  addr = addr / columnsPerRowBuffer;
337 
338  // take out the channel part of the address
339  addr = addr / channels;
340 
341  // after the channel bits, get the bank bits to interleave
342  // over the banks
343  bank = addr % banksPerRank;
344  addr = addr / banksPerRank;
345 
346  // after the bank, we get the rank bits which thus interleaves
347  // over the ranks
348  rank = addr % ranksPerChannel;
349  addr = addr / ranksPerChannel;
350 
351  // lastly, get the row bits, no need to remove them from addr
352  row = addr % rowsPerBank;
353  } else if (addrMapping == Enums::RoRaBaCoCh) {
354  // take out the lower-order column bits
355  addr = addr / columnsPerStripe;
356 
357  // take out the channel part of the address
358  addr = addr / channels;
359 
360  // next, the higher-order column bites
361  addr = addr / (columnsPerRowBuffer / columnsPerStripe);
362 
363  // after the column bits, we get the bank bits to interleave
364  // over the banks
365  bank = addr % banksPerRank;
366  addr = addr / banksPerRank;
367 
368  // after the bank, we get the rank bits which thus interleaves
369  // over the ranks
370  rank = addr % ranksPerChannel;
371  addr = addr / ranksPerChannel;
372 
373  // lastly, get the row bits, no need to remove them from addr
374  row = addr % rowsPerBank;
375  } else if (addrMapping == Enums::RoCoRaBaCh) {
376  // optimise for closed page mode and utilise maximum
377  // parallelism of the DRAM (at the cost of power)
378 
379  // take out the lower-order column bits
380  addr = addr / columnsPerStripe;
381 
382  // take out the channel part of the address, not that this has
383  // to match with how accesses are interleaved between the
384  // controllers in the address mapping
385  addr = addr / channels;
386 
387  // start with the bank bits, as this provides the maximum
388  // opportunity for parallelism between requests
389  bank = addr % banksPerRank;
390  addr = addr / banksPerRank;
391 
392  // next get the rank bits
393  rank = addr % ranksPerChannel;
394  addr = addr / ranksPerChannel;
395 
396  // next, the higher-order column bites
397  addr = addr / (columnsPerRowBuffer / columnsPerStripe);
398 
399  // lastly, get the row bits, no need to remove them from addr
400  row = addr % rowsPerBank;
401  } else
402  panic("Unknown address mapping policy chosen!");
403 
404  assert(rank < ranksPerChannel);
405  assert(bank < banksPerRank);
406  assert(row < rowsPerBank);
407  assert(row < Bank::NO_ROW);
408 
409  DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
410  dramPktAddr, rank, bank, row);
411 
412  // create the corresponding DRAM packet with the entry time and
413  // ready time set to the current tick, the latter will be updated
414  // later
415  uint16_t bank_id = banksPerRank * rank + bank;
416  return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
417  size, ranks[rank]->banks[bank], *ranks[rank]);
418 }
419 
420 void
421 DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
422 {
423  // only add to the read queue here. whenever the request is
424  // eventually done, set the readyTime, and call schedule()
425  assert(!pkt->isWrite());
426 
427  assert(pktCount != 0);
428 
429  // if the request size is larger than burst size, the pkt is split into
430  // multiple DRAM packets
431  // Note if the pkt starting address is not aligened to burst size, the
432  // address of first DRAM packet is kept unaliged. Subsequent DRAM packets
433  // are aligned to burst size boundaries. This is to ensure we accurately
434  // check read packets against packets in write queue.
435  Addr addr = pkt->getAddr();
436  unsigned pktsServicedByWrQ = 0;
437  BurstHelper* burst_helper = NULL;
438  for (int cnt = 0; cnt < pktCount; ++cnt) {
439  unsigned size = std::min((addr | (burstSize - 1)) + 1,
440  pkt->getAddr() + pkt->getSize()) - addr;
441  readPktSize[ceilLog2(size)]++;
442  readBursts++;
443 
444  // First check write buffer to see if the data is already at
445  // the controller
446  bool foundInWrQ = false;
447  Addr burst_addr = burstAlign(addr);
448  // if the burst address is not present then there is no need
449  // looking any further
450  if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
451  for (const auto& p : writeQueue) {
452  // check if the read is subsumed in the write queue
453  // packet we are looking at
454  if (p->addr <= addr && (addr + size) <= (p->addr + p->size)) {
455  foundInWrQ = true;
456  servicedByWrQ++;
457  pktsServicedByWrQ++;
458  DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
459  "write queue\n", addr, size);
461  break;
462  }
463  }
464  }
465 
466  // If not found in the write q, make a DRAM packet and
467  // push it onto the read queue
468  if (!foundInWrQ) {
469 
470  // Make the burst helper for split packets
471  if (pktCount > 1 && burst_helper == NULL) {
472  DPRINTF(DRAM, "Read to addr %lld translates to %d "
473  "dram requests\n", pkt->getAddr(), pktCount);
474  burst_helper = new BurstHelper(pktCount);
475  }
476 
477  DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
478  dram_pkt->burstHelper = burst_helper;
479 
480  assert(!readQueueFull(1));
481  rdQLenPdf[readQueue.size() + respQueue.size()]++;
482 
483  DPRINTF(DRAM, "Adding to read queue\n");
484 
485  readQueue.push_back(dram_pkt);
486 
487  // increment read entries of the rank
488  ++dram_pkt->rankRef.readEntries;
489 
490  // Update stats
491  avgRdQLen = readQueue.size() + respQueue.size();
492  }
493 
494  // Starting address of next dram pkt (aligend to burstSize boundary)
495  addr = (addr | (burstSize - 1)) + 1;
496  }
497 
498  // If all packets are serviced by write queue, we send the repsonse back
499  if (pktsServicedByWrQ == pktCount) {
501  return;
502  }
503 
504  // Update how many split packets are serviced by write queue
505  if (burst_helper != NULL)
506  burst_helper->burstsServiced = pktsServicedByWrQ;
507 
508  // If we are not already scheduled to get a request out of the
509  // queue, do so now
510  if (!nextReqEvent.scheduled()) {
511  DPRINTF(DRAM, "Request scheduled immediately\n");
513  }
514 }
515 
516 void
517 DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
518 {
519  // only add to the write queue here. whenever the request is
520  // eventually done, set the readyTime, and call schedule()
521  assert(pkt->isWrite());
522 
523  // if the request size is larger than burst size, the pkt is split into
524  // multiple DRAM packets
525  Addr addr = pkt->getAddr();
526  for (int cnt = 0; cnt < pktCount; ++cnt) {
527  unsigned size = std::min((addr | (burstSize - 1)) + 1,
528  pkt->getAddr() + pkt->getSize()) - addr;
529  writePktSize[ceilLog2(size)]++;
530  writeBursts++;
531 
532  // see if we can merge with an existing item in the write
533  // queue and keep track of whether we have merged or not
534  bool merged = isInWriteQueue.find(burstAlign(addr)) !=
535  isInWriteQueue.end();
536 
537  // if the item was not merged we need to create a new write
538  // and enqueue it
539  if (!merged) {
540  DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);
541 
542  assert(writeQueue.size() < writeBufferSize);
543  wrQLenPdf[writeQueue.size()]++;
544 
545  DPRINTF(DRAM, "Adding to write queue\n");
546 
547  writeQueue.push_back(dram_pkt);
548  isInWriteQueue.insert(burstAlign(addr));
549  assert(writeQueue.size() == isInWriteQueue.size());
550 
551  // Update stats
552  avgWrQLen = writeQueue.size();
553 
554  // increment write entries of the rank
555  ++dram_pkt->rankRef.writeEntries;
556  } else {
557  DPRINTF(DRAM, "Merging write burst with existing queue entry\n");
558 
559  // keep track of the fact that this burst effectively
560  // disappeared as it was merged with an existing one
561  mergedWrBursts++;
562  }
563 
564  // Starting address of next dram pkt (aligend to burstSize boundary)
565  addr = (addr | (burstSize - 1)) + 1;
566  }
567 
568  // we do not wait for the writes to be send to the actual memory,
569  // but instead take responsibility for the consistency here and
570  // snoop the write queue for any upcoming reads
571  // @todo, if a pkt size is larger than burst size, we might need a
572  // different front end latency
574 
575  // If we are not already scheduled to get a request out of the
576  // queue, do so now
577  if (!nextReqEvent.scheduled()) {
578  DPRINTF(DRAM, "Request scheduled immediately\n");
580  }
581 }
582 
583 void
585  DPRINTF(DRAM, "===READ QUEUE===\n\n");
586  for (auto i = readQueue.begin() ; i != readQueue.end() ; ++i) {
587  DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
588  }
589  DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
590  for (auto i = respQueue.begin() ; i != respQueue.end() ; ++i) {
591  DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
592  }
593  DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
594  for (auto i = writeQueue.begin() ; i != writeQueue.end() ; ++i) {
595  DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
596  }
597 }
598 
599 bool
601 {
602  // This is where we enter from the outside world
603  DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
604  pkt->cmdString(), pkt->getAddr(), pkt->getSize());
605 
606  panic_if(pkt->cacheResponding(), "Should not see packets where cache "
607  "is responding");
608 
609  panic_if(!(pkt->isRead() || pkt->isWrite()),
610  "Should only see read and writes at memory controller\n");
611 
612  // Calc avg gap between requests
613  if (prevArrival != 0) {
614  totGap += curTick() - prevArrival;
615  }
616  prevArrival = curTick();
617 
618 
619  // Find out how many dram packets a pkt translates to
620  // If the burst size is equal or larger than the pkt size, then a pkt
621  // translates to only one dram packet. Otherwise, a pkt translates to
622  // multiple dram packets
623  unsigned size = pkt->getSize();
624  unsigned offset = pkt->getAddr() & (burstSize - 1);
625  unsigned int dram_pkt_count = divCeil(offset + size, burstSize);
626 
627  // check local buffers and do not accept if full
628  if (pkt->isRead()) {
629  assert(size != 0);
630  if (readQueueFull(dram_pkt_count)) {
631  DPRINTF(DRAM, "Read queue full, not accepting\n");
632  // remember that we have to retry this port
633  retryRdReq = true;
634  numRdRetry++;
635  return false;
636  } else {
637  addToReadQueue(pkt, dram_pkt_count);
638  readReqs++;
639  bytesReadSys += size;
640  }
641  } else {
642  assert(pkt->isWrite());
643  assert(size != 0);
644  if (writeQueueFull(dram_pkt_count)) {
645  DPRINTF(DRAM, "Write queue full, not accepting\n");
646  // remember that we have to retry this port
647  retryWrReq = true;
648  numWrRetry++;
649  return false;
650  } else {
651  addToWriteQueue(pkt, dram_pkt_count);
652  writeReqs++;
654  }
655  }
656 
657  return true;
658 }
659 
660 void
662 {
663  DPRINTF(DRAM,
664  "processRespondEvent(): Some req has reached its readyTime\n");
665 
666  DRAMPacket* dram_pkt = respQueue.front();
667 
668  // if a read has reached its ready-time, decrement the number of reads
669  // At this point the packet has been handled and there is a possibility
670  // to switch to low-power mode if no other packet is available
671  --dram_pkt->rankRef.readEntries;
672  DPRINTF(DRAM, "number of read entries for rank %d is %d\n",
673  dram_pkt->rank, dram_pkt->rankRef.readEntries);
674 
675  // counter should at least indicate one outstanding request
676  // for this read
677  assert(dram_pkt->rankRef.outstandingEvents > 0);
678  // read response received, decrement count
679  --dram_pkt->rankRef.outstandingEvents;
680 
681  // at this moment should not have transitioned to a low-power state
682  assert((dram_pkt->rankRef.pwrState != PWR_SREF) &&
683  (dram_pkt->rankRef.pwrState != PWR_PRE_PDN) &&
684  (dram_pkt->rankRef.pwrState != PWR_ACT_PDN));
685 
686  // track if this is the last packet before idling
687  // and that there are no outstanding commands to this rank
688  // if REF in progress, transition to LP state should not occur
689  // until REF completes
690  if ((dram_pkt->rankRef.refreshState == REF_IDLE) &&
691  (dram_pkt->rankRef.lowPowerEntryReady())) {
692  // verify that there are no events scheduled
693  assert(!dram_pkt->rankRef.activateEvent.scheduled());
694  assert(!dram_pkt->rankRef.prechargeEvent.scheduled());
695 
696  // if coming from active state, schedule power event to
697  // active power-down else go to precharge power-down
698  DPRINTF(DRAMState, "Rank %d sleep at tick %d; current power state is "
699  "%d\n", dram_pkt->rank, curTick(), dram_pkt->rankRef.pwrState);
700 
701  // default to ACT power-down unless already in IDLE state
702  // could be in IDLE if PRE issued before data returned
703  PowerState next_pwr_state = PWR_ACT_PDN;
704  if (dram_pkt->rankRef.pwrState == PWR_IDLE) {
705  next_pwr_state = PWR_PRE_PDN;
706  }
707 
708  dram_pkt->rankRef.powerDownSleep(next_pwr_state, curTick());
709  }
710 
711  if (dram_pkt->burstHelper) {
712  // it is a split packet
713  dram_pkt->burstHelper->burstsServiced++;
714  if (dram_pkt->burstHelper->burstsServiced ==
715  dram_pkt->burstHelper->burstCount) {
716  // we have now serviced all children packets of a system packet
717  // so we can now respond to the requester
718  // @todo we probably want to have a different front end and back
719  // end latency for split packets
721  delete dram_pkt->burstHelper;
722  dram_pkt->burstHelper = NULL;
723  }
724  } else {
725  // it is not a split packet
727  }
728 
729  delete respQueue.front();
730  respQueue.pop_front();
731 
732  if (!respQueue.empty()) {
733  assert(respQueue.front()->readyTime >= curTick());
734  assert(!respondEvent.scheduled());
735  schedule(respondEvent, respQueue.front()->readyTime);
736  } else {
737  // if there is nothing left in any queue, signal a drain
738  if (drainState() == DrainState::Draining &&
739  writeQueue.empty() && readQueue.empty() && allRanksDrained()) {
740 
741  DPRINTF(Drain, "DRAM controller done draining\n");
742  signalDrainDone();
743  }
744  }
745 
746  // We have made a location in the queue available at this point,
747  // so if there is a read that was forced to wait, retry now
748  if (retryRdReq) {
749  retryRdReq = false;
750  port.sendRetryReq();
751  }
752 }
753 
754 bool
756 {
757  // This method does the arbitration between requests. The chosen
758  // packet is simply moved to the head of the queue. The other
759  // methods know that this is the place to look. For example, with
760  // FCFS, this method does nothing
761  assert(!queue.empty());
762 
763  // bool to indicate if a packet to an available rank is found
764  bool found_packet = false;
765  if (queue.size() == 1) {
766  DRAMPacket* dram_pkt = queue.front();
767  // available rank corresponds to state refresh idle
768  if (ranks[dram_pkt->rank]->isAvailable()) {
769  found_packet = true;
770  DPRINTF(DRAM, "Single request, going to a free rank\n");
771  } else {
772  DPRINTF(DRAM, "Single request, going to a busy rank\n");
773  }
774  return found_packet;
775  }
776 
777  if (memSchedPolicy == Enums::fcfs) {
778  // check if there is a packet going to a free rank
779  for (auto i = queue.begin(); i != queue.end() ; ++i) {
780  DRAMPacket* dram_pkt = *i;
781  if (ranks[dram_pkt->rank]->isAvailable()) {
782  queue.erase(i);
783  queue.push_front(dram_pkt);
784  found_packet = true;
785  break;
786  }
787  }
788  } else if (memSchedPolicy == Enums::frfcfs) {
789  found_packet = reorderQueue(queue, extra_col_delay);
790  } else
791  panic("No scheduling policy chosen\n");
792  return found_packet;
793 }
794 
795 bool
797 {
798  // Only determine this if needed
799  uint64_t earliest_banks = 0;
800  bool hidden_bank_prep = false;
801 
802  // search for seamless row hits first, if no seamless row hit is
803  // found then determine if there are other packets that can be issued
804  // without incurring additional bus delay due to bank timing
805  // Will select closed rows first to enable more open row possibilies
806  // in future selections
807  bool found_hidden_bank = false;
808 
809  // remember if we found a row hit, not seamless, but bank prepped
810  // and ready
811  bool found_prepped_pkt = false;
812 
813  // if we have no row hit, prepped or not, and no seamless packet,
814  // just go for the earliest possible
815  bool found_earliest_pkt = false;
816 
817  auto selected_pkt_it = queue.end();
818 
819  // time we need to issue a column command to be seamless
820  const Tick min_col_at = std::max(busBusyUntil - tCL + extra_col_delay,
821  curTick());
822 
823  for (auto i = queue.begin(); i != queue.end() ; ++i) {
824  DRAMPacket* dram_pkt = *i;
825  const Bank& bank = dram_pkt->bankRef;
826 
827  // check if rank is available, if not, jump to the next packet
828  if (dram_pkt->rankRef.isAvailable()) {
829  // check if it is a row hit
830  if (bank.openRow == dram_pkt->row) {
831  // no additional rank-to-rank or same bank-group
832  // delays, or we switched read/write and might as well
833  // go for the row hit
834  if (bank.colAllowedAt <= min_col_at) {
835  // FCFS within the hits, giving priority to
836  // commands that can issue seamlessly, without
837  // additional delay, such as same rank accesses
838  // and/or different bank-group accesses
839  DPRINTF(DRAM, "Seamless row buffer hit\n");
840  selected_pkt_it = i;
841  // no need to look through the remaining queue entries
842  break;
843  } else if (!found_hidden_bank && !found_prepped_pkt) {
844  // if we did not find a packet to a closed row that can
845  // issue the bank commands without incurring delay, and
846  // did not yet find a packet to a prepped row, remember
847  // the current one
848  selected_pkt_it = i;
849  found_prepped_pkt = true;
850  DPRINTF(DRAM, "Prepped row buffer hit\n");
851  }
852  } else if (!found_earliest_pkt) {
853  // if we have not initialised the bank status, do it
854  // now, and only once per scheduling decisions
855  if (earliest_banks == 0) {
856  // determine entries with earliest bank delay
857  pair<uint64_t, bool> bankStatus =
858  minBankPrep(queue, min_col_at);
859  earliest_banks = bankStatus.first;
860  hidden_bank_prep = bankStatus.second;
861  }
862 
863  // bank is amongst first available banks
864  // minBankPrep will give priority to packets that can
865  // issue seamlessly
866  if (bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
867  found_earliest_pkt = true;
868  found_hidden_bank = hidden_bank_prep;
869 
870  // give priority to packets that can issue
871  // bank commands 'behind the scenes'
872  // any additional delay if any will be due to
873  // col-to-col command requirements
874  if (hidden_bank_prep || !found_prepped_pkt)
875  selected_pkt_it = i;
876  }
877  }
878  }
879  }
880 
881  if (selected_pkt_it != queue.end()) {
882  DRAMPacket* selected_pkt = *selected_pkt_it;
883  queue.erase(selected_pkt_it);
884  queue.push_front(selected_pkt);
885  return true;
886  }
887 
888  return false;
889 }
890 
891 void
893 {
894  DPRINTF(DRAM, "Responding to Address %lld.. ",pkt->getAddr());
895 
896  bool needsResponse = pkt->needsResponse();
897  // do the actual memory access which also turns the packet into a
898  // response
899  access(pkt);
900 
901  // turn packet around to go back to requester if response expected
902  if (needsResponse) {
903  // access already turned the packet into a response
904  assert(pkt->isResponse());
905  // response_time consumes the static latency and is charged also
906  // with headerDelay that takes into account the delay provided by
907  // the xbar and also the payloadDelay that takes into account the
908  // number of data beats.
909  Tick response_time = curTick() + static_latency + pkt->headerDelay +
910  pkt->payloadDelay;
911  // Here we reset the timing of the packet before sending it out.
912  pkt->headerDelay = pkt->payloadDelay = 0;
913 
914  // queue the packet in the response queue to be sent out after
915  // the static latency has passed
916  port.schedTimingResp(pkt, response_time, true);
917  } else {
918  // @todo the packet is going to be deleted, and the DRAMPacket
919  // is still having a pointer to it
920  pendingDelete.reset(pkt);
921  }
922 
923  DPRINTF(DRAM, "Done\n");
924 
925  return;
926 }
927 
928 void
929 DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
930  Tick act_tick, uint32_t row)
931 {
932  assert(rank_ref.actTicks.size() == activationLimit);
933 
934  DPRINTF(DRAM, "Activate at tick %d\n", act_tick);
935 
936  // update the open row
937  assert(bank_ref.openRow == Bank::NO_ROW);
938  bank_ref.openRow = row;
939 
940  // start counting anew, this covers both the case when we
941  // auto-precharged, and when this access is forced to
942  // precharge
943  bank_ref.bytesAccessed = 0;
944  bank_ref.rowAccesses = 0;
945 
946  ++rank_ref.numBanksActive;
947  assert(rank_ref.numBanksActive <= banksPerRank);
948 
949  DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
950  bank_ref.bank, rank_ref.rank, act_tick,
951  ranks[rank_ref.rank]->numBanksActive);
952 
953  rank_ref.cmdList.push_back(Command(MemCommand::ACT, bank_ref.bank,
954  act_tick));
955 
956  DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK) -
957  timeStampOffset, bank_ref.bank, rank_ref.rank);
958 
959  // The next access has to respect tRAS for this bank
960  bank_ref.preAllowedAt = act_tick + tRAS;
961 
962  // Respect the row-to-column command delay
963  bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt);
964 
965  // start by enforcing tRRD
966  for (int i = 0; i < banksPerRank; i++) {
967  // next activate to any bank in this rank must not happen
968  // before tRRD
969  if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
970  // bank group architecture requires longer delays between
971  // ACT commands within the same bank group. Use tRRD_L
972  // in this case
973  rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
974  rank_ref.banks[i].actAllowedAt);
975  } else {
976  // use shorter tRRD value when either
977  // 1) bank group architecture is not supportted
978  // 2) bank is in a different bank group
979  rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
980  rank_ref.banks[i].actAllowedAt);
981  }
982  }
983 
984  // next, we deal with tXAW, if the activation limit is disabled
985  // then we directly schedule an activate power event
986  if (!rank_ref.actTicks.empty()) {
987  // sanity check
988  if (rank_ref.actTicks.back() &&
989  (act_tick - rank_ref.actTicks.back()) < tXAW) {
990  panic("Got %d activates in window %d (%llu - %llu) which "
991  "is smaller than %llu\n", activationLimit, act_tick -
992  rank_ref.actTicks.back(), act_tick,
993  rank_ref.actTicks.back(), tXAW);
994  }
995 
996  // shift the times used for the book keeping, the last element
997  // (highest index) is the oldest one and hence the lowest value
998  rank_ref.actTicks.pop_back();
999 
1000  // record an new activation (in the future)
1001  rank_ref.actTicks.push_front(act_tick);
1002 
1003  // cannot activate more than X times in time window tXAW, push the
1004  // next one (the X + 1'st activate) to be tXAW away from the
1005  // oldest in our window of X
1006  if (rank_ref.actTicks.back() &&
1007  (act_tick - rank_ref.actTicks.back()) < tXAW) {
1008  DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
1009  "no earlier than %llu\n", activationLimit,
1010  rank_ref.actTicks.back() + tXAW);
1011  for (int j = 0; j < banksPerRank; j++)
1012  // next activate must not happen before end of window
1013  rank_ref.banks[j].actAllowedAt =
1014  std::max(rank_ref.actTicks.back() + tXAW,
1015  rank_ref.banks[j].actAllowedAt);
1016  }
1017  }
1018 
1019  // at the point when this activate takes place, make sure we
1020  // transition to the active power state
1021  if (!rank_ref.activateEvent.scheduled())
1022  schedule(rank_ref.activateEvent, act_tick);
1023  else if (rank_ref.activateEvent.when() > act_tick)
1024  // move it sooner in time
1025  reschedule(rank_ref.activateEvent, act_tick);
1026 }
1027 
1028 void
1029 DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_at, bool trace)
1030 {
1031  // make sure the bank has an open row
1032  assert(bank.openRow != Bank::NO_ROW);
1033 
1034  // sample the bytes per activate here since we are closing
1035  // the page
1037 
1038  bank.openRow = Bank::NO_ROW;
1039 
1040  // no precharge allowed before this one
1041  bank.preAllowedAt = pre_at;
1042 
1043  Tick pre_done_at = pre_at + tRP;
1044 
1045  bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);
1046 
1047  assert(rank_ref.numBanksActive != 0);
1048  --rank_ref.numBanksActive;
1049 
1050  DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
1051  "%d active\n", bank.bank, rank_ref.rank, pre_at,
1052  rank_ref.numBanksActive);
1053 
1054  if (trace) {
1055 
1056  rank_ref.cmdList.push_back(Command(MemCommand::PRE, bank.bank,
1057  pre_at));
1058  DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
1059  timeStampOffset, bank.bank, rank_ref.rank);
1060  }
1061  // if we look at the current number of active banks we might be
1062  // tempted to think the DRAM is now idle, however this can be
1063  // undone by an activate that is scheduled to happen before we
1064  // would have reached the idle state, so schedule an event and
1065  // rather check once we actually make it to the point in time when
1066  // the (last) precharge takes place
1067  if (!rank_ref.prechargeEvent.scheduled()) {
1068  schedule(rank_ref.prechargeEvent, pre_done_at);
1069  // New event, increment count
1070  ++rank_ref.outstandingEvents;
1071  } else if (rank_ref.prechargeEvent.when() < pre_done_at) {
1072  reschedule(rank_ref.prechargeEvent, pre_done_at);
1073  }
1074 }
1075 
1076 void
1078 {
1079  DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
1080  dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);
1081 
1082  // get the rank
1083  Rank& rank = dram_pkt->rankRef;
1084 
1085  // are we in or transitioning to a low-power state and have not scheduled
1086  // a power-up event?
1087  // if so, wake up from power down to issue RD/WR burst
1088  if (rank.inLowPowerState) {
1089  assert(rank.pwrState != PWR_SREF);
1090  rank.scheduleWakeUpEvent(tXP);
1091  }
1092 
1093  // get the bank
1094  Bank& bank = dram_pkt->bankRef;
1095 
1096  // for the state we need to track if it is a row hit or not
1097  bool row_hit = true;
1098 
1099  // respect any constraints on the command (e.g. tRCD or tCCD)
1100  Tick cmd_at = std::max(bank.colAllowedAt, curTick());
1101 
1102  // Determine the access latency and update the bank state
1103  if (bank.openRow == dram_pkt->row) {
1104  // nothing to do
1105  } else {
1106  row_hit = false;
1107 
1108  // If there is a page open, precharge it.
1109  if (bank.openRow != Bank::NO_ROW) {
1110  prechargeBank(rank, bank, std::max(bank.preAllowedAt, curTick()));
1111  }
1112 
1113  // next we need to account for the delay in activating the
1114  // page
1115  Tick act_tick = std::max(bank.actAllowedAt, curTick());
1116 
1117  // Record the activation and deal with all the global timing
1118  // constraints caused be a new activation (tRRD and tXAW)
1119  activateBank(rank, bank, act_tick, dram_pkt->row);
1120 
1121  // issue the command as early as possible
1122  cmd_at = bank.colAllowedAt;
1123  }
1124 
1125  // we need to wait until the bus is available before we can issue
1126  // the command
1127  cmd_at = std::max(cmd_at, busBusyUntil - tCL);
1128 
1129  // update the packet ready time
1130  dram_pkt->readyTime = cmd_at + tCL + tBURST;
1131 
1132  // only one burst can use the bus at any one point in time
1133  assert(dram_pkt->readyTime - busBusyUntil >= tBURST);
1134 
1135  // update the time for the next read/write burst for each
1136  // bank (add a max with tCCD/tCCD_L here)
1137  Tick cmd_dly;
1138  for (int j = 0; j < ranksPerChannel; j++) {
1139  for (int i = 0; i < banksPerRank; i++) {
1140  // next burst to same bank group in this rank must not happen
1141  // before tCCD_L. Different bank group timing requirement is
1142  // tBURST; Add tCS for different ranks
1143  if (dram_pkt->rank == j) {
1144  if (bankGroupArch &&
1145  (bank.bankgr == ranks[j]->banks[i].bankgr)) {
1146  // bank group architecture requires longer delays between
1147  // RD/WR burst commands to the same bank group.
1148  // Use tCCD_L in this case
1149  cmd_dly = tCCD_L;
1150  } else {
1151  // use tBURST (equivalent to tCCD_S), the shorter
1152  // cas-to-cas delay value, when either:
1153  // 1) bank group architecture is not supportted
1154  // 2) bank is in a different bank group
1155  cmd_dly = tBURST;
1156  }
1157  } else {
1158  // different rank is by default in a different bank group
1159  // use tBURST (equivalent to tCCD_S), which is the shorter
1160  // cas-to-cas delay in this case
1161  // Add tCS to account for rank-to-rank bus delay requirements
1162  cmd_dly = tBURST + tCS;
1163  }
1164  ranks[j]->banks[i].colAllowedAt = std::max(cmd_at + cmd_dly,
1165  ranks[j]->banks[i].colAllowedAt);
1166  }
1167  }
1168 
1169  // Save rank of current access
1170  activeRank = dram_pkt->rank;
1171 
1172  // If this is a write, we also need to respect the write recovery
1173  // time before a precharge, in the case of a read, respect the
1174  // read to precharge constraint
1175  bank.preAllowedAt = std::max(bank.preAllowedAt,
1176  dram_pkt->isRead ? cmd_at + tRTP :
1177  dram_pkt->readyTime + tWR);
1178 
1179  // increment the bytes accessed and the accesses per row
1180  bank.bytesAccessed += burstSize;
1181  ++bank.rowAccesses;
1182 
1183  // if we reached the max, then issue with an auto-precharge
1184  bool auto_precharge = pageMgmt == Enums::close ||
1186 
1187  // if we did not hit the limit, we might still want to
1188  // auto-precharge
1189  if (!auto_precharge &&
1190  (pageMgmt == Enums::open_adaptive ||
1191  pageMgmt == Enums::close_adaptive)) {
1192  // a twist on the open and close page policies:
1193  // 1) open_adaptive page policy does not blindly keep the
1194  // page open, but close it if there are no row hits, and there
1195  // are bank conflicts in the queue
1196  // 2) close_adaptive page policy does not blindly close the
1197  // page, but closes it only if there are no row hits in the queue.
1198  // In this case, only force an auto precharge when there
1199  // are no same page hits in the queue
1200  bool got_more_hits = false;
1201  bool got_bank_conflict = false;
1202 
1203  // either look at the read queue or write queue
1204  const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue :
1205  writeQueue;
1206  auto p = queue.begin();
1207  // make sure we are not considering the packet that we are
1208  // currently dealing with (which is the head of the queue)
1209  ++p;
1210 
1211  // keep on looking until we find a hit or reach the end of the queue
1212  // 1) if a hit is found, then both open and close adaptive policies keep
1213  // the page open
1214  // 2) if no hit is found, got_bank_conflict is set to true if a bank
1215  // conflict request is waiting in the queue
1216  while (!got_more_hits && p != queue.end()) {
1217  bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
1218  (dram_pkt->bank == (*p)->bank);
1219  bool same_row = dram_pkt->row == (*p)->row;
1220  got_more_hits |= same_rank_bank && same_row;
1221  got_bank_conflict |= same_rank_bank && !same_row;
1222  ++p;
1223  }
1224 
1225  // auto pre-charge when either
1226  // 1) open_adaptive policy, we have not got any more hits, and
1227  // have a bank conflict
1228  // 2) close_adaptive policy and we have not got any more hits
1229  auto_precharge = !got_more_hits &&
1230  (got_bank_conflict || pageMgmt == Enums::close_adaptive);
1231  }
1232 
1233  // DRAMPower trace command to be written
1234  std::string mem_cmd = dram_pkt->isRead ? "RD" : "WR";
1235 
1236  // MemCommand required for DRAMPower library
1237  MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :
1238  MemCommand::WR;
1239 
1240  // Update bus state
1241  busBusyUntil = dram_pkt->readyTime;
1242 
1243  DPRINTF(DRAM, "Access to %lld, ready at %lld bus busy until %lld.\n",
1244  dram_pkt->addr, dram_pkt->readyTime, busBusyUntil);
1245 
1246  dram_pkt->rankRef.cmdList.push_back(Command(command, dram_pkt->bank,
1247  cmd_at));
1248 
1249  DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) -
1250  timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank);
1251 
1252  // if this access should use auto-precharge, then we are
1253  // closing the row after the read/write burst
1254  if (auto_precharge) {
1255  // if auto-precharge push a PRE command at the correct tick to the
1256  // list used by DRAMPower library to calculate power
1257  prechargeBank(rank, bank, std::max(curTick(), bank.preAllowedAt));
1258 
1259  DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
1260  }
1261 
1262  // Update the minimum timing between the requests, this is a
1263  // conservative estimate of when we have to schedule the next
1264  // request to not introduce any unecessary bubbles. In most cases
1265  // we will wake up sooner than we have to.
1266  nextReqTime = busBusyUntil - (tRP + tRCD + tCL);
1267 
1268  // Update the stats and schedule the next request
1269  if (dram_pkt->isRead) {
1270  ++readsThisTime;
1271  if (row_hit)
1272  readRowHits++;
1274  perBankRdBursts[dram_pkt->bankId]++;
1275 
1276  // Update latency stats
1277  totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
1278  totBusLat += tBURST;
1279  totQLat += cmd_at - dram_pkt->entryTime;
1280  } else {
1281  ++writesThisTime;
1282  if (row_hit)
1283  writeRowHits++;
1285  perBankWrBursts[dram_pkt->bankId]++;
1286  }
1287 }
1288 
1289 void
1291 {
1292  int busyRanks = 0;
1293  for (auto r : ranks) {
1294  if (!r->isAvailable()) {
1295  if (r->pwrState != PWR_SREF) {
1296  // rank is busy refreshing
1297  DPRINTF(DRAMState, "Rank %d is not available\n", r->rank);
1298  busyRanks++;
1299 
1300  // let the rank know that if it was waiting to drain, it
1301  // is now done and ready to proceed
1302  r->checkDrainDone();
1303  }
1304 
1305  // check if we were in self-refresh and haven't started
1306  // to transition out
1307  if ((r->pwrState == PWR_SREF) && r->inLowPowerState) {
1308  DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank);
1309  // if we have commands queued to this rank and we don't have
1310  // a minimum number of active commands enqueued,
1311  // exit self-refresh
1312  if (r->forceSelfRefreshExit()) {
1313  DPRINTF(DRAMState, "rank %d was in self refresh and"
1314  " should wake up\n", r->rank);
1315  //wake up from self-refresh
1316  r->scheduleWakeUpEvent(tXS);
1317  // things are brought back into action once a refresh is
1318  // performed after self-refresh
1319  // continue with selection for other ranks
1320  }
1321  }
1322  }
1323  }
1324 
1325  if (busyRanks == ranksPerChannel) {
1326  // if all ranks are refreshing wait for them to finish
1327  // and stall this state machine without taking any further
1328  // action, and do not schedule a new nextReqEvent
1329  return;
1330  }
1331 
1332  // pre-emptively set to false. Overwrite if in transitioning to
1333  // a new state
1334  bool switched_cmd_type = false;
1335  if (busState != busStateNext) {
1336  if (busState == READ) {
1337  DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
1338  "waiting\n", readsThisTime, readQueue.size());
1339 
1340  // sample and reset the read-related stats as we are now
1341  // transitioning to writes, and all reads are done
1343  readsThisTime = 0;
1344 
1345  // now proceed to do the actual writes
1346  switched_cmd_type = true;
1347  } else {
1348  DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
1349  "waiting\n", writesThisTime, writeQueue.size());
1350 
1352  writesThisTime = 0;
1353 
1354  switched_cmd_type = true;
1355  }
1356  // update busState to match next state until next transition
1358  }
1359 
1360  // when we get here it is either a read or a write
1361  if (busState == READ) {
1362 
1363  // track if we should switch or not
1364  bool switch_to_writes = false;
1365 
1366  if (readQueue.empty()) {
1367  // In the case there is no read request to go next,
1368  // trigger writes if we have passed the low threshold (or
1369  // if we are draining)
1370  if (!writeQueue.empty() &&
1372  writeQueue.size() > writeLowThreshold)) {
1373 
1374  switch_to_writes = true;
1375  } else {
1376  // check if we are drained
1377  // not done draining until in PWR_IDLE state
1378  // ensuring all banks are closed and
1379  // have exited low power states
1380  if (drainState() == DrainState::Draining &&
1381  respQueue.empty() && allRanksDrained()) {
1382 
1383  DPRINTF(Drain, "DRAM controller done draining\n");
1384  signalDrainDone();
1385  }
1386 
1387  // nothing to do, not even any point in scheduling an
1388  // event for the next request
1389  return;
1390  }
1391  } else {
1392  // bool to check if there is a read to a free rank
1393  bool found_read = false;
1394 
1395  // Figure out which read request goes next, and move it to the
1396  // front of the read queue
1397  // If we are changing command type, incorporate the minimum
1398  // bus turnaround delay which will be tCS (different rank) case
1399  found_read = chooseNext(readQueue,
1400  switched_cmd_type ? tCS : 0);
1401 
1402  // if no read to an available rank is found then return
1403  // at this point. There could be writes to the available ranks
1404  // which are above the required threshold. However, to
1405  // avoid adding more complexity to the code, return and wait
1406  // for a refresh event to kick things into action again.
1407  if (!found_read)
1408  return;
1409 
1410  DRAMPacket* dram_pkt = readQueue.front();
1411  assert(dram_pkt->rankRef.isAvailable());
1412 
1413  // here we get a bit creative and shift the bus busy time not
1414  // just the tWTR, but also a CAS latency to capture the fact
1415  // that we are allowed to prepare a new bank, but not issue a
1416  // read command until after tWTR, in essence we capture a
1417  // bubble on the data bus that is tWTR + tCL
1418  if (switched_cmd_type && dram_pkt->rank == activeRank) {
1419  busBusyUntil += tWTR + tCL;
1420  }
1421 
1422  doDRAMAccess(dram_pkt);
1423 
1424  // At this point we're done dealing with the request
1425  readQueue.pop_front();
1426 
1427  // Every respQueue which will generate an event, increment count
1428  ++dram_pkt->rankRef.outstandingEvents;
1429 
1430  // sanity check
1431  assert(dram_pkt->size <= burstSize);
1432  assert(dram_pkt->readyTime >= curTick());
1433 
1434  // Insert into response queue. It will be sent back to the
1435  // requestor at its readyTime
1436  if (respQueue.empty()) {
1437  assert(!respondEvent.scheduled());
1438  schedule(respondEvent, dram_pkt->readyTime);
1439  } else {
1440  assert(respQueue.back()->readyTime <= dram_pkt->readyTime);
1441  assert(respondEvent.scheduled());
1442  }
1443 
1444  respQueue.push_back(dram_pkt);
1445 
1446  // we have so many writes that we have to transition
1447  if (writeQueue.size() > writeHighThreshold) {
1448  switch_to_writes = true;
1449  }
1450  }
1451 
1452  // switching to writes, either because the read queue is empty
1453  // and the writes have passed the low threshold (or we are
1454  // draining), or because the writes hit the hight threshold
1455  if (switch_to_writes) {
1456  // transition to writing
1457  busStateNext = WRITE;
1458  }
1459  } else {
1460  // bool to check if write to free rank is found
1461  bool found_write = false;
1462 
1463  // If we are changing command type, incorporate the minimum
1464  // bus turnaround delay
1465  found_write = chooseNext(writeQueue,
1466  switched_cmd_type ? std::min(tRTW, tCS) : 0);
1467 
1468  // if no writes to an available rank are found then return.
1469  // There could be reads to the available ranks. However, to avoid
1470  // adding more complexity to the code, return at this point and wait
1471  // for a refresh event to kick things into action again.
1472  if (!found_write)
1473  return;
1474 
1475  DRAMPacket* dram_pkt = writeQueue.front();
1476  assert(dram_pkt->rankRef.isAvailable());
1477  // sanity check
1478  assert(dram_pkt->size <= burstSize);
1479 
1480  // add a bubble to the data bus, as defined by the
1481  // tRTW when access is to the same rank as previous burst
1482  // Different rank timing is handled with tCS, which is
1483  // applied to colAllowedAt
1484  if (switched_cmd_type && dram_pkt->rank == activeRank) {
1485  busBusyUntil += tRTW;
1486  }
1487 
1488  doDRAMAccess(dram_pkt);
1489 
1490  writeQueue.pop_front();
1491 
1492  // removed write from queue, decrement count
1493  --dram_pkt->rankRef.writeEntries;
1494 
1495  // Schedule write done event to decrement event count
1496  // after the readyTime has been reached
1497  // Only schedule latest write event to minimize events
1498  // required; only need to ensure that final event scheduled covers
1499  // the time that writes are outstanding and bus is active
1500  // to holdoff power-down entry events
1501  if (!dram_pkt->rankRef.writeDoneEvent.scheduled()) {
1502  schedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
1503  // New event, increment count
1504  ++dram_pkt->rankRef.outstandingEvents;
1505 
1506  } else if (dram_pkt->rankRef.writeDoneEvent.when() <
1507  dram_pkt-> readyTime) {
1508  reschedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);
1509  }
1510 
1511  isInWriteQueue.erase(burstAlign(dram_pkt->addr));
1512  delete dram_pkt;
1513 
1514  // If we emptied the write queue, or got sufficiently below the
1515  // threshold (using the minWritesPerSwitch as the hysteresis) and
1516  // are not draining, or we have reads waiting and have done enough
1517  // writes, then switch to reads.
1518  if (writeQueue.empty() ||
1521  (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
1522  // turn the bus back around for reads again
1523  busStateNext = READ;
1524 
1525  // note that the we switch back to reads also in the idle
1526  // case, which eventually will check for any draining and
1527  // also pause any further scheduling if there is really
1528  // nothing to do
1529  }
1530  }
1531  // It is possible that a refresh to another rank kicks things back into
1532  // action before reaching this point.
1533  if (!nextReqEvent.scheduled())
1534  schedule(nextReqEvent, std::max(nextReqTime, curTick()));
1535 
1536  // If there is space available and we have writes waiting then let
1537  // them retry. This is done here to ensure that the retry does not
1538  // cause a nextReqEvent to be scheduled before we do so as part of
1539  // the next request processing
1540  if (retryWrReq && writeQueue.size() < writeBufferSize) {
1541  retryWrReq = false;
1542  port.sendRetryReq();
1543  }
1544 }
1545 
1548  Tick min_col_at) const
1549 {
1550  uint64_t bank_mask = 0;
1551  Tick min_act_at = MaxTick;
1552 
1553  // latest Tick for which ACT can occur without incurring additoinal
1554  // delay on the data bus
1555  const Tick hidden_act_max = std::max(min_col_at - tRCD, curTick());
1556 
1557  // Flag condition when burst can issue back-to-back with previous burst
1558  bool found_seamless_bank = false;
1559 
1560  // Flag condition when bank can be opened without incurring additional
1561  // delay on the data bus
1562  bool hidden_bank_prep = false;
1563 
1564  // determine if we have queued transactions targetting the
1565  // bank in question
1566  vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
1567  for (const auto& p : queue) {
1568  if (p->rankRef.isAvailable())
1569  got_waiting[p->bankId] = true;
1570  }
1571 
1572  // Find command with optimal bank timing
1573  // Will prioritize commands that can issue seamlessly.
1574  for (int i = 0; i < ranksPerChannel; i++) {
1575  for (int j = 0; j < banksPerRank; j++) {
1576  uint16_t bank_id = i * banksPerRank + j;
1577 
1578  // if we have waiting requests for the bank, and it is
1579  // amongst the first available, update the mask
1580  if (got_waiting[bank_id]) {
1581  // make sure this rank is not currently refreshing.
1582  assert(ranks[i]->isAvailable());
1583  // simplistic approximation of when the bank can issue
1584  // an activate, ignoring any rank-to-rank switching
1585  // cost in this calculation
1586  Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ?
1587  std::max(ranks[i]->banks[j].actAllowedAt, curTick()) :
1588  std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP;
1589 
1590  // When is the earliest the R/W burst can issue?
1591  Tick col_at = std::max(ranks[i]->banks[j].colAllowedAt,
1592  act_at + tRCD);
1593 
1594  // bank can issue burst back-to-back (seamlessly) with
1595  // previous burst
1596  bool new_seamless_bank = col_at <= min_col_at;
1597 
1598  // if we found a new seamless bank or we have no
1599  // seamless banks, and got a bank with an earlier
1600  // activate time, it should be added to the bit mask
1601  if (new_seamless_bank ||
1602  (!found_seamless_bank && act_at <= min_act_at)) {
1603  // if we did not have a seamless bank before, and
1604  // we do now, reset the bank mask, also reset it
1605  // if we have not yet found a seamless bank and
1606  // the activate time is smaller than what we have
1607  // seen so far
1608  if (!found_seamless_bank &&
1609  (new_seamless_bank || act_at < min_act_at)) {
1610  bank_mask = 0;
1611  }
1612 
1613  found_seamless_bank |= new_seamless_bank;
1614 
1615  // ACT can occur 'behind the scenes'
1616  hidden_bank_prep = act_at <= hidden_act_max;
1617 
1618  // set the bit corresponding to the available bank
1619  replaceBits(bank_mask, bank_id, bank_id, 1);
1620  min_act_at = act_at;
1621  }
1622  }
1623  }
1624  }
1625 
1626  return make_pair(bank_mask, hidden_bank_prep);
1627 }
1628 
1629 DRAMCtrl::Rank::Rank(DRAMCtrl& _memory, const DRAMCtrlParams* _p)
1630  : EventManager(&_memory), memory(_memory),
1631  pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE),
1632  pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE),
1633  refreshState(REF_IDLE), inLowPowerState(false), rank(0),
1634  readEntries(0), writeEntries(0), outstandingEvents(0),
1635  wakeUpAllowedAt(0), power(_p, false), numBanksActive(0),
1636  writeDoneEvent(*this), activateEvent(*this), prechargeEvent(*this),
1637  refreshEvent(*this), powerEvent(*this), wakeUpEvent(*this)
1638 { }
1639 
1640 void
1642 {
1643  assert(ref_tick > curTick());
1644 
1645  pwrStateTick = curTick();
1646 
1647  // kick off the refresh, and give ourselves enough time to
1648  // precharge
1649  schedule(refreshEvent, ref_tick);
1650 }
1651 
1652 void
1654 {
1655  deschedule(refreshEvent);
1656 
1657  // Update the stats
1658  updatePowerStats();
1659 
1660  // don't automatically transition back to LP state after next REF
1661  pwrStatePostRefresh = PWR_IDLE;
1662 }
1663 
1664 bool
1666 {
1667  bool no_queued_cmds = ((memory.busStateNext == READ) && (readEntries == 0))
1668  || ((memory.busStateNext == WRITE) &&
1669  (writeEntries == 0));
1670 
1671  if (refreshState == REF_RUN) {
1672  // have not decremented outstandingEvents for refresh command
1673  // still check if there are no commands queued to force PD
1674  // entry after refresh completes
1675  return no_queued_cmds;
1676  } else {
1677  // ensure no commands in Q and no commands scheduled
1678  return (no_queued_cmds && (outstandingEvents == 0));
1679  }
1680 }
1681 
1682 void
1684 {
1685  // if this rank was waiting to drain it is now able to proceed to
1686  // precharge
1687  if (refreshState == REF_DRAIN) {
1688  DPRINTF(DRAM, "Refresh drain done, now precharging\n");
1689 
1690  refreshState = REF_PD_EXIT;
1691 
1692  // hand control back to the refresh event loop
1693  schedule(refreshEvent, curTick());
1694  }
1695 }
1696 
1697 void
1699 {
1700  // at the moment sort the list of commands and update the counters
1701  // for DRAMPower libray when doing a refresh
1702  sort(cmdList.begin(), cmdList.end(), DRAMCtrl::sortTime);
1703 
1704  auto next_iter = cmdList.begin();
1705  // push to commands to DRAMPower
1706  for ( ; next_iter != cmdList.end() ; ++next_iter) {
1707  Command cmd = *next_iter;
1708  if (cmd.timeStamp <= curTick()) {
1709  // Move all commands at or before curTick to DRAMPower
1710  power.powerlib.doCommand(cmd.type, cmd.bank,
1711  divCeil(cmd.timeStamp, memory.tCK) -
1712  memory.timeStampOffset);
1713  } else {
1714  // done - found all commands at or before curTick()
1715  // next_iter references the 1st command after curTick
1716  break;
1717  }
1718  }
1719  // reset cmdList to only contain commands after curTick
1720  // if there are no commands after curTick, updated cmdList will be empty
1721  // in this case, next_iter is cmdList.end()
1722  cmdList.assign(next_iter, cmdList.end());
1723 }
1724 
1725 void
1727 {
1728  // we should transition to the active state as soon as any bank is active
1729  if (pwrState != PWR_ACT)
1730  // note that at this point numBanksActive could be back at
1731  // zero again due to a precharge scheduled in the future
1732  schedulePowerEvent(PWR_ACT, curTick());
1733 }
1734 
1735 void
1737 {
1738  // counter should at least indicate one outstanding request
1739  // for this precharge
1740  assert(outstandingEvents > 0);
1741  // precharge complete, decrement count
1742  --outstandingEvents;
1743 
1744  // if we reached zero, then special conditions apply as we track
1745  // if all banks are precharged for the power models
1746  if (numBanksActive == 0) {
1747  // no reads to this rank in the Q and no pending
1748  // RD/WR or refresh commands
1749  if (lowPowerEntryReady()) {
1750  // should still be in ACT state since bank still open
1751  assert(pwrState == PWR_ACT);
1752 
1753  // All banks closed - switch to precharge power down state.
1754  DPRINTF(DRAMState, "Rank %d sleep at tick %d\n",
1755  rank, curTick());
1756  powerDownSleep(PWR_PRE_PDN, curTick());
1757  } else {
1758  // we should transition to the idle state when the last bank
1759  // is precharged
1760  schedulePowerEvent(PWR_IDLE, curTick());
1761  }
1762  }
1763 }
1764 
1765 void
1767 {
1768  // counter should at least indicate one outstanding request
1769  // for this write
1770  assert(outstandingEvents > 0);
1771  // Write transfer on bus has completed
1772  // decrement per rank counter
1773  --outstandingEvents;
1774 }
1775 
1776 void
1778 {
1779  // when first preparing the refresh, remember when it was due
1780  if ((refreshState == REF_IDLE) || (refreshState == REF_SREF_EXIT)) {
1781  // remember when the refresh is due
1782  refreshDueAt = curTick();
1783 
1784  // proceed to drain
1785  refreshState = REF_DRAIN;
1786 
1787  // make nonzero while refresh is pending to ensure
1788  // power down and self-refresh are not entered
1789  ++outstandingEvents;
1790 
1791  DPRINTF(DRAM, "Refresh due\n");
1792  }
1793 
1794  // let any scheduled read or write to the same rank go ahead,
1795  // after which it will
1796  // hand control back to this event loop
1797  if (refreshState == REF_DRAIN) {
1798  // if a request is at the moment being handled and this request is
1799  // accessing the current rank then wait for it to finish
1800  if ((rank == memory.activeRank)
1801  && (memory.nextReqEvent.scheduled())) {
1802  // hand control over to the request loop until it is
1803  // evaluated next
1804  DPRINTF(DRAM, "Refresh awaiting draining\n");
1805 
1806  return;
1807  } else {
1808  refreshState = REF_PD_EXIT;
1809  }
1810  }
1811 
1812  // at this point, ensure that rank is not in a power-down state
1813  if (refreshState == REF_PD_EXIT) {
1814  // if rank was sleeping and we have't started exit process,
1815  // wake-up for refresh
1816  if (inLowPowerState) {
1817  DPRINTF(DRAM, "Wake Up for refresh\n");
1818  // save state and return after refresh completes
1819  scheduleWakeUpEvent(memory.tXP);
1820  return;
1821  } else {
1822  refreshState = REF_PRE;
1823  }
1824  }
1825 
1826  // at this point, ensure that all banks are precharged
1827  if (refreshState == REF_PRE) {
1828  // precharge any active bank
1829  if (numBanksActive != 0) {
1830  // at the moment, we use a precharge all even if there is
1831  // only a single bank open
1832  DPRINTF(DRAM, "Precharging all\n");
1833 
1834  // first determine when we can precharge
1835  Tick pre_at = curTick();
1836 
1837  for (auto &b : banks) {
1838  // respect both causality and any existing bank
1839  // constraints, some banks could already have a
1840  // (auto) precharge scheduled
1841  pre_at = std::max(b.preAllowedAt, pre_at);
1842  }
1843 
1844  // make sure all banks per rank are precharged, and for those that
1845  // already are, update their availability
1846  Tick act_allowed_at = pre_at + memory.tRP;
1847 
1848  for (auto &b : banks) {
1849  if (b.openRow != Bank::NO_ROW) {
1850  memory.prechargeBank(*this, b, pre_at, false);
1851  } else {
1852  b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
1853  b.preAllowedAt = std::max(b.preAllowedAt, pre_at);
1854  }
1855  }
1856 
1857  // precharge all banks in rank
1858  cmdList.push_back(Command(MemCommand::PREA, 0, pre_at));
1859 
1860  DPRINTF(DRAMPower, "%llu,PREA,0,%d\n",
1861  divCeil(pre_at, memory.tCK) -
1862  memory.timeStampOffset, rank);
1863  } else if ((pwrState == PWR_IDLE) && (outstandingEvents == 1)) {
1864  // Banks are closed, have transitioned to IDLE state, and
1865  // no outstanding ACT,RD/WR,Auto-PRE sequence scheduled
1866  DPRINTF(DRAM, "All banks already precharged, starting refresh\n");
1867 
1868  // go ahead and kick the power state machine into gear since
1869  // we are already idle
1870  schedulePowerEvent(PWR_REF, curTick());
1871  } else {
1872  // banks state is closed but haven't transitioned pwrState to IDLE
1873  // or have outstanding ACT,RD/WR,Auto-PRE sequence scheduled
1874  // should have outstanding precharge event in this case
1875  assert(prechargeEvent.scheduled());
1876  // will start refresh when pwrState transitions to IDLE
1877  }
1878 
1879  assert(numBanksActive == 0);
1880 
1881  // wait for all banks to be precharged, at which point the
1882  // power state machine will transition to the idle state, and
1883  // automatically move to a refresh, at that point it will also
1884  // call this method to get the refresh event loop going again
1885  return;
1886  }
1887 
1888  // last but not least we perform the actual refresh
1889  if (refreshState == REF_START) {
1890  // should never get here with any banks active
1891  assert(numBanksActive == 0);
1892  assert(pwrState == PWR_REF);
1893 
1894  Tick ref_done_at = curTick() + memory.tRFC;
1895 
1896  for (auto &b : banks) {
1897  b.actAllowedAt = ref_done_at;
1898  }
1899 
1900  // at the moment this affects all ranks
1901  cmdList.push_back(Command(MemCommand::REF, 0, curTick()));
1902 
1903  // Update the stats
1904  updatePowerStats();
1905 
1906  DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), memory.tCK) -
1907  memory.timeStampOffset, rank);
1908 
1909  // Update for next refresh
1910  refreshDueAt += memory.tREFI;
1911 
1912  // make sure we did not wait so long that we cannot make up
1913  // for it
1914  if (refreshDueAt < ref_done_at) {
1915  fatal("Refresh was delayed so long we cannot catch up\n");
1916  }
1917 
1918  // Run the refresh and schedule event to transition power states
1919  // when refresh completes
1920  refreshState = REF_RUN;
1921  schedule(refreshEvent, ref_done_at);
1922  return;
1923  }
1924 
1925  if (refreshState == REF_RUN) {
1926  // should never get here with any banks active
1927  assert(numBanksActive == 0);
1928  assert(pwrState == PWR_REF);
1929 
1930  assert(!powerEvent.scheduled());
1931 
1932  if ((memory.drainState() == DrainState::Draining) ||
1933  (memory.drainState() == DrainState::Drained)) {
1934  // if draining, do not re-enter low-power mode.
1935  // simply go to IDLE and wait
1936  schedulePowerEvent(PWR_IDLE, curTick());
1937  } else {
1938  // At the moment, we sleep when the refresh ends and wait to be
1939  // woken up again if previously in a low-power state.
1940  if (pwrStatePostRefresh != PWR_IDLE) {
1941  // power State should be power Refresh
1942  assert(pwrState == PWR_REF);
1943  DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in "
1944  "power state %d before refreshing\n", rank,
1945  pwrStatePostRefresh);
1946  powerDownSleep(pwrState, curTick());
1947 
1948  // Force PRE power-down if there are no outstanding commands
1949  // in Q after refresh.
1950  } else if (lowPowerEntryReady()) {
1951  DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT"
1952  " in a low power state before refreshing\n", rank);
1953  powerDownSleep(PWR_PRE_PDN, curTick());
1954 
1955  } else {
1956  // move to the idle power state once the refresh is done, this
1957  // will also move the refresh state machine to the refresh
1958  // idle state
1959  schedulePowerEvent(PWR_IDLE, curTick());
1960  }
1961  }
1962 
1963  // if transitioning to self refresh do not schedule a new refresh;
1964  // when waking from self refresh, a refresh is scheduled again.
1965  if (pwrStateTrans != PWR_SREF) {
1966  // compensate for the delay in actually performing the refresh
1967  // when scheduling the next one
1968  schedule(refreshEvent, refreshDueAt - memory.tRP);
1969 
1970  DPRINTF(DRAMState, "Refresh done at %llu and next refresh"
1971  " at %llu\n", curTick(), refreshDueAt);
1972  }
1973  }
1974 }
1975 
1976 void
1978 {
1979  // respect causality
1980  assert(tick >= curTick());
1981 
1982  if (!powerEvent.scheduled()) {
1983  DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
1984  tick, pwr_state);
1985 
1986  // insert the new transition
1987  pwrStateTrans = pwr_state;
1988 
1989  schedule(powerEvent, tick);
1990  } else {
1991  panic("Scheduled power event at %llu to state %d, "
1992  "with scheduled event at %llu to %d\n", tick, pwr_state,
1993  powerEvent.when(), pwrStateTrans);
1994  }
1995 }
1996 
1997 void
1999 {
2000  // if low power state is active low, schedule to active low power state.
2001  // in reality tCKE is needed to enter active low power. This is neglected
2002  // here and could be added in the future.
2003  if (pwr_state == PWR_ACT_PDN) {
2004  schedulePowerEvent(pwr_state, tick);
2005  // push command to DRAMPower
2006  cmdList.push_back(Command(MemCommand::PDN_F_ACT, 0, tick));
2007  DPRINTF(DRAMPower, "%llu,PDN_F_ACT,0,%d\n", divCeil(tick,
2008  memory.tCK) - memory.timeStampOffset, rank);
2009  } else if (pwr_state == PWR_PRE_PDN) {
2010  // if low power state is precharge low, schedule to precharge low
2011  // power state. In reality tCKE is needed to enter active low power.
2012  // This is neglected here.
2013  schedulePowerEvent(pwr_state, tick);
2014  //push Command to DRAMPower
2015  cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
2016  DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
2017  memory.tCK) - memory.timeStampOffset, rank);
2018  } else if (pwr_state == PWR_REF) {
2019  // if a refresh just occured
2020  // transition to PRE_PDN now that all banks are closed
2021  // do not transition to SREF if commands are in Q; stay in PRE_PDN
2022  if (pwrStatePostRefresh == PWR_ACT_PDN || !lowPowerEntryReady()) {
2023  // prechage power down requires tCKE to enter. For simplicity
2024  // this is not considered.
2025  schedulePowerEvent(PWR_PRE_PDN, tick);
2026  //push Command to DRAMPower
2027  cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
2028  DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
2029  memory.tCK) - memory.timeStampOffset, rank);
2030  } else {
2031  // last low power State was power precharge
2032  assert(pwrStatePostRefresh == PWR_PRE_PDN);
2033  // self refresh requires time tCKESR to enter. For simplicity,
2034  // this is not considered.
2035  schedulePowerEvent(PWR_SREF, tick);
2036  // push Command to DRAMPower
2037  cmdList.push_back(Command(MemCommand::SREN, 0, tick));
2038  DPRINTF(DRAMPower, "%llu,SREN,0,%d\n", divCeil(tick,
2039  memory.tCK) - memory.timeStampOffset, rank);
2040  }
2041  }
2042  // Ensure that we don't power-down and back up in same tick
2043  // Once we commit to PD entry, do it and wait for at least 1tCK
2044  // This could be replaced with tCKE if/when that is added to the model
2045  wakeUpAllowedAt = tick + memory.tCK;
2046 
2047  // Transitioning to a low power state, set flag
2048  inLowPowerState = true;
2049 }
2050 
2051 void
2053 {
2054  Tick wake_up_tick = std::max(curTick(), wakeUpAllowedAt);
2055 
2056  DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n",
2057  rank, wake_up_tick);
2058 
2059  // if waking for refresh, hold previous state
2060  // else reset state back to IDLE
2061  if (refreshState == REF_PD_EXIT) {
2062  pwrStatePostRefresh = pwrState;
2063  } else {
2064  // don't automatically transition back to LP state after next REF
2065  pwrStatePostRefresh = PWR_IDLE;
2066  }
2067 
2068  // schedule wake-up with event to ensure entry has completed before
2069  // we try to wake-up
2070  schedule(wakeUpEvent, wake_up_tick);
2071 
2072  for (auto &b : banks) {
2073  // respect both causality and any existing bank
2074  // constraints, some banks could already have a
2075  // (auto) precharge scheduled
2076  b.colAllowedAt = std::max(wake_up_tick + exit_delay, b.colAllowedAt);
2077  b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt);
2078  b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt);
2079  }
2080  // Transitioning out of low power state, clear flag
2081  inLowPowerState = false;
2082 
2083  // push to DRAMPower
2084  // use pwrStateTrans for cases where we have a power event scheduled
2085  // to enter low power that has not yet been processed
2086  if (pwrStateTrans == PWR_ACT_PDN) {
2087  cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick));
2088  DPRINTF(DRAMPower, "%llu,PUP_ACT,0,%d\n", divCeil(wake_up_tick,
2089  memory.tCK) - memory.timeStampOffset, rank);
2090 
2091  } else if (pwrStateTrans == PWR_PRE_PDN) {
2092  cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick));
2093  DPRINTF(DRAMPower, "%llu,PUP_PRE,0,%d\n", divCeil(wake_up_tick,
2094  memory.tCK) - memory.timeStampOffset, rank);
2095  } else if (pwrStateTrans == PWR_SREF) {
2096  cmdList.push_back(Command(MemCommand::SREX, 0, wake_up_tick));
2097  DPRINTF(DRAMPower, "%llu,SREX,0,%d\n", divCeil(wake_up_tick,
2098  memory.tCK) - memory.timeStampOffset, rank);
2099  }
2100 }
2101 
2102 void
2104 {
2105  // Should be in a power-down or self-refresh state
2106  assert((pwrState == PWR_ACT_PDN) || (pwrState == PWR_PRE_PDN) ||
2107  (pwrState == PWR_SREF));
2108 
2109  // Check current state to determine transition state
2110  if (pwrState == PWR_ACT_PDN) {
2111  // banks still open, transition to PWR_ACT
2112  schedulePowerEvent(PWR_ACT, curTick());
2113  } else {
2114  // transitioning from a precharge power-down or self-refresh state
2115  // banks are closed - transition to PWR_IDLE
2116  schedulePowerEvent(PWR_IDLE, curTick());
2117  }
2118 }
2119 
2120 void
2122 {
2123  assert(curTick() >= pwrStateTick);
2124  // remember where we were, and for how long
2125  Tick duration = curTick() - pwrStateTick;
2126  PowerState prev_state = pwrState;
2127 
2128  // update the accounting
2129  pwrStateTime[prev_state] += duration;
2130 
2131  // track to total idle time
2132  if ((prev_state == PWR_PRE_PDN) || (prev_state == PWR_ACT_PDN) ||
2133  (prev_state == PWR_SREF)) {
2134  totalIdleTime += duration;
2135  }
2136 
2137  pwrState = pwrStateTrans;
2138  pwrStateTick = curTick();
2139 
2140  // if rank was refreshing, make sure to start scheduling requests again
2141  if (prev_state == PWR_REF) {
2142  // bus IDLED prior to REF
2143  // counter should be one for refresh command only
2144  assert(outstandingEvents == 1);
2145  // REF complete, decrement count
2146  --outstandingEvents;
2147 
2148  DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
2149  // if sleeping after refresh
2150  if (pwrState != PWR_IDLE) {
2151  assert((pwrState == PWR_PRE_PDN) || (pwrState == PWR_SREF));
2152  DPRINTF(DRAMState, "Switching to power down state after refreshing"
2153  " rank %d at %llu tick\n", rank, curTick());
2154  }
2155  if (pwrState != PWR_SREF) {
2156  // rank is not available in SREF
2157  // don't transition to IDLE in this case
2158  refreshState = REF_IDLE;
2159  }
2160  // a request event could be already scheduled by the state
2161  // machine of the other rank
2162  if (!memory.nextReqEvent.scheduled()) {
2163  DPRINTF(DRAM, "Scheduling next request after refreshing rank %d\n",
2164  rank);
2165  schedule(memory.nextReqEvent, curTick());
2166  }
2167  } else if (pwrState == PWR_ACT) {
2168  if (refreshState == REF_PD_EXIT) {
2169  // kick the refresh event loop into action again
2170  assert(prev_state == PWR_ACT_PDN);
2171 
2172  // go back to REF event and close banks
2173  refreshState = REF_PRE;
2174  schedule(refreshEvent, curTick());
2175  }
2176  } else if (pwrState == PWR_IDLE) {
2177  DPRINTF(DRAMState, "All banks precharged\n");
2178  if (prev_state == PWR_SREF) {
2179  // set refresh state to REF_SREF_EXIT, ensuring isAvailable
2180  // continues to return false during tXS after SREF exit
2181  // Schedule a refresh which kicks things back into action
2182  // when it finishes
2183  refreshState = REF_SREF_EXIT;
2184  schedule(refreshEvent, curTick() + memory.tXS);
2185  } else {
2186  // if we have a pending refresh, and are now moving to
2187  // the idle state, directly transition to a refresh
2188  if ((refreshState == REF_PRE) || (refreshState == REF_PD_EXIT)) {
2189  // ensure refresh is restarted only after final PRE command.
2190  // do not restart refresh if controller is in an intermediate
2191  // state, after PRE_PDN exit, when banks are IDLE but an
2192  // ACT is scheduled.
2193  if (!activateEvent.scheduled()) {
2194  // there should be nothing waiting at this point
2195  assert(!powerEvent.scheduled());
2196  // update the state in zero time and proceed below
2197  pwrState = PWR_REF;
2198  } else {
2199  // must have PRE scheduled to transition back to IDLE
2200  // and re-kick off refresh
2201  assert(prechargeEvent.scheduled());
2202  }
2203  }
2204  }
2205  }
2206 
2207  // we transition to the refresh state, let the refresh state
2208  // machine know of this state update and let it deal with the
2209  // scheduling of the next power state transition as well as the
2210  // following refresh
2211  if (pwrState == PWR_REF) {
2212  assert(refreshState == REF_PRE || refreshState == REF_PD_EXIT);
2213  DPRINTF(DRAMState, "Refreshing\n");
2214 
2215  // kick the refresh event loop into action again, and that
2216  // in turn will schedule a transition to the idle power
2217  // state once the refresh is done
2218  if (refreshState == REF_PD_EXIT) {
2219  // Wait for PD exit timing to complete before issuing REF
2220  schedule(refreshEvent, curTick() + memory.tXP);
2221  } else {
2222  schedule(refreshEvent, curTick());
2223  }
2224  // Banks transitioned to IDLE, start REF
2225  refreshState = REF_START;
2226  }
2227 }
2228 
2229 void
2231 {
2232  // All commands up to refresh have completed
2233  // flush cmdList to DRAMPower
2234  flushCmdList();
2235 
2236  // update the counters for DRAMPower, passing false to
2237  // indicate that this is not the last command in the
2238  // list. DRAMPower requires this information for the
2239  // correct calculation of the background energy at the end
2240  // of the simulation. Ideally we would want to call this
2241  // function with true once at the end of the
2242  // simulation. However, the discarded energy is extremly
2243  // small and does not effect the final results.
2244  power.powerlib.updateCounters(false);
2245 
2246  // call the energy function
2247  power.powerlib.calcEnergy();
2248 
2249  // Get the energy and power from DRAMPower
2250  Data::MemoryPowerModel::Energy energy =
2251  power.powerlib.getEnergy();
2252  Data::MemoryPowerModel::Power rank_power =
2253  power.powerlib.getPower();
2254 
2255  actEnergy = energy.act_energy * memory.devicesPerRank;
2256  preEnergy = energy.pre_energy * memory.devicesPerRank;
2257  readEnergy = energy.read_energy * memory.devicesPerRank;
2258  writeEnergy = energy.write_energy * memory.devicesPerRank;
2259  refreshEnergy = energy.ref_energy * memory.devicesPerRank;
2260  actBackEnergy = energy.act_stdby_energy * memory.devicesPerRank;
2261  preBackEnergy = energy.pre_stdby_energy * memory.devicesPerRank;
2262  actPowerDownEnergy = energy.f_act_pd_energy * memory.devicesPerRank;
2263  prePowerDownEnergy = energy.f_pre_pd_energy * memory.devicesPerRank;
2264  selfRefreshEnergy = energy.sref_energy * memory.devicesPerRank;
2265  totalEnergy = energy.total_energy * memory.devicesPerRank;
2266  averagePower = rank_power.average_power * memory.devicesPerRank;
2267 }
2268 
2269 void
2271 {
2272  DPRINTF(DRAM,"Computing final stats\n");
2273 
2274  // Force DRAM power to update counters based on time spent in
2275  // current state up to curTick()
2276  cmdList.push_back(Command(MemCommand::NOP, 0, curTick()));
2277 
2278  // Update the stats
2279  updatePowerStats();
2280 
2281  // final update of power state times
2282  pwrStateTime[pwrState] += (curTick() - pwrStateTick);
2283  pwrStateTick = curTick();
2284 
2285 }
2286 
2287 void
2289 {
2290  using namespace Stats;
2291 
2292  pwrStateTime
2293  .init(6)
2294  .name(name() + ".memoryStateTime")
2295  .desc("Time in different power states");
2296  pwrStateTime.subname(0, "IDLE");
2297  pwrStateTime.subname(1, "REF");
2298  pwrStateTime.subname(2, "SREF");
2299  pwrStateTime.subname(3, "PRE_PDN");
2300  pwrStateTime.subname(4, "ACT");
2301  pwrStateTime.subname(5, "ACT_PDN");
2302 
2303  actEnergy
2304  .name(name() + ".actEnergy")
2305  .desc("Energy for activate commands per rank (pJ)");
2306 
2307  preEnergy
2308  .name(name() + ".preEnergy")
2309  .desc("Energy for precharge commands per rank (pJ)");
2310 
2311  readEnergy
2312  .name(name() + ".readEnergy")
2313  .desc("Energy for read commands per rank (pJ)");
2314 
2315  writeEnergy
2316  .name(name() + ".writeEnergy")
2317  .desc("Energy for write commands per rank (pJ)");
2318 
2319  refreshEnergy
2320  .name(name() + ".refreshEnergy")
2321  .desc("Energy for refresh commands per rank (pJ)");
2322 
2323  actBackEnergy
2324  .name(name() + ".actBackEnergy")
2325  .desc("Energy for active background per rank (pJ)");
2326 
2327  preBackEnergy
2328  .name(name() + ".preBackEnergy")
2329  .desc("Energy for precharge background per rank (pJ)");
2330 
2331  actPowerDownEnergy
2332  .name(name() + ".actPowerDownEnergy")
2333  .desc("Energy for active power-down per rank (pJ)");
2334 
2335  prePowerDownEnergy
2336  .name(name() + ".prePowerDownEnergy")
2337  .desc("Energy for precharge power-down per rank (pJ)");
2338 
2339  selfRefreshEnergy
2340  .name(name() + ".selfRefreshEnergy")
2341  .desc("Energy for self refresh per rank (pJ)");
2342 
2343  totalEnergy
2344  .name(name() + ".totalEnergy")
2345  .desc("Total energy per rank (pJ)");
2346 
2347  averagePower
2348  .name(name() + ".averagePower")
2349  .desc("Core power per rank (mW)");
2350 
2351  totalIdleTime
2352  .name(name() + ".totalIdleTime")
2353  .desc("Total Idle time Per DRAM Rank");
2354 
2356 }
2357 void
2359 {
2360  using namespace Stats;
2361 
2363 
2364  for (auto r : ranks) {
2365  r->regStats();
2366  }
2367 
2368  readReqs
2369  .name(name() + ".readReqs")
2370  .desc("Number of read requests accepted");
2371 
2372  writeReqs
2373  .name(name() + ".writeReqs")
2374  .desc("Number of write requests accepted");
2375 
2376  readBursts
2377  .name(name() + ".readBursts")
2378  .desc("Number of DRAM read bursts, "
2379  "including those serviced by the write queue");
2380 
2381  writeBursts
2382  .name(name() + ".writeBursts")
2383  .desc("Number of DRAM write bursts, "
2384  "including those merged in the write queue");
2385 
2387  .name(name() + ".servicedByWrQ")
2388  .desc("Number of DRAM read bursts serviced by the write queue");
2389 
2391  .name(name() + ".mergedWrBursts")
2392  .desc("Number of DRAM write bursts merged with an existing one");
2393 
2395  .name(name() + ".neitherReadNorWriteReqs")
2396  .desc("Number of requests that are neither read nor write");
2397 
2400  .name(name() + ".perBankRdBursts")
2401  .desc("Per bank write bursts");
2402 
2404  .init(banksPerRank * ranksPerChannel)
2405  .name(name() + ".perBankWrBursts")
2406  .desc("Per bank write bursts");
2407 
2408  avgRdQLen
2409  .name(name() + ".avgRdQLen")
2410  .desc("Average read queue length when enqueuing")
2411  .precision(2);
2412 
2413  avgWrQLen
2414  .name(name() + ".avgWrQLen")
2415  .desc("Average write queue length when enqueuing")
2416  .precision(2);
2417 
2418  totQLat
2419  .name(name() + ".totQLat")
2420  .desc("Total ticks spent queuing");
2421 
2422  totBusLat
2423  .name(name() + ".totBusLat")
2424  .desc("Total ticks spent in databus transfers");
2425 
2426  totMemAccLat
2427  .name(name() + ".totMemAccLat")
2428  .desc("Total ticks spent from burst creation until serviced "
2429  "by the DRAM");
2430 
2431  avgQLat
2432  .name(name() + ".avgQLat")
2433  .desc("Average queueing delay per DRAM burst")
2434  .precision(2);
2435 
2437 
2438  avgBusLat
2439  .name(name() + ".avgBusLat")
2440  .desc("Average bus latency per DRAM burst")
2441  .precision(2);
2442 
2444 
2445  avgMemAccLat
2446  .name(name() + ".avgMemAccLat")
2447  .desc("Average memory access latency per DRAM burst")
2448  .precision(2);
2449 
2451 
2452  numRdRetry
2453  .name(name() + ".numRdRetry")
2454  .desc("Number of times read queue was full causing retry");
2455 
2456  numWrRetry
2457  .name(name() + ".numWrRetry")
2458  .desc("Number of times write queue was full causing retry");
2459 
2460  readRowHits
2461  .name(name() + ".readRowHits")
2462  .desc("Number of row buffer hits during reads");
2463 
2464  writeRowHits
2465  .name(name() + ".writeRowHits")
2466  .desc("Number of row buffer hits during writes");
2467 
2469  .name(name() + ".readRowHitRate")
2470  .desc("Row buffer hit rate for reads")
2471  .precision(2);
2472 
2474 
2476  .name(name() + ".writeRowHitRate")
2477  .desc("Row buffer hit rate for writes")
2478  .precision(2);
2479 
2481 
2482  readPktSize
2483  .init(ceilLog2(burstSize) + 1)
2484  .name(name() + ".readPktSize")
2485  .desc("Read request sizes (log2)");
2486 
2487  writePktSize
2488  .init(ceilLog2(burstSize) + 1)
2489  .name(name() + ".writePktSize")
2490  .desc("Write request sizes (log2)");
2491 
2492  rdQLenPdf
2494  .name(name() + ".rdQLenPdf")
2495  .desc("What read queue length does an incoming req see");
2496 
2497  wrQLenPdf
2499  .name(name() + ".wrQLenPdf")
2500  .desc("What write queue length does an incoming req see");
2501 
2504  .name(name() + ".bytesPerActivate")
2505  .desc("Bytes accessed per row activation")
2506  .flags(nozero);
2507 
2510  .name(name() + ".rdPerTurnAround")
2511  .desc("Reads before turning the bus around for writes")
2512  .flags(nozero);
2513 
2516  .name(name() + ".wrPerTurnAround")
2517  .desc("Writes before turning the bus around for reads")
2518  .flags(nozero);
2519 
2521  .name(name() + ".bytesReadDRAM")
2522  .desc("Total number of bytes read from DRAM");
2523 
2524  bytesReadWrQ
2525  .name(name() + ".bytesReadWrQ")
2526  .desc("Total number of bytes read from write queue");
2527 
2528  bytesWritten
2529  .name(name() + ".bytesWritten")
2530  .desc("Total number of bytes written to DRAM");
2531 
2532  bytesReadSys
2533  .name(name() + ".bytesReadSys")
2534  .desc("Total read bytes from the system interface side");
2535 
2537  .name(name() + ".bytesWrittenSys")
2538  .desc("Total written bytes from the system interface side");
2539 
2540  avgRdBW
2541  .name(name() + ".avgRdBW")
2542  .desc("Average DRAM read bandwidth in MiByte/s")
2543  .precision(2);
2544 
2545  avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;
2546 
2547  avgWrBW
2548  .name(name() + ".avgWrBW")
2549  .desc("Average achieved write bandwidth in MiByte/s")
2550  .precision(2);
2551 
2552  avgWrBW = (bytesWritten / 1000000) / simSeconds;
2553 
2554  avgRdBWSys
2555  .name(name() + ".avgRdBWSys")
2556  .desc("Average system read bandwidth in MiByte/s")
2557  .precision(2);
2558 
2559  avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;
2560 
2561  avgWrBWSys
2562  .name(name() + ".avgWrBWSys")
2563  .desc("Average system write bandwidth in MiByte/s")
2564  .precision(2);
2565 
2566  avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;
2567 
2568  peakBW
2569  .name(name() + ".peakBW")
2570  .desc("Theoretical peak bandwidth in MiByte/s")
2571  .precision(2);
2572 
2573  peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;
2574 
2575  busUtil
2576  .name(name() + ".busUtil")
2577  .desc("Data bus utilization in percentage")
2578  .precision(2);
2579  busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
2580 
2581  totGap
2582  .name(name() + ".totGap")
2583  .desc("Total gap between requests");
2584 
2585  avgGap
2586  .name(name() + ".avgGap")
2587  .desc("Average gap between requests")
2588  .precision(2);
2589 
2590  avgGap = totGap / (readReqs + writeReqs);
2591 
2592  // Stats for DRAM Power calculation based on Micron datasheet
2593  busUtilRead
2594  .name(name() + ".busUtilRead")
2595  .desc("Data bus utilization in percentage for reads")
2596  .precision(2);
2597 
2598  busUtilRead = avgRdBW / peakBW * 100;
2599 
2600  busUtilWrite
2601  .name(name() + ".busUtilWrite")
2602  .desc("Data bus utilization in percentage for writes")
2603  .precision(2);
2604 
2605  busUtilWrite = avgWrBW / peakBW * 100;
2606 
2607  pageHitRate
2608  .name(name() + ".pageHitRate")
2609  .desc("Row buffer hit rate, read and write combined")
2610  .precision(2);
2611 
2614 }
2615 
2616 void
2618 {
2619  // rely on the abstract memory
2620  functionalAccess(pkt);
2621 }
2622 
2624 DRAMCtrl::getSlavePort(const string &if_name, PortID idx)
2625 {
2626  if (if_name != "port") {
2627  return MemObject::getSlavePort(if_name, idx);
2628  } else {
2629  return port;
2630  }
2631 }
2632 
2633 DrainState
2635 {
2636  // if there is anything in any of our internal queues, keep track
2637  // of that as well
2638  if (!(writeQueue.empty() && readQueue.empty() && respQueue.empty() &&
2639  allRanksDrained())) {
2640 
2641  DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
2642  " resp: %d\n", writeQueue.size(), readQueue.size(),
2643  respQueue.size());
2644 
2645  // the only queue that is not drained automatically over time
2646  // is the write queue, thus kick things into action if needed
2647  if (!writeQueue.empty() && !nextReqEvent.scheduled()) {
2649  }
2650 
2651  // also need to kick off events to exit self-refresh
2652  for (auto r : ranks) {
2653  // force self-refresh exit, which in turn will issue auto-refresh
2654  if (r->pwrState == PWR_SREF) {
2655  DPRINTF(DRAM,"Rank%d: Forcing self-refresh wakeup in drain\n",
2656  r->rank);
2657  r->scheduleWakeUpEvent(tXS);
2658  }
2659  }
2660 
2661  return DrainState::Draining;
2662  } else {
2663  return DrainState::Drained;
2664  }
2665 }
2666 
2667 bool
2669 {
2670  // true until proven false
2671  bool all_ranks_drained = true;
2672  for (auto r : ranks) {
2673  // then verify that the power state is IDLE
2674  // ensuring all banks are closed and rank is not in a low power state
2675  all_ranks_drained = r->inPwrIdleState() && all_ranks_drained;
2676  }
2677  return all_ranks_drained;
2678 }
2679 
2680 void
2682 {
2683  if (!isTimingMode && system()->isTimingMode()) {
2684  // if we switched to timing mode, kick things into action,
2685  // and behave as if we restored from a checkpoint
2686  startup();
2687  } else if (isTimingMode && !system()->isTimingMode()) {
2688  // if we switch from timing mode, stop the refresh events to
2689  // not cause issues with KVM
2690  for (auto r : ranks) {
2691  r->suspend();
2692  }
2693  }
2694 
2695  // update the mode
2697 }
2698 
2699 DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
2700  : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
2701  memory(_memory)
2702 { }
2703 
2706 {
2707  AddrRangeList ranges;
2708  ranges.push_back(memory.getAddrRange());
2709  return ranges;
2710 }
2711 
2712 void
2714 {
2715  pkt->pushLabel(memory.name());
2716 
2717  if (!queue.checkFunctional(pkt)) {
2718  // Default implementation of SimpleTimingPort::recvFunctional()
2719  // calls recvAtomic() and throws away the latency; we can save a
2720  // little here by just not calculating the latency.
2721  memory.recvFunctional(pkt);
2722  }
2723 
2724  pkt->popLabel();
2725 }
2726 
2727 Tick
2729 {
2730  return memory.recvAtomic(pkt);
2731 }
2732 
2733 bool
2735 {
2736  // pass it to the memory controller
2737  return memory.recvTimingReq(pkt);
2738 }
2739 
2740 DRAMCtrl*
2741 DRAMCtrlParams::create()
2742 {
2743  return new DRAMCtrl(this);
2744 }
Stats::Formula avgRdBW
Definition: dram_ctrl.hh:1019
Stats::Vector rdQLenPdf
Definition: dram_ctrl.hh:1002
#define DPRINTF(x,...)
Definition: trace.hh:212
Stats::Scalar bytesWritten
Definition: dram_ctrl.hh:989
bool retryWrReq
Definition: dram_ctrl.hh:140
Enums::PageManage pageMgmt
Definition: dram_ctrl.hh:945
void functionalAccess(PacketPtr pkt)
Perform an untimed memory read or write without changing anything but the memory itself.
const uint32_t writeLowThreshold
Definition: dram_ctrl.hh:909
Stats::Scalar totQLat
Definition: dram_ctrl.hh:1009
Stats::Scalar totMemAccLat
Definition: dram_ctrl.hh:1010
const uint32_t activationLimit
Definition: dram_ctrl.hh:937
uint32_t bytesAccessed
Definition: dram_ctrl.hh:196
uint64_t granularity() const
Determing the interleaving granularity of the range.
Definition: addr_range.hh:196
PowerState
The power state captures the different operational states of the DRAM and interacts with the bus read...
Definition: dram_ctrl.hh:233
const Tick entryTime
When did request enter the controller.
Definition: dram_ctrl.hh:626
void prechargeBank(Rank &rank_ref, Bank &bank_ref, Tick pre_at, bool trace=true)
Precharge a given bank and also update when the precharge is done.
Definition: dram_ctrl.cc:1029
void printQs() const
Used for debugging to observe the contents of the queues.
Definition: dram_ctrl.cc:584
Stats::Scalar readRowHits
Definition: dram_ctrl.hh:1033
bool retryRdReq
Remember if we have to retry a request when available.
Definition: dram_ctrl.hh:139
const std::string & name()
Definition: trace.cc:49
bool interleaved() const
Determine if the range is interleaved or not.
Definition: addr_range.hh:184
const Tick tRCD
Definition: dram_ctrl.hh:924
const Tick tWR
Definition: dram_ctrl.hh:928
Bitfield< 7 > i
Definition: miscregs.hh:1378
std::vector< Command > cmdList
List of comamnds issued, to be sent to DRAMPpower at refresh and stats dump.
Definition: dram_ctrl.hh:437
Stats::Scalar numWrRetry
Definition: dram_ctrl.hh:998
std::vector< Rank * > ranks
Vector of ranks.
Definition: dram_ctrl.hh:883
STL pair class.
Definition: stl.hh:61
DrainState
Object drain/handover states.
Definition: drain.hh:71
void doDRAMAccess(DRAMPacket *dram_pkt)
Actually do the DRAM access - figure out the latency it will take to service the req based on bank st...
Definition: dram_ctrl.cc:1077
#define panic(...)
Definition: misc.hh:153
Running normally.
const Tick tRAS
Definition: dram_ctrl.hh:927
bool writeQueueFull(unsigned int pktCount) const
Check if the write queue has room for more entries.
Definition: dram_ctrl.cc:307
A DRAM packet stores packets along with the timestamp of when the packet entered the queue...
Definition: dram_ctrl.hh:621
uint32_t writeEntries
Track number of packets in write queue going to this rank.
Definition: dram_ctrl.hh:412
const Tick tRRD
Definition: dram_ctrl.hh:932
uint32_t openRow
Definition: dram_ctrl.hh:187
Stats::Formula avgGap
Definition: dram_ctrl.hh:1037
Stats::Scalar readReqs
Definition: dram_ctrl.hh:983
Stats::Histogram bytesPerActivate
Definition: dram_ctrl.hh:1004
Stats::Scalar bytesReadWrQ
Definition: dram_ctrl.hh:988
uint32_t readEntries
Track number of packets in read queue going to this rank.
Definition: dram_ctrl.hh:407
Stats::Histogram rdPerTurnAround
Definition: dram_ctrl.hh:1005
const Tick tWTR
Definition: dram_ctrl.hh:919
MemoryPort(const std::string &name, DRAMCtrl &_memory)
Definition: dram_ctrl.cc:2699
const Tick frontendLatency
Pipeline latency of the controller frontend.
Definition: dram_ctrl.hh:958
Stats::Formula pageHitRate
Definition: dram_ctrl.hh:1040
bool hasData() const
Definition: packet.hh:521
ip6_addr_t addr
Definition: inet.hh:335
bool isWrite() const
Definition: packet.hh:503
const Tick tXP
Definition: dram_ctrl.hh:935
panic_if(!root,"Invalid expression\n")
Stats::Scalar totGap
Definition: dram_ctrl.hh:999
Stats::Average avgWrQLen
Definition: dram_ctrl.hh:1030
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:381
const uint32_t channels
Definition: dram_ctrl.hh:904
bool recvTimingReq(PacketPtr pkt)
Definition: dram_ctrl.cc:600
Bitfield< 6 > granularity
Definition: misc.hh:944
DRAMCtrl(const DRAMCtrlParams *p)
Definition: dram_ctrl.cc:60
DrainState drain() override
Notify an object that it needs to drain its state.
Definition: dram_ctrl.cc:2634
uint32_t rowAccesses
Definition: dram_ctrl.hh:195
std::deque< DRAMPacket * > respQueue
Response queue where read packets wait after we're done working with them, but it's not time to send ...
Definition: dram_ctrl.hh:878
const Tick tCL
Definition: dram_ctrl.hh:925
unsigned int burstsServiced
Number of DRAM bursts serviced so far for a system packet.
Definition: dram_ctrl.hh:610
int ceilLog2(const T &n)
Definition: intmath.hh:174
RefreshState refreshState
current refresh state
Definition: dram_ctrl.hh:392
Addr addr
The starting address of the DRAM packet.
Definition: dram_ctrl.hh:654
Bitfield< 23, 0 > offset
Definition: types.hh:149
Histogram & init(size_type size)
Set the parameters of this histogram.
Definition: statistics.hh:2560
uint8_t bank
Definition: dram_ctrl.hh:188
A queued port is a port that has an infinite queue for outgoing packets and thus decouples the module...
Definition: qport.hh:59
Stats::Formula busUtilWrite
Definition: dram_ctrl.hh:1026
virtual BaseSlavePort & getSlavePort(const std::string &if_name, PortID idx=InvalidPortID)
Get a slave port with a given name and index.
Definition: mem_object.cc:58
The DRAM controller is a single-channel memory controller capturing the most important timing constra...
Definition: dram_ctrl.hh:96
uint8_t rank
Current Rank index.
Definition: dram_ctrl.hh:402
Stats::Scalar mergedWrBursts
Definition: dram_ctrl.hh:993
Stats::Average avgRdQLen
Definition: dram_ctrl.hh:1029
Stats::Scalar writeBursts
Definition: dram_ctrl.hh:986
void pushLabel(const std::string &lbl)
Push label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1160
const uint32_t ranksPerChannel
Definition: dram_ctrl.hh:900
Tick Frequency
The simulated frequency of curTick(). (In ticks per second)
Definition: core.cc:47
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
Definition: statistics.hh:311
A BaseSlavePort is a protocol-agnostic slave port, responsible only for the structural connection to ...
Definition: port.hh:139
Stats::Formula simSeconds
Definition: stat_control.cc:64
Stats::Vector perBankRdBursts
Definition: dram_ctrl.hh:995
A burst helper helps organize and manage a packet that is larger than the DRAM burst size...
Definition: dram_ctrl.hh:602
Derived & init(size_type size)
Set this vector to have the given size.
Definition: statistics.hh:1118
const uint16_t bankId
Bank id is calculated considering banks in all the ranks eg: 2 ranks each with 8 banks, then bankId = 0 –> rank0, bank0 and bankId = 8 –> rank1, bank0.
Definition: dram_ctrl.hh:646
void deschedule(Event &event)
Definition: eventq.hh:734
void computeStats()
Computes stats just prior to dump event.
Definition: dram_ctrl.cc:2270
virtual void init() override
Initialise this memory.
Definition: dram_ctrl.cc:200
uint8_t outstandingEvents
Number of ACT, RD, and WR events currently scheduled Incremented when a refresh event is started as w...
Definition: dram_ctrl.hh:419
void startup(Tick ref_tick)
Kick off accounting for power and refresh states and schedule initial refresh.
Definition: dram_ctrl.cc:1641
virtual BaseSlavePort & getSlavePort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a slave port with a given name and index.
Definition: dram_ctrl.cc:2624
unsigned int numBanksActive
To track number of banks which are currently active for this rank.
Definition: dram_ctrl.hh:449
Stats::Vector writePktSize
Definition: dram_ctrl.hh:1001
void recvFunctional(PacketPtr pkt)
Receive a functional request packet from the master port.
Definition: dram_ctrl.cc:2713
bool isTimingMode
Remeber if the memory system is in timing mode.
Definition: dram_ctrl.hh:134
void processWriteDoneEvent()
Definition: dram_ctrl.cc:1766
#define warn(...)
Definition: misc.hh:219
Stats::Formula peakBW
Definition: dram_ctrl.hh:1023
Bitfield< 7 > b
Definition: miscregs.hh:1564
A basic class to track the bank state, i.e.
Definition: dram_ctrl.hh:180
Stats::Histogram wrPerTurnAround
Definition: dram_ctrl.hh:1006
const Tick MaxTick
Definition: types.hh:65
Stats::Scalar totBusLat
Definition: dram_ctrl.hh:1011
const uint32_t maxAccessesPerRow
Max column accesses (read and write) per row, before forefully closing it.
Definition: dram_ctrl.hh:951
const uint32_t bankGroupsPerRank
Definition: dram_ctrl.hh:901
const Tick tREFI
Definition: dram_ctrl.hh:931
Tick curTick()
The current simulated tick.
Definition: core.hh:47
Addr burstAlign(Addr addr) const
Burst-align an address.
Definition: dram_ctrl.hh:853
Tick recvAtomic(PacketPtr pkt)
Receive an atomic request packet from the master port.
Definition: dram_ctrl.cc:2728
EventWrapper< DRAMCtrl,&DRAMCtrl::processNextReqEvent > nextReqEvent
Definition: dram_ctrl.hh:688
Stats::Formula avgRdBWSys
Definition: dram_ctrl.hh:1021
Enums::MemSched memSchedPolicy
Memory controller configuration initialized based on parameter values.
Definition: dram_ctrl.hh:943
Enums::PwrState pwrState() const
const Tick backendLatency
Pipeline latency of the backend and PHY.
Definition: dram_ctrl.hh:965
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Definition: packet.hh:340
Rank(DRAMCtrl &_memory, const DRAMCtrlParams *_p)
Definition: dram_ctrl.cc:1629
void processPowerEvent()
Definition: dram_ctrl.cc:2121
void updatePowerStats(Rank &rank_ref)
This function increments the energy when called.
BurstHelper * burstHelper
A pointer to the BurstHelper if this DRAMPacket is a split packet If not a split packet (common case)...
Definition: dram_ctrl.hh:666
const Tick tRP
Definition: dram_ctrl.hh:926
std::vector< Bank > banks
Vector of Banks.
Definition: dram_ctrl.hh:443
Stats::Formula avgQLat
Definition: dram_ctrl.hh:1014
uint64_t Tick
Tick count type.
Definition: types.hh:63
uint64_t power(uint32_t n, uint32_t e)
Definition: intmath.hh:79
DRAMCtrl declaration.
System * system() const
read the system pointer Implemented for completeness with the setter
void popLabel()
Pop label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1170
PowerState pwrState
Current power state.
Definition: dram_ctrl.hh:387
void checkDrainDone()
Let the rank check if it was waiting for requests to drain to allow it to transition states...
Definition: dram_ctrl.cc:1683
void regStats() override
Register Statistics.
Definition: dram_ctrl.cc:2358
void replaceBits(T &val, int first, int last, B bit_val)
A convenience function to replace bits first to last of val with bit_val in place.
Definition: bitfield.hh:145
const Tick tRFC
Definition: dram_ctrl.hh:930
std::deque< Tick > actTicks
List to keep track of activate ticks.
Definition: dram_ctrl.hh:452
bool chooseNext(std::deque< DRAMPacket * > &queue, Tick extra_col_delay)
The memory schduler/arbiter - picks which request needs to go next, based on the specified policy suc...
Definition: dram_ctrl.cc:755
Stats::Formula busUtil
Definition: dram_ctrl.hh:1024
uint32_t stripes() const
Determine the number of interleaved address stripes this range is part of.
Definition: addr_range.hh:207
void access(PacketPtr pkt)
Perform an untimed memory access and update all the state (e.g.
uint64_t timeStampOffset
Definition: dram_ctrl.hh:1046
#define fatal(...)
Definition: misc.hh:163
uint32_t writesThisTime
Definition: dram_ctrl.hh:911
Stats::Vector readPktSize
Definition: dram_ctrl.hh:1000
Stats::Scalar readBursts
Definition: dram_ctrl.hh:985
Tick recvAtomic(PacketPtr pkt)
Definition: dram_ctrl.cc:276
const Tick tRTP
Definition: dram_ctrl.hh:929
bool isPowerOf2(const T &n)
Definition: intmath.hh:73
bool needsResponse() const
Definition: packet.hh:516
Stats::Formula avgMemAccLat
Definition: dram_ctrl.hh:1016
Stats::Scalar bytesReadDRAM
Definition: dram_ctrl.hh:987
bool isRead() const
Definition: packet.hh:502
static bool sortTime(const Command &cmd, const Command &cmd_next)
Function for sorting Command structures based on timeStamp.
Definition: dram_ctrl.hh:1072
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:142
std::pair< uint64_t, bool > minBankPrep(const std::deque< DRAMPacket * > &queue, Tick min_col_at) const
Find which are the earliest banks ready to issue an activate for the enqueued requests.
Definition: dram_ctrl.cc:1547
bool cacheResponding() const
Definition: packet.hh:558
Stats::Formula readRowHitRate
Definition: dram_ctrl.hh:1035
Derived & precision(int _precision)
Set the precision and marks this stat to print at the end of simulation.
Definition: statistics.hh:299
Draining buffers pending serialization/handover.
#define ULL(N)
uint64_t constant
Definition: types.hh:50
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
Definition: packet.hh:358
uint8_t bankgr
Definition: dram_ctrl.hh:189
bool reorderQueue(std::deque< DRAMPacket * > &queue, Tick extra_col_delay)
For FR-FCFS policy reorder the read/write queue depending on row buffer hits and earliest bursts avai...
Definition: dram_ctrl.cc:796
const uint8_t rank
Will be populated by address decoder.
Definition: dram_ctrl.hh:637
Stats::Formula avgWrBWSys
Definition: dram_ctrl.hh:1022
void activateBank(Rank &rank_ref, Bank &bank_ref, Tick act_tick, uint32_t row)
Keep track of when row activations happen, in order to enforce the maximum number of activations in t...
Definition: dram_ctrl.cc:929
A Packet is used to encapsulate a transfer between two objects in the memory system (e...
Definition: packet.hh:245
const Tick tCS
Definition: dram_ctrl.hh:921
const Tick M5_CLASS_VAR_USED tCK
Basic memory timing parameters initialized based on parameter values.
Definition: dram_ctrl.hh:918
uint64_t size() const
Get the memory size.
Stats::Scalar servicedByWrQ
Definition: dram_ctrl.hh:992
void processRefreshEvent()
Definition: dram_ctrl.cc:1777
Tick prevArrival
Definition: dram_ctrl.hh:972
const Tick tXAW
Definition: dram_ctrl.hh:934
Stats::Formula avgBusLat
Definition: dram_ctrl.hh:1015
const uint32_t row
Definition: dram_ctrl.hh:639
DRAMPacket * decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned int size, bool isRead)
Address decoder to figure out physical mapping onto ranks, banks, and rows.
Definition: dram_ctrl.cc:315
const unsigned int burstCount
Number of DRAM bursts requred for a system packet.
Definition: dram_ctrl.hh:607
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
Definition: dram_ctrl.hh:1052
void addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
Decode the incoming pkt, create a dram_pkt and push to the back of the write queue.
Definition: dram_ctrl.cc:517
static const uint32_t NO_ROW
Definition: dram_ctrl.hh:185
void registerDumpCallback(Callback *cb)
Register a callback that should be called whenever statistics are about to be dumped.
Definition: statistics.cc:535
const uint32_t rowBufferSize
Definition: dram_ctrl.hh:897
void sendRangeChange() const
Called by the owner to send a range change.
Definition: port.hh:410
const uint32_t devicesPerRank
Definition: dram_ctrl.hh:895
uint32_t rowsPerBank
Definition: dram_ctrl.hh:905
STL deque class.
Definition: stl.hh:47
Bitfield< 24 > j
Definition: miscregs.hh:1369
const uint32_t minWritesPerSwitch
Definition: dram_ctrl.hh:910
void suspend()
Stop the refresh events.
Definition: dram_ctrl.cc:1653
Stats::Scalar writeReqs
Definition: dram_ctrl.hh:984
void schedulePowerEvent(PowerState pwr_state, Tick tick)
Schedule a power state transition in the future, and potentially override an already scheduled transi...
Definition: dram_ctrl.cc:1977
void accessAndRespond(PacketPtr pkt, Tick static_latency)
When a packet reaches its "readyTime" in the response Q, use the "access()" method in AbstractMemory ...
Definition: dram_ctrl.cc:892
const Tick tXS
Definition: dram_ctrl.hh:936
Derived & name(const std::string &name)
Set the name and marks this stat to print at the end of simulation.
Definition: statistics.hh:254
void updatePowerStats()
Function to update Power Stats.
Definition: dram_ctrl.cc:2230
void reschedule(Event &event, Tick when, bool always=false)
Definition: eventq.hh:740
AddrRange range
const uint32_t writeHighThreshold
Definition: dram_ctrl.hh:908
EventWrapper< Rank,&Rank::processActivateEvent > activateEvent
Definition: dram_ctrl.hh:564
Enums::AddrMap addrMapping
Definition: dram_ctrl.hh:944
Tick nextReqTime
The soonest you have to start thinking about the next request is the longest access time that can occ...
Definition: dram_ctrl.hh:980
void regStats() override
Register Statistics.
Definition: abstract_mem.cc:81
EventWrapper< DRAMCtrl,&DRAMCtrl::processRespondEvent > respondEvent
Definition: dram_ctrl.hh:691
void init() override
Initialise this memory.
Definition: abstract_mem.cc:66
void processPrechargeEvent()
Definition: dram_ctrl.cc:1736
int size()
Definition: pagetable.hh:146
virtual const std::string name() const
Definition: sim_object.hh:117
Stats::Vector wrQLenPdf
Definition: dram_ctrl.hh:1003
void powerDownSleep(PowerState pwr_state, Tick tick)
Schedule a transition to power-down (sleep)
Definition: dram_ctrl.cc:1998
std::deque< DRAMPacket * > writeQueue
Definition: dram_ctrl.hh:859
bool lowPowerEntryReady() const
Check if the current rank is idle and should enter a low-pwer state.
Definition: dram_ctrl.cc:1665
const Tick tRTW
Definition: dram_ctrl.hh:920
bool readQueueFull(unsigned int pktCount) const
Check if the read queue has room for more entries.
Definition: dram_ctrl.cc:296
void processActivateEvent()
Definition: dram_ctrl.cc:1726
void sendRetryReq()
Send a retry to the master port that previously attempted a sendTimingReq to this slave port and fail...
Definition: port.cc:265
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
Definition: packet.hh:497
const Tick tCCD_L
Definition: dram_ctrl.hh:923
void processNextReqEvent()
Bunch of things requires to setup "events" in gem5 When event "respondEvent" occurs for example...
Definition: dram_ctrl.cc:1290
BusState busStateNext
Definition: dram_ctrl.hh:154
bool isConnected() const
Definition: port.cc:110
std::unordered_set< Addr > isInWriteQueue
To avoid iterating over the write queue to check for overlapping transactions, maintain a set of burs...
Definition: dram_ctrl.hh:868
void signalDrainDone() const
Signal that an object is drained.
Definition: drain.hh:267
const uint32_t columnsPerRowBuffer
Definition: dram_ctrl.hh:898
void recvFunctional(PacketPtr pkt)
Definition: dram_ctrl.cc:2617
T divCeil(const T &a, const U &b)
Definition: intmath.hh:198
void schedule(Event &event, Tick when)
Definition: eventq.hh:728
uint32_t readsThisTime
Definition: dram_ctrl.hh:912
BusState busState
Definition: dram_ctrl.hh:151
void schedTimingResp(PacketPtr pkt, Tick when, bool force_order=false)
Schedule the sending of a timing response.
Definition: qport.hh:91
An abstract memory represents a contiguous block of physical memory, with an associated address range...
DrainState drainState() const
Return the current drain state of an object.
Definition: drain.hh:282
Simple structure to hold the values needed to keep track of commands for DRAMPower.
Definition: dram_ctrl.hh:160
Stats::Formula avgWrBW
Definition: dram_ctrl.hh:1020
Stats::Scalar bytesReadSys
Definition: dram_ctrl.hh:990
void processWakeUpEvent()
Definition: dram_ctrl.cc:2103
const uint32_t deviceSize
The following are basic design parameters of the memory controller, and are initialized based on para...
Definition: dram_ctrl.hh:891
Stats::Scalar numRdRetry
Definition: dram_ctrl.hh:997
uint8_t activeRank
Definition: dram_ctrl.hh:1043
Stats::Formula busUtilRead
Definition: dram_ctrl.hh:1025
Derived & desc(const std::string &_desc)
Set the description and marks this stat to print at the end of simulation.
Definition: statistics.hh:287
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Definition: types.hh:181
Tick readyTime
When will request leave the controller.
Definition: dram_ctrl.hh:629
std::deque< DRAMPacket * > readQueue
The controller's main read and write queues.
Definition: dram_ctrl.hh:858
const uint32_t columnsPerStripe
Definition: dram_ctrl.hh:899
bool isAvailable() const
Check if the current rank is available for scheduling.
Definition: dram_ctrl.hh:483
const uint8_t bank
Definition: dram_ctrl.hh:638
Data::MemCommand::cmds type
Definition: dram_ctrl.hh:161
unsigned getSize() const
Definition: packet.hh:649
fatal_if(p->js_features.size() > 16,"Too many job slot feature registers specified (%i)\n", p->js_features.size())
virtual AddrRangeList getAddrRanges() const
Get a list of the non-overlapping address ranges the owner is responsible for.
Definition: dram_ctrl.cc:2705
T bits(T val, int first, int last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it...
Definition: bitfield.hh:67
bool allRanksDrained() const
Return true once refresh is complete for all ranks and there are no additional commands enqueued...
Definition: dram_ctrl.cc:2668
void processRespondEvent()
Definition: dram_ctrl.cc:661
const uint32_t writeBufferSize
Definition: dram_ctrl.hh:907
const uint32_t burstSize
Definition: dram_ctrl.hh:896
const uint32_t banksPerRank
Definition: dram_ctrl.hh:903
const uint32_t readBufferSize
Definition: dram_ctrl.hh:906
DRAMPower is a standalone tool which calculates the power consumed by a DRAM in the system...
Definition: drampower.hh:55
Stats::Formula writeRowHitRate
Definition: dram_ctrl.hh:1036
const FlagsType nozero
Don't print if this is zero.
Definition: info.hh:57
Bitfield< 0 > p
const Tick tRRD_L
Definition: dram_ctrl.hh:933
virtual void drainResume() override
Resume execution after a successful drain.
Definition: dram_ctrl.cc:2681
Stats::Scalar bytesWrittenSys
Definition: dram_ctrl.hh:991
Stats::Vector perBankWrBursts
Definition: dram_ctrl.hh:996
virtual void startup() override
startup() is the final initialization call before simulation.
Definition: dram_ctrl.cc:252
Tick busBusyUntil
Till when has the main data bus been spoken for already?
Definition: dram_ctrl.hh:970
void addToReadQueue(PacketPtr pkt, unsigned int pktCount)
When a new read comes in, first check if the write q has a pending request to the same address...
Definition: dram_ctrl.cc:421
unsigned int size
The size of this dram packet in bytes It is always equal or smaller than DRAM burst size...
Definition: dram_ctrl.hh:660
bool recvTimingReq(PacketPtr)
Receive a timing request from the master port.
Definition: dram_ctrl.cc:2734
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Definition: statistics.hh:1869
const bool bankGroupArch
Definition: dram_ctrl.hh:902
EventWrapper< Rank,&Rank::processPrechargeEvent > prechargeEvent
Definition: dram_ctrl.hh:568
const PacketPtr pkt
This comes from the outside world.
Definition: dram_ctrl.hh:632
void scheduleWakeUpEvent(Tick exit_delay)
schedule and event to wake-up from power-down or self-refresh and update bank timing parameters ...
Definition: dram_ctrl.cc:2052
Stats::Scalar writeRowHits
Definition: dram_ctrl.hh:1034
Rank class includes a vector of banks.
Definition: dram_ctrl.hh:289
bool inLowPowerState
rank is in or transitioning to power-down or self-refresh
Definition: dram_ctrl.hh:397
EventWrapper< Rank,&Rank::processWriteDoneEvent > writeDoneEvent
Definition: dram_ctrl.hh:560
bool isResponse() const
Definition: packet.hh:506
Addr getAddr() const
Definition: packet.hh:639
MemoryPort port
Our incoming port, for a multi-ported controller add a crossbar in front of it.
Definition: dram_ctrl.hh:129
bool isTimingMode() const
Is the system in timing mode?
Definition: system.hh:155
void flushCmdList()
Push command out of cmdList queue that are scheduled at or before curTick() to DRAMPower library All ...
Definition: dram_ctrl.cc:1698
const Tick tBURST
Definition: dram_ctrl.hh:922
Stats::Scalar neitherReadNorWrite
Definition: dram_ctrl.hh:994

Generated on Fri Jun 9 2017 13:03:48 for gem5 by doxygen 1.8.6