#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
#include "debug/Drain.hh"
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    nextReqEvent(this), respondEvent(this),
    deviceSize(p->device_size),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    columnsPerRowBuffer(rowBufferSize / burstSize),
    columnsPerStripe(range.interleaved() ? range.granularity() / burstSize : 1),
    ranksPerChannel(p->ranks_per_channel),
    bankGroupsPerRank(p->bank_groups_per_rank),
    bankGroupArch(p->bank_groups_per_rank > 0),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    tCK(p->tCK), tWTR(p->tWTR), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST),
    tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
    tRRD_L(p->tRRD_L), tXAW(p->tXAW), tXP(p->tXP), tXS(p->tXS),
    activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    maxAccessesPerRow(p->max_accesses_per_row),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    busBusyUntil(0), prevArrival(0),
    nextReqTime(0), activeRank(0), timeStampOffset(0)

        ranks.push_back(rank);
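// Editor's note (not part of the original source): a minimal standalone
// sketch of the burst/row-buffer geometry computed by the initializers above,
// using hypothetical device parameters (eight x8 devices per rank, burst
// length 8, 1 kB per-device row buffer).
#include <cassert>

int main()
{
    const unsigned devicesPerRank = 8, deviceBusWidth = 8, burstLength = 8;
    const unsigned deviceRowBufferSize = 1024;

    // burstSize = (devicesPerRank * burstLength * deviceBusWidth) / 8
    const unsigned burstSize =
        (devicesPerRank * burstLength * deviceBusWidth) / 8;
    const unsigned rowBufferSize = devicesPerRank * deviceRowBufferSize;

    assert(burstSize == 64);                   // 64-byte DRAM bursts
    assert(rowBufferSize == 8192);             // 8 kB row buffer per rank
    assert(rowBufferSize / burstSize == 128);  // columnsPerRowBuffer
    return 0;
}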
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);

    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",

        fatal("banks per rank (%d) must be equal to or larger than "
              "bank groups per rank (%d)\n",
        fatal("Banks per rank (%d) must be evenly divisible by bank groups "
              "per rank (%d) for equal banks per bank group\n",

        fatal("tCCD_L (%d) should be larger than tBURST (%d) when "
              "bank groups per rank (%d) is greater than 1\n",

        fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
              "bank groups per rank (%d) is greater than 1\n",

    fatal("DRAMCtrl %s is unconnected!\n", name());

        fatal("%s has %d interleaved address stripes but %d channel(s)\n",

            fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
                  "address map\n", name());

            fatal("Channel interleaving of %s must be at least as large "
                  "as the cache line size\n", name());

            fatal("Channel interleaving of %s must be at most as large "
                  "as the row-buffer size\n", name());

    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",

    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
        panic("Unknown address mapping policy chosen!");

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, ranks[rank]->banks[bank], *ranks[rank]);
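// Editor's note (not in the original source): bank_id flattens the
// (rank, bank) pair into a single index, bank_id = rank * banksPerRank + bank.
// With two ranks of eight banks each, bankId 0 is rank 0 / bank 0 and
// bankId 8 is rank 1 / bank 0.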
    assert(pktCount != 0);

    unsigned pktsServicedByWrQ = 0;

    for (int cnt = 0; cnt < pktCount; ++cnt) {

        bool foundInWrQ = false;

            if (p->addr <= addr && (addr + size) <= (p->addr + p->size)) {

                DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
                        "write queue\n", addr, size);
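// Editor's note (not in the original source): a read burst is forwarded
// straight from the write queue only when it is fully contained in a pending
// write, i.e. p->addr <= addr and addr + size <= p->addr + p->size; such
// bursts never reach the DRAM and are counted in pktsServicedByWrQ.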
    if (pktCount > 1 && burst_helper == NULL) {
        DPRINTF(DRAM, "Read to addr %lld translates to %d "
                "dram requests\n", pkt->getAddr(), pktCount);

            DPRINTF(DRAM, "Adding to read queue\n");

    if (pktsServicedByWrQ == pktCount) {

    if (burst_helper != NULL)

        DPRINTF(DRAM, "Request scheduled immediately\n");

    for (int cnt = 0; cnt < pktCount; ++cnt) {

            DPRINTF(DRAM, "Adding to write queue\n");

            // Editor's note: a write burst whose address is already tracked
            // in isInWriteQueue is merged with the existing queue entry
            // rather than being enqueued a second time.
            DPRINTF(DRAM, "Merging write burst with existing queue entry\n");

        DPRINTF(DRAM, "Request scheduled immediately\n");
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",

             "Should only see read and writes at memory controller\n");

            DPRINTF(DRAM, "Read queue full, not accepting\n");

            DPRINTF(DRAM, "Write queue full, not accepting\n");

            "processRespondEvent(): Some req has reached its readyTime\n");

    DPRINTF(DRAM, "number of read entries for rank %d is %d\n",

            DPRINTF(DRAMState, "Rank %d sleep at tick %d; current power state is "

        DPRINTF(Drain, "DRAM controller done draining\n");
    assert(!queue.empty());

    bool found_packet = false;
    if (queue.size() == 1) {

        if (ranks[dram_pkt->rank]->isAvailable()) {

            DPRINTF(DRAM, "Single request, going to a free rank\n");

            DPRINTF(DRAM, "Single request, going to a busy rank\n");

        for (auto i = queue.begin(); i != queue.end(); ++i) {

            if (ranks[dram_pkt->rank]->isAvailable()) {

                queue.push_front(dram_pkt);

        panic("No scheduling policy chosen\n");
    uint64_t earliest_banks = 0;
    bool hidden_bank_prep = false;

    bool found_hidden_bank = false;

    bool found_prepped_pkt = false;

    bool found_earliest_pkt = false;

    auto selected_pkt_it = queue.end();

    for (auto i = queue.begin(); i != queue.end(); ++i) {

                DPRINTF(DRAM, "Seamless row buffer hit\n");

            } else if (!found_hidden_bank && !found_prepped_pkt) {

                found_prepped_pkt = true;
                DPRINTF(DRAM, "Prepped row buffer hit\n");

            } else if (!found_earliest_pkt) {

                if (earliest_banks == 0) {

                    earliest_banks = bankStatus.first;
                    hidden_bank_prep = bankStatus.second;

                found_earliest_pkt = true;
                found_hidden_bank = hidden_bank_prep;

                if (hidden_bank_prep || !found_prepped_pkt)

    if (selected_pkt_it != queue.end()) {

        queue.erase(selected_pkt_it);
        queue.push_front(selected_pkt);
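// Editor's note (not in the original source): the FR-FCFS reordering above
// appears to rank candidates as follows: (1) a "seamless" row-buffer hit
// whose column command can issue without idling the bus wins outright,
// (2) otherwise the first already-prepped row-buffer hit is remembered,
// (3) otherwise the packet whose bank can be made ready earliest is kept,
// preferring banks whose activate can be hidden behind the current burst
// (hidden_bank_prep). The chosen packet is moved to the front of the queue.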
void
DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
                       Tick act_tick, uint32_t row)

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
            bank_ref.bank, rank_ref.rank, act_tick,

                rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
                        rank_ref.banks[i].actAllowedAt);

                rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
                        rank_ref.banks[i].actAllowedAt);
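// Editor's note (not in the original source): with a bank-group architecture
// (bankGroupArch), back-to-back activates to banks in the same bank group are
// spaced by the longer tRRD_L, while banks in other groups only need tRRD;
// this is consistent with the init-time check that tRRD_L should be larger
// than tRRD when bank_groups_per_rank is greater than 1.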
            panic("Got %d activates in window %d (%llu - %llu) which "

        rank_ref.actTicks.push_front(act_tick);

            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "

                rank_ref.banks[j].actAllowedAt =

                        rank_ref.banks[j].actAllowedAt);
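// Editor's note (not in the original source): actTicks records recent
// activate times so the controller can enforce the rolling activation window
// tXAW (e.g. tFAW for an activationLimit of 4). Exceeding the limit inside
// the window triggers the panic above; otherwise every bank's actAllowedAt is
// pushed out so that the next activate falls after the window closes.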
    Tick pre_done_at = pre_at + tRP;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",

    bool row_hit = true;

            if (dram_pkt->rank == j) {

                cmd_dly = tBURST + tCS;

            ranks[j]->banks[i].colAllowedAt = std::max(cmd_at + cmd_dly,
                    ranks[j]->banks[i].colAllowedAt);

    bool auto_precharge = pageMgmt == Enums::close ||

    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {

        bool got_more_hits = false;
        bool got_bank_conflict = false;

        auto p = queue.begin();

        while (!got_more_hits && p != queue.end()) {
            bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                                  (dram_pkt->bank == (*p)->bank);
            bool same_row = dram_pkt->row == (*p)->row;
            got_more_hits |= same_rank_bank && same_row;
            got_bank_conflict |= same_rank_bank && !same_row;

        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);
    std::string mem_cmd = dram_pkt->isRead ? "RD" : "WR";

    MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :

    DPRINTF(DRAM, "Access to %lld, ready at %lld bus busy until %lld.\n",

    if (auto_precharge) {

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);

        if (!r->isAvailable()) {

            DPRINTF(DRAMState, "Rank %d is not available\n", r->rank);

            r->checkDrainDone();

            if ((r->pwrState == PWR_SREF) && r->inLowPowerState) {
                DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank);

                if (r->forceSelfRefreshExit()) {
                    DPRINTF(DRAMState, "rank %d was in self refresh and"
                            " should wake up\n", r->rank);

                    r->scheduleWakeUpEvent(tXS);
    bool switched_cmd_type = false;

        DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "

        switched_cmd_type = true;

        DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "

        switched_cmd_type = true;

    bool switch_to_writes = false;

            switch_to_writes = true;

                DPRINTF(Drain, "DRAM controller done draining\n");

        bool found_read = false;

                switched_cmd_type ? tCS : 0);

            switch_to_writes = true;

    if (switch_to_writes) {

        bool found_write = false;

                switched_cmd_type ? std::min(tRTW, tCS) : 0);
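// Editor's note (not in the original source): the bus direction is switched
// roughly as follows. Reads are issued until the write queue fills past
// writeHighThreshold (or the read queue runs dry); writes are then drained,
// honouring minWritesPerSwitch and the writeLowThreshold watermark, before
// turning back to reads. When the command type has just switched, an extra
// column delay (tCS, or min(tRTW, tCS) on the write side) covers the bus
// turnaround.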
                dram_pkt->readyTime) {

std::pair<uint64_t, bool>
DRAMCtrl::minBankPrep(const std::deque<DRAMPacket*>& queue,
                      Tick min_col_at) const

    uint64_t bank_mask = 0;

    bool found_seamless_bank = false;

    bool hidden_bank_prep = false;

    for (const auto& p : queue) {
        if (p->rankRef.isAvailable())
            got_waiting[p->bankId] = true;

            uint16_t bank_id = i * banksPerRank + j;

            if (got_waiting[bank_id]) {

                assert(ranks[i]->isAvailable());

                Tick col_at = std::max(ranks[i]->banks[j].colAllowedAt,

                bool new_seamless_bank = col_at <= min_col_at;

                if (new_seamless_bank ||
                    (!found_seamless_bank && act_at <= min_act_at)) {

                    if (!found_seamless_bank &&
                        (new_seamless_bank || act_at < min_act_at)) {

                    found_seamless_bank |= new_seamless_bank;

                    hidden_bank_prep = act_at <= hidden_act_max;

                        min_act_at = act_at;

    return make_pair(bank_mask, hidden_bank_prep);
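// Editor's note (not in the original source): minBankPrep returns a pair
// whose first element is a bit mask over flattened bank ids marking the
// banks that can be made ready earliest for the queued requests, and whose
// second element says whether the required activate can be hidden behind the
// burst currently occupying the data bus.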
    pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE),
    pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE),
    refreshState(REF_IDLE), inLowPowerState(false), rank(0),
    readEntries(0), writeEntries(0), outstandingEvents(0),
    wakeUpAllowedAt(0), power(_p, false), numBanksActive(0),
    writeDoneEvent(*this), activateEvent(*this), prechargeEvent(*this),
    refreshEvent(*this), powerEvent(*this), wakeUpEvent(*this)

    bool no_queued_cmds = ((memory.busStateNext == READ) && (readEntries == 0))
                          || ((memory.busStateNext == WRITE) &&
                              (writeEntries == 0));

    if (refreshState == REF_RUN) {

        return no_queued_cmds;

    return (no_queued_cmds && (outstandingEvents == 0));

        DPRINTF(DRAM, "Refresh drain done, now precharging\n");

    auto next_iter = cmdList.begin();

    for ( ; next_iter != cmdList.end() ; ++next_iter) {

                memory.timeStampOffset);

    cmdList.assign(next_iter, cmdList.end());
    assert(outstandingEvents > 0);

    --outstandingEvents;

    if (numBanksActive == 0) {

        if (lowPowerEntryReady()) {

            DPRINTF(DRAMState, "Rank %d sleep at tick %d\n",

    assert(outstandingEvents > 0);

    --outstandingEvents;

        ++outstandingEvents;

        DPRINTF(DRAM, "Refresh due\n");

        if ((rank == memory.activeRank)
            && (memory.nextReqEvent.scheduled())) {

            DPRINTF(DRAM, "Refresh awaiting draining\n");

        if (inLowPowerState) {
            DPRINTF(DRAM, "Wake Up for refresh\n");

            scheduleWakeUpEvent(memory.tXP);
    if (refreshState == REF_PRE) {

        if (numBanksActive != 0) {

            DPRINTF(DRAM, "Precharging all\n");

            for (auto& b : banks) {

                pre_at = std::max(b.preAllowedAt, pre_at);

            Tick act_allowed_at = pre_at + memory.tRP;

            for (auto& b : banks) {

                memory.prechargeBank(*this, b, pre_at, false);

                b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
                b.preAllowedAt = std::max(b.preAllowedAt, pre_at);

            cmdList.push_back(Command(MemCommand::PREA, 0, pre_at));

                    memory.timeStampOffset, rank);

            DPRINTF(DRAM, "All banks already precharged, starting refresh\n");

        assert(prechargeEvent.scheduled());

        assert(numBanksActive == 0);

        assert(numBanksActive == 0);

        for (auto& b : banks) {
            b.actAllowedAt = ref_done_at;

                memory.timeStampOffset, rank);

        refreshDueAt += memory.tREFI;

        if (refreshDueAt < ref_done_at) {
            fatal("Refresh was delayed so long we cannot catch up\n");

        schedule(refreshEvent, ref_done_at);
    if (refreshState == REF_RUN) {

        assert(numBanksActive == 0);

        assert(!powerEvent.scheduled());

        if (pwrStatePostRefresh != PWR_IDLE) {

            DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in "
                    "power state %d before refreshing\n", rank,
                    pwrStatePostRefresh);

        } else if (lowPowerEntryReady()) {
            DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT"
                    " in a low power state before refreshing\n", rank);

        schedule(refreshEvent, refreshDueAt - memory.tRP);

        DPRINTF(DRAMState, "Refresh done at %llu and next refresh"
                " at %llu\n", curTick(), refreshDueAt);

    if (!powerEvent.scheduled()) {
        DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",

        pwrStateTrans = pwr_state;

        panic("Scheduled power event at %llu to state %d, "
              "with scheduled event at %llu to %d\n", tick, pwr_state,
              powerEvent.when(), pwrStateTrans);
        schedulePowerEvent(pwr_state, tick);

        cmdList.push_back(Command(MemCommand::PDN_F_ACT, 0, tick));

                memory.tCK) - memory.timeStampOffset, rank);

        schedulePowerEvent(pwr_state, tick);

        cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));

                memory.tCK) - memory.timeStampOffset, rank);

    } else if (pwr_state == PWR_REF) {

        if (pwrStatePostRefresh == PWR_ACT_PDN || !lowPowerEntryReady()) {

            cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));

                    memory.tCK) - memory.timeStampOffset, rank);

        schedulePowerEvent(PWR_SREF, tick);

        cmdList.push_back(Command(MemCommand::SREN, 0, tick));

                memory.tCK) - memory.timeStampOffset, rank);

        wakeUpAllowedAt = tick + memory.tCK;

    inLowPowerState = true;
    Tick wake_up_tick = std::max(curTick(), wakeUpAllowedAt);

    DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n",
            rank, wake_up_tick);

    schedule(wakeUpEvent, wake_up_tick);

    for (auto& b : banks) {

        b.colAllowedAt = std::max(wake_up_tick + exit_delay, b.colAllowedAt);
        b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt);
        b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt);
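// Editor's note (not in the original source): on wake-up every bank's
// column, precharge and activate availability is pushed to at least
// wake_up_tick + exit_delay, so no command can issue before the power-down
// or self-refresh exit latency (tXP or tXS) has elapsed.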
    inLowPowerState = false;

        cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick));

                memory.tCK) - memory.timeStampOffset, rank);

        cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick));

                memory.tCK) - memory.timeStampOffset, rank);

    } else if (pwrStateTrans == PWR_SREF) {
        cmdList.push_back(Command(MemCommand::SREX, 0, wake_up_tick));

                memory.tCK) - memory.timeStampOffset, rank);

    assert(curTick() >= pwrStateTick);

    pwrStateTime[prev_state] += duration;

        totalIdleTime += duration;

        assert(outstandingEvents == 1);

        --outstandingEvents;

        DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);

            DPRINTF(DRAMState, "Switching to power down state after refreshing"
                    " rank %d at %llu tick\n", rank, curTick());
        if (!memory.nextReqEvent.scheduled()) {
            DPRINTF(DRAM, "Scheduling next request after refreshing rank %d\n",

        DPRINTF(DRAMState, "All banks precharged\n");

            if (!activateEvent.scheduled()) {

                assert(!powerEvent.scheduled());

            assert(prechargeEvent.scheduled());

        DPRINTF(DRAMState, "Refreshing\n");
    power.powerlib.updateCounters(false);

    power.powerlib.calcEnergy();

    Data::MemoryPowerModel::Energy energy = power.powerlib.getEnergy();
    Data::MemoryPowerModel::Power rank_power = power.powerlib.getPower();

    actEnergy = energy.act_energy * memory.devicesPerRank;
    preEnergy = energy.pre_energy * memory.devicesPerRank;
    readEnergy = energy.read_energy * memory.devicesPerRank;
    writeEnergy = energy.write_energy * memory.devicesPerRank;
    refreshEnergy = energy.ref_energy * memory.devicesPerRank;
    actBackEnergy = energy.act_stdby_energy * memory.devicesPerRank;
    preBackEnergy = energy.pre_stdby_energy * memory.devicesPerRank;
    actPowerDownEnergy = energy.f_act_pd_energy * memory.devicesPerRank;
    prePowerDownEnergy = energy.f_pre_pd_energy * memory.devicesPerRank;
    selfRefreshEnergy = energy.sref_energy * memory.devicesPerRank;
    totalEnergy = energy.total_energy * memory.devicesPerRank;
    averagePower = rank_power.average_power * memory.devicesPerRank;
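// Editor's note (not in the original source): the energies returned by the
// DRAMPower library are presumably per device, so each component is scaled
// by devicesPerRank to obtain the per-rank values reported by the
// "... per rank (pJ)" statistics registered below.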
    DPRINTF(DRAM, "Computing final stats\n");

    using namespace Stats;

        .name(name() + ".memoryStateTime")
        .desc("Time in different power states");
    pwrStateTime.subname(0, "IDLE");
    pwrStateTime.subname(1, "REF");
    pwrStateTime.subname(2, "SREF");
    pwrStateTime.subname(3, "PRE_PDN");
    pwrStateTime.subname(4, "ACT");
    pwrStateTime.subname(5, "ACT_PDN");

        .name(name() + ".actEnergy")
        .desc("Energy for activate commands per rank (pJ)");

        .name(name() + ".preEnergy")
        .desc("Energy for precharge commands per rank (pJ)");

        .name(name() + ".readEnergy")
        .desc("Energy for read commands per rank (pJ)");

        .name(name() + ".writeEnergy")
        .desc("Energy for write commands per rank (pJ)");

        .name(name() + ".refreshEnergy")
        .desc("Energy for refresh commands per rank (pJ)");

        .name(name() + ".actBackEnergy")
        .desc("Energy for active background per rank (pJ)");

        .name(name() + ".preBackEnergy")
        .desc("Energy for precharge background per rank (pJ)");

        .name(name() + ".actPowerDownEnergy")
        .desc("Energy for active power-down per rank (pJ)");

        .name(name() + ".prePowerDownEnergy")
        .desc("Energy for precharge power-down per rank (pJ)");

        .name(name() + ".selfRefreshEnergy")
        .desc("Energy for self refresh per rank (pJ)");

        .name(name() + ".totalEnergy")
        .desc("Total energy per rank (pJ)");

        .name(name() + ".averagePower")
        .desc("Core power per rank (mW)");

        .name(name() + ".totalIdleTime")
        .desc("Total Idle time Per DRAM Rank");
    using namespace Stats;

        .desc("Number of read requests accepted");

        .desc("Number of write requests accepted");

        .desc("Number of DRAM read bursts, "
              "including those serviced by the write queue");

        .desc("Number of DRAM write bursts, "
              "including those merged in the write queue");

        .desc("Number of DRAM read bursts serviced by the write queue");

        .desc("Number of DRAM write bursts merged with an existing one");

        .name(name() + ".neitherReadNorWriteReqs")
        .desc("Number of requests that are neither read nor write");
        .desc("Per bank read bursts");
        .desc("Per bank write bursts");

        .desc("Average read queue length when enqueuing")

        .desc("Average write queue length when enqueuing")

        .desc("Total ticks spent queuing");

        .desc("Total ticks spent in databus transfers");

        .desc("Total ticks spent from burst creation until serviced "

        .desc("Average queueing delay per DRAM burst")

        .desc("Average bus latency per DRAM burst")

        .desc("Average memory access latency per DRAM burst")

        .desc("Number of times read queue was full causing retry");

        .desc("Number of times write queue was full causing retry");

        .desc("Number of row buffer hits during reads");

        .desc("Number of row buffer hits during writes");

        .desc("Row buffer hit rate for reads")

        .desc("Row buffer hit rate for writes")

        .desc("Read request sizes (log2)");

        .desc("Write request sizes (log2)");

        .desc("What read queue length does an incoming req see");

        .desc("What write queue length does an incoming req see");

        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")

        .desc("Reads before turning the bus around for writes")

        .desc("Writes before turning the bus around for reads")

        .desc("Total number of bytes read from DRAM");

        .desc("Total number of bytes read from write queue");

        .desc("Total number of bytes written to DRAM");

        .desc("Total read bytes from the system interface side");

        .desc("Total written bytes from the system interface side");

        .desc("Average DRAM read bandwidth in MiByte/s")

        .desc("Average achieved write bandwidth in MiByte/s")

        .desc("Average system read bandwidth in MiByte/s")

        .desc("Average system write bandwidth in MiByte/s")

        .desc("Theoretical peak bandwidth in MiByte/s")

        .desc("Data bus utilization in percentage")

        .desc("Total gap between requests");

        .desc("Average gap between requests")

        .desc("Data bus utilization in percentage for reads")

        .desc("Data bus utilization in percentage for writes")

        .desc("Row buffer hit rate, read and write combined")
    if (if_name != "port") {

    DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"

            DPRINTF(DRAM, "Rank%d: Forcing self-refresh wakeup in drain\n",

            r->scheduleWakeUpEvent(tXS);

    bool all_ranks_drained = true;

        all_ranks_drained = r->inPwrIdleState() && all_ranks_drained;

    return all_ranks_drained;

    ranges.push_back(memory.getAddrRange());

    if (!queue.checkFunctional(pkt)) {

        memory.recvFunctional(pkt);

    return memory.recvAtomic(pkt);

    return memory.recvTimingReq(pkt);

DRAMCtrlParams::create()