MessageBuffer.cc
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/network/MessageBuffer.hh"

#include <cassert>

#include "base/cprintf.hh"
#include "base/misc.hh"
#include "base/random.hh"
#include "base/stl_helpers.hh"
#include "debug/RubyQueue.hh"
#include "mem/ruby/system/RubySystem.hh"

using namespace std;
using m5::stl_helpers::operator<<;

MessageBuffer::MessageBuffer(const Params *p)
    : SimObject(p), m_stall_map_size(0),
    m_max_size(p->buffer_size), m_time_last_time_size_checked(0),
    m_time_last_time_enqueue(0), m_time_last_time_pop(0),
    m_last_arrival_time(0), m_strict_fifo(p->ordered),
    m_randomization(p->randomization)
{
    m_msg_counter = 0;
    m_consumer = NULL;
    m_size_last_time_size_checked = 0;
    m_size_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
    m_priority_rank = 0;

    m_stall_msg_map.clear();
    m_input_link_id = 0;
    m_vnet_id = 0;

    m_buf_msgs = 0;
    m_stall_time = 0;

    m_dequeue_callback = nullptr;
}

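// Return the number of messages in the buffer, sampling the heap size at
// most once per tick.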
unsigned int
MessageBuffer::getSize(Tick curTime)
{
    if (m_time_last_time_size_checked != curTime) {
        m_time_last_time_size_checked = curTime;
        m_size_last_time_size_checked = m_prio_heap.size();
    }

    return m_size_last_time_size_checked;
}

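// Check whether n more messages can be enqueued this cycle without
// exceeding m_max_size; a max size of 0 means the buffer is unbounded.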
bool
MessageBuffer::areNSlotsAvailable(unsigned int n, Tick current_time)
{
    // fast path when message buffers have infinite size
    if (m_max_size == 0) {
        return true;
    }

    // determine the correct size for the current cycle
    // pop operations shouldn't affect the network's visible size
    // until the scheduled cycle, but enqueue operations affect the
    // visible size immediately
    unsigned int current_size = 0;

    if (m_time_last_time_pop < current_time) {
        // no pops this cycle - heap size is correct
        current_size = m_prio_heap.size();
    } else {
        if (m_time_last_time_enqueue < current_time) {
            // no enqueues this cycle - m_size_at_cycle_start is correct
            current_size = m_size_at_cycle_start;
        } else {
            // both pops and enqueues occurred this cycle - add newly
            // enqueued msgs to m_size_at_cycle_start
            current_size = m_size_at_cycle_start + m_msgs_this_cycle;
        }
    }

    // now compare the new size with our max size
    if (current_size + m_stall_map_size + n <= m_max_size) {
        return true;
    } else {
        DPRINTF(RubyQueue, "n: %d, current_size: %d, heap size: %d, "
                "m_max_size: %d\n",
                n, current_size, m_prio_heap.size(), m_max_size);
        m_not_avail_count++;
        return false;
    }
}

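// Return the message at the head of the queue without removing it.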
const Message*
MessageBuffer::peek() const
{
    DPRINTF(RubyQueue, "Peeking at head of queue.\n");
    const Message* msg_ptr = m_prio_heap.front().get();
    assert(msg_ptr);

    DPRINTF(RubyQueue, "Message: %s\n", (*msg_ptr));
    return msg_ptr;
}

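// Compute a small pseudo-random delay, used when message randomization
// is enabled.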
// FIXME - move me somewhere else
Tick
MessageBuffer::random_time()
{
    Tick time = 1;
    time += random_mt.random(0, 3);  // [0...3]
    if (random_mt.random(0, 7) == 0) {  // 1 in 8 chance
        time += 100 + random_mt.random(1, 15); // 100 + [1...15]
    }
    return time;
}

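// Insert a message into the priority heap and schedule the consumer to
// wake up at the message's arrival time.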
void
MessageBuffer::enqueue(MsgPtr message, Tick current_time, Tick delta)
{
    // record current time in case we have a pop that also adjusts my size
    if (m_time_last_time_enqueue < current_time) {
        m_msgs_this_cycle = 0;  // first msg this cycle
        m_time_last_time_enqueue = current_time;
    }

    m_msg_counter++;
    m_msgs_this_cycle++;

    // Calculate the arrival time of the message, that is, the first
    // cycle the message can be dequeued.
    assert(delta > 0);
    Tick arrival_time = 0;

    if (!RubySystem::getRandomization() || !m_randomization) {
        // No randomization
        arrival_time = current_time + delta;
    } else {
        // Randomization - ignore delta
        if (m_strict_fifo) {
            if (m_last_arrival_time < current_time) {
                m_last_arrival_time = current_time;
            }
            arrival_time = m_last_arrival_time + random_time();
        } else {
            arrival_time = current_time + random_time();
        }
    }

    // Check the arrival time
    assert(arrival_time > current_time);
    if (m_strict_fifo) {
        if (arrival_time < m_last_arrival_time) {
            panic("FIFO ordering violated: %s name: %s current time: %d "
                  "delta: %d arrival_time: %d last arrival_time: %d\n",
                  *this, name(), current_time, delta, arrival_time,
                  m_last_arrival_time);
        }
    }

    // If running a cache trace, don't worry about the last arrival checks
    if (!RubySystem::getWarmupEnabled()) {
        m_last_arrival_time = arrival_time;
    }

    // compute the delay cycles and set enqueue time
    Message* msg_ptr = message.get();
    assert(msg_ptr != NULL);

    assert(current_time >= msg_ptr->getLastEnqueueTime() &&
           "ensure we aren't dequeued early");

    msg_ptr->updateDelayedTicks(current_time);
    msg_ptr->setLastEnqueueTime(arrival_time);
    msg_ptr->setMsgCounter(m_msg_counter);

    // Insert the message into the priority heap
    m_prio_heap.push_back(message);
    push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
    // Increment the number of messages statistic
    m_buf_msgs++;

    DPRINTF(RubyQueue, "Enqueue arrival_time: %lld, Message: %s\n",
            arrival_time, *(message.get()));

    // Schedule the wakeup
    assert(m_consumer != NULL);
    m_consumer->scheduleEventAbsolute(arrival_time);
    m_consumer->storeEventInfo(m_vnet_id);
}

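// Update the delay ticks of the message at the head of the queue, remove
// it from the queue, and return its accumulated delay.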
Tick
MessageBuffer::dequeue(Tick current_time, bool decrement_messages)
{
    DPRINTF(RubyQueue, "Popping\n");
    assert(isReady(current_time));

    // get MsgPtr of the message about to be dequeued
    MsgPtr message = m_prio_heap.front();

    // get the delay cycles
    message->updateDelayedTicks(current_time);
    Tick delay = message->getDelayedTicks();

    m_stall_time = curTick() - message->getTime();

    // record previous size and time so the current buffer size isn't
    // adjusted until the scheduled cycle
    if (m_time_last_time_pop < current_time) {
        m_size_at_cycle_start = m_prio_heap.size();
        m_time_last_time_pop = current_time;
    }

    pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
    m_prio_heap.pop_back();
    if (decrement_messages) {
        // If the message will be removed from the queue, decrement the
        // number of messages in the queue.
        m_buf_msgs--;
    }

    // if a dequeue callback was requested, call it now
    if (m_dequeue_callback) {
        m_dequeue_callback();
    }

    return delay;
}

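// Register a callback to be invoked each time a message is dequeued.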
void
MessageBuffer::registerDequeueCallback(std::function<void()> callback)
{
    m_dequeue_callback = callback;
}

void
MessageBuffer::unregisterDequeueCallback()
{
    m_dequeue_callback = nullptr;
}

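// Discard all queued messages and reset the per-cycle bookkeeping.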
void
MessageBuffer::clear()
{
    m_prio_heap.clear();

    m_msg_counter = 0;
    m_time_last_time_enqueue = 0;
    m_time_last_time_pop = 0;
    m_size_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
}

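// Re-enqueue the head message with a new arrival time of current_time +
// recycle_latency and reschedule the consumer.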
void
MessageBuffer::recycle(Tick current_time, Tick recycle_latency)
{
    DPRINTF(RubyQueue, "Recycling.\n");
    assert(isReady(current_time));
    MsgPtr node = m_prio_heap.front();
    pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());

    Tick future_time = current_time + recycle_latency;
    node->setLastEnqueueTime(future_time);

    m_prio_heap.back() = node;
    push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
    m_consumer->scheduleEventAbsolute(future_time);
}

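// Move a list of stalled messages back into the priority heap and schedule
// the consumer for the given tick.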
void
MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
{
    while (!lt.empty()) {
        m_msg_counter++;
        MsgPtr m = lt.front();
        m->setLastEnqueueTime(schdTick);
        m->setMsgCounter(m_msg_counter);

        m_prio_heap.push_back(m);
        push_heap(m_prio_heap.begin(), m_prio_heap.end(),
                  greater<MsgPtr>());

        m_consumer->scheduleEventAbsolute(schdTick);
        lt.pop_front();
    }
}

void
MessageBuffer::reanalyzeMessages(Addr addr, Tick current_time)
{
    DPRINTF(RubyQueue, "ReanalyzeMessages %#x\n", addr);
    assert(m_stall_msg_map.count(addr) > 0);

    //
    // Put all stalled messages associated with this address back on the
    // prio heap. The reanalyzeList call will make sure the consumer is
    // scheduled for the current cycle so that the previously stalled messages
    // will be observed before any younger messages that may arrive this cycle.
    //
    m_stall_map_size -= m_stall_msg_map[addr].size();
    assert(m_stall_map_size >= 0);
    reanalyzeList(m_stall_msg_map[addr], current_time);
    m_stall_msg_map.erase(addr);
}

void
MessageBuffer::reanalyzeAllMessages(Tick current_time)
{
    DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");

    //
    // Put all stalled messages back on the prio heap. The reanalyzeList
    // call will make sure the consumer is scheduled for the current cycle
    // so that the previously stalled messages will be observed before any
    // younger messages that may arrive this cycle.
    //
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end(); ++map_iter) {
        m_stall_map_size -= map_iter->second.size();
        assert(m_stall_map_size >= 0);
        reanalyzeList(map_iter->second, current_time);
    }
    m_stall_msg_map.clear();
}

void
MessageBuffer::stallMessage(Addr addr, Tick current_time)
{
    DPRINTF(RubyQueue, "Stalling due to %#x\n", addr);
    assert(isReady(current_time));
    assert(getOffset(addr) == 0);
    MsgPtr message = m_prio_heap.front();

    // Since the message will just be moved to the stall map, indicate that
    // the buffer should not decrement the m_buf_msgs statistic
    dequeue(current_time, false);

    //
    // Note: no event is scheduled to analyze the map at a later time.
    // Instead the controller is responsible for calling reanalyzeMessages
    // when these addresses change state.
    //
    (m_stall_msg_map[addr]).push_back(message);
    m_stall_map_size++;
    m_stall_count++;
}

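// Print the buffer's consumer status, queued messages, and name for
// debugging.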
void
MessageBuffer::print(ostream& out) const
{
    ccprintf(out, "[MessageBuffer: ");
    if (m_consumer != NULL) {
        ccprintf(out, " consumer-yes ");
    }

    vector<MsgPtr> copy(m_prio_heap);
    sort_heap(copy.begin(), copy.end(), greater<MsgPtr>());
    ccprintf(out, "%s] %s", copy, name());
}

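// A buffer is ready when its head message's arrival time has been reached.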
bool
MessageBuffer::isReady(Tick current_time) const
{
    return ((m_prio_heap.size() > 0) &&
            (m_prio_heap.front()->getLastEnqueueTime() <= current_time));
}

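// Register statistics for this object.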
void
MessageBuffer::regStats()
{
    m_not_avail_count
        .name(name() + ".not_avail_count")
        .desc("Number of times this buffer did not have N slots available")
        .flags(Stats::nozero);

    m_buf_msgs
        .name(name() + ".avg_buf_msgs")
        .desc("Average number of messages in buffer")
        .flags(Stats::nozero);

    m_stall_count
        .name(name() + ".num_msg_stalls")
        .desc("Number of times messages were stalled")
        .flags(Stats::nozero);

    m_occupancy
        .name(name() + ".avg_buf_occ")
        .desc("Average occupancy of buffer capacity")
        .flags(Stats::nozero);

    m_stall_time
        .name(name() + ".avg_stall_time")
        .desc("Average number of cycles messages are stalled in this MB")
        .flags(Stats::nozero);

    if (m_max_size > 0) {
        m_occupancy = m_buf_msgs / m_max_size;
    } else {
        m_occupancy = 0;
    }
}

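// Perform a functional write on every queued or stalled message that
// matches the packet, returning the number of messages written.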
uint32_t
MessageBuffer::functionalWrite(Packet *pkt)
{
    uint32_t num_functional_writes = 0;

    // Check the priority heap and write any messages that may
    // correspond to the address in the packet.
    for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {
        Message *msg = m_prio_heap[i].get();
        if (msg->functionalWrite(pkt)) {
            num_functional_writes++;
        }
    }

    // Check the stall queue and write any messages that may
    // correspond to the address in the packet.
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end();
         ++map_iter) {

        for (std::list<MsgPtr>::iterator it = (map_iter->second).begin();
             it != (map_iter->second).end(); ++it) {

            Message *msg = (*it).get();
            if (msg->functionalWrite(pkt)) {
                num_functional_writes++;
            }
        }
    }

    return num_functional_writes;
}

MessageBuffer *
MessageBufferParams::create()
{
    return new MessageBuffer(this);
}