39 #include "debug/RubyCacheTrace.hh"
40 #include "debug/RubySystem.hh"
60     : ClockedObject(p), m_access_backing_store(p->access_backing_store),
61       m_cache_recorder(NULL)
103 uint64_t cache_trace_size,
104 uint64_t block_size_bytes)
111 if (sequencer_ptr == NULL) {
112 sequencer_ptr = sequencer_map[cntrl];
116 assert(sequencer_ptr != NULL);
119 if (sequencer_map[cntrl] == NULL) {
120 sequencer_map[cntrl] = sequencer_ptr;
131 sequencer_map, block_size_bytes);
140 DPRINTF(RubyCacheTrace, "Recording Cache Trace\n");
145 DPRINTF(RubyCacheTrace, "Cache Trace Complete\n");
149 DPRINTF(RubyCacheTrace, "Recording current tick %ld\n", curtick_original);
157 DPRINTF(RubyCacheTrace, "Event %s auto-deletes when descheduled,"
158         " not recording\n", curr_head->name());
160 original_events.push_back(make_pair(curr_head, curr_head->when()));
166 DPRINTF(RubyCacheTrace, "Starting cache flush\n");
169 DPRINTF(RubyCacheTrace, "Cache flush complete\n");
182 while (!original_events.empty()) {
185 original_events.pop_back();
199 warn_once("Ruby memory writeback is experimental. Continuing simulation "
200           "afterwards may not always work as intended.");
208 uint64_t uncompressed_trace_size)
213 int fd = creat(thefile.c_str(), 0664);
216 fatal("Can't open memory trace file '%s'\n", filename);
219 gzFile compressedMemory = gzdopen(fd, "wb");
220 if (compressedMemory == NULL)
221 fatal("Insufficient memory to allocate compression state for %s\n",
224 if (gzwrite(compressedMemory, raw_data, uncompressed_trace_size) !=
225 uncompressed_trace_size) {
226 fatal("Write failed on memory trace file '%s'\n", filename);
229 if (gzclose(compressedMemory)) {
230 fatal("Close failed on memory trace file '%s'\n", filename);
248 fatal("Call memWriteback() before serialize() to create ruby trace");
252 uint8_t *raw_data = new uint8_t[4096];
255 string cache_trace_file = name() + ".cache.gz";
275 uint64_t &uncompressed_trace_size)
278 gzFile compressedTrace;
281 int fd = open(filename.c_str(), O_RDONLY);
284 fatal("Unable to open trace file %s", filename);
287 compressedTrace = gzdopen(fd, "rb");
288 if (compressedTrace == NULL) {
289 fatal("Insufficient memory to allocate compression state for %s\n",
293 raw_data = new uint8_t[uncompressed_trace_size];
294 if (gzread(compressedTrace, raw_data, uncompressed_trace_size) <
295 uncompressed_trace_size) {
296 fatal("Unable to read complete trace from file %s\n", filename);
299 if (gzclose(compressedTrace)) {
300 fatal("Failed to close cache trace file '%s'\n", filename);
307 uint8_t *uncompressed_trace = NULL;
315 string cache_trace_file;
316 uint64_t cache_trace_size = 0;
320 cache_trace_file = cp.cptDir + "/" + cache_trace_file;
352 DPRINTF(RubyCacheTrace, "Starting ruby cache warmup\n");
404 AccessPermission access_perm = AccessPermission_NotPresent;
409 unsigned int num_ro = 0;
410 unsigned int num_rw = 0;
411 unsigned int num_busy = 0;
412 unsigned int num_backing_store = 0;
413 unsigned int num_invalid = 0;
417 for (unsigned int i = 0; i < num_controllers; ++i) {
419 if (access_perm == AccessPermission_Read_Only)
421 else if (access_perm == AccessPermission_Read_Write)
423 else if (access_perm == AccessPermission_Busy)
425 else if (access_perm == AccessPermission_Backing_Store)
431 else if (access_perm == AccessPermission_Invalid ||
432 access_perm == AccessPermission_NotPresent)
445 if (num_invalid == (num_controllers - 1) && num_backing_store == 1) {
447 for (unsigned int i = 0; i < num_controllers; ++i) {
449 if (access_perm == AccessPermission_Backing_Store) {
454 } else if (num_ro > 0 || num_rw == 1) {
460 num_busy, num_ro, num_rw);
464 for (unsigned int i = 0; i < num_controllers; ++i) {
466 if (access_perm == AccessPermission_Read_Only ||
467 access_perm == AccessPermission_Read_Write) {
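The loops above implement the functionalRead() policy: classify every controller's permission for the line, read from the lone Backing_Store copy only when nothing else holds the line (a cached copy could make the backing store stale), and otherwise read from any Read_Only sharer or the single Read_Write owner. A condensed, hedged sketch of that member-function body, assuming AbstractController provides getAccessPermission(Addr) and functionalRead(Addr, Packet *):

    // Hedged sketch of the functionalRead() policy, not the verbatim code.
    Addr line_address = makeLineAddress(pkt->getAddr());
    unsigned int num_controllers = m_abs_cntrl_vec.size();
    unsigned int num_ro = 0, num_rw = 0, num_busy = 0;
    unsigned int num_backing_store = 0, num_invalid = 0;

    for (unsigned int i = 0; i < num_controllers; ++i) {
        AccessPermission perm =
            m_abs_cntrl_vec[i]->getAccessPermission(line_address);
        if (perm == AccessPermission_Read_Only)            num_ro++;
        else if (perm == AccessPermission_Read_Write)      num_rw++;
        else if (perm == AccessPermission_Busy)            num_busy++;   // reported in the DPRINTF at line 460
        else if (perm == AccessPermission_Backing_Store)   num_backing_store++;
        else if (perm == AccessPermission_Invalid ||
                 perm == AccessPermission_NotPresent)      num_invalid++;
    }

    if (num_invalid == (num_controllers - 1) && num_backing_store == 1) {
        // The backing store holds the only copy, so it cannot be stale.
        for (unsigned int i = 0; i < num_controllers; ++i) {
            if (m_abs_cntrl_vec[i]->getAccessPermission(line_address) ==
                    AccessPermission_Backing_Store) {
                m_abs_cntrl_vec[i]->functionalRead(line_address, pkt);
                return true;
            }
        }
    } else if (num_ro > 0 || num_rw == 1) {
        // Prefer a cached copy: any RO sharer or the single RW owner is current.
        for (unsigned int i = 0; i < num_controllers; ++i) {
            AccessPermission perm =
                m_abs_cntrl_vec[i]->getAccessPermission(line_address);
            if (perm == AccessPermission_Read_Only ||
                perm == AccessPermission_Read_Write) {
                m_abs_cntrl_vec[i]->functionalRead(line_address, pkt);
                return true;
            }
        }
    }
    return false;  // line is busy / in transit; the functional access fails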
486 AccessPermission access_perm = AccessPermission_NotPresent;
493 for (unsigned int i = 0; i < num_controllers; ++i) {
494 num_functional_writes +=
498 if (access_perm != AccessPermission_Invalid &&
499 access_perm != AccessPermission_NotPresent) {
500 num_functional_writes +=
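functionalWrite() takes the opposite approach to the read: rather than picking one up-to-date copy, it pushes the data into every controller that holds the line in any valid state, so no stale copy can survive it. A hedged sketch of that loop, again assuming per-controller getAccessPermission() and a per-line functionalWrite(); the additional writes at line 494, which target in-flight message buffers, are omitted here:

    // Hedged sketch of the functionalWrite() broadcast, not the verbatim code.
    Addr line_addr = makeLineAddress(pkt->getAddr());
    unsigned int num_controllers = m_abs_cntrl_vec.size();
    uint32_t num_functional_writes = 0;   // kept only for debugging/statistics

    for (unsigned int i = 0; i < num_controllers; ++i) {
        AccessPermission perm =
            m_abs_cntrl_vec[i]->getAccessPermission(line_addr);
        if (perm != AccessPermission_Invalid &&
            perm != AccessPermission_NotPresent) {
            num_functional_writes +=
                m_abs_cntrl_vec[i]->functionalWrite(line_addr, pkt);
        }
    }
    return true;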
511 #ifdef CHECK_COHERENCE
521 RubySystem::checkGlobalCoherenceInvariant(const Address& addr)
525 bool sharedDetected = false;
528 for (int i = 0; i < m_chip_vector.size(); i++) {
529 if (m_chip_vector[i]->isBlockExclusive(addr)) {
530 if (exclusive != -1) {
532 WARN_EXPR(exclusive);
533 WARN_EXPR(m_chip_vector[i]->getID());
535 WARN_EXPR(getTime());
536 ERROR_MSG("Coherence Violation Detected -- 2 exclusive chips");
537 } else if (sharedDetected) {
538 WARN_EXPR(lastShared);
539 WARN_EXPR(m_chip_vector[i]->getID());
541 WARN_EXPR(getTime());
542 ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
544 exclusive = m_chip_vector[i]->getID();
546 } else if (m_chip_vector[i]->isBlockShared(addr)) {
547 sharedDetected = true;
548 lastShared = m_chip_vector[i]->getID();
550 if (exclusive != -1) {
551 WARN_EXPR(lastShared);
552 WARN_EXPR(exclusive);
554 WARN_EXPR(getTime());
555 ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
564 RubySystemParams::create()
EventQueue * eventq
A pointer to this object's event queue.
void enqueueNextFetchRequest()
Function for fetching the requests that warm up the memory and the caches.
static void readCompressedTrace(std::string filename, uint8_t *&raw_data, uint64_t &uncompressed_trace_size)
static void writeCompressedTrace(uint8_t *raw_data, std::string file, uint64_t uncompressed_trace_size)
GlobalSimLoopExitEvent * simulate(Tick num_cycles)
Simulate for num_cycles additional cycles.
void serialize(CheckpointOut &cp) const override
Serialize an object.
SimpleMemory * m_phys_mem
virtual const std::string name() const
void deschedule(Event *event)
Deschedule the specified event.
RubySystem * m_ruby_system
static bool m_randomization
#define UNSERIALIZE_OPT_SCALAR(scalar)
void makeCacheRecorder(uint8_t *uncompressed_trace, uint64_t cache_trace_size, uint64_t block_size_bytes)
CacheRecorder * m_cache_recorder
static uint32_t m_block_size_bytes
RubySystem(const Params *p)
uint64_t aggregateRecords(uint8_t **data, uint64_t size)
bool functionalRead(Packet *ptr)
static bool m_warmup_enabled
void resetClock() const
Reset the object's clock using the current global tick value.
bool functionalWrite(Packet *ptr)
void drainResume() override
Resume execution after a successful drain.
Declaration of Statistics objects.
void enqueueRubyEvent(Tick tick)
SimpleMemory declaration.
MachineID getMachineID() const
Cycles curCycle() const
Determine the current cycle, corresponding to a tick aligned to a clock edge.
static bool getWarmupEnabled()
#define UNSERIALIZE_SCALAR(scalar)
Tick curTick()
The current simulated tick.
void enqueueNextFlushRequest()
Function for flushing the memory contents of the caches to the main memory.
bool isAutoDelete() const
void registerNetwork(Network *)
Tick when() const
Get the time that the event is scheduled.
uint64_t Tick
Tick count type.
The ClockedObject class extends the SimObject with a clock and accessor functions to relate ticks to ...
std::vector< std::map< uint32_t, AbstractController * > > m_abstract_controls
bool isPowerOf2(const T &n)
void startup() override
startup() is the final initialization call before simulation.
void setCurTick(Tick newVal)
void memWriteback() override
Write back dirty buffers to memory using functional writes.
static bool getCooldownEnabled()
static uint32_t m_memory_size_bits
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
A Packet is used to encapsulate a transfer between two objects in the memory system (e...
Addr makeLineAddress(Addr addr)
void resetStats() override
Reset statistics associated with this object.
static uint32_t m_block_size_bits
std::vector< AbstractController * > m_abs_cntrl_vec
void registerDumpCallback(Callback *cb)
Register a callback that should be called whenever statistics are about to be dumped.
#define SERIALIZE_SCALAR(scalar)
static const int NumArgumentRegs M5_VAR_USED
Event * replaceHead(Event *s)
function for replacing the head of the event queue, so that a different set of events can run without...
int floorLog2(unsigned x)
virtual const std::string name() const
virtual uint32_t functionalWrite(Packet *pkt)
std::ostream CheckpointOut
void schedule(Event *event, Tick when, bool global=false)
Schedule the given event on this queue.
static bool m_cooldown_enabled
static unsigned m_systems_to_warmup
static uint32_t getBlockSizeBytes()
void registerAbstractController(AbstractController *)
void unserialize(CheckpointIn &cp) override
Unserialize an object.