atomic.cc
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "cpu/simple/atomic.hh"

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;
AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseSimpleCPU::init();

    int cid = threadContexts[0]->contextId();
    ifetch_req.setContext(cid);
    data_read_req.setContext(cid);
    data_write_req.setContext(cid);
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem), dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

DrainState
AtomicSimpleCPU::drain()
{
    if (switchedOut())
        return DrainState::Drained;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}

void
AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }

            TheISA::handleLockedSnoop(threadInfo[tid]->thread,
                                      pkt, dcachePort.cacheBlockMask);
        }
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->notIdleFraction = 1;
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    numCycles += delta;
    ppCycles->notify(delta);

    if (!tickEvent.scheduled()) {
        //Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
        == activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }

    BaseCPU::activateContext(thread_num);
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
    }

    BaseCPU::suspendContext(thread_num);
}


Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);

    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // if snoop invalidates, release any associated locks
    // When run without caches, Invalidation packets will not be received
    // hence we must check if the incoming packets are writes and wakeup
    // the processor accordingly
    if (pkt->isInvalidate() || pkt->isWrite()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }
}

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t *data, unsigned size,
                         Request::Flags flags)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }

            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

Fault
AtomicSimpleCPU::initiateMemRead(Addr addr, unsigned size,
                                 Request::Flags flags)
{
    panic("initiateMemRead() is for timing accesses, and should "
          "never be called on AtomicSimpleCPU.\n");
}

Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
                          Request::Flags flags, uint64_t *res)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access =
                    TheISA::handleLockedWrite(thread, req,
                                              dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);

                    // Notify other threads on this CPU of write
                    threadSnoop(&pkt, curThread);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(locked);
                locked = false;
            }


            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req.setContext(cid);
        data_read_req.setContext(cid);
        data_write_req.setContext(cid);
    }

    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        ppCycles->notify(1);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, thread->getTC(),
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                    system->getPhysMem().access(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            Tick stall_ticks = 0;
            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                }
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                if (dynamic_pointer_cast<SyscallRetryFault>(fault)) {
                    // Retry execution of system calls after a delay.
                    // Prevents immediate re-execution since conditions which
                    // caused the retry are unlikely to change every tick.
                    stall_ticks += clockEdge(syscallRetryLatency) - curTick();
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
                                (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    return new AtomicSimpleCPU(this);
}
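
The `AtomicSimpleCPUParams::create()` hook above is what gem5's Python configuration layer invokes when a script instantiates this CPU model; `width`, `simulate_data_stalls`, and `simulate_inst_stalls` read in the constructor are exposed as Python parameters, and `verifyMemoryMode()` enforces that the enclosing system is in 'atomic' memory mode. A minimal sketch of such a config script, assuming a syscall-emulation X86 build of roughly this vintage (the Python API shifts between gem5 releases, and the script name is hypothetical):

```python
# sketch_atomic_config.py -- illustrative sketch, not from this file.
# Run as: build/X86/gem5.opt sketch_atomic_config.py
import m5
from m5.objects import *

system = System()
system.clk_domain = SrcClockDomain(clock='1GHz',
                                   voltage_domain=VoltageDomain())

# verifyMemoryMode() above fatal()s unless the system is in 'atomic' mode.
system.mem_mode = 'atomic'
system.mem_ranges = [AddrRange('512MB')]

# 'width' is the p->width parameter read in the constructor above:
# tick() attempts up to that many instructions per CPU clock cycle.
system.cpu = AtomicSimpleCPU(width=1)

# Wire the CPU's ports (icachePort/dcachePort in the C++ above) to a bus;
# with no caches in between, recvAtomicSnoop handles writes as well as
# invalidations, as the comment in that function explains.
system.membus = SystemXBar()
system.cpu.icache_port = system.membus.slave
system.cpu.dcache_port = system.membus.slave

system.mem_ctrl = SimpleMemory(range=system.mem_ranges[0])
system.mem_ctrl.port = system.membus.master
system.system_port = system.membus.slave

# X86-specific interrupt wiring.
system.cpu.createInterruptController()
system.cpu.interrupts[0].pio = system.membus.master
system.cpu.interrupts[0].int_master = system.membus.slave
system.cpu.interrupts[0].int_slave = system.membus.master

# Syscall-emulation workload; the binary is the stock gem5 test program.
process = Process()
process.cmd = ['tests/test-progs/hello/bin/x86/linux/hello']
system.cpu.workload = process
system.cpu.createThreads()

root = Root(full_system=False, system=system)
m5.instantiate()
event = m5.simulate()
print('Exiting @ tick %i because %s' % (m5.curTick(), event.getCause()))
```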