gem5
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
execute.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013-2014 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  *
37  * Authors: Andrew Bardsley
38  */
39 
#include "cpu/minor/execute.hh"

#include "arch/locked_mem.hh"
#include "arch/registers.hh"
#include "arch/utility.hh"
#include "cpu/minor/cpu.hh"
#include "cpu/minor/exec_context.hh"
#include "cpu/minor/fetch1.hh"
#include "cpu/minor/lsq.hh"
#include "cpu/op_class.hh"
#include "debug/Activity.hh"
#include "debug/Branch.hh"
#include "debug/Drain.hh"
#include "debug/MinorExecute.hh"
#include "debug/MinorInterrupt.hh"
#include "debug/MinorMem.hh"
#include "debug/MinorTrace.hh"
#include "debug/PCEvent.hh"
58 
59 namespace Minor
60 {
61 
62 Execute::Execute(const std::string &name_,
63  MinorCPU &cpu_,
64  MinorCPUParams &params,
67  Named(name_),
68  inp(inp_),
69  out(out_),
70  cpu(cpu_),
71  issueLimit(params.executeIssueLimit),
72  memoryIssueLimit(params.executeMemoryIssueLimit),
73  commitLimit(params.executeCommitLimit),
74  memoryCommitLimit(params.executeMemoryCommitLimit),
75  processMoreThanOneInput(params.executeCycleInput),
76  fuDescriptions(*params.executeFuncUnits),
77  numFuncUnits(fuDescriptions.funcUnits.size()),
78  setTraceTimeOnCommit(params.executeSetTraceTimeOnCommit),
79  setTraceTimeOnIssue(params.executeSetTraceTimeOnIssue),
80  allowEarlyMemIssue(params.executeAllowEarlyMemoryIssue),
81  noCostFUIndex(fuDescriptions.funcUnits.size() + 1),
82  lsq(name_ + ".lsq", name_ + ".dcache_port",
83  cpu_, *this,
84  params.executeMaxAccessesInMemory,
85  params.executeMemoryWidth,
86  params.executeLSQRequestsQueueSize,
87  params.executeLSQTransfersQueueSize,
88  params.executeLSQStoreBufferSize,
89  params.executeLSQMaxStoreBufferStoresPerCycle),
90  executeInfo(params.numThreads, ExecuteThreadInfo(params.executeCommitLimit)),
91  interruptPriority(0),
92  issuePriority(0),
93  commitPriority(0)
94 {
95  if (commitLimit < 1) {
96  fatal("%s: executeCommitLimit must be >= 1 (%d)\n", name_,
97  commitLimit);
98  }
99 
100  if (issueLimit < 1) {
101  fatal("%s: executeCommitLimit must be >= 1 (%d)\n", name_,
102  issueLimit);
103  }
104 
105  if (memoryIssueLimit < 1) {
106  fatal("%s: executeMemoryIssueLimit must be >= 1 (%d)\n", name_,
108  }
109 
111  fatal("%s: executeMemoryCommitLimit (%d) must be <="
112  " executeCommitLimit (%d)\n",
114  }
115 
116  if (params.executeInputBufferSize < 1) {
117  fatal("%s: executeInputBufferSize must be >= 1 (%d)\n", name_,
118  params.executeInputBufferSize);
119  }
120 
121  if (params.executeInputBufferSize < 1) {
122  fatal("%s: executeInputBufferSize must be >= 1 (%d)\n", name_,
123  params.executeInputBufferSize);
124  }
125 
126  /* This should be large enough to count all the in-FU instructions
127  * which need to be accounted for in the inFlightInsts
128  * queue */
129  unsigned int total_slots = 0;
130 
131  /* Make FUPipelines for each MinorFU */
132  for (unsigned int i = 0; i < numFuncUnits; i++) {
133  std::ostringstream fu_name;
134  MinorFU *fu_description = fuDescriptions.funcUnits[i];
135 
136  /* Note the total number of instruction slots (for sizing
137  * the inFlightInst queue) and the maximum latency of any FU
138  * (for sizing the activity recorder) */
139  total_slots += fu_description->opLat;
140 
141  fu_name << name_ << ".fu." << i;
142 
143  FUPipeline *fu = new FUPipeline(fu_name.str(), *fu_description, cpu);
144 
145  funcUnits.push_back(fu);
146  }
147 
149  for (int op_class = No_OpClass + 1; op_class < Num_OpClasses; op_class++) {
150  bool found_fu = false;
151  unsigned int fu_index = 0;
152 
153  while (fu_index < numFuncUnits && !found_fu)
154  {
155  if (funcUnits[fu_index]->provides(
156  static_cast<OpClass>(op_class)))
157  {
158  found_fu = true;
159  }
160  fu_index++;
161  }
162 
163  if (!found_fu) {
164  warn("No functional unit for OpClass %s\n",
165  Enums::OpClassStrings[op_class]);
166  }
167  }
168 
169  /* Per-thread structures */
170  for (ThreadID tid = 0; tid < params.numThreads; tid++) {
171  std::string tid_str = std::to_string(tid);
172 
173  /* Input Buffers */
174  inputBuffer.push_back(
176  name_ + ".inputBuffer" + tid_str, "insts",
177  params.executeInputBufferSize));
178 
179  /* Scoreboards */
180  scoreboard.push_back(Scoreboard(name_ + ".scoreboard" + tid_str));
181 
182  /* In-flight instruction records */
183  executeInfo[tid].inFlightInsts = new Queue<QueuedInst,
185  name_ + ".inFlightInsts" + tid_str, "insts", total_slots);
186 
187  executeInfo[tid].inFUMemInsts = new Queue<QueuedInst,
189  name_ + ".inFUMemInsts" + tid_str, "insts", total_slots);
190  }
191 }
192 
193 const ForwardInstData *
195 {
196  /* Get a line from the inputBuffer to work with */
197  if (!inputBuffer[tid].empty()) {
198  const ForwardInstData &head = inputBuffer[tid].front();
199 
200  return (head.isBubble() ? NULL : &(inputBuffer[tid].front()));
201  } else {
202  return NULL;
203  }
204 }
205 
206 void
208 {
209  if (!inputBuffer[tid].empty())
210  inputBuffer[tid].pop();
211 
212  executeInfo[tid].inputIndex = 0;
213 }
214 
215 void
217 {
218  ThreadContext *thread = cpu.getContext(inst->id.threadId);
219  const TheISA::PCState &pc_before = inst->pc;
220  TheISA::PCState target = thread->pcState();
221 
222  /* Force a branch for SerializeAfter instructions at the end of micro-op
223  * sequence when we're not suspended */
224  bool force_branch = thread->status() != ThreadContext::Suspended &&
225  !inst->isFault() &&
226  inst->isLastOpInInst() &&
227  (inst->staticInst->isSerializeAfter() ||
228  inst->staticInst->isIprAccess());
229 
230  DPRINTF(Branch, "tryToBranch before: %s after: %s%s\n",
231  pc_before, target, (force_branch ? " (forcing)" : ""));
232 
233  /* Will we change the PC to something other than the next instruction? */
234  bool must_branch = pc_before != target ||
235  fault != NoFault ||
236  force_branch;
237 
238  /* The reason for the branch data we're about to generate, set below */
240 
241  if (fault == NoFault)
242  {
243  TheISA::advancePC(target, inst->staticInst);
244  thread->pcState(target);
245 
246  DPRINTF(Branch, "Advancing current PC from: %s to: %s\n",
247  pc_before, target);
248  }
249 
250  if (inst->predictedTaken && !force_branch) {
251  /* Predicted to branch */
252  if (!must_branch) {
253  /* No branch was taken, change stream to get us back to the
254  * intended PC value */
255  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x but"
256  " none happened inst: %s\n",
257  inst->pc.instAddr(), inst->predictedTarget.instAddr(), *inst);
258 
260  } else if (inst->predictedTarget == target) {
261  /* Branch prediction got the right target, kill the branch and
262  * carry on.
263  * Note that this information to the branch predictor might get
264  * overwritten by a "real" branch during this cycle */
265  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x correctly"
266  " inst: %s\n",
267  inst->pc.instAddr(), inst->predictedTarget.instAddr(), *inst);
268 
270  } else {
271  /* Branch prediction got the wrong target */
272  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x"
273  " but got the wrong target (actual: 0x%x) inst: %s\n",
274  inst->pc.instAddr(), inst->predictedTarget.instAddr(),
275  target.instAddr(), *inst);
276 
278  }
279  } else if (must_branch) {
280  /* Unpredicted branch */
281  DPRINTF(Branch, "Unpredicted branch from 0x%x to 0x%x inst: %s\n",
282  inst->pc.instAddr(), target.instAddr(), *inst);
283 
285  } else {
286  /* No branch at all */
287  reason = BranchData::NoBranch;
288  }
289 
290  updateBranchData(inst->id.threadId, reason, inst, target, branch);
291 }
292 
293 void
295  ThreadID tid,
296  BranchData::Reason reason,
297  MinorDynInstPtr inst, const TheISA::PCState &target,
298  BranchData &branch)
299 {
300  if (reason != BranchData::NoBranch) {
301  /* Bump up the stream sequence number on a real branch*/
302  if (BranchData::isStreamChange(reason))
303  executeInfo[tid].streamSeqNum++;
304 
305  /* Branches (even mis-predictions) don't change the predictionSeqNum,
306  * just the streamSeqNum */
307  branch = BranchData(reason, tid,
308  executeInfo[tid].streamSeqNum,
309  /* Maintaining predictionSeqNum if there's no inst is just a
310  * courtesy and looks better on minorview */
311  (inst->isBubble() ? executeInfo[tid].lastPredictionSeqNum
312  : inst->id.predictionSeqNum),
313  target, inst);
314 
315  DPRINTF(Branch, "Branch data signalled: %s\n", branch);
316  }
317 }
318 
319 void
321  LSQ::LSQRequestPtr response, BranchData &branch, Fault &fault)
322 {
323  ThreadID thread_id = inst->id.threadId;
324  ThreadContext *thread = cpu.getContext(thread_id);
325 
326  ExecContext context(cpu, *cpu.threads[thread_id], *this, inst);
327 
328  PacketPtr packet = response->packet;
329 
330  bool is_load = inst->staticInst->isLoad();
331  bool is_store = inst->staticInst->isStore();
332  bool is_prefetch = inst->staticInst->isDataPrefetch();
333 
334  /* If true, the trace's predicate value will be taken from the exec
335  * context predicate, otherwise, it will be set to false */
336  bool use_context_predicate = true;
337 
338  if (response->fault != NoFault) {
339  /* Invoke memory faults. */
340  DPRINTF(MinorMem, "Completing fault from DTLB access: %s\n",
341  response->fault->name());
342 
343  if (inst->staticInst->isPrefetch()) {
344  DPRINTF(MinorMem, "Not taking fault on prefetch: %s\n",
345  response->fault->name());
346 
347  /* Don't assign to fault */
348  } else {
349  /* Take the fault raised during the TLB/memory access */
350  fault = response->fault;
351 
352  fault->invoke(thread, inst->staticInst);
353  }
354  } else if (!packet) {
355  DPRINTF(MinorMem, "Completing failed request inst: %s\n",
356  *inst);
357  use_context_predicate = false;
358  } else if (packet->isError()) {
359  DPRINTF(MinorMem, "Trying to commit error response: %s\n",
360  *inst);
361 
362  fatal("Received error response packet for inst: %s\n", *inst);
363  } else if (is_store || is_load || is_prefetch) {
364  assert(packet);
365 
366  DPRINTF(MinorMem, "Memory response inst: %s addr: 0x%x size: %d\n",
367  *inst, packet->getAddr(), packet->getSize());
368 
369  if (is_load && packet->getSize() > 0) {
370  DPRINTF(MinorMem, "Memory data[0]: 0x%x\n",
371  static_cast<unsigned int>(packet->getConstPtr<uint8_t>()[0]));
372  }
373 
374  /* Complete the memory access instruction */
375  fault = inst->staticInst->completeAcc(packet, &context,
376  inst->traceData);
377 
378  if (fault != NoFault) {
379  /* Invoke fault created by instruction completion */
380  DPRINTF(MinorMem, "Fault in memory completeAcc: %s\n",
381  fault->name());
382  fault->invoke(thread, inst->staticInst);
383  } else {
384  /* Stores need to be pushed into the store buffer to finish
385  * them off */
386  if (response->needsToBeSentToStoreBuffer())
387  lsq.sendStoreToStoreBuffer(response);
388  }
389  } else {
390  fatal("There should only ever be reads, "
391  "writes or faults at this point\n");
392  }
393 
394  lsq.popResponse(response);
395 
396  if (inst->traceData) {
397  inst->traceData->setPredicate((use_context_predicate ?
398  context.readPredicate() : false));
399  }
400 
402 
403  /* Generate output to account for branches */
404  tryToBranch(inst, fault, branch);
405 }
406 
407 bool
409 {
410  return cpu.checkInterrupts(cpu.getContext(thread_id));
411 }
412 
413 bool
415 {
416  DPRINTF(MinorInterrupt, "Considering interrupt status from PC: %s\n",
417  cpu.getContext(thread_id)->pcState());
418 
419  Fault interrupt = cpu.getInterruptController(thread_id)->getInterrupt
420  (cpu.getContext(thread_id));
421 
422  if (interrupt != NoFault) {
423  /* The interrupt *must* set pcState */
424  cpu.getInterruptController(thread_id)->updateIntrInfo
425  (cpu.getContext(thread_id));
426  interrupt->invoke(cpu.getContext(thread_id));
427 
428  assert(!lsq.accessesInFlight());
429 
430  DPRINTF(MinorInterrupt, "Invoking interrupt: %s to PC: %s\n",
431  interrupt->name(), cpu.getContext(thread_id)->pcState());
432 
433  /* Assume that an interrupt *must* cause a branch. Assert this? */
434 
436  MinorDynInst::bubble(), cpu.getContext(thread_id)->pcState(),
437  branch);
438  }
439 
440  return interrupt != NoFault;
441 }
442 
443 bool
445  bool &passed_predicate, Fault &fault)
446 {
447  bool issued = false;
448 
449  /* Set to true if the mem op. is issued and sent to the mem system */
450  passed_predicate = false;
451 
452  if (!lsq.canRequest()) {
453  /* Not acting on instruction yet as the memory
454  * queues are full */
455  issued = false;
456  } else {
457  ThreadContext *thread = cpu.getContext(inst->id.threadId);
458  TheISA::PCState old_pc = thread->pcState();
459 
460  ExecContext context(cpu, *cpu.threads[inst->id.threadId],
461  *this, inst);
462 
463  DPRINTF(MinorExecute, "Initiating memRef inst: %s\n", *inst);
464 
465  Fault init_fault = inst->staticInst->initiateAcc(&context,
466  inst->traceData);
467 
468  if (init_fault != NoFault) {
469  DPRINTF(MinorExecute, "Fault on memory inst: %s"
470  " initiateAcc: %s\n", *inst, init_fault->name());
471  fault = init_fault;
472  } else {
473  /* Only set this if the instruction passed its
474  * predicate */
475  passed_predicate = context.readPredicate();
476 
477  /* Set predicate in tracing */
478  if (inst->traceData)
479  inst->traceData->setPredicate(passed_predicate);
480 
481  /* If the instruction didn't pass its predicate (and so will not
482  * progress from here) Try to branch to correct and branch
483  * mis-prediction. */
484  if (!passed_predicate) {
485  /* Leave it up to commit to handle the fault */
486  lsq.pushFailedRequest(inst);
487  }
488  }
489 
490  /* Restore thread PC */
491  thread->pcState(old_pc);
492  issued = true;
493  }
494 
495  return issued;
496 }
497 
/** Increment a cyclic buffer index, wrapping back to 0 at cycle_size */
inline unsigned int
cyclicIndexInc(unsigned int index, unsigned int cycle_size)
{
    unsigned int next = index + 1;

    return (next == cycle_size ? 0 : next);
}
509 
/** Decrement a cyclic buffer index, wrapping to cycle_size - 1 at 0.
 *  Rewritten to avoid the original's unsigned-to-int conversion, which
 *  relied on implementation-defined narrowing when index == 0 */
inline unsigned int
cyclicIndexDec(unsigned int index, unsigned int cycle_size)
{
    return (index == 0 ? cycle_size - 1 : index - 1);
}
521 
522 unsigned int
524 {
525  const ForwardInstData *insts_in = getInput(thread_id);
526  ExecuteThreadInfo &thread = executeInfo[thread_id];
527 
528  /* Early termination if we have no instructions */
529  if (!insts_in)
530  return 0;
531 
532  /* Start from the first FU */
533  unsigned int fu_index = 0;
534 
535  /* Remains true while instructions are still being issued. If any
536  * instruction fails to issue, this is set to false and we exit issue.
537  * This strictly enforces in-order issue. For other issue behaviours,
538  * a more complicated test in the outer while loop below is needed. */
539  bool issued = true;
540 
541  /* Number of insts issues this cycle to check for issueLimit */
542  unsigned num_insts_issued = 0;
543 
544  /* Number of memory ops issues this cycle to check for memoryIssueLimit */
545  unsigned num_mem_insts_issued = 0;
546 
547  /* Number of instructions discarded this cycle in order to enforce a
548  * discardLimit. @todo, add that parameter? */
549  unsigned num_insts_discarded = 0;
550 
551  do {
552  MinorDynInstPtr inst = insts_in->insts[thread.inputIndex];
553  Fault fault = inst->fault;
554  bool discarded = false;
555  bool issued_mem_ref = false;
556 
557  if (inst->isBubble()) {
558  /* Skip */
559  issued = true;
560  } else if (cpu.getContext(thread_id)->status() ==
562  {
563  DPRINTF(MinorExecute, "Discarding inst: %s from suspended"
564  " thread\n", *inst);
565 
566  issued = true;
567  discarded = true;
568  } else if (inst->id.streamSeqNum != thread.streamSeqNum) {
569  DPRINTF(MinorExecute, "Discarding inst: %s as its stream"
570  " state was unexpected, expected: %d\n",
571  *inst, thread.streamSeqNum);
572  issued = true;
573  discarded = true;
574  } else {
575  /* Try and issue an instruction into an FU, assume we didn't and
576  * fix that in the loop */
577  issued = false;
578 
579  /* Try FU from 0 each instruction */
580  fu_index = 0;
581 
582  /* Try and issue a single instruction stepping through the
583  * available FUs */
584  do {
585  FUPipeline *fu = funcUnits[fu_index];
586 
587  DPRINTF(MinorExecute, "Trying to issue inst: %s to FU: %d\n",
588  *inst, fu_index);
589 
590  /* Does the examined fu have the OpClass-related capability
591  * needed to execute this instruction? Faults can always
592  * issue to any FU but probably should just 'live' in the
593  * inFlightInsts queue rather than having an FU. */
594  bool fu_is_capable = (!inst->isFault() ?
595  fu->provides(inst->staticInst->opClass()) : true);
596 
597  if (inst->isNoCostInst()) {
598  /* Issue free insts. to a fake numbered FU */
599  fu_index = noCostFUIndex;
600 
601  /* And start the countdown on activity to allow
602  * this instruction to get to the end of its FU */
604 
605  /* Mark the destinations for this instruction as
606  * busy */
607  scoreboard[thread_id].markupInstDests(inst, cpu.curCycle() +
608  Cycles(0), cpu.getContext(thread_id), false);
609 
610  DPRINTF(MinorExecute, "Issuing %s to %d\n", inst->id, noCostFUIndex);
611  inst->fuIndex = noCostFUIndex;
612  inst->extraCommitDelay = Cycles(0);
613  inst->extraCommitDelayExpr = NULL;
614 
615  /* Push the instruction onto the inFlight queue so
616  * it can be committed in order */
617  QueuedInst fu_inst(inst);
618  thread.inFlightInsts->push(fu_inst);
619 
620  issued = true;
621 
622  } else if (!fu_is_capable || fu->alreadyPushed()) {
623  /* Skip */
624  if (!fu_is_capable) {
625  DPRINTF(MinorExecute, "Can't issue as FU: %d isn't"
626  " capable\n", fu_index);
627  } else {
628  DPRINTF(MinorExecute, "Can't issue as FU: %d is"
629  " already busy\n", fu_index);
630  }
631  } else if (fu->stalled) {
632  DPRINTF(MinorExecute, "Can't issue inst: %s into FU: %d,"
633  " it's stalled\n",
634  *inst, fu_index);
635  } else if (!fu->canInsert()) {
636  DPRINTF(MinorExecute, "Can't issue inst: %s to busy FU"
637  " for another: %d cycles\n",
638  *inst, fu->cyclesBeforeInsert());
639  } else {
640  MinorFUTiming *timing = (!inst->isFault() ?
641  fu->findTiming(inst->staticInst) : NULL);
642 
643  const std::vector<Cycles> *src_latencies =
644  (timing ? &(timing->srcRegsRelativeLats)
645  : NULL);
646 
647  const std::vector<bool> *cant_forward_from_fu_indices =
649 
650  if (timing && timing->suppress) {
651  DPRINTF(MinorExecute, "Can't issue inst: %s as extra"
652  " decoding is suppressing it\n",
653  *inst);
654  } else if (!scoreboard[thread_id].canInstIssue(inst,
655  src_latencies, cant_forward_from_fu_indices,
656  cpu.curCycle(), cpu.getContext(thread_id)))
657  {
658  DPRINTF(MinorExecute, "Can't issue inst: %s yet\n",
659  *inst);
660  } else {
661  /* Can insert the instruction into this FU */
662  DPRINTF(MinorExecute, "Issuing inst: %s"
663  " into FU %d\n", *inst,
664  fu_index);
665 
666  Cycles extra_dest_retire_lat = Cycles(0);
667  TimingExpr *extra_dest_retire_lat_expr = NULL;
668  Cycles extra_assumed_lat = Cycles(0);
669 
670  /* Add the extraCommitDelay and extraAssumeLat to
671  * the FU pipeline timings */
672  if (timing) {
673  extra_dest_retire_lat =
674  timing->extraCommitLat;
675  extra_dest_retire_lat_expr =
676  timing->extraCommitLatExpr;
677  extra_assumed_lat =
678  timing->extraAssumedLat;
679  }
680 
681  issued_mem_ref = inst->isMemRef();
682 
683  QueuedInst fu_inst(inst);
684 
685  /* Decorate the inst with FU details */
686  inst->fuIndex = fu_index;
687  inst->extraCommitDelay = extra_dest_retire_lat;
688  inst->extraCommitDelayExpr =
689  extra_dest_retire_lat_expr;
690 
691  if (issued_mem_ref) {
692  /* Remember which instruction this memory op
693  * depends on so that initiateAcc can be called
694  * early */
695  if (allowEarlyMemIssue) {
696  inst->instToWaitFor =
697  scoreboard[thread_id].execSeqNumToWaitFor(inst,
698  cpu.getContext(thread_id));
699 
700  if (lsq.getLastMemBarrier(thread_id) >
701  inst->instToWaitFor)
702  {
703  DPRINTF(MinorExecute, "A barrier will"
704  " cause a delay in mem ref issue of"
705  " inst: %s until after inst"
706  " %d(exec)\n", *inst,
707  lsq.getLastMemBarrier(thread_id));
708 
709  inst->instToWaitFor =
710  lsq.getLastMemBarrier(thread_id);
711  } else {
712  DPRINTF(MinorExecute, "Memory ref inst:"
713  " %s must wait for inst %d(exec)"
714  " before issuing\n",
715  *inst, inst->instToWaitFor);
716  }
717 
718  inst->canEarlyIssue = true;
719  }
720  /* Also queue this instruction in the memory ref
721  * queue to ensure in-order issue to the LSQ */
722  DPRINTF(MinorExecute, "Pushing mem inst: %s\n",
723  *inst);
724  thread.inFUMemInsts->push(fu_inst);
725  }
726 
727  /* Issue to FU */
728  fu->push(fu_inst);
729  /* And start the countdown on activity to allow
730  * this instruction to get to the end of its FU */
732 
733  /* Mark the destinations for this instruction as
734  * busy */
735  scoreboard[thread_id].markupInstDests(inst, cpu.curCycle() +
736  fu->description.opLat +
737  extra_dest_retire_lat +
738  extra_assumed_lat,
739  cpu.getContext(thread_id),
740  issued_mem_ref && extra_assumed_lat == Cycles(0));
741 
742  /* Push the instruction onto the inFlight queue so
743  * it can be committed in order */
744  thread.inFlightInsts->push(fu_inst);
745 
746  issued = true;
747  }
748  }
749 
750  fu_index++;
751  } while (fu_index != numFuncUnits && !issued);
752 
753  if (!issued)
754  DPRINTF(MinorExecute, "Didn't issue inst: %s\n", *inst);
755  }
756 
757  if (issued) {
758  /* Generate MinorTrace's MinorInst lines. Do this at commit
759  * to allow better instruction annotation? */
760  if (DTRACE(MinorTrace) && !inst->isBubble())
761  inst->minorTraceInst(*this);
762 
763  /* Mark up barriers in the LSQ */
764  if (!discarded && inst->isInst() &&
765  inst->staticInst->isMemBarrier())
766  {
767  DPRINTF(MinorMem, "Issuing memory barrier inst: %s\n", *inst);
769  }
770 
771  if (inst->traceData && setTraceTimeOnIssue) {
772  inst->traceData->setWhen(curTick());
773  }
774 
775  if (issued_mem_ref)
776  num_mem_insts_issued++;
777 
778  if (discarded) {
779  num_insts_discarded++;
780  } else if (!inst->isBubble()) {
781  num_insts_issued++;
782 
783  if (num_insts_issued == issueLimit)
784  DPRINTF(MinorExecute, "Reached inst issue limit\n");
785  }
786 
787  thread.inputIndex++;
788  DPRINTF(MinorExecute, "Stepping to next inst inputIndex: %d\n",
789  thread.inputIndex);
790  }
791 
792  /* Got to the end of a line */
793  if (thread.inputIndex == insts_in->width()) {
794  popInput(thread_id);
795  /* Set insts_in to null to force us to leave the surrounding
796  * loop */
797  insts_in = NULL;
798 
800  DPRINTF(MinorExecute, "Wrapping\n");
801  insts_in = getInput(thread_id);
802  }
803  }
804  } while (insts_in && thread.inputIndex < insts_in->width() &&
805  /* We still have instructions */
806  fu_index != numFuncUnits && /* Not visited all FUs */
807  issued && /* We've not yet failed to issue an instruction */
808  num_insts_issued != issueLimit && /* Still allowed to issue */
809  num_mem_insts_issued != memoryIssueLimit);
810 
811  return num_insts_issued;
812 }
813 
814 bool
816 {
817  ThreadContext *thread = cpu.getContext(thread_id);
818  unsigned int num_pc_event_checks = 0;
819 
820  /* Handle PC events on instructions */
821  Addr oldPC;
822  do {
823  oldPC = thread->instAddr();
824  cpu.system->pcEventQueue.service(thread);
825  num_pc_event_checks++;
826  } while (oldPC != thread->instAddr());
827 
828  if (num_pc_event_checks > 1) {
829  DPRINTF(PCEvent, "Acting on PC Event to PC: %s\n",
830  thread->pcState());
831  }
832 
833  return num_pc_event_checks > 1;
834 }
835 
836 void
838 {
839  assert(!inst->isFault());
840 
841  MinorThread *thread = cpu.threads[inst->id.threadId];
842 
843  /* Increment the many and various inst and op counts in the
844  * thread and system */
845  if (!inst->staticInst->isMicroop() || inst->staticInst->isLastMicroop())
846  {
847  thread->numInst++;
848  thread->numInsts++;
849  cpu.stats.numInsts++;
850  cpu.system->totalNumInsts++;
851 
852  /* Act on events related to instruction counts */
853  cpu.comInstEventQueue[inst->id.threadId]->serviceEvents(thread->numInst);
854  cpu.system->instEventQueue.serviceEvents(cpu.system->totalNumInsts);
855  }
856  thread->numOp++;
857  thread->numOps++;
858  cpu.stats.numOps++;
859  cpu.stats.committedInstType[inst->id.threadId]
860  [inst->staticInst->opClass()]++;
861 
862  /* Set the CP SeqNum to the numOps commit number */
863  if (inst->traceData)
864  inst->traceData->setCPSeq(thread->numOp);
865 
866  cpu.probeInstCommit(inst->staticInst);
867 }
868 
869 bool
870 Execute::commitInst(MinorDynInstPtr inst, bool early_memory_issue,
871  BranchData &branch, Fault &fault, bool &committed,
872  bool &completed_mem_issue)
873 {
874  ThreadID thread_id = inst->id.threadId;
875  ThreadContext *thread = cpu.getContext(thread_id);
876 
877  bool completed_inst = true;
878  fault = NoFault;
879 
880  /* Is the thread for this instruction suspended? In that case, just
881  * stall as long as there are no pending interrupts */
882  if (thread->status() == ThreadContext::Suspended &&
883  !isInterrupted(thread_id))
884  {
885  panic("We should never hit the case where we try to commit from a "
886  "suspended thread as the streamSeqNum should not match");
887  } else if (inst->isFault()) {
888  ExecContext context(cpu, *cpu.threads[thread_id], *this, inst);
889 
890  DPRINTF(MinorExecute, "Fault inst reached Execute: %s\n",
891  inst->fault->name());
892 
893  fault = inst->fault;
894  inst->fault->invoke(thread, NULL);
895 
896  tryToBranch(inst, fault, branch);
897  } else if (inst->staticInst->isMemRef()) {
898  /* Memory accesses are executed in two parts:
899  * executeMemRefInst -- calculates the EA and issues the access
900  * to memory. This is done here.
901  * handleMemResponse -- handles the response packet, done by
902  * Execute::commit
903  *
904  * While the memory access is in its FU, the EA is being
905  * calculated. At the end of the FU, when it is ready to
906  * 'commit' (in this function), the access is presented to the
907  * memory queues. When a response comes back from memory,
908  * Execute::commit will commit it.
909  */
910  bool predicate_passed = false;
911  bool completed_mem_inst = executeMemRefInst(inst, branch,
912  predicate_passed, fault);
913 
914  if (completed_mem_inst && fault != NoFault) {
915  if (early_memory_issue) {
916  DPRINTF(MinorExecute, "Fault in early executing inst: %s\n",
917  fault->name());
918  /* Don't execute the fault, just stall the instruction
919  * until it gets to the head of inFlightInsts */
920  inst->canEarlyIssue = false;
921  /* Not completed as we'll come here again to pick up
922  * the fault when we get to the end of the FU */
923  completed_inst = false;
924  } else {
925  DPRINTF(MinorExecute, "Fault in execute: %s\n",
926  fault->name());
927  fault->invoke(thread, NULL);
928 
929  tryToBranch(inst, fault, branch);
930  completed_inst = true;
931  }
932  } else {
933  completed_inst = completed_mem_inst;
934  }
935  completed_mem_issue = completed_inst;
936  } else if (inst->isInst() && inst->staticInst->isMemBarrier() &&
938  {
939  DPRINTF(MinorExecute, "Can't commit data barrier inst: %s yet as"
940  " there isn't space in the store buffer\n", *inst);
941 
942  completed_inst = false;
943  } else if (inst->isInst() && inst->staticInst->isQuiesce()
944  && !branch.isBubble()){
945  /* This instruction can suspend, need to be able to communicate
946  * backwards, so no other branches may evaluate this cycle*/
947  completed_inst = false;
948  } else {
949  ExecContext context(cpu, *cpu.threads[thread_id], *this, inst);
950 
951  DPRINTF(MinorExecute, "Committing inst: %s\n", *inst);
952 
953  fault = inst->staticInst->execute(&context,
954  inst->traceData);
955 
956  /* Set the predicate for tracing and dump */
957  if (inst->traceData)
958  inst->traceData->setPredicate(context.readPredicate());
959 
960  committed = true;
961 
962  if (fault != NoFault) {
963  DPRINTF(MinorExecute, "Fault in execute of inst: %s fault: %s\n",
964  *inst, fault->name());
965  fault->invoke(thread, inst->staticInst);
966  }
967 
969  tryToBranch(inst, fault, branch);
970  }
971 
972  if (completed_inst) {
973  /* Keep a copy of this instruction's predictionSeqNum just in case
974  * we need to issue a branch without an instruction (such as an
975  * interrupt) */
976  executeInfo[thread_id].lastPredictionSeqNum = inst->id.predictionSeqNum;
977 
978  /* Check to see if this instruction suspended the current thread. */
979  if (!inst->isFault() &&
980  thread->status() == ThreadContext::Suspended &&
981  branch.isBubble() && /* It didn't branch too */
982  !isInterrupted(thread_id)) /* Don't suspend if we have
983  interrupts */
984  {
985  TheISA::PCState resume_pc = cpu.getContext(thread_id)->pcState();
986 
987  assert(resume_pc.microPC() == 0);
988 
989  DPRINTF(MinorInterrupt, "Suspending thread: %d from Execute"
990  " inst: %s\n", thread_id, *inst);
991 
993 
995  resume_pc, branch);
996  }
997  }
998 
999  return completed_inst;
1000 }
1001 
1002 void
/* Try and commit instructions from the head of the in-flight instruction
 * queue (and, for mem refs, from the memory-response queues), honouring the
 * per-cycle commit limits.  only_commit_microops restricts commit to the
 * microops of the current macroop (used before taking an interrupt/drain);
 * discard forces every instruction to be thrown away; branch is filled in
 * with any stream change generated by a committed instruction.
 * NOTE(review): this text was extracted from generated documentation —
 * Doxygen source lines 1010-1034 (original header comment) are missing. */
1003 Execute::commit(ThreadID thread_id, bool only_commit_microops, bool discard,
1004  BranchData &branch)
1005 {
1006  Fault fault = NoFault;
1007  Cycles now = cpu.curCycle();
1008  ExecuteThreadInfo &ex_info = executeInfo[thread_id];
1009 
1035  /* Has an instruction been completed? Once this becomes false, we stop
1036  * trying to complete instructions. */
1037  bool completed_inst = true;
1038 
1039  /* Number of insts committed this cycle to check against commitLimit */
1040  unsigned int num_insts_committed = 0;
1041 
1042  /* Number of memory access instructions committed to check against
1043  * memCommitLimit */
1044  unsigned int num_mem_refs_committed = 0;
1045 
1046  if (only_commit_microops && !ex_info.inFlightInsts->empty()) {
1047  DPRINTF(MinorInterrupt, "Only commit microops %s %d\n",
1048  *(ex_info.inFlightInsts->front().inst),
1049  ex_info.lastCommitWasEndOfMacroop);
1050  }
1051 
1052  while (!ex_info.inFlightInsts->empty() && /* Some more instructions to process */
1053  !branch.isStreamChange() && /* No real branch */
1054  fault == NoFault && /* No faults */
1055  completed_inst && /* Still finding instructions to execute */
1056  num_insts_committed != commitLimit /* Not reached commit limit */
1057  )
1058  {
1059  if (only_commit_microops) {
1060  DPRINTF(MinorInterrupt, "Committing tail of insts before"
1061  " interrupt: %s\n",
1062  *(ex_info.inFlightInsts->front().inst));
1063  }
1064 
1065  QueuedInst *head_inflight_inst = &(ex_info.inFlightInsts->front());
1066 
1067  InstSeqNum head_exec_seq_num =
1068  head_inflight_inst->inst->id.execSeqNum;
1069 
1070  /* The instruction we actually process if completed_inst
1071  * remains true to the end of the loop body.
1072  * Start by considering the head of the in flight insts queue */
1073  MinorDynInstPtr inst = head_inflight_inst->inst;
1074 
1075  bool committed_inst = false;
1076  bool discard_inst = false;
1077  bool completed_mem_ref = false;
1078  bool issued_mem_ref = false;
1079  bool early_memory_issue = false;
1080 
1081  /* Must set this again to go around the loop */
1082  completed_inst = false;
1083 
1084  /* If we're just completing a macroop before an interrupt or drain,
1085  * can we still commit another microop (rather than a memory response)
1086  * without crossing into the next full instruction? */
1087  bool can_commit_insts = !ex_info.inFlightInsts->empty() &&
1088  !(only_commit_microops && ex_info.lastCommitWasEndOfMacroop);
1089 
1090  /* Can we find a mem response for this inst */
1091  LSQ::LSQRequestPtr mem_response =
1092  (inst->inLSQ ? lsq.findResponse(inst) : NULL);
1093 
1094  DPRINTF(MinorExecute, "Trying to commit canCommitInsts: %d\n",
1095  can_commit_insts);
1096 
1097  /* Test for PC events after every instruction */
1098  if (isInbetweenInsts(thread_id) && tryPCEvents(thread_id)) {
1099  ThreadContext *thread = cpu.getContext(thread_id);
1100 
1101  /* Branch as there was a change in PC */
/* NOTE(review): extraction artifact — Doxygen source line 1102 is
 * missing here; presumably the call that consumed the arguments on
 * the next line (likely updateBranchData(...)).  Confirm against the
 * original gem5 source. */
1103  MinorDynInst::bubble(), thread->pcState(), branch);
1104  } else if (mem_response &&
1105  num_mem_refs_committed < memoryCommitLimit)
1106  {
1107  /* Try to commit from the memory responses next */
1108  discard_inst = inst->id.streamSeqNum !=
1109  ex_info.streamSeqNum || discard;
1110 
1111  DPRINTF(MinorExecute, "Trying to commit mem response: %s\n",
1112  *inst);
1113 
1114  /* Complete or discard the response */
1115  if (discard_inst) {
1116  DPRINTF(MinorExecute, "Discarding mem inst: %s as its"
1117  " stream state was unexpected, expected: %d\n",
1118  *inst, ex_info.streamSeqNum);
1119 
1120  lsq.popResponse(mem_response);
1121  } else {
1122  handleMemResponse(inst, mem_response, branch, fault);
1123  committed_inst = true;
1124  }
1125 
1126  completed_mem_ref = true;
1127  completed_inst = true;
1128  } else if (can_commit_insts) {
1129  /* If true, this instruction will, subject to timing tweaks,
1130  * be considered for completion. try_to_commit flattens
1131  * the `if' tree a bit and allows other tests for inst
1132  * commit to be inserted here. */
1133  bool try_to_commit = false;
1134 
1135  /* Try and issue memory ops early if they:
1136  * - Can push a request into the LSQ
1137  * - Have reached the end of their FUs
1138  * - Have had all their dependencies satisfied
1139  * - Are from the right stream
1140  *
1141  * For any other case, leave it to the normal instruction
1142  * issue below to handle them.
1143  */
1144  if (!ex_info.inFUMemInsts->empty() && lsq.canRequest()) {
1145  DPRINTF(MinorExecute, "Trying to commit from mem FUs\n");
1146 
1147  const MinorDynInstPtr head_mem_ref_inst =
1148  ex_info.inFUMemInsts->front().inst;
1149  FUPipeline *fu = funcUnits[head_mem_ref_inst->fuIndex];
1150  const MinorDynInstPtr &fu_inst = fu->front().inst;
1151 
1152  /* Use this, possibly out of order, inst as the one
1153  * to 'commit'/send to the LSQ */
1154  if (!fu_inst->isBubble() &&
1155  !fu_inst->inLSQ &&
1156  fu_inst->canEarlyIssue &&
1157  ex_info.streamSeqNum == fu_inst->id.streamSeqNum &&
1158  head_exec_seq_num > fu_inst->instToWaitFor)
1159  {
1160  DPRINTF(MinorExecute, "Issuing mem ref early"
1161  " inst: %s instToWaitFor: %d\n",
1162  *(fu_inst), fu_inst->instToWaitFor);
1163 
1164  inst = fu_inst;
1165  try_to_commit = true;
1166  early_memory_issue = true;
1167  completed_inst = true;
1168  }
1169  }
1170 
1171  /* Try and commit FU-less insts */
1172  if (!completed_inst && inst->isNoCostInst()) {
1173  DPRINTF(MinorExecute, "Committing no cost inst: %s", *inst);
1174 
1175  try_to_commit = true;
1176  completed_inst = true;
1177  }
1178 
1179  /* Try to issue from the ends of FUs and the inFlightInsts
1180  * queue */
1181  if (!completed_inst && !inst->inLSQ) {
1182  DPRINTF(MinorExecute, "Trying to commit from FUs\n");
1183 
1184  /* Try to commit from a functional unit */
1185  /* Is the head inst of the expected inst's FU actually the
1186  * expected inst? */
1187  QueuedInst &fu_inst =
1188  funcUnits[inst->fuIndex]->front();
1189  InstSeqNum fu_inst_seq_num = fu_inst.inst->id.execSeqNum;
1190 
1191  if (fu_inst.inst->isBubble()) {
1192  /* No instruction ready */
1193  completed_inst = false;
1194  } else if (fu_inst_seq_num != head_exec_seq_num) {
1195  /* Past instruction: we must have already executed it
1196  * in the same cycle and so the head inst isn't
1197  * actually at the end of its pipeline
1198  * Future instruction: handled above and only for
1199  * mem refs on their way to the LSQ */
1200  } else if (fu_inst.inst->id == inst->id) {
1201  /* All instructions can be committed if they have the
1202  * right execSeqNum and there are no in-flight
1203  * mem insts before us */
1204  try_to_commit = true;
1205  completed_inst = true;
1206  }
1207  }
1208 
1209  if (try_to_commit) {
1210  discard_inst = inst->id.streamSeqNum !=
1211  ex_info.streamSeqNum || discard;
1212 
1213  /* Is this instruction discardable as its streamSeqNum
1214  * doesn't match? */
1215  if (!discard_inst) {
1216  /* Try to commit or discard a non-memory instruction.
1217  * Memory ops are actually 'committed' from this FUs
1218  * and 'issued' into the memory system so we need to
1219  * account for them later (commit_was_mem_issue gets
1220  * set) */
1221  if (inst->extraCommitDelayExpr) {
1222  DPRINTF(MinorExecute, "Evaluating expression for"
1223  " extra commit delay inst: %s\n", *inst);
1224 
1225  ThreadContext *thread = cpu.getContext(thread_id);
1226 
1227  TimingExprEvalContext context(inst->staticInst,
1228  thread, NULL);
1229 
1230  uint64_t extra_delay = inst->extraCommitDelayExpr->
1231  eval(context);
1232 
1233  DPRINTF(MinorExecute, "Extra commit delay expr"
1234  " result: %d\n", extra_delay);
1235 
/* Cap the evaluated delay: values >= 128 cycles are treated as
 * implausible and ignored (only logged). */
1236  if (extra_delay < 128) {
1237  inst->extraCommitDelay += Cycles(extra_delay);
1238  } else {
1239  DPRINTF(MinorExecute, "Extra commit delay was"
1240  " very long: %d\n", extra_delay);
1241  }
1242  inst->extraCommitDelayExpr = NULL;
1243  }
1244 
1245  /* Move the extraCommitDelay from the instruction
1246  * into the minimumCommitCycle */
1247  if (inst->extraCommitDelay != Cycles(0)) {
1248  inst->minimumCommitCycle = cpu.curCycle() +
1249  inst->extraCommitDelay;
1250  inst->extraCommitDelay = Cycles(0);
1251  }
1252 
1253  /* @todo Think about making lastMemBarrier be
1254  * MAX_UINT_64 to avoid using 0 as a marker value */
1255  if (!inst->isFault() && inst->isMemRef() &&
1256  lsq.getLastMemBarrier(thread_id) <
1257  inst->id.execSeqNum &&
1258  lsq.getLastMemBarrier(thread_id) != 0)
1259  {
1260  DPRINTF(MinorExecute, "Not committing inst: %s yet"
1261  " as there are incomplete barriers in flight\n",
1262  *inst);
1263  completed_inst = false;
1264  } else if (inst->minimumCommitCycle > now) {
1265  DPRINTF(MinorExecute, "Not committing inst: %s yet"
1266  " as it wants to be stalled for %d more cycles\n",
1267  *inst, inst->minimumCommitCycle - now);
1268  completed_inst = false;
1269  } else {
1270  completed_inst = commitInst(inst,
1271  early_memory_issue, branch, fault,
1272  committed_inst, issued_mem_ref);
1273  }
1274  } else {
1275  /* Discard instruction */
1276  completed_inst = true;
1277  }
1278 
1279  if (completed_inst) {
1280  /* Allow the pipeline to advance. If the FU head
1281  * instruction wasn't the inFlightInsts head
1282  * but had already been committed, it would have
1283  * unstalled the pipeline before here */
1284  if (inst->fuIndex != noCostFUIndex) {
1285  DPRINTF(MinorExecute, "Unstalling %d for inst %s\n", inst->fuIndex, inst->id);
1286  funcUnits[inst->fuIndex]->stalled = false;
1287  }
1288  }
1289  }
1290  } else {
1291  DPRINTF(MinorExecute, "No instructions to commit\n");
1292  completed_inst = false;
1293  }
1294 
1295  /* All discardable instructions must also be 'completed' by now */
1296  assert(!(discard_inst && !completed_inst));
1297 
1298  /* Instruction committed but was discarded due to streamSeqNum
1299  * mismatch */
1300  if (discard_inst) {
1301  DPRINTF(MinorExecute, "Discarding inst: %s as its stream"
1302  " state was unexpected, expected: %d\n",
1303  *inst, ex_info.streamSeqNum);
1304 
1305  if (fault == NoFault)
/* NOTE(review): extraction artifact — Doxygen source line 1306 (the
 * statement guarded by the `if' above, likely a call that counts the
 * discarded instruction) is missing; confirm against the original. */
1307  }
1308 
1309  /* Mark the mem inst as being in the LSQ */
1310  if (issued_mem_ref) {
1311  inst->fuIndex = 0;
1312  inst->inLSQ = true;
1313  }
1314 
1315  /* Pop issued (to LSQ) and discarded mem refs from the inFUMemInsts
1316  * as they've *definitely* exited the FUs */
1317  if (completed_inst && inst->isMemRef()) {
1318  /* The MemRef could have been discarded from the FU or the memory
1319  * queue, so just check an FU instruction */
1320  if (!ex_info.inFUMemInsts->empty() &&
1321  ex_info.inFUMemInsts->front().inst == inst)
1322  {
1323  ex_info.inFUMemInsts->pop();
1324  }
1325  }
1326 
1327  if (completed_inst && !(issued_mem_ref && fault == NoFault)) {
1328  /* Note that this includes discarded insts */
1329  DPRINTF(MinorExecute, "Completed inst: %s\n", *inst);
1330 
1331  /* Got to the end of a full instruction? */
1332  ex_info.lastCommitWasEndOfMacroop = inst->isFault() ||
1333  inst->isLastOpInInst();
1334 
1335  /* lastPredictionSeqNum is kept as a convenience to prevent its
1336  * value from changing too much on the minorview display */
1337  ex_info.lastPredictionSeqNum = inst->id.predictionSeqNum;
1338 
1339  /* Finished with the inst, remove it from the inst queue and
1340  * clear its dependencies */
1341  ex_info.inFlightInsts->pop();
1342 
1343  /* Complete barriers in the LSQ/move to store buffer */
1344  if (inst->isInst() && inst->staticInst->isMemBarrier()) {
1345  DPRINTF(MinorMem, "Completing memory barrier"
1346  " inst: %s committed: %d\n", *inst, committed_inst);
1347  lsq.completeMemBarrierInst(inst, committed_inst);
1348  }
1349 
1350  scoreboard[thread_id].clearInstDests(inst, inst->isMemRef());
1351  }
1352 
1353  /* Handle per-cycle instruction counting */
1354  if (committed_inst) {
1355  bool is_no_cost_inst = inst->isNoCostInst();
1356 
1357  /* Don't show no cost instructions as having taken a commit
1358  * slot */
1359  if (DTRACE(MinorTrace) && !is_no_cost_inst)
1360  ex_info.instsBeingCommitted.insts[num_insts_committed] = inst;
1361 
1362  if (!is_no_cost_inst)
1363  num_insts_committed++;
1364 
1365  if (num_insts_committed == commitLimit)
1366  DPRINTF(MinorExecute, "Reached inst commit limit\n");
1367 
1368  /* Re-set the time of the instruction if that's required for
1369  * tracing */
1370  if (inst->traceData) {
/* NOTE(review): extraction artifact — Doxygen source line 1371 is
 * missing; presumably a guard such as `if (setTraceTimeOnCommit)`
 * (see the setTraceTimeOnCommit member: "Modify instruction trace
 * times on commit").  Confirm against the original. */
1372  inst->traceData->setWhen(curTick());
1373  inst->traceData->dump();
1374  }
1375 
1376  if (completed_mem_ref)
1377  num_mem_refs_committed++;
1378 
1379  if (num_mem_refs_committed == memoryCommitLimit)
1380  DPRINTF(MinorExecute, "Reached mem ref commit limit\n");
1381  }
1382  }
1383 }
1384 
1385 bool
/* Are we between full (macro-)instructions, with no memory accesses in
 * flight?  Used to decide whether the thread can be interrupted/drained.
 * NOTE(review): extraction artifact — Doxygen source line 1386 (the
 * definition line, presumably
 * `Execute::isInbetweenInsts(ThreadID thread_id) const`) is missing. */
1387 {
1388  return executeInfo[thread_id].lastCommitWasEndOfMacroop &&
1389  !lsq.accessesInFlight();
1390 }
1391 
1392 void
/* Per-cycle evaluation of the Execute stage: pull input from Decode, step
 * the LSQ, check interrupts, commit from the committing thread, issue from
 * the issuing thread, advance the FU pipelines, and finally decide whether
 * the stage needs to tick next cycle.
 * NOTE(review): extraction artifact — Doxygen source line 1393 (the
 * definition line, presumably `Execute::evaluate()`) is missing. */
1394 {
1395  if (!inp.outputWire->isBubble())
1396  inputBuffer[inp.outputWire->threadId].setTail(*inp.outputWire);
1397 
1398  BranchData &branch = *out.inputWire;
1399 
1400  unsigned int num_issued = 0;
1401 
1402  /* Do all the cycle-wise activities for dcachePort here to potentially
1403  * free up input spaces in the LSQ's requests queue */
1404  lsq.step();
1405 
1406  /* Check interrupts first. Will halt commit if interrupt found */
1407  bool interrupted = false;
1408  ThreadID interrupt_tid = checkInterrupts(branch, interrupted);
1409 
1410  if (interrupt_tid != InvalidThreadID) {
1411  /* Signalling an interrupt this cycle, not issuing/committing from
1412  * any other threads */
1413  } else if (!branch.isBubble()) {
1414  /* It's important that this is here to carry Fetch1 wakeups to Fetch1
1415  * without overwriting them */
1416  DPRINTF(MinorInterrupt, "Execute skipping a cycle to allow old"
1417  " branch to complete\n");
1418  } else {
1419  ThreadID commit_tid = getCommittingThread();
1420 
1421  if (commit_tid != InvalidThreadID) {
1422  ExecuteThreadInfo& commit_info = executeInfo[commit_tid];
1423 
1424  DPRINTF(MinorExecute, "Attempting to commit [tid:%d]\n",
1425  commit_tid);
1426  /* commit can set stalled flags observable to issue and so *must* be
1427  * called first */
1428  if (commit_info.drainState != NotDraining) {
1429  if (commit_info.drainState == DrainCurrentInst) {
1430  /* Commit only micro-ops, don't kill anything else */
1431  commit(commit_tid, true, false, branch);
1432 
1433  if (isInbetweenInsts(commit_tid))
1434  setDrainState(commit_tid, DrainHaltFetch);
1435 
1436  /* Discard any generated branch */
1437  branch = BranchData::bubble();
1438  } else if (commit_info.drainState == DrainAllInsts) {
1439  /* Kill all instructions */
1440  while (getInput(commit_tid))
1441  popInput(commit_tid);
1442  commit(commit_tid, false, true, branch);
1443  }
1444  } else {
1445  /* Commit micro-ops only if interrupted. Otherwise, commit
1446  * anything you like */
1447  DPRINTF(MinorExecute, "Committing micro-ops for interrupt[tid:%d]\n",
1448  commit_tid);
1449  bool only_commit_microops = interrupted &&
1450  hasInterrupt(commit_tid);
1451  commit(commit_tid, only_commit_microops, false, branch);
1452  }
1453 
1454  /* Halt fetch, but don't do it until we have the current instruction in
1455  * the bag */
1456  if (commit_info.drainState == DrainHaltFetch) {
/* NOTE(review): extraction artifact — Doxygen source line 1457 is
 * missing; presumably the call consuming the arguments on the next
 * line (likely updateBranchData(...)).  Confirm against the original. */
1458  MinorDynInst::bubble(), TheISA::PCState(0), branch);
1459 
/* NOTE(review): Doxygen source line 1460 is also missing here. */
1461  setDrainState(commit_tid, DrainAllInsts);
1462  }
1463  }
1464  ThreadID issue_tid = getIssuingThread();
1465  /* This will issue merrily even when interrupted in the sure and
1466  * certain knowledge that the interrupt will change the stream */
1467  if (issue_tid != InvalidThreadID) {
1468  DPRINTF(MinorExecute, "Attempting to issue [tid:%d]\n",
1469  issue_tid);
1470  num_issued = issue(issue_tid);
1471  }
1472 
1473  }
1474 
1475  /* Run logic to step functional units + decide if we are active on the next
1476  * clock cycle */
1477  std::vector<MinorDynInstPtr> next_issuable_insts;
1478  bool can_issue_next = false;
1479 
1480  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1481  /* Find the next issuable instruction for each thread and see if it can
1482  be issued */
1483  if (getInput(tid)) {
1484  unsigned int input_index = executeInfo[tid].inputIndex;
1485  MinorDynInstPtr inst = getInput(tid)->insts[input_index];
1486  if (inst->isFault()) {
1487  can_issue_next = true;
1488  } else if (!inst->isBubble()) {
1489  next_issuable_insts.push_back(inst);
1490  }
1491  }
1492  }
1493 
1494  bool becoming_stalled = true;
1495 
1496  /* Advance the pipelines and note whether they still need to be
1497  * advanced */
1498  for (unsigned int i = 0; i < numFuncUnits; i++) {
1499  FUPipeline *fu = funcUnits[i];
1500  fu->advance();
1501 
1502  /* If we need to tick again, the pipeline will have been left or set
1503  * to be unstalled */
1504  if (fu->occupancy !=0 && !fu->stalled)
1505  becoming_stalled = false;
1506 
1507  /* Could we possibly issue the next instruction from any thread?
1508  * This is quite an expensive test and is only used to determine
1509  * if the CPU should remain active, only run it if we aren't sure
1510  * we are active next cycle yet */
1511  for (auto inst : next_issuable_insts) {
1512  if (!fu->stalled && fu->provides(inst->staticInst->opClass()) &&
1513  scoreboard[inst->id.threadId].canInstIssue(inst,
1514  NULL, NULL, cpu.curCycle() + Cycles(1),
1515  cpu.getContext(inst->id.threadId))) {
1516  can_issue_next = true;
1517  break;
1518  }
1519  }
1520  }
1521 
1522  bool head_inst_might_commit = false;
1523 
1524  /* Could the head in flight insts be committed */
1525  for (auto const &info : executeInfo) {
1526  if (!info.inFlightInsts->empty()) {
1527  const QueuedInst &head_inst = info.inFlightInsts->front();
1528 
1529  if (head_inst.inst->isNoCostInst()) {
1530  head_inst_might_commit = true;
1531  } else {
1532  FUPipeline *fu = funcUnits[head_inst.inst->fuIndex];
1533  if ((fu->stalled &&
1534  fu->front().inst->id == head_inst.inst->id) ||
1535  lsq.findResponse(head_inst.inst))
1536  {
1537  head_inst_might_commit = true;
1538  break;
1539  }
1540  }
1541  }
1542  }
1543 
1544  DPRINTF(Activity, "Need to tick num issued insts: %s%s%s%s%s%s\n",
1545  (num_issued != 0 ? " (issued some insts)" : ""),
1546  (becoming_stalled ? "(becoming stalled)" : "(not becoming stalled)"),
1547  (can_issue_next ? " (can issued next inst)" : ""),
1548  (head_inst_might_commit ? "(head inst might commit)" : ""),
1549  (lsq.needsToTick() ? " (LSQ needs to tick)" : ""),
1550  (interrupted ? " (interrupted)" : ""));
1551 
1552  bool need_to_tick =
1553  num_issued != 0 || /* Issued some insts this cycle */
1554  !becoming_stalled || /* Some FU pipelines can still move */
1555  can_issue_next || /* Can still issue a new inst */
1556  head_inst_might_commit || /* Could possibly commit the next inst */
1557  lsq.needsToTick() || /* Must step the dcache port */
1558  interrupted; /* There are pending interrupts */
1559 
1560  if (!need_to_tick) {
1561  DPRINTF(Activity, "The next cycle might be skippable as there are no"
1562  " advanceable FUs\n");
1563  }
1564 
1565  /* Wake up if we need to tick again */
1566  if (need_to_tick)
/* NOTE(review): extraction artifact — Doxygen source line 1567 (the body
 * of this `if', likely cpu.wakeupOnEvent(...) per the wakeupOnEvent
 * documentation below) is missing; confirm against the original. */
1568 
1569  /* Note activity of following buffer */
1570  if (!branch.isBubble())
/* NOTE(review): Doxygen source line 1571 (the body of this `if',
 * presumably an activity-recording call) is also missing. */
1572 
1573  /* Make sure the input (if any left) is pushed */
1574  if (!inp.outputWire->isBubble())
1575  inputBuffer[inp.outputWire->threadId].pushTail();
1576 }
1577 
1578 ThreadID
/* Round-robin over the threads (starting from interruptPriority) looking
 * for a thread with a pending, actionable interrupt.  Returns the thread
 * that took an interrupt this cycle, or InvalidThreadID.  `interrupted' is
 * set if any thread had an interrupt signalled, whether or not it acted. */
1579 Execute::checkInterrupts(BranchData& branch, bool& interrupted)
1580 {
/* NOTE(review): extraction artifact — Doxygen source line 1581 (the
 * declaration/initialisation of `tid', presumably from interruptPriority
 * given the do/while termination condition below) is missing. */
1582  /* Evaluate interrupts in round-robin based upon service */
1583  do {
1584  /* Has an interrupt been signalled? This may not be acted on
1585  * straight away so this is different from took_interrupt */
1586  bool thread_interrupted = false;
1587 
1588  if (FullSystem && cpu.getInterruptController(tid)) {
1589  /* This is here because it seems that after drainResume the
1590  * interrupt controller isn't always set */
1591  thread_interrupted = executeInfo[tid].drainState == NotDraining &&
1592  isInterrupted(tid);
1593  interrupted = interrupted || thread_interrupted;
1594  } else {
1595  DPRINTF(MinorInterrupt, "No interrupt controller\n");
1596  }
1597  DPRINTF(MinorInterrupt, "[tid:%d] thread_interrupted?=%d isInbetweenInsts?=%d\n",
1598  tid, thread_interrupted, isInbetweenInsts(tid));
1599  /* Act on interrupts */
1600  if (thread_interrupted && isInbetweenInsts(tid)) {
1601  if (takeInterrupt(tid, branch)) {
1602  interruptPriority = tid;
1603  return tid;
1604  }
1605  } else {
1606  tid = (tid + 1) % cpu.numThreads;
1607  }
1608  } while (tid != interruptPriority);
1609 
1610  return InvalidThreadID;
1611 }
1612 
1613 bool
/* Does the given thread have an interrupt pending (full-system only, and
 * only while not draining)?
 * NOTE(review): extraction artifact — Doxygen source line 1614 (the
 * definition line, presumably `Execute::hasInterrupt(ThreadID thread_id)`)
 * is missing. */
1615 {
1616  if (FullSystem && cpu.getInterruptController(thread_id)) {
1617  return executeInfo[thread_id].drainState == NotDraining &&
1618  isInterrupted(thread_id);
1619  }
1620 
1621  return false;
1622 }
1623 
1624 void
/* Emit the MinorTrace state of the Execute stage: committed insts, LSQ,
 * input buffer, scoreboard, per-FU stall flags and drain state.  Only
 * thread 0's structures are reported here.
 * NOTE(review): extraction artifact — Doxygen source line 1625 (the
 * definition line, presumably `Execute::minorTrace() const`) is missing. */
1626 {
1627  std::ostringstream insts;
1628  std::ostringstream stalled;
1629 
1630  executeInfo[0].instsBeingCommitted.reportData(insts);
1631  lsq.minorTrace();
1632  inputBuffer[0].minorTrace();
1633  scoreboard[0].minorTrace();
1634 
1635  /* Report functional unit stalling in one string */
1636  unsigned int i = 0;
1637  while (i < numFuncUnits)
1638  {
/* '1' marks a stalled FU, 'E' (empty/executing) an unstalled one */
1639  stalled << (funcUnits[i]->stalled ? '1' : 'E');
1640  i++;
1641  if (i != numFuncUnits)
1642  stalled << ',';
1643  }
1644 
1645  MINORTRACE("insts=%s inputIndex=%d streamSeqNum=%d"
1646  " stalled=%s drainState=%d isInbetweenInsts=%d\n",
1647  insts.str(), executeInfo[0].inputIndex, executeInfo[0].streamSeqNum,
1648  stalled.str(), executeInfo[0].drainState, isInbetweenInsts(0));
1649 
1650  std::for_each(funcUnits.begin(), funcUnits.end(),
1651  std::mem_fun(&FUPipeline::minorTrace));
1652 
1653  executeInfo[0].inFlightInsts->minorTrace();
1654  executeInfo[0].inFUMemInsts->minorTrace();
1655 }
1656 
1657 inline ThreadID
/* Pick the next thread to commit from, according to the CPU's thread
 * policy, preferring threads whose head in-flight instruction can actually
 * make progress (has an LSQ response, can transfer a mem ref to the LSQ,
 * or is at the end of its FU).  Returns InvalidThreadID if none can.
 * NOTE(review): extraction artifact — Doxygen source line 1658 (the
 * definition line, presumably `Execute::getCommittingThread()`) is
 * missing. */
1659 {
1660  std::vector<ThreadID> priority_list;
1661 
1662  switch (cpu.threadPolicy) {
1663  case Enums::SingleThreaded:
1664  return 0;
1665  case Enums::RoundRobin:
1666  priority_list = cpu.roundRobinPriority(commitPriority);
1667  break;
1668  case Enums::Random:
1669  priority_list = cpu.randomPriority();
1670  break;
1671  default:
1672  panic("Invalid thread policy");
1673  }
1674 
1675  for (auto tid : priority_list) {
1676  ExecuteThreadInfo &ex_info = executeInfo[tid];
1677  bool can_commit_insts = !ex_info.inFlightInsts->empty();
1678  if (can_commit_insts) {
1679  QueuedInst *head_inflight_inst = &(ex_info.inFlightInsts->front());
1680  MinorDynInstPtr inst = head_inflight_inst->inst;
1681 
/* An inst already in the LSQ can only commit once its response is ready */
1682  can_commit_insts = can_commit_insts &&
1683  (!inst->inLSQ || (lsq.findResponse(inst) != NULL));
1684 
1685  if (!inst->inLSQ) {
1686  bool can_transfer_mem_inst = false;
1687  if (!ex_info.inFUMemInsts->empty() && lsq.canRequest()) {
/* Mirrors the early-memory-issue test in commit(): a mem ref at the end
 * of its FU that can be sent to the LSQ out of order */
1688  const MinorDynInstPtr head_mem_ref_inst =
1689  ex_info.inFUMemInsts->front().inst;
1690  FUPipeline *fu = funcUnits[head_mem_ref_inst->fuIndex];
1691  const MinorDynInstPtr &fu_inst = fu->front().inst;
1692  can_transfer_mem_inst =
1693  !fu_inst->isBubble() &&
1694  fu_inst->id.threadId == tid &&
1695  !fu_inst->inLSQ &&
1696  fu_inst->canEarlyIssue &&
1697  inst->id.execSeqNum > fu_inst->instToWaitFor;
1698  }
1699 
1700  bool can_execute_fu_inst = inst->fuIndex == noCostFUIndex;
1701  if (can_commit_insts && !can_transfer_mem_inst &&
1702  inst->fuIndex != noCostFUIndex)
1703  {
1704  QueuedInst& fu_inst = funcUnits[inst->fuIndex]->front();
1705  can_execute_fu_inst = !fu_inst.inst->isBubble() &&
1706  fu_inst.inst->id == inst->id;
1707  }
1708 
1709  can_commit_insts = can_commit_insts &&
1710  (can_transfer_mem_inst || can_execute_fu_inst);
1711  }
1712  }
1713 
1714 
1715  if (can_commit_insts) {
1716  commitPriority = tid;
1717  return tid;
1718  }
1719  }
1720 
1721  return InvalidThreadID;
1722 }
1723 
1724 inline ThreadID
/* Pick the next thread to issue from, according to the CPU's thread
 * policy: the first thread (in priority order) that has input available.
 * Returns InvalidThreadID if no thread has input.
 * NOTE(review): extraction artifact — Doxygen source line 1725 (the
 * definition line, presumably `Execute::getIssuingThread()`) is missing. */
1726 {
1727  std::vector<ThreadID> priority_list;
1728 
1729  switch (cpu.threadPolicy) {
1730  case Enums::SingleThreaded:
1731  return 0;
1732  case Enums::RoundRobin:
1733  priority_list = cpu.roundRobinPriority(issuePriority);
1734  break;
1735  case Enums::Random:
1736  priority_list = cpu.randomPriority();
1737  break;
1738  default:
1739  panic("Invalid thread scheduling policy.");
1740  }
1741 
1742  for (auto tid : priority_list) {
1743  if (getInput(tid)) {
1744  issuePriority = tid;
1745  return tid;
1746  }
1747  }
1748 
1749  return InvalidThreadID;
1750 }
1751 
1752 void
/* Resume from a drain: clear every thread's drain state back to
 * NotDraining.
 * NOTE(review): extraction artifact — Doxygen source line 1753 (the
 * definition line, presumably `Execute::drainResume()`) is missing. */
1754 {
1755  DPRINTF(Drain, "MinorExecute drainResume\n");
1756 
1757  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1758  setDrainState(tid, NotDraining);
1759  }
1760  }
/* NOTE(review): Doxygen source line 1761 is missing here (likely a
 * cpu.wakeupOnEvent(...) call, per the wakeupOnEvent documentation
 * below); confirm against the original. */
1762 }
1763 
/* Pretty-print an Execute::DrainState for debug output. */
1764 std::ostream &operator <<(std::ostream &os, Execute::DrainState state)
1765 {
1766  switch (state)
1767  {
1768  case Execute::NotDraining:
1769  os << "NotDraining";
1770  break;
/* NOTE(review): extraction artifact — Doxygen source line 1771
 * (presumably `case Execute::DrainCurrentInst:`) is missing. */
1772  os << "DrainCurrentInst";
1773  break;
/* NOTE(review): Doxygen source line 1774 (presumably
 * `case Execute::DrainHaltFetch:`) is missing. */
1775  os << "DrainHaltFetch";
1776  break;
/* NOTE(review): Doxygen source line 1777 (presumably
 * `case Execute::DrainAllInsts:`) is missing. */
1778  os << "DrainAllInsts";
1779  break;
1780  default:
1781  os << "Drain-" << static_cast<int>(state);
1782  break;
1783  }
1784 
1785  return os;
1786 }
1787 
1788 void
/* Set the drain state for a thread (with a debug message).
 * NOTE(review): extraction artifact — Doxygen source line 1789 (the
 * definition line, presumably
 * `Execute::setDrainState(ThreadID thread_id, DrainState state)`) is
 * missing. */
1790 {
1791  DPRINTF(Drain, "setDrainState[%d]: %s\n", thread_id, state);
1792  executeInfo[thread_id].drainState = state;
1793 }
1794 
1795 unsigned int
/* Begin draining: move each not-yet-draining thread into a drain state.
 * Returns 0 if already drained, 1 otherwise.
 * NOTE(review): extraction artifact — Doxygen source line 1796 (the
 * definition line, presumably `Execute::drain()`) is missing. */
1797 {
1798  DPRINTF(Drain, "MinorExecute drain\n");
1799 
1800  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1801  if (executeInfo[tid].drainState == NotDraining) {
/* NOTE(review): Doxygen source line 1802 is missing here. */
1803 
1804  /* Go to DrainCurrentInst if we're between microops
1805  * or waiting on an unbufferable memory operation.
1806  * Otherwise we can go straight to DrainHaltFetch
1807  */
1808  if (isInbetweenInsts(tid))
/* NOTE(review): Doxygen source line 1809 is missing (presumably
 * `setDrainState(tid, DrainHaltFetch);` — one of the two
 * setDrainState calls implied by the comment above; confirm). */
1810  else
/* NOTE(review): Doxygen source line 1811 is missing (presumably the
 * other setDrainState call; confirm against the original). */
1812  }
1813  }
1814  return (isDrained() ? 0 : 1);
1815 }
1816 
1817 bool
/* Drained when the LSQ is drained and every thread has an empty input
 * buffer and no in-flight instructions.
 * NOTE(review): extraction artifact — Doxygen source line 1818 (the
 * definition line, presumably `Execute::isDrained()`) is missing. */
1819 {
1820  if (!lsq.isDrained())
1821  return false;
1822 
1823  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1824  if (!inputBuffer[tid].empty() ||
1825  !executeInfo[tid].inFlightInsts->empty()) {
1826 
1827  return false;
1828  }
1829  }
1830 
1831  return true;
1832 }
1833 
/* Destructor: free the owned functional-unit pipelines and each thread's
 * in-flight instruction queue.
 * NOTE(review): extraction artifact — Doxygen source line 1834 (the
 * definition line, presumably `Execute::~Execute()`) is missing. */
1835 {
1836  for (unsigned int i = 0; i < numFuncUnits; i++)
1837  delete funcUnits[i];
1838 
1839  for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
1840  delete executeInfo[tid].inFlightInsts;
1841 }
1842 
1843 bool
/* Does this instruction belong to its thread's current instruction stream?
 * NOTE(review): extraction artifact — Doxygen source line 1844 (the
 * definition line, presumably
 * `Execute::instIsRightStream(MinorDynInstPtr inst)`) is missing. */
1845 {
1846  return inst->id.streamSeqNum == executeInfo[inst->id.threadId].streamSeqNum;
1847 }
1848 
1849 bool
/* Is this instruction at the head of its thread's inFlightInsts queue?
 * (Per the member documentation below: "Returns true if the given
 * instruction is at the head of the inFlightInsts instruction queue".)
 * NOTE(review): extraction artifact — Doxygen source line 1850 (the
 * definition line, presumably
 * `Execute::instIsHeadInst(MinorDynInstPtr inst)`) is missing. */
1851 {
1852  bool ret = false;
1853 
1854  if (!executeInfo[inst->id.threadId].inFlightInsts->empty())
1855  ret = executeInfo[inst->id.threadId].inFlightInsts->front().inst->id == inst->id;
1856 
1857  return ret;
1858 }
1859 
/* Accessor for the LSQ's data-cache port.
 * NOTE(review): extraction artifact — Doxygen source lines 1860-1861
 * (the return type and definition line, presumably
 * `Execute::getDcachePort()`) are missing. */
1862 {
1863  return lsq.getDcachePort();
1864 }
1865 
1866 }
...ReportTraits are trait classes with the same functionality as ReportIF, but with elements explicit...
Definition: buffers.hh:92
DrainState
Stage cycle-by-cycle state.
Definition: execute.hh:138
#define DPRINTF(x,...)
Definition: trace.hh:212
std::ostream & operator<<(std::ostream &os, const InstId &id)
Print this id in the usual slash-separated format expected by MinorTrace.
Definition: dyn_inst.cc:63
void handleMemResponse(MinorDynInstPtr inst, LSQ::LSQRequestPtr response, BranchData &branch, Fault &fault)
Handle extracting mem ref responses from the memory queues and completing the associated instructions...
Definition: execute.cc:320
void popInput(ThreadID tid)
Pop an element off the input buffer, if there are any.
Definition: execute.cc:207
Container class to box instructions in the FUs to make those queues have correct bubble behaviour whe...
Definition: func_unit.hh:198
Bitfield< 30, 0 > index
void commit(ThreadID thread_id, bool only_commit_microops, bool discard, BranchData &branch)
Try and commit instructions from the ends of the functional unit pipelines.
Definition: execute.cc:1003
virtual Addr instAddr()=0
decltype(nullptr) constexpr NoFault
Definition: types.hh:189
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:83
Top level definition of the Minor in-order CPU model.
unsigned int cyclicIndexInc(unsigned int index, unsigned int cycle_size)
Increment a cyclic buffer index for indices [0, cycle_size-1].
Definition: execute.cc:500
bool allowEarlyMemIssue
Allow mem refs to leave their FUs before reaching the head of the in flight insts queue if their depe...
Definition: execute.hh:109
Bitfield< 7 > i
Definition: miscregs.hh:1378
bool isInbetweenInsts(ThreadID thread_id) const
Are we between instructions? Can we be interrupted?
Definition: execute.cc:1386
static BranchData bubble()
BubbleIF interface.
Definition: pipe_data.hh:149
Like a Queue but with a restricted interface and a setTail function which, when the queue is empty...
Definition: buffers.hh:563
#define panic(...)
Definition: misc.hh:153
unsigned int memoryCommitLimit
Number of memory instructions that can be committed per cycle.
Definition: execute.hh:84
static bool isStreamChange(const BranchData::Reason reason)
Is a request with this reason actually a request to change the PC rather than a bubble or branch pred...
Definition: pipe_data.cc:83
void doInstCommitAccounting(MinorDynInstPtr inst)
Do the stats handling and instruction count and PC event events related to the new instruction/op cou...
Definition: execute.cc:837
ThreadID issuePriority
Definition: execute.hh:203
void popResponse(LSQRequestPtr response)
Sanity check and pop the head response.
Definition: lsq.cc:1412
unsigned int cyclicIndexDec(unsigned int index, unsigned int cycle_size)
Decrement a cyclic buffer index for indices [0, cycle_size-1].
Definition: execute.cc:512
void issuedMemBarrierInst(MinorDynInstPtr inst)
A memory barrier instruction has been issued, remember its execSeqNum that we can avoid issuing memor...
Definition: lsq.cc:1573
bool instIsHeadInst(MinorDynInstPtr inst)
Returns true if the given instruction is at the head of the inFlightInsts instruction queue...
Definition: execute.cc:1850
MinorCPU & cpu
Pointer back to the containing CPU.
Definition: execute.hh:72
void pushFailedRequest(MinorDynInstPtr inst)
Push a predicate failed-representing request into the queues just to maintain commit order...
Definition: lsq.cc:1525
All the fun of executing instructions from Decode and sending branch/new instruction stream info...
void wakeupOnEvent(unsigned int stage_id)
Interface for stages to signal that they have become active after a callback or eventq event where th...
Definition: cpu.cc:305
void setDrainState(ThreadID thread_id, DrainState state)
Set the drain state (with useful debugging messages)
Definition: execute.cc:1789
Fault fault
Fault generated performing this request.
Definition: lsq.hh:149
bool setTraceTimeOnCommit
Modify instruction trace times on commit.
Definition: execute.hh:102
unsigned int memoryIssueLimit
Number of memory ops that can be issued per cycle.
Definition: execute.hh:78
void tryToBranch(MinorDynInstPtr inst, Fault fault, BranchData &branch)
Generate Branch data based (into branch) on an observed (or not) change in PC while executing an inst...
Definition: execute.cc:216
unsigned int issue(ThreadID thread_id)
Try and issue instructions from the inputBuffer.
Definition: execute.cc:523
void activity()
Records that there is activity this cycle.
Definition: activity.cc:56
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:146
Object to gather the visible context for evaluation.
Definition: timing_expr.hh:72
unsigned int commitLimit
Number of instructions that can be committed per cycle.
Definition: execute.hh:81
ThreadID getCommittingThread()
Use the current threading policy to determine the next thread to decode from.
Definition: execute.cc:1658
The SimpleThread object provides a combination of the ThreadState object and the ThreadContext interf...
bool accessesInFlight() const
Are there any accesses other than normal cached loads in the memory system or having received respons...
Definition: lsq.hh:688
ElemType & front()
Peek at the end element of the pipe.
Definition: buffers.hh:333
virtual TheISA::PCState pcState()=0
Queue< QueuedInst, ReportTraitsAdaptor< QueuedInst > > * inFlightInsts
In-order instructions either in FUs or the LSQ.
Definition: execute.hh:167
A functional unit that can execute any of opClasses operations with a single op(eration)Lat(ency) and...
Definition: func_unit.hh:149
std::vector< FUPipeline * > funcUnits
The execution functional units.
Definition: execute.hh:122
LSQRequestPtr findResponse(MinorDynInstPtr inst)
Returns a response if it's at the head of the transfers queue and it's either complete or can be sent...
Definition: lsq.cc:1377
Stats::Scalar numOps
Number of simulated insts and microops.
Definition: stats.hh:64
Cycles opLat
Delay from issuing the operation, to it reaching the end of the associated pipeline.
Definition: func_unit.hh:156
Wrapper for a queue type to act as a pipeline stage input queue.
Definition: buffers.hh:393
unsigned int numFuncUnits
Number of functional units to produce.
Definition: execute.hh:95
Cycles extraAssumedLat
Extra delay that results should show in the scoreboard after leaving the pipeline.
Definition: func_unit.hh:119
ThreadContext is the external interface to all thread state for anything outside of the CPU...
ExecContext bears the exec_context interface for Minor.
Definition: exec_context.hh:74
const MinorFU & description
Functional unit description that this pipeline implements.
Definition: func_unit.hh:228
Bitfield< 17 > os
Definition: misc.hh:804
ExecContext bears the exec_context interface for Minor.
InstSeqNum lastPredictionSeqNum
A prediction number for use where one isn't available from an instruction.
Definition: execute.hh:194
Derived SenderState to carry data access info.
Definition: lsq.hh:120
InstSeqNum streamSeqNum
Source of sequence number for instruction streams.
Definition: execute.hh:188
Stats::Scalar numFetchSuspends
Number of times fetch was asked to suspend by Execute.
Definition: stats.hh:70
Provide a non-protected base class for Minor's Ports as derived classes are created by Fetch1 and Exe...
Definition: cpu.hh:100
Execute(const std::string &name_, MinorCPU &cpu_, MinorCPUParams &params, Latch< ForwardInstData >::Output inp_, Latch< BranchData >::Input out_)
Definition: execute.cc:62
#define warn(...)
Definition: misc.hh:219
Definition: trace.hh:140
ForwardInstData instsBeingCommitted
Structure for reporting insts currently being processed/retired for MinorTrace.
Definition: execute.hh:182
bool canRequest()
Is there space in the request queue to be able to push a request by issuing an isMemRef instruction...
Definition: lsq.hh:668
std::vector< MinorFU * > funcUnits
Definition: func_unit.hh:184
void completeMemBarrierInst(MinorDynInstPtr inst, bool committed)
Complete a barrier instruction.
Definition: lsq.cc:815
void drainResume()
Definition: execute.cc:1753
bool provides(OpClass capability)
Definition: func_unit.cc:78
unsigned int width() const
Number of instructions carried by this object.
Definition: pipe_data.hh:275
ThreadID getIssuingThread()
Definition: execute.cc:1725
Tick curTick()
The current simulated tick.
Definition: core.hh:47
bool takeInterrupt(ThreadID thread_id, BranchData &branch)
Act on an interrupt.
Definition: execute.cc:414
unsigned int noCostFUIndex
The FU index of the non-existent costless FU for instructions which pass the MinorDynInst::isNoCostIn...
Definition: execute.hh:113
#define DTRACE(x)
Definition: trace.hh:210
bool isBubble() const
BubbleIF interface.
Definition: pipe_data.cc:249
std::vector< InputBuffer< ForwardInstData > > inputBuffer
Definition: execute.hh:125
Stats::Scalar numInsts
Number of simulated instructions.
Definition: stats.hh:61
std::vector< ThreadID > randomPriority()
Definition: cpu.hh:178
MinorDynInstPtr insts[MAX_FORWARD_INSTS]
Array of carried insts, ref counted.
Definition: pipe_data.hh:259
bool tryPCEvents(ThreadID thread_id)
Try to act on PC-related events.
Definition: execute.cc:815
#define fatal(...)
Definition: misc.hh:163
MinorCPU::MinorCPUPort & getDcachePort()
Return the raw-bindable port.
Definition: lsq.hh:728
Minor::MinorActivityRecorder * activityRecorder
Activity recording for pipeline.
Definition: cpu.hh:90
uint64_t InstSeqNum
Definition: inst_seq.hh:40
unsigned int inputIndex
Index that we've completed up to in getInput data.
Definition: execute.hh:174
std::vector< Cycles > srcRegsRelativeLats
Cycle offsets from the scoreboard delivery times of register values for each of this instruction's so...
Definition: func_unit.hh:128
bool needsToBeSentToStoreBuffer()
This request, once processed by the requests/transfers queues, will need to go to the store buffer...
Definition: lsq.cc:124
InstSeqNum getLastMemBarrier(ThreadID thread_id) const
Get the execSeqNum of the last issued memory barrier.
Definition: lsq.hh:697
Extra timing capability to allow individual ops to have their source register dependency latencies tw...
Definition: func_unit.hh:95
unsigned int occupancy
The number of slots with non-bubbles in them.
Definition: buffers.hh:298
Latch< BranchData >::Input out
Input port carrying stream changes to Fetch1.
Definition: execute.hh:69
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:142
ThreadID commitPriority
Definition: execute.hh:204
void advancePC(PCState &pc, const StaticInstPtr &inst)
Definition: utility.hh:108
MinorFUPool & fuDescriptions
Descriptions of the functional units we want to generate.
Definition: execute.hh:92
bool canPushIntoStoreBuffer() const
Must check this before trying to insert into the store buffer.
Definition: lsq.hh:680
bool isDrained()
Is there nothing left in the LSQ.
Definition: lsq.cc:1447
A Packet is used to encapsulate a transfer between two objects in the memory system (e...
Definition: packet.hh:245
MinorFUTiming * findTiming(const StaticInstPtr &inst)
Find the extra timing information for this instruction.
Definition: func_unit.cc:203
LSQ lsq
Dcache port to pass on to the CPU.
Definition: execute.hh:116
std::vector< ThreadID > roundRobinPriority(ThreadID priority)
Thread scheduling utility functions.
Definition: cpu.hh:169
const ThreadID InvalidThreadID
Definition: types.hh:172
Bitfield< 12 > fu
Definition: miscregs.hh:84
void step()
Step checks the queues to see if there are issuable transfers which were not otherwise picked up by t...
Definition: lsq.cc:1366
Fetch1 is responsible for fetching "lines" from memory and passing them to Fetch2.
bool hasInterrupt(ThreadID thread_id)
Checks if a specific thread has an interrupt.
Definition: execute.cc:1614
A load/store queue that allows outstanding reads and writes.
cbk_int func interrupt
Definition: gpu_nomali.cc:94
bool processMoreThanOneInput
If true, more than one input line can be processed each cycle if there is room to execute more instru...
Definition: execute.hh:89
A scoreboard of register dependencies including, for each register: The number of in-flight instructi...
Definition: scoreboard.hh:59
Stats::Scalar numDiscardedOps
Number of ops discarded before committing.
Definition: stats.hh:67
ThreadID checkInterrupts(BranchData &branch, bool &interrupted)
Check all threads for possible interrupts.
Definition: execute.cc:1579
bool alreadyPushed()
Have we already pushed onto this pipe without advancing.
Definition: buffers.hh:338
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:171
DrainState drainState
State progression for draining NotDraining -> ...
Definition: execute.hh:197
void sendStoreToStoreBuffer(LSQRequestPtr request)
A store has been committed, please move it to the store buffer.
Definition: lsq.cc:1434
bool suppress
If true, instructions matching this mask/match should not be issued in this FU.
Definition: func_unit.hh:108
TimingExpr * extraCommitLatExpr
Definition: func_unit.hh:113
int size()
Definition: pagetable.hh:146
Forward data between Execute and Fetch1 carrying change-of-address/stream information.
Definition: pipe_data.hh:64
ThreadID interruptPriority
Definition: execute.hh:202
Cycles cyclesBeforeInsert()
How many cycles must pass from curCycle before insertion into the pipeline is allowed. ...
Definition: func_unit.cc:170
bool lastCommitWasEndOfMacroop
The last commit was the end of a full instruction so an interrupt can safely happen.
Definition: execute.hh:178
Enums::ThreadPolicy threadPolicy
Thread Scheduling Policy (RoundRobin, Random, etc)
Definition: cpu.hh:114
bool instIsRightStream(MinorDynInstPtr inst)
Does the given instruction have the right stream sequence number to be committed? ...
Definition: execute.cc:1844
bool isStreamChange() const
As static isStreamChange but on this branch data.
Definition: pipe_data.hh:153
bool isError() const
Definition: packet.hh:528
bool readPredicate() override
void advance()
Step the pipeline.
Definition: func_unit.cc:185
static MinorDynInstPtr bubble()
There is a single bubble inst.
Definition: dyn_inst.hh:244
static const OpClass Num_OpClasses
Definition: op_class.hh:92
GenericISA::SimplePCState< MachInst > PCState
Definition: types.hh:43
void minorTrace() const
Definition: lsq.cc:1532
bool executeMemRefInst(MinorDynInstPtr inst, BranchData &branch, bool &failed_predicate, Fault &fault)
Execute a memory reference instruction.
Definition: execute.cc:444
PacketPtr packet
Definition: lsq.hh:143
Stats::Vector2d committedInstType
Number of instructions by type (OpClass)
Definition: stats.hh:80
void minorTrace() const
Report buffer states from 'slot' 'from' to 'to'.
Definition: buffers.hh:189
std::vector< Minor::MinorThread * > threads
These are thread state-representing objects for this CPU.
Definition: cpu.hh:95
bool commitInst(MinorDynInstPtr inst, bool early_memory_issue, BranchData &branch, Fault &fault, bool &committed, bool &completed_mem_issue)
Commit a single instruction.
Definition: execute.cc:870
std::vector< ExecuteThreadInfo > executeInfo
Definition: execute.hh:200
#define MINORTRACE(...)
DPRINTFN for MinorTrace reporting.
Definition: trace.hh:62
std::vector< Scoreboard > scoreboard
Scoreboard of instruction dependencies.
Definition: execute.hh:119
virtual Status status() const =0
Temporarily inactive.
bool isBubble() const
Definition: pipe_data.hh:150
void push(ElemType &elem)
Write an element to the back of the pipeline.
Definition: buffers.hh:324
const T * getConstPtr() const
Definition: packet.hh:967
void minorTrace() const
Definition: execute.cc:1625
unsigned getSize() const
Definition: packet.hh:649
MinorCPU is an in-order CPU model with four fixed pipeline stages:
Definition: cpu.hh:79
bool isDrained()
After thread suspension, has Execute been drained of in-flight instructions and memory accesses...
Definition: execute.cc:1818
Encapsulate wires on either input or output of the latch.
Definition: buffers.hh:243
Latch< ForwardInstData >::Output inp
Input port carrying instructions from Decode.
Definition: execute.hh:66
Cycles extraCommitLat
Extra latency that the instruction should spend at the end of the pipeline.
Definition: func_unit.hh:112
Minor::MinorStats stats
Processor-specific statistics.
Definition: cpu.hh:136
bool canInsert() const
Can an instruction be inserted now?
Definition: func_unit.cc:179
void evaluate()
Pass on input/buffer data to the output if you can.
Definition: execute.cc:1393
bool stalled
If true, advance will not advance the pipeline.
Definition: buffers.hh:295
unsigned int drain()
Like the drain interface on SimObject.
Definition: execute.cc:1796
std::shared_ptr< FaultBase > Fault
Definition: types.hh:184
MinorCPU::MinorCPUPort & getDcachePort()
Returns the DcachePort owned by this Execute to pass upwards.
Definition: execute.cc:1861
std::vector< bool > cantForwardFromFUIndices
FUs which this pipeline can't receive a forwarded (i.e.
Definition: func_unit.hh:238
unsigned int issueLimit
Number of instructions that can be issued per cycle.
Definition: execute.hh:75
bool setTraceTimeOnIssue
Modify instruction trace times on issue.
Definition: execute.hh:105
MinorDynInstPtr inst
Definition: func_unit.hh:201
Forward flowing data between Fetch2,Decode,Execute carrying a packet of instructions of a width appro...
Definition: pipe_data.hh:255
Queue< QueuedInst, ReportTraitsAdaptor< QueuedInst > > * inFUMemInsts
Memory ref instructions still in the FUs.
Definition: execute.hh:170
A functional unit configured from a MinorFU object.
Definition: func_unit.hh:224
const ForwardInstData * getInput(ThreadID tid)
Get a piece of data to work on from the inputBuffer, or 0 if there is no data.
Definition: execute.cc:194
void updateBranchData(ThreadID tid, BranchData::Reason reason, MinorDynInstPtr inst, const TheISA::PCState &target, BranchData &branch)
Actually create a branch to communicate to Fetch1/Fetch2 and, if that is a stream-changing branch upd...
Definition: execute.cc:294
Addr getAddr() const
Definition: packet.hh:639
bool needsToTick()
May need to be ticked next cycle as one of the queues contains an actionable transfers or address tra...
Definition: lsq.cc:1454
bool isInterrupted(ThreadID thread_id) const
Has an interrupt been raised.
Definition: execute.cc:408

Generated on Fri Jun 9 2017 13:03:42 for gem5 by doxygen 1.8.6