Clock: Add a Cycles wrapper class and use where applicable
This patch addresses the comments and feedback on the preceding patch that reworks the clocks, and now more clearly shows where cycles (relative cycle counts) are used to express time. Instead of bumping the existing patch I chose to make this a separate patch, merely to try and focus the discussion around a smaller set of changes. The two patches will be pushed together though. The changes done as part of this patch mostly follow directly from the introduction of the wrapper class, and change enough code to make things compile and run again. There are definitely more places where int/uint/Tick is still used to represent cycles, and it will take some time to chase them all down. Similarly, a lot of parameters should be changed from Param.Tick and Param.Unsigned to Param.Cycles. In addition, the use of curTick is questionable as there should not be an absolute cycle. Potential solutions can be built on top of this patch. There is a similar situation in the o3 CPU where lastRunningCycle is currently counting in Cycles, and is still an absolute time. In other words, there is more discussion to be had. An additional change that would be appropriate in the future is to perform a similar wrapping of Tick, and probably also to introduce a Ticks class along with suitable operators for all these classes.
This commit is contained in:
@@ -139,8 +139,8 @@ class BaseCPU(MemObject):
|
||||
"terminate when all threads have reached this load count")
|
||||
max_loads_any_thread = Param.Counter(0,
|
||||
"terminate when any thread reaches this load count")
|
||||
progress_interval = Param.Tick(0,
|
||||
"interval to print out the progress message")
|
||||
progress_interval = Param.Frequency('0Hz',
|
||||
"frequency to print out the progress message")
|
||||
|
||||
defer_registration = Param.Bool(False,
|
||||
"defer registration with system (for sampling)")
|
||||
|
||||
@@ -246,7 +246,7 @@ class BaseCPU : public MemObject
|
||||
/// Notify the CPU that the indicated context is now active. The
|
||||
/// delay parameter indicates the number of cycles to wait before
|
||||
/// executing (typically 0 or 1).
|
||||
virtual void activateContext(ThreadID thread_num, int delay) {}
|
||||
virtual void activateContext(ThreadID thread_num, Cycles delay) {}
|
||||
|
||||
/// Notify the CPU that the indicated context is now suspended.
|
||||
virtual void suspendContext(ThreadID thread_num) {}
|
||||
|
||||
@@ -156,13 +156,14 @@ class CheckerThreadContext : public ThreadContext
|
||||
|
||||
/// Set the status to Active. Optional delay indicates number of
|
||||
/// cycles to wait before beginning execution.
|
||||
void activate(int delay = 1) { actualTC->activate(delay); }
|
||||
void activate(Cycles delay = Cycles(1))
|
||||
{ actualTC->activate(delay); }
|
||||
|
||||
/// Set the status to Suspended.
|
||||
void suspend(int delay) { actualTC->suspend(delay); }
|
||||
void suspend(Cycles delay) { actualTC->suspend(delay); }
|
||||
|
||||
/// Set the status to Halted.
|
||||
void halt(int delay) { actualTC->halt(delay); }
|
||||
void halt(Cycles delay) { actualTC->halt(delay); }
|
||||
|
||||
void dumpFuncProfile() { actualTC->dumpFuncProfile(); }
|
||||
|
||||
|
||||
@@ -209,7 +209,7 @@ InOrderCPU::CPUEvent::description() const
|
||||
}
|
||||
|
||||
void
|
||||
InOrderCPU::CPUEvent::scheduleEvent(int delay)
|
||||
InOrderCPU::CPUEvent::scheduleEvent(Cycles delay)
|
||||
{
|
||||
assert(!scheduled() || squashed());
|
||||
cpu->reschedule(this, cpu->clockEdge(delay), true);
|
||||
@@ -407,7 +407,7 @@ InOrderCPU::InOrderCPU(Params *params)
|
||||
lockFlag = false;
|
||||
|
||||
// Schedule First Tick Event, CPU will reschedule itself from here on out.
|
||||
scheduleTickEvent(0);
|
||||
scheduleTickEvent(Cycles(0));
|
||||
}
|
||||
|
||||
InOrderCPU::~InOrderCPU()
|
||||
@@ -769,9 +769,9 @@ InOrderCPU::tick()
|
||||
} else {
|
||||
//Tick next_tick = curTick() + cycles(1);
|
||||
//tickEvent.schedule(next_tick);
|
||||
schedule(&tickEvent, clockEdge(1));
|
||||
schedule(&tickEvent, clockEdge(Cycles(1)));
|
||||
DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n",
|
||||
clockEdge(1));
|
||||
clockEdge(Cycles(1)));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -877,7 +877,7 @@ InOrderCPU::checkForInterrupts()
|
||||
// Schedule Squash Through-out Resource Pool
|
||||
resPool->scheduleEvent(
|
||||
(InOrderCPU::CPUEventType)ResourcePool::SquashAll,
|
||||
dummyTrapInst[tid], 0);
|
||||
dummyTrapInst[tid], Cycles(0));
|
||||
|
||||
// Finally, Setup Trap to happen at end of cycle
|
||||
trapContext(interrupt, tid, dummyTrapInst[tid]);
|
||||
@@ -912,7 +912,8 @@ InOrderCPU::processInterrupts(Fault interrupt)
|
||||
}
|
||||
|
||||
void
|
||||
InOrderCPU::trapContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay)
|
||||
InOrderCPU::trapContext(Fault fault, ThreadID tid, DynInstPtr inst,
|
||||
Cycles delay)
|
||||
{
|
||||
scheduleCpuEvent(Trap, fault, tid, inst, delay);
|
||||
trapPending[tid] = true;
|
||||
@@ -926,7 +927,8 @@ InOrderCPU::trap(Fault fault, ThreadID tid, DynInstPtr inst)
|
||||
}
|
||||
|
||||
void
|
||||
InOrderCPU::squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay)
|
||||
InOrderCPU::squashFromMemStall(DynInstPtr inst, ThreadID tid,
|
||||
Cycles delay)
|
||||
{
|
||||
scheduleCpuEvent(SquashFromMemStall, NoFault, tid, inst, delay);
|
||||
}
|
||||
@@ -954,7 +956,7 @@ InOrderCPU::squashDueToMemStall(int stage_num, InstSeqNum seq_num,
|
||||
void
|
||||
InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault,
|
||||
ThreadID tid, DynInstPtr inst,
|
||||
unsigned delay, CPUEventPri event_pri)
|
||||
Cycles delay, CPUEventPri event_pri)
|
||||
{
|
||||
CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, inst,
|
||||
event_pri);
|
||||
@@ -967,7 +969,8 @@ InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault,
|
||||
// Broadcast event to the Resource Pool
|
||||
// Need to reset tid just in case this is a dummy instruction
|
||||
inst->setTid(tid);
|
||||
resPool->scheduleEvent(c_event, inst, 0, 0, tid);
|
||||
// @todo: Is this really right? Should the delay not be passed on?
|
||||
resPool->scheduleEvent(c_event, inst, Cycles(0), 0, tid);
|
||||
}
|
||||
|
||||
bool
|
||||
@@ -1071,7 +1074,7 @@ InOrderCPU::activateThreadInPipeline(ThreadID tid)
|
||||
}
|
||||
|
||||
void
|
||||
InOrderCPU::deactivateContext(ThreadID tid, int delay)
|
||||
InOrderCPU::deactivateContext(ThreadID tid, Cycles delay)
|
||||
{
|
||||
DPRINTF(InOrderCPU,"[tid:%i]: Deactivating ...\n", tid);
|
||||
|
||||
@@ -1153,7 +1156,7 @@ InOrderCPU::tickThreadStats()
|
||||
}
|
||||
|
||||
void
|
||||
InOrderCPU::activateContext(ThreadID tid, int delay)
|
||||
InOrderCPU::activateContext(ThreadID tid, Cycles delay)
|
||||
{
|
||||
DPRINTF(InOrderCPU,"[tid:%i]: Activating ...\n", tid);
|
||||
|
||||
@@ -1168,7 +1171,7 @@ InOrderCPU::activateContext(ThreadID tid, int delay)
|
||||
}
|
||||
|
||||
void
|
||||
InOrderCPU::activateNextReadyContext(int delay)
|
||||
InOrderCPU::activateNextReadyContext(Cycles delay)
|
||||
{
|
||||
DPRINTF(InOrderCPU,"Activating next ready thread\n");
|
||||
|
||||
@@ -1719,7 +1722,8 @@ InOrderCPU::wakeup()
|
||||
}
|
||||
|
||||
void
|
||||
InOrderCPU::syscallContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay)
|
||||
InOrderCPU::syscallContext(Fault fault, ThreadID tid, DynInstPtr inst,
|
||||
Cycles delay)
|
||||
{
|
||||
// Syscall must be non-speculative, so squash from last stage
|
||||
unsigned squash_stage = NumStages - 1;
|
||||
@@ -1730,7 +1734,8 @@ InOrderCPU::syscallContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay
|
||||
|
||||
// Schedule Squash Through-out Resource Pool
|
||||
resPool->scheduleEvent(
|
||||
(InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst, 0);
|
||||
(InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst,
|
||||
Cycles(0));
|
||||
scheduleCpuEvent(Syscall, fault, tid, inst, delay, Syscall_Pri);
|
||||
}
|
||||
|
||||
|
||||
@@ -201,7 +201,7 @@ class InOrderCPU : public BaseCPU
|
||||
TickEvent tickEvent;
|
||||
|
||||
/** Schedule tick event, regardless of its current state. */
|
||||
void scheduleTickEvent(int delay)
|
||||
void scheduleTickEvent(Cycles delay)
|
||||
{
|
||||
assert(!tickEvent.scheduled() || tickEvent.squashed());
|
||||
reschedule(&tickEvent, clockEdge(delay), true);
|
||||
@@ -279,7 +279,7 @@ class InOrderCPU : public BaseCPU
|
||||
const char *description() const;
|
||||
|
||||
/** Schedule Event */
|
||||
void scheduleEvent(int delay);
|
||||
void scheduleEvent(Cycles delay);
|
||||
|
||||
/** Unschedule This Event */
|
||||
void unscheduleEvent();
|
||||
@@ -287,7 +287,7 @@ class InOrderCPU : public BaseCPU
|
||||
|
||||
/** Schedule a CPU Event */
|
||||
void scheduleCpuEvent(CPUEventType cpu_event, Fault fault, ThreadID tid,
|
||||
DynInstPtr inst, unsigned delay = 0,
|
||||
DynInstPtr inst, Cycles delay = Cycles(0),
|
||||
CPUEventPri event_pri = InOrderCPU_Pri);
|
||||
|
||||
public:
|
||||
@@ -479,19 +479,20 @@ class InOrderCPU : public BaseCPU
|
||||
|
||||
/** Schedule a syscall on the CPU */
|
||||
void syscallContext(Fault fault, ThreadID tid, DynInstPtr inst,
|
||||
int delay = 0);
|
||||
Cycles delay = Cycles(0));
|
||||
|
||||
/** Executes a syscall.*/
|
||||
void syscall(int64_t callnum, ThreadID tid);
|
||||
|
||||
/** Schedule a trap on the CPU */
|
||||
void trapContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay = 0);
|
||||
void trapContext(Fault fault, ThreadID tid, DynInstPtr inst,
|
||||
Cycles delay = Cycles(0));
|
||||
|
||||
/** Perform trap to Handle Given Fault */
|
||||
void trap(Fault fault, ThreadID tid, DynInstPtr inst);
|
||||
|
||||
/** Schedule thread activation on the CPU */
|
||||
void activateContext(ThreadID tid, int delay = 0);
|
||||
void activateContext(ThreadID tid, Cycles delay = Cycles(0));
|
||||
|
||||
/** Add Thread to Active Threads List. */
|
||||
void activateThread(ThreadID tid);
|
||||
@@ -500,13 +501,13 @@ class InOrderCPU : public BaseCPU
|
||||
void activateThreadInPipeline(ThreadID tid);
|
||||
|
||||
/** Schedule Thread Activation from Ready List */
|
||||
void activateNextReadyContext(int delay = 0);
|
||||
void activateNextReadyContext(Cycles delay = Cycles(0));
|
||||
|
||||
/** Add Thread From Ready List to Active Threads List. */
|
||||
void activateNextReadyThread();
|
||||
|
||||
/** Schedule a thread deactivation on the CPU */
|
||||
void deactivateContext(ThreadID tid, int delay = 0);
|
||||
void deactivateContext(ThreadID tid, Cycles delay = Cycles(0));
|
||||
|
||||
/** Remove from Active Thread List */
|
||||
void deactivateThread(ThreadID tid);
|
||||
@@ -529,7 +530,8 @@ class InOrderCPU : public BaseCPU
|
||||
* squashDueToMemStall() - squashes pipeline
|
||||
* @note: maybe squashContext/squashThread would be better?
|
||||
*/
|
||||
void squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay = 0);
|
||||
void squashFromMemStall(DynInstPtr inst, ThreadID tid,
|
||||
Cycles delay = Cycles(0));
|
||||
void squashDueToMemStall(int stage_num, InstSeqNum seq_num, ThreadID tid);
|
||||
|
||||
void removePipelineStalls(ThreadID tid);
|
||||
|
||||
@@ -556,7 +556,7 @@ PipelineStage::activateThread(ThreadID tid)
|
||||
// prevent "double"-execution of instructions
|
||||
cpu->resPool->scheduleEvent((InOrderCPU::CPUEventType)
|
||||
ResourcePool::UpdateAfterContextSwitch,
|
||||
inst, 0, 0, tid);
|
||||
inst, Cycles(0), 0, tid);
|
||||
|
||||
// Clear switchout buffer
|
||||
switchedOutBuffer[tid] = NULL;
|
||||
|
||||
@@ -44,7 +44,7 @@
|
||||
using namespace std;
|
||||
|
||||
Resource::Resource(string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu)
|
||||
Cycles res_latency, InOrderCPU *_cpu)
|
||||
: resName(res_name), id(res_id),
|
||||
width(res_width), latency(res_latency), cpu(_cpu),
|
||||
resourceEvent(NULL)
|
||||
@@ -76,7 +76,7 @@ Resource::init()
|
||||
// If the resource has a zero-cycle (no latency)
|
||||
// function, then no reason to have events
|
||||
// that will process them for the right tick
|
||||
if (latency > 0)
|
||||
if (latency > Cycles(0))
|
||||
resourceEvent = new ResourceEvent[width];
|
||||
|
||||
|
||||
@@ -296,7 +296,8 @@ Resource::setupSquash(DynInstPtr inst, int stage_num, ThreadID tid)
|
||||
|
||||
// Schedule Squash Through-out Resource Pool
|
||||
cpu->resPool->scheduleEvent(
|
||||
(InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst, 0);
|
||||
(InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst,
|
||||
Cycles(0));
|
||||
}
|
||||
|
||||
void
|
||||
@@ -321,7 +322,7 @@ Resource::squash(DynInstPtr inst, int stage_num, InstSeqNum squash_seq_num,
|
||||
|
||||
int req_slot_num = req_ptr->getSlot();
|
||||
|
||||
if (latency > 0) {
|
||||
if (latency > Cycles(0)) {
|
||||
if (resourceEvent[req_slot_num].scheduled())
|
||||
unscheduleEvent(req_slot_num);
|
||||
}
|
||||
@@ -362,17 +363,10 @@ Resource::squashThenTrap(int stage_num, DynInstPtr inst)
|
||||
cpu->trapContext(inst->fault, tid, inst);
|
||||
}
|
||||
|
||||
Tick
|
||||
Resource::ticks(int num_cycles)
|
||||
{
|
||||
return cpu->ticks(num_cycles);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
Resource::scheduleExecution(int slot_num)
|
||||
{
|
||||
if (latency > 0) {
|
||||
if (latency > Cycles(0)) {
|
||||
scheduleEvent(slot_num, latency);
|
||||
} else {
|
||||
execute(slot_num);
|
||||
@@ -380,17 +374,17 @@ Resource::scheduleExecution(int slot_num)
|
||||
}
|
||||
|
||||
void
|
||||
Resource::scheduleEvent(int slot_idx, int delay)
|
||||
Resource::scheduleEvent(int slot_idx, Cycles delay)
|
||||
{
|
||||
DPRINTF(Resource, "[tid:%i]: Scheduling event for [sn:%i] on tick %i.\n",
|
||||
reqs[slot_idx]->inst->readTid(),
|
||||
reqs[slot_idx]->inst->seqNum,
|
||||
cpu->ticks(delay) + curTick());
|
||||
cpu->clockEdge(delay));
|
||||
resourceEvent[slot_idx].scheduleEvent(delay);
|
||||
}
|
||||
|
||||
bool
|
||||
Resource::scheduleEvent(DynInstPtr inst, int delay)
|
||||
Resource::scheduleEvent(DynInstPtr inst, Cycles delay)
|
||||
{
|
||||
int slot_idx = findSlot(inst);
|
||||
|
||||
@@ -521,9 +515,9 @@ ResourceEvent::description() const
|
||||
}
|
||||
|
||||
void
|
||||
ResourceEvent::scheduleEvent(int delay)
|
||||
ResourceEvent::scheduleEvent(Cycles delay)
|
||||
{
|
||||
assert(!scheduled() || squashed());
|
||||
resource->cpu->reschedule(this,
|
||||
curTick() + resource->ticks(delay), true);
|
||||
resource->cpu->clockEdge(delay), true);
|
||||
}
|
||||
|
||||
@@ -63,7 +63,7 @@ class Resource {
|
||||
|
||||
public:
|
||||
Resource(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu);
|
||||
Cycles res_latency, InOrderCPU *_cpu);
|
||||
virtual ~Resource();
|
||||
|
||||
|
||||
@@ -178,11 +178,11 @@ class Resource {
|
||||
int slotsInUse();
|
||||
|
||||
/** Schedule resource event, regardless of its current state. */
|
||||
void scheduleEvent(int slot_idx, int delay);
|
||||
void scheduleEvent(int slot_idx, Cycles delay);
|
||||
|
||||
/** Find instruction in list, Schedule resource event, regardless of its
|
||||
* current state. */
|
||||
bool scheduleEvent(DynInstPtr inst, int delay);
|
||||
bool scheduleEvent(DynInstPtr inst, Cycles delay);
|
||||
|
||||
/** Unschedule resource event, regardless of its current state. */
|
||||
void unscheduleEvent(int slot_idx);
|
||||
@@ -190,9 +190,6 @@ class Resource {
|
||||
/** Unschedule resource event, regardless of its current state. */
|
||||
bool unscheduleEvent(DynInstPtr inst);
|
||||
|
||||
/** Return the number of cycles in 'Tick' format */
|
||||
Tick ticks(int numCycles);
|
||||
|
||||
/** Find the request that corresponds to this instruction */
|
||||
virtual ResReqPtr findRequest(DynInstPtr inst);
|
||||
|
||||
@@ -206,7 +203,7 @@ class Resource {
|
||||
|
||||
/** Return Latency of Resource */
|
||||
/* Can be overridden for complex cases */
|
||||
virtual int getLatency(int slot_num) { return latency; }
|
||||
virtual Cycles getLatency(int slot_num) { return latency; }
|
||||
|
||||
protected:
|
||||
/** The name of this resource */
|
||||
@@ -226,7 +223,7 @@ class Resource {
|
||||
* Note: Dynamic latency resources set this to 0 and
|
||||
* manage the latency themselves
|
||||
*/
|
||||
const int latency;
|
||||
const Cycles latency;
|
||||
|
||||
public:
|
||||
/** List of all Requests the Resource is Servicing. Each request
|
||||
@@ -287,7 +284,7 @@ class ResourceEvent : public Event
|
||||
void setSlot(int slot) { slotIdx = slot; }
|
||||
|
||||
/** Schedule resource event, regardless of its current state. */
|
||||
void scheduleEvent(int delay);
|
||||
void scheduleEvent(Cycles delay);
|
||||
|
||||
/** Unschedule resource event, regardless of its current state. */
|
||||
void unscheduleEvent()
|
||||
|
||||
@@ -64,54 +64,57 @@ ResourcePool::ResourcePool(InOrderCPU *_cpu, ThePipeline::Params *params)
|
||||
// name - id - bandwidth - latency - CPU - Parameters
|
||||
// --------------------------------------------------
|
||||
resources.push_back(new FetchSeqUnit("fetch_seq_unit", FetchSeq,
|
||||
stage_width * 2, 0, _cpu, params));
|
||||
stage_width * 2, Cycles(0),
|
||||
_cpu, params));
|
||||
|
||||
// Keep track of the instruction fetch unit so we can easily
|
||||
// provide a pointer to it in the CPU.
|
||||
instUnit = new FetchUnit("icache_port", ICache,
|
||||
stage_width * 2 + MaxThreads, 0, _cpu,
|
||||
stage_width * 2 + MaxThreads, Cycles(0), _cpu,
|
||||
params);
|
||||
resources.push_back(instUnit);
|
||||
|
||||
resources.push_back(new DecodeUnit("decode_unit", Decode,
|
||||
stage_width, 0, _cpu, params));
|
||||
stage_width, Cycles(0), _cpu,
|
||||
params));
|
||||
|
||||
resources.push_back(new BranchPredictor("branch_predictor", BPred,
|
||||
stage_width, 0, _cpu, params));
|
||||
stage_width, Cycles(0),
|
||||
_cpu, params));
|
||||
|
||||
resources.push_back(new InstBuffer("fetch_buffer_t0", FetchBuff, 4,
|
||||
0, _cpu, params));
|
||||
Cycles(0), _cpu, params));
|
||||
|
||||
resources.push_back(new UseDefUnit("regfile_manager", RegManager,
|
||||
stage_width * 3, 0, _cpu,
|
||||
stage_width * 3, Cycles(0), _cpu,
|
||||
params));
|
||||
|
||||
resources.push_back(new AGENUnit("agen_unit", AGEN,
|
||||
stage_width, 0, _cpu, params));
|
||||
stage_width, Cycles(0), _cpu,
|
||||
params));
|
||||
|
||||
resources.push_back(new ExecutionUnit("execution_unit", ExecUnit,
|
||||
stage_width, 0, _cpu, params));
|
||||
stage_width, Cycles(0), _cpu,
|
||||
params));
|
||||
|
||||
resources.push_back(new MultDivUnit("mult_div_unit", MDU,
|
||||
stage_width * 2,
|
||||
0,
|
||||
_cpu,
|
||||
params));
|
||||
stage_width * 2, Cycles(0),
|
||||
_cpu, params));
|
||||
|
||||
// Keep track of the data load/store unit so we can easily provide
|
||||
// a pointer to it in the CPU.
|
||||
dataUnit = new CacheUnit("dcache_port", DCache,
|
||||
stage_width * 2 + MaxThreads, 0, _cpu,
|
||||
stage_width * 2 + MaxThreads, Cycles(0), _cpu,
|
||||
params);
|
||||
resources.push_back(dataUnit);
|
||||
|
||||
gradObjects.push_back(BPred);
|
||||
resources.push_back(new GraduationUnit("graduation_unit", Grad,
|
||||
stage_width, 0, _cpu,
|
||||
stage_width, Cycles(0), _cpu,
|
||||
params));
|
||||
|
||||
resources.push_back(new InstBuffer("fetch_buffer_t1", FetchBuff2, 4,
|
||||
0, _cpu, params));
|
||||
Cycles(0), _cpu, params));
|
||||
|
||||
}
|
||||
|
||||
@@ -234,7 +237,7 @@ ResourcePool::slotsInUse(int res_idx)
|
||||
// to the event construction
|
||||
void
|
||||
ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
|
||||
int delay, int res_idx, ThreadID tid)
|
||||
Cycles delay, int res_idx, ThreadID tid)
|
||||
{
|
||||
assert(delay >= 0);
|
||||
|
||||
@@ -456,7 +459,7 @@ ResourcePool::ResPoolEvent::description() const
|
||||
|
||||
/** Schedule resource event, regardless of its current state. */
|
||||
void
|
||||
ResourcePool::ResPoolEvent::scheduleEvent(int delay)
|
||||
ResourcePool::ResPoolEvent::scheduleEvent(Cycles delay)
|
||||
{
|
||||
InOrderCPU *cpu = resPool->cpu;
|
||||
assert(!scheduled() || squashed());
|
||||
|
||||
@@ -132,7 +132,7 @@ class ResourcePool {
|
||||
const char *description() const;
|
||||
|
||||
/** Schedule Event */
|
||||
void scheduleEvent(int delay);
|
||||
void scheduleEvent(Cycles delay);
|
||||
|
||||
/** Unschedule This Event */
|
||||
void unscheduleEvent();
|
||||
@@ -206,7 +206,8 @@ class ResourcePool {
|
||||
|
||||
/** Schedule resource event, regardless of its current state. */
|
||||
void scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst = NULL,
|
||||
int delay = 0, int res_idx = 0, ThreadID tid = 0);
|
||||
Cycles delay = Cycles(0), int res_idx = 0,
|
||||
ThreadID tid = 0);
|
||||
|
||||
/** UnSchedule resource event, regardless of its current state. */
|
||||
void unscheduleEvent(int res_idx, DynInstPtr inst);
|
||||
|
||||
@@ -33,7 +33,7 @@
|
||||
#include "debug/InOrderAGEN.hh"
|
||||
|
||||
AGENUnit::AGENUnit(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu,
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params)
|
||||
: Resource(res_name, res_id, res_width, res_latency, _cpu)
|
||||
{ }
|
||||
|
||||
@@ -48,7 +48,8 @@ class AGENUnit : public Resource {
|
||||
|
||||
public:
|
||||
AGENUnit(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params);
|
||||
|
||||
enum Command {
|
||||
GenerateAddr
|
||||
|
||||
@@ -39,8 +39,9 @@ using namespace std;
|
||||
using namespace TheISA;
|
||||
using namespace ThePipeline;
|
||||
|
||||
BranchPredictor::BranchPredictor(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu,
|
||||
BranchPredictor::BranchPredictor(std::string res_name, int res_id,
|
||||
int res_width, Cycles res_latency,
|
||||
InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params)
|
||||
: Resource(res_name, res_id, res_width, res_latency, _cpu),
|
||||
branchPred(this, params)
|
||||
|
||||
@@ -54,7 +54,8 @@ class BranchPredictor : public Resource {
|
||||
|
||||
public:
|
||||
BranchPredictor(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params);
|
||||
|
||||
void regStats();
|
||||
|
||||
|
||||
@@ -67,7 +67,8 @@ printMemData(uint8_t *data, unsigned size)
|
||||
#endif
|
||||
|
||||
CacheUnit::CacheUnit(string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params)
|
||||
: Resource(res_name, res_id, res_width, res_latency, _cpu),
|
||||
cachePort(NULL), cachePortBlocked(false)
|
||||
{
|
||||
|
||||
@@ -58,7 +58,8 @@ class CacheUnit : public Resource
|
||||
|
||||
public:
|
||||
CacheUnit(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params);
|
||||
|
||||
enum Command {
|
||||
InitiateReadData,
|
||||
|
||||
@@ -40,7 +40,7 @@ using namespace ThePipeline;
|
||||
using namespace std;
|
||||
|
||||
DecodeUnit::DecodeUnit(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu,
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params)
|
||||
: Resource(res_name, res_id, res_width, res_latency, _cpu)
|
||||
{
|
||||
|
||||
@@ -48,7 +48,8 @@ class DecodeUnit : public Resource {
|
||||
|
||||
public:
|
||||
DecodeUnit(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params);
|
||||
|
||||
enum Command {
|
||||
DecodeInst
|
||||
|
||||
@@ -44,7 +44,7 @@ using namespace std;
|
||||
using namespace ThePipeline;
|
||||
|
||||
ExecutionUnit::ExecutionUnit(string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu,
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params)
|
||||
: Resource(res_name, res_id, res_width, res_latency, _cpu),
|
||||
lastExecuteTick(0), lastControlTick(0)
|
||||
|
||||
@@ -51,7 +51,8 @@ class ExecutionUnit : public Resource {
|
||||
|
||||
public:
|
||||
ExecutionUnit(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params);
|
||||
|
||||
public:
|
||||
void regStats();
|
||||
|
||||
@@ -40,7 +40,7 @@ using namespace TheISA;
|
||||
using namespace ThePipeline;
|
||||
|
||||
FetchSeqUnit::FetchSeqUnit(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu,
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params)
|
||||
: Resource(res_name, res_id, res_width, res_latency, _cpu),
|
||||
instSize(sizeof(MachInst))
|
||||
|
||||
@@ -54,7 +54,8 @@ class FetchSeqUnit : public Resource {
|
||||
|
||||
public:
|
||||
FetchSeqUnit(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params);
|
||||
~FetchSeqUnit();
|
||||
|
||||
void init();
|
||||
|
||||
@@ -53,7 +53,7 @@ using namespace TheISA;
|
||||
using namespace ThePipeline;
|
||||
|
||||
FetchUnit::FetchUnit(string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu,
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params)
|
||||
: CacheUnit(res_name, res_id, res_width, res_latency, _cpu, params),
|
||||
instSize(sizeof(TheISA::MachInst)), fetchBuffSize(params->fetchBuffSize)
|
||||
|
||||
@@ -53,7 +53,8 @@ class FetchUnit : public CacheUnit
|
||||
{
|
||||
public:
|
||||
FetchUnit(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params);
|
||||
|
||||
virtual ~FetchUnit();
|
||||
|
||||
|
||||
@@ -35,7 +35,7 @@
|
||||
using namespace ThePipeline;
|
||||
|
||||
GraduationUnit::GraduationUnit(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu,
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params)
|
||||
: Resource(res_name, res_id, res_width, res_latency, _cpu)
|
||||
{
|
||||
|
||||
@@ -52,7 +52,7 @@ class GraduationUnit : public Resource {
|
||||
|
||||
public:
|
||||
GraduationUnit(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu,
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params);
|
||||
|
||||
void execute(int slot_num);
|
||||
|
||||
@@ -45,7 +45,7 @@ using namespace TheISA;
|
||||
using namespace ThePipeline;
|
||||
|
||||
InstBuffer::InstBuffer(string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu,
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params)
|
||||
: Resource(res_name, res_id, res_width, res_latency, _cpu)
|
||||
{ }
|
||||
|
||||
@@ -56,7 +56,8 @@ class InstBuffer : public Resource {
|
||||
|
||||
public:
|
||||
InstBuffer(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params);
|
||||
|
||||
void regStats();
|
||||
|
||||
|
||||
@@ -47,7 +47,7 @@ class MemDepUnit : public Resource {
|
||||
|
||||
public:
|
||||
MemDepUnit(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu);
|
||||
Cycles res_latency, InOrderCPU *_cpu);
|
||||
virtual ~MemDepUnit() {}
|
||||
|
||||
virtual void execute(int slot_num);
|
||||
|
||||
@@ -43,7 +43,7 @@ using namespace std;
|
||||
using namespace ThePipeline;
|
||||
|
||||
MultDivUnit::MultDivUnit(string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu,
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params)
|
||||
: Resource(res_name, res_id, res_width, res_latency, _cpu),
|
||||
multRepeatRate(params->multRepeatRate),
|
||||
|
||||
@@ -56,7 +56,7 @@ class MultDivUnit : public Resource {
|
||||
|
||||
public:
|
||||
MultDivUnit(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu,
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params);
|
||||
|
||||
public:
|
||||
@@ -86,23 +86,23 @@ class MultDivUnit : public Resource {
|
||||
protected:
|
||||
/** Latency & Repeat Rate for Multiply Insts */
|
||||
unsigned multRepeatRate;
|
||||
unsigned multLatency;
|
||||
Cycles multLatency;
|
||||
|
||||
/** Latency & Repeat Rate for 8-bit Divide Insts */
|
||||
unsigned div8RepeatRate;
|
||||
unsigned div8Latency;
|
||||
Cycles div8Latency;
|
||||
|
||||
/** Latency & Repeat Rate for 16-bit Divide Insts */
|
||||
unsigned div16RepeatRate;
|
||||
unsigned div16Latency;
|
||||
Cycles div16Latency;
|
||||
|
||||
/** Latency & Repeat Rate for 24-bit Divide Insts */
|
||||
unsigned div24RepeatRate;
|
||||
unsigned div24Latency;
|
||||
Cycles div24Latency;
|
||||
|
||||
/** Latency & Repeat Rate for 32-bit Divide Insts */
|
||||
unsigned div32RepeatRate;
|
||||
unsigned div32Latency;
|
||||
Cycles div32Latency;
|
||||
|
||||
/** Last cycle that MDU was used */
|
||||
Tick lastMDUCycle;
|
||||
|
||||
@@ -44,7 +44,8 @@ using namespace TheISA;
|
||||
using namespace ThePipeline;
|
||||
|
||||
TLBUnit::TLBUnit(string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params)
|
||||
: Resource(res_name, res_id, res_width, res_latency, _cpu)
|
||||
{
|
||||
// Hard-Code Selection For Now
|
||||
|
||||
@@ -55,7 +55,8 @@ class TLBUnit : public Resource
|
||||
|
||||
public:
|
||||
TLBUnit(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params);
|
||||
virtual ~TLBUnit() {}
|
||||
|
||||
void init();
|
||||
|
||||
@@ -45,7 +45,7 @@ using namespace TheISA;
|
||||
using namespace ThePipeline;
|
||||
|
||||
UseDefUnit::UseDefUnit(string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu,
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params)
|
||||
: Resource(res_name, res_id, res_width, res_latency, _cpu)
|
||||
{
|
||||
@@ -107,7 +107,7 @@ void
|
||||
UseDefUnit::init()
|
||||
{
|
||||
// Set Up Resource Events to Appropriate Resource BandWidth
|
||||
if (latency > 0) {
|
||||
if (latency > Cycles(0)) {
|
||||
resourceEvent = new ResourceEvent[width];
|
||||
} else {
|
||||
resourceEvent = NULL;
|
||||
|
||||
@@ -56,7 +56,8 @@ class UseDefUnit : public Resource {
|
||||
|
||||
public:
|
||||
UseDefUnit(std::string res_name, int res_id, int res_width,
|
||||
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
|
||||
Cycles res_latency, InOrderCPU *_cpu,
|
||||
ThePipeline::Params *params);
|
||||
|
||||
void init();
|
||||
|
||||
|
||||
@@ -98,7 +98,7 @@ InOrderThreadContext::takeOverFrom(ThreadContext *old_context)
|
||||
}
|
||||
|
||||
void
|
||||
InOrderThreadContext::activate(int delay)
|
||||
InOrderThreadContext::activate(Cycles delay)
|
||||
{
|
||||
DPRINTF(InOrderCPU, "Calling activate on Thread Context %d\n",
|
||||
getThreadNum());
|
||||
@@ -113,7 +113,7 @@ InOrderThreadContext::activate(int delay)
|
||||
|
||||
|
||||
void
|
||||
InOrderThreadContext::suspend(int delay)
|
||||
InOrderThreadContext::suspend(Cycles delay)
|
||||
{
|
||||
DPRINTF(InOrderCPU, "Calling suspend on Thread Context %d\n",
|
||||
getThreadNum());
|
||||
@@ -126,7 +126,7 @@ InOrderThreadContext::suspend(int delay)
|
||||
}
|
||||
|
||||
void
|
||||
InOrderThreadContext::halt(int delay)
|
||||
InOrderThreadContext::halt(Cycles delay)
|
||||
{
|
||||
DPRINTF(InOrderCPU, "Calling halt on Thread Context %d\n",
|
||||
getThreadNum());
|
||||
|
||||
@@ -165,13 +165,13 @@ class InOrderThreadContext : public ThreadContext
|
||||
|
||||
/** Set the status to Active. Optional delay indicates number of
|
||||
* cycles to wait before beginning execution. */
|
||||
void activate(int delay = 1);
|
||||
void activate(Cycles delay = Cycles(1));
|
||||
|
||||
/** Set the status to Suspended. */
|
||||
void suspend(int delay = 0);
|
||||
void suspend(Cycles delay = Cycles(0));
|
||||
|
||||
/** Set the status to Halted. */
|
||||
void halt(int delay = 0);
|
||||
void halt(Cycles delay = Cycles(0));
|
||||
|
||||
/** Takes over execution of a thread from another CPU. */
|
||||
void takeOverFrom(ThreadContext *old_context);
|
||||
@@ -259,7 +259,7 @@ class InOrderThreadContext : public ThreadContext
|
||||
int flattenFloatIndex(int reg)
|
||||
{ return cpu->isa[thread->threadId()].flattenFloatIndex(reg); }
|
||||
|
||||
void activateContext(int delay)
|
||||
void activateContext(Cycles delay)
|
||||
{ cpu->activateContext(thread->threadId(), delay); }
|
||||
|
||||
void deallocateContext()
|
||||
|
||||
@@ -409,7 +409,7 @@ class DefaultCommit
|
||||
/** The latency to handle a trap. Used when scheduling trap
|
||||
* squash event.
|
||||
*/
|
||||
uint trapLatency;
|
||||
Cycles trapLatency;
|
||||
|
||||
/** The interrupt fault. */
|
||||
Fault interrupt;
|
||||
|
||||
@@ -256,7 +256,8 @@ FullO3CPU<Impl>::FullO3CPU(DerivO3CPUParams *params)
|
||||
globalSeqNum(1),
|
||||
system(params->system),
|
||||
drainCount(0),
|
||||
deferRegistration(params->defer_registration)
|
||||
deferRegistration(params->defer_registration),
|
||||
lastRunningCycle(curCycle())
|
||||
{
|
||||
if (!deferRegistration) {
|
||||
_status = Running;
|
||||
@@ -386,8 +387,6 @@ FullO3CPU<Impl>::FullO3CPU(DerivO3CPUParams *params)
|
||||
// Setup the ROB for whichever stages need it.
|
||||
commit.setROB(&rob);
|
||||
|
||||
lastRunningCycle = curCycle();
|
||||
|
||||
lastActivatedCycle = 0;
|
||||
#if 0
|
||||
// Give renameMap & rename stage access to the freeList;
|
||||
@@ -629,7 +628,7 @@ FullO3CPU<Impl>::tick()
|
||||
lastRunningCycle = curCycle();
|
||||
timesIdled++;
|
||||
} else {
|
||||
schedule(tickEvent, clockEdge(1));
|
||||
schedule(tickEvent, clockEdge(Cycles(1)));
|
||||
DPRINTF(O3CPU, "Scheduling next tick!\n");
|
||||
}
|
||||
}
|
||||
@@ -741,12 +740,12 @@ FullO3CPU<Impl>::totalOps() const
|
||||
|
||||
template <class Impl>
|
||||
void
|
||||
FullO3CPU<Impl>::activateContext(ThreadID tid, int delay)
|
||||
FullO3CPU<Impl>::activateContext(ThreadID tid, Cycles delay)
|
||||
{
|
||||
// Needs to set each stage to running as well.
|
||||
if (delay){
|
||||
DPRINTF(O3CPU, "[tid:%i]: Scheduling thread context to activate "
|
||||
"on cycle %d\n", tid, curTick() + ticks(delay));
|
||||
"on cycle %d\n", tid, clockEdge(delay));
|
||||
scheduleActivateThreadEvent(tid, delay);
|
||||
} else {
|
||||
activateThread(tid);
|
||||
@@ -762,7 +761,8 @@ FullO3CPU<Impl>::activateContext(ThreadID tid, int delay)
|
||||
activityRec.activity();
|
||||
fetch.wakeFromQuiesce();
|
||||
|
||||
Tick cycles = curCycle() - lastRunningCycle;
|
||||
Cycles cycles(curCycle() - lastRunningCycle);
|
||||
// @todo: This is an oddity that is only here to match the stats
|
||||
if (cycles != 0)
|
||||
--cycles;
|
||||
quiesceCycles += cycles;
|
||||
@@ -776,12 +776,12 @@ FullO3CPU<Impl>::activateContext(ThreadID tid, int delay)
|
||||
template <class Impl>
|
||||
bool
|
||||
FullO3CPU<Impl>::scheduleDeallocateContext(ThreadID tid, bool remove,
|
||||
int delay)
|
||||
Cycles delay)
|
||||
{
|
||||
// Schedule removal of thread data from CPU
|
||||
if (delay){
|
||||
DPRINTF(O3CPU, "[tid:%i]: Scheduling thread context to deallocate "
|
||||
"on cycle %d\n", tid, curTick() + ticks(delay));
|
||||
"on tick %d\n", tid, clockEdge(delay));
|
||||
scheduleDeallocateContextEvent(tid, remove, delay);
|
||||
return false;
|
||||
} else {
|
||||
@@ -797,7 +797,7 @@ void
|
||||
FullO3CPU<Impl>::suspendContext(ThreadID tid)
|
||||
{
|
||||
DPRINTF(O3CPU,"[tid: %i]: Suspending Thread Context.\n", tid);
|
||||
bool deallocated = scheduleDeallocateContext(tid, false, 1);
|
||||
bool deallocated = scheduleDeallocateContext(tid, false, Cycles(1));
|
||||
// If this was the last thread then unschedule the tick event.
|
||||
if ((activeThreads.size() == 1 && !deallocated) ||
|
||||
activeThreads.size() == 0)
|
||||
@@ -814,7 +814,7 @@ FullO3CPU<Impl>::haltContext(ThreadID tid)
|
||||
{
|
||||
//For now, this is the same as deallocate
|
||||
DPRINTF(O3CPU,"[tid:%i]: Halt Context called. Deallocating", tid);
|
||||
scheduleDeallocateContext(tid, true, 1);
|
||||
scheduleDeallocateContext(tid, true, Cycles(1));
|
||||
}
|
||||
|
||||
template <class Impl>
|
||||
@@ -854,7 +854,7 @@ FullO3CPU<Impl>::insertThread(ThreadID tid)
|
||||
|
||||
src_tc->setStatus(ThreadContext::Active);
|
||||
|
||||
activateContext(tid,1);
|
||||
activateContext(tid, Cycles(1));
|
||||
|
||||
//Reset ROB/IQ/LSQ Entries
|
||||
commit.rob->resetEntries();
|
||||
@@ -1672,7 +1672,8 @@ FullO3CPU<Impl>::wakeCPU()
|
||||
|
||||
DPRINTF(Activity, "Waking up CPU\n");
|
||||
|
||||
Tick cycles = curCycle() - lastRunningCycle;
|
||||
Cycles cycles(curCycle() - lastRunningCycle);
|
||||
// @todo: This is an oddity that is only here to match the stats
|
||||
if (cycles != 0)
|
||||
--cycles;
|
||||
idleCycles += cycles;
|
||||
|
||||
@@ -211,7 +211,7 @@ class FullO3CPU : public BaseO3CPU
|
||||
TickEvent tickEvent;
|
||||
|
||||
/** Schedule tick event, regardless of its current state. */
|
||||
void scheduleTickEvent(int delay)
|
||||
void scheduleTickEvent(Cycles delay)
|
||||
{
|
||||
if (tickEvent.squashed())
|
||||
reschedule(tickEvent, clockEdge(delay));
|
||||
@@ -251,7 +251,7 @@ class FullO3CPU : public BaseO3CPU
|
||||
|
||||
/** Schedule thread to activate , regardless of its current state. */
|
||||
void
|
||||
scheduleActivateThreadEvent(ThreadID tid, int delay)
|
||||
scheduleActivateThreadEvent(ThreadID tid, Cycles delay)
|
||||
{
|
||||
// Schedule thread to activate, regardless of its current state.
|
||||
if (activateThreadEvent[tid].squashed())
|
||||
@@ -314,7 +314,7 @@ class FullO3CPU : public BaseO3CPU
|
||||
|
||||
/** Schedule cpu to deallocate thread context.*/
|
||||
void
|
||||
scheduleDeallocateContextEvent(ThreadID tid, bool remove, int delay)
|
||||
scheduleDeallocateContextEvent(ThreadID tid, bool remove, Cycles delay)
|
||||
{
|
||||
// Schedule thread to activate, regardless of its current state.
|
||||
if (deallocateContextEvent[tid].squashed())
|
||||
@@ -392,7 +392,7 @@ class FullO3CPU : public BaseO3CPU
|
||||
virtual Counter totalOps() const;
|
||||
|
||||
/** Add Thread to Active Threads List. */
|
||||
void activateContext(ThreadID tid, int delay);
|
||||
void activateContext(ThreadID tid, Cycles delay);
|
||||
|
||||
/** Remove Thread from Active Threads List */
|
||||
void suspendContext(ThreadID tid);
|
||||
@@ -400,7 +400,8 @@ class FullO3CPU : public BaseO3CPU
|
||||
/** Remove Thread from Active Threads List &&
|
||||
* Possibly Remove Thread Context from CPU.
|
||||
*/
|
||||
bool scheduleDeallocateContext(ThreadID tid, bool remove, int delay = 1);
|
||||
bool scheduleDeallocateContext(ThreadID tid, bool remove,
|
||||
Cycles delay = Cycles(1));
|
||||
|
||||
/** Remove Thread from Active Threads List &&
|
||||
* Remove Thread Context from CPU.
|
||||
@@ -748,7 +749,7 @@ class FullO3CPU : public BaseO3CPU
|
||||
std::list<int> cpuWaitList;
|
||||
|
||||
/** The cycle that the CPU was last running, used for statistics. */
|
||||
Tick lastRunningCycle;
|
||||
Cycles lastRunningCycle;
|
||||
|
||||
/** The cycle that the CPU was last activated by a new thread*/
|
||||
Tick lastActivatedCycle;
|
||||
|
||||
@@ -646,7 +646,8 @@ DefaultFetch<Impl>::finishTranslation(Fault fault, RequestPtr mem_req)
|
||||
assert(!finishTranslationEvent.scheduled());
|
||||
finishTranslationEvent.setFault(fault);
|
||||
finishTranslationEvent.setReq(mem_req);
|
||||
cpu->schedule(finishTranslationEvent, cpu->clockEdge(1));
|
||||
cpu->schedule(finishTranslationEvent,
|
||||
cpu->clockEdge(Cycles(1)));
|
||||
return;
|
||||
}
|
||||
DPRINTF(Fetch, "[tid:%i] Got back req with addr %#x but expected %#x\n",
|
||||
|
||||
@@ -828,7 +828,8 @@ InstructionQueue<Impl>::scheduleReadyInsts()
|
||||
FUCompletion *execution = new FUCompletion(issuing_inst,
|
||||
idx, this);
|
||||
|
||||
cpu->schedule(execution, cpu->clockEdge(op_latency - 1));
|
||||
cpu->schedule(execution,
|
||||
cpu->clockEdge(Cycles(op_latency - 1)));
|
||||
|
||||
// @todo: Enforce that issue_latency == 1 or op_latency
|
||||
if (issue_latency > 1) {
|
||||
|
||||
@@ -607,7 +607,7 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
|
||||
load_inst->memData = new uint8_t[64];
|
||||
|
||||
ThreadContext *thread = cpu->tcBase(lsqID);
|
||||
Tick delay;
|
||||
Cycles delay(0);
|
||||
PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq);
|
||||
|
||||
if (!TheISA::HasUnalignedMemAcc || !sreqLow) {
|
||||
@@ -622,7 +622,7 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
|
||||
snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());
|
||||
|
||||
delay = TheISA::handleIprRead(thread, fst_data_pkt);
|
||||
unsigned delay2 = TheISA::handleIprRead(thread, snd_data_pkt);
|
||||
Cycles delay2 = TheISA::handleIprRead(thread, snd_data_pkt);
|
||||
if (delay2 > delay)
|
||||
delay = delay2;
|
||||
|
||||
|
||||
@@ -134,13 +134,13 @@ class O3ThreadContext : public ThreadContext
|
||||
|
||||
/** Set the status to Active. Optional delay indicates number of
|
||||
* cycles to wait before beginning execution. */
|
||||
virtual void activate(int delay = 1);
|
||||
virtual void activate(Cycles delay = Cycles(1));
|
||||
|
||||
/** Set the status to Suspended. */
|
||||
virtual void suspend(int delay = 0);
|
||||
virtual void suspend(Cycles delay = Cycles(0));
|
||||
|
||||
/** Set the status to Halted. */
|
||||
virtual void halt(int delay = 0);
|
||||
virtual void halt(Cycles delay = Cycles(0));
|
||||
|
||||
/** Dumps the function profiling information.
|
||||
* @todo: Implement.
|
||||
|
||||
@@ -102,7 +102,7 @@ O3ThreadContext<Impl>::takeOverFrom(ThreadContext *old_context)
|
||||
|
||||
template <class Impl>
|
||||
void
|
||||
O3ThreadContext<Impl>::activate(int delay)
|
||||
O3ThreadContext<Impl>::activate(Cycles delay)
|
||||
{
|
||||
DPRINTF(O3CPU, "Calling activate on Thread Context %d\n",
|
||||
threadId());
|
||||
@@ -119,7 +119,7 @@ O3ThreadContext<Impl>::activate(int delay)
|
||||
|
||||
template <class Impl>
|
||||
void
|
||||
O3ThreadContext<Impl>::suspend(int delay)
|
||||
O3ThreadContext<Impl>::suspend(Cycles delay)
|
||||
{
|
||||
DPRINTF(O3CPU, "Calling suspend on Thread Context %d\n",
|
||||
threadId());
|
||||
@@ -136,7 +136,7 @@ O3ThreadContext<Impl>::suspend(int delay)
|
||||
|
||||
template <class Impl>
|
||||
void
|
||||
O3ThreadContext<Impl>::halt(int delay)
|
||||
O3ThreadContext<Impl>::halt(Cycles delay)
|
||||
{
|
||||
DPRINTF(O3CPU, "Calling halt on Thread Context %d\n",
|
||||
threadId());
|
||||
|
||||
@@ -197,7 +197,7 @@ AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
|
||||
|
||||
|
||||
void
|
||||
AtomicSimpleCPU::activateContext(ThreadID thread_num, int delay)
|
||||
AtomicSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
|
||||
{
|
||||
DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
|
||||
|
||||
@@ -208,7 +208,7 @@ AtomicSimpleCPU::activateContext(ThreadID thread_num, int delay)
|
||||
assert(!tickEvent.scheduled());
|
||||
|
||||
notIdleFraction++;
|
||||
numCycles += tickToCycle(thread->lastActivate - thread->lastSuspend);
|
||||
numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);
|
||||
|
||||
//Make sure ticks are still on multiples of cycles
|
||||
schedule(tickEvent, clockEdge(delay));
|
||||
@@ -518,13 +518,11 @@ AtomicSimpleCPU::tick()
|
||||
stall_ticks += dcache_latency;
|
||||
|
||||
if (stall_ticks) {
|
||||
Tick stall_cycles = stall_ticks / clockPeriod();
|
||||
Tick aligned_stall_ticks = ticks(stall_cycles);
|
||||
|
||||
if (aligned_stall_ticks < stall_ticks)
|
||||
aligned_stall_ticks += 1;
|
||||
|
||||
latency += aligned_stall_ticks;
|
||||
// the atomic cpu does its accounting in ticks, so
|
||||
// keep counting in ticks but round to the clock
|
||||
// period
|
||||
latency += divCeil(stall_ticks, clockPeriod()) *
|
||||
clockPeriod();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -127,7 +127,7 @@ class AtomicSimpleCPU : public BaseSimpleCPU
|
||||
void switchOut();
|
||||
void takeOverFrom(BaseCPU *oldCPU);
|
||||
|
||||
virtual void activateContext(ThreadID thread_num, int delay);
|
||||
virtual void activateContext(ThreadID thread_num, Cycles delay);
|
||||
virtual void suspendContext(ThreadID thread_num);
|
||||
|
||||
Fault readMem(Addr addr, uint8_t *data, unsigned size, unsigned flags);
|
||||
|
||||
@@ -187,7 +187,7 @@ TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
|
||||
|
||||
|
||||
void
|
||||
TimingSimpleCPU::activateContext(ThreadID thread_num, int delay)
|
||||
TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
|
||||
{
|
||||
DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
|
||||
|
||||
@@ -229,7 +229,7 @@ TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
|
||||
{
|
||||
RequestPtr req = pkt->req;
|
||||
if (req->isMmappedIpr()) {
|
||||
Tick delay = TheISA::handleIprRead(thread->getTC(), pkt);
|
||||
Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
|
||||
new IprEvent(pkt, this, clockEdge(delay));
|
||||
_status = DcacheWaitResponse;
|
||||
dcache_pkt = NULL;
|
||||
@@ -443,7 +443,7 @@ TimingSimpleCPU::handleWritePacket()
|
||||
{
|
||||
RequestPtr req = dcache_pkt->req;
|
||||
if (req->isMmappedIpr()) {
|
||||
Tick delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
|
||||
Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
|
||||
new IprEvent(dcache_pkt, this, clockEdge(delay));
|
||||
_status = DcacheWaitResponse;
|
||||
dcache_pkt = NULL;
|
||||
|
||||
@@ -255,7 +255,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
|
||||
void switchOut();
|
||||
void takeOverFrom(BaseCPU *oldCPU);
|
||||
|
||||
virtual void activateContext(ThreadID thread_num, int delay);
|
||||
virtual void activateContext(ThreadID thread_num, Cycles delay);
|
||||
virtual void suspendContext(ThreadID thread_num);
|
||||
|
||||
Fault readMem(Addr addr, uint8_t *data, unsigned size, unsigned flags);
|
||||
|
||||
@@ -210,7 +210,7 @@ SimpleThread::dumpFuncProfile()
|
||||
}
|
||||
|
||||
void
|
||||
SimpleThread::activate(int delay)
|
||||
SimpleThread::activate(Cycles delay)
|
||||
{
|
||||
if (status() == ThreadContext::Active)
|
||||
return;
|
||||
|
||||
@@ -209,7 +209,7 @@ class SimpleThread : public ThreadState
|
||||
|
||||
/// Set the status to Active. Optional delay indicates number of
|
||||
/// cycles to wait before beginning execution.
|
||||
void activate(int delay = 1);
|
||||
void activate(Cycles delay = Cycles(1));
|
||||
|
||||
/// Set the status to Suspended.
|
||||
void suspend();
|
||||
|
||||
@@ -246,7 +246,7 @@ void
|
||||
MemTest::tick()
|
||||
{
|
||||
if (!tickEvent.scheduled())
|
||||
schedule(tickEvent, clockEdge(1));
|
||||
schedule(tickEvent, clockEdge(Cycles(1)));
|
||||
|
||||
if (++noResponseCycles >= 500000) {
|
||||
if (issueDmas) {
|
||||
|
||||
@@ -165,7 +165,7 @@ NetworkTest::tick()
|
||||
exitSimLoop("Network Tester completed simCycles");
|
||||
else {
|
||||
if (!tickEvent.scheduled())
|
||||
schedule(tickEvent, clockEdge(1));
|
||||
schedule(tickEvent, clockEdge(Cycles(1)));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -163,13 +163,13 @@ class ThreadContext
|
||||
|
||||
/// Set the status to Active. Optional delay indicates number of
|
||||
/// cycles to wait before beginning execution.
|
||||
virtual void activate(int delay = 1) = 0;
|
||||
virtual void activate(Cycles delay = Cycles(1)) = 0;
|
||||
|
||||
/// Set the status to Suspended.
|
||||
virtual void suspend(int delay = 0) = 0;
|
||||
virtual void suspend(Cycles delay = Cycles(0)) = 0;
|
||||
|
||||
/// Set the status to Halted.
|
||||
virtual void halt(int delay = 0) = 0;
|
||||
virtual void halt(Cycles delay = Cycles(0)) = 0;
|
||||
|
||||
virtual void dumpFuncProfile() = 0;
|
||||
|
||||
@@ -329,13 +329,14 @@ class ProxyThreadContext : public ThreadContext
|
||||
|
||||
/// Set the status to Active. Optional delay indicates number of
|
||||
/// cycles to wait before beginning execution.
|
||||
void activate(int delay = 1) { actualTC->activate(delay); }
|
||||
void activate(Cycles delay = Cycles(1))
|
||||
{ actualTC->activate(delay); }
|
||||
|
||||
/// Set the status to Suspended.
|
||||
void suspend(int delay = 0) { actualTC->suspend(); }
|
||||
void suspend(Cycles delay = Cycles(0)) { actualTC->suspend(); }
|
||||
|
||||
/// Set the status to Halted.
|
||||
void halt(int delay = 0) { actualTC->halt(); }
|
||||
void halt(Cycles delay = Cycles(0)) { actualTC->halt(); }
|
||||
|
||||
void dumpFuncProfile() { actualTC->dumpFuncProfile(); }
|
||||
|
||||
|
||||
Reference in New Issue
Block a user