ruby: replaces Time with Cycles in many places
The patch started off with replacing Time with Cycles in the Consumer class, but to get Ruby to compile, the rest of the changes had to be carried out as well. Subsequent patches will further this process until Time is completely replaced with Cycles.
This commit is contained in:
@@ -39,6 +39,7 @@ using namespace std;
|
||||
using m5::stl_helpers::operator<<;
|
||||
|
||||
MessageBuffer::MessageBuffer(const string &name)
|
||||
: m_last_arrival_time(0)
|
||||
{
|
||||
m_msg_counter = 0;
|
||||
m_consumer_ptr = NULL;
|
||||
@@ -48,7 +49,6 @@ MessageBuffer::MessageBuffer(const string &name)
|
||||
m_strict_fifo = true;
|
||||
m_size = 0;
|
||||
m_max_size = -1;
|
||||
m_last_arrival_time = 0;
|
||||
m_randomization = true;
|
||||
m_size_last_time_size_checked = 0;
|
||||
m_time_last_time_size_checked = 0;
|
||||
@@ -139,19 +139,19 @@ MessageBuffer::peekAtHeadOfQueue() const
|
||||
}
|
||||
|
||||
// FIXME - move me somewhere else
|
||||
int
|
||||
Cycles
|
||||
random_time()
|
||||
{
|
||||
int time = 1;
|
||||
time += random() & 0x3; // [0...3]
|
||||
Cycles time(1);
|
||||
time += Cycles(random() & 0x3); // [0...3]
|
||||
if ((random() & 0x7) == 0) { // 1 in 8 chance
|
||||
time += 100 + (random() % 0xf); // 100 + [1...15]
|
||||
time += Cycles(100 + (random() % 0xf)); // 100 + [1...15]
|
||||
}
|
||||
return time;
|
||||
}
|
||||
|
||||
void
|
||||
MessageBuffer::enqueue(MsgPtr message, Time delta)
|
||||
MessageBuffer::enqueue(MsgPtr message, Cycles delta)
|
||||
{
|
||||
m_msg_counter++;
|
||||
m_size++;
|
||||
@@ -170,8 +170,9 @@ MessageBuffer::enqueue(MsgPtr message, Time delta)
|
||||
// Calculate the arrival time of the message, that is, the first
|
||||
// cycle the message can be dequeued.
|
||||
assert(delta>0);
|
||||
Time current_time = m_clockobj_ptr->curCycle();
|
||||
Time arrival_time = 0;
|
||||
Cycles current_time(m_clockobj_ptr->curCycle());
|
||||
Cycles arrival_time(0);
|
||||
|
||||
if (!RubySystem::getRandomization() || (m_randomization == false)) {
|
||||
// No randomization
|
||||
arrival_time = current_time + delta;
|
||||
@@ -304,6 +305,7 @@ MessageBuffer::recycle()
|
||||
MessageBufferNode node = m_prio_heap.front();
|
||||
pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
|
||||
greater<MessageBufferNode>());
|
||||
|
||||
node.m_time = m_clockobj_ptr->curCycle() + m_recycle_latency;
|
||||
m_prio_heap.back() = node;
|
||||
push_heap(m_prio_heap.begin(), m_prio_heap.end(),
|
||||
@@ -317,6 +319,7 @@ MessageBuffer::reanalyzeMessages(const Address& addr)
|
||||
{
|
||||
DPRINTF(RubyQueue, "ReanalyzeMessages\n");
|
||||
assert(m_stall_msg_map.count(addr) > 0);
|
||||
Cycles nextCycle(m_clockobj_ptr->curCycle() + Cycles(1));
|
||||
|
||||
//
|
||||
// Put all stalled messages associated with this address back on the
|
||||
@@ -324,8 +327,7 @@ MessageBuffer::reanalyzeMessages(const Address& addr)
|
||||
//
|
||||
while(!m_stall_msg_map[addr].empty()) {
|
||||
m_msg_counter++;
|
||||
MessageBufferNode msgNode(m_clockobj_ptr->curCycle() + 1,
|
||||
m_msg_counter,
|
||||
MessageBufferNode msgNode(nextCycle, m_msg_counter,
|
||||
m_stall_msg_map[addr].front());
|
||||
|
||||
m_prio_heap.push_back(msgNode);
|
||||
@@ -342,6 +344,7 @@ void
|
||||
MessageBuffer::reanalyzeAllMessages()
|
||||
{
|
||||
DPRINTF(RubyQueue, "ReanalyzeAllMessages %s\n");
|
||||
Cycles nextCycle(m_clockobj_ptr->curCycle() + Cycles(1));
|
||||
|
||||
//
|
||||
// Put all stalled messages associated with this address back on the
|
||||
@@ -353,14 +356,13 @@ MessageBuffer::reanalyzeAllMessages()
|
||||
|
||||
while(!(map_iter->second).empty()) {
|
||||
m_msg_counter++;
|
||||
MessageBufferNode msgNode(m_clockobj_ptr->curCycle() + 1,
|
||||
m_msg_counter,
|
||||
MessageBufferNode msgNode(nextCycle, m_msg_counter,
|
||||
(map_iter->second).front());
|
||||
|
||||
m_prio_heap.push_back(msgNode);
|
||||
push_heap(m_prio_heap.begin(), m_prio_heap.end(),
|
||||
greater<MessageBufferNode>());
|
||||
|
||||
|
||||
m_consumer_ptr->scheduleEventAbsolute(msgNode.m_time);
|
||||
(map_iter->second).pop_front();
|
||||
}
|
||||
|
||||
@@ -54,11 +54,8 @@ class MessageBuffer
|
||||
|
||||
std::string name() const { return m_name; }
|
||||
|
||||
void
|
||||
setRecycleLatency(int recycle_latency)
|
||||
{
|
||||
m_recycle_latency = recycle_latency;
|
||||
}
|
||||
void setRecycleLatency(Cycles recycle_latency)
|
||||
{ m_recycle_latency = recycle_latency; }
|
||||
|
||||
void reanalyzeMessages(const Address& addr);
|
||||
void reanalyzeAllMessages();
|
||||
@@ -74,7 +71,7 @@ class MessageBuffer
|
||||
std::pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
|
||||
std::greater<MessageBufferNode>());
|
||||
m_prio_heap.pop_back();
|
||||
enqueue(node.m_msgptr, 1);
|
||||
enqueue(node.m_msgptr, Cycles(1));
|
||||
}
|
||||
|
||||
bool areNSlotsAvailable(int n);
|
||||
@@ -114,8 +111,8 @@ class MessageBuffer
|
||||
return m_prio_heap.front().m_msgptr;
|
||||
}
|
||||
|
||||
void enqueue(MsgPtr message) { enqueue(message, 1); }
|
||||
void enqueue(MsgPtr message, Time delta);
|
||||
void enqueue(MsgPtr message) { enqueue(message, Cycles(1)); }
|
||||
void enqueue(MsgPtr message, Cycles delta);
|
||||
|
||||
//! returns delay ticks of the message.
|
||||
Time dequeue_getDelayCycles(MsgPtr& message);
|
||||
@@ -160,7 +157,7 @@ class MessageBuffer
|
||||
|
||||
private:
|
||||
//added by SS
|
||||
int m_recycle_latency;
|
||||
Cycles m_recycle_latency;
|
||||
|
||||
// Private Methods
|
||||
Time setAndReturnDelayCycles(MsgPtr message);
|
||||
@@ -204,7 +201,8 @@ class MessageBuffer
|
||||
bool m_strict_fifo;
|
||||
bool m_ordering_set;
|
||||
bool m_randomization;
|
||||
Time m_last_arrival_time;
|
||||
|
||||
Cycles m_last_arrival_time;
|
||||
|
||||
int m_input_link_id;
|
||||
int m_vnet_id;
|
||||
|
||||
@@ -37,22 +37,18 @@ class MessageBufferNode
|
||||
{
|
||||
public:
|
||||
MessageBufferNode()
|
||||
{
|
||||
m_time = 0;
|
||||
m_msg_counter = 0;
|
||||
}
|
||||
: m_time(0), m_msg_counter(0)
|
||||
{}
|
||||
|
||||
MessageBufferNode(const Time& time, int counter, const MsgPtr& msgptr)
|
||||
{
|
||||
m_time = time;
|
||||
m_msgptr = msgptr;
|
||||
m_msg_counter = counter;
|
||||
}
|
||||
MessageBufferNode(const Cycles& time, uint64_t counter,
|
||||
const MsgPtr& msgptr)
|
||||
: m_time(time), m_msg_counter(counter), m_msgptr(msgptr)
|
||||
{}
|
||||
|
||||
void print(std::ostream& out) const;
|
||||
|
||||
public:
|
||||
Time m_time;
|
||||
Cycles m_time;
|
||||
uint64 m_msg_counter; // FIXME, should this be a 64-bit value?
|
||||
MsgPtr m_msgptr;
|
||||
};
|
||||
|
||||
@@ -29,13 +29,14 @@
|
||||
#include "mem/ruby/common/Consumer.hh"
|
||||
|
||||
void
|
||||
Consumer::scheduleEvent(Time timeDelta)
|
||||
Consumer::scheduleEvent(Cycles timeDelta)
|
||||
{
|
||||
scheduleEventAbsolute(timeDelta + em->curCycle());
|
||||
timeDelta += em->curCycle();
|
||||
scheduleEventAbsolute(timeDelta);
|
||||
}
|
||||
|
||||
void
|
||||
Consumer::scheduleEventAbsolute(Time timeAbs)
|
||||
Consumer::scheduleEventAbsolute(Cycles timeAbs)
|
||||
{
|
||||
Tick evt_time = em->clockPeriod() * timeAbs;
|
||||
if (!alreadyScheduled(evt_time)) {
|
||||
|
||||
@@ -38,7 +38,6 @@
|
||||
#include <iostream>
|
||||
#include <set>
|
||||
|
||||
#include "mem/ruby/common/TypeDefines.hh"
|
||||
#include "sim/clocked_object.hh"
|
||||
|
||||
class Consumer
|
||||
@@ -88,8 +87,8 @@ class Consumer
|
||||
m_scheduled_wakeups.erase(time);
|
||||
}
|
||||
|
||||
void scheduleEvent(Time timeDelta);
|
||||
void scheduleEventAbsolute(Time timeAbs);
|
||||
void scheduleEvent(Cycles timeDelta);
|
||||
void scheduleEventAbsolute(Cycles timeAbs);
|
||||
|
||||
private:
|
||||
Tick m_last_scheduled_wakeup;
|
||||
|
||||
@@ -52,7 +52,7 @@ class BasicLink : public SimObject
|
||||
|
||||
void print(std::ostream& out) const;
|
||||
|
||||
int m_latency;
|
||||
Cycles m_latency;
|
||||
int m_bandwidth_factor;
|
||||
int m_weight;
|
||||
};
|
||||
|
||||
@@ -34,7 +34,7 @@ class BasicLink(SimObject):
|
||||
type = 'BasicLink'
|
||||
cxx_header = "mem/ruby/network/BasicLink.hh"
|
||||
link_id = Param.Int("ID in relation to other links")
|
||||
latency = Param.Int(1, "latency")
|
||||
latency = Param.Cycles(1, "latency")
|
||||
# The following banwidth factor does not translate to the same value for
|
||||
# both the simple and Garnet models. For the most part, the bandwidth
|
||||
# factor is the width of the link in bytes, expect for certain situations
|
||||
|
||||
@@ -37,7 +37,7 @@ class NetworkLink_d(ClockedObject):
|
||||
type = 'NetworkLink_d'
|
||||
cxx_header = "mem/ruby/network/garnet/fixed-pipeline/NetworkLink_d.hh"
|
||||
link_id = Param.Int(Parent.link_id, "link id")
|
||||
link_latency = Param.Int(Parent.latency, "link latency")
|
||||
link_latency = Param.Cycles(Parent.latency, "link latency")
|
||||
vcs_per_vnet = Param.Int(Parent.vcs_per_vnet,
|
||||
"virtual channels per virtual network")
|
||||
virt_nets = Param.Int(Parent.number_of_virtual_networks,
|
||||
|
||||
@@ -90,7 +90,7 @@ class InputUnit_d : public Consumer
|
||||
{
|
||||
flit_d *t_flit = new flit_d(in_vc, free_signal, curTime);
|
||||
creditQueue->insert(t_flit);
|
||||
m_credit_link->scheduleEvent(1);
|
||||
m_credit_link->scheduleEvent(Cycles(1));
|
||||
}
|
||||
|
||||
inline int
|
||||
|
||||
@@ -244,14 +244,14 @@ NetworkInterface_d::wakeup()
|
||||
free_signal = true;
|
||||
|
||||
outNode_ptr[t_flit->get_vnet()]->enqueue(
|
||||
t_flit->get_msg_ptr(), 1);
|
||||
t_flit->get_msg_ptr(), Cycles(1));
|
||||
}
|
||||
// Simply send a credit back since we are not buffering
|
||||
// this flit in the NI
|
||||
flit_d *credit_flit = new flit_d(t_flit->get_vc(), free_signal,
|
||||
m_net_ptr->curCycle());
|
||||
creditQueue->insert(credit_flit);
|
||||
m_ni_credit_link->scheduleEvent(1);
|
||||
m_ni_credit_link->scheduleEvent(Cycles(1));
|
||||
|
||||
int vnet = t_flit->get_vnet();
|
||||
m_net_ptr->increment_received_flits(vnet);
|
||||
@@ -324,7 +324,7 @@ NetworkInterface_d::scheduleOutputLink()
|
||||
t_flit->set_time(m_net_ptr->curCycle() + 1);
|
||||
outSrcQueue->insert(t_flit);
|
||||
// schedule the out link
|
||||
outNetLink->scheduleEvent(1);
|
||||
outNetLink->scheduleEvent(Cycles(1));
|
||||
|
||||
if (t_flit->get_type() == TAIL_ ||
|
||||
t_flit->get_type() == HEAD_TAIL_) {
|
||||
@@ -351,13 +351,13 @@ NetworkInterface_d::checkReschedule()
|
||||
{
|
||||
for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
|
||||
if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
|
||||
scheduleEvent(1);
|
||||
scheduleEvent(Cycles(1));
|
||||
return;
|
||||
}
|
||||
}
|
||||
for (int vc = 0; vc < m_num_vcs; vc++) {
|
||||
if (m_ni_buffers[vc]->isReadyForNext(m_net_ptr->curCycle())) {
|
||||
scheduleEvent(1);
|
||||
scheduleEvent(Cycles(1));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,7 +72,7 @@ class NetworkLink_d : public ClockedObject, public Consumer
|
||||
|
||||
protected:
|
||||
int m_id;
|
||||
int m_latency;
|
||||
Cycles m_latency;
|
||||
int channel_width;
|
||||
|
||||
GarnetNetwork_d *m_net_ptr;
|
||||
|
||||
@@ -84,7 +84,7 @@ class OutputUnit_d : public Consumer
|
||||
insert_flit(flit_d *t_flit)
|
||||
{
|
||||
m_out_buffer->insert(t_flit);
|
||||
m_out_link->scheduleEvent(1);
|
||||
m_out_link->scheduleEvent(Cycles(1));
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
@@ -135,13 +135,13 @@ Router_d::route_req(flit_d *t_flit, InputUnit_d *in_unit, int invc)
|
||||
void
|
||||
Router_d::vcarb_req()
|
||||
{
|
||||
m_vc_alloc->scheduleEvent(1);
|
||||
m_vc_alloc->scheduleEvent(Cycles(1));
|
||||
}
|
||||
|
||||
void
|
||||
Router_d::swarb_req()
|
||||
{
|
||||
m_sw_alloc->scheduleEvent(1);
|
||||
m_sw_alloc->scheduleEvent(Cycles(1));
|
||||
}
|
||||
|
||||
void
|
||||
@@ -154,7 +154,7 @@ void
|
||||
Router_d::update_sw_winner(int inport, flit_d *t_flit)
|
||||
{
|
||||
m_switch->update_sw_winner(inport, t_flit);
|
||||
m_switch->scheduleEvent(1);
|
||||
m_switch->scheduleEvent(Cycles(1));
|
||||
}
|
||||
|
||||
void
|
||||
|
||||
@@ -220,7 +220,7 @@ SWallocator_d::check_for_wakeup()
|
||||
for (int j = 0; j < m_num_vcs; j++) {
|
||||
if (m_input_unit[i]->need_stage_nextcycle(j, ACTIVE_, SA_,
|
||||
m_router->curCycle())) {
|
||||
scheduleEvent(1);
|
||||
scheduleEvent(Cycles(1));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -90,7 +90,7 @@ Switch_d::check_for_wakeup()
|
||||
{
|
||||
for (int inport = 0; inport < m_num_inports; inport++) {
|
||||
if (m_switch_buffer[inport]->isReadyForNext(m_router->curCycle())) {
|
||||
scheduleEvent(1);
|
||||
scheduleEvent(Cycles(1));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -260,7 +260,7 @@ VCallocator_d::check_for_wakeup()
|
||||
for (int j = 0; j < m_num_vcs; j++) {
|
||||
if (m_input_unit[i]->need_stage_nextcycle(j, VC_AB_, VA_,
|
||||
m_router->curCycle())) {
|
||||
scheduleEvent(1);
|
||||
scheduleEvent(Cycles(1));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -37,7 +37,7 @@ class NetworkLink(ClockedObject):
|
||||
type = 'NetworkLink'
|
||||
cxx_header = "mem/ruby/network/garnet/flexible-pipeline/NetworkLink.hh"
|
||||
link_id = Param.Int(Parent.link_id, "link id")
|
||||
link_latency = Param.Int(Parent.latency, "link latency")
|
||||
link_latency = Param.Cycles(Parent.latency, "link latency")
|
||||
vcs_per_vnet = Param.Int(Parent.vcs_per_vnet,
|
||||
"virtual channels per virtual network")
|
||||
virt_nets = Param.Int(Parent.number_of_virtual_networks,
|
||||
|
||||
@@ -193,7 +193,7 @@ NetworkInterface::grant_vc(int out_port, int vc, Time grant_time)
|
||||
{
|
||||
assert(m_out_vc_state[vc]->isInState(VC_AB_, grant_time));
|
||||
m_out_vc_state[vc]->grant_vc(grant_time);
|
||||
scheduleEvent(1);
|
||||
scheduleEvent(Cycles(1));
|
||||
}
|
||||
|
||||
// The tail flit corresponding to this vc has been buffered at the next hop
|
||||
@@ -203,7 +203,7 @@ NetworkInterface::release_vc(int out_port, int vc, Time release_time)
|
||||
{
|
||||
assert(m_out_vc_state[vc]->isInState(ACTIVE_, release_time));
|
||||
m_out_vc_state[vc]->setState(IDLE_, release_time);
|
||||
scheduleEvent(1);
|
||||
scheduleEvent(Cycles(1));
|
||||
}
|
||||
|
||||
// Looking for a free output vc
|
||||
@@ -270,7 +270,7 @@ NetworkInterface::wakeup()
|
||||
m_id, m_net_ptr->curCycle());
|
||||
|
||||
outNode_ptr[t_flit->get_vnet()]->enqueue(
|
||||
t_flit->get_msg_ptr(), 1);
|
||||
t_flit->get_msg_ptr(), Cycles(1));
|
||||
|
||||
// signal the upstream router that this vc can be freed now
|
||||
inNetLink->release_vc_link(t_flit->get_vc(),
|
||||
@@ -316,7 +316,7 @@ NetworkInterface::scheduleOutputLink()
|
||||
outSrcQueue->insert(t_flit);
|
||||
|
||||
// schedule the out link
|
||||
outNetLink->scheduleEvent(1);
|
||||
outNetLink->scheduleEvent(Cycles(1));
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -328,13 +328,13 @@ NetworkInterface::checkReschedule()
|
||||
{
|
||||
for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
|
||||
if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
|
||||
scheduleEvent(1);
|
||||
scheduleEvent(Cycles(1));
|
||||
return;
|
||||
}
|
||||
}
|
||||
for (int vc = 0; vc < m_num_vcs; vc++) {
|
||||
if (m_ni_buffers[vc]->isReadyForNext(m_net_ptr->curCycle())) {
|
||||
scheduleEvent(1);
|
||||
scheduleEvent(Cycles(1));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -81,7 +81,8 @@ class NetworkLink : public ClockedObject, public FlexibleConsumer
|
||||
uint32_t functionalWrite(Packet *);
|
||||
|
||||
protected:
|
||||
int m_id, m_latency;
|
||||
int m_id;
|
||||
Cycles m_latency;
|
||||
int m_in_port, m_out_port;
|
||||
int m_link_utilized;
|
||||
std::vector<int> m_vc_load;
|
||||
|
||||
@@ -130,7 +130,7 @@ Router::isBufferNotFull(int vc, int inport)
|
||||
// This has to be updated and arbitration performed
|
||||
void
|
||||
Router::request_vc(int in_vc, int in_port, NetDest destination,
|
||||
Time request_time)
|
||||
Cycles request_time)
|
||||
{
|
||||
assert(m_in_vc_state[in_port][in_vc]->isInState(IDLE_, request_time));
|
||||
|
||||
@@ -231,7 +231,7 @@ Router::grant_vc(int out_port, int vc, Time grant_time)
|
||||
{
|
||||
assert(m_out_vc_state[out_port][vc]->isInState(VC_AB_, grant_time));
|
||||
m_out_vc_state[out_port][vc]->grant_vc(grant_time);
|
||||
scheduleEvent(1);
|
||||
scheduleEvent(Cycles(1));
|
||||
}
|
||||
|
||||
void
|
||||
@@ -239,7 +239,7 @@ Router::release_vc(int out_port, int vc, Time release_time)
|
||||
{
|
||||
assert(m_out_vc_state[out_port][vc]->isInState(ACTIVE_, release_time));
|
||||
m_out_vc_state[out_port][vc]->setState(IDLE_, release_time);
|
||||
scheduleEvent(1);
|
||||
scheduleEvent(Cycles(1));
|
||||
}
|
||||
|
||||
// This function calculated the output port for a particular destination.
|
||||
@@ -274,7 +274,8 @@ Router::routeCompute(flit *m_flit, int inport)
|
||||
m_router_buffers[outport][outvc]->insert(m_flit);
|
||||
|
||||
if (m_net_ptr->getNumPipeStages() > 1)
|
||||
scheduleEvent(m_net_ptr->getNumPipeStages() - 1 );
|
||||
scheduleEvent(Cycles(m_net_ptr->getNumPipeStages() - 1));
|
||||
|
||||
if ((m_flit->get_type() == HEAD_) || (m_flit->get_type() == HEAD_TAIL_)) {
|
||||
NetworkMessage *nm =
|
||||
safe_cast<NetworkMessage*>(m_flit->get_msg_ptr().get());
|
||||
@@ -290,6 +291,7 @@ Router::routeCompute(flit *m_flit, int inport)
|
||||
curCycle());
|
||||
}
|
||||
}
|
||||
|
||||
if ((m_flit->get_type() == TAIL_) || (m_flit->get_type() == HEAD_TAIL_)) {
|
||||
m_in_vc_state[inport][invc]->setState(IDLE_, curCycle() + 1);
|
||||
m_in_link[inport]->release_vc_link(invc, curCycle() + 1);
|
||||
@@ -361,7 +363,7 @@ Router::scheduleOutputLinks()
|
||||
m_router_buffers[port][vc_tolookat]->getTopFlit();
|
||||
t_flit->set_time(curCycle() + 1 );
|
||||
m_out_src_queue[port]->insert(t_flit);
|
||||
m_out_link[port]->scheduleEvent(1);
|
||||
m_out_link[port]->scheduleEvent(Cycles(1));
|
||||
break; // done for this port
|
||||
}
|
||||
}
|
||||
@@ -383,7 +385,7 @@ Router::checkReschedule()
|
||||
for (int port = 0; port < m_out_link.size(); port++) {
|
||||
for (int vc = 0; vc < m_num_vcs; vc++) {
|
||||
if (m_router_buffers[port][vc]->isReadyForNext(curCycle())) {
|
||||
scheduleEvent(1);
|
||||
scheduleEvent(Cycles(1));
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -396,7 +398,7 @@ Router::check_arbiter_reschedule()
|
||||
for (int port = 0; port < m_in_link.size(); port++) {
|
||||
for (int vc = 0; vc < m_num_vcs; vc++) {
|
||||
if (m_in_vc_state[port][vc]->isInState(VC_AB_, curCycle() + 1)) {
|
||||
m_vc_arbiter->scheduleEvent(1);
|
||||
m_vc_arbiter->scheduleEvent(Cycles(1));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -60,7 +60,7 @@ class Router : public BasicRouter, public FlexibleConsumer
|
||||
int link_weight);
|
||||
void wakeup();
|
||||
void request_vc(int in_vc, int in_port, NetDest destination,
|
||||
Time request_time);
|
||||
Cycles request_time);
|
||||
bool isBufferNotFull(int vc, int inport);
|
||||
void grant_vc(int out_port, int vc, Time grant_time);
|
||||
void release_vc(int out_port, int vc, Time release_time);
|
||||
|
||||
@@ -267,7 +267,7 @@ PerfectSwitch::wakeup()
|
||||
|
||||
// There were not enough resources
|
||||
if (!enough) {
|
||||
scheduleEvent(1);
|
||||
scheduleEvent(Cycles(1));
|
||||
DPRINTF(RubyNetwork, "Can't deliver message since a node "
|
||||
"is blocked\n");
|
||||
DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
|
||||
|
||||
@@ -72,12 +72,12 @@ Switch::addInPort(const vector<MessageBuffer*>& in)
|
||||
|
||||
void
|
||||
Switch::addOutPort(const vector<MessageBuffer*>& out,
|
||||
const NetDest& routing_table_entry, int link_latency, int bw_multiplier)
|
||||
const NetDest& routing_table_entry, Cycles link_latency, int bw_multiplier)
|
||||
{
|
||||
// Create a throttle
|
||||
Throttle* throttle_ptr = new Throttle(m_id, m_throttles.size(),
|
||||
link_latency, bw_multiplier, m_network_ptr->getEndpointBandwidth(),
|
||||
this);
|
||||
link_latency, bw_multiplier, m_network_ptr->getEndpointBandwidth(),
|
||||
this);
|
||||
m_throttles.push_back(throttle_ptr);
|
||||
|
||||
// Create one buffer per vnet (these are intermediaryQueues)
|
||||
|
||||
@@ -63,7 +63,7 @@ class Switch : public BasicRouter
|
||||
void init();
|
||||
void addInPort(const std::vector<MessageBuffer*>& in);
|
||||
void addOutPort(const std::vector<MessageBuffer*>& out,
|
||||
const NetDest& routing_table_entry, int link_latency,
|
||||
const NetDest& routing_table_entry, Cycles link_latency,
|
||||
int bw_multiplier);
|
||||
const Throttle* getThrottle(LinkID link_number) const;
|
||||
const std::vector<Throttle*>* getThrottles() const;
|
||||
|
||||
@@ -48,7 +48,7 @@ const int PRIORITY_SWITCH_LIMIT = 128;
|
||||
|
||||
static int network_message_to_size(NetworkMessage* net_msg_ptr);
|
||||
|
||||
Throttle::Throttle(int sID, NodeID node, int link_latency,
|
||||
Throttle::Throttle(int sID, NodeID node, Cycles link_latency,
|
||||
int link_bandwidth_multiplier, int endpoint_bandwidth,
|
||||
ClockedObject *em)
|
||||
: Consumer(em)
|
||||
@@ -57,7 +57,7 @@ Throttle::Throttle(int sID, NodeID node, int link_latency,
|
||||
m_sID = sID;
|
||||
}
|
||||
|
||||
Throttle::Throttle(NodeID node, int link_latency,
|
||||
Throttle::Throttle(NodeID node, Cycles link_latency,
|
||||
int link_bandwidth_multiplier, int endpoint_bandwidth,
|
||||
ClockedObject *em)
|
||||
: Consumer(em)
|
||||
@@ -67,8 +67,8 @@ Throttle::Throttle(NodeID node, int link_latency,
|
||||
}
|
||||
|
||||
void
|
||||
Throttle::init(NodeID node, int link_latency, int link_bandwidth_multiplier,
|
||||
int endpoint_bandwidth)
|
||||
Throttle::init(NodeID node, Cycles link_latency,
|
||||
int link_bandwidth_multiplier, int endpoint_bandwidth)
|
||||
{
|
||||
m_node = node;
|
||||
m_vnets = 0;
|
||||
@@ -222,7 +222,7 @@ Throttle::wakeup()
|
||||
|
||||
// We are out of bandwidth for this cycle, so wakeup next
|
||||
// cycle and continue
|
||||
scheduleEvent(1);
|
||||
scheduleEvent(Cycles(1));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -52,10 +52,10 @@ class MessageBuffer;
|
||||
class Throttle : public Consumer
|
||||
{
|
||||
public:
|
||||
Throttle(int sID, NodeID node, int link_latency,
|
||||
Throttle(int sID, NodeID node, Cycles link_latency,
|
||||
int link_bandwidth_multiplier, int endpoint_bandwidth,
|
||||
ClockedObject *em);
|
||||
Throttle(NodeID node, int link_latency, int link_bandwidth_multiplier,
|
||||
Throttle(NodeID node, Cycles link_latency, int link_bandwidth_multiplier,
|
||||
int endpoint_bandwidth, ClockedObject *em);
|
||||
~Throttle() {}
|
||||
|
||||
@@ -70,13 +70,10 @@ class Throttle : public Consumer
|
||||
void clearStats();
|
||||
// The average utilization (a percent) since last clearStats()
|
||||
double getUtilization() const;
|
||||
int
|
||||
getLinkBandwidth() const
|
||||
{
|
||||
return m_endpoint_bandwidth * m_link_bandwidth_multiplier;
|
||||
}
|
||||
int getLatency() const { return m_link_latency; }
|
||||
int getLinkBandwidth() const
|
||||
{ return m_endpoint_bandwidth * m_link_bandwidth_multiplier; }
|
||||
|
||||
Cycles getLatency() const { return m_link_latency; }
|
||||
const std::vector<std::vector<int> >&
|
||||
getCounters() const
|
||||
{
|
||||
@@ -88,7 +85,7 @@ class Throttle : public Consumer
|
||||
void print(std::ostream& out) const;
|
||||
|
||||
private:
|
||||
void init(NodeID node, int link_latency, int link_bandwidth_multiplier,
|
||||
void init(NodeID node, Cycles link_latency, int link_bandwidth_multiplier,
|
||||
int endpoint_bandwidth);
|
||||
void addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr,
|
||||
ClockedObject *em);
|
||||
@@ -106,7 +103,7 @@ class Throttle : public Consumer
|
||||
int m_sID;
|
||||
NodeID m_node;
|
||||
int m_link_bandwidth_multiplier;
|
||||
int m_link_latency;
|
||||
Cycles m_link_latency;
|
||||
int m_wakeups_wo_switch;
|
||||
int m_endpoint_bandwidth;
|
||||
|
||||
|
||||
@@ -52,8 +52,6 @@ class AbstractCacheEntry : public AbstractEntry
|
||||
void changePermission(AccessPermission new_perm);
|
||||
|
||||
Address m_Address; // Address of this block, required by CacheMemory
|
||||
Time m_LastRef; // Last time this block was referenced, required
|
||||
// by CacheMemory
|
||||
int m_locked; // Holds info whether the address is locked,
|
||||
// required for implementing LL/SC
|
||||
};
|
||||
|
||||
@@ -106,7 +106,7 @@ class AbstractController : public ClockedObject, public Consumer
|
||||
protected:
|
||||
int m_transitions_per_cycle;
|
||||
int m_buffer_size;
|
||||
int m_recycle_latency;
|
||||
Cycles m_recycle_latency;
|
||||
std::string m_name;
|
||||
NodeID m_version;
|
||||
Network* m_net_ptr;
|
||||
|
||||
@@ -40,6 +40,6 @@ class RubyController(ClockedObject):
|
||||
transitions_per_cycle = \
|
||||
Param.Int(32, "no. of SLICC state machine transitions per cycle")
|
||||
buffer_size = Param.Int(0, "max buffer size 0 means infinite")
|
||||
recycle_latency = Param.Int(10, "")
|
||||
recycle_latency = Param.Cycles(10, "")
|
||||
number_of_TBEs = Param.Int(256, "")
|
||||
ruby_system = Param.RubySystem("");
|
||||
|
||||
@@ -37,9 +37,8 @@
|
||||
|
||||
#include "debug/RubySlicc.hh"
|
||||
#include "mem/ruby/common/Address.hh"
|
||||
#include "mem/ruby/common/Global.hh"
|
||||
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
|
||||
#include "mem/ruby/system/System.hh"
|
||||
#include "mem/packet.hh"
|
||||
|
||||
inline int
|
||||
random(int n)
|
||||
@@ -53,6 +52,8 @@ zero_time()
|
||||
return 0;
|
||||
}
|
||||
|
||||
inline Cycles TimeToCycles(Time t) { return Cycles(t); }
|
||||
|
||||
inline NodeID
|
||||
intToID(int nodenum)
|
||||
{
|
||||
|
||||
@@ -36,7 +36,7 @@ class RubyCache(SimObject):
|
||||
cxx_class = 'CacheMemory'
|
||||
cxx_header = "mem/ruby/system/CacheMemory.hh"
|
||||
size = Param.MemorySize("capacity in bytes");
|
||||
latency = Param.Int("");
|
||||
latency = Param.Cycles("");
|
||||
assoc = Param.Int("");
|
||||
replacement_policy = Param.String("PSEUDO_LRU", "");
|
||||
start_index_bit = Param.Int(6, "index start, default 6 for 64-byte line");
|
||||
|
||||
@@ -92,7 +92,7 @@ class CacheMemory : public SimObject
|
||||
AbstractCacheEntry* lookup(const Address& address);
|
||||
const AbstractCacheEntry* lookup(const Address& address) const;
|
||||
|
||||
int getLatency() const { return m_latency; }
|
||||
Cycles getLatency() const { return m_latency; }
|
||||
|
||||
// Hook for checkpointing the contents of the cache
|
||||
void recordCacheContents(int cntrl, CacheRecorder* tr) const;
|
||||
@@ -144,7 +144,7 @@ class CacheMemory : public SimObject
|
||||
|
||||
private:
|
||||
const std::string m_cache_name;
|
||||
int m_latency;
|
||||
Cycles m_latency;
|
||||
|
||||
// Data Members (m_prefix)
|
||||
bool m_is_instruction_only_cache;
|
||||
|
||||
@@ -374,10 +374,10 @@ RubyMemoryControl::printStats(ostream& out) const
|
||||
|
||||
// Queue up a completed request to send back to directory
|
||||
void
|
||||
RubyMemoryControl::enqueueToDirectory(MemoryNode req, int latency)
|
||||
RubyMemoryControl::enqueueToDirectory(MemoryNode req, Cycles latency)
|
||||
{
|
||||
Time arrival_time = curTick() + (latency * clock);
|
||||
Time ruby_arrival_time = arrival_time / g_system_ptr->clockPeriod();
|
||||
Cycles ruby_arrival_time = g_system_ptr->ticksToCycles(arrival_time);
|
||||
req.m_time = ruby_arrival_time;
|
||||
m_response_queue.push_back(req);
|
||||
|
||||
|
||||
@@ -100,7 +100,7 @@ class RubyMemoryControl : public MemoryControl
|
||||
uint32_t functionalWriteBuffers(Packet *pkt);
|
||||
|
||||
private:
|
||||
void enqueueToDirectory(MemoryNode req, int latency);
|
||||
void enqueueToDirectory(MemoryNode req, Cycles latency);
|
||||
const int getRank(int bank) const;
|
||||
bool queueReady(int bank);
|
||||
void issueRequest(int bank);
|
||||
@@ -128,11 +128,11 @@ class RubyMemoryControl : public MemoryControl
|
||||
int m_rank_rank_delay;
|
||||
int m_read_write_delay;
|
||||
int m_basic_bus_busy_time;
|
||||
int m_mem_ctl_latency;
|
||||
Cycles m_mem_ctl_latency;
|
||||
int m_refresh_period;
|
||||
int m_mem_random_arbitrate;
|
||||
int m_tFaw;
|
||||
int m_mem_fixed_delay;
|
||||
Cycles m_mem_fixed_delay;
|
||||
|
||||
int m_total_banks;
|
||||
int m_total_ranks;
|
||||
|
||||
@@ -50,8 +50,8 @@ class RubyMemoryControl(MemoryControl):
|
||||
rank_rank_delay = Param.Int(1, "");
|
||||
read_write_delay = Param.Int(2, "");
|
||||
basic_bus_busy_time = Param.Int(2, "");
|
||||
mem_ctl_latency = Param.Int(12, "");
|
||||
refresh_period = Param.Int(1560, "");
|
||||
mem_ctl_latency = Param.Cycles(12, "");
|
||||
refresh_period = Param.Cycles(1560, "");
|
||||
tFaw = Param.Int(0, "");
|
||||
mem_random_arbitrate = Param.Int(0, "");
|
||||
mem_fixed_delay = Param.Int(0, "");
|
||||
mem_fixed_delay = Param.Cycles(0, "");
|
||||
|
||||
@@ -668,7 +668,7 @@ Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
|
||||
msg->getPhysicalAddress(),
|
||||
RubyRequestType_to_string(secondary_type));
|
||||
|
||||
Time latency = 0; // initialzed to an null value
|
||||
Cycles latency(0); // initialzed to an null value
|
||||
|
||||
if (secondary_type == RubyRequestType_IFETCH)
|
||||
latency = m_instCache_ptr->getLatency();
|
||||
|
||||
@@ -66,12 +66,13 @@ TimerTable::readyAddress() const
|
||||
}
|
||||
|
||||
void
|
||||
TimerTable::set(const Address& address, Time relative_latency)
|
||||
TimerTable::set(const Address& address, Cycles relative_latency)
|
||||
{
|
||||
assert(address == line_address(address));
|
||||
assert(relative_latency > 0);
|
||||
assert(!m_map.count(address));
|
||||
Time ready_time = m_clockobj_ptr->curCycle() + relative_latency;
|
||||
|
||||
Cycles ready_time = m_clockobj_ptr->curCycle() + relative_latency;
|
||||
m_map[address] = ready_time;
|
||||
assert(m_consumer_ptr != NULL);
|
||||
m_consumer_ptr->scheduleEventAbsolute(ready_time);
|
||||
|
||||
@@ -64,7 +64,10 @@ class TimerTable
|
||||
bool isReady() const;
|
||||
const Address& readyAddress() const;
|
||||
bool isSet(const Address& address) const { return !!m_map.count(address); }
|
||||
void set(const Address& address, Time relative_latency);
|
||||
void set(const Address& address, Cycles relative_latency);
|
||||
void set(const Address& address, uint64_t relative_latency)
|
||||
{ set(address, Cycles(relative_latency)); }
|
||||
|
||||
void unset(const Address& address);
|
||||
void print(std::ostream& out) const;
|
||||
|
||||
@@ -79,7 +82,7 @@ class TimerTable
|
||||
|
||||
// use a std::map for the address map as this container is sorted
|
||||
// and ensures a well-defined iteration order
|
||||
typedef std::map<Address, Time> AddressMap;
|
||||
typedef std::map<Address, Cycles> AddressMap;
|
||||
AddressMap m_map;
|
||||
mutable bool m_next_valid;
|
||||
mutable Time m_next_time; // Only valid if m_next_valid is true
|
||||
|
||||
@@ -70,12 +70,13 @@ WireBuffer::~WireBuffer()
|
||||
}
|
||||
|
||||
void
|
||||
WireBuffer::enqueue(MsgPtr message, int latency)
|
||||
WireBuffer::enqueue(MsgPtr message, Cycles latency)
|
||||
{
|
||||
m_msg_counter++;
|
||||
Time current_time = g_system_ptr->getTime();
|
||||
Time arrival_time = current_time + latency;
|
||||
Cycles current_time = g_system_ptr->getTime();
|
||||
Cycles arrival_time = current_time + latency;
|
||||
assert(arrival_time > current_time);
|
||||
|
||||
MessageBufferNode thisNode(arrival_time, m_msg_counter, message);
|
||||
m_message_queue.push_back(thisNode);
|
||||
if (m_consumer_ptr != NULL) {
|
||||
@@ -122,11 +123,12 @@ WireBuffer::recycle()
|
||||
MessageBufferNode node = m_message_queue.front();
|
||||
pop_heap(m_message_queue.begin(), m_message_queue.end(),
|
||||
greater<MessageBufferNode>());
|
||||
node.m_time = g_system_ptr->getTime() + 1;
|
||||
|
||||
node.m_time = g_system_ptr->getTime() + Cycles(1);
|
||||
m_message_queue.back() = node;
|
||||
push_heap(m_message_queue.begin(), m_message_queue.end(),
|
||||
greater<MessageBufferNode>());
|
||||
m_consumer_ptr->scheduleEventAbsolute(g_system_ptr->getTime() + 1);
|
||||
m_consumer_ptr->scheduleEventAbsolute(node.m_time);
|
||||
}
|
||||
|
||||
bool
|
||||
|
||||
@@ -72,7 +72,7 @@ class WireBuffer : public SimObject
|
||||
void setDescription(const std::string& name) { m_description = name; };
|
||||
std::string getDescription() { return m_description; };
|
||||
|
||||
void enqueue(MsgPtr message, int latency );
|
||||
void enqueue(MsgPtr message, Cycles latency);
|
||||
void dequeue();
|
||||
const Message* peek();
|
||||
MessageBufferNode peekNode();
|
||||
|
||||
Reference in New Issue
Block a user