Ruby: Remove RubyEventQueue

This patch removes RubyEventQueue. Consumer objects now rely on RubySystem
or themselves for scheduling events.
This commit is contained in:
Nilay Vaish
2012-08-27 01:00:55 -05:00
parent 7122b83d8f
commit 9190940511
64 changed files with 276 additions and 543 deletions

View File

@@ -42,7 +42,6 @@
#include "cpu/testers/directedtest/DirectedGenerator.hh"
#include "cpu/testers/directedtest/RubyDirectedTester.hh"
#include "debug/DirectedTest.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "sim/sim_exit.hh"
RubyDirectedTester::RubyDirectedTester(const Params *p)

View File

@@ -313,7 +313,7 @@ Check::performCallback(NodeID proc, SubBlock* data)
proc, address, data, byte_number,
(int)m_value + byte_number,
(int)data->getByte(byte_number), *this,
g_eventQueue_ptr->getTime());
g_system_ptr->getTime());
}
}
DPRINTF(RubyTest, "Action/check success\n");
@@ -328,7 +328,7 @@ Check::performCallback(NodeID proc, SubBlock* data)
} else {
panic("Unexpected TesterStatus: %s proc: %d data: %s m_status: %s "
"time: %d\n",
*this, proc, data, m_status, g_eventQueue_ptr->getTime());
*this, proc, data, m_status, g_system_ptr->getTime());
}
DPRINTF(RubyTest, "proc: %d, Address: 0x%x\n", proc,

View File

@@ -45,7 +45,6 @@
#include "debug/RubyTest.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/system/System.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"
@@ -192,7 +191,7 @@ void
RubyTester::hitCallback(NodeID proc, SubBlock* data)
{
// Mark that we made progress
m_last_progress_vector[proc] = g_eventQueue_ptr->getTime();
m_last_progress_vector[proc] = g_system_ptr->getTime();
DPRINTF(RubyTest, "completed request for proc: %d\n", proc);
DPRINTF(RubyTest, "addr: 0x%x, size: %d, data: ",
@@ -230,7 +229,7 @@ void
RubyTester::checkForDeadlock()
{
int size = m_last_progress_vector.size();
Time current_time = g_eventQueue_ptr->getTime();
Time current_time = g_system_ptr->getTime();
for (int processor = 0; processor < size; processor++) {
if ((current_time - m_last_progress_vector[processor]) >
m_deadlock_threshold) {

View File

@@ -66,10 +66,10 @@ MessageBuffer::MessageBuffer(const string &name)
int
MessageBuffer::getSize()
{
if (m_time_last_time_size_checked == g_eventQueue_ptr->getTime()) {
if (m_time_last_time_size_checked == g_system_ptr->getTime()) {
return m_size_last_time_size_checked;
} else {
m_time_last_time_size_checked = g_eventQueue_ptr->getTime();
m_time_last_time_size_checked = g_system_ptr->getTime();
m_size_last_time_size_checked = m_size;
return m_size;
}
@@ -89,11 +89,11 @@ MessageBuffer::areNSlotsAvailable(int n)
// until next cycle, but enqueue operations affect the visible
// size immediately
int current_size = max(m_size_at_cycle_start, m_size);
if (m_time_last_time_pop < g_eventQueue_ptr->getTime()) {
if (m_time_last_time_pop < g_system_ptr->getTime()) {
// no pops this cycle - m_size is correct
current_size = m_size;
} else {
if (m_time_last_time_enqueue < g_eventQueue_ptr->getTime()) {
if (m_time_last_time_enqueue < g_system_ptr->getTime()) {
// no enqueues this cycle - m_size_at_cycle_start is correct
current_size = m_size_at_cycle_start;
} else {
@@ -155,9 +155,9 @@ MessageBuffer::enqueue(MsgPtr message, Time delta)
m_size++;
// record current time in case we have a pop that also adjusts my size
if (m_time_last_time_enqueue < g_eventQueue_ptr->getTime()) {
if (m_time_last_time_enqueue < g_system_ptr->getTime()) {
m_msgs_this_cycle = 0; // first msg this cycle
m_time_last_time_enqueue = g_eventQueue_ptr->getTime();
m_time_last_time_enqueue = g_system_ptr->getTime();
}
m_msgs_this_cycle++;
@@ -168,7 +168,7 @@ MessageBuffer::enqueue(MsgPtr message, Time delta)
// Calculate the arrival time of the message, that is, the first
// cycle the message can be dequeued.
assert(delta>0);
Time current_time = g_eventQueue_ptr->getTime();
Time current_time = g_system_ptr->getTime();
Time arrival_time = 0;
if (!RubySystem::getRandomization() || (m_randomization == false)) {
// No randomization
@@ -192,10 +192,10 @@ MessageBuffer::enqueue(MsgPtr message, Time delta)
panic("FIFO ordering violated: %s name: %s current time: %d "
"delta: %d arrival_time: %d last arrival_time: %d\n",
*this, m_name,
current_time * g_eventQueue_ptr->getClock(),
delta * g_eventQueue_ptr->getClock(),
arrival_time * g_eventQueue_ptr->getClock(),
m_last_arrival_time * g_eventQueue_ptr->getClock());
current_time * g_system_ptr->getClock(),
delta * g_system_ptr->getClock(),
arrival_time * g_system_ptr->getClock(),
m_last_arrival_time * g_system_ptr->getClock());
}
}
@@ -208,10 +208,10 @@ MessageBuffer::enqueue(MsgPtr message, Time delta)
Message* msg_ptr = message.get();
assert(msg_ptr != NULL);
assert(g_eventQueue_ptr->getTime() >= msg_ptr->getLastEnqueueTime() &&
assert(g_system_ptr->getTime() >= msg_ptr->getLastEnqueueTime() &&
"ensure we aren't dequeued early");
msg_ptr->setDelayedCycles(g_eventQueue_ptr->getTime() -
msg_ptr->setDelayedCycles(g_system_ptr->getTime() -
msg_ptr->getLastEnqueueTime() +
msg_ptr->getDelayedCycles());
msg_ptr->setLastEnqueueTime(arrival_time);
@@ -223,12 +223,12 @@ MessageBuffer::enqueue(MsgPtr message, Time delta)
greater<MessageBufferNode>());
DPRINTF(RubyQueue, "Enqueue with arrival_time %lld.\n",
arrival_time * g_eventQueue_ptr->getClock());
arrival_time * g_system_ptr->getClock());
DPRINTF(RubyQueue, "Enqueue Message: %s.\n", (*(message.get())));
// Schedule the wakeup
if (m_consumer_ptr != NULL) {
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, arrival_time);
m_consumer_ptr->scheduleEventAbsolute(arrival_time);
m_consumer_ptr->storeEventInfo(m_vnet_id);
} else {
panic("No consumer: %s name: %s\n", *this, m_name);
@@ -287,9 +287,9 @@ MessageBuffer::pop()
// record previous size and time so the current buffer size isn't
// adjusted until next cycle
if (m_time_last_time_pop < g_eventQueue_ptr->getTime()) {
if (m_time_last_time_pop < g_system_ptr->getTime()) {
m_size_at_cycle_start = m_size;
m_time_last_time_pop = g_eventQueue_ptr->getTime();
m_time_last_time_pop = g_system_ptr->getTime();
}
m_size--;
}
@@ -315,12 +315,12 @@ MessageBuffer::recycle()
MessageBufferNode node = m_prio_heap.front();
pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
greater<MessageBufferNode>());
node.m_time = g_eventQueue_ptr->getTime() + m_recycle_latency;
node.m_time = g_system_ptr->getTime() + m_recycle_latency;
m_prio_heap.back() = node;
push_heap(m_prio_heap.begin(), m_prio_heap.end(),
greater<MessageBufferNode>());
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr,
g_eventQueue_ptr->getTime() + m_recycle_latency);
m_consumer_ptr->scheduleEventAbsolute(g_system_ptr->getTime() +
m_recycle_latency);
}
void
@@ -335,7 +335,7 @@ MessageBuffer::reanalyzeMessages(const Address& addr)
//
while(!m_stall_msg_map[addr].empty()) {
m_msg_counter++;
MessageBufferNode msgNode(g_eventQueue_ptr->getTime() + 1,
MessageBufferNode msgNode(g_system_ptr->getTime() + 1,
m_msg_counter,
m_stall_msg_map[addr].front());
@@ -343,7 +343,7 @@ MessageBuffer::reanalyzeMessages(const Address& addr)
push_heap(m_prio_heap.begin(), m_prio_heap.end(),
greater<MessageBufferNode>());
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, msgNode.m_time);
m_consumer_ptr->scheduleEventAbsolute(msgNode.m_time);
m_stall_msg_map[addr].pop_front();
}
m_stall_msg_map.erase(addr);
@@ -364,7 +364,7 @@ MessageBuffer::reanalyzeAllMessages()
while(!(map_iter->second).empty()) {
m_msg_counter++;
MessageBufferNode msgNode(g_eventQueue_ptr->getTime() + 1,
MessageBufferNode msgNode(g_system_ptr->getTime() + 1,
m_msg_counter,
(map_iter->second).front());
@@ -372,8 +372,7 @@ MessageBuffer::reanalyzeAllMessages()
push_heap(m_prio_heap.begin(), m_prio_heap.end(),
greater<MessageBufferNode>());
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr,
msgNode.m_time);
m_consumer_ptr->scheduleEventAbsolute(msgNode.m_time);
(map_iter->second).pop_front();
}
}
@@ -407,8 +406,8 @@ MessageBuffer::setAndReturnDelayCycles(MsgPtr msg_ptr)
// this function should only be called on dequeue
// ensure the msg hasn't been enqueued
assert(msg_ptr->getLastEnqueueTime() <= g_eventQueue_ptr->getTime());
msg_ptr->setDelayedCycles(g_eventQueue_ptr->getTime() -
assert(msg_ptr->getLastEnqueueTime() <= g_system_ptr->getTime());
msg_ptr->setDelayedCycles(g_system_ptr->getTime() -
msg_ptr->getLastEnqueueTime() +
msg_ptr->getDelayedCycles());
delay_cycles = msg_ptr->getDelayedCycles();

View File

@@ -45,7 +45,6 @@
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/slicc_interface/Message.hh"
class MessageBuffer
@@ -70,7 +69,7 @@ class MessageBuffer
isReady() const
{
return ((m_prio_heap.size() > 0) &&
(m_prio_heap.front().m_time <= g_eventQueue_ptr->getTime()));
(m_prio_heap.front().m_time <= g_system_ptr->getTime()));
}
void

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* Copyright (c) 2012 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -26,43 +26,38 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cassert>
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/eventqueue/RubyEventQueueNode.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/System.hh"
RubyEventQueue::RubyEventQueue(EventQueue* eventq, Tick _clock)
: EventManager(eventq), m_clock(_clock)
{
}
RubyEventQueue::~RubyEventQueue()
void
Consumer::scheduleEvent(Time timeDelta)
{
scheduleEvent(g_system_ptr, timeDelta);
}
void
RubyEventQueue::scheduleEvent(Consumer* consumer, Time timeDelta)
Consumer::scheduleEvent(EventManager *em, Time timeDelta)
{
scheduleEventAbsolute(consumer, timeDelta + getTime());
scheduleEventAbsolute(em, timeDelta + g_system_ptr->getTime());
}
void
RubyEventQueue::scheduleEventAbsolute(Consumer* consumer, Time timeAbs)
Consumer::scheduleEventAbsolute(Time timeAbs)
{
// Check to see if this is a redundant wakeup
assert(consumer != NULL);
if (!consumer->alreadyScheduled(timeAbs)) {
scheduleEventAbsolute(g_system_ptr, timeAbs);
}
void
Consumer::scheduleEventAbsolute(EventManager *em, Time timeAbs)
{
Tick evt_time = timeAbs * g_system_ptr->getClock();
if (!alreadyScheduled(evt_time)) {
// This wakeup is not redundant
RubyEventQueueNode *thisNode = new RubyEventQueueNode(consumer, this);
assert(timeAbs > getTime());
schedule(thisNode, (timeAbs * m_clock));
consumer->insertScheduledWakeupTime(timeAbs);
ConsumerEvent *evt = new ConsumerEvent(this);
assert(timeAbs > g_system_ptr->getTime());
em->schedule(evt, evt_time);
insertScheduledWakeupTime(evt_time);
}
}
void
RubyEventQueue::print(std::ostream& out) const
{
out << "[Event Queue:]";
}

View File

@@ -38,10 +38,8 @@
#include <iostream>
#include <set>
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
class MessageBuffer;
#include "mem/ruby/common/TypeDefines.hh"
#include "sim/eventq.hh"
class Consumer
{
@@ -55,55 +53,68 @@ class Consumer
~Consumer()
{ }
void
triggerWakeup(RubyEventQueue *eventQueue)
{
Time time = eventQueue->getTime();
if (m_last_wakeup != time) {
wakeup();
m_last_wakeup = time;
}
}
virtual void wakeup() = 0;
virtual void print(std::ostream& out) const = 0;
virtual void storeEventInfo(int info) {}
const Time&
const Tick&
getLastScheduledWakeup() const
{
return m_last_scheduled_wakeup;
}
void
setLastScheduledWakeup(const Time& time)
setLastScheduledWakeup(const Tick& time)
{
m_last_scheduled_wakeup = time;
}
bool
alreadyScheduled(Time time)
alreadyScheduled(Tick time)
{
return m_scheduled_wakeups.find(time) != m_scheduled_wakeups.end();
}
void
insertScheduledWakeupTime(Time time)
insertScheduledWakeupTime(Tick time)
{
m_scheduled_wakeups.insert(time);
}
void
removeScheduledWakeupTime(Time time)
removeScheduledWakeupTime(Tick time)
{
assert(alreadyScheduled(time));
m_scheduled_wakeups.erase(time);
}
void scheduleEvent(EventManager* em, Time timeDelta);
void scheduleEventAbsolute(EventManager* em, Time timeAbs);
void scheduleEvent(Time timeDelta);
void scheduleEventAbsolute(Time timeAbs);
private:
Time m_last_scheduled_wakeup;
std::set<Time> m_scheduled_wakeups;
Time m_last_wakeup;
Tick m_last_scheduled_wakeup;
std::set<Tick> m_scheduled_wakeups;
Tick m_last_wakeup;
class ConsumerEvent : public Event
{
public:
ConsumerEvent(Consumer* _consumer)
: Event(Default_Pri, AutoDelete), m_consumer_ptr(_consumer)
{
}
void process()
{
m_consumer_ptr->wakeup();
m_consumer_ptr->removeScheduledWakeupTime(when());
}
private:
Consumer* m_consumer_ptr;
};
};
inline std::ostream&

View File

@@ -28,6 +28,4 @@
#include "mem/ruby/common/Global.hh"
RubyEventQueue* g_eventQueue_ptr = 0;
RubySystem* g_system_ptr = 0;

View File

@@ -31,9 +31,6 @@
#include "base/str.hh"
class RubyEventQueue;
extern RubyEventQueue* g_eventQueue_ptr;
class RubySystem;
extern RubySystem* g_system_ptr;

View File

@@ -34,6 +34,7 @@ if env['PROTOCOL'] == 'None':
Return()
Source('Address.cc')
Source('Consumer.cc')
Source('DataBlock.cc')
Source('Global.cc')
Source('Histogram.cc')

View File

@@ -35,8 +35,6 @@ typedef unsigned char uint8;
typedef unsigned int uint32;
typedef unsigned long long uint64;
typedef signed char int8;
typedef int int32;
typedef long long int64;
typedef long long integer_t;

View File

@@ -1,96 +0,0 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* The RubyEventQueue class implements an event queue which
* can trigger events, allowing our simulation to be event-driven.
*
* Currently, the only event we support is a Consumer being signaled
* by calling the consumer's wakeup() routine. Adding the event to
* the queue does not require a virtual function call, though calling
* wakeup() is a virtual function call.
*
* The method triggerEvents() is called with a global time. All
* events which are before or at this time are triggered in timestamp
* order. No ordering is enforced for events scheduled to occur at
* the same time. Events scheduled to wakeup the same consumer at the
* same time are combined into a single event.
*
* The method scheduleEventAbsolute() is called with a global time
* and a consumer pointer. The event queue will call the wakeup()
* method of the consumer at the appropriate time.
*
* This implementation of RubyEventQueue uses a dynamically sized array
* managed as a heap. The algorithm used has O(lg n) for insert and
* O(lg n) for extract minimum element. (Based on chapter 7 of Cormen,
* Leiserson, and Rivest.) The array is dynamically sized and is
* automatically doubled in size when necessary.
*
*/
#ifndef __MEM_RUBY_EVENTQUEUE_RUBYEVENTQUEUE_HH__
#define __MEM_RUBY_EVENTQUEUE_RUBYEVENTQUEUE_HH__
#include <iostream>
#include "mem/ruby/common/TypeDefines.hh"
#include "sim/eventq.hh"
class Consumer;
class RubyEventQueueNode;
// Event queue that wakes Ruby Consumer objects.  Time is measured in
// Ruby cycles: the queue converts between global ticks and cycles using
// the per-queue clock period (m_clock ticks per cycle).
class RubyEventQueue : public EventManager
{
  public:
    RubyEventQueue(EventQueue* eventq, Tick _clock);
    ~RubyEventQueue();

    // Current time in Ruby cycles (global tick count / clock period).
    Time getTime() const { return curTick()/m_clock; }
    // Clock period, in ticks per Ruby cycle.
    Tick getClock() const { return m_clock; }
    // Schedule consumer's wakeup() timeDelta cycles from now.
    void scheduleEvent(Consumer* consumer, Time timeDelta);
    // Schedule consumer's wakeup() at the absolute cycle timeAbs.
    void scheduleEventAbsolute(Consumer* consumer, Time timeAbs);
    void print(std::ostream& out) const;

  private:
    // Non-copyable: copy constructor and assignment operator are
    // declared but intentionally left undefined.
    RubyEventQueue(const RubyEventQueue& obj);
    RubyEventQueue& operator=(const RubyEventQueue& obj);

    // Data Members (m_ prefix)
    Tick m_clock; // ticks per Ruby cycle
};
// Stream insertion for RubyEventQueue: delegate formatting to print()
// and flush before returning, matching the other Ruby stream operators.
inline std::ostream&
operator<<(std::ostream& out, const RubyEventQueue& obj)
{
    obj.print(out);
    return out << std::flush;
}
#endif // __MEM_RUBY_EVENTQUEUE_RUBYEVENTQUEUE_HH__

View File

@@ -1,41 +0,0 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/ruby/eventqueue/RubyEventQueueNode.hh"
void
RubyEventQueueNode::print(std::ostream& out) const
{
    // Emit "[ Consumer=<ptr>]", printing the literal text "NULL" when
    // no consumer is attached.
    out << "[ Consumer=";
    if (m_consumer_ptr == NULL)
        out << "NULL";
    else
        out << m_consumer_ptr;
    out << "]";
}

View File

@@ -1,69 +0,0 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __MEM_RUBY_EVENTQUEUE_RUBYEVENTQUEUENODE_HH__
#define __MEM_RUBY_EVENTQUEUE_RUBYEVENTQUEUENODE_HH__
#include <iostream>
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/common/Global.hh"
#include "sim/eventq.hh"
// A single scheduled wakeup for one Consumer, queued on the gem5 event
// queue.  Constructed with AutoDelete, so the event frees itself after
// process() has run.
class RubyEventQueueNode : public Event
{
  public:
    RubyEventQueueNode(Consumer* _consumer, RubyEventQueue* _eventq)
        : Event(Default_Pri, AutoDelete),
          m_consumer_ptr(_consumer), m_eventq_ptr(_eventq)
    {
    }

    void print(std::ostream& out) const;

    // Fire the wakeup: call the consumer first, then remove the current
    // time from the consumer's scheduled-wakeup bookkeeping.
    virtual void
    process()
    {
        m_consumer_ptr->wakeup();
        m_consumer_ptr->removeScheduledWakeupTime(m_eventq_ptr->getTime());
    }

    virtual const char *description() const { return "Ruby Event"; }

  private:
    Consumer* m_consumer_ptr;     // consumer to wake (non-owning)
    RubyEventQueue* m_eventq_ptr; // queue used to read the current time
};
// Stream insertion for RubyEventQueueNode: format via print(), then
// flush and hand the stream back to the caller.
inline std::ostream&
operator<<(std::ostream& out, const RubyEventQueueNode& obj)
{
    obj.print(out);
    out.flush();
    return out;
}
#endif // __MEM_RUBY_EVENTQUEUE_RUBYEVENTQUEUENODE_HH__

View File

@@ -1,37 +0,0 @@
# -*- mode:python -*-
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Pull in the SCons build environment exported by the parent SConscript.
Import('*')

# This directory is only built when a Ruby coherence protocol is
# selected; skip it otherwise.
if env['PROTOCOL'] == 'None':
    Return()

Source('RubyEventQueue.cc')
Source('RubyEventQueueNode.cc')

View File

@@ -127,7 +127,7 @@ BaseGarnetNetwork::getFromNetQueue(NodeID id, bool ordered, int network_num,
void
BaseGarnetNetwork::clearStats()
{
m_ruby_start = g_eventQueue_ptr->getTime();
m_ruby_start = g_system_ptr->getTime();
}
Time

View File

@@ -272,7 +272,7 @@ GarnetNetwork_d::printLinkStats(ostream& out) const
for (int i = 0; i < m_link_ptr_vector.size(); i++) {
average_link_utilization +=
(double(m_link_ptr_vector[i]->getLinkUtilization())) /
(double(g_eventQueue_ptr->getTime()-m_ruby_start));
(double(g_system_ptr->getTime()-m_ruby_start));
vector<int> vc_load = m_link_ptr_vector[i]->getVcLoad();
for (int j = 0; j < vc_load.size(); j++) {
@@ -291,7 +291,7 @@ GarnetNetwork_d::printLinkStats(ostream& out) const
continue;
average_vc_load[i] = (double(average_vc_load[i]) /
(double(g_eventQueue_ptr->getTime()) - m_ruby_start));
(double(g_system_ptr->getTime()) - m_ruby_start));
out << "Average VC Load [" << i << "] = " << average_vc_load[i]
<< " flits/cycle " << endl;
}

View File

@@ -79,7 +79,7 @@ InputUnit_d::wakeup()
// Do the route computation for this vc
m_router->route_req(t_flit, this, vc);
m_vcs[vc]->set_enqueue_time(g_eventQueue_ptr->getTime());
m_vcs[vc]->set_enqueue_time(g_system_ptr->getTime());
} else {
t_flit->advance_stage(SA_);
m_router->swarb_req();

View File

@@ -90,7 +90,7 @@ class InputUnit_d : public Consumer
{
flit_d *t_flit = new flit_d(in_vc, free_signal);
creditQueue->insert(t_flit);
g_eventQueue_ptr->scheduleEvent(m_credit_link, 1);
m_credit_link->scheduleEvent(1);
}
inline int

View File

@@ -70,7 +70,7 @@ NetworkInterface_d::NetworkInterface_d(int id, int virtual_networks,
for (int i = 0; i < m_num_vcs; i++) {
m_out_vc_state.push_back(new OutVcState_d(i, m_net_ptr));
m_out_vc_state[i]->setState(IDLE_, g_eventQueue_ptr->getTime());
m_out_vc_state[i]->setState(IDLE_, g_system_ptr->getTime());
}
}
@@ -169,11 +169,11 @@ NetworkInterface_d::flitisizeMessage(MsgPtr msg_ptr, int vnet)
for (int i = 0; i < num_flits; i++) {
m_net_ptr->increment_injected_flits(vnet);
flit_d *fl = new flit_d(i, vc, vnet, num_flits, new_msg_ptr);
fl->set_delay(g_eventQueue_ptr->getTime() - msg_ptr->getTime());
fl->set_delay(g_system_ptr->getTime() - msg_ptr->getTime());
m_ni_buffers[vc]->insert(fl);
}
m_ni_enqueue_time[vc] = g_eventQueue_ptr->getTime();
m_out_vc_state[vc]->setState(ACTIVE_, g_eventQueue_ptr->getTime());
m_ni_enqueue_time[vc] = g_system_ptr->getTime();
m_out_vc_state[vc]->setState(ACTIVE_, g_system_ptr->getTime());
}
return true ;
}
@@ -189,7 +189,7 @@ NetworkInterface_d::calculateVC(int vnet)
m_vc_allocator[vnet] = 0;
if (m_out_vc_state[(vnet*m_vc_per_vnet) + delta]->isInState(
IDLE_, g_eventQueue_ptr->getTime())) {
IDLE_, g_system_ptr->getTime())) {
return ((vnet*m_vc_per_vnet) + delta);
}
}
@@ -210,7 +210,7 @@ void
NetworkInterface_d::wakeup()
{
DPRINTF(RubyNetwork, "m_id: %d woke up at time: %lld",
m_id, g_eventQueue_ptr->getTime());
m_id, g_system_ptr->getTime());
MsgPtr msg_ptr;
@@ -245,11 +245,11 @@ NetworkInterface_d::wakeup()
// this flit in the NI
flit_d *credit_flit = new flit_d(t_flit->get_vc(), free_signal);
creditQueue->insert(credit_flit);
g_eventQueue_ptr->scheduleEvent(m_ni_credit_link, 1);
m_ni_credit_link->scheduleEvent(1);
int vnet = t_flit->get_vnet();
m_net_ptr->increment_received_flits(vnet);
int network_delay = g_eventQueue_ptr->getTime() -
int network_delay = g_system_ptr->getTime() -
t_flit->get_enqueue_time();
int queueing_delay = t_flit->get_delay();
m_net_ptr->increment_network_latency(network_delay, vnet);
@@ -264,7 +264,7 @@ NetworkInterface_d::wakeup()
m_out_vc_state[t_flit->get_vc()]->increment_credit();
if (t_flit->is_free_signal()) {
m_out_vc_state[t_flit->get_vc()]->setState(IDLE_,
g_eventQueue_ptr->getTime());
g_system_ptr->getTime());
}
delete t_flit;
}
@@ -313,10 +313,10 @@ NetworkInterface_d::scheduleOutputLink()
m_out_vc_state[vc]->decrement_credit();
// Just removing the flit
flit_d *t_flit = m_ni_buffers[vc]->getTopFlit();
t_flit->set_time(g_eventQueue_ptr->getTime() + 1);
t_flit->set_time(g_system_ptr->getTime() + 1);
outSrcQueue->insert(t_flit);
// schedule the out link
g_eventQueue_ptr->scheduleEvent(outNetLink, 1);
outNetLink->scheduleEvent(1);
if (t_flit->get_type() == TAIL_ ||
t_flit->get_type() == HEAD_TAIL_) {
@@ -343,13 +343,13 @@ NetworkInterface_d::checkReschedule()
{
for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(1);
return;
}
}
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_ni_buffers[vc]->isReadyForNext()) {
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(1);
return;
}
}

View File

@@ -68,9 +68,9 @@ NetworkLink_d::wakeup()
{
if (link_srcQueue->isReady()) {
flit_d *t_flit = link_srcQueue->getTopFlit();
t_flit->set_time(g_eventQueue_ptr->getTime() + m_latency);
t_flit->set_time(g_system_ptr->getTime() + m_latency);
linkBuffer->insert(t_flit);
g_eventQueue_ptr->scheduleEvent(link_consumer, m_latency);
link_consumer->scheduleEvent(m_latency);
m_link_utilized++;
m_vc_load[t_flit->get_vc()]++;
}

View File

@@ -29,15 +29,15 @@
*/
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/network/garnet/fixed-pipeline/OutVcState_d.hh"
#include "mem/ruby/system/System.hh"
OutVcState_d::OutVcState_d(int id, GarnetNetwork_d *network_ptr)
{
m_network_ptr = network_ptr;
m_id = id;
m_vc_state = IDLE_;
m_time = g_eventQueue_ptr->getTime();
m_time = g_system_ptr->getTime();
if (m_network_ptr->get_vnet_type(id) == DATA_VNET_)
m_credit_count = m_network_ptr->getBuffersPerDataVC();

View File

@@ -101,7 +101,7 @@ OutputUnit_d::set_credit_link(CreditLink_d *credit_link)
void
OutputUnit_d::update_vc(int vc, int in_port, int in_vc)
{
m_outvc_state[vc]->setState(ACTIVE_, g_eventQueue_ptr->getTime() + 1);
m_outvc_state[vc]->setState(ACTIVE_, g_system_ptr->getTime() + 1);
m_outvc_state[vc]->set_inport(in_port);
m_outvc_state[vc]->set_invc(in_vc);
m_router->update_incredit(in_port, in_vc,

View File

@@ -71,21 +71,21 @@ class OutputUnit_d : public Consumer
inline void
set_vc_state(VC_state_type state, int vc)
{
m_outvc_state[vc]->setState(state, g_eventQueue_ptr->getTime() + 1);
m_outvc_state[vc]->setState(state, g_system_ptr->getTime() + 1);
}
inline bool
is_vc_idle(int vc)
{
return (m_outvc_state[vc]->isInState(IDLE_,
g_eventQueue_ptr->getTime()));
g_system_ptr->getTime()));
}
inline void
insert_flit(flit_d *t_flit)
{
m_out_buffer->insert(t_flit);
g_eventQueue_ptr->scheduleEvent(m_out_link, 1);
m_out_link->scheduleEvent(1);
}
private:

View File

@@ -135,13 +135,13 @@ Router_d::route_req(flit_d *t_flit, InputUnit_d *in_unit, int invc)
void
Router_d::vcarb_req()
{
g_eventQueue_ptr->scheduleEvent(m_vc_alloc, 1);
m_vc_alloc->scheduleEvent(1);
}
void
Router_d::swarb_req()
{
g_eventQueue_ptr->scheduleEvent(m_sw_alloc, 1);
m_sw_alloc->scheduleEvent(1);
}
void
@@ -154,7 +154,7 @@ void
Router_d::update_sw_winner(int inport, flit_d *t_flit)
{
m_switch->update_sw_winner(inport, t_flit);
g_eventQueue_ptr->scheduleEvent(m_switch, 1);
m_switch->scheduleEvent(1);
}
void

View File

@@ -177,7 +177,7 @@ SWallocator_d::arbitrate_outports()
t_flit->advance_stage(ST_);
t_flit->set_vc(outvc);
t_flit->set_outport(outport);
t_flit->set_time(g_eventQueue_ptr->getTime() + 1);
t_flit->set_time(g_system_ptr->getTime() + 1);
m_output_unit[outport]->decrement_credit(outvc);
m_router->update_sw_winner(inport, t_flit);
m_global_arbiter_activity++;
@@ -209,7 +209,7 @@ SWallocator_d::check_for_wakeup()
for (int i = 0; i < m_num_inports; i++) {
for (int j = 0; j < m_num_vcs; j++) {
if (m_input_unit[i]->need_stage_nextcycle(j, ACTIVE_, SA_)) {
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(1);
return;
}
}

View File

@@ -64,7 +64,7 @@ void
Switch_d::wakeup()
{
DPRINTF(RubyNetwork, "Switch woke up at time: %lld\n",
g_eventQueue_ptr->getTime());
g_system_ptr->getTime());
for (int inport = 0; inport < m_num_inports; inport++) {
if (!m_switch_buffer[inport]->isReady())
@@ -73,7 +73,7 @@ Switch_d::wakeup()
if (t_flit->is_stage(ST_)) {
int outport = t_flit->get_outport();
t_flit->advance_stage(LT_);
t_flit->set_time(g_eventQueue_ptr->getTime() + 1);
t_flit->set_time(g_system_ptr->getTime() + 1);
// This will take care of waking up the Network Link
m_output_unit[outport]->insert_flit(t_flit);
@@ -89,7 +89,7 @@ Switch_d::check_for_wakeup()
{
for (int inport = 0; inport < m_num_inports; inport++) {
if (m_switch_buffer[inport]->isReadyForNext()) {
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(1);
break;
}
}

View File

@@ -256,7 +256,7 @@ VCallocator_d::check_for_wakeup()
for (int i = 0; i < m_num_inports; i++) {
for (int j = 0; j < m_num_vcs; j++) {
if (m_input_unit[i]->need_stage_nextcycle(j, VC_AB_, VA_)) {
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(1);
return;
}
}

View File

@@ -35,7 +35,7 @@ VirtualChannel_d::VirtualChannel_d(int id)
m_id = id;
m_input_buffer = new flitBuffer_d();
m_vc_state.first = IDLE_;
m_vc_state.second = g_eventQueue_ptr->getTime();
m_vc_state.second = g_system_ptr->getTime();
m_enqueue_time = INFINITE_;
}
@@ -55,7 +55,7 @@ VirtualChannel_d::grant_vc(int out_vc)
{
m_output_vc = out_vc;
m_vc_state.first = ACTIVE_;
m_vc_state.second = g_eventQueue_ptr->getTime() + 1;
m_vc_state.second = g_system_ptr->getTime() + 1;
flit_d *t_flit = m_input_buffer->peekTopFlit();
t_flit->advance_stage(SA_);
}
@@ -64,7 +64,7 @@ bool
VirtualChannel_d::need_stage(VC_state_type state, flit_stage stage)
{
if ((m_vc_state.first == state) &&
(g_eventQueue_ptr->getTime() >= m_vc_state.second)) {
(g_system_ptr->getTime() >= m_vc_state.second)) {
if (m_input_buffer->isReady()) {
flit_d *t_flit = m_input_buffer->peekTopFlit();
return(t_flit->is_stage(stage)) ;
@@ -78,7 +78,7 @@ bool
VirtualChannel_d::need_stage_nextcycle(VC_state_type state, flit_stage stage)
{
if ((m_vc_state.first == state) &&
((g_eventQueue_ptr->getTime()+1) >= m_vc_state.second)) {
((g_system_ptr->getTime()+1) >= m_vc_state.second)) {
if (m_input_buffer->isReadyForNext()) {
flit_d *t_flit = m_input_buffer->peekTopFlit();
return(t_flit->is_next_stage(stage)) ;

View File

@@ -68,7 +68,7 @@ class VirtualChannel_d
set_state(VC_state_type m_state)
{
m_vc_state.first = m_state;
m_vc_state.second = g_eventQueue_ptr->getTime() + 1;
m_vc_state.second = g_system_ptr->getTime() + 1;
}
inline flit_d*

View File

@@ -51,7 +51,7 @@ flitBuffer_d::isReady()
{
if (m_buffer.size() != 0 ) {
flit_d *t_flit = peekTopFlit();
if (t_flit->get_time() <= g_eventQueue_ptr->getTime())
if (t_flit->get_time() <= g_system_ptr->getTime())
return true;
}
return false;
@@ -62,7 +62,7 @@ flitBuffer_d::isReadyForNext()
{
if (m_buffer.size() != 0 ) {
flit_d *t_flit = peekTopFlit();
if (t_flit->get_time() <= (g_eventQueue_ptr->getTime() + 1))
if (t_flit->get_time() <= (g_system_ptr->getTime() + 1))
return true;
}
return false;

View File

@@ -34,8 +34,8 @@ flit_d::flit_d(int id, int vc, int vnet, int size, MsgPtr msg_ptr)
{
m_size = size;
m_msg_ptr = msg_ptr;
m_enqueue_time = g_eventQueue_ptr->getTime();
m_time = g_eventQueue_ptr->getTime();
m_enqueue_time = g_system_ptr->getTime();
m_time = g_system_ptr->getTime();
m_id = id;
m_vnet = vnet;
m_vc = vc;
@@ -59,7 +59,7 @@ flit_d::flit_d(int vc, bool is_free_signal)
m_id = 0;
m_vc = vc;
m_is_free_signal = is_free_signal;
m_time = g_eventQueue_ptr->getTime();
m_time = g_system_ptr->getTime();
}
void

View File

@@ -61,21 +61,21 @@ class flit_d
is_stage(flit_stage t_stage)
{
return (m_stage.first == t_stage &&
g_eventQueue_ptr->getTime() >= m_stage.second);
g_system_ptr->getTime() >= m_stage.second);
}
bool
is_next_stage(flit_stage t_stage)
{
return (m_stage.first == t_stage &&
(g_eventQueue_ptr->getTime() + 1) >= m_stage.second);
(g_system_ptr->getTime() + 1) >= m_stage.second);
}
void
advance_stage(flit_stage t_stage)
{
m_stage.first = t_stage;
m_stage.second = g_eventQueue_ptr->getTime() + 1;
m_stage.second = g_system_ptr->getTime() + 1;
}
std::pair<flit_stage, Time>
get_stage()

View File

@@ -207,7 +207,7 @@ GarnetNetwork::printLinkStats(ostream& out) const
for (int i = 0; i < m_link_ptr_vector.size(); i++) {
average_link_utilization +=
(double(m_link_ptr_vector[i]->getLinkUtilization())) /
(double(g_eventQueue_ptr->getTime()-m_ruby_start));
(double(g_system_ptr->getTime()-m_ruby_start));
vector<int> vc_load = m_link_ptr_vector[i]->getVcLoad();
for (int j = 0; j < vc_load.size(); j++) {
@@ -226,7 +226,7 @@ GarnetNetwork::printLinkStats(ostream& out) const
continue;
average_vc_load[i] = (double(average_vc_load[i]) /
(double(g_eventQueue_ptr->getTime()) - m_ruby_start));
(double(g_system_ptr->getTime()) - m_ruby_start));
out << "Average VC Load [" << i << "] = " << average_vc_load[i]
<< " flits/cycle " << endl;
}

View File

@@ -67,7 +67,7 @@ NetworkInterface::NetworkInterface(int id, int virtual_networks,
for (int i = 0; i < m_num_vcs; i++) {
m_out_vc_state.push_back(new OutVcState(i));
m_out_vc_state[i]->setState(IDLE_, g_eventQueue_ptr->getTime());
m_out_vc_state[i]->setState(IDLE_, g_system_ptr->getTime());
}
}
@@ -166,18 +166,18 @@ NetworkInterface::flitisizeMessage(MsgPtr msg_ptr, int vnet)
for (int i = 0; i < num_flits; i++) {
m_net_ptr->increment_injected_flits(vnet);
flit *fl = new flit(i, vc, vnet, num_flits, new_msg_ptr);
fl->set_delay(g_eventQueue_ptr->getTime() - msg_ptr->getTime());
fl->set_delay(g_system_ptr->getTime() - msg_ptr->getTime());
m_ni_buffers[vc]->insert(fl);
}
m_out_vc_state[vc]->setState(VC_AB_, g_eventQueue_ptr->getTime());
m_out_vc_state[vc]->setState(VC_AB_, g_system_ptr->getTime());
// setting an output vc request for the next hop.
// This flit will be ready to traverse the link and into the next hop
// only when an output vc is acquired at the next hop
outNetLink->request_vc_link(vc,
new_net_msg_ptr->getInternalDestination(),
g_eventQueue_ptr->getTime());
g_system_ptr->getTime());
}
return true ;
@@ -190,7 +190,7 @@ NetworkInterface::grant_vc(int out_port, int vc, Time grant_time)
{
assert(m_out_vc_state[vc]->isInState(VC_AB_, grant_time));
m_out_vc_state[vc]->grant_vc(grant_time);
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(1);
}
// The tail flit corresponding to this vc has been buffered at the next hop
@@ -200,7 +200,7 @@ NetworkInterface::release_vc(int out_port, int vc, Time release_time)
{
assert(m_out_vc_state[vc]->isInState(ACTIVE_, release_time));
m_out_vc_state[vc]->setState(IDLE_, release_time);
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(1);
}
// Looking for a free output vc
@@ -220,7 +220,7 @@ NetworkInterface::calculateVC(int vnet)
m_vc_allocator[vnet] = 0;
if (m_out_vc_state[(vnet*m_vc_per_vnet) + delta]->isInState(IDLE_,
g_eventQueue_ptr->getTime())) {
g_system_ptr->getTime())) {
return ((vnet*m_vc_per_vnet) + delta);
}
}
@@ -264,18 +264,18 @@ NetworkInterface::wakeup()
flit *t_flit = inNetLink->consumeLink();
if (t_flit->get_type() == TAIL_ || t_flit->get_type() == HEAD_TAIL_) {
DPRINTF(RubyNetwork, "m_id: %d, Message delivered at time: %lld\n",
m_id, g_eventQueue_ptr->getTime());
m_id, g_system_ptr->getTime());
outNode_ptr[t_flit->get_vnet()]->enqueue(
t_flit->get_msg_ptr(), 1);
// signal the upstream router that this vc can be freed now
inNetLink->release_vc_link(t_flit->get_vc(),
g_eventQueue_ptr->getTime() + 1);
g_system_ptr->getTime() + 1);
}
int vnet = t_flit->get_vnet();
m_net_ptr->increment_received_flits(vnet);
int network_delay = g_eventQueue_ptr->getTime() -
int network_delay = g_system_ptr->getTime() -
t_flit->get_enqueue_time();
int queueing_delay = t_flit->get_delay();
m_net_ptr->increment_network_latency(network_delay, vnet);
@@ -304,16 +304,16 @@ NetworkInterface::scheduleOutputLink()
vc = 0;
if (m_ni_buffers[vc]->isReady()) {
if (m_out_vc_state[vc]->isInState(ACTIVE_,
g_eventQueue_ptr->getTime()) &&
g_system_ptr->getTime()) &&
outNetLink->isBufferNotFull_link(vc)) { // buffer backpressure
// Just removing the flit
flit *t_flit = m_ni_buffers[vc]->getTopFlit();
t_flit->set_time(g_eventQueue_ptr->getTime() + 1);
t_flit->set_time(g_system_ptr->getTime() + 1);
outSrcQueue->insert(t_flit);
// schedule the out link
g_eventQueue_ptr->scheduleEvent(outNetLink, 1);
outNetLink->scheduleEvent(1);
return;
}
}
@@ -325,13 +325,13 @@ NetworkInterface::checkReschedule()
{
for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(1);
return;
}
}
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_ni_buffers[vc]->isReadyForNext()) {
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(1);
return;
}
}

View File

@@ -138,9 +138,9 @@ NetworkLink::wakeup()
return;
flit *t_flit = link_srcQueue->getTopFlit();
t_flit->set_time(g_eventQueue_ptr->getTime() + m_latency);
t_flit->set_time(g_system_ptr->getTime() + m_latency);
linkBuffer->insert(t_flit);
g_eventQueue_ptr->scheduleEvent(link_consumer, m_latency);
link_consumer->scheduleEvent(this, m_latency);
m_link_utilized++;
m_vc_load[t_flit->get_vc()]++;
}

View File

@@ -72,7 +72,7 @@ Router::addInPort(NetworkLink *in_link)
vector<InVcState *> in_vc_vector;
for (int i = 0; i < m_num_vcs; i++) {
in_vc_vector.push_back(new InVcState(i));
in_vc_vector[i]->setState(IDLE_, g_eventQueue_ptr->getTime());
in_vc_vector[i]->setState(IDLE_, g_system_ptr->getTime());
}
m_in_vc_state.push_back(in_vc_vector);
m_in_link.push_back(in_link);
@@ -112,7 +112,7 @@ Router::addOutPort(NetworkLink *out_link, const NetDest& routing_table_entry,
vector<OutVcState *> out_vc_vector;
for (int i = 0; i < m_num_vcs; i++) {
out_vc_vector.push_back(new OutVcState(i));
out_vc_vector[i]->setState(IDLE_, g_eventQueue_ptr->getTime());
out_vc_vector[i]->setState(IDLE_, g_system_ptr->getTime());
}
m_out_vc_state.push_back(out_vc_vector);
m_link_weights.push_back(link_weight);
@@ -138,9 +138,9 @@ Router::request_vc(int in_vc, int in_port, NetDest destination,
int outport = getRoute(destination);
m_in_vc_state[in_port][in_vc]->setRoute(outport);
m_in_vc_state[in_port][in_vc]->setState(VC_AB_, request_time);
assert(request_time >= g_eventQueue_ptr->getTime());
if (request_time > g_eventQueue_ptr->getTime())
g_eventQueue_ptr->scheduleEventAbsolute(m_vc_arbiter, request_time);
assert(request_time >= g_system_ptr->getTime());
if (request_time > g_system_ptr->getTime())
m_vc_arbiter->scheduleEventAbsolute(request_time);
else
vc_arbitrate();
}
@@ -181,22 +181,22 @@ Router::vc_arbitrate()
InVcState *in_vc_state = m_in_vc_state[inport][invc];
if (in_vc_state->isInState(VC_AB_, g_eventQueue_ptr->getTime())) {
if (in_vc_state->isInState(VC_AB_, g_system_ptr->getTime())) {
int outport = in_vc_state->get_outport();
vector<int> valid_vcs = get_valid_vcs(invc);
for (int valid_vc_iter = 0; valid_vc_iter < valid_vcs.size();
valid_vc_iter++) {
if (m_out_vc_state[outport][valid_vcs[valid_vc_iter]]
->isInState(IDLE_, g_eventQueue_ptr->getTime())) {
->isInState(IDLE_, g_system_ptr->getTime())) {
in_vc_state->grant_vc(valid_vcs[valid_vc_iter],
g_eventQueue_ptr->getTime());
g_system_ptr->getTime());
m_in_link[inport]->grant_vc_link(invc,
g_eventQueue_ptr->getTime());
g_system_ptr->getTime());
m_out_vc_state[outport][valid_vcs[valid_vc_iter]]
->setState(VC_AB_, g_eventQueue_ptr->getTime());
->setState(VC_AB_, g_system_ptr->getTime());
break;
}
}
@@ -233,7 +233,7 @@ Router::grant_vc(int out_port, int vc, Time grant_time)
{
assert(m_out_vc_state[out_port][vc]->isInState(VC_AB_, grant_time));
m_out_vc_state[out_port][vc]->grant_vc(grant_time);
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(1);
}
void
@@ -241,7 +241,7 @@ Router::release_vc(int out_port, int vc, Time release_time)
{
assert(m_out_vc_state[out_port][vc]->isInState(ACTIVE_, release_time));
m_out_vc_state[out_port][vc]->setState(IDLE_, release_time);
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(1);
}
// This function calculated the output port for a particular destination.
@@ -271,14 +271,13 @@ Router::routeCompute(flit *m_flit, int inport)
assert(m_net_ptr->getNumPipeStages() >= 1);
// Subtract 1 as 1 cycle will be consumed in scheduling the output link
m_flit->set_time(g_eventQueue_ptr->getTime() +
m_flit->set_time(g_system_ptr->getTime() +
(m_net_ptr->getNumPipeStages() - 1));
m_flit->set_vc(outvc);
m_router_buffers[outport][outvc]->insert(m_flit);
if (m_net_ptr->getNumPipeStages() > 1)
g_eventQueue_ptr->scheduleEvent(this,
m_net_ptr->getNumPipeStages() - 1 );
scheduleEvent(m_net_ptr->getNumPipeStages() - 1 );
if ((m_flit->get_type() == HEAD_) || (m_flit->get_type() == HEAD_TAIL_)) {
NetworkMessage *nm =
safe_cast<NetworkMessage*>(m_flit->get_msg_ptr().get());
@@ -286,24 +285,24 @@ Router::routeCompute(flit *m_flit, int inport)
if (m_net_ptr->getNumPipeStages() > 1) {
m_out_vc_state[outport][outvc]->setState(VC_AB_,
g_eventQueue_ptr->getTime() + 1);
g_system_ptr->getTime() + 1);
m_out_link[outport]->request_vc_link(outvc, destination,
g_eventQueue_ptr->getTime() + 1);
g_system_ptr->getTime() + 1);
} else {
m_out_vc_state[outport][outvc]->setState(VC_AB_,
g_eventQueue_ptr->getTime());
g_system_ptr->getTime());
m_out_link[outport]->request_vc_link(outvc, destination,
g_eventQueue_ptr->getTime());
g_system_ptr->getTime());
}
}
if ((m_flit->get_type() == TAIL_) || (m_flit->get_type() == HEAD_TAIL_)) {
m_in_vc_state[inport][invc]->setState(IDLE_,
g_eventQueue_ptr->getTime() + 1);
g_system_ptr->getTime() + 1);
m_in_link[inport]->release_vc_link(invc,
g_eventQueue_ptr->getTime() + 1);
g_system_ptr->getTime() + 1);
}
}
@@ -328,7 +327,7 @@ Router::wakeup()
// checking the incoming link
if (m_in_link[incoming_port]->isReady()) {
DPRINTF(RubyNetwork, "m_id: %d, Time: %lld\n",
m_id, g_eventQueue_ptr->getTime());
m_id, g_system_ptr->getTime());
t_flit = m_in_link[incoming_port]->peekLink();
routeCompute(t_flit, incoming_port);
m_in_link[incoming_port]->consumeLink();
@@ -366,14 +365,14 @@ Router::scheduleOutputLinks()
// models buffer backpressure
if (m_out_vc_state[port][vc_tolookat]->isInState(ACTIVE_,
g_eventQueue_ptr->getTime()) &&
g_system_ptr->getTime()) &&
m_out_link[port]->isBufferNotFull_link(vc_tolookat)) {
flit *t_flit =
m_router_buffers[port][vc_tolookat]->getTopFlit();
t_flit->set_time(g_eventQueue_ptr->getTime() + 1 );
t_flit->set_time(g_system_ptr->getTime() + 1 );
m_out_src_queue[port]->insert(t_flit);
g_eventQueue_ptr->scheduleEvent(m_out_link[port], 1);
m_out_link[port]->scheduleEvent(1);
break; // done for this port
}
}
@@ -395,7 +394,7 @@ Router::checkReschedule()
for (int port = 0; port < m_out_link.size(); port++) {
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_router_buffers[port][vc]->isReadyForNext()) {
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(1);
return;
}
}
@@ -408,9 +407,9 @@ Router::check_arbiter_reschedule()
for (int port = 0; port < m_in_link.size(); port++) {
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_in_vc_state[port][vc]->isInState(VC_AB_,
g_eventQueue_ptr->getTime() + 1)) {
g_system_ptr->getTime() + 1)) {
g_eventQueue_ptr->scheduleEvent(m_vc_arbiter, 1);
m_vc_arbiter->scheduleEvent(1);
return;
}
}

View File

@@ -34,8 +34,8 @@ flit::flit(int id, int vc, int vnet, int size, MsgPtr msg_ptr)
{
m_size = size;
m_msg_ptr = msg_ptr;
m_enqueue_time = g_eventQueue_ptr->getTime();
m_time = g_eventQueue_ptr->getTime();
m_enqueue_time = g_system_ptr->getTime();
m_time = g_system_ptr->getTime();
m_id = id;
m_vnet = vnet;
m_vc = vc;

View File

@@ -55,7 +55,7 @@ flitBuffer::isReady()
{
if (m_buffer.size() != 0 ) {
flit *t_flit = m_buffer.front();
if (t_flit->get_time() <= g_eventQueue_ptr->getTime())
if (t_flit->get_time() <= g_system_ptr->getTime())
return true;
}
return false;
@@ -66,7 +66,7 @@ flitBuffer::isReadyForNext()
{
if (m_buffer.size() != 0 ) {
flit *t_flit = m_buffer.front();
if (t_flit->get_time() <= (g_eventQueue_ptr->getTime() + 1))
if (t_flit->get_time() <= (g_system_ptr->getTime() + 1))
return true;
}
return false;

View File

@@ -41,7 +41,7 @@ Router_d::calculate_power()
calculate_performance_numbers();
double sim_cycles;
sim_cycles =
g_eventQueue_ptr->getTime() - m_network_ptr->getRubyStartTime();
g_system_ptr->getTime() - m_network_ptr->getRubyStartTime();
// Number of virtual networks/message classes declared in Ruby
// maybe greater than active virtual networks.
@@ -248,7 +248,7 @@ NetworkLink_d::calculate_power()
orion_cfg_ptr);
double sim_cycles =
(double)(g_eventQueue_ptr->getTime() - m_net_ptr->getRubyStartTime());
(double)(g_system_ptr->getTime() - m_net_ptr->getRubyStartTime());
// Dynamic Power
// Assume half the bits flipped on every link activity

View File

@@ -260,7 +260,7 @@ PerfectSwitch::wakeup()
// There were not enough resources
if (!enough) {
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(1);
DPRINTF(RubyNetwork, "Can't deliver message since a node "
"is blocked\n");
DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));

View File

@@ -167,7 +167,7 @@ Throttle::wakeup()
DPRINTF(RubyNetwork, "throttle: %d my bw %d bw spent "
"enqueueing net msg %d time: %lld.\n",
m_node, getLinkBandwidth(), m_units_remaining[vnet],
g_eventQueue_ptr->getTime());
g_system_ptr->getTime());
// Move the message
m_out[vnet]->enqueue(m_in[vnet]->peekMsgPtr(), m_link_latency);
@@ -215,7 +215,7 @@ Throttle::wakeup()
// We are out of bandwidth for this cycle, so wakeup next
// cycle and continue
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(1);
}
}
@@ -228,7 +228,7 @@ Throttle::printStats(ostream& out) const
void
Throttle::clearStats()
{
m_ruby_start = g_eventQueue_ptr->getTime();
m_ruby_start = g_system_ptr->getTime();
m_links_utilized = 0.0;
for (int i = 0; i < m_message_counters.size(); i++) {
@@ -242,7 +242,7 @@ double
Throttle::getUtilization() const
{
return 100.0 * double(m_links_utilized) /
double(g_eventQueue_ptr->getTime()-m_ruby_start);
double(g_system_ptr->getTime()-m_ruby_start);
}
void

View File

@@ -67,7 +67,7 @@ static double process_memory_total();
static double process_memory_resident();
Profiler::Profiler(const Params *p)
: SimObject(p)
: SimObject(p), m_event(this)
{
m_inst_profiler_ptr = NULL;
m_address_profiler_ptr = NULL;
@@ -113,13 +113,13 @@ Profiler::wakeup()
for (int i = 0; i < m_num_of_sequencers; i++) {
perProcCycleCount[i] =
g_system_ptr->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
g_system_ptr->getTime() - m_cycles_executed_at_start[i] + 1;
// The +1 allows us to avoid division by zero
}
ostream &out = *m_periodic_output_file_ptr;
out << "ruby_cycles: " << g_eventQueue_ptr->getTime()-m_ruby_start << endl
out << "ruby_cycles: " << g_system_ptr->getTime()-m_ruby_start << endl
<< "mbytes_resident: " << process_memory_resident() << endl
<< "mbytes_total: " << process_memory_total() << endl;
@@ -137,7 +137,7 @@ Profiler::wakeup()
}
//g_system_ptr->getNetwork()->printStats(out);
g_eventQueue_ptr->scheduleEvent(this, m_stats_period);
schedule(m_event, curTick() + m_stats_period * g_system_ptr->getClock());
}
void
@@ -151,7 +151,7 @@ Profiler::setPeriodicStatsFile(const string& filename)
}
m_periodic_output_file_ptr = new ofstream(filename.c_str());
g_eventQueue_ptr->scheduleEvent(this, 1);
schedule(m_event, curTick() + g_system_ptr->getClock());
}
void
@@ -161,7 +161,7 @@ Profiler::setPeriodicStatsInterval(integer_t period)
<< " Ruby cycles" << endl;
m_stats_period = period;
g_eventQueue_ptr->scheduleEvent(this, 1);
schedule(m_event, curTick() + g_system_ptr->getClock());
}
void
@@ -185,7 +185,7 @@ Profiler::printStats(ostream& out, bool short_stats)
double minutes = seconds / 60.0;
double hours = minutes / 60.0;
double days = hours / 24.0;
Time ruby_cycles = g_eventQueue_ptr->getTime()-m_ruby_start;
Time ruby_cycles = g_system_ptr->getTime()-m_ruby_start;
if (!short_stats) {
out << "Elapsed_time_in_seconds: " << seconds << endl;
@@ -208,7 +208,7 @@ Profiler::printStats(ostream& out, bool short_stats)
out << "Virtual_time_in_days: " << days << endl;
out << endl;
out << "Ruby_current_time: " << g_eventQueue_ptr->getTime() << endl;
out << "Ruby_current_time: " << g_system_ptr->getTime() << endl;
out << "Ruby_start_time: " << m_ruby_start << endl;
out << "Ruby_cycles: " << ruby_cycles << endl;
out << endl;
@@ -227,7 +227,7 @@ Profiler::printStats(ostream& out, bool short_stats)
for (int i = 0; i < m_num_of_sequencers; i++) {
perProcCycleCount[i] =
g_system_ptr->getCycleCount(i) - m_cycles_executed_at_start[i] + 1;
g_system_ptr->getTime() - m_cycles_executed_at_start[i] + 1;
// The +1 allows us to avoid division by zero
}
@@ -437,14 +437,14 @@ Profiler::printResourceUsage(ostream& out) const
void
Profiler::clearStats()
{
m_ruby_start = g_eventQueue_ptr->getTime();
m_ruby_start = g_system_ptr->getTime();
m_cycles_executed_at_start.resize(m_num_of_sequencers);
for (int i = 0; i < m_num_of_sequencers; i++) {
if (g_system_ptr == NULL) {
m_cycles_executed_at_start[i] = 0;
} else {
m_cycles_executed_at_start[i] = g_system_ptr->getCycleCount(i);
m_cycles_executed_at_start[i] = g_system_ptr->getTime();
}
}
@@ -524,7 +524,7 @@ Profiler::clearStats()
//g_eventQueue_ptr->triggerAllEvents();
// update the start time
m_ruby_start = g_eventQueue_ptr->getTime();
m_ruby_start = g_system_ptr->getTime();
}
void
@@ -723,7 +723,7 @@ Profiler::rubyWatch(int id)
uint64 tr = 0;
Address watch_address = Address(tr);
DPRINTFN("%7s %3s RUBY WATCH %d\n", g_eventQueue_ptr->getTime(), id,
DPRINTFN("%7s %3s RUBY WATCH %d\n", g_system_ptr->getTime(), id,
watch_address);
// don't care about success or failure

View File

@@ -58,7 +58,6 @@
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/protocol/RubyRequestType.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Histogram.hh"
#include "mem/ruby/common/Set.hh"
@@ -70,7 +69,7 @@
class RubyRequest;
class AddressProfiler;
class Profiler : public SimObject, public Consumer
class Profiler : public SimObject
{
public:
typedef RubyProfilerParams Params;
@@ -244,6 +243,20 @@ class Profiler : public SimObject, public Consumer
bool m_all_instructions;
int m_num_of_sequencers;
protected:
class ProfileEvent : public Event
{
public:
ProfileEvent(Profiler *_profiler)
{
profiler = _profiler;
}
private:
void process() { profiler->wakeup(); }
Profiler *profiler;
};
ProfileEvent m_event;
};
inline std::ostream&
@@ -255,5 +268,3 @@ operator<<(std::ostream& out, const Profiler& obj)
}
#endif // __MEM_RUBY_PROFILER_PROFILER_HH__

View File

@@ -26,8 +26,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/profiler/StoreTrace.hh"
#include "sim/core.hh"
using namespace std;
@@ -105,7 +105,7 @@ StoreTrace::clearSummary()
void
StoreTrace::store(NodeID node)
{
Time current = g_eventQueue_ptr->getTime();
Tick current = curTick();
assert((m_last_writer == -1) || (m_last_writer == node));
@@ -127,7 +127,7 @@ void
StoreTrace::downgrade(NodeID node)
{
if (node == m_last_writer) {
Time current = g_eventQueue_ptr->getTime();
Time current = curTick();
assert(m_stores_this_interval != 0);
assert(m_last_store != 0);
assert(m_first_store != 0);

View File

@@ -32,7 +32,6 @@
#include <iostream>
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Histogram.hh"
class StoreTrace

View File

@@ -34,7 +34,7 @@
#include "base/refcnt.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/TypeDefines.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/system/System.hh"
class Message;
typedef RefCountingPtr<Message> MsgPtr;
@@ -43,8 +43,8 @@ class Message : public RefCounted
{
public:
Message()
: m_time(g_eventQueue_ptr->getTime()),
m_LastEnqueueTime(g_eventQueue_ptr->getTime()),
: m_time(g_system_ptr->getTime()),
m_LastEnqueueTime(g_system_ptr->getTime()),
m_DelayedCycles(0)
{ }

View File

@@ -49,7 +49,7 @@ random(int n)
inline Time
get_time()
{
return g_eventQueue_ptr->getTime();
return g_system_ptr->getTime();
}
inline Time

View File

@@ -154,8 +154,7 @@ CacheMemory::tryCacheAccess(const Address& address, RubyRequestType type,
if (loc != -1) {
// Do we even have a tag match?
AbstractCacheEntry* entry = m_cache[cacheSet][loc];
m_replacementPolicy_ptr->
touch(cacheSet, loc, g_eventQueue_ptr->getTime());
m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
data_ptr = &(entry->getDataBlk());
if (entry->m_Permission == AccessPermission_Read_Write) {
@@ -183,8 +182,7 @@ CacheMemory::testCacheAccess(const Address& address, RubyRequestType type,
if (loc != -1) {
// Do we even have a tag match?
AbstractCacheEntry* entry = m_cache[cacheSet][loc];
m_replacementPolicy_ptr->
touch(cacheSet, loc, g_eventQueue_ptr->getTime());
m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
data_ptr = &(entry->getDataBlk());
return m_cache[cacheSet][loc]->m_Permission !=
@@ -258,8 +256,7 @@ CacheMemory::allocate(const Address& address, AbstractCacheEntry* entry)
set[i]->m_locked = -1;
m_tag_index[address] = i;
m_replacementPolicy_ptr->
touch(cacheSet, i, g_eventQueue_ptr->getTime());
m_replacementPolicy_ptr->touch(cacheSet, i, curTick());
return entry;
}
@@ -324,8 +321,7 @@ CacheMemory::setMRU(const Address& address)
int loc = findTagInSet(cacheSet, address);
if(loc != -1)
m_replacementPolicy_ptr->
touch(cacheSet, loc, g_eventQueue_ptr->getTime());
m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
}
void
@@ -540,4 +536,3 @@ CacheMemory::checkResourceAvailable(CacheResourceType res, Address addr)
return true;
}
}

View File

@@ -29,7 +29,6 @@
#ifndef __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__
#define __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__
#include <iostream>
#include <string>
#include <vector>
@@ -172,4 +171,3 @@ class CacheMemory : public SimObject
};
#endif // __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__

View File

@@ -279,7 +279,7 @@ RubyMemoryControl::~RubyMemoryControl()
void
RubyMemoryControl::enqueue(const MsgPtr& message, int latency)
{
Time current_time = g_eventQueue_ptr->getTime();
Time current_time = g_system_ptr->getTime();
Time arrival_time = current_time + latency;
const MemoryMsg* memMess = safe_cast<const MemoryMsg*>(message.get());
physical_address_t addr = memMess->getAddress().getAddress();
@@ -302,7 +302,7 @@ RubyMemoryControl::enqueueMemRef(MemoryNode& memRef)
DPRINTF(RubyMemory,
"New memory request%7d: %#08x %c arrived at %10d bank = %3x sched %c\n",
m_msg_counter, addr, memRef.m_is_mem_read ? 'R':'W',
memRef.m_time * g_eventQueue_ptr->getClock(),
memRef.m_time * g_system_ptr->getClock(),
bank, m_event.scheduled() ? 'Y':'N');
m_profiler_ptr->profileMemReq(bank);
@@ -347,7 +347,7 @@ bool
RubyMemoryControl::isReady()
{
return ((!m_response_queue.empty()) &&
(m_response_queue.front().m_time <= g_eventQueue_ptr->getTime()));
(m_response_queue.front().m_time <= g_system_ptr->getTime()));
}
void
@@ -377,17 +377,17 @@ RubyMemoryControl::printStats(ostream& out) const
void
RubyMemoryControl::enqueueToDirectory(MemoryNode req, int latency)
{
Time arrival_time = g_eventQueue_ptr->getTime()
Time arrival_time = g_system_ptr->getTime()
+ (latency * m_mem_bus_cycle_multiplier);
req.m_time = arrival_time;
m_response_queue.push_back(req);
DPRINTF(RubyMemory, "Enqueueing msg %#08x %c back to directory at %15d\n",
req.m_addr, req.m_is_mem_read ? 'R':'W',
arrival_time * g_eventQueue_ptr->getClock());
arrival_time * g_system_ptr->getClock());
// schedule the wake up
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, arrival_time);
m_consumer_ptr->scheduleEventAbsolute(arrival_time);
}
// getBank returns an integer that is unique for each

View File

@@ -198,7 +198,7 @@ RubyPort::M5Port::recvTimingReq(PacketPtr pkt)
// send next cycle
ruby_port->pio_port.schedTimingReq(pkt, curTick() +
g_eventQueue_ptr->getClock());
g_system_ptr->getClock());
return true;
}
@@ -651,7 +651,7 @@ RubyPort::M5Port::hitCallback(PacketPtr pkt)
if (needsResponse) {
DPRINTF(RubyPort, "Sending packet back over port\n");
// send next cycle
schedTimingResp(pkt, curTick() + g_eventQueue_ptr->getClock());
schedTimingResp(pkt, curTick() + g_system_ptr->getClock());
} else {
delete pkt;
}

View File

@@ -43,11 +43,9 @@
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"
#include "params/RubySequencer.hh"
using namespace std;
@@ -88,7 +86,7 @@ void
Sequencer::wakeup()
{
// Check for deadlock of any of the requests
Time current_time = g_eventQueue_ptr->getTime();
Time current_time = g_system_ptr->getTime();
// Check across all outstanding requests
int total_outstanding = 0;
@@ -131,7 +129,7 @@ Sequencer::wakeup()
if (m_outstanding_count > 0) {
// If there are still outstanding requests, keep checking
schedule(deadlockCheckEvent,
m_deadlock_threshold * g_eventQueue_ptr->getClock() +
m_deadlock_threshold * g_system_ptr->getClock() +
curTick());
}
}
@@ -156,7 +154,7 @@ Sequencer::printProgress(ostream& out) const
#if 0
int total_demand = 0;
out << "Sequencer Stats Version " << m_version << endl;
out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
out << "Current time = " << g_system_ptr->getTime() << endl;
out << "---------------" << endl;
out << "outstanding requests" << endl;
@@ -212,7 +210,7 @@ Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
// See if we should schedule a deadlock check
if (deadlockCheckEvent.scheduled() == false) {
schedule(deadlockCheckEvent,
m_deadlock_threshold * g_eventQueue_ptr->getClock()
m_deadlock_threshold * g_system_ptr->getClock()
+ curTick());
}
@@ -239,7 +237,7 @@ Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
if (r.second) {
RequestTable::iterator i = r.first;
i->second = new SequencerRequest(pkt, request_type,
g_eventQueue_ptr->getTime());
g_system_ptr->getTime());
m_outstanding_count++;
} else {
// There is an outstanding write request for the cache line
@@ -260,7 +258,7 @@ Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
if (r.second) {
RequestTable::iterator i = r.first;
i->second = new SequencerRequest(pkt, request_type,
g_eventQueue_ptr->getTime());
g_system_ptr->getTime());
m_outstanding_count++;
} else {
// There is an outstanding read request for the cache line
@@ -476,8 +474,8 @@ Sequencer::hitCallback(SequencerRequest* srequest,
m_dataCache_ptr->setMRU(request_line_address);
}
assert(g_eventQueue_ptr->getTime() >= issued_time);
Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;
assert(g_system_ptr->getTime() >= issued_time);
Time miss_latency = g_system_ptr->getTime() - issued_time;
// Profile the miss latency for all non-zero demand misses
if (miss_latency != 0) {
@@ -488,7 +486,7 @@ Sequencer::hitCallback(SequencerRequest* srequest,
initialRequestTime,
forwardRequestTime,
firstResponseTime,
g_eventQueue_ptr->getTime());
g_system_ptr->getTime());
}
if (mach == GenericMachineType_Directory) {
@@ -496,7 +494,7 @@ Sequencer::hitCallback(SequencerRequest* srequest,
initialRequestTime,
forwardRequestTime,
firstResponseTime,
g_eventQueue_ptr->getTime());
g_system_ptr->getTime());
}
DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",

View File

@@ -36,13 +36,11 @@
#include "mem/protocol/RubyRequestType.hh"
#include "mem/protocol/SequencerRequestType.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "params/RubySequencer.hh"
class DataBlock;
class CacheMemory;
struct RubySequencerParams;
struct SequencerRequest
{
@@ -57,7 +55,7 @@ struct SequencerRequest
std::ostream& operator<<(std::ostream& out, const SequencerRequest& obj);
class Sequencer : public RubyPort, public Consumer
class Sequencer : public RubyPort
{
public:
typedef RubySequencerParams Params;

View File

@@ -77,7 +77,6 @@ RubySystem::RubySystem(const Params *p)
m_memory_size_bits = floorLog2(m_memory_size_bytes);
}
g_eventQueue_ptr = new RubyEventQueue(p->eventq, m_clock);
g_system_ptr = this;
if (p->no_mem_vec) {
m_mem_vec_ptr = NULL;
@@ -423,13 +422,13 @@ RubySystem::checkGlobalCoherenceInvariant(const Address& addr)
WARN_EXPR(exclusive);
WARN_EXPR(m_chip_vector[i]->getID());
WARN_EXPR(addr);
WARN_EXPR(g_eventQueue_ptr->getTime());
WARN_EXPR(getTime());
ERROR_MSG("Coherence Violation Detected -- 2 exclusive chips");
} else if (sharedDetected) {
WARN_EXPR(lastShared);
WARN_EXPR(m_chip_vector[i]->getID());
WARN_EXPR(addr);
WARN_EXPR(g_eventQueue_ptr->getTime());
WARN_EXPR(getTime());
ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
} else {
exclusive = m_chip_vector[i]->getID();
@@ -442,7 +441,7 @@ RubySystem::checkGlobalCoherenceInvariant(const Address& addr)
WARN_EXPR(lastShared);
WARN_EXPR(exclusive);
WARN_EXPR(addr);
WARN_EXPR(g_eventQueue_ptr->getTime());
WARN_EXPR(getTime());
ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
}
}

View File

@@ -37,7 +37,6 @@
#include "base/callback.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/recorder/CacheRecorder.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/MemoryVector.hh"
@@ -78,6 +77,8 @@ class RubySystem : public SimObject
static int getBlockSizeBits() { return m_block_size_bits; }
static uint64 getMemorySizeBytes() { return m_memory_size_bytes; }
static int getMemorySizeBits() { return m_memory_size_bits; }
Tick getTime() const { return curTick() / m_clock; }
Tick getClock() const { return m_clock; }
// Public Methods
static Network*
@@ -87,12 +88,6 @@ class RubySystem : public SimObject
return m_network_ptr;
}
static RubyEventQueue*
getEventQueue()
{
return g_eventQueue_ptr;
}
Profiler*
getProfiler()
{
@@ -111,11 +106,6 @@ class RubySystem : public SimObject
void clearStats() const;
uint64 getInstructionCount(int thread) { return 1; }
static uint64
getCycleCount(int thread)
{
return g_eventQueue_ptr->getTime();
}
void print(std::ostream& out) const;

View File

@@ -27,7 +27,7 @@
*/
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/system/TimerTable.hh"
TimerTable::TimerTable()
@@ -48,7 +48,7 @@ TimerTable::isReady() const
updateNext();
}
assert(m_next_valid);
return (g_eventQueue_ptr->getTime() >= m_next_time);
return (g_system_ptr->getTime() >= m_next_time);
}
const Address&
@@ -69,10 +69,10 @@ TimerTable::set(const Address& address, Time relative_latency)
assert(address == line_address(address));
assert(relative_latency > 0);
assert(!m_map.count(address));
Time ready_time = g_eventQueue_ptr->getTime() + relative_latency;
Time ready_time = g_system_ptr->getTime() + relative_latency;
m_map[address] = ready_time;
assert(m_consumer_ptr != NULL);
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, ready_time);
m_consumer_ptr->scheduleEventAbsolute(ready_time);
m_next_valid = false;
// Don't always recalculate the next ready address

View File

@@ -31,12 +31,11 @@
#include <cassert>
#include <iostream>
#include <map>
#include <string>
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
class Consumer;
#include "mem/ruby/common/Consumer.hh"
class TimerTable
{

View File

@@ -74,13 +74,13 @@ void
WireBuffer::enqueue(MsgPtr message, int latency)
{
m_msg_counter++;
Time current_time = g_eventQueue_ptr->getTime();
Time current_time = g_system_ptr->getTime();
Time arrival_time = current_time + latency;
assert(arrival_time > current_time);
MessageBufferNode thisNode(arrival_time, m_msg_counter, message);
m_message_queue.push_back(thisNode);
if (m_consumer_ptr != NULL) {
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, arrival_time);
m_consumer_ptr->scheduleEventAbsolute(arrival_time);
} else {
panic("No Consumer for WireBuffer! %s\n", *this);
}
@@ -123,19 +123,18 @@ WireBuffer::recycle()
MessageBufferNode node = m_message_queue.front();
pop_heap(m_message_queue.begin(), m_message_queue.end(),
greater<MessageBufferNode>());
node.m_time = g_eventQueue_ptr->getTime() + 1;
node.m_time = g_system_ptr->getTime() + 1;
m_message_queue.back() = node;
push_heap(m_message_queue.begin(), m_message_queue.end(),
greater<MessageBufferNode>());
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr,
g_eventQueue_ptr->getTime() + 1);
m_consumer_ptr->scheduleEventAbsolute(g_system_ptr->getTime() + 1);
}
bool
WireBuffer::isReady()
{
return ((!m_message_queue.empty()) &&
(m_message_queue.front().m_time <= g_eventQueue_ptr->getTime()));
(m_message_queue.front().m_time <= g_system_ptr->getTime()));
}
void

View File

@@ -38,7 +38,6 @@
#include "mem/ruby/buffers/MessageBufferNode.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "params/RubyWireBuffer.hh"
#include "sim/sim_object.hh"

View File

@@ -56,7 +56,7 @@ class AST(PairContainer):
code('''
char c;
cerr << "Runtime Error at ${{self.location}}, Ruby Time: "
<< g_eventQueue_ptr->getTime() << ": "
<< g_system_ptr->getTime() << ": "
<< $message
<< ", PID: " << getpid() << endl
<< "press return to continue." << endl;

View File

@@ -142,7 +142,7 @@ class FuncCallExprAST(ExprAST):
}
if (result == TransitionResult_ResourceStall) {
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(this, 1);
// Cannot do anything with this transition, go check next doable transition (mostly likely of next port)
}
@@ -173,7 +173,7 @@ class FuncCallExprAST(ExprAST):
}
if (result1 == TransitionResult_ResourceStall) {
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(this, 1);
// Cannot do anything with this transition, go check next
// doable transition (mostly likely of next port)
}

View File

@@ -1037,7 +1037,7 @@ ${ident}_Controller::wakeup()
g_system_ptr->getProfiler()->controllerBusy(m_machineID);
// Wakeup in another cycle and try again
g_eventQueue_ptr->scheduleEvent(this, 1);
scheduleEvent(this, 1);
break;
}
''')
@@ -1064,7 +1064,6 @@ ${ident}_Controller::wakeup()
code('''
break; // If we got this far, we have nothing left todo
}
// g_eventQueue_ptr->scheduleEvent(this, 1);
}
''')
@@ -1126,11 +1125,8 @@ ${ident}_Controller::doTransition(${ident}_Event event,
${ident}_State next_state = state;
DPRINTF(RubyGenerated, "%s, Time: %lld, state: %s, event: %s, addr: %s\\n",
*this,
g_eventQueue_ptr->getTime(),
${ident}_State_to_string(state),
${ident}_Event_to_string(event),
addr);
*this, g_system_ptr->getTime(), ${ident}_State_to_string(state),
${ident}_Event_to_string(event), addr);
TransitionResult result =
''')
@@ -1302,7 +1298,7 @@ if (!checkResourceAvailable(%s_RequestType_%s, addr)) {
default:
fatal("Invalid transition\\n"
"%s time: %d addr: %s event: %s state: %s\\n",
name(), g_eventQueue_ptr->getTime(), addr, event, state);
name(), g_system_ptr->getTime(), addr, event, state);
}
return TransitionResult_Valid;
}

View File

@@ -415,7 +415,7 @@ ${{self.c_ident}}::print(ostream& out) const
code('out << "${{dm.ident}} = " << m_${{dm.ident}} << " ";''')
if self.isMessage:
code('out << "Time = " << getTime() * g_eventQueue_ptr->getClock() << " ";')
code('out << "Time = " << getTime() * g_system_ptr->getClock() << " ";')
code.dedent()
# Trailer