ruby: style pass

This commit is contained in:
Nathan Binkert
2010-03-22 18:43:53 -07:00
parent 2620e08722
commit 5ab13e2deb
92 changed files with 7043 additions and 8245 deletions

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,318 +26,341 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/system/System.hh"
// Construct an empty, infinitely-sized (m_max_size == -1), strict-FIFO
// buffer with no consumer attached.  Callers must still invoke
// setOrdering() before the first enqueue() (enforced there).
MessageBuffer::MessageBuffer(const string &name)
{
    m_msg_counter = 0;
    m_consumer_ptr = NULL;
    m_ordering_set = false;
    m_strict_fifo = true;
    m_size = 0;
    m_max_size = -1;                 // -1 means unbounded
    m_last_arrival_time = 0;
    m_randomization = true;
    m_size_last_time_size_checked = 0;
    m_time_last_time_size_checked = 0;
    m_time_last_time_enqueue = 0;
    m_time_last_time_pop = 0;
    m_size_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
    m_not_avail_count = 0;
    m_priority_rank = 0;
    m_name = name;
}
int MessageBuffer::getSize()
int
MessageBuffer::getSize()
{
if(m_time_last_time_size_checked == g_eventQueue_ptr->getTime()){
return m_size_last_time_size_checked;
} else {
m_time_last_time_size_checked = g_eventQueue_ptr->getTime();
m_size_last_time_size_checked = m_size;
return m_size;
}
}
// Return true if n more messages can be enqueued this cycle without
// exceeding m_max_size; counts a failed request in m_not_avail_count.
bool
MessageBuffer::areNSlotsAvailable(int n)
{
    // fast path when message buffers have infinite size
    if (m_max_size == -1) {
        return true;
    }

    // determine my correct size for the current cycle
    // pop operations shouldn't effect the network's visible size
    // until next cycle, but enqueue operations effect the visible
    // size immediately
    int current_size = max(m_size_at_cycle_start, m_size);
    if (m_time_last_time_pop < g_eventQueue_ptr->getTime()) {
        // no pops this cycle - m_size is correct
        current_size = m_size;
    } else {
        if (m_time_last_time_enqueue < g_eventQueue_ptr->getTime()) {
            // no enqueues this cycle - m_size_at_cycle_start is correct
            current_size = m_size_at_cycle_start;
        } else {
            // both pops and enqueues occured this cycle - add new
            // enqueued msgs to m_size_at_cycle_start
            current_size = m_size_at_cycle_start + m_msgs_this_cycle;
        }
    }

    // now compare the new size with our max size
    if (current_size + n <= m_max_size) {
        return true;
    } else {
        DEBUG_MSG(QUEUE_COMP, MedPrio, n);
        DEBUG_MSG(QUEUE_COMP, MedPrio, current_size);
        DEBUG_MSG(QUEUE_COMP, MedPrio, m_size);
        DEBUG_MSG(QUEUE_COMP, MedPrio, m_max_size);
        m_not_avail_count++;
        return false;
    }
}
const Message* MessageBuffer::peekAtHeadOfQueue() const
const MsgPtr
MessageBuffer::getMsgPtrCopy() const
{
const Message* msg_ptr;
DEBUG_NEWLINE(QUEUE_COMP,MedPrio);
assert(isReady());
DEBUG_MSG(QUEUE_COMP,MedPrio,"Peeking at head of queue " + m_name + " time: "
+ int_to_string(g_eventQueue_ptr->getTime()) + ".");
assert(isReady());
MsgPtr temp_msg;
temp_msg = *(m_prio_heap.peekMin().m_msgptr.ref());
assert(temp_msg.ref() != NULL);
return temp_msg;
}
// Return a non-owning pointer to the head message without removing it.
// The buffer must be ready (head timestamp <= current time).
const Message*
MessageBuffer::peekAtHeadOfQueue() const
{
    const Message* msg_ptr;
    DEBUG_NEWLINE(QUEUE_COMP, MedPrio);

    DEBUG_MSG(QUEUE_COMP, MedPrio,
        "Peeking at head of queue " + m_name + " time: "
        + int_to_string(g_eventQueue_ptr->getTime()) + ".");
    assert(isReady());

    msg_ptr = m_prio_heap.peekMin().m_msgptr.ref();
    assert(msg_ptr != NULL);

    DEBUG_EXPR(QUEUE_COMP, MedPrio, *msg_ptr);
    DEBUG_NEWLINE(QUEUE_COMP, MedPrio);
    return msg_ptr;
}
// FIXME - move me somewhere else
int random_time()
int
random_time()
{
int time = 1;
time += random() & 0x3; // [0...3]
if ((random() & 0x7) == 0) { // 1 in 8 chance
time += 100 + (random() % 0xf); // 100 + [1...15]
}
return time;
int time = 1;
time += random() & 0x3; // [0...3]
if ((random() & 0x7) == 0) { // 1 in 8 chance
time += 100 + (random() % 0xf); // 100 + [1...15]
}
return time;
}
void MessageBuffer::enqueue(const MsgPtr& message, Time delta)
void
MessageBuffer::enqueue(const MsgPtr& message, Time delta)
{
DEBUG_NEWLINE(QUEUE_COMP,HighPrio);
DEBUG_MSG(QUEUE_COMP,HighPrio,"enqueue " + m_name + " time: "
+ int_to_string(g_eventQueue_ptr->getTime()) + ".");
DEBUG_EXPR(QUEUE_COMP,MedPrio,message);
DEBUG_NEWLINE(QUEUE_COMP,HighPrio);
DEBUG_NEWLINE(QUEUE_COMP, HighPrio);
DEBUG_MSG(QUEUE_COMP, HighPrio, "enqueue " + m_name + " time: "
+ int_to_string(g_eventQueue_ptr->getTime()) + ".");
DEBUG_EXPR(QUEUE_COMP, MedPrio, message);
DEBUG_NEWLINE(QUEUE_COMP, HighPrio);
m_msg_counter++;
m_size++;
m_msg_counter++;
m_size++;
// record current time incase we have a pop that also adjusts my size
if (m_time_last_time_enqueue < g_eventQueue_ptr->getTime()) {
m_msgs_this_cycle = 0; // first msg this cycle
m_time_last_time_enqueue = g_eventQueue_ptr->getTime();
}
m_msgs_this_cycle++;
// record current time incase we have a pop that also adjusts my size
if (m_time_last_time_enqueue < g_eventQueue_ptr->getTime()) {
m_msgs_this_cycle = 0; // first msg this cycle
m_time_last_time_enqueue = g_eventQueue_ptr->getTime();
}
m_msgs_this_cycle++;
// ASSERT(m_max_size == -1 || m_size <= m_max_size + 1);
// the plus one is a kluge because of a SLICC issue
// ASSERT(m_max_size == -1 || m_size <= m_max_size + 1);
// the plus one is a kluge because of a SLICC issue
if (!m_ordering_set) {
// WARN_EXPR(*this);
WARN_EXPR(m_name);
ERROR_MSG("Ordering property of this queue has not been set");
}
if (!m_ordering_set) {
// WARN_EXPR(*this);
WARN_EXPR(m_name);
ERROR_MSG("Ordering property of this queue has not been set");
}
// Calculate the arrival time of the message, that is, the first
// cycle the message can be dequeued.
// printf ("delta %i \n", delta);
assert(delta>0);
Time current_time = g_eventQueue_ptr->getTime();
Time arrival_time = 0;
if (!RubySystem::getRandomization() || (m_randomization == false)) {
// No randomization
arrival_time = current_time + delta;
// Calculate the arrival time of the message, that is, the first
// cycle the message can be dequeued.
//printf ("delta %i \n", delta);
assert(delta>0);
Time current_time = g_eventQueue_ptr->getTime();
Time arrival_time = 0;
if (!RubySystem::getRandomization() || (m_randomization == false)) {
// No randomization
arrival_time = current_time + delta;
} else {
// Randomization - ignore delta
if (m_strict_fifo) {
if (m_last_arrival_time < current_time) {
m_last_arrival_time = current_time;
}
arrival_time = m_last_arrival_time + random_time();
} else {
arrival_time = current_time + random_time();
}
}
} else {
// Randomization - ignore delta
// Check the arrival time
assert(arrival_time > current_time);
if (m_strict_fifo) {
if (m_last_arrival_time < current_time) {
m_last_arrival_time = current_time;
}
arrival_time = m_last_arrival_time + random_time();
} else {
arrival_time = current_time + random_time();
if (arrival_time < m_last_arrival_time) {
WARN_EXPR(*this);
WARN_EXPR(m_name);
WARN_EXPR(current_time);
WARN_EXPR(delta);
WARN_EXPR(arrival_time);
WARN_EXPR(m_last_arrival_time);
ERROR_MSG("FIFO ordering violated");
}
}
}
m_last_arrival_time = arrival_time;
// Check the arrival time
assert(arrival_time > current_time);
if (m_strict_fifo) {
if (arrival_time >= m_last_arrival_time) {
// compute the delay cycles and set enqueue time
Message* msg_ptr = message.mod_ref();
assert(msg_ptr != NULL);
assert(g_eventQueue_ptr->getTime() >= msg_ptr->getLastEnqueueTime() &&
"ensure we aren't dequeued early");
msg_ptr->setDelayedCycles(g_eventQueue_ptr->getTime() -
msg_ptr->getLastEnqueueTime() +
msg_ptr->getDelayedCycles());
msg_ptr->setLastEnqueueTime(arrival_time);
// Insert the message into the priority heap
MessageBufferNode thisNode(arrival_time, m_msg_counter, message);
m_prio_heap.insert(thisNode);
DEBUG_NEWLINE(QUEUE_COMP, HighPrio);
DEBUG_MSG(QUEUE_COMP, HighPrio, "enqueue " + m_name
+ " with arrival_time " + int_to_string(arrival_time)
+ " cur_time: " + int_to_string(g_eventQueue_ptr->getTime())
+ ".");
DEBUG_EXPR(QUEUE_COMP, MedPrio, message);
DEBUG_NEWLINE(QUEUE_COMP, HighPrio);
// Schedule the wakeup
if (m_consumer_ptr != NULL) {
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, arrival_time);
} else {
WARN_EXPR(*this);
WARN_EXPR(m_name);
WARN_EXPR(current_time);
WARN_EXPR(delta);
WARN_EXPR(arrival_time);
WARN_EXPR(m_last_arrival_time);
ERROR_MSG("FIFO ordering violated");
WARN_EXPR(*this);
WARN_EXPR(m_name);
ERROR_MSG("No consumer");
}
}
m_last_arrival_time = arrival_time;
// compute the delay cycles and set enqueue time
Message* msg_ptr = NULL;
msg_ptr = message.mod_ref();
assert(msg_ptr != NULL);
assert(g_eventQueue_ptr->getTime() >= msg_ptr->getLastEnqueueTime()); // ensure we aren't dequeued early
msg_ptr->setDelayedCycles((g_eventQueue_ptr->getTime() - msg_ptr->getLastEnqueueTime())+msg_ptr->getDelayedCycles());
msg_ptr->setLastEnqueueTime(arrival_time);
// Insert the message into the priority heap
MessageBufferNode thisNode(arrival_time, m_msg_counter, message);
m_prio_heap.insert(thisNode);
DEBUG_NEWLINE(QUEUE_COMP,HighPrio);
DEBUG_MSG(QUEUE_COMP,HighPrio,"enqueue " + m_name
+ " with arrival_time " + int_to_string(arrival_time)
+ " cur_time: " + int_to_string(g_eventQueue_ptr->getTime()) + ".");
DEBUG_EXPR(QUEUE_COMP,MedPrio,message);
DEBUG_NEWLINE(QUEUE_COMP,HighPrio);
// Schedule the wakeup
if (m_consumer_ptr != NULL) {
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, arrival_time);
} else {
WARN_EXPR(*this);
WARN_EXPR(m_name);
ERROR_MSG("No consumer");
}
}
// Dequeue the head message into 'message' and return the total number
// of cycles it spent delayed in buffers.
int
MessageBuffer::dequeue_getDelayCycles(MsgPtr& message)
{
    int delay_cycles = -1; // null value

    dequeue(message);

    // get the delay cycles
    delay_cycles = setAndReturnDelayCycles(message);

    assert(delay_cycles >= 0);
    return delay_cycles;
}
// Remove the head message from the queue, returning it via 'message'.
void
MessageBuffer::dequeue(MsgPtr& message)
{
    DEBUG_MSG(QUEUE_COMP, MedPrio, "dequeue from " + m_name);
    message = m_prio_heap.peekMin().m_msgptr;

    pop();
    DEBUG_EXPR(QUEUE_COMP, MedPrio, message);
}
int MessageBuffer::dequeue_getDelayCycles()
int
MessageBuffer::dequeue_getDelayCycles()
{
int delay_cycles = -1; // null value
int delay_cycles = -1; // null value
// get MsgPtr of the message about to be dequeued
MsgPtr message = m_prio_heap.peekMin().m_msgptr;
// get MsgPtr of the message about to be dequeued
MsgPtr message = m_prio_heap.peekMin().m_msgptr;
// get the delay cycles
delay_cycles = setAndReturnDelayCycles(message);
// get the delay cycles
delay_cycles = setAndReturnDelayCycles(message);
dequeue();
dequeue();
assert(delay_cycles >= 0);
return delay_cycles;
assert(delay_cycles >= 0);
return delay_cycles;
}
// Remove the head message.  The visible size (see areNSlotsAvailable)
// does not shrink until the next cycle.
void
MessageBuffer::pop()
{
    DEBUG_MSG(QUEUE_COMP, MedPrio, "pop from " + m_name);
    assert(isReady());
    m_prio_heap.extractMin();

    // record previous size and time so the current buffer size isn't
    // adjusted until next cycle
    if (m_time_last_time_pop < g_eventQueue_ptr->getTime()) {
        m_size_at_cycle_start = m_size;
        m_time_last_time_pop = g_eventQueue_ptr->getTime();
    }
    m_size--;
}
void MessageBuffer::recycle()
void
MessageBuffer::clear()
{
DEBUG_MSG(QUEUE_COMP,MedPrio,"recycling " + m_name);
assert(isReady());
MessageBufferNode node = m_prio_heap.extractMin();
node.m_time = g_eventQueue_ptr->getTime() + m_recycle_latency;
m_prio_heap.insert(node);
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, g_eventQueue_ptr->getTime() + m_recycle_latency);
while(m_prio_heap.size() > 0){
m_prio_heap.extractMin();
}
ASSERT(m_prio_heap.size() == 0);
m_msg_counter = 0;
m_size = 0;
m_time_last_time_enqueue = 0;
m_time_last_time_pop = 0;
m_size_at_cycle_start = 0;
m_msgs_this_cycle = 0;
}
int MessageBuffer::setAndReturnDelayCycles(MsgPtr& message)
void
MessageBuffer::recycle()
{
int delay_cycles = -1; // null value
// get the delay cycles of the message at the top of the queue
Message* msg_ptr = message.ref();
// this function should only be called on dequeue
// ensure the msg hasn't been enqueued
assert(msg_ptr->getLastEnqueueTime() <= g_eventQueue_ptr->getTime());
msg_ptr->setDelayedCycles((g_eventQueue_ptr->getTime() - msg_ptr->getLastEnqueueTime())+msg_ptr->getDelayedCycles());
delay_cycles = msg_ptr->getDelayedCycles();
assert(delay_cycles >= 0);
return delay_cycles;
DEBUG_MSG(QUEUE_COMP, MedPrio, "recycling " + m_name);
assert(isReady());
MessageBufferNode node = m_prio_heap.extractMin();
node.m_time = g_eventQueue_ptr->getTime() + m_recycle_latency;
m_prio_heap.insert(node);
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr,
g_eventQueue_ptr->getTime() + m_recycle_latency);
}
void MessageBuffer::print(ostream& out) const
int
MessageBuffer::setAndReturnDelayCycles(MsgPtr& message)
{
out << "[MessageBuffer: ";
if (m_consumer_ptr != NULL) {
out << " consumer-yes ";
}
out << m_prio_heap << "] " << m_name << endl;
int delay_cycles = -1; // null value
// get the delay cycles of the message at the top of the queue
Message* msg_ptr = message.ref();
// this function should only be called on dequeue
// ensure the msg hasn't been enqueued
assert(msg_ptr->getLastEnqueueTime() <= g_eventQueue_ptr->getTime());
msg_ptr->setDelayedCycles(g_eventQueue_ptr->getTime() -
msg_ptr->getLastEnqueueTime() +
msg_ptr->getDelayedCycles());
delay_cycles = msg_ptr->getDelayedCycles();
assert(delay_cycles >= 0);
return delay_cycles;
}
void MessageBuffer::printStats(ostream& out)
void
MessageBuffer::print(ostream& out) const
{
out << "MessageBuffer: " << m_name << " stats - msgs:" << m_msg_counter << " full:" << m_not_avail_count << endl;
out << "[MessageBuffer: ";
if (m_consumer_ptr != NULL) {
out << " consumer-yes ";
}
out << m_prio_heap << "] " << m_name << endl;
}
// Dump cumulative statistics: messages enqueued (m_msg_counter) and
// the number of times a slot request failed (m_not_avail_count).
void
MessageBuffer::printStats(ostream& out)
{
    out << "MessageBuffer: " << m_name << " stats - msgs:" << m_msg_counter
        << " full:" << m_not_avail_count << endl;
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,137 +27,156 @@
*/
/*
* $Id$
*
* Description: Unordered buffer of messages that can be inserted such
* Unordered buffer of messages that can be inserted such
* that they can be dequeued after a given delta time has expired.
*
*/
#ifndef __MEM_RUBY_BUFFERS_MESSAGEBUFFER_HH__
#define __MEM_RUBY_BUFFERS_MESSAGEBUFFER_HH__

#include <iostream>
#include <string>

#include "mem/gems_common/PrioHeap.hh"
#include "mem/gems_common/util.hh"
#include "mem/ruby/buffers/MessageBufferNode.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/slicc_interface/Message.hh"
class MessageBuffer {
public:
// Constructors
MessageBuffer(const std::string &name = "");
class MessageBuffer
{
public:
MessageBuffer(const std::string &name = "");
// ~MessageBuffer()
static void printConfig(std::ostream& out) {}
void
setRecycleLatency(int recycle_latency)
{
m_recycle_latency = recycle_latency;
}
// Public Methods
// TRUE if head of queue timestamp <= SystemTime
bool
isReady() const
{
return ((m_prio_heap.size() > 0) &&
(m_prio_heap.peekMin().m_time <= g_eventQueue_ptr->getTime()));
}
static void printConfig(std::ostream& out) {}
void setRecycleLatency(int recycle_latency) { m_recycle_latency = recycle_latency; }
void
delayHead()
{
MessageBufferNode node = m_prio_heap.extractMin();
enqueue(node.m_msgptr, 1);
}
// TRUE if head of queue timestamp <= SystemTime
bool isReady() const {
return ((m_prio_heap.size() > 0) &&
(m_prio_heap.peekMin().m_time <= g_eventQueue_ptr->getTime()));
}
bool areNSlotsAvailable(int n);
int getPriority() { return m_priority_rank; }
void setPriority(int rank) { m_priority_rank = rank; }
void setConsumer(Consumer* consumer_ptr)
{
ASSERT(m_consumer_ptr == NULL);
m_consumer_ptr = consumer_ptr;
}
void delayHead() {
MessageBufferNode node = m_prio_heap.extractMin();
enqueue(node.m_msgptr, 1);
}
void setDescription(const std::string& name) { m_name = name; }
std::string getDescription() { return m_name;}
bool areNSlotsAvailable(int n);
int getPriority() { return m_priority_rank; }
void setPriority(int rank) { m_priority_rank = rank; }
void setConsumer(Consumer* consumer_ptr) { ASSERT(m_consumer_ptr==NULL); m_consumer_ptr = consumer_ptr; }
void setDescription(const std::string& name) { m_name = name; }
std::string getDescription() { return m_name;}
Consumer* getConsumer() { return m_consumer_ptr; }
Consumer* getConsumer() { return m_consumer_ptr; }
const Message* peekAtHeadOfQueue() const;
const Message* peek() const { return peekAtHeadOfQueue(); }
const MsgPtr getMsgPtrCopy() const;
const Message* peekAtHeadOfQueue() const;
const Message* peek() const { return peekAtHeadOfQueue(); }
const MsgPtr getMsgPtrCopy() const;
const MsgPtr& peekMsgPtr() const { assert(isReady()); return m_prio_heap.peekMin().m_msgptr; }
const MsgPtr& peekMsgPtrEvenIfNotReady() const {return m_prio_heap.peekMin().m_msgptr; }
const MsgPtr&
peekMsgPtr() const
{
assert(isReady());
return m_prio_heap.peekMin().m_msgptr;
}
void enqueue(const MsgPtr& message) { enqueue(message, 1); }
void enqueue(const MsgPtr& message, Time delta);
// void enqueueAbsolute(const MsgPtr& message, Time absolute_time);
int dequeue_getDelayCycles(MsgPtr& message); // returns delay cycles of the message
void dequeue(MsgPtr& message);
int dequeue_getDelayCycles(); // returns delay cycles of the message
void dequeue() { pop(); }
void pop();
void recycle();
bool isEmpty() const { return m_prio_heap.size() == 0; }
const MsgPtr&
peekMsgPtrEvenIfNotReady() const
{
return m_prio_heap.peekMin().m_msgptr;
}
void setOrdering(bool order) { m_strict_fifo = order; m_ordering_set = true; }
void setSize(int size) {m_max_size = size;}
int getSize();
void setRandomization(bool random_flag) { m_randomization = random_flag; }
void enqueue(const MsgPtr& message) { enqueue(message, 1); }
void enqueue(const MsgPtr& message, Time delta);
// void enqueueAbsolute(const MsgPtr& message, Time absolute_time);
int dequeue_getDelayCycles(MsgPtr& message); // returns delay
// cycles of the
// message
void dequeue(MsgPtr& message);
int dequeue_getDelayCycles(); // returns delay cycles of the message
void dequeue() { pop(); }
void pop();
void recycle();
bool isEmpty() const { return m_prio_heap.size() == 0; }
void clear();
void
setOrdering(bool order)
{
m_strict_fifo = order;
m_ordering_set = true;
}
void setSize(int size) { m_max_size = size; }
int getSize();
void setRandomization(bool random_flag) { m_randomization = random_flag; }
void print(std::ostream& out) const;
void printStats(std::ostream& out);
void clearStats() { m_not_avail_count = 0; m_msg_counter = 0; }
void clear();
private:
//added by SS
int m_recycle_latency;
void print(std::ostream& out) const;
void printStats(std::ostream& out);
void clearStats() { m_not_avail_count = 0; m_msg_counter = 0; }
// Private Methods
int setAndReturnDelayCycles(MsgPtr& message);
private:
//added by SS
int m_recycle_latency;
// Private copy constructor and assignment operator
MessageBuffer(const MessageBuffer& obj);
MessageBuffer& operator=(const MessageBuffer& obj);
// Private Methods
int setAndReturnDelayCycles(MsgPtr& message);
// Data Members (m_ prefix)
Consumer* m_consumer_ptr; // Consumer to signal a wakeup(), can be NULL
PrioHeap<MessageBufferNode> m_prio_heap;
std::string m_name;
// Private copy constructor and assignment operator
MessageBuffer(const MessageBuffer& obj);
MessageBuffer& operator=(const MessageBuffer& obj);
int m_max_size;
int m_size;
// Data Members (m_ prefix)
Consumer* m_consumer_ptr; // Consumer to signal a wakeup(), can be NULL
PrioHeap<MessageBufferNode> m_prio_heap;
std::string m_name;
Time m_time_last_time_size_checked;
int m_size_last_time_size_checked;
int m_max_size;
int m_size;
// variables used so enqueues appear to happen imediately, while pop happen the next cycle
Time m_time_last_time_enqueue;
Time m_time_last_time_pop;
int m_size_at_cycle_start;
int m_msgs_this_cycle;
Time m_time_last_time_size_checked;
int m_size_last_time_size_checked;
int m_not_avail_count; // count the # of times I didn't have N slots available
int m_msg_counter;
int m_priority_rank;
bool m_strict_fifo;
bool m_ordering_set;
bool m_randomization;
Time m_last_arrival_time;
// variables used so enqueues appear to happen imediately, while
// pop happen the next cycle
Time m_time_last_time_enqueue;
Time m_time_last_time_pop;
int m_size_at_cycle_start;
int m_msgs_this_cycle;
int m_not_avail_count; // count the # of times I didn't have N
// slots available
int m_msg_counter;
int m_priority_rank;
bool m_strict_fifo;
bool m_ordering_set;
bool m_randomization;
Time m_last_arrival_time;
};
// Output operator declaration
//template <class TYPE>
std::ostream& operator<<(std::ostream& out, const MessageBuffer& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
std::ostream& operator<<(std::ostream& out, const MessageBuffer& obj)
inline std::ostream&
operator<<(std::ostream& out, const MessageBuffer& obj)
{
obj.print(out);
out << std::flush;
return out;
obj.print(out);
out << std::flush;
return out;
}
#endif //MESSAGEBUFFER_H
#endif // __MEM_RUBY_BUFFERS_MESSAGEBUFFER_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -32,9 +31,9 @@
// Print this node as "[time, counter, msgptr; ]" for debugging.
void
MessageBufferNode::print(std::ostream& out) const
{
    out << "[";
    out << m_time << ", ";
    out << m_msg_counter << ", ";
    out << m_msgptr << "; ";
    out << "]";
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,64 +26,55 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __MEM_RUBY_BUFFERS_MESSAGEBUFFERNODE_HH__
#define __MEM_RUBY_BUFFERS_MESSAGEBUFFERNODE_HH__

#include <iostream>

#include "mem/ruby/common/Global.hh"
#include "mem/ruby/slicc_interface/Message.hh"
// A (time, sequence counter, message) triple stored in MessageBuffer's
// priority heap.  The counter breaks ties between equal-time nodes.
class MessageBufferNode
{
  public:
    MessageBufferNode()
    {
        m_time = 0;
        m_msg_counter = 0;
    }

    MessageBufferNode(const Time& time, int counter, const MsgPtr& msgptr)
    {
        m_time = time;
        m_msgptr = msgptr;
        m_msg_counter = counter;
    }

    void print(std::ostream& out) const;

  public:
    Time m_time;
    int m_msg_counter; // FIXME, should this be a 64-bit value?
    MsgPtr m_msgptr;
};
// Output operator declaration
std::ostream& operator<<(std::ostream& out, const MessageBufferNode& obj);
// ******************* Definitions *******************
inline extern bool node_less_then_eq(const MessageBufferNode& n1, const MessageBufferNode& n2);
inline extern
bool node_less_then_eq(const MessageBufferNode& n1, const MessageBufferNode& n2)
inline bool
node_less_then_eq(const MessageBufferNode& n1, const MessageBufferNode& n2)
{
if (n1.m_time == n2.m_time) {
assert(n1.m_msg_counter != n2.m_msg_counter);
return (n1.m_msg_counter <= n2.m_msg_counter);
} else {
return (n1.m_time <= n2.m_time);
}
if (n1.m_time == n2.m_time) {
assert(n1.m_msg_counter != n2.m_msg_counter);
return (n1.m_msg_counter <= n2.m_msg_counter);
} else {
return (n1.m_time <= n2.m_time);
}
}
// Output operator definition
extern inline
std::ostream& operator<<(std::ostream& out, const MessageBufferNode& obj)
inline std::ostream&
operator<<(std::ostream& out, const MessageBufferNode& obj)
{
obj.print(out);
out << std::flush;
return out;
obj.print(out);
out << std::flush;
return out;
}
#endif //MESSAGEBUFFERNODE_H
#endif // __MEM_RUBY_BUFFERS_MESSAGEBUFFERNODE_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,42 +26,41 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
#include "mem/ruby/common/Address.hh"
void
Address::output(ostream& out) const
{
    // Note: this outputs addresses in the form "ffff", not "0xffff".
    // This code should always be able to write out addresses in a
    // format that can be read in by the below input() method. Please
    // don't change this without talking to Milo first.
    out << hex << m_address << dec;
}
void
Address::input(istream& in)
{
    // Note: this only works with addresses in the form "ffff", not
    // "0xffff". This code should always be able to read in addresses
    // written out by the above output() method. Please don't change
    // this without talking to Milo first.
    in >> hex >> m_address >> dec;
}
// Copy constructor: copies only the raw address value.
Address::Address(const Address& obj)
{
    m_address = obj.m_address;
}
// Assignment: copies the raw address value; self-assignment is a no-op.
Address&
Address::operator=(const Address& obj)
{
    if (this == &obj) {
        // assert(false);
    } else {
        m_address = obj.m_address;
    }
    return *this;
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,20 +26,16 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
#ifndef __MEM_RUBY_COMMON_ADDRESS_HH__
#define __MEM_RUBY_COMMON_ADDRESS_HH__

#include <iomanip>

#include "base/hashmap.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/MachineID.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/ruby/system/System.hh"

const int ADDRESS_WIDTH = 64; // address width in bytes
@@ -48,124 +43,131 @@ class Address;
typedef Address PhysAddress;
typedef Address VirtAddress;
class Address {
public:
// Constructors
Address() { m_address = 0; }
explicit Address(physical_address_t address) { m_address = address; }
class Address
{
public:
Address()
: m_address(0)
{ }
Address(const Address& obj);
Address& operator=(const Address& obj);
explicit
Address(physical_address_t address)
: m_address(address)
{ }
// Destructor
// ~Address();
Address(const Address& obj);
Address& operator=(const Address& obj);
// Public Methods
void setAddress(physical_address_t address) { m_address = address; }
physical_address_t getAddress() const {return m_address;}
// selects bits inclusive
physical_address_t bitSelect(int small, int big) const;
physical_address_t bitRemove(int small, int big) const;
physical_address_t maskLowOrderBits(int number) const;
physical_address_t maskHighOrderBits(int number) const;
physical_address_t shiftLowOrderBits(int number) const;
void setAddress(physical_address_t address) { m_address = address; }
physical_address_t getAddress() const {return m_address;}
// selects bits inclusive
physical_address_t bitSelect(int small, int big) const;
physical_address_t bitRemove(int small, int big) const;
physical_address_t maskLowOrderBits(int number) const;
physical_address_t maskHighOrderBits(int number) const;
physical_address_t shiftLowOrderBits(int number) const;
physical_address_t getLineAddress() const
{ return bitSelect(RubySystem::getBlockSizeBits(), ADDRESS_WIDTH); }
physical_address_t getOffset() const
{ return bitSelect(0, RubySystem::getBlockSizeBits()-1); }
physical_address_t
getLineAddress() const
{
return bitSelect(RubySystem::getBlockSizeBits(), ADDRESS_WIDTH);
}
void makeLineAddress() { m_address = maskLowOrderBits(RubySystem::getBlockSizeBits()); }
// returns the next stride address based on line address
void makeNextStrideAddress( int stride) {
m_address = maskLowOrderBits(RubySystem::getBlockSizeBits())
+ RubySystem::getBlockSizeBytes()*stride;
}
int getBankSetNum() const;
int getBankSetDist() const;
physical_address_t
getOffset() const
{
return bitSelect(0, RubySystem::getBlockSizeBits() - 1);
}
Index memoryModuleIndex() const;
void
makeLineAddress()
{
m_address = maskLowOrderBits(RubySystem::getBlockSizeBits());
}
void print(ostream& out) const;
void output(ostream& out) const;
void input(istream& in);
// returns the next stride address based on line address
void
makeNextStrideAddress(int stride)
{
m_address = maskLowOrderBits(RubySystem::getBlockSizeBits())
+ RubySystem::getBlockSizeBytes()*stride;
}
void setOffset( int offset ){
// first, zero out the offset bits
makeLineAddress();
m_address |= (physical_address_t) offset;
}
int getBankSetNum() const;
int getBankSetDist() const;
private:
// Private Methods
Index memoryModuleIndex() const;
// Private copy constructor and assignment operator
// Address(const Address& obj);
// Address& operator=(const Address& obj);
void print(ostream& out) const;
void output(ostream& out) const;
void input(istream& in);
// Data Members (m_ prefix)
physical_address_t m_address;
void
setOffset(int offset)
{
// first, zero out the offset bits
makeLineAddress();
m_address |= (physical_address_t) offset;
}
private:
physical_address_t m_address;
};
inline
Address line_address(const Address& addr) { Address temp(addr); temp.makeLineAddress(); return temp; }
// Output operator declaration
ostream& operator<<(ostream& out, const Address& obj);
// comparison operator declaration
bool operator==(const Address& obj1, const Address& obj2);
bool operator!=(const Address& obj1, const Address& obj2);
bool operator<(const Address& obj1, const Address& obj2);
/* Address& operator=(const physical_address_t address); */
inline
bool operator<(const Address& obj1, const Address& obj2)
inline Address
line_address(const Address& addr)
{
return obj1.getAddress() < obj2.getAddress();
Address temp(addr);
temp.makeLineAddress();
return temp;
}
// ******************* Definitions *******************
// Output operator definition
inline
ostream& operator<<(ostream& out, const Address& obj)
inline bool
operator<(const Address& obj1, const Address& obj2)
{
obj.print(out);
out << flush;
return out;
return obj1.getAddress() < obj2.getAddress();
}
inline
bool operator==(const Address& obj1, const Address& obj2)
inline ostream&
operator<<(ostream& out, const Address& obj)
{
return (obj1.getAddress() == obj2.getAddress());
obj.print(out);
out << flush;
return out;
}
inline
bool operator!=(const Address& obj1, const Address& obj2)
inline bool
operator==(const Address& obj1, const Address& obj2)
{
return (obj1.getAddress() != obj2.getAddress());
return (obj1.getAddress() == obj2.getAddress());
}
inline
physical_address_t Address::bitSelect(int small, int big) const // rips bits inclusive
inline bool
operator!=(const Address& obj1, const Address& obj2)
{
physical_address_t mask;
assert((unsigned)big >= (unsigned)small);
return (obj1.getAddress() != obj2.getAddress());
}
if (big >= ADDRESS_WIDTH - 1) {
return (m_address >> small);
} else {
mask = ~((physical_address_t)~0 << (big + 1));
// FIXME - this is slow to manipulate a 64-bit number using 32-bits
physical_address_t partial = (m_address & mask);
return (partial >> small);
}
// rips bits inclusive
inline physical_address_t
Address::bitSelect(int small, int big) const
{
physical_address_t mask;
assert((unsigned)big >= (unsigned)small);
if (big >= ADDRESS_WIDTH - 1) {
return (m_address >> small);
} else {
mask = ~((physical_address_t)~0 << (big + 1));
// FIXME - this is slow to manipulate a 64-bit number using 32-bits
physical_address_t partial = (m_address & mask);
return (partial >> small);
}
}
// removes bits inclusive
inline
physical_address_t Address::bitRemove(int small, int big) const
inline physical_address_t
Address::bitRemove(int small, int big) const
{
physical_address_t mask;
assert((unsigned)big >= (unsigned)small);
@@ -184,60 +186,64 @@ physical_address_t Address::bitRemove(int small, int big) const
mask = (physical_address_t)~0 << (big + 1);
physical_address_t higher_bits = m_address & mask;
//
// Shift the valid high bits over the removed section
//
higher_bits = higher_bits >> (big - small);
return (higher_bits | lower_bits);
}
}
inline
physical_address_t Address::maskLowOrderBits(int number) const
inline physical_address_t
Address::maskLowOrderBits(int number) const
{
physical_address_t mask;
if (number >= ADDRESS_WIDTH - 1) {
mask = ~0;
mask = ~0;
} else {
mask = (physical_address_t)~0 << number;
mask = (physical_address_t)~0 << number;
}
return (m_address & mask);
}
inline
physical_address_t Address::maskHighOrderBits(int number) const
inline physical_address_t
Address::maskHighOrderBits(int number) const
{
physical_address_t mask;
physical_address_t mask;
if (number >= ADDRESS_WIDTH - 1) {
mask = ~0;
} else {
mask = (physical_address_t)~0 >> number;
}
return (m_address & mask);
if (number >= ADDRESS_WIDTH - 1) {
mask = ~0;
} else {
mask = (physical_address_t)~0 >> number;
}
return (m_address & mask);
}
inline
physical_address_t Address::shiftLowOrderBits(int number) const
inline physical_address_t
Address::shiftLowOrderBits(int number) const
{
return (m_address >> number);
return (m_address >> number);
}
inline
integer_t Address::memoryModuleIndex() const
inline integer_t
Address::memoryModuleIndex() const
{
integer_t index = bitSelect(RubySystem::getBlockSizeBits()+RubySystem::getMemorySizeBits(), ADDRESS_WIDTH);
assert (index >= 0);
return index;
integer_t index =
bitSelect(RubySystem::getBlockSizeBits() +
RubySystem::getMemorySizeBits(), ADDRESS_WIDTH);
assert (index >= 0);
return index;
// Index indexHighPortion = address.bitSelect(MEMORY_SIZE_BITS-1, PAGE_SIZE_BITS+NUMBER_OF_MEMORY_MODULE_BITS);
// Index indexLowPortion = address.bitSelect(DATA_BLOCK_BITS, PAGE_SIZE_BITS-1);
// Index indexHighPortion =
// address.bitSelect(MEMORY_SIZE_BITS - 1,
// PAGE_SIZE_BITS + NUMBER_OF_MEMORY_MODULE_BITS);
// Index indexLowPortion =
// address.bitSelect(DATA_BLOCK_BITS, PAGE_SIZE_BITS - 1);
//
// Index index = indexLowPortion |
// (indexHighPortion << (PAGE_SIZE_BITS - DATA_BLOCK_BITS));
//Index index = indexLowPortion | (indexHighPortion << (PAGE_SIZE_BITS - DATA_BLOCK_BITS));
/*
Round-robin mapping of addresses, at page size granularity
/*
Round-robin mapping of addresses, at page size granularity
ADDRESS_WIDTH MEMORY_SIZE_BITS PAGE_SIZE_BITS DATA_BLOCK_BITS
| | | |
@@ -249,29 +255,39 @@ ADDRESS_WIDTH MEMORY_SIZE_BITS PAGE_SIZE_BITS DATA_BLOCK_BITS
indexHighPortion indexLowPortion
<------->
NUMBER_OF_MEMORY_MODULE_BITS
*/
*/
}
inline
void Address::print(ostream& out) const
inline void
Address::print(ostream& out) const
{
using namespace std;
out << "[" << hex << "0x" << m_address << "," << " line 0x" << maskLowOrderBits(RubySystem::getBlockSizeBits()) << dec << "]" << flush;
out << "[" << hex << "0x" << m_address << "," << " line 0x"
<< maskLowOrderBits(RubySystem::getBlockSizeBits()) << dec << "]"
<< flush;
}
class Address;
namespace __hash_namespace {
template <> struct hash<Address>
{
size_t operator()(const Address &s) const { return (size_t) s.getAddress(); }
};
}
template <> struct hash<Address>
{
size_t
operator()(const Address &s) const
{
return (size_t)s.getAddress();
}
};
/* namespace __hash_namespace */ }
namespace std {
template <> struct equal_to<Address>
{
bool operator()(const Address& s1, const Address& s2) const { return s1 == s2; }
};
}
#endif //ADDRESS_H
template <> struct equal_to<Address>
{
bool
operator()(const Address& s1, const Address& s2) const
{
return s1 == s2;
}
};
/* namespace std */ }
#endif // __MEM_RUBY_COMMON_ADDRESS_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,16 +27,13 @@
*/
/*
* $Id$
*
* Description: This is the virtual base class of all classes that can
* be the targets of wakeup events. There is only two methods,
* wakeup() and print() and no data members.
*
* This is the virtual base class of all classes that can be the
* targets of wakeup events. There is only two methods, wakeup() and
* print() and no data members.
*/
#ifndef CONSUMER_H
#define CONSUMER_H
#ifndef __MEM_RUBY_COMMON_CONSUMER_HH__
#define __MEM_RUBY_COMMON_CONSUMER_HH__
#include <iostream>
#include <set>
@@ -47,68 +43,74 @@
class MessageBuffer;
class Consumer {
public:
// Constructors
Consumer() { m_last_scheduled_wakeup = 0; m_last_wakeup = 0; }
class Consumer
{
public:
Consumer()
: m_last_scheduled_wakeup(0), m_last_wakeup(0)
{
}
// Destructor
virtual ~Consumer() { }
virtual
~Consumer()
{ }
// Public Methods - pure virtual methods
void triggerWakeup(RubyEventQueue * eventQueue)
{
Time time = eventQueue->getTime();
if (m_last_wakeup != time) {
wakeup(); m_last_wakeup = time;
}
}
virtual void wakeup() = 0;
virtual void print(std::ostream& out) const = 0;
const Time& getLastScheduledWakeup() const
{
return m_last_scheduled_wakeup;
}
void setLastScheduledWakeup(const Time& time)
{
m_last_scheduled_wakeup = time;
}
bool alreadyScheduled(Time time)
{
return (m_scheduled_wakeups.find(time) != m_scheduled_wakeups.end());
}
void insertScheduledWakeupTime(Time time)
{
m_scheduled_wakeups.insert(time);
}
void removeScheduledWakeupTime(Time time)
{
assert(alreadyScheduled(time));
m_scheduled_wakeups.erase(time);
}
void
triggerWakeup(RubyEventQueue *eventQueue)
{
Time time = eventQueue->getTime();
if (m_last_wakeup != time) {
wakeup();
m_last_wakeup = time;
}
}
private:
// Private Methods
virtual void wakeup() = 0;
virtual void print(std::ostream& out) const = 0;
// Data Members (m_ prefix)
Time m_last_scheduled_wakeup;
std::set<Time> m_scheduled_wakeups;
Time m_last_wakeup;
const Time&
getLastScheduledWakeup() const
{
return m_last_scheduled_wakeup;
}
void
setLastScheduledWakeup(const Time& time)
{
m_last_scheduled_wakeup = time;
}
bool
alreadyScheduled(Time time)
{
return m_scheduled_wakeups.find(time) != m_scheduled_wakeups.end();
}
void
insertScheduledWakeupTime(Time time)
{
m_scheduled_wakeups.insert(time);
}
void
removeScheduledWakeupTime(Time time)
{
assert(alreadyScheduled(time));
m_scheduled_wakeups.erase(time);
}
private:
Time m_last_scheduled_wakeup;
std::set<Time> m_scheduled_wakeups;
Time m_last_wakeup;
};
// Output operator declaration
inline extern
std::ostream& operator<<(std::ostream& out, const Consumer& obj);
// ******************* Definitions *******************
// Output operator definition
inline extern
std::ostream& operator<<(std::ostream& out, const Consumer& obj)
inline std::ostream&
operator<<(std::ostream& out, const Consumer& obj)
{
obj.print(out);
out << std::flush;
return out;
obj.print(out);
out << std::flush;
return out;
}
#endif //CONSUMER_H
#endif // __MEM_RUBY_COMMON_CONSUMER_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -32,13 +31,14 @@
DataBlock &
DataBlock::operator=(const DataBlock & obj)
{
if (this == &obj) {
// assert(false);
} else {
if (!m_alloc)
m_data = new uint8[RubySystem::getBlockSizeBytes()];
memcpy(m_data, obj.m_data, RubySystem::getBlockSizeBytes());
m_alloc = true;
}
return *this;
if (this == &obj) {
// assert(false);
} else {
if (!m_alloc)
m_data = new uint8[RubySystem::getBlockSizeBytes()];
memcpy(m_data, obj.m_data, RubySystem::getBlockSizeBytes());
m_alloc = true;
}
return *this;
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,152 +26,144 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef DATABLOCK_H
#define DATABLOCK_H
#ifndef __MEM_RUBY_COMMON_DATABLOCK_HH__
#define __MEM_RUBY_COMMON_DATABLOCK_HH__
#include <iomanip>
#include <iostream>
#include "mem/gems_common/Vector.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/System.hh"
#include "mem/gems_common/Vector.hh"
class DataBlock {
public:
// Constructors
DataBlock() {alloc();}
DataBlock(const DataBlock & cp) {
m_data = new uint8[RubySystem::getBlockSizeBytes()];
memcpy(m_data, cp.m_data, RubySystem::getBlockSizeBytes());
m_alloc = true;
}
// Destructor
~DataBlock() {
if(m_alloc) {
delete [] m_data;
class DataBlock
{
public:
DataBlock()
{
alloc();
}
}
DataBlock& operator=(const DataBlock& obj);
DataBlock(const DataBlock &cp)
{
m_data = new uint8[RubySystem::getBlockSizeBytes()];
memcpy(m_data, cp.m_data, RubySystem::getBlockSizeBytes());
m_alloc = true;
}
// Public Methods
void assign(uint8* data);
~DataBlock()
{
if (m_alloc)
delete [] m_data;
}
void clear();
uint8 getByte(int whichByte) const;
const uint8* getData(int offset, int len) const;
void setByte(int whichByte, uint8 data);
void setData(uint8* data, int offset, int len);
void copyPartial(const DataBlock & dblk, int offset, int len);
bool equal(const DataBlock& obj) const;
void print(std::ostream& out) const;
DataBlock& operator=(const DataBlock& obj);
private:
void alloc();
// Data Members (m_ prefix)
uint8* m_data;
bool m_alloc;
void assign(uint8* data);
void clear();
uint8 getByte(int whichByte) const;
const uint8* getData(int offset, int len) const;
void setByte(int whichByte, uint8 data);
void setData(uint8* data, int offset, int len);
void copyPartial(const DataBlock & dblk, int offset, int len);
bool equal(const DataBlock& obj) const;
void print(std::ostream& out) const;
private:
void alloc();
uint8* m_data;
bool m_alloc;
};
// Output operator declaration
std::ostream& operator<<(std::ostream& out, const DataBlock& obj);
bool operator==(const DataBlock& obj1, const DataBlock& obj2);
// inline functions for speed
inline
void DataBlock::assign(uint8* data)
inline void
DataBlock::assign(uint8* data)
{
if (m_alloc) {
delete [] m_data;
}
m_data = data;
m_alloc = false;
if (m_alloc) {
delete [] m_data;
}
m_data = data;
m_alloc = false;
}
inline
void DataBlock::alloc()
inline void
DataBlock::alloc()
{
m_data = new uint8[RubySystem::getBlockSizeBytes()];
m_alloc = true;
clear();
m_data = new uint8[RubySystem::getBlockSizeBytes()];
m_alloc = true;
clear();
}
inline
void DataBlock::clear()
inline void
DataBlock::clear()
{
memset(m_data, 0, RubySystem::getBlockSizeBytes());
memset(m_data, 0, RubySystem::getBlockSizeBytes());
}
inline
bool DataBlock::equal(const DataBlock& obj) const
inline bool
DataBlock::equal(const DataBlock& obj) const
{
return !memcmp(m_data, obj.m_data, RubySystem::getBlockSizeBytes());
return !memcmp(m_data, obj.m_data, RubySystem::getBlockSizeBytes());
}
inline
void DataBlock::print(std::ostream& out) const
inline void
DataBlock::print(std::ostream& out) const
{
using namespace std;
using namespace std;
int size = RubySystem::getBlockSizeBytes();
out << "[ ";
for (int i = 0; i < size; i++) {
out << setw(2) << setfill('0') << hex << "0x" << (int)m_data[i] << " ";
out << setfill(' ');
}
out << dec << "]" << flush;
int size = RubySystem::getBlockSizeBytes();
out << "[ ";
for (int i = 0; i < size; i++) {
out << setw(2) << setfill('0') << hex << "0x" << (int)m_data[i] << " ";
out << setfill(' ');
}
out << dec << "]" << flush;
}
inline
uint8 DataBlock::getByte(int whichByte) const
inline uint8
DataBlock::getByte(int whichByte) const
{
return m_data[whichByte];
return m_data[whichByte];
}
inline
const uint8* DataBlock::getData(int offset, int len) const
inline const uint8*
DataBlock::getData(int offset, int len) const
{
assert(offset + len <= RubySystem::getBlockSizeBytes());
return &m_data[offset];
assert(offset + len <= RubySystem::getBlockSizeBytes());
return &m_data[offset];
}
inline
void DataBlock::setByte(int whichByte, uint8 data)
inline void
DataBlock::setByte(int whichByte, uint8 data)
{
m_data[whichByte] = data;
}
inline
void DataBlock::setData(uint8* data, int offset, int len)
inline void
DataBlock::setData(uint8* data, int offset, int len)
{
assert(offset + len <= RubySystem::getBlockSizeBytes());
memcpy(&m_data[offset], data, len);
assert(offset + len <= RubySystem::getBlockSizeBytes());
memcpy(&m_data[offset], data, len);
}
inline
void DataBlock::copyPartial(const DataBlock & dblk, int offset, int len)
inline void
DataBlock::copyPartial(const DataBlock & dblk, int offset, int len)
{
setData(&dblk.m_data[offset], offset, len);
setData(&dblk.m_data[offset], offset, len);
}
// ******************* Definitions *******************
// Output operator definition
extern inline
std::ostream& operator<<(std::ostream& out, const DataBlock& obj)
inline std::ostream&
operator<<(std::ostream& out, const DataBlock& obj)
{
obj.print(out);
out << std::flush;
return out;
obj.print(out);
out << std::flush;
return out;
}
extern inline
bool operator==(const DataBlock& obj1,const DataBlock& obj2)
inline bool
operator==(const DataBlock& obj1,const DataBlock& obj2)
{
return (obj1.equal(obj2));
return obj1.equal(obj2);
}
#endif //DATABLOCK_H
#endif // __MEM_RUBY_COMMON_DATABLOCK_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,19 +26,14 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
#include <fstream>
#include <stdarg.h>
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Debug.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/gems_common/util.hh"
#include "base/misc.hh"
#include "mem/gems_common/util.hh"
#include "mem/ruby/common/Debug.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
using namespace std;
@@ -79,314 +73,273 @@ DebugComponentData debugComponents[] =
extern "C" void changeDebugVerbosity(VerbosityLevel vb);
extern "C" void changeDebugFilter(int filter);
void changeDebugVerbosity(VerbosityLevel vb)
void
changeDebugVerbosity(VerbosityLevel vb)
{
g_debug_ptr->setVerbosity(vb);
g_debug_ptr->setVerbosity(vb);
}
void changeDebugFilter(int filter)
void
changeDebugFilter(int filter)
{
g_debug_ptr->setFilter(filter);
g_debug_ptr->setFilter(filter);
}
Debug::Debug(const Params *p)
: SimObject(p)
{
clearFilter();
debug_cout_ptr = &cout;
clearFilter();
debug_cout_ptr = &cout;
setFilterString(p->filter_string.c_str());
setVerbosityString(p->verbosity_string.c_str());
setDebugOutputFile(p->output_filename.c_str());
m_starting_cycle = p->start_time;
m_protocol_trace = p->protocol_trace;
g_debug_ptr = this;
setFilterString(p->filter_string.c_str());
setVerbosityString(p->verbosity_string.c_str());
setDebugOutputFile(p->output_filename.c_str());
m_starting_cycle = p->start_time;
m_protocol_trace = p->protocol_trace;
g_debug_ptr = this;
}
Debug::~Debug()
{
}
void Debug::printVerbosity(ostream& out) const
void
Debug::printVerbosity(ostream& out) const
{
switch (getVerbosity()) {
case No_Verb:
out << "verbosity = No_Verb" << endl;
break;
case Low_Verb:
out << "verbosity = Low_Verb" << endl;
break;
case Med_Verb:
out << "verbosity = Med_Verb" << endl;
break;
case High_Verb:
out << "verbosity = High_Verb" << endl;
break;
default:
out << "verbosity = unknown" << endl;
}
}
bool Debug::validDebug(int module, PriorityLevel priority)
{
int local_module = (1 << module);
if(m_filter & local_module) {
if (g_eventQueue_ptr == NULL ||
g_eventQueue_ptr->getTime() >= m_starting_cycle) {
switch(m_verbosityLevel) {
switch (getVerbosity()) {
case No_Verb:
return false;
out << "verbosity = No_Verb" << endl;
break;
case Low_Verb:
if(priority == HighPrio) {
return true;
}else{
return false;
}
out << "verbosity = Low_Verb" << endl;
break;
case Med_Verb:
if(priority == HighPrio || priority == MedPrio ) {
return true;
}else{
return false;
}
out << "verbosity = Med_Verb" << endl;
break;
case High_Verb:
return true;
out << "verbosity = High_Verb" << endl;
break;
}
default:
out << "verbosity = unknown" << endl;
}
}
return false;
}
void Debug::setDebugTime(Time t)
bool
Debug::validDebug(int module, PriorityLevel priority)
{
m_starting_cycle = t;
}
void Debug::setVerbosity(VerbosityLevel vb)
{
m_verbosityLevel = vb;
}
void Debug::setFilter(int filter)
{
m_filter = filter;
}
bool Debug::checkVerbosityString(const char *verb_str)
{
if (verb_str == NULL) {
cerr << "Error: unrecognized verbosity (use none, low, med, high): NULL" << endl;
return true; // error
} else if ( (string(verb_str) == "none") ||
(string(verb_str) == "low") ||
(string(verb_str) == "med") ||
(string(verb_str) == "high") ) {
int local_module = (1 << module);
if (m_filter & local_module) {
if (g_eventQueue_ptr == NULL ||
g_eventQueue_ptr->getTime() >= m_starting_cycle) {
switch (m_verbosityLevel) {
case No_Verb:
return false;
case Low_Verb:
return (priority == HighPrio);
case Med_Verb:
return (priority == HighPrio || priority == MedPrio);
case High_Verb:
return true;
}
}
}
return false;
}
cerr << "Error: unrecognized verbosity (use none, low, med, high): NULL" << endl;
return true; // error
}
bool Debug::setVerbosityString(const char *verb_str)
void
Debug::setDebugTime(Time t)
{
bool check_fails = checkVerbosityString(verb_str);
if (check_fails) {
return true; // error
}
if (string(verb_str) == "none") {
setVerbosity(No_Verb);
} else if (string(verb_str) == "low") {
setVerbosity(Low_Verb);
} else if (string(verb_str) == "med") {
setVerbosity(Med_Verb);
} else if (string(verb_str) == "high") {
setVerbosity(High_Verb);
} else {
cerr << "Error: unrecognized verbosity (use none, low, med, high): " << verb_str << endl;
return true; // error
}
return false; // no error
m_starting_cycle = t;
}
bool Debug::checkFilter(char ch)
void
Debug::setVerbosity(VerbosityLevel vb)
{
for (int i=0; i<NUMBER_OF_COMPS; i++) {
// Look at all components to find a character match
if (debugComponents[i].ch == ch) {
// We found a match - return no error
return false; // no error
}
}
return true; // error
m_verbosityLevel = vb;
}
bool Debug::checkFilterString(const char *filter_str)
void
Debug::setFilter(int filter)
{
if (filter_str == NULL) {
cerr << "Error: unrecognized component filter: NULL" << endl;
return true; // error
}
// check for default filter ("none") before reporting RUBY_DEBUG error
if ( (string(filter_str) == "none") ) {
return false; // no error
}
if (RUBY_DEBUG == false) {
cerr << "Error: User specified set of debug components, but the RUBY_DEBUG compile-time flag is false." << endl;
cerr << "Solution: Re-compile with RUBY_DEBUG set to true." << endl;
return true; // error
}
if ( (string(filter_str) == "all") ) {
return false; // no error
}
// scan string checking each character
for (unsigned int i = 0; i < strlen(filter_str); i++) {
bool unrecognized = checkFilter( filter_str[i] );
if (unrecognized == true) {
return true; // error
}
}
return false; // no error
m_filter = filter;
}
bool Debug::setFilterString(const char *filter_str)
bool
Debug::setVerbosityString(const char *verb_str)
{
if (checkFilterString(filter_str)) {
return true; // error
}
if (string(filter_str) == "all" ) {
allFilter();
} else if (string(filter_str) == "none") {
clearFilter();
} else {
// scan string adding to bit mask for each component which is present
for (unsigned int i = 0; i < strlen(filter_str); i++) {
bool error = addFilter( filter_str[i] );
if (error) {
string verb = verb_str ? verb_str : "";
if (verb == "none") {
setVerbosity(No_Verb);
} else if (verb == "low") {
setVerbosity(Low_Verb);
} else if (verb == "med") {
setVerbosity(Med_Verb);
} else if (verb == "high") {
setVerbosity(High_Verb);
} else {
cerr << "Error: unrecognized verbosity (use none, low, med, high): "
<< verb << endl;
return true; // error
}
}
}
return false; // no error
return false; // no error
}
bool Debug::addFilter(char ch)
bool
Debug::checkFilter(char ch)
{
for (int i=0; i<NUMBER_OF_COMPS; i++) {
// Look at all components to find a character match
if (debugComponents[i].ch == ch) {
// We found a match - update the filter bit mask
cout << " Debug: Adding to filter: '" << ch << "' (" << debugComponents[i].desc << ")" << endl;
m_filter |= (1 << i);
return false; // no error
for (int i = 0; i < NUMBER_OF_COMPS; i++) {
// Look at all components to find a character match
if (debugComponents[i].ch == ch) {
// We found a match - return no error
return false; // no error
}
}
}
// We didn't find the character
cerr << "Error: unrecognized component filter: " << ch << endl;
usageInstructions();
return true; // error
return true; // error
}
void Debug::clearFilter()
bool
Debug::checkFilterString(const char *filter_str)
{
m_filter = 0;
if (filter_str == NULL) {
cerr << "Error: unrecognized component filter: NULL" << endl;
return true; // error
}
// check for default filter ("none") before reporting RUBY_DEBUG error
if (string(filter_str) == "none") {
return false; // no error
}
if (RUBY_DEBUG == false) {
cerr << "Error: User specified set of debug components, but the "
<< "RUBY_DEBUG compile-time flag is false." << endl
<< "Solution: Re-compile with RUBY_DEBUG set to true." << endl;
return true; // error
}
if (string(filter_str) == "all") {
return false; // no error
}
// scan string checking each character
for (unsigned int i = 0; i < strlen(filter_str); i++) {
bool unrecognized = checkFilter(filter_str[i]);
if (unrecognized == true) {
return true; // error
}
}
return false; // no error
}
bool
Debug::setFilterString(const char *filter_str)
{
if (checkFilterString(filter_str)) {
return true; // error
}
if (string(filter_str) == "all" ) {
allFilter();
} else if (string(filter_str) == "none") {
clearFilter();
} else {
// scan string adding to bit mask for each component which is present
for (unsigned int i = 0; i < strlen(filter_str); i++) {
bool error = addFilter( filter_str[i] );
if (error) {
return true; // error
}
}
}
return false; // no error
}
bool
Debug::addFilter(char ch)
{
for (int i = 0; i < NUMBER_OF_COMPS; i++) {
// Look at all components to find a character match
if (debugComponents[i].ch == ch) {
// We found a match - update the filter bit mask
cout << " Debug: Adding to filter: '" << ch << "' ("
<< debugComponents[i].desc << ")" << endl;
m_filter |= (1 << i);
return false; // no error
}
}
// We didn't find the character
cerr << "Error: unrecognized component filter: " << ch << endl;
usageInstructions();
return true; // error
}
void
Debug::clearFilter()
{
m_filter = 0;
}
void Debug::allFilter()
{
m_filter = ~0;
m_filter = ~0;
}
void Debug::usageInstructions(void)
void
Debug::usageInstructions(void)
{
cerr << "Debug components: " << endl;
for (int i=0; i<NUMBER_OF_COMPS; i++) {
cerr << " " << debugComponents[i].ch << ": " << debugComponents[i].desc << endl;
}
cerr << "Debug components: " << endl;
for (int i = 0; i < NUMBER_OF_COMPS; i++) {
cerr << " " << debugComponents[i].ch << ": "
<< debugComponents[i].desc << endl;
}
}
void Debug::print(ostream& out) const
void
Debug::print(ostream& out) const
{
out << "[Debug]" << endl;
out << "[Debug]" << endl;
}
void Debug::setDebugOutputFile (const char * filename)
void
Debug::setDebugOutputFile (const char *filename)
{
if ( (filename == NULL) ||
(!strcmp(filename, "none")) ) {
debug_cout_ptr = &cout;
return;
}
if (filename == NULL || !strcmp(filename, "none")) {
debug_cout_ptr = &cout;
return;
}
if (m_fout.is_open() ) {
m_fout.close ();
}
m_fout.open (filename, ios::out);
if (! m_fout.is_open() ) {
cerr << "setDebugOutputFile: can't open file " << filename << endl;
}
else {
debug_cout_ptr = &m_fout;
}
if (m_fout.is_open()) {
m_fout.close();
}
m_fout.open(filename, ios::out);
if (!m_fout.is_open()) {
cerr << "setDebugOutputFile: can't open file " << filename << endl;
} else {
debug_cout_ptr = &m_fout;
}
}
void Debug::closeDebugOutputFile ()
void
Debug::closeDebugOutputFile ()
{
if (m_fout.is_open() ) {
m_fout.close ();
debug_cout_ptr = &cout;
}
if (m_fout.is_open()) {
m_fout.close ();
debug_cout_ptr = &cout;
}
}
void Debug::debugMsg( const char *fmt, ... )
void
Debug::debugMsg( const char *fmt, ...)
{
va_list args;
// you could check validDebug() here before printing the message
va_start(args, fmt);
vfprintf(stdout, fmt, args);
va_end(args);
}
/*
void DEBUG_OUT( const char* fmt, ...) {
if (RUBY_DEBUG) {
cout << "Debug: in fn "
<< __PRETTY_FUNCTION__
<< " in " << __FILE__ << ":"
<< __LINE__ << ": ";
va_list args;
// you could check validDebug() here before printing the message
va_start(args, fmt);
vfprintf(stdout, fmt, args);
va_end(args);
}
}
void ERROR_OUT( const char* fmt, ... ) {
if (ERROR_MESSAGE_FLAG) {
cout << "error: in fn "
<< __PRETTY_FUNCTION__ << " in "
<< __FILE__ << ":"
<< __LINE__ << ": ";
va_list args;
va_start(args, fmt);
vfprintf(stdout, fmt, args);
va_end(args);
}
assert(0);
}
*/
Debug *
RubyDebugParams::create()
{

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,12 +26,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
#ifndef __MEM_RUBY_DEBUG_HH__
#define __MEM_RUBY_DEBUG_HH__
#ifndef __MEM_RUBY_COMMON_DEBUG_HH__
#define __MEM_RUBY_COMMON_DEBUG_HH__
#include <unistd.h>
@@ -75,68 +70,55 @@ enum DebugComponents
enum PriorityLevel {HighPrio, MedPrio, LowPrio};
enum VerbosityLevel {No_Verb, Low_Verb, Med_Verb, High_Verb};
class Debug : public SimObject {
public:
// Constructors
class Debug : public SimObject
{
public:
typedef RubyDebugParams Params;
Debug(const Params *p);
Debug(const Params *p);
~Debug();
// Destructor
~Debug();
static bool getProtocolTrace() { return m_protocol_trace; }
bool validDebug(int module, PriorityLevel priority);
void printVerbosity(std::ostream& out) const;
void setVerbosity(VerbosityLevel vb);
bool setVerbosityString(const char *);
VerbosityLevel getVerbosity() const { return m_verbosityLevel; }
void setFilter(int);
static bool checkFilter( char);
static bool checkFilterString(const char *);
bool setFilterString(const char *);
void setDebugTime(Time);
Time getDebugTime() const { return m_starting_cycle; }
bool addFilter(char);
void clearFilter();
void allFilter();
void print(std::ostream& out) const;
/* old school debugging "vararg": sends messages to screen and log */
void debugMsg(const char *fmt, ...);
// Public Methods
static bool getProtocolTrace() { return m_protocol_trace; }
bool validDebug(int module, PriorityLevel priority);
void printVerbosity(std::ostream& out) const;
void setVerbosity(VerbosityLevel vb);
static bool checkVerbosityString(const char *verb_str);
bool setVerbosityString(const char *);
VerbosityLevel getVerbosity() const { return m_verbosityLevel; }
void setFilter(int);
static bool checkFilter( char);
static bool checkFilterString(const char *);
bool setFilterString(const char *);
void setDebugTime(Time);
Time getDebugTime() const { return m_starting_cycle; }
bool addFilter(char);
void clearFilter();
void allFilter();
void print(std::ostream& out) const;
/* old school debugging "vararg": sends messages to screen and log */
void debugMsg( const char *fmt, ... );
void setDebugOutputFile (const char * filename);
void closeDebugOutputFile ();
static void usageInstructions(void);
void setDebugOutputFile (const char * filename);
void closeDebugOutputFile ();
static void usageInstructions(void);
private:
// Private copy constructor and assignment operator
Debug(const Debug& obj);
Debug& operator=(const Debug& obj);
private:
// Private Methods
static bool m_protocol_trace;
VerbosityLevel m_verbosityLevel;
int m_filter;
Time m_starting_cycle;
// Private copy constructor and assignment operator
Debug(const Debug& obj);
Debug& operator=(const Debug& obj);
// Data Members (m_ prefix)
static bool m_protocol_trace;
VerbosityLevel m_verbosityLevel;
int m_filter;
Time m_starting_cycle;
std::fstream m_fout;
std::fstream m_fout;
};
// Output operator declaration
std::ostream& operator<<(std::ostream& out, const Debug& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
std::ostream& operator<<(std::ostream& out, const Debug& obj)
inline std::ostream&
operator<<(std::ostream& out, const Debug& obj)
{
obj.print(out);
out << std::flush;
return out;
obj.print(out);
out << std::flush;
return out;
}
const bool ERROR_MESSAGE_FLAG = true;
@@ -151,178 +133,168 @@ const bool ASSERT_FLAG = true;
#undef assert
#define assert(EXPR) ASSERT(EXPR)
#undef ASSERT
#define ASSERT(EXPR)\
{\
using namespace std;\
if (ASSERT_FLAG) {\
if (!(EXPR)) {\
cerr << "failed assertion '"\
<< #EXPR << "' at fn "\
<< __PRETTY_FUNCTION__ << " in "\
<< __FILE__ << ":"\
<< __LINE__ << endl << flush;\
(* debug_cout_ptr) << "failed assertion '"\
<< #EXPR << "' at fn "\
<< __PRETTY_FUNCTION__ << " in "\
<< __FILE__ << ":"\
<< __LINE__ << endl << flush;\
if(isatty(STDIN_FILENO)) {\
cerr << "At this point you might want to attach a debug to ";\
cerr << "the running and get to the" << endl;\
cerr << "crash site; otherwise press enter to continue" << endl;\
cerr << "PID: " << getpid();\
cerr << endl << flush; \
char c; \
cin.get(c); \
}\
abort();\
}\
}\
}
#define ASSERT(EXPR) do { \
using namespace std; \
if (ASSERT_FLAG) { \
if (!(EXPR)) { \
cerr << "failed assertion '" \
<< #EXPR << "' at fn " \
<< __PRETTY_FUNCTION__ << " in " \
<< __FILE__ << ":" \
<< __LINE__ << endl << flush; \
(*debug_cout_ptr) << "failed assertion '" \
<< #EXPR << "' at fn " \
<< __PRETTY_FUNCTION__ << " in " \
<< __FILE__ << ":" \
<< __LINE__ << endl << flush; \
if (isatty(STDIN_FILENO)) { \
cerr << "At this point you might want to attach a debug to " \
<< "the running and get to the" << endl \
<< "crash site; otherwise press enter to continue" \
<< endl \
<< "PID: " << getpid() \
<< endl << flush; \
char c; \
cin.get(c); \
} \
abort(); \
} \
} \
} while (0)
#define BREAK(X)\
{\
using namespace std;\
cerr << "breakpoint '"\
<< #X << "' reached at fn "\
<< __PRETTY_FUNCTION__ << " in "\
<< __FILE__ << ":"\
<< __LINE__ << endl << flush;\
if(isatty(STDIN_FILENO)) {\
cerr << "press enter to continue" << endl;\
cerr << "PID: " << getpid();\
cerr << endl << flush; \
char c; \
cin.get(c); \
}\
}
#define BREAK(X) do { \
using namespace std; \
cerr << "breakpoint '" \
<< #X << "' reached at fn " \
<< __PRETTY_FUNCTION__ << " in " \
<< __FILE__ << ":" \
<< __LINE__ << endl << flush; \
if(isatty(STDIN_FILENO)) { \
cerr << "press enter to continue" << endl; \
cerr << "PID: " << getpid(); \
cerr << endl << flush; \
char c; \
cin.get(c); \
} \
} while (0)
#define ERROR_MSG(MESSAGE)\
{\
using namespace std;\
if (ERROR_MESSAGE_FLAG) {\
cerr << "Fatal Error: in fn "\
<< __PRETTY_FUNCTION__ << " in "\
<< __FILE__ << ":"\
<< __LINE__ << ": "\
<< (MESSAGE) << endl << flush;\
(* debug_cout_ptr) << "Fatal Error: in fn "\
<< __PRETTY_FUNCTION__ << " in "\
<< __FILE__ << ":"\
<< __LINE__ << ": "\
<< (MESSAGE) << endl << flush;\
abort();\
}\
}
#define ERROR_MSG(MESSAGE) do { \
using namespace std; \
if (ERROR_MESSAGE_FLAG) { \
cerr << "Fatal Error: in fn " \
<< __PRETTY_FUNCTION__ << " in " \
<< __FILE__ << ":" \
<< __LINE__ << ": " \
<< (MESSAGE) << endl << flush; \
(* debug_cout_ptr) << "Fatal Error: in fn " \
<< __PRETTY_FUNCTION__ << " in " \
<< __FILE__ << ":" \
<< __LINE__ << ": " \
<< (MESSAGE) << endl << flush; \
abort(); \
} \
} while(0)
#define WARN_MSG(MESSAGE)\
{\
using namespace std;\
if (WARNING_MESSAGE_FLAG) {\
cerr << "Warning: in fn "\
<< __PRETTY_FUNCTION__ << " in "\
<< __FILE__ << ":"\
<< __LINE__ << ": "\
<< (MESSAGE) << endl << flush;\
(* debug_cout_ptr) << "Warning: in fn "\
<< __PRETTY_FUNCTION__ << " in "\
<< __FILE__ << ":"\
<< __LINE__ << ": "\
<< (MESSAGE) << endl << flush;\
}\
}
#define WARN_MSG(MESSAGE) do { \
using namespace std; \
if (WARNING_MESSAGE_FLAG) { \
cerr << "Warning: in fn " \
<< __PRETTY_FUNCTION__ << " in " \
<< __FILE__ << ":" \
<< __LINE__ << ": " \
<< (MESSAGE) << endl << flush; \
(* debug_cout_ptr) << "Warning: in fn " \
<< __PRETTY_FUNCTION__ << " in " \
<< __FILE__ << ":" \
<< __LINE__ << ": " \
<< (MESSAGE) << endl << flush; \
} \
} while (0)
#define WARN_EXPR(EXPR)\
{\
using namespace std;\
if (WARNING_MESSAGE_FLAG) {\
cerr << "Warning: in fn "\
<< __PRETTY_FUNCTION__ << " in "\
<< __FILE__ << ":"\
<< __LINE__ << ": "\
<< #EXPR << " is "\
<< (EXPR) << endl << flush;\
(* debug_cout_ptr) << "Warning: in fn "\
<< __PRETTY_FUNCTION__ << " in "\
<< __FILE__ << ":"\
<< __LINE__ << ": "\
<< #EXPR << " is "\
<< (EXPR) << endl << flush;\
}\
}
#define WARN_EXPR(EXPR) do { \
using namespace std; \
if (WARNING_MESSAGE_FLAG) { \
cerr << "Warning: in fn " \
<< __PRETTY_FUNCTION__ << " in " \
<< __FILE__ << ":" \
<< __LINE__ << ": " \
<< #EXPR << " is " \
<< (EXPR) << endl << flush; \
(* debug_cout_ptr) << "Warning: in fn " \
<< __PRETTY_FUNCTION__ << " in " \
<< __FILE__ << ":" \
<< __LINE__ << ": " \
<< #EXPR << " is " \
<< (EXPR) << endl << flush; \
} \
} while (0)
#define DEBUG_MSG(module, priority, MESSAGE)\
{\
using namespace std;\
if (RUBY_DEBUG) {\
if (g_debug_ptr->validDebug(module, priority)) {\
(* debug_cout_ptr) << "Debug: in fn "\
<< __PRETTY_FUNCTION__\
<< " in " << __FILE__ << ":"\
<< __LINE__ << ": "\
<< (MESSAGE) << endl << flush;\
}\
}\
}
#define DEBUG_MSG(module, priority, MESSAGE) do { \
using namespace std; \
if (RUBY_DEBUG) { \
if (g_debug_ptr->validDebug(module, priority)) { \
(* debug_cout_ptr) << "Debug: in fn " \
<< __PRETTY_FUNCTION__ \
<< " in " << __FILE__ << ":" \
<< __LINE__ << ": " \
<< (MESSAGE) << endl << flush; \
} \
} \
} while (0)
#define DEBUG_EXPR(module, priority, EXPR)\
{\
using namespace std;\
if (RUBY_DEBUG) {\
if (g_debug_ptr->validDebug(module, priority)) {\
(* debug_cout_ptr) << "Debug: in fn "\
<< __PRETTY_FUNCTION__\
<< " in " << __FILE__ << ":"\
<< __LINE__ << ": "\
<< #EXPR << " is "\
<< (EXPR) << endl << flush;\
}\
}\
}
#define DEBUG_EXPR(module, priority, EXPR) do { \
using namespace std; \
if (RUBY_DEBUG) { \
if (g_debug_ptr->validDebug(module, priority)) { \
(* debug_cout_ptr) << "Debug: in fn " \
<< __PRETTY_FUNCTION__ \
<< " in " << __FILE__ << ":" \
<< __LINE__ << ": " \
<< #EXPR << " is " \
<< (EXPR) << endl << flush; \
} \
} \
} while (0)
#define DEBUG_NEWLINE(module, priority)\
{\
using namespace std;\
if (RUBY_DEBUG) {\
if (g_debug_ptr->validDebug(module, priority)) {\
(* debug_cout_ptr) << endl << flush;\
}\
}\
}
#define DEBUG_NEWLINE(module, priority) do { \
using namespace std; \
if (RUBY_DEBUG) { \
if (g_debug_ptr->validDebug(module, priority)) { \
(* debug_cout_ptr) << endl << flush; \
} \
} \
} while (0)
#define DEBUG_SLICC(priority, LINE, MESSAGE)\
{\
using namespace std;\
if (RUBY_DEBUG) {\
if (g_debug_ptr->validDebug(SLICC_COMP, priority)) {\
(* debug_cout_ptr) << (LINE) << (MESSAGE) << endl << flush;\
}\
}\
}
#define DEBUG_SLICC(priority, LINE, MESSAGE) do { \
using namespace std; \
if (RUBY_DEBUG) { \
if (g_debug_ptr->validDebug(SLICC_COMP, priority)) { \
(* debug_cout_ptr) << (LINE) << (MESSAGE) << endl << flush; \
} \
} \
} while (0)
#define DEBUG_OUT( rest... ) \
{\
using namespace std;\
if (RUBY_DEBUG) {\
cout << "Debug: in fn "\
<< __PRETTY_FUNCTION__\
<< " in " << __FILE__ << ":"\
<< __LINE__ << ": "; \
g_debug_ptr->debugMsg(rest); \
}\
}
#define DEBUG_OUT(rest... ) do { \
using namespace std; \
if (RUBY_DEBUG) { \
cout << "Debug: in fn " \
<< __PRETTY_FUNCTION__ \
<< " in " << __FILE__ << ":" \
<< __LINE__ << ": "; \
g_debug_ptr->debugMsg(rest); \
} \
} while (0)
#define ERROR_OUT( rest... ) \
{\
using namespace std;\
if (ERROR_MESSAGE_FLAG) {\
cout << "error: in fn "\
<< __PRETTY_FUNCTION__ << " in "\
<< __FILE__ << ":"\
<< __LINE__ << ": ";\
g_debug_ptr->debugMsg(rest); \
}\
}
#define ERROR_OUT( rest... ) do { \
using namespace std; \
if (ERROR_MESSAGE_FLAG) { \
cout << "error: in fn " \
<< __PRETTY_FUNCTION__ << " in " \
<< __FILE__ << ":" \
<< __LINE__ << ": "; \
g_debug_ptr->debugMsg(rest); \
} \
} while (0)
#endif //DEBUG_H
#endif // __MEM_RUBY_COMMON_DEBUG_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,6 +26,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cassert>
#include "mem/ruby/common/Driver.hh"
Driver::Driver()
@@ -37,3 +38,40 @@ Driver::Driver()
Driver::~Driver()
{
}
integer_t
Driver::getInstructionCount(int procID) const
{
return 1;
}
integer_t
Driver::getCycleCount(int procID) const
{
return 1;
}
void
Driver::addThreadDependency(int procID, int requestor_thread,
int conflict_thread) const
{
assert(0);
}
void
Driver::printDebug()
{}
integer_t
Driver::readPhysicalMemory(int procID, physical_address_t address, int len)
{
assert(0);
return 0;
}
void
Driver::writePhysicalMemory(int procID, physical_address_t address,
integer_t value, int len)
{
assert(0);
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,58 +26,40 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
* Description:
*
*/
#ifndef __MEM_RUBY_COMMON_DRIVER_HH__
#define __MEM_RUBY_COMMON_DRIVER_HH__
#ifndef DRIVER_H
#define DRIVER_H
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/NodeID.hh"
class Driver
{
public:
Driver();
virtual ~Driver() = 0;
class Driver {
public:
// Constructors
Driver();
// Public Methods
virtual void get_network_config() {}
virtual void dmaHitCallback() {};
virtual void hitCallback(int64_t id) = 0; // Called by sequencer
virtual void go() = 0;
virtual integer_t getInstructionCount(int procID) const;
virtual integer_t getCycleCount(int procID) const;
virtual void addThreadDependency(int procID, int requestor_thread,
int conflict_thread) const;
virtual void printDebug(); //called by Sequencer
// Destructor
virtual ~Driver() = 0;
virtual void printStats(ostream& out) const = 0;
virtual void clearStats() = 0;
// Public Methods
virtual void get_network_config() {}
virtual void dmaHitCallback() {};
virtual void hitCallback(int64_t id) = 0; // Called by sequencer
virtual void go() = 0;
virtual integer_t getInstructionCount(int procID) const { return 1; }
virtual integer_t getCycleCount(int procID) const { return 1; }
virtual void addThreadDependency(int procID, int requestor_thread, int conflict_thread) const { assert(0);}
virtual void printDebug(){} //called by Sequencer
virtual void printStats(ostream& out) const = 0;
virtual void clearStats() = 0;
virtual void printConfig(ostream& out) const = 0;
virtual integer_t readPhysicalMemory(int procID, physical_address_t address,
int len ){ ASSERT(0); return 0; }
virtual void writePhysicalMemory( int procID, physical_address_t address,
integer_t value, int len ){ ASSERT(0); }
protected:
// accessible by subclasses
private:
// inaccessible by subclasses
virtual void printConfig(ostream& out) const = 0;
virtual integer_t readPhysicalMemory(int procID, physical_address_t addr,
int len);
virtual void writePhysicalMemory(int procID, physical_address_t addr,
integer_t value, int len);
};
#endif //DRIVER_H
#endif //__MEM_RUBY_COMMON_DRIVER_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,42 +26,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
* */
#ifndef GLOBAL_H
#define GLOBAL_H
/*
#ifdef SINGLE_LEVEL_CACHE
const bool TWO_LEVEL_CACHE = false;
#define L1I_CACHE_MEMBER_VARIABLE m_L1Cache_cacheMemory_vec[m_version] // currently all protocols require L1s == nodes
#define L1D_CACHE_MEMBER_VARIABLE m_L1Cache_cacheMemory_vec[m_version] // "
#define L2_CACHE_MEMBER_VARIABLE m_L1Cache_cacheMemory_vec[m_version] // "
#define L2_CACHE_VARIABLE m_L1Cache_cacheMemory_vec
#else
const bool TWO_LEVEL_CACHE = true;
#ifdef IS_CMP
#define L1I_CACHE_MEMBER_VARIABLE m_L1Cache_L1IcacheMemory_vec[m_version]
#define L1D_CACHE_MEMBER_VARIABLE m_L1Cache_L1DcacheMemory_vec[m_version]
#define L2_CACHE_MEMBER_VARIABLE m_L2Cache_L2cacheMemory_vec[m_version]
#define L2_CACHE_VARIABLE m_L2Cache_L2cacheMemory_vec
#else // not IS_CMP
#define L1I_CACHE_MEMBER_VARIABLE m_L1Cache_L1IcacheMemory_vec[m_version] // currently all protocols require L1s == nodes
#define L1D_CACHE_MEMBER_VARIABLE m_L1Cache_L1DcacheMemory_vec[m_version] // "
// #define L2_CACHE_MEMBER_VARIABLE m_L1Cache_L2cacheMemory_vec[m_version] // old exclusive caches don't support L2s != nodes
#define L2_CACHE_MEMBER_VARIABLE m_L1Cache_cacheMemory_vec[m_version] // old exclusive caches don't support L2s != nodes
#define L2_CACHE_VARIABLE m_L1Cache_L2cacheMemory_vec
#endif // IS_CMP
#endif //SINGLE_LEVEL_CACHE
#define DIRECTORY_MEMBER_VARIABLE m_Directory_directory_vec[m_version]
#define TBE_TABLE_MEMBER_VARIABLE m_L1Cache_TBEs_vec[m_version]
*/
#ifndef __MEM_RUBY_COMMON_GLOBAL_HH__
#define __MEM_RUBY_COMMON_GLOBAL_HH__
// external includes for all classes
#include "mem/ruby/common/TypeDefines.hh"
@@ -85,13 +50,12 @@ extern RubySystem* g_system_ptr;
class Debug;
extern Debug* g_debug_ptr;
// FIXME: this is required by the contructor of Directory_Entry.hh. It can't go
// into slicc_util.hh because it opens a can of ugly worms
// FIXME: this is required by the contructor of Directory_Entry.hh.
// It can't go into slicc_util.hh because it opens a can of ugly worms
extern inline int max_tokens()
{
return 1024;
}
#endif //GLOBAL_H
#endif // __MEM_RUBY_COMMON_GLOBAL_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,164 +26,169 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
#include <cmath>
#include <iomanip>
#include "base/intmath.hh"
#include "mem/ruby/common/Histogram.hh"
using namespace std;
Histogram::Histogram(int binsize, int bins)
{
m_binsize = binsize;
m_bins = bins;
clear();
m_binsize = binsize;
m_bins = bins;
clear();
}
Histogram::~Histogram()
{
}
void Histogram::clear(int binsize, int bins)
void
Histogram::clear(int binsize, int bins)
{
m_binsize = binsize;
clear(bins);
m_binsize = binsize;
clear(bins);
}
void Histogram::clear(int bins)
void
Histogram::clear(int bins)
{
m_bins = bins;
m_largest_bin = 0;
m_max = 0;
m_data.setSize(m_bins);
for (int i = 0; i < m_bins; i++) {
m_data[i] = 0;
}
m_count = 0;
m_max = 0;
m_sumSamples = 0;
m_sumSquaredSamples = 0;
}
void Histogram::add(int64 value)
{
assert(value >= 0);
m_max = max(m_max, value);
m_count++;
m_sumSamples += value;
m_sumSquaredSamples += (value*value);
int index;
if (m_binsize == -1) {
// This is a log base 2 histogram
if (value == 0) {
index = 0;
} else {
index = int(log(double(value))/log(2.0))+1;
if (index >= m_data.size()) {
index = m_data.size()-1;
}
}
} else {
// This is a linear histogram
while (m_max >= (m_bins * m_binsize)) {
for (int i = 0; i < m_bins/2; i++) {
m_data[i] = m_data[i*2] + m_data[i*2 + 1];
}
for (int i = m_bins/2; i < m_bins; i++) {
m_bins = bins;
m_largest_bin = 0;
m_max = 0;
m_data.setSize(m_bins);
for (int i = 0; i < m_bins; i++) {
m_data[i] = 0;
}
m_binsize *= 2;
}
index = value/m_binsize;
}
assert(index >= 0);
m_data[index]++;
m_largest_bin = max(m_largest_bin, index);
m_count = 0;
m_max = 0;
m_sumSamples = 0;
m_sumSquaredSamples = 0;
}
void Histogram::add(const Histogram& hist)
void
Histogram::add(int64 value)
{
assert(hist.getBins() == m_bins);
assert(hist.getBinSize() == -1); // assume log histogram
assert(m_binsize == -1);
assert(value >= 0);
m_max = max(m_max, value);
m_count++;
for (int j = 0; j < hist.getData(0); j++) {
add(0);
}
m_sumSamples += value;
m_sumSquaredSamples += (value*value);
for (int i = 1; i < m_bins; i++) {
for (int j = 0; j < hist.getData(i); j++) {
add(1<<(i-1)); // account for the + 1 index
int index;
if (m_binsize == -1) {
// This is a log base 2 histogram
if (value == 0) {
index = 0;
} else {
index = floorLog2(value) + 1;
if (index >= m_data.size()) {
index = m_data.size() - 1;
}
}
} else {
// This is a linear histogram
while (m_max >= (m_bins * m_binsize)) {
for (int i = 0; i < m_bins/2; i++) {
m_data[i] = m_data[i*2] + m_data[i*2 + 1];
}
for (int i = m_bins/2; i < m_bins; i++) {
m_data[i] = 0;
}
m_binsize *= 2;
}
index = value/m_binsize;
}
}
assert(index >= 0);
m_data[index]++;
m_largest_bin = max(m_largest_bin, index);
}
void
Histogram::add(const Histogram& hist)
{
assert(hist.getBins() == m_bins);
assert(hist.getBinSize() == -1); // assume log histogram
assert(m_binsize == -1);
for (int j = 0; j < hist.getData(0); j++) {
add(0);
}
for (int i = 1; i < m_bins; i++) {
for (int j = 0; j < hist.getData(i); j++) {
add(1<<(i-1)); // account for the + 1 index
}
}
}
// Computation of standard deviation of samples a1, a2, ... aN
// variance = [SUM {ai^2} - (SUM {ai})^2/N]/(N-1)
// std deviation equals square root of variance
double Histogram::getStandardDeviation() const
double
Histogram::getStandardDeviation() const
{
double variance;
if(m_count > 1){
variance = (double)(m_sumSquaredSamples - m_sumSamples*m_sumSamples/m_count)/(m_count - 1);
} else {
return 0;
}
return sqrt(variance);
if (m_count <= 1)
return 0.0;
double variance =
(double)(m_sumSquaredSamples - m_sumSamples * m_sumSamples / m_count)
/ (m_count - 1);
return sqrt(variance);
}
void Histogram::print(ostream& out) const
void
Histogram::print(ostream& out) const
{
printWithMultiplier(out, 1.0);
printWithMultiplier(out, 1.0);
}
void Histogram::printPercent(ostream& out) const
void
Histogram::printPercent(ostream& out) const
{
if (m_count == 0) {
printWithMultiplier(out, 0.0);
} else {
printWithMultiplier(out, 100.0/double(m_count));
}
}
void Histogram::printWithMultiplier(ostream& out, double multiplier) const
{
if (m_binsize == -1) {
out << "[binsize: log2 ";
} else {
out << "[binsize: " << m_binsize << " ";
}
out << "max: " << m_max << " ";
out << "count: " << m_count << " ";
// out << "total: " << m_sumSamples << " ";
if (m_count == 0) {
out << "average: NaN |";
out << "standard deviation: NaN |";
} else {
out << "average: " << setw(5) << ((double) m_sumSamples)/m_count << " | ";
out << "standard deviation: " << getStandardDeviation() << " |";
}
for (int i = 0; i < m_bins && i <= m_largest_bin; i++) {
if (multiplier == 1.0) {
out << " " << m_data[i];
if (m_count == 0) {
printWithMultiplier(out, 0.0);
} else {
out << " " << double(m_data[i]) * multiplier;
printWithMultiplier(out, 100.0 / double(m_count));
}
}
out << " ]";
}
bool node_less_then_eq(const Histogram* n1, const Histogram* n2)
void
Histogram::printWithMultiplier(ostream& out, double multiplier) const
{
return (n1->size() > n2->size());
if (m_binsize == -1) {
out << "[binsize: log2 ";
} else {
out << "[binsize: " << m_binsize << " ";
}
out << "max: " << m_max << " ";
out << "count: " << m_count << " ";
// out << "total: " << m_sumSamples << " ";
if (m_count == 0) {
out << "average: NaN |";
out << "standard deviation: NaN |";
} else {
out << "average: " << setw(5) << ((double) m_sumSamples)/m_count
<< " | ";
out << "standard deviation: " << getStandardDeviation() << " |";
}
for (int i = 0; i < m_bins && i <= m_largest_bin; i++) {
if (multiplier == 1.0) {
out << " " << m_data[i];
} else {
out << " " << double(m_data[i]) * multiplier;
}
}
out << " ]";
}
bool
node_less_then_eq(const Histogram* n1, const Histogram* n2)
{
return (n1->size() > n2->size());
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,80 +26,57 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
* Description: The histogram class implements a simple histogram
*
*/
#ifndef HISTOGRAM_H
#define HISTOGRAM_H
#ifndef __MEM_RUBY_COMMON_HISTOGRAM_HH__
#define __MEM_RUBY_COMMON_HISTOGRAM_HH__
#include <iostream>
#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Vector.hh"
class Histogram {
public:
// Constructors
Histogram(int binsize = 1, int bins = 50);
class Histogram
{
public:
Histogram(int binsize = 1, int bins = 50);
~Histogram();
// Destructor
~Histogram();
void add(int64 value);
void add(const Histogram& hist);
void clear() { clear(m_bins); }
void clear(int bins);
void clear(int binsize, int bins);
int64 size() const { return m_count; }
int getBins() const { return m_bins; }
int getBinSize() const { return m_binsize; }
int64 getTotal() const { return m_sumSamples; }
int64 getData(int index) const { return m_data[index]; }
// Public Methods
void printWithMultiplier(std::ostream& out, double multiplier) const;
void printPercent(std::ostream& out) const;
void print(std::ostream& out) const;
void add(int64 value);
void add(const Histogram& hist);
void clear() { clear(m_bins); }
void clear(int bins);
void clear(int binsize, int bins);
int64 size() const { return m_count; }
int getBins() const { return m_bins; }
int getBinSize() const { return m_binsize; }
int64 getTotal() const { return m_sumSamples; }
int64 getData(int index) const { return m_data[index]; }
void printWithMultiplier(std::ostream& out, double multiplier) const;
void printPercent(std::ostream& out) const;
void print(std::ostream& out) const;
private:
// Private Methods
Vector<int64> m_data;
int64 m_max; // the maximum value seen so far
int64 m_count; // the number of elements added
int m_binsize; // the size of each bucket
int m_bins; // the number of buckets
int m_largest_bin; // the largest bin used
// Private copy constructor and assignment operator
// Histogram(const Histogram& obj);
// Histogram& operator=(const Histogram& obj);
int64 m_sumSamples; // the sum of all samples
int64 m_sumSquaredSamples; // the sum of the square of all samples
// Data Members (m_ prefix)
Vector<int64> m_data;
int64 m_max; // the maximum value seen so far
int64 m_count; // the number of elements added
int m_binsize; // the size of each bucket
int m_bins; // the number of buckets
int m_largest_bin; // the largest bin used
int64 m_sumSamples; // the sum of all samples
int64 m_sumSquaredSamples; // the sum of the square of all samples
double getStandardDeviation() const;
double getStandardDeviation() const;
};
bool node_less_then_eq(const Histogram* n1, const Histogram* n2);
// Output operator declaration
std::ostream& operator<<(std::ostream& out, const Histogram& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
std::ostream& operator<<(std::ostream& out, const Histogram& obj)
inline std::ostream&
operator<<(std::ostream& out, const Histogram& obj)
{
obj.print(out);
out << std::flush;
return out;
obj.print(out);
out << std::flush;
return out;
}
#endif //HISTOGRAM_H
#endif // __MEM_RUBY_COMMON_HISTOGRAM_HH__

View File

@@ -1,34 +0,0 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
#include "mem/ruby/slicc_interface/Message.hh"

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,15 +26,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NetDest.C
*
* Description: See NetDest.hh
*
* $Id$
*
*/
#include "mem/ruby/common/NetDest.hh"
#include "mem/protocol/Protocol.hh"
@@ -44,216 +34,242 @@ NetDest::NetDest()
setSize();
}
void NetDest::add(MachineID newElement)
void
NetDest::add(MachineID newElement)
{
m_bits[vecIndex(newElement)].add(bitIndex(newElement.num));
m_bits[vecIndex(newElement)].add(bitIndex(newElement.num));
}
void NetDest::addNetDest(const NetDest& netDest)
void
NetDest::addNetDest(const NetDest& netDest)
{
assert(m_bits.size() == netDest.getSize());
for (int i = 0; i < m_bits.size(); i++) {
m_bits[i].addSet(netDest.m_bits[i]);
}
assert(m_bits.size() == netDest.getSize());
for (int i = 0; i < m_bits.size(); i++) {
m_bits[i].addSet(netDest.m_bits[i]);
}
}
void NetDest::addRandom()
void
NetDest::addRandom()
{
int i = random()%m_bits.size();
m_bits[i].addRandom();
int i = random()%m_bits.size();
m_bits[i].addRandom();
}
void NetDest::setNetDest(MachineType machine, const Set& set)
void
NetDest::setNetDest(MachineType machine, const Set& set)
{
// assure that there is only one set of destinations for this machine
assert(MachineType_base_level((MachineType)(machine+1)) - MachineType_base_level(machine) == 1);
m_bits[MachineType_base_level(machine)] = set;
// assure that there is only one set of destinations for this machine
assert(MachineType_base_level((MachineType)(machine + 1)) -
MachineType_base_level(machine) == 1);
m_bits[MachineType_base_level(machine)] = set;
}
void NetDest::remove(MachineID oldElement)
void
NetDest::remove(MachineID oldElement)
{
m_bits[vecIndex(oldElement)].remove(bitIndex(oldElement.num));
m_bits[vecIndex(oldElement)].remove(bitIndex(oldElement.num));
}
void NetDest::removeNetDest(const NetDest& netDest)
void
NetDest::removeNetDest(const NetDest& netDest)
{
assert(m_bits.size() == netDest.getSize());
for (int i = 0; i < m_bits.size(); i++) {
m_bits[i].removeSet(netDest.m_bits[i]);
}
assert(m_bits.size() == netDest.getSize());
for (int i = 0; i < m_bits.size(); i++) {
m_bits[i].removeSet(netDest.m_bits[i]);
}
}
void NetDest::clear()
void
NetDest::clear()
{
for (int i = 0; i < m_bits.size(); i++) {
m_bits[i].clear();
}
for (int i = 0; i < m_bits.size(); i++) {
m_bits[i].clear();
}
}
void NetDest::broadcast()
void
NetDest::broadcast()
{
for (MachineType machine = MachineType_FIRST; machine < MachineType_NUM; ++machine) {
broadcast(machine);
}
for (MachineType machine = MachineType_FIRST;
machine < MachineType_NUM; ++machine) {
broadcast(machine);
}
}
void NetDest::broadcast(MachineType machineType) {
for (int i = 0; i < MachineType_base_count(machineType); i++) {
MachineID mach = {machineType, i};
add(mach);
}
void
NetDest::broadcast(MachineType machineType)
{
for (int i = 0; i < MachineType_base_count(machineType); i++) {
MachineID mach = {machineType, i};
add(mach);
}
}
//For Princeton Network
Vector<NodeID> NetDest::getAllDest() {
Vector<NodeID> dest;
dest.clear();
for (int i=0; i<m_bits.size(); i++) {
for (int j=0; j<m_bits[i].getSize(); j++) {
if (m_bits[i].isElement(j)) {
dest.insertAtBottom((NodeID) (MachineType_base_number((MachineType) i) + j));
}
}
Vector<NodeID>
NetDest::getAllDest()
{
Vector<NodeID> dest;
dest.clear();
for (int i = 0; i < m_bits.size(); i++) {
for (int j = 0; j < m_bits[i].getSize(); j++) {
if (m_bits[i].isElement(j)) {
int id = MachineType_base_number((MachineType)i) + j;
dest.insertAtBottom((NodeID)id);
}
}
return dest;
}
int NetDest::count() const
{
int counter = 0;
for (int i=0; i<m_bits.size(); i++) {
counter += m_bits[i].count();
}
return counter;
}
NodeID NetDest::elementAt(MachineID index) {
return m_bits[vecIndex(index)].elementAt(bitIndex(index.num));
}
MachineID NetDest::smallestElement() const
{
assert(count() > 0);
for (int i=0; i<m_bits.size(); i++) {
for (int j=0; j<m_bits[i].getSize(); j++) {
if (m_bits[i].isElement(j)) {
MachineID mach = {MachineType_from_base_level(i), j};
return mach;
}
}
}
ERROR_MSG("No smallest element of an empty set.");
return dest;
}
MachineID NetDest::smallestElement(MachineType machine) const
int
NetDest::count() const
{
for (int j = 0; j < m_bits[MachineType_base_level(machine)].getSize(); j++) {
if (m_bits[MachineType_base_level(machine)].isElement(j)) {
MachineID mach = {machine, j};
return mach;
int counter = 0;
for (int i = 0; i < m_bits.size(); i++) {
counter += m_bits[i].count();
}
}
ERROR_MSG("No smallest element of given MachineType.");
return counter;
}
NodeID
NetDest::elementAt(MachineID index)
{
return m_bits[vecIndex(index)].elementAt(bitIndex(index.num));
}
MachineID
NetDest::smallestElement() const
{
assert(count() > 0);
for (int i = 0; i < m_bits.size(); i++) {
for (int j = 0; j < m_bits[i].getSize(); j++) {
if (m_bits[i].isElement(j)) {
MachineID mach = {MachineType_from_base_level(i), j};
return mach;
}
}
}
ERROR_MSG("No smallest element of an empty set.");
}
MachineID
NetDest::smallestElement(MachineType machine) const
{
int size = m_bits[MachineType_base_level(machine)].getSize();
for (int j = 0; j < size; j++) {
if (m_bits[MachineType_base_level(machine)].isElement(j)) {
MachineID mach = {machine, j};
return mach;
}
}
ERROR_MSG("No smallest element of given MachineType.");
}
// Returns true iff all bits are set
bool NetDest::isBroadcast() const
bool
NetDest::isBroadcast() const
{
for (int i=0; i<m_bits.size(); i++) {
if (!m_bits[i].isBroadcast()) {
return false;
for (int i = 0; i < m_bits.size(); i++) {
if (!m_bits[i].isBroadcast()) {
return false;
}
}
}
return true;
return true;
}
// Returns true iff no bits are set
bool NetDest::isEmpty() const
bool
NetDest::isEmpty() const
{
for (int i=0; i<m_bits.size(); i++) {
if (!m_bits[i].isEmpty()) {
return false;
for (int i = 0; i < m_bits.size(); i++) {
if (!m_bits[i].isEmpty()) {
return false;
}
}
}
return true;
return true;
}
// returns the logical OR of "this" set and orNetDest
NetDest NetDest::OR(const NetDest& orNetDest) const
NetDest
NetDest::OR(const NetDest& orNetDest) const
{
assert(m_bits.size() == orNetDest.getSize());
NetDest result;
for (int i=0; i<m_bits.size(); i++) {
result.m_bits[i] = m_bits[i].OR(orNetDest.m_bits[i]);
}
return result;
assert(m_bits.size() == orNetDest.getSize());
NetDest result;
for (int i = 0; i < m_bits.size(); i++) {
result.m_bits[i] = m_bits[i].OR(orNetDest.m_bits[i]);
}
return result;
}
// returns the logical AND of "this" set and andNetDest
NetDest NetDest::AND(const NetDest& andNetDest) const
NetDest
NetDest::AND(const NetDest& andNetDest) const
{
assert(m_bits.size() == andNetDest.getSize());
NetDest result;
for (int i=0; i<m_bits.size(); i++) {
result.m_bits[i] = m_bits[i].AND(andNetDest.m_bits[i]);
}
return result;
assert(m_bits.size() == andNetDest.getSize());
NetDest result;
for (int i = 0; i < m_bits.size(); i++) {
result.m_bits[i] = m_bits[i].AND(andNetDest.m_bits[i]);
}
return result;
}
// Returns true if the intersection of the two sets is non-empty
bool NetDest::intersectionIsNotEmpty(const NetDest& other_netDest) const
bool
NetDest::intersectionIsNotEmpty(const NetDest& other_netDest) const
{
assert(m_bits.size() == other_netDest.getSize());
for (int i=0; i<m_bits.size(); i++) {
if (m_bits[i].intersectionIsNotEmpty(other_netDest.m_bits[i])) {
return true;
assert(m_bits.size() == other_netDest.getSize());
for (int i = 0; i < m_bits.size(); i++) {
if (m_bits[i].intersectionIsNotEmpty(other_netDest.m_bits[i])) {
return true;
}
}
}
return false;
return false;
}
bool NetDest::isSuperset(const NetDest& test) const
bool
NetDest::isSuperset(const NetDest& test) const
{
assert(m_bits.size() == test.getSize());
assert(m_bits.size() == test.getSize());
for (int i=0; i<m_bits.size(); i++) {
if (!m_bits[i].isSuperset(test.m_bits[i])) {
return false;
for (int i = 0; i < m_bits.size(); i++) {
if (!m_bits[i].isSuperset(test.m_bits[i])) {
return false;
}
}
}
return true;
return true;
}
bool NetDest::isElement(MachineID element) const
bool
NetDest::isElement(MachineID element) const
{
return ((m_bits[vecIndex(element)])).isElement(bitIndex(element.num));
return ((m_bits[vecIndex(element)])).isElement(bitIndex(element.num));
}
void NetDest::setSize()
void
NetDest::setSize()
{
m_bits.setSize(MachineType_base_level(MachineType_NUM));
assert(m_bits.size() == MachineType_NUM);
m_bits.setSize(MachineType_base_level(MachineType_NUM));
assert(m_bits.size() == MachineType_NUM);
for (int i = 0; i < m_bits.size(); i++) {
m_bits[i].setSize(MachineType_base_count((MachineType)i));
}
}
void NetDest::print(ostream& out) const
{
out << "[NetDest (" << m_bits.size() << ") ";
for (int i=0; i<m_bits.size(); i++) {
for (int j=0; j<m_bits[i].getSize(); j++) {
out << (bool) m_bits[i].isElement(j) << " ";
for (int i = 0; i < m_bits.size(); i++) {
m_bits[i].setSize(MachineType_base_count((MachineType)i));
}
out << " - ";
}
out << "]";
}
void
NetDest::print(ostream& out) const
{
    // Dump each machine type's bit vector in order, with " - "
    // separating one machine's bits from the next.
    out << "[NetDest (" << m_bits.size() << ") ";
    for (int node = 0; node < m_bits.size(); node++) {
        for (int bit = 0; bit < m_bits[node].getSize(); bit++) {
            out << (bool) m_bits[node].isElement(bit) << " ";
        }
        out << " - ";
    }
    out << "]";
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,22 +26,13 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Set.hh
*
* Description:
*
* $Id$
*
*/
// NetDest specifies the network destination of a NetworkMessage
// This is backward compatible with the Set class that was previously
// used to specify network destinations.
// NetDest supports both node networks and component networks
#ifndef NETDEST_H
#define NETDEST_H
#ifndef __MEM_RUBY_COMMON_NETDEST_HH__
#define __MEM_RUBY_COMMON_NETDEST_HH__
#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Vector.hh"
@@ -51,94 +41,92 @@
#include "mem/ruby/common/Set.hh"
#include "mem/protocol/MachineType.hh"
class Set;
class NetDest
{
public:
// Constructors
// creates an empty set
NetDest();
explicit NetDest(int bit_size);
class NetDest {
public:
// Constructors
// creates an empty set
NetDest();
explicit NetDest(int bit_size);
NetDest& operator=(const Set& obj);
NetDest& operator=(const Set& obj);
~NetDest()
{
DEBUG_MSG(MEMORY_COMP, LowPrio, "NetDest Destructor");
}
// Destructor
~NetDest() { DEBUG_MSG(MEMORY_COMP, LowPrio, "NetDest Destructor"); }
void add(MachineID newElement);
void addNetDest(const NetDest& netDest);
void addRandom();
void setNetDest(MachineType machine, const Set& set);
void remove(MachineID oldElement);
void removeNetDest(const NetDest& netDest);
void clear();
void broadcast();
void broadcast(MachineType machine);
int count() const;
bool isEqual(const NetDest& netDest);
// Public Methods
void add(MachineID newElement);
void addNetDest(const NetDest& netDest);
void addRandom();
void setNetDest(MachineType machine, const Set& set);
void remove(MachineID oldElement);
void removeNetDest(const NetDest& netDest);
void clear();
void broadcast();
void broadcast(MachineType machine);
int count() const;
bool isEqual(const NetDest& netDest);
// return the logical OR of this netDest and orNetDest
NetDest OR(const NetDest& orNetDest) const;
NetDest OR(const NetDest& orNetDest) const; // return the logical OR of this netDest and orNetDest
NetDest AND(const NetDest& andNetDest) const; // return the logical AND of this netDest and andNetDest
// return the logical AND of this netDest and andNetDest
NetDest AND(const NetDest& andNetDest) const;
// Returns true if the intersection of the two netDests is non-empty
bool intersectionIsNotEmpty(const NetDest& other_netDest) const;
// Returns true if the intersection of the two netDests is non-empty
bool intersectionIsNotEmpty(const NetDest& other_netDest) const;
// Returns true if the intersection of the two netDests is empty
bool intersectionIsEmpty(const NetDest& other_netDest) const;
// Returns true if the intersection of the two netDests is empty
bool intersectionIsEmpty(const NetDest& other_netDest) const;
bool isSuperset(const NetDest& test) const;
bool isSubset(const NetDest& test) const { return test.isSuperset(*this); }
bool isElement(MachineID element) const;
bool isBroadcast() const;
bool isEmpty() const;
bool isSuperset(const NetDest& test) const;
bool isSubset(const NetDest& test) const { return test.isSuperset(*this); }
bool isElement(MachineID element) const;
bool isBroadcast() const;
bool isEmpty() const;
//For Princeton Network
Vector<NodeID> getAllDest();
// For Princeton Network
Vector<NodeID> getAllDest();
MachineID smallestElement() const;
MachineID smallestElement(MachineType machine) const;
MachineID smallestElement() const;
MachineID smallestElement(MachineType machine) const;
void setSize();
int getSize() const { return m_bits.size(); }
void setSize();
int getSize() const { return m_bits.size(); }
// get element for a index
NodeID elementAt(MachineID index);
// get element for a index
NodeID elementAt(MachineID index);
void print(ostream& out) const;
void print(ostream& out) const;
private:
private:
// returns a value >= MachineType_base_level("this machine")
// and < MachineType_base_level("next highest machine")
int
vecIndex(MachineID m) const
{
int vec_index = MachineType_base_level(m.type);
assert(vec_index < m_bits.size());
return vec_index;
}
// Private Methods
// returns a value >= MachineType_base_level("this machine") and < MachineType_base_level("next highest machine")
int vecIndex(MachineID m) const {
int vec_index = MachineType_base_level(m.type);
assert(vec_index < m_bits.size());
return vec_index;
}
NodeID bitIndex(NodeID index) const {
return index;
}
// Data Members (m_ prefix)
Vector < Set > m_bits; // a Vector of bit vectors - i.e. Sets
NodeID
bitIndex(NodeID index) const
{
return index;
}
Vector <Set> m_bits; // a Vector of bit vectors - i.e. Sets
};
// Output operator declaration
ostream& operator<<(ostream& out, const NetDest& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const NetDest& obj)
inline ostream&
operator<<(ostream& out, const NetDest& obj)
{
obj.print(out);
out << flush;
return out;
obj.print(out);
out << flush;
return out;
}
#endif //NETDEST_H
#endif // __MEM_RUBY_COMMON_NETDEST_HH__

View File

@@ -41,7 +41,6 @@ Source('Debug.cc')
Source('Driver.cc')
Source('Global.cc')
Source('Histogram.cc')
Source('Message.cc')
Source('NetDest.cc')
Source('Set.cc', Werror=False)
Source('SubBlock.cc')

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,44 +26,44 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
#include "mem/ruby/common/SubBlock.hh"
SubBlock::SubBlock(const Address& addr, int size)
{
m_address = addr;
setSize(size);
for(int i=0; i<size; i++) {
setByte(i, 0);
}
m_address = addr;
setSize(size);
for (int i = 0; i < size; i++) {
setByte(i, 0);
}
}
void SubBlock::internalMergeFrom(const DataBlock& data)
void
SubBlock::internalMergeFrom(const DataBlock& data)
{
int size = getSize();
assert(size > 0);
int offset = m_address.getOffset();
for(int i=0; i<size; i++) {
this->setByte(i, data.getByte(offset+i));
}
int size = getSize();
assert(size > 0);
int offset = m_address.getOffset();
for (int i = 0; i < size; i++) {
this->setByte(i, data.getByte(offset + i));
}
}
void SubBlock::internalMergeTo(DataBlock& data) const
void
SubBlock::internalMergeTo(DataBlock& data) const
{
int size = getSize();
assert(size > 0);
int offset = m_address.getOffset();
for(int i=0; i<size; i++) {
data.setByte(offset+i, this->getByte(i)); // This will detect crossing a cache line boundary
}
int size = getSize();
assert(size > 0);
int offset = m_address.getOffset();
for (int i = 0; i < size; i++) {
// This will detect crossing a cache line boundary
data.setByte(offset + i, this->getByte(i));
}
}
void SubBlock::print(ostream& out) const
void
SubBlock::print(ostream& out) const
{
out << "[" << m_address << ", " << getSize() << ", " << m_data << "]";
out << "[" << m_address << ", " << getSize() << ", " << m_data << "]";
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,69 +26,55 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
#ifndef __MEM_RUBY_COMMON_SUBBLOCK_HH__
#define __MEM_RUBY_COMMON_SUBBLOCK_HH__
#ifndef SubBlock_H
#define SubBlock_H
#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Vector.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/DataBlock.hh"
#include "mem/gems_common/Vector.hh"
#include "mem/ruby/common/Global.hh"
class SubBlock {
public:
// Constructors
SubBlock() { }
SubBlock(const Address& addr, int size);
class SubBlock
{
public:
SubBlock() { }
SubBlock(const Address& addr, int size);
~SubBlock() { }
// Destructor
~SubBlock() { }
const Address& getAddress() const { return m_address; }
void setAddress(const Address& addr) { m_address = addr; }
// Public Methods
const Address& getAddress() const { return m_address; }
void setAddress(const Address& addr) { m_address = addr; }
int getSize() const { return m_data.size(); }
void setSize(int size) { m_data.setSize(size); }
uint8 getByte(int offset) const { return m_data[offset]; }
void setByte(int offset, uint8 data) { m_data[offset] = data; }
int getSize() const { return m_data.size(); }
void setSize(int size) { m_data.setSize(size); }
uint8 getByte(int offset) const { return m_data[offset]; }
void setByte(int offset, uint8 data) { m_data[offset] = data; }
// Shorthands
uint8 readByte() const { return getByte(0); }
void writeByte(uint8 data) { setByte(0, data); }
// Shorthands
uint8 readByte() const { return getByte(0); }
void writeByte(uint8 data) { setByte(0, data); }
// Merging to and from DataBlocks - We only need to worry about
// updates when we are using DataBlocks
void mergeTo(DataBlock& data) const { internalMergeTo(data); }
void mergeFrom(const DataBlock& data) { internalMergeFrom(data); }
// Merging to and from DataBlocks - We only need to worry about
// updates when we are using DataBlocks
void mergeTo(DataBlock& data) const { internalMergeTo(data); }
void mergeFrom(const DataBlock& data) { internalMergeFrom(data); }
void print(ostream& out) const;
void print(ostream& out) const;
private:
private:
void internalMergeTo(DataBlock& data) const;
void internalMergeFrom(const DataBlock& data);
void internalMergeTo(DataBlock& data) const;
void internalMergeFrom(const DataBlock& data);
// Data Members (m_ prefix)
Address m_address;
Vector<uint8_t> m_data;
// Data Members (m_ prefix)
Address m_address;
Vector<uint8_t> m_data;
};
// Output operator declaration
ostream& operator<<(ostream& out, const SubBlock& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const SubBlock& obj)
inline ostream&
operator<<(ostream& out, const SubBlock& obj)
{
obj.print(out);
out << flush;
return out;
obj.print(out);
out << flush;
return out;
}
#endif //SubBlock_H
#endif // __MEM_RUBY_COMMON_SUBBLOCK_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,16 +26,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/eventqueue/RubyEventQueueNode.hh"
// Class public method definitions
#include "mem/ruby/system/System.hh"
RubyEventQueue::RubyEventQueue(EventQueue* eventq, Tick _clock)
: EventManager(eventq), m_clock(_clock)
@@ -47,28 +40,28 @@ RubyEventQueue::~RubyEventQueue()
{
}
void RubyEventQueue::scheduleEvent(Consumer* consumer, Time timeDelta)
{
scheduleEventAbsolute(consumer, timeDelta + getTime());
}
void RubyEventQueue::scheduleEventAbsolute(Consumer* consumer, Time timeAbs)
void
RubyEventQueue::scheduleEvent(Consumer* consumer, Time timeDelta)
{
// Check to see if this is a redundant wakeup
ASSERT(consumer != NULL);
if (!consumer->alreadyScheduled(timeAbs)) {
// This wakeup is not redundant
RubyEventQueueNode *thisNode = new RubyEventQueueNode(consumer, this);
assert(timeAbs > getTime());
schedule(thisNode, (timeAbs * m_clock));
consumer->insertScheduledWakeupTime(timeAbs);
}
scheduleEventAbsolute(consumer, timeDelta + getTime());
}
// Class private method definitions
void
RubyEventQueue::scheduleEventAbsolute(Consumer* consumer, Time timeAbs)
{
// Check to see if this is a redundant wakeup
ASSERT(consumer != NULL);
if (!consumer->alreadyScheduled(timeAbs)) {
// This wakeup is not redundant
RubyEventQueueNode *thisNode = new RubyEventQueueNode(consumer, this);
assert(timeAbs > getTime());
schedule(thisNode, (timeAbs * m_clock));
consumer->insertScheduledWakeupTime(timeAbs);
}
}
void
RubyEventQueue::print(ostream& out) const
{
out << "[Event Queue:]";
out << "[Event Queue:]";
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,9 +27,7 @@
*/
/*
* $Id$
*
* Description: The RubyEventQueue class implements an event queue which
* The RubyEventQueue class implements an event queue which
* can trigger events, allowing our simulation to be event driven.
*
* Currently, the only event we support is a Consumer being signaled
@@ -56,8 +53,8 @@
*
*/
#ifndef RUBYEVENTQUEUE_H
#define RUBYEVENTQUEUE_H
#ifndef __MEM_RUBY_EVENTQUEUE_RUBYEVENTQUEUE_HH__
#define __MEM_RUBY_EVENTQUEUE_RUBYEVENTQUEUE_HH__
#include <iostream>
@@ -70,48 +67,36 @@ class Consumer;
template <class TYPE> class PrioHeap;
class RubyEventQueueNode;
class RubyEventQueue : public EventManager {
public:
// Constructors
RubyEventQueue(EventQueue* eventq, Tick _clock);
class RubyEventQueue : public EventManager
{
public:
RubyEventQueue(EventQueue* eventq, Tick _clock);
~RubyEventQueue();
// Destructor
~RubyEventQueue();
Time getTime() const { return curTick/m_clock; }
Tick getClock() const { return m_clock; }
void scheduleEvent(Consumer* consumer, Time timeDelta);
void scheduleEventAbsolute(Consumer* consumer, Time timeAbs);
void print(std::ostream& out) const;
// Public Methods
void triggerEvents(Time t) { assert(0); }
void triggerAllEvents() { assert(0); }
Time getTime() const { return curTick/m_clock; }
Tick getClock() const { return m_clock; }
void scheduleEvent(Consumer* consumer, Time timeDelta);
void scheduleEventAbsolute(Consumer* consumer, Time timeAbs);
void print(std::ostream& out) const;
private:
// Private copy constructor and assignment operator
RubyEventQueue(const RubyEventQueue& obj);
RubyEventQueue& operator=(const RubyEventQueue& obj);
void triggerEvents(Time t) { assert(0); }
void triggerAllEvents() { assert(0); }
// Private Methods
private:
// Private copy constructor and assignment operator
RubyEventQueue(const RubyEventQueue& obj);
RubyEventQueue& operator=(const RubyEventQueue& obj);
// Data Members (m_ prefix)
Tick m_clock;
// Data Members (m_ prefix)
Tick m_clock;
};
// Output operator declaration
inline extern
std::ostream& operator<<(std::ostream& out, const RubyEventQueue& obj);
// ******************* Definitions *******************
// Output operator definition
inline extern
std::ostream& operator<<(std::ostream& out, const RubyEventQueue& obj)
inline std::ostream&
operator<<(std::ostream& out, const RubyEventQueue& obj)
{
obj.print(out);
out << std::flush;
return out;
obj.print(out);
out << std::flush;
return out;
}
#endif //EVENTQUEUE_H
#endif // __MEM_RUBY_EVENTQUEUE_RUBYEVENTQUEUE_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,20 +26,16 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
#include "mem/ruby/eventqueue/RubyEventQueueNode.hh"
void RubyEventQueueNode::print(std::ostream& out) const
void
RubyEventQueueNode::print(std::ostream& out) const
{
out << "[";
if (m_consumer_ptr != NULL) {
out << " Consumer=" << m_consumer_ptr;
} else {
out << " Consumer=NULL";
}
out << "]";
out << "[";
if (m_consumer_ptr != NULL) {
out << " Consumer=" << m_consumer_ptr;
} else {
out << " Consumer=NULL";
}
out << "]";
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,65 +26,44 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
#ifndef RUBYEVENTQUEUENODE_H
#define RUBYEVENTQUEUENODE_H
#ifndef __MEM_RUBY_EVENTQUEUE_RUBYEVENTQUEUENODE_HH__
#define __MEM_RUBY_EVENTQUEUE_RUBYEVENTQUEUENODE_HH__
#include <iostream>
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/common/Global.hh"
#include "sim/eventq.hh"
#include "mem/ruby/common/Consumer.hh"
//class Consumer;
class RubyEventQueueNode : public Event {
public:
// Constructors
RubyEventQueueNode(Consumer* _consumer, RubyEventQueue* _eventq)
: m_consumer_ptr(_consumer), m_eventq_ptr(_eventq)
{
setFlags(AutoDelete);
}
class RubyEventQueueNode : public Event
{
public:
RubyEventQueueNode(Consumer* _consumer, RubyEventQueue* _eventq)
: m_consumer_ptr(_consumer), m_eventq_ptr(_eventq)
{
setFlags(AutoDelete);
}
// Destructor
//~RubyEventQueueNode();
void print(std::ostream& out) const;
virtual void
process()
{
m_consumer_ptr->wakeup();
m_consumer_ptr->removeScheduledWakeupTime(m_eventq_ptr->getTime());
}
virtual const char *description() const { return "Ruby Event"; }
// Public Methods
void print(std::ostream& out) const;
virtual void process()
{
m_consumer_ptr->wakeup();
m_consumer_ptr->removeScheduledWakeupTime(m_eventq_ptr->getTime());
}
virtual const char *description() const { return "Ruby Event"; }
private:
// Private Methods
// Default copy constructor and assignment operator
// RubyEventQueueNode(const RubyEventQueueNode& obj);
// Data Members (m_ prefix)
Consumer* m_consumer_ptr;
RubyEventQueue* m_eventq_ptr;
private:
Consumer* m_consumer_ptr;
RubyEventQueue* m_eventq_ptr;
};
// Output operator declaration
std::ostream& operator<<(std::ostream& out, const RubyEventQueueNode& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
std::ostream& operator<<(std::ostream& out, const RubyEventQueueNode& obj)
inline std::ostream&
operator<<(std::ostream& out, const RubyEventQueueNode& obj)
{
obj.print(out);
out << std::flush;
return out;
obj.print(out);
out << std::flush;
return out;
}
#endif //EVENTQUEUENODE_H
#endif // __MEM_RUBY_EVENTQUEUE_EVENTQUEUENODE_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,44 +26,32 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* AbstractBloomFilter.hh
*
* Description:
*
*
*/
#ifndef __MEM_RUBY_FILTERS_ABSTRACTBLOOMFILTER_HH__
#define __MEM_RUBY_FILTERS_ABSTRACTBLOOMFILTER_HH__
#ifndef ABSTRACT_BLOOM_FILTER_H
#define ABSTRACT_BLOOM_FILTER_H
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
class AbstractBloomFilter {
public:
class AbstractBloomFilter
{
public:
virtual ~AbstractBloomFilter() {};
virtual void clear() = 0;
virtual void increment(const Address& addr) = 0;
virtual void decrement(const Address& addr) = 0;
virtual void merge(AbstractBloomFilter * other_filter) = 0;
virtual void set(const Address& addr) = 0;
virtual void unset(const Address& addr) = 0;
virtual ~AbstractBloomFilter() {};
virtual void clear() = 0;
virtual void increment(const Address& addr) = 0;
virtual void decrement(const Address& addr) = 0;
virtual void merge(AbstractBloomFilter * other_filter) = 0;
virtual void set(const Address& addr) = 0;
virtual void unset(const Address& addr) = 0;
virtual bool isSet(const Address& addr) = 0;
virtual int getCount(const Address& addr) = 0;
virtual int getTotalCount() = 0;
virtual bool isSet(const Address& addr) = 0;
virtual int getCount(const Address& addr) = 0;
virtual int getTotalCount() = 0;
virtual void print(ostream& out) const = 0;
virtual int getIndex(const Address& addr) = 0;
virtual int readBit(const int index) = 0;
virtual void writeBit(const int index, const int value) = 0;
private:
virtual void print(ostream& out) const = 0;
virtual int getIndex(const Address& addr) = 0;
virtual int readBit(const int index) = 0;
virtual void writeBit(const int index, const int value) = 0;
};
#endif
#endif // __MEM_RUBY_FILTERS_ABSTRACTBLOOMFILTER_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,121 +26,133 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* BlockBloomFilter.cc
*
* Description:
*
*
*/
#include "mem/ruby/filters/BlockBloomFilter.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/filters/BlockBloomFilter.hh"
BlockBloomFilter::BlockBloomFilter(string str)
{
string tail(str);
string head = string_split(tail, '_');
string tail(str);
string head = string_split(tail, '_');
m_filter_size = atoi(head.c_str());
m_filter_size_bits = log_int(m_filter_size);
m_filter_size = atoi(head.c_str());
m_filter_size_bits = log_int(m_filter_size);
m_filter.setSize(m_filter_size);
m_filter.setSize(m_filter_size);
clear();
clear();
}
BlockBloomFilter::~BlockBloomFilter(){
}
void BlockBloomFilter::clear()
BlockBloomFilter::~BlockBloomFilter()
{
for (int i = 0; i < m_filter_size; i++) {
m_filter[i] = 0;
}
}
void BlockBloomFilter::increment(const Address& addr)
void
BlockBloomFilter::clear()
{
// Not used
}
void BlockBloomFilter::decrement(const Address& addr)
{
// Not used
}
void BlockBloomFilter::merge(AbstractBloomFilter * other_filter)
{
// TODO
}
void BlockBloomFilter::set(const Address& addr)
{
int i = get_index(addr);
m_filter[i] = 1;
}
void BlockBloomFilter::unset(const Address& addr)
{
int i = get_index(addr);
m_filter[i] = 0;
}
bool BlockBloomFilter::isSet(const Address& addr)
{
int i = get_index(addr);
return (m_filter[i]);
}
int BlockBloomFilter::getCount(const Address& addr)
{
return m_filter[get_index(addr)];
}
int BlockBloomFilter::getTotalCount()
{
int count = 0;
for (int i = 0; i < m_filter_size; i++) {
if (m_filter[i]) {
count++;
for (int i = 0; i < m_filter_size; i++) {
m_filter[i] = 0;
}
}
return count;
}
int BlockBloomFilter::getIndex(const Address& addr)
void
BlockBloomFilter::increment(const Address& addr)
{
return get_index(addr);
// Not used
}
void BlockBloomFilter::print(ostream& out) const
void
BlockBloomFilter::decrement(const Address& addr)
{
    // Not used -- this filter stores single 0/1 bits (see set/unset),
    // not per-entry counts, so decrementing has no meaning here.
}
void
BlockBloomFilter::merge(AbstractBloomFilter * other_filter)
{
    // TODO: merging another filter's bits into this one is not
    // implemented; calls are currently silent no-ops.
}
void
BlockBloomFilter::set(const Address& addr)
{
    // Hash the address to a single filter slot and mark it present.
    m_filter[get_index(addr)] = 1;
}
void
BlockBloomFilter::unset(const Address& addr)
{
    // Clear the single slot this address hashes to.
    m_filter[get_index(addr)] = 0;
}
bool
BlockBloomFilter::isSet(const Address& addr)
{
    // An address maps to exactly one bit; nonzero means present.
    return m_filter[get_index(addr)] != 0;
}
int
BlockBloomFilter::getCount(const Address& addr)
{
    // For this filter a "count" is just the raw 0/1 bit value.
    int slot = get_index(addr);
    return m_filter[slot];
}
int
BlockBloomFilter::getTotalCount()
{
    // Tally how many filter slots are currently nonzero.
    int num_set = 0;
    for (int slot = 0; slot < m_filter_size; slot++) {
        if (m_filter[slot] != 0)
            num_set++;
    }
    return num_set;
}
// Public accessor for the filter slot an address hashes to; simply
// forwards to the private hash helper get_index().
int
BlockBloomFilter::getIndex(const Address& addr)
{
    return get_index(addr);
}
void
BlockBloomFilter::print(ostream& out) const
{
    // No textual dump is implemented for this filter; printing is a
    // deliberate no-op.
}
int BlockBloomFilter::readBit(const int index) {
return m_filter[index];
}
void BlockBloomFilter::writeBit(const int index, const int value) {
m_filter[index] = value;
}
int BlockBloomFilter::get_index(const Address& addr)
int
BlockBloomFilter::readBit(const int index)
{
// Pull out some bit field ==> B1
// Pull out additional bits, not the same as B1 ==> B2
// XOR B1 and B2 to get hash index
physical_address_t block_bits = addr.bitSelect( RubySystem::getBlockSizeBits(), 2*RubySystem::getBlockSizeBits() - 1);
int offset = 5;
physical_address_t other_bits = addr.bitSelect( 2*RubySystem::getBlockSizeBits() + offset, 2*RubySystem::getBlockSizeBits() + offset + m_filter_size_bits - 1);
int index = block_bits ^ other_bits;
assert(index < m_filter_size);
return index;
return m_filter[index];
}
void
BlockBloomFilter::writeBit(const int index, const int value)
{
    // Overwrite the slot at a raw filter index -- no address hashing;
    // the caller supplies the index directly.
    m_filter[index] = value;
}
int
BlockBloomFilter::get_index(const Address& addr)
{
// Pull out some bit field ==> B1
// Pull out additional bits, not the same as B1 ==> B2
// XOR B1 and B2 to get hash index
physical_address_t block_bits =
addr.bitSelect(RubySystem::getBlockSizeBits(),
2 * RubySystem::getBlockSizeBits() - 1);
int offset = 5;
physical_address_t other_bits =
addr.bitSelect(2 * RubySystem::getBlockSizeBits() + offset,
2 * RubySystem::getBlockSizeBits() + offset +
m_filter_size_bits - 1);
int index = block_bits ^ other_bits;
assert(index < m_filter_size);
return index;
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,55 +26,45 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* BlockBloomFilter.hh
*
* Description:
*
*
*/
#ifndef BLOCK_BLOOM_FILTER_H
#define BLOCK_BLOOM_FILTER_H
#ifndef __MEM_RUBY_FILTERS_BLOCKBLOOMFILTER_HH__
#define __MEM_RUBY_FILTERS_BLOCKBLOOMFILTER_HH__
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/filters/AbstractBloomFilter.hh"
class BlockBloomFilter : public AbstractBloomFilter {
public:
class BlockBloomFilter : public AbstractBloomFilter
{
public:
BlockBloomFilter(string config);
~BlockBloomFilter();
~BlockBloomFilter();
BlockBloomFilter(string config);
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(AbstractBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(AbstractBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
bool isSet(const Address& addr);
int getCount(const Address& addr);
int getTotalCount();
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
bool isSet(const Address& addr);
int getCount(const Address& addr);
int getTotalCount();
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
void print(ostream& out) const;
void print(ostream& out) const;
private:
int get_index(const Address& addr);
private:
Vector<int> m_filter;
int m_filter_size;
int m_filter_size_bits;
int get_index(const Address& addr);
Vector<int> m_filter;
int m_filter_size;
int m_filter_size_bits;
int m_count_bits;
int m_count;
int m_count_bits;
int m_count;
};
#endif
#endif // __MEM_RUBY_FILTERS_BLOCKBLOOMFILTER_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,206 +26,220 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* BulkBloomFilter.cc
*
* Description:
*
*
*/
#include "mem/ruby/filters/BulkBloomFilter.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/filters/BulkBloomFilter.hh"
BulkBloomFilter::BulkBloomFilter(string str)
{
string tail(str);
string head = string_split(tail, '_');
string tail(str);
string head = string_split(tail, '_');
m_filter_size = atoi(head.c_str());
m_filter_size_bits = log_int(m_filter_size);
// split the filter bits in half, c0 and c1
m_sector_bits = m_filter_size_bits - 1;
m_filter_size = atoi(head.c_str());
m_filter_size_bits = log_int(m_filter_size);
// split the filter bits in half, c0 and c1
m_sector_bits = m_filter_size_bits - 1;
m_temp_filter.setSize(m_filter_size);
m_filter.setSize(m_filter_size);
clear();
m_temp_filter.setSize(m_filter_size);
m_filter.setSize(m_filter_size);
clear();
// clear temp filter
for(int i=0; i < m_filter_size; ++i){
m_temp_filter[i] = 0;
}
}
BulkBloomFilter::~BulkBloomFilter(){
}
void BulkBloomFilter::clear()
{
for (int i = 0; i < m_filter_size; i++) {
m_filter[i] = 0;
}
}
void BulkBloomFilter::increment(const Address& addr)
{
// Not used
}
void BulkBloomFilter::decrement(const Address& addr)
{
// Not used
}
void BulkBloomFilter::merge(AbstractBloomFilter * other_filter)
{
// TODO
}
void BulkBloomFilter::set(const Address& addr)
{
// c0 contains the cache index bits
int set_bits = m_sector_bits;
int block_bits = RubySystem::getBlockSizeBits();
int c0 = addr.bitSelect( block_bits, block_bits + set_bits - 1);
// c1 contains the lower m_sector_bits permuted bits
//Address permuted_bits = permute(addr);
//int c1 = permuted_bits.bitSelect(0, set_bits-1);
int c1 = addr.bitSelect( block_bits+set_bits, (block_bits+2*set_bits) - 1);
//ASSERT(c0 < (m_filter_size/2));
//ASSERT(c0 + (m_filter_size/2) < m_filter_size);
//ASSERT(c1 < (m_filter_size/2));
// set v0 bit
m_filter[c0 + (m_filter_size/2)] = 1;
// set v1 bit
m_filter[c1] = 1;
}
void BulkBloomFilter::unset(const Address& addr)
{
// not used
}
bool BulkBloomFilter::isSet(const Address& addr)
{
// c0 contains the cache index bits
int set_bits = m_sector_bits;
int block_bits = RubySystem::getBlockSizeBits();
int c0 = addr.bitSelect( block_bits, block_bits + set_bits - 1);
// c1 contains the lower 10 permuted bits
//Address permuted_bits = permute(addr);
//int c1 = permuted_bits.bitSelect(0, set_bits-1);
int c1 = addr.bitSelect( block_bits+set_bits, (block_bits+2*set_bits) - 1);
//ASSERT(c0 < (m_filter_size/2));
//ASSERT(c0 + (m_filter_size/2) < m_filter_size);
//ASSERT(c1 < (m_filter_size/2));
// set v0 bit
m_temp_filter[c0 + (m_filter_size/2)] = 1;
// set v1 bit
m_temp_filter[c1] = 1;
// perform filter intersection. If any c part is 0, no possibility of address being in signature.
// get first c intersection part
bool zero = false;
for(int i=0; i < m_filter_size/2; ++i){
// get intersection of signatures
m_temp_filter[i] = m_temp_filter[i] && m_filter[i];
zero = zero || m_temp_filter[i];
}
zero = !zero;
if(zero){
// one section is zero, no possibility of address in signature
// reset bits we just set
m_temp_filter[c0 + (m_filter_size/2)] = 0;
m_temp_filter[c1] = 0;
return false;
}
// check second section
zero = false;
for(int i=m_filter_size/2; i < m_filter_size; ++i){
// get intersection of signatures
m_temp_filter[i] = m_temp_filter[i] && m_filter[i];
zero = zero || m_temp_filter[i];
}
zero = !zero;
if(zero){
// one section is zero, no possibility of address in signature
m_temp_filter[c0 + (m_filter_size/2)] = 0;
m_temp_filter[c1] = 0;
return false;
}
// one section has at least one bit set
m_temp_filter[c0 + (m_filter_size/2)] = 0;
m_temp_filter[c1] = 0;
return true;
}
int BulkBloomFilter::getCount(const Address& addr)
{
// not used
return 0;
}
int BulkBloomFilter::getTotalCount()
{
int count = 0;
for (int i = 0; i < m_filter_size; i++) {
if (m_filter[i]) {
count++;
// clear temp filter
for (int i = 0; i < m_filter_size; ++i) {
m_temp_filter[i] = 0;
}
}
return count;
}
int BulkBloomFilter::getIndex(const Address& addr)
{
return get_index(addr);
}
int BulkBloomFilter::readBit(const int index) {
return 0;
// TODO
}
void BulkBloomFilter::writeBit(const int index, const int value) {
// TODO
}
void BulkBloomFilter::print(ostream& out) const
// Nothing to release explicitly; the Vector members clean themselves up.
BulkBloomFilter::~BulkBloomFilter()
{
}
int BulkBloomFilter::get_index(const Address& addr)
void
BulkBloomFilter::clear()
{
return addr.bitSelect( RubySystem::getBlockSizeBits(), RubySystem::getBlockSizeBits() + m_filter_size_bits - 1);
for (int i = 0; i < m_filter_size; i++) {
m_filter[i] = 0;
}
}
Address BulkBloomFilter::permute(const Address & addr){
// permutes the original address bits according to Table 5
int block_offset = RubySystem::getBlockSizeBits();
physical_address_t part1 = addr.bitSelect( block_offset, block_offset + 6 );
physical_address_t part2 = addr.bitSelect( block_offset + 9, block_offset + 9 );
physical_address_t part3 = addr.bitSelect( block_offset + 11, block_offset + 11 );
physical_address_t part4 = addr.bitSelect( block_offset + 17, block_offset + 17 );
physical_address_t part5 = addr.bitSelect( block_offset + 7, block_offset + 8 );
physical_address_t part6 = addr.bitSelect( block_offset + 10, block_offset + 10 );
physical_address_t part7 = addr.bitSelect( block_offset + 12, block_offset + 12 );
physical_address_t part8 = addr.bitSelect( block_offset + 13, block_offset + 13 );
physical_address_t part9 = addr.bitSelect( block_offset + 15, block_offset + 16 );
physical_address_t part10 = addr.bitSelect( block_offset + 18, block_offset + 20 );
physical_address_t part11 = addr.bitSelect( block_offset + 14, block_offset + 14 );
physical_address_t result = (part1 << 14 ) | (part2 << 13 ) | (part3 << 12 ) | (part4 << 11 ) | (part5 << 9) | (part6 << 8)
| (part7 << 7) | (part8 << 6) | (part9 << 4) | (part10 << 1) | (part11);
// assume 32 bit addresses (both virtual and physical)
// select the remaining high-order 11 bits
physical_address_t remaining_bits = (addr.bitSelect( block_offset + 21, 31 )) << 21;
result = result | remaining_bits;
return Address(result);
void
BulkBloomFilter::increment(const Address& addr)
{
// Not used
}
void
BulkBloomFilter::decrement(const Address& addr)
{
// Not used
}
void
BulkBloomFilter::merge(AbstractBloomFilter * other_filter)
{
// TODO
}
void
BulkBloomFilter::set(const Address& addr)
{
// c0 contains the cache index bits
int set_bits = m_sector_bits;
int block_bits = RubySystem::getBlockSizeBits();
int c0 = addr.bitSelect( block_bits, block_bits + set_bits - 1);
// c1 contains the lower m_sector_bits permuted bits
//Address permuted_bits = permute(addr);
//int c1 = permuted_bits.bitSelect(0, set_bits-1);
int c1 = addr.bitSelect( block_bits+set_bits, (block_bits+2*set_bits) - 1);
//ASSERT(c0 < (m_filter_size/2));
//ASSERT(c0 + (m_filter_size/2) < m_filter_size);
//ASSERT(c1 < (m_filter_size/2));
// set v0 bit
m_filter[c0 + (m_filter_size/2)] = 1;
// set v1 bit
m_filter[c1] = 1;
}
void
BulkBloomFilter::unset(const Address& addr)
{
// not used
}
bool
BulkBloomFilter::isSet(const Address& addr)
{
// c0 contains the cache index bits
int set_bits = m_sector_bits;
int block_bits = RubySystem::getBlockSizeBits();
int c0 = addr.bitSelect( block_bits, block_bits + set_bits - 1);
// c1 contains the lower 10 permuted bits
//Address permuted_bits = permute(addr);
//int c1 = permuted_bits.bitSelect(0, set_bits-1);
int c1 = addr.bitSelect( block_bits+set_bits, (block_bits+2*set_bits) - 1);
//ASSERT(c0 < (m_filter_size/2));
//ASSERT(c0 + (m_filter_size/2) < m_filter_size);
//ASSERT(c1 < (m_filter_size/2));
// set v0 bit
m_temp_filter[c0 + (m_filter_size/2)] = 1;
// set v1 bit
m_temp_filter[c1] = 1;
// perform filter intersection. If any c part is 0, no possibility
// of address being in signature. get first c intersection part
bool zero = false;
for (int i = 0; i < m_filter_size/2; ++i){
// get intersection of signatures
m_temp_filter[i] = m_temp_filter[i] && m_filter[i];
zero = zero || m_temp_filter[i];
}
zero = !zero;
if (zero) {
// one section is zero, no possiblility of address in signature
// reset bits we just set
m_temp_filter[c0 + (m_filter_size / 2)] = 0;
m_temp_filter[c1] = 0;
return false;
}
// check second section
zero = false;
for(int i = m_filter_size / 2; i < m_filter_size; ++i) {
// get intersection of signatures
m_temp_filter[i] = m_temp_filter[i] && m_filter[i];
zero = zero || m_temp_filter[i];
}
zero = !zero;
if (zero) {
// one section is zero, no possiblility of address in signature
m_temp_filter[c0 + (m_filter_size / 2)] = 0;
m_temp_filter[c1] = 0;
return false;
}
// one section has at least one bit set
m_temp_filter[c0 + (m_filter_size / 2)] = 0;
m_temp_filter[c1] = 0;
return true;
}
int
BulkBloomFilter::getCount(const Address& addr)
{
// not used
return 0;
}
int
BulkBloomFilter::getTotalCount()
{
int count = 0;
for (int i = 0; i < m_filter_size; i++) {
if (m_filter[i]) {
count++;
}
}
return count;
}
int
BulkBloomFilter::getIndex(const Address& addr)
{
return get_index(addr);
}
int
BulkBloomFilter::readBit(const int index)
{
return 0;
// TODO
}
void
BulkBloomFilter::writeBit(const int index, const int value)
{
// TODO
}
void
BulkBloomFilter::print(ostream& out) const
{
}
int
BulkBloomFilter::get_index(const Address& addr)
{
return addr.bitSelect(RubySystem::getBlockSizeBits(),
RubySystem::getBlockSizeBits() +
m_filter_size_bits - 1);
}
Address
BulkBloomFilter::permute(const Address & addr)
{
// permutes the original address bits according to Table 5
int block_offset = RubySystem::getBlockSizeBits();
physical_address_t part1 = addr.bitSelect(block_offset, block_offset + 6),
part2 = addr.bitSelect(block_offset + 9, block_offset + 9),
part3 = addr.bitSelect(block_offset + 11, block_offset + 11),
part4 = addr.bitSelect(block_offset + 17, block_offset + 17),
part5 = addr.bitSelect(block_offset + 7, block_offset + 8),
part6 = addr.bitSelect(block_offset + 10, block_offset + 10),
part7 = addr.bitSelect(block_offset + 12, block_offset + 12),
part8 = addr.bitSelect(block_offset + 13, block_offset + 13),
part9 = addr.bitSelect(block_offset + 15, block_offset + 16),
part10 = addr.bitSelect(block_offset + 18, block_offset + 20),
part11 = addr.bitSelect(block_offset + 14, block_offset + 14);
physical_address_t result =
(part1 << 14) | (part2 << 13) | (part3 << 12) | (part4 << 11) |
(part5 << 9) | (part6 << 8) | (part7 << 7) | (part8 << 6) |
(part9 << 4) | (part10 << 1) | (part11);
// assume 32 bit addresses (both virtual and physical)
// select the remaining high-order 11 bits
physical_address_t remaining_bits =
addr.bitSelect(block_offset + 21, 31) << 21;
result = result | remaining_bits;
return Address(result);
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,60 +26,50 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* BulkBloomFilter.hh
*
* Description:
*
*
*/
#ifndef BULK_BLOOM_FILTER_H
#define BULK_BLOOM_FILTER_H
#ifndef __MEM_RUBY_FILTERS_BULKBLOOMFILTER_HH__
#define __MEM_RUBY_FILTERS_BULKBLOOMFILTER_HH__
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/filters/AbstractBloomFilter.hh"
class BulkBloomFilter : public AbstractBloomFilter {
public:
class BulkBloomFilter : public AbstractBloomFilter
{
public:
BulkBloomFilter(string config);
~BulkBloomFilter();
~BulkBloomFilter();
BulkBloomFilter(string config);
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(AbstractBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(AbstractBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
bool isSet(const Address& addr);
int getCount(const Address& addr);
int getTotalCount();
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
bool isSet(const Address& addr);
int getCount(const Address& addr);
int getTotalCount();
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
void print(ostream& out) const;
void print(ostream& out) const;
private:
int get_index(const Address& addr);
Address permute(const Address & addr);
private:
Vector<int> m_filter;
Vector<int> m_temp_filter;
int get_index(const Address& addr);
Address permute(const Address & addr);
int m_filter_size;
int m_filter_size_bits;
Vector<int> m_filter;
Vector<int> m_temp_filter;
int m_sector_bits;
int m_filter_size;
int m_filter_size_bits;
int m_sector_bits;
int m_count_bits;
int m_count;
int m_count_bits;
int m_count;
};
#endif
#endif // __MEM_RUBY_FILTERS_BULKBLOOMFILTER_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,123 +26,122 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* GenericBloomFilter.hh
*
* Description:
*
*
*/
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/filters/GenericBloomFilter.hh"
#include "mem/ruby/filters/LSB_CountingBloomFilter.hh"
#include "mem/ruby/filters/NonCountingBloomFilter.hh"
#include "mem/ruby/filters/BulkBloomFilter.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/filters/BlockBloomFilter.hh"
#include "mem/ruby/filters/MultiGrainBloomFilter.hh"
#include "mem/ruby/filters/MultiBitSelBloomFilter.hh"
#include "mem/ruby/filters/BulkBloomFilter.hh"
#include "mem/ruby/filters/GenericBloomFilter.hh"
#include "mem/ruby/filters/H3BloomFilter.hh"
#include "mem/ruby/filters/LSB_CountingBloomFilter.hh"
#include "mem/ruby/filters/MultiBitSelBloomFilter.hh"
#include "mem/ruby/filters/MultiGrainBloomFilter.hh"
#include "mem/ruby/filters/NonCountingBloomFilter.hh"
GenericBloomFilter::GenericBloomFilter(string config)
{
string tail(config);
string head = string_split(tail,'_');
string tail(config);
string head = string_split(tail,'_');
if (head == "LSB_Counting" ) {
m_filter = new LSB_CountingBloomFilter(tail);
}
else if(head == "NonCounting" ) {
m_filter = new NonCountingBloomFilter(tail);
}
else if(head == "Bulk" ) {
m_filter = new BulkBloomFilter(tail);
}
else if(head == "Block") {
m_filter = new BlockBloomFilter(tail);
}
else if(head == "Multigrain"){
m_filter = new MultiGrainBloomFilter(tail);
}
else if(head == "MultiBitSel"){
m_filter = new MultiBitSelBloomFilter(tail);
}
else if(head == "H3"){
m_filter = new H3BloomFilter(tail);
}
else {
assert(0);
}
if (head == "LSB_Counting" ) {
m_filter = new LSB_CountingBloomFilter(tail);
} else if(head == "NonCounting" ) {
m_filter = new NonCountingBloomFilter(tail);
} else if(head == "Bulk" ) {
m_filter = new BulkBloomFilter(tail);
} else if(head == "Block") {
m_filter = new BlockBloomFilter(tail);
} else if(head == "Multigrain"){
m_filter = new MultiGrainBloomFilter(tail);
} else if(head == "MultiBitSel"){
m_filter = new MultiBitSelBloomFilter(tail);
} else if(head == "H3"){
m_filter = new H3BloomFilter(tail);
} else {
assert(0);
}
}
GenericBloomFilter::~GenericBloomFilter()
{
delete m_filter;
delete m_filter;
}
void GenericBloomFilter::clear()
void
GenericBloomFilter::clear()
{
m_filter->clear();
m_filter->clear();
}
void GenericBloomFilter::increment(const Address& addr)
void
GenericBloomFilter::increment(const Address& addr)
{
m_filter->increment(addr);
m_filter->increment(addr);
}
void GenericBloomFilter::decrement(const Address& addr)
void
GenericBloomFilter::decrement(const Address& addr)
{
m_filter->decrement(addr);
m_filter->decrement(addr);
}
void GenericBloomFilter::merge(GenericBloomFilter * other_filter)
void
GenericBloomFilter::merge(GenericBloomFilter * other_filter)
{
m_filter->merge(other_filter->getFilter());
m_filter->merge(other_filter->getFilter());
}
void GenericBloomFilter::set(const Address& addr)
void
GenericBloomFilter::set(const Address& addr)
{
m_filter->set(addr);
m_filter->set(addr);
}
void GenericBloomFilter::unset(const Address& addr)
void
GenericBloomFilter::unset(const Address& addr)
{
m_filter->unset(addr);
m_filter->unset(addr);
}
bool GenericBloomFilter::isSet(const Address& addr)
bool
GenericBloomFilter::isSet(const Address& addr)
{
return m_filter->isSet(addr);
return m_filter->isSet(addr);
}
int GenericBloomFilter::getCount(const Address& addr)
int
GenericBloomFilter::getCount(const Address& addr)
{
return m_filter->getCount(addr);
return m_filter->getCount(addr);
}
int GenericBloomFilter::getTotalCount()
int
GenericBloomFilter::getTotalCount()
{
return m_filter->getTotalCount();
return m_filter->getTotalCount();
}
int GenericBloomFilter::getIndex(const Address& addr)
int
GenericBloomFilter::getIndex(const Address& addr)
{
return m_filter->getIndex(addr);
return m_filter->getIndex(addr);
}
int GenericBloomFilter::readBit(const int index) {
return m_filter->readBit(index);
}
void GenericBloomFilter::writeBit(const int index, const int value) {
m_filter->writeBit(index, value);
}
void GenericBloomFilter::print(ostream& out) const
int
GenericBloomFilter::readBit(const int index)
{
return m_filter->print(out);
return m_filter->readBit(index);
}
void
GenericBloomFilter::writeBit(const int index, const int value)
{
m_filter->writeBit(index, value);
}
void
GenericBloomFilter::print(ostream& out) const
{
return m_filter->print(out);
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,67 +26,54 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* GenericBloomFilter.hh
*
* Description:
*
*
*/
#ifndef GENERIC_BLOOM_FILTER_H
#define GENERIC_BLOOM_FILTER_H
#ifndef __MEM_RUBY_FILTERS_GENERICBLOOMFILTER_HH__
#define __MEM_RUBY_FILTERS_GENERICBLOOMFILTER_HH__
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/filters/AbstractBloomFilter.hh"
class GenericBloomFilter {
public:
class GenericBloomFilter
{
public:
GenericBloomFilter(string config);
~GenericBloomFilter();
// Constructors
GenericBloomFilter(string config);
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(GenericBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
AbstractBloomFilter *
getFilter()
{
return m_filter;
}
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(GenericBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
AbstractBloomFilter * getFilter(){
return m_filter;
}
bool isSet(const Address& addr);
bool isSet(const Address& addr);
int getCount(const Address& addr);
int getCount(const Address& addr);
int getTotalCount();
int getTotalCount();
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
void print(ostream& out) const;
void printConfig(ostream& out) { out << "GenericBloomFilter" << endl; }
void print(ostream& out) const;
void printConfig(ostream& out) { out << "GenericBloomFilter" << endl; }
// Destructor
~GenericBloomFilter();
private:
AbstractBloomFilter* m_filter;
private:
AbstractBloomFilter* m_filter;
};
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const GenericBloomFilter& obj)
inline ostream&
operator<<(ostream& out, const GenericBloomFilter& obj)
{
obj.print(out);
out << flush;
return out;
obj.print(out);
out << flush;
return out;
}
#endif
#endif // __MEM_RUBY_FILTERS_GENERICBLOOMFILTER_HH__

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,76 +26,65 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* H3BloomFilter.hh
*
* Description:
*
*
*/
#ifndef H3_BLOOM_FILTER_H
#define H3_BLOOM_FILTER_H
#ifndef __MEM_RUBY_FILTERS_H3BLOOMFILTER_HH__
#define __MEM_RUBY_FILTERS_H3BLOOMFILTER_HH__
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/filters/AbstractBloomFilter.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/System.hh"
class H3BloomFilter : public AbstractBloomFilter {
public:
class H3BloomFilter : public AbstractBloomFilter
{
public:
H3BloomFilter(string config);
~H3BloomFilter();
~H3BloomFilter();
H3BloomFilter(string config);
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(AbstractBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(AbstractBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
bool isSet(const Address& addr);
int getCount(const Address& addr);
int getTotalCount();
void print(ostream& out) const;
bool isSet(const Address& addr);
int getCount(const Address& addr);
int getTotalCount();
void print(ostream& out) const;
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
int
operator[](const int index) const
{
return this->m_filter[index];
}
int operator[](const int index) const{
return this->m_filter[index];
}
private:
int get_index(const Address& addr, int hashNumber);
private:
int hash_H3(uint64 value, int index);
int get_index(const Address& addr, int hashNumber);
Vector<int> m_filter;
int m_filter_size;
int m_num_hashes;
int m_filter_size_bits;
int hash_H3(uint64 value, int index);
int m_par_filter_size;
int m_par_filter_size_bits;
Vector<int> m_filter;
int m_filter_size;
int m_num_hashes;
int m_filter_size_bits;
int m_count_bits;
int m_count;
int m_par_filter_size;
int m_par_filter_size_bits;
int m_count_bits;
int m_count;
int primes_list[6];// = {9323,11279,10247,30637,25717,43711};
int mults_list[6]; //= {255,29,51,3,77,43};
int adds_list[6]; //= {841,627,1555,241,7777,65391};
bool isParallel;
int primes_list[6];// = {9323,11279,10247,30637,25717,43711};
int mults_list[6]; //= {255,29,51,3,77,43};
int adds_list[6]; //= {841,627,1555,241,7777,65391};
bool isParallel;
};
#endif
#endif // __MEM_RUBY_FILTERS_H3BLOOMFILTER_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,116 +26,126 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* LSB_CountingBloomFilter.cc
*
* Description:
*
*
*/
#include "mem/ruby/filters/LSB_CountingBloomFilter.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/filters/LSB_CountingBloomFilter.hh"
LSB_CountingBloomFilter::LSB_CountingBloomFilter(string str)
{
string tail(str);
string head = string_split(tail, ':');
string tail(str);
string head = string_split(tail, ':');
m_filter_size = atoi(head.c_str());
m_filter_size_bits = log_int(m_filter_size);
m_filter_size = atoi(head.c_str());
m_filter_size_bits = log_int(m_filter_size);
m_count = atoi(tail.c_str());
m_count_bits = log_int(m_count);
m_count = atoi(tail.c_str());
m_count_bits = log_int(m_count);
m_filter.setSize(m_filter_size);
clear();
m_filter.setSize(m_filter_size);
clear();
}
LSB_CountingBloomFilter::~LSB_CountingBloomFilter(){
}
void LSB_CountingBloomFilter::clear()
{
for (int i = 0; i < m_filter_size; i++) {
m_filter[i] = 0;
}
}
void LSB_CountingBloomFilter::increment(const Address& addr)
{
int i = get_index(addr);
if (m_filter[i] < m_count);
m_filter[i] += 1;
}
void LSB_CountingBloomFilter::decrement(const Address& addr)
{
int i = get_index(addr);
if (m_filter[i] > 0)
m_filter[i] -= 1;
}
void LSB_CountingBloomFilter::merge(AbstractBloomFilter * other_filter)
{
// TODO
}
void LSB_CountingBloomFilter::set(const Address& addr)
{
// TODO
}
void LSB_CountingBloomFilter::unset(const Address& addr)
{
// TODO
}
bool LSB_CountingBloomFilter::isSet(const Address& addr)
{
// TODO
return false;
}
int LSB_CountingBloomFilter::getCount(const Address& addr)
{
return m_filter[get_index(addr)];
}
int LSB_CountingBloomFilter::getTotalCount()
{
int count = 0;
for (int i = 0; i < m_filter_size; i++) {
count += m_filter[i];
}
return count;
}
int LSB_CountingBloomFilter::getIndex(const Address& addr)
{
return get_index(addr);
}
void LSB_CountingBloomFilter::print(ostream& out) const
LSB_CountingBloomFilter::~LSB_CountingBloomFilter()
{
}
int LSB_CountingBloomFilter::readBit(const int index) {
return 0;
// TODO
}
void LSB_CountingBloomFilter::writeBit(const int index, const int value) {
// TODO
}
int LSB_CountingBloomFilter::get_index(const Address& addr)
void
LSB_CountingBloomFilter::clear()
{
return addr.bitSelect( RubySystem::getBlockSizeBits(), RubySystem::getBlockSizeBits() + m_filter_size_bits - 1);
for (int i = 0; i < m_filter_size; i++) {
m_filter[i] = 0;
}
}
void
LSB_CountingBloomFilter::increment(const Address& addr)
{
int i = get_index(addr);
if (m_filter[i] < m_count)
m_filter[i] += 1;
}
void
LSB_CountingBloomFilter::decrement(const Address& addr)
{
int i = get_index(addr);
if (m_filter[i] > 0)
m_filter[i] -= 1;
}
void
LSB_CountingBloomFilter::merge(AbstractBloomFilter * other_filter)
{
// TODO
}
void
LSB_CountingBloomFilter::set(const Address& addr)
{
// TODO
}
void
LSB_CountingBloomFilter::unset(const Address& addr)
{
// TODO
}
bool
LSB_CountingBloomFilter::isSet(const Address& addr)
{
// TODO
return false;
}
int
LSB_CountingBloomFilter::getCount(const Address& addr)
{
return m_filter[get_index(addr)];
}
int
LSB_CountingBloomFilter::getTotalCount()
{
int count = 0;
for (int i = 0; i < m_filter_size; i++) {
count += m_filter[i];
}
return count;
}
int
LSB_CountingBloomFilter::getIndex(const Address& addr)
{
return get_index(addr);
}
void
LSB_CountingBloomFilter::print(ostream& out) const
{
}
int
LSB_CountingBloomFilter::readBit(const int index)
{
return 0;
// TODO
}
void
LSB_CountingBloomFilter::writeBit(const int index, const int value)
{
// TODO
}
int
LSB_CountingBloomFilter::get_index(const Address& addr)
{
return addr.bitSelect(RubySystem::getBlockSizeBits(),
RubySystem::getBlockSizeBits() +
m_filter_size_bits - 1);
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,55 +26,45 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* LSB_CountingBloomFilter.hh
*
* Description:
*
*
*/
#ifndef LSB_COUNTING_BLOOM_FILTER_H
#define LSB_COUNTING_BLOOM_FILTER_H
#ifndef __MEM_RUBY_FILTERS_LSBCOUNTINGBLOOMFILTER_HH__
#define __MEM_RUBY_FILTERS_LSBCOUNTINGBLOOMFILTER_HH__
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/filters/AbstractBloomFilter.hh"
class LSB_CountingBloomFilter : public AbstractBloomFilter {
public:
class LSB_CountingBloomFilter : public AbstractBloomFilter
{
public:
LSB_CountingBloomFilter(string config);
~LSB_CountingBloomFilter();
~LSB_CountingBloomFilter();
LSB_CountingBloomFilter(string config);
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(AbstractBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(AbstractBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
bool isSet(const Address& addr);
int getCount(const Address& addr);
int getTotalCount();
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
bool isSet(const Address& addr);
int getCount(const Address& addr);
int getTotalCount();
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
void print(ostream& out) const;
void print(ostream& out) const;
private:
int get_index(const Address& addr);
private:
Vector<int> m_filter;
int m_filter_size;
int m_filter_size_bits;
int get_index(const Address& addr);
Vector<int> m_filter;
int m_filter_size;
int m_filter_size_bits;
int m_count_bits;
int m_count;
int m_count_bits;
int m_count;
};
#endif
#endif // __MEM_RUBY_FILTERS_LSBCOUNTINGBLOOMFILTER_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,165 +26,173 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NonCountingBloomFilter.cc
*
* Description:
*
*
*/
#include "mem/ruby/filters/MultiBitSelBloomFilter.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/filters/MultiBitSelBloomFilter.hh"
MultiBitSelBloomFilter::MultiBitSelBloomFilter(string str)
{
string tail(str);
string head = string_split(tail, '_');
string tail(str);
string head = string_split(tail, '_');
// head contains filter size, tail contains bit offset from block number
m_filter_size = atoi(head.c_str());
// head contains filter size, tail contains bit offset from block number
m_filter_size = atoi(head.c_str());
head = string_split(tail, '_');
m_num_hashes = atoi(head.c_str());
head = string_split(tail, '_');
m_num_hashes = atoi(head.c_str());
head = string_split(tail, '_');
m_skip_bits = atoi(head.c_str());
head = string_split(tail, '_');
m_skip_bits = atoi(head.c_str());
if(tail == "Regular") {
isParallel = false;
} else if (tail == "Parallel") {
isParallel = true;
} else {
cout << "ERROR: Incorrect config string for MultiBitSel Bloom! :"
<< str << endl;
assert(0);
}
if(tail == "Regular") {
isParallel = false;
} else if (tail == "Parallel") {
isParallel = true;
} else {
cout << "ERROR: Incorrect config string for MultiBitSel Bloom! :" << str << endl;
m_filter_size_bits = log_int(m_filter_size);
m_par_filter_size = m_filter_size/m_num_hashes;
m_par_filter_size_bits = log_int(m_par_filter_size);
m_filter.setSize(m_filter_size);
clear();
}
MultiBitSelBloomFilter::~MultiBitSelBloomFilter()
{
}
void
MultiBitSelBloomFilter::clear()
{
for (int i = 0; i < m_filter_size; i++) {
m_filter[i] = 0;
}
}
void
MultiBitSelBloomFilter::increment(const Address& addr)
{
// Not used
}
void
MultiBitSelBloomFilter::decrement(const Address& addr)
{
// Not used
}
void
MultiBitSelBloomFilter::merge(AbstractBloomFilter *other_filter)
{
// assumes both filters are the same size!
MultiBitSelBloomFilter * temp = (MultiBitSelBloomFilter*) other_filter;
for(int i = 0; i < m_filter_size; ++i){
m_filter[i] |= (*temp)[i];
}
}
void
MultiBitSelBloomFilter::set(const Address& addr)
{
for (int i = 0; i < m_num_hashes; i++) {
int idx = get_index(addr, i);
m_filter[idx] = 1;
}
}
void
MultiBitSelBloomFilter::unset(const Address& addr)
{
cout << "ERROR: Unset should never be called in a Bloom filter";
assert(0);
}
m_filter_size_bits = log_int(m_filter_size);
m_par_filter_size = m_filter_size/m_num_hashes;
m_par_filter_size_bits = log_int(m_par_filter_size);
m_filter.setSize(m_filter_size);
clear();
}
MultiBitSelBloomFilter::~MultiBitSelBloomFilter(){
}
void MultiBitSelBloomFilter::clear()
bool
MultiBitSelBloomFilter::isSet(const Address& addr)
{
for (int i = 0; i < m_filter_size; i++) {
m_filter[i] = 0;
}
bool res = true;
for (int i=0; i < m_num_hashes; i++) {
int idx = get_index(addr, i);
res = res && m_filter[idx];
}
return res;
}
void MultiBitSelBloomFilter::increment(const Address& addr)
int
MultiBitSelBloomFilter::getCount(const Address& addr)
{
// Not used
return isSet(addr)? 1: 0;
}
void MultiBitSelBloomFilter::decrement(const Address& addr)
int
MultiBitSelBloomFilter::getIndex(const Address& addr)
{
// Not used
return 0;
}
void MultiBitSelBloomFilter::merge(AbstractBloomFilter * other_filter){
// assumes both filters are the same size!
MultiBitSelBloomFilter * temp = (MultiBitSelBloomFilter*) other_filter;
for(int i=0; i < m_filter_size; ++i){
m_filter[i] |= (*temp)[i];
}
}
void MultiBitSelBloomFilter::set(const Address& addr)
int
MultiBitSelBloomFilter::readBit(const int index)
{
for (int i = 0; i < m_num_hashes; i++) {
int idx = get_index(addr, i);
m_filter[idx] = 1;
//Profile hash value distribution
//g_system_ptr->getProfiler()->getXactProfiler()->profileHashValue(i, idx); //gem5:Arka for decomissioning of log_tm
}
return 0;
}
void MultiBitSelBloomFilter::unset(const Address& addr)
{
cout << "ERROR: Unset should never be called in a Bloom filter";
assert(0);
}
bool MultiBitSelBloomFilter::isSet(const Address& addr)
{
bool res = true;
for (int i=0; i < m_num_hashes; i++) {
int idx = get_index(addr, i);
res = res && m_filter[idx];
}
return res;
}
int MultiBitSelBloomFilter::getCount(const Address& addr)
{
return isSet(addr)? 1: 0;
}
int MultiBitSelBloomFilter::getIndex(const Address& addr)
{
return 0;
}
int MultiBitSelBloomFilter::readBit(const int index) {
return 0;
}
void MultiBitSelBloomFilter::writeBit(const int index, const int value) {
}
int MultiBitSelBloomFilter::getTotalCount()
{
int count = 0;
for (int i = 0; i < m_filter_size; i++) {
count += m_filter[i];
}
return count;
}
void MultiBitSelBloomFilter::print(ostream& out) const
void
MultiBitSelBloomFilter::writeBit(const int index, const int value)
{
}
int MultiBitSelBloomFilter::get_index(const Address& addr, int i)
int
MultiBitSelBloomFilter::getTotalCount()
{
// m_skip_bits is used to perform BitSelect after skipping some bits. Used to simulate BitSel hashing on larger than cache-line granularities
uint64 x = (addr.getLineAddress()) >> m_skip_bits;
int y = hash_bitsel(x, i, m_num_hashes, 30, m_filter_size_bits);
//36-bit addresses, 6-bit cache lines
int count = 0;
if(isParallel) {
return (y % m_par_filter_size) + i*m_par_filter_size;
} else {
return y % m_filter_size;
}
for (int i = 0; i < m_filter_size; i++) {
count += m_filter[i];
}
return count;
}
int MultiBitSelBloomFilter::hash_bitsel(uint64 value, int index, int jump, int maxBits, int numBits) {
uint64 mask = 1;
int result = 0;
int bit, i;
for(i = 0; i < numBits; i++) {
bit = (index + jump*i) % maxBits;
if (value & (mask << bit)) result += mask << i;
}
return result;
void
MultiBitSelBloomFilter::print(ostream& out) const
{
}
int
MultiBitSelBloomFilter::get_index(const Address& addr, int i)
{
// m_skip_bits is used to perform BitSelect after skipping some
// bits. Used to simulate BitSel hashing on larger than cache-line
// granularities
uint64 x = (addr.getLineAddress()) >> m_skip_bits;
int y = hash_bitsel(x, i, m_num_hashes, 30, m_filter_size_bits);
//36-bit addresses, 6-bit cache lines
if (isParallel) {
return (y % m_par_filter_size) + i*m_par_filter_size;
} else {
return y % m_filter_size;
}
}
int
MultiBitSelBloomFilter::hash_bitsel(uint64 value, int index, int jump,
int maxBits, int numBits)
{
uint64 mask = 1;
int result = 0;
int bit, i;
for (i = 0; i < numBits; i++) {
bit = (index + jump*i) % maxBits;
if (value & (mask << bit)) result += mask << i;
}
return result;
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,70 +26,63 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* MultiBitSelBloomFilter.hh
*
* Description:
*
*
*/
#ifndef MULTIBITSEL_BLOOM_FILTER_H
#define MULTIBITSEL_BLOOM_FILTER_H
#ifndef __MEM_RUBY_FILTERS_MULTIBITSELBLOOMFILTER_HH__
#define __MEM_RUBY_FILTERS_MULTIBITSELBLOOMFILTER_HH__
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/filters/AbstractBloomFilter.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/System.hh"
class MultiBitSelBloomFilter : public AbstractBloomFilter {
public:
class MultiBitSelBloomFilter : public AbstractBloomFilter
{
public:
MultiBitSelBloomFilter(string config);
~MultiBitSelBloomFilter();
~MultiBitSelBloomFilter();
MultiBitSelBloomFilter(string config);
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(AbstractBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(AbstractBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
bool isSet(const Address& addr);
int getCount(const Address& addr);
int getTotalCount();
void print(ostream& out) const;
bool isSet(const Address& addr);
int getCount(const Address& addr);
int getTotalCount();
void print(ostream& out) const;
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
int
operator[](const int index) const
{
return this->m_filter[index];
}
int operator[](const int index) const{
return this->m_filter[index];
}
private:
int get_index(const Address& addr, int hashNumber);
private:
int hash_bitsel(uint64 value, int index, int jump, int maxBits,
int numBits);
int get_index(const Address& addr, int hashNumber);
Vector<int> m_filter;
int m_filter_size;
int m_num_hashes;
int m_filter_size_bits;
int m_skip_bits;
int hash_bitsel(uint64 value, int index, int jump, int maxBits, int numBits);
int m_par_filter_size;
int m_par_filter_size_bits;
Vector<int> m_filter;
int m_filter_size;
int m_num_hashes;
int m_filter_size_bits;
int m_skip_bits;
int m_par_filter_size;
int m_par_filter_size_bits;
int m_count_bits;
int m_count;
bool isParallel;
int m_count_bits;
int m_count;
bool isParallel;
};
#endif
#endif // __MEM_RUBY_FILTERS_MULTIBITSELBLOOMFILTER_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,142 +26,156 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* MultiGrainBloomFilter.cc
*
* Description:
*
*
*/
#include "mem/ruby/filters/MultiGrainBloomFilter.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/filters/MultiGrainBloomFilter.hh"
MultiGrainBloomFilter::MultiGrainBloomFilter(string str)
{
string tail(str);
string tail(str);
// split into the 2 filter sizes
string head = string_split(tail, '_');
// split into the 2 filter sizes
string head = string_split(tail, '_');
// head contains size of 1st bloom filter, tail contains size of 2nd bloom filter
// head contains size of 1st bloom filter, tail contains size of
// 2nd bloom filter
m_filter_size = atoi(head.c_str());
m_filter_size_bits = log_int(m_filter_size);
m_filter_size = atoi(head.c_str());
m_filter_size_bits = log_int(m_filter_size);
m_page_filter_size = atoi(tail.c_str());
m_page_filter_size_bits = log_int(m_page_filter_size);
m_page_filter_size = atoi(tail.c_str());
m_page_filter_size_bits = log_int(m_page_filter_size);
m_filter.setSize(m_filter_size);
m_page_filter.setSize(m_page_filter_size);
clear();
m_filter.setSize(m_filter_size);
m_page_filter.setSize(m_page_filter_size);
clear();
}
MultiGrainBloomFilter::~MultiGrainBloomFilter(){
}
void MultiGrainBloomFilter::clear()
{
for (int i = 0; i < m_filter_size; i++) {
m_filter[i] = 0;
}
for(int i=0; i < m_page_filter_size; ++i){
m_page_filter[i] = 0;
}
}
void MultiGrainBloomFilter::increment(const Address& addr)
{
// Not used
}
void MultiGrainBloomFilter::decrement(const Address& addr)
{
// Not used
}
void MultiGrainBloomFilter::merge(AbstractBloomFilter * other_filter)
{
// TODO
}
void MultiGrainBloomFilter::set(const Address& addr)
{
int i = get_block_index(addr);
assert(i < m_filter_size);
assert(get_page_index(addr) < m_page_filter_size);
m_filter[i] = 1;
m_page_filter[i] = 1;
}
void MultiGrainBloomFilter::unset(const Address& addr)
{
// not used
}
bool MultiGrainBloomFilter::isSet(const Address& addr)
{
int i = get_block_index(addr);
assert(i < m_filter_size);
assert(get_page_index(addr) < m_page_filter_size);
// we have to have both indices set
return (m_filter[i] && m_page_filter[i]);
}
int MultiGrainBloomFilter::getCount(const Address& addr)
{
// not used
return 0;
}
int MultiGrainBloomFilter::getTotalCount()
{
int count = 0;
for (int i = 0; i < m_filter_size; i++) {
count += m_filter[i];
}
for(int i=0; i < m_page_filter_size; ++i){
count += m_page_filter[i] = 0;
}
return count;
}
int MultiGrainBloomFilter::getIndex(const Address& addr)
{
return 0;
// TODO
}
int MultiGrainBloomFilter::readBit(const int index) {
return 0;
// TODO
}
void MultiGrainBloomFilter::writeBit(const int index, const int value) {
// TODO
}
void MultiGrainBloomFilter::print(ostream& out) const
MultiGrainBloomFilter::~MultiGrainBloomFilter()
{
}
int MultiGrainBloomFilter::get_block_index(const Address& addr)
void
MultiGrainBloomFilter::clear()
{
// grap a chunk of bits after byte offset
return addr.bitSelect( RubySystem::getBlockSizeBits(), RubySystem::getBlockSizeBits() + m_filter_size_bits - 1);
for (int i = 0; i < m_filter_size; i++) {
m_filter[i] = 0;
}
for(int i=0; i < m_page_filter_size; ++i){
m_page_filter[i] = 0;
}
}
int MultiGrainBloomFilter::get_page_index(const Address & addr)
void
MultiGrainBloomFilter::increment(const Address& addr)
{
// grap a chunk of bits after first chunk
return addr.bitSelect( RubySystem::getBlockSizeBits() + m_filter_size_bits - 1,
RubySystem::getBlockSizeBits() + m_filter_size_bits - 1 + m_page_filter_size_bits - 1);
// Not used
}
void
MultiGrainBloomFilter::decrement(const Address& addr)
{
// Not used
}
void
MultiGrainBloomFilter::merge(AbstractBloomFilter *other_filter)
{
// TODO
}
void
MultiGrainBloomFilter::set(const Address& addr)
{
int i = get_block_index(addr);
assert(i < m_filter_size);
assert(get_page_index(addr) < m_page_filter_size);
m_filter[i] = 1;
m_page_filter[i] = 1;
}
void
MultiGrainBloomFilter::unset(const Address& addr)
{
// not used
}
bool
MultiGrainBloomFilter::isSet(const Address& addr)
{
int i = get_block_index(addr);
assert(i < m_filter_size);
assert(get_page_index(addr) < m_page_filter_size);
// we have to have both indices set
return (m_filter[i] && m_page_filter[i]);
}
int
MultiGrainBloomFilter::getCount(const Address& addr)
{
// not used
return 0;
}
int
MultiGrainBloomFilter::getTotalCount()
{
int count = 0;
for (int i = 0; i < m_filter_size; i++) {
count += m_filter[i];
}
for(int i=0; i < m_page_filter_size; ++i) {
count += m_page_filter[i] = 0;
}
return count;
}
int
MultiGrainBloomFilter::getIndex(const Address& addr)
{
return 0;
// TODO
}
int
MultiGrainBloomFilter::readBit(const int index)
{
return 0;
// TODO
}
void
MultiGrainBloomFilter::writeBit(const int index, const int value)
{
// TODO
}
void
MultiGrainBloomFilter::print(ostream& out) const
{
}
int
MultiGrainBloomFilter::get_block_index(const Address& addr)
{
// grap a chunk of bits after byte offset
return addr.bitSelect(RubySystem::getBlockSizeBits(),
RubySystem::getBlockSizeBits() +
m_filter_size_bits - 1);
}
int
MultiGrainBloomFilter::get_page_index(const Address & addr)
{
int bits = RubySystem::getBlockSizeBits() + m_filter_size_bits - 1;
// grap a chunk of bits after first chunk
return addr.bitSelect(bits, bits + m_page_filter_size_bits - 1);
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,61 +26,51 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* MultiGrainBloomFilter.hh
*
* Description:
*
*
*/
#ifndef MULTIGRAIN_BLOOM_FILTER_H
#define MULTIGRAIN_BLOOM_FILTER_H
#ifndef __MEM_RUBY_FILTERS_MULTIGRAINBLOOMFILTER_HH__
#define __MEM_RUBY_FILTERS_MULTIGRAINBLOOMFILTER_HH__
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/filters/AbstractBloomFilter.hh"
class MultiGrainBloomFilter : public AbstractBloomFilter {
public:
class MultiGrainBloomFilter : public AbstractBloomFilter
{
public:
MultiGrainBloomFilter(string str);
~MultiGrainBloomFilter();
~MultiGrainBloomFilter();
MultiGrainBloomFilter(string str);
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(AbstractBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(AbstractBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
bool isSet(const Address& addr);
int getCount(const Address& addr);
int getTotalCount();
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
bool isSet(const Address& addr);
int getCount(const Address& addr);
int getTotalCount();
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
void print(ostream& out) const;
void print(ostream& out) const;
private:
int get_block_index(const Address& addr);
int get_page_index(const Address & addr);
private:
// The block filter
Vector<int> m_filter;
int m_filter_size;
int m_filter_size_bits;
// The page number filter
Vector<int> m_page_filter;
int m_page_filter_size;
int m_page_filter_size_bits;
int get_block_index(const Address& addr);
int get_page_index(const Address & addr);
// The block filter
Vector<int> m_filter;
int m_filter_size;
int m_filter_size_bits;
// The page number filter
Vector<int> m_page_filter;
int m_page_filter_size;
int m_page_filter_size_bits;
int m_count_bits;
int m_count;
int m_count_bits;
int m_count;
};
#endif
#endif // __MEM_RUBY_FILTERS_MULTIGRAINBLOOMFILTER_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,118 +26,126 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NonCountingBloomFilter.cc
*
* Description:
*
*
*/
#include "mem/ruby/filters/NonCountingBloomFilter.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/filters/NonCountingBloomFilter.hh"
NonCountingBloomFilter::NonCountingBloomFilter(string str)
{
string tail(str);
string head = string_split(tail, '_');
string tail(str);
string head = string_split(tail, '_');
// head contains filter size, tail contains bit offset from block number
m_filter_size = atoi(head.c_str());
m_offset = atoi(tail.c_str());
m_filter_size_bits = log_int(m_filter_size);
// head contains filter size, tail contains bit offset from block number
m_filter_size = atoi(head.c_str());
m_offset = atoi(tail.c_str());
m_filter_size_bits = log_int(m_filter_size);
m_filter.setSize(m_filter_size);
clear();
m_filter.setSize(m_filter_size);
clear();
}
NonCountingBloomFilter::~NonCountingBloomFilter(){
}
void NonCountingBloomFilter::clear()
NonCountingBloomFilter::~NonCountingBloomFilter()
{
for (int i = 0; i < m_filter_size; i++) {
}
void
NonCountingBloomFilter::clear()
{
for (int i = 0; i < m_filter_size; i++) {
m_filter[i] = 0;
}
}
void
NonCountingBloomFilter::increment(const Address& addr)
{
// Not used
}
void
NonCountingBloomFilter::decrement(const Address& addr)
{
// Not used
}
void
NonCountingBloomFilter::merge(AbstractBloomFilter *other_filter)
{
// assumes both filters are the same size!
NonCountingBloomFilter * temp = (NonCountingBloomFilter*) other_filter;
for(int i = 0; i < m_filter_size; ++i){
m_filter[i] |= (*temp)[i];
}
}
void
NonCountingBloomFilter::set(const Address& addr)
{
int i = get_index(addr);
m_filter[i] = 1;
}
void
NonCountingBloomFilter::unset(const Address& addr)
{
int i = get_index(addr);
m_filter[i] = 0;
}
}
void NonCountingBloomFilter::increment(const Address& addr)
bool
NonCountingBloomFilter::isSet(const Address& addr)
{
// Not used
int i = get_index(addr);
return (m_filter[i]);
}
void NonCountingBloomFilter::decrement(const Address& addr)
int
NonCountingBloomFilter::getCount(const Address& addr)
{
// Not used
return m_filter[get_index(addr)];
}
void NonCountingBloomFilter::merge(AbstractBloomFilter * other_filter){
// assumes both filters are the same size!
NonCountingBloomFilter * temp = (NonCountingBloomFilter*) other_filter;
for(int i=0; i < m_filter_size; ++i){
m_filter[i] |= (*temp)[i];
}
}
void NonCountingBloomFilter::set(const Address& addr)
int
NonCountingBloomFilter::getTotalCount()
{
int i = get_index(addr);
m_filter[i] = 1;
int count = 0;
for (int i = 0; i < m_filter_size; i++) {
count += m_filter[i];
}
return count;
}
void NonCountingBloomFilter::unset(const Address& addr)
{
int i = get_index(addr);
m_filter[i] = 0;
}
bool NonCountingBloomFilter::isSet(const Address& addr)
{
int i = get_index(addr);
return (m_filter[i]);
}
int NonCountingBloomFilter::getCount(const Address& addr)
{
return m_filter[get_index(addr)];
}
int NonCountingBloomFilter::getTotalCount()
{
int count = 0;
for (int i = 0; i < m_filter_size; i++) {
count += m_filter[i];
}
return count;
}
void NonCountingBloomFilter::print(ostream& out) const
void
NonCountingBloomFilter::print(ostream& out) const
{
}
int NonCountingBloomFilter::getIndex(const Address& addr)
int
NonCountingBloomFilter::getIndex(const Address& addr)
{
return get_index(addr);
return get_index(addr);
}
int NonCountingBloomFilter::readBit(const int index) {
return m_filter[index];
}
void NonCountingBloomFilter::writeBit(const int index, const int value) {
m_filter[index] = value;
}
int NonCountingBloomFilter::get_index(const Address& addr)
int
NonCountingBloomFilter::readBit(const int index)
{
return addr.bitSelect( RubySystem::getBlockSizeBits() + m_offset,
RubySystem::getBlockSizeBits() + m_offset + m_filter_size_bits - 1);
return m_filter[index];
}
void
NonCountingBloomFilter::writeBit(const int index, const int value)
{
m_filter[index] = value;
}
int
NonCountingBloomFilter::get_index(const Address& addr)
{
return addr.bitSelect(RubySystem::getBlockSizeBits() + m_offset,
RubySystem::getBlockSizeBits() + m_offset +
m_filter_size_bits - 1);
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,61 +26,53 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NonCountingBloomFilter.hh
*
* Description:
*
*
*/
#ifndef NONCOUNTING_BLOOM_FILTER_H
#define NONCOUNTING_BLOOM_FILTER_H
#ifndef __MEM_RUBY_FILTERS_NONCOUNTINGBLOOMFILTER_HH__
#define __MEM_RUBY_FILTERS_NONCOUNTINGBLOOMFILTER_HH__
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/filters/AbstractBloomFilter.hh"
class NonCountingBloomFilter : public AbstractBloomFilter {
public:
class NonCountingBloomFilter : public AbstractBloomFilter
{
public:
NonCountingBloomFilter(string config);
~NonCountingBloomFilter();
~NonCountingBloomFilter();
NonCountingBloomFilter(string config);
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(AbstractBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
void clear();
void increment(const Address& addr);
void decrement(const Address& addr);
void merge(AbstractBloomFilter * other_filter);
void set(const Address& addr);
void unset(const Address& addr);
bool isSet(const Address& addr);
int getCount(const Address& addr);
int getTotalCount();
bool isSet(const Address& addr);
int getCount(const Address& addr);
int getTotalCount();
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
int getIndex(const Address& addr);
int readBit(const int index);
void writeBit(const int index, const int value);
void print(ostream& out) const;
void print(ostream& out) const;
int
operator[](const int index) const
{
return this->m_filter[index];
}
int operator[](const int index) const{
return this->m_filter[index];
}
private:
int get_index(const Address& addr);
private:
Vector<int> m_filter;
int m_filter_size;
int m_offset;
int m_filter_size_bits;
int get_index(const Address& addr);
Vector<int> m_filter;
int m_filter_size;
int m_offset;
int m_filter_size_bits;
int m_count_bits;
int m_count;
int m_count_bits;
int m_count;
};
#endif
#endif // __MEM_RUBY_FILTERS_NONCOUNTINGBLOOMFILTER_HH__

View File

@@ -30,97 +30,101 @@
#include <algorithm>
#include "config/gems_root.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/libruby_internal.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/system/MemoryVector.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/system/MemoryVector.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/recorder/Tracer.hh"
string RubyRequestType_to_string(const RubyRequestType& obj)
string
RubyRequestType_to_string(const RubyRequestType& obj)
{
switch(obj) {
case RubyRequestType_IFETCH:
return "IFETCH";
case RubyRequestType_LD:
return "LD";
case RubyRequestType_ST:
return "ST";
case RubyRequestType_Locked_Read:
return "Locked_Read";
case RubyRequestType_Locked_Write:
return "Locked_Write";
case RubyRequestType_RMW_Read:
return "RMW_Read";
case RubyRequestType_RMW_Write:
return "RMW_Write";
case RubyRequestType_NULL:
default:
assert(0);
return "";
}
switch(obj) {
case RubyRequestType_IFETCH:
return "IFETCH";
case RubyRequestType_LD:
return "LD";
case RubyRequestType_ST:
return "ST";
case RubyRequestType_Locked_Read:
return "Locked_Read";
case RubyRequestType_Locked_Write:
return "Locked_Write";
case RubyRequestType_RMW_Read:
return "RMW_Read";
case RubyRequestType_RMW_Write:
return "RMW_Write";
case RubyRequestType_NULL:
default:
assert(0);
return "";
}
}
RubyRequestType string_to_RubyRequestType(std::string str)
RubyRequestType
string_to_RubyRequestType(std::string str)
{
if (str == "IFETCH")
return RubyRequestType_IFETCH;
else if (str == "LD")
return RubyRequestType_LD;
else if (str == "ST")
return RubyRequestType_ST;
else if (str == "Locked_Read")
return RubyRequestType_Locked_Read;
else if (str == "Locked_Write")
return RubyRequestType_Locked_Write;
else if (str == "RMW_Read")
return RubyRequestType_RMW_Read;
else if (str == "RMW_Write")
return RubyRequestType_RMW_Write;
else
assert(0);
return RubyRequestType_NULL;
if (str == "IFETCH")
return RubyRequestType_IFETCH;
else if (str == "LD")
return RubyRequestType_LD;
else if (str == "ST")
return RubyRequestType_ST;
else if (str == "Locked_Read")
return RubyRequestType_Locked_Read;
else if (str == "Locked_Write")
return RubyRequestType_Locked_Write;
else if (str == "RMW_Read")
return RubyRequestType_RMW_Read;
else if (str == "RMW_Write")
return RubyRequestType_RMW_Write;
else
assert(0);
return RubyRequestType_NULL;
}
ostream& operator<<(ostream& out, const RubyRequestType& obj)
ostream&
operator<<(ostream& out, const RubyRequestType& obj)
{
out << RubyRequestType_to_string(obj);
out << flush;
return out;
out << RubyRequestType_to_string(obj);
out << flush;
return out;
}
ostream& operator<<(std::ostream& out, const RubyRequest& obj)
ostream&
operator<<(std::ostream& out, const RubyRequest& obj)
{
out << hex << "0x" << obj.paddr << " data: 0x" << flush;
for (int i = 0; i < obj.len; i++) {
out << (int)obj.data[i];
}
out << dec << " type: " << RubyRequestType_to_string(obj.type) << endl;
return out;
out << hex << "0x" << obj.paddr << " data: 0x" << flush;
for (int i = 0; i < obj.len; i++) {
out << (int)obj.data[i];
}
out << dec << " type: " << RubyRequestType_to_string(obj.type) << endl;
return out;
}
vector<string> tokenizeString(string str, string delims)
vector<string>
tokenizeString(string str, string delims)
{
vector<string> tokens;
char* pch;
char* tmp;
const char* c_delims = delims.c_str();
tmp = new char[str.length()+1];
strcpy(tmp, str.c_str());
pch = strtok(tmp, c_delims);
while (pch != NULL) {
string tmp_str(pch);
if (tmp_str == "null") tmp_str = "";
tokens.push_back(tmp_str);
vector<string> tokens;
char* pch;
char* tmp;
const char* c_delims = delims.c_str();
tmp = new char[str.length()+1];
strcpy(tmp, str.c_str());
pch = strtok(tmp, c_delims);
while (pch != NULL) {
string tmp_str(pch);
if (tmp_str == "null") tmp_str = "";
tokens.push_back(tmp_str);
pch = strtok(NULL, c_delims);
}
delete [] tmp;
return tokens;
pch = strtok(NULL, c_delims);
}
delete [] tmp;
return tokens;
}
/*
* The current state of M5/Ruby integration breaks the libruby
* interface. This code is ifdef'd out for now so that we can move
@@ -129,41 +133,44 @@ vector<string> tokenizeString(string str, string delims)
* later date.
*/
#if 0
void libruby_init(const char* cfg_filename)
void
libruby_init(const char* cfg_filename)
{
ifstream cfg_output(cfg_filename);
ifstream cfg_output(cfg_filename);
vector<RubyObjConf> * sys_conf = new vector<RubyObjConf>;
vector<RubyObjConf> * sys_conf = new vector<RubyObjConf>;
string line;
getline(cfg_output, line) ;
while ( !cfg_output.eof() ) {
vector<string> tokens = tokenizeString(line, " ");
assert(tokens.size() >= 2);
vector<string> argv;
for (size_t i=2; i<tokens.size(); i++) {
std::replace(tokens[i].begin(), tokens[i].end(), '%', ' ');
std::replace(tokens[i].begin(), tokens[i].end(), '#', '\n');
argv.push_back(tokens[i]);
string line;
getline(cfg_output, line) ;
while ( !cfg_output.eof() ) {
vector<string> tokens = tokenizeString(line, " ");
assert(tokens.size() >= 2);
vector<string> argv;
for (size_t i=2; i<tokens.size(); i++) {
std::replace(tokens[i].begin(), tokens[i].end(), '%', ' ');
std::replace(tokens[i].begin(), tokens[i].end(), '#', '\n');
argv.push_back(tokens[i]);
}
sys_conf->push_back(RubyObjConf(tokens[0], tokens[1], argv));
tokens.clear();
argv.clear();
getline(cfg_output, line);
}
sys_conf->push_back(RubyObjConf(tokens[0], tokens[1], argv));
tokens.clear();
argv.clear();
getline(cfg_output, line);
}
RubySystem::create(*sys_conf);
delete sys_conf;
RubySystem::create(*sys_conf);
delete sys_conf;
}
#endif
RubyPortHandle libruby_get_port(const char* port_name, void (*hit_callback)(int64_t access_id))
RubyPortHandle
libruby_get_port(const char* port_name,
void (*hit_callback)(int64_t access_id))
{
//
// Fix me: Hit callback is now a non-static member function pointer of
// RubyPort and cannot be set to an arbitrary global function
//
return NULL;//static_cast<RubyPortHandle>(RubySystem::getPort(port_name, hit_callback));
//
// Fix me: Hit callback is now a non-static member function pointer of
// RubyPort and cannot be set to an arbitrary global function
//
return NULL;//static_cast<RubyPortHandle>(RubySystem::getPort(port_name, hit_callback));
}
RubyPortHandle libruby_get_port_by_name(const char* port_name)
@@ -175,17 +182,20 @@ RubyPortHandle libruby_get_port_by_name(const char* port_name)
return NULL;//static_cast<RubyPortHandle>(RubySystem::getPortOnly(port_name));
}
void libruby_write_ram(uint64_t paddr, uint8_t* data, int len)
void
libruby_write_ram(uint64_t paddr, uint8_t* data, int len)
{
RubySystem::getMemoryVector()->write(Address(paddr), data, len);
RubySystem::getMemoryVector()->write(Address(paddr), data, len);
}
void libruby_read_ram(uint64_t paddr, uint8_t* data, int len)
void
libruby_read_ram(uint64_t paddr, uint8_t* data, int len)
{
RubySystem::getMemoryVector()->read(Address(paddr), data, len);
RubySystem::getMemoryVector()->read(Address(paddr), data, len);
}
int64_t libruby_issue_request(RubyPortHandle p, struct RubyRequest request)
int64_t
libruby_issue_request(RubyPortHandle p, struct RubyRequest request)
{
//
// Fix me: Ports should now be accessed using the python configuration
@@ -194,45 +204,58 @@ int64_t libruby_issue_request(RubyPortHandle p, struct RubyRequest request)
return 0;//return static_cast<RubyPort*>(p)->makeRequest(request);
}
int libruby_tick(int n)
int
libruby_tick(int n)
{
RubySystem::getEventQueue()->triggerEvents(RubySystem::getEventQueue()->getTime() + n);
return 0;
RubyEventQueue *eventq = RubySystem::getEventQueue();
eventq->triggerEvents(eventq->getTime() + n);
return 0;
}
void libruby_destroy()
void
libruby_destroy()
{
}
const char* libruby_last_error()
const char*
libruby_last_error()
{
return "";
return "";
}
void libruby_print_config(std::ostream & out)
void
libruby_print_config(std::ostream & out)
{
RubySystem::printConfig(out);
RubySystem::printConfig(out);
}
void libruby_print_stats(std::ostream & out)
void
libruby_print_stats(std::ostream & out)
{
RubySystem::printStats(out);
RubySystem::printStats(out);
}
void libruby_playback_trace(char * trace_filename)
void
libruby_playback_trace(char * trace_filename)
{
RubySystem::getTracer()->playbackTrace(trace_filename);
RubySystem::getTracer()->playbackTrace(trace_filename);
}
void libruby_start_tracing(char * record_filename) {
// start the trace
RubySystem::getTracer()->startTrace(record_filename);
void
libruby_start_tracing(char * record_filename)
{
// start the trace
RubySystem::getTracer()->startTrace(record_filename);
}
void libruby_stop_tracing() {
// start the trace
RubySystem::getTracer()->stopTrace();
void
libruby_stop_tracing()
{
// start the trace
RubySystem::getTracer()->stopTrace();
}
uint64_t libruby_get_time() {
return RubySystem::getCycleCount(0);
uint64_t
libruby_get_time()
{
return RubySystem::getCycleCount(0);
}

View File

@@ -26,11 +26,12 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LIBRUBY_H
#define LIBRUBY_H
#ifndef __MEM_RUBY_LIBRUBY_HH__
#define __MEM_RUBY_LIBRUBY_HH__
#include <stdint.h>
#include <ostream>
#include "base/types.hh"
#include "mem/packet.hh"
typedef void* RubyPortHandle;
@@ -52,34 +53,35 @@ enum RubyAccessMode {
RubyAccessMode_Device
};
struct RubyRequest {
uint64_t paddr;
uint8_t* data;
int len;
uint64_t pc;
RubyRequestType type;
RubyAccessMode access_mode;
PacketPtr pkt;
unsigned proc_id;
struct RubyRequest
{
uint64_t paddr;
uint8_t* data;
int len;
uint64_t pc;
RubyRequestType type;
RubyAccessMode access_mode;
PacketPtr pkt;
unsigned proc_id;
RubyRequest() {}
RubyRequest(uint64_t _paddr,
uint8_t* _data,
int _len,
uint64_t _pc,
RubyRequestType _type,
RubyAccessMode _access_mode,
PacketPtr _pkt,
unsigned _proc_id = 100)
: paddr(_paddr),
data(_data),
len(_len),
pc(_pc),
type(_type),
access_mode(_access_mode),
pkt(_pkt),
proc_id(_proc_id)
{}
RubyRequest() {}
RubyRequest(uint64_t _paddr,
uint8_t* _data,
int _len,
uint64_t _pc,
RubyRequestType _type,
RubyAccessMode _access_mode,
PacketPtr _pkt,
unsigned _proc_id = 100)
: paddr(_paddr),
data(_data),
len(_len),
pc(_pc),
type(_type),
access_mode(_access_mode),
pkt(_pkt),
proc_id(_proc_id)
{}
};
std::ostream& operator<<(std::ostream& out, const RubyRequest& obj);
@@ -90,7 +92,8 @@ std::ostream& operator<<(std::ostream& out, const RubyRequest& obj);
void libruby_init(const char* cfg_file);
/**
* Tear down a configured system. Must be invoked after a call to libruby_init.
* Tear down a configured system. Must be invoked after a call to
* libruby_init.
*/
void libruby_destroy();
@@ -105,7 +108,8 @@ const char* libruby_last_error();
* this port to use when a request completes. Only one handle to a
* port is allowed at a time.
*/
RubyPortHandle libruby_get_port(const char* name, void (*hit_callback)(int64_t access_id));
RubyPortHandle libruby_get_port(const char* name,
void (*hit_callback)(int64_t access_id));
/**
* Retrieve a handle to a RubyPort object, identified by name in the
@@ -113,7 +117,6 @@ RubyPortHandle libruby_get_port(const char* name, void (*hit_callback)(int64_t a
*/
RubyPortHandle libruby_get_port_by_name(const char* name);
/**
* issue_request returns a unique access_id to identify the ruby
* transaction. This access_id is later returned to the caller via
@@ -126,14 +129,14 @@ int64_t libruby_issue_request(RubyPortHandle p, struct RubyRequest request);
* ignores caches, and should be considered incoherent after
* simulation starts.
*/
void libruby_write_ram(uint64_t paddr, uint8_t * data, int len);
void libruby_write_ram(uint64_t paddr, uint8_t *data, int len);
/**
* reads data directory from Ruby's data array. Note that this
* ignores caches, and should be considered incoherent after
* simulation starts
*/
void libruby_read_ram(uint64_t paddr, uint8_t * data, int len);
void libruby_read_ram(uint64_t paddr, uint8_t *data, int len);
/**
* tick the system n cycles. Eventually, will return the number of
@@ -144,22 +147,22 @@ int libruby_tick(int n);
/**
* self explainitory
*/
void libruby_print_config(std::ostream & out);
void libruby_print_config(std::ostream &out);
/**
* self explainitory
*/
void libruby_print_stats(std::ostream & out);
void libruby_print_stats(std::ostream &out);
/**
* does not return until done
*/
void libruby_playback_trace(char * trace_filename);
void libruby_playback_trace(char *trace_filename);
/*
* enables the tracer and opens the trace file
*/
void libruby_start_tracing(char * record_filename);
void libruby_start_tracing(char *record_filename);
/*
* closes the trace file
@@ -170,4 +173,5 @@ void libruby_stop_tracing();
* get time
*/
uint64_t libruby_get_time();
#endif
#endif // __MEM_RUBY_LIBRUBY_HH__

View File

@@ -26,16 +26,16 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LIBRUBY_INTERNAL_H
#define LIBRUBY_INTERNAL_H
#include "mem/ruby/libruby.hh"
#ifndef __MEM_RUBY_LIBRUBY_INTERNAL_HH__
#define __MEM_RUBY_LIBRUBY_INTERNAL_HH__
#include <ostream>
#include <string>
#include "mem/ruby/libruby.hh"
std::string RubyRequestType_to_string(const RubyRequestType& obj);
RubyRequestType string_to_RubyRequestType(std::string);
std::ostream& operator<<(std::ostream& out, const RubyRequestType& obj);
#endif
#endif // __MEM_RUBY_LIBRUBY_INTERNAL_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,21 +26,15 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
* Description: See AbstractCacheEntry.hh
*
*/
#include "mem/ruby/slicc_interface/AbstractCacheEntry.hh"
AbstractCacheEntry::AbstractCacheEntry() {
m_Address.setAddress(0);
m_Permission = AccessPermission_NotPresent;
AbstractCacheEntry::AbstractCacheEntry()
{
m_Address.setAddress(0);
m_Permission = AccessPermission_NotPresent;
}
// still need to define destructor for subclasses
AbstractCacheEntry::~AbstractCacheEntry() {
AbstractCacheEntry::~AbstractCacheEntry()
{
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,49 +27,39 @@
*/
/*
* $Id$
*
* Description: Common base class for a machine node.
*
* Common base class for a machine node.
*/
#ifndef AbstractCacheEntry_H
#define AbstractCacheEntry_H
#ifndef __MEM_RUBY_SLICC_INTERFACE_ABSTRACTCACHEENTRY_HH__
#define __MEM_RUBY_SLICC_INTERFACE_ABSTRACTCACHEENTRY_HH__
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/slicc_interface/AbstractEntry.hh"
class DataBlock;
class AbstractCacheEntry : public AbstractEntry {
public:
// Constructors
AbstractCacheEntry();
class AbstractCacheEntry : public AbstractEntry
{
public:
AbstractCacheEntry();
virtual ~AbstractCacheEntry() = 0;
// Destructor, prevent it from instantiation
virtual ~AbstractCacheEntry() = 0;
// Data Members (m_ prefix)
Address m_Address; // Address of this block, required by CacheMemory
Time m_LastRef; // Last time this block was referenced, required by CacheMemory
AccessPermission m_Permission; // Access permission for this block, required by CacheMemory
Address m_Address; // Address of this block, required by CacheMemory
Time m_LastRef; // Last time this block was referenced, required
// by CacheMemory
AccessPermission m_Permission; // Access permission for this
// block, required by CacheMemory
};
// Output operator declaration
ostream& operator<<(ostream& out, const AbstractCacheEntry& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const AbstractCacheEntry& obj)
inline ostream&
operator<<(ostream& out, const AbstractCacheEntry& obj)
{
obj.print(out);
out << flush;
return out;
obj.print(out);
out << flush;
return out;
}
#endif //AbstractCacheEntry_H
#endif // __MEM_RUBY_SLICC_INTERFACE_ABSTRACTCACHEENTRY_HH__

View File

@@ -26,8 +26,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ABSTRACTCONTROLLER_H
#define ABSTRACTCONTROLLER_H
#ifndef __MEM_RUBY_SLICC_INTERFACE_ABSTRACTCONTROLLER_HH__
#define __MEM_RUBY_SLICC_INTERFACE_ABSTRACTCONTROLLER_HH__
#include "sim/sim_object.hh"
#include "params/RubyController.hh"
@@ -40,29 +40,30 @@
class MessageBuffer;
class Network;
class AbstractController : public SimObject, public Consumer {
public:
class AbstractController : public SimObject, public Consumer
{
public:
typedef RubyControllerParams Params;
AbstractController(const Params *p) : SimObject(p) {}
// returns the number of controllers created of the specific subtype
// virtual int getNumberOfControllers() const = 0;
virtual MessageBuffer* getMandatoryQueue() const = 0;
virtual const int & getVersion() const = 0;
virtual const string toString() const = 0; // returns text version of controller type
virtual const string getName() const = 0; // return instance name
virtual const MachineType getMachineType() const = 0;
virtual void blockOnQueue(Address, MessageBuffer*) = 0;
virtual void unblock(Address) = 0;
virtual void initNetworkPtr(Network* net_ptr) = 0;
virtual void print(ostream & out) const = 0;
virtual void printStats(ostream & out) const = 0;
virtual void printConfig(ostream & out) const = 0;
virtual void wakeup() = 0;
// virtual void dumpStats(ostream & out) = 0;
virtual void clearStats() = 0;
// returns the number of controllers created of the specific subtype
// virtual int getNumberOfControllers() const = 0;
virtual MessageBuffer* getMandatoryQueue() const = 0;
virtual const int & getVersion() const = 0;
virtual const string toString() const = 0; // returns text version of
// controller type
virtual const string getName() const = 0; // return instance name
virtual const MachineType getMachineType() const = 0;
virtual void blockOnQueue(Address, MessageBuffer*) = 0;
virtual void unblock(Address) = 0;
virtual void initNetworkPtr(Network* net_ptr) = 0;
virtual void print(ostream & out) const = 0;
virtual void printStats(ostream & out) const = 0;
virtual void printConfig(ostream & out) const = 0;
virtual void wakeup() = 0;
// virtual void dumpStats(ostream & out) = 0;
virtual void clearStats() = 0;
};
#endif
#endif // __MEM_RUBY_SLICC_INTERFACE_ABSTRACTCONTROLLER_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -29,10 +28,10 @@
#include "mem/ruby/slicc_interface/AbstractEntry.hh"
// Must define constructor and destructor in subclasses
AbstractEntry::AbstractEntry() {
AbstractEntry::AbstractEntry()
{
}
AbstractEntry::~AbstractEntry() {
AbstractEntry::~AbstractEntry()
{
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,47 +26,35 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef AbstractEntry_H
#define AbstractEntry_H
#ifndef __MEM_RUBY_SLICC_INTERFACE_ABSTRACTENTRY_HH__
#define __MEM_RUBY_SLICC_INTERFACE_ABSTRACTENTRY_HH__
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/protocol/AccessPermission.hh"
class DataBlock;
class AbstractEntry {
public:
// Constructors
AbstractEntry();
class AbstractEntry
{
public:
AbstractEntry();
virtual ~AbstractEntry() = 0;
// Destructor, prevent it from instantiation
virtual ~AbstractEntry() = 0;
// Public Methods
// The methods below are those called by ruby runtime, add when it is
// absolutely necessary and should all be virtual function.
virtual DataBlock& getDataBlk() = 0;
virtual void print(ostream& out) const = 0;
// The methods below are those called by ruby runtime, add when it
// is absolutely necessary and should all be virtual function.
virtual DataBlock& getDataBlk() = 0;
virtual void print(ostream& out) const = 0;
};
// Output operator declaration
ostream& operator<<(ostream& out, const AbstractEntry& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const AbstractEntry& obj)
inline ostream&
operator<<(ostream& out, const AbstractEntry& obj)
{
obj.print(out);
out << flush;
return out;
obj.print(out);
out << flush;
return out;
}
#endif //AbstractEntry_H
#endif // __MEM_RUBY_SLICC_INTERFACE_ABSTRACTENTRY_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,33 +27,24 @@
*/
/*
* $Id$
*
* Description: Define all possible protocol parameters and their
* default value here. Normally, all parameters should
* have default value "false" means the feature of the
* protocol is turned off.
*
* Define all possible protocol parameters and their default value
* here. Normally, all parameters should have default value "false"
* means the feature of the protocol is turned off.
*/
#ifndef AbstractProtocol_H
#define AbstractProtocol_H
#ifndef __MEM_RUBY_SLICC_INTERFACE_ABSTRACTPROTOCOL_HH__
#define __MEM_RUBY_SLICC_INTERFACE_ABSTRACTPROTOCOL_HH__
class AbstractProtocol {
public:
// Constructors
AbstractProtocol() {};
class AbstractProtocol
{
public:
AbstractProtocol() {}
// Destructor, no instantiation
// No definition also, so no subclass can be instantiated also
virtual ~AbstractProtocol() = 0;
virtual ~AbstractProtocol() = 0;
// Public Methods
// Data Members (m_ prefix)
static const bool m_CMP = false ;
static const bool m_TwoLevelCache = false ;
static const bool m_CMP = false ;
static const bool m_TwoLevelCache = false ;
};
#endif //AbstractProtocol_H
#endif // __MEM_RUBY_SLICC_INTERFACE_ABSTRACTPROTOCOL_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,67 +26,58 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
#ifndef MESSAGE_H
#define MESSAGE_H
#ifndef __MEM_RUBY_SLICC_INTERFACE_MESSAGE_HH__
#define __MEM_RUBY_SLICC_INTERFACE_MESSAGE_HH__
#include <iostream>
#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/RefCnt.hh"
#include "mem/gems_common/RefCountable.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
class Message;
typedef RefCnt<Message> MsgPtr;
class Message : public RefCountable {
public:
// Constructors
Message() : RefCountable() { m_time = g_eventQueue_ptr->getTime(); m_LastEnqueueTime = g_eventQueue_ptr->getTime(); m_DelayedCycles = 0;}
class Message : public RefCountable
{
public:
Message()
: RefCountable()
{
m_time = g_eventQueue_ptr->getTime();
m_LastEnqueueTime = g_eventQueue_ptr->getTime();
m_DelayedCycles = 0;
}
// Destructor
virtual ~Message() { }
virtual ~Message() { }
// Public Methods
virtual Message* clone() const = 0;
virtual void destroy() = 0;
virtual void print(std::ostream& out) const = 0;
virtual Message* clone() const = 0;
virtual void destroy() = 0;
virtual void print(std::ostream& out) const = 0;
void setDelayedCycles(const int& cycles) { m_DelayedCycles = cycles; }
const int& getDelayedCycles() const {return m_DelayedCycles;}
int& getDelayedCycles() {return m_DelayedCycles;}
void setLastEnqueueTime(const Time& time) { m_LastEnqueueTime = time; }
const Time& getLastEnqueueTime() const {return m_LastEnqueueTime;}
Time& getLastEnqueueTime() {return m_LastEnqueueTime;}
void setDelayedCycles(const int& cycles) { m_DelayedCycles = cycles; }
const int& getDelayedCycles() const {return m_DelayedCycles;}
int& getDelayedCycles() {return m_DelayedCycles;}
void setLastEnqueueTime(const Time& time) { m_LastEnqueueTime = time; }
const Time& getLastEnqueueTime() const {return m_LastEnqueueTime;}
Time& getLastEnqueueTime() {return m_LastEnqueueTime;}
const Time& getTime() const { return m_time; }
void setTime(const Time& new_time) { m_time = new_time; }
private:
// Private Methods
// Data Members (m_ prefix)
Time m_time;
Time m_LastEnqueueTime; // my last enqueue time
int m_DelayedCycles; // my delayed cycles
const Time& getTime() const { return m_time; }
void setTime(const Time& new_time) { m_time = new_time; }
private:
Time m_time;
Time m_LastEnqueueTime; // my last enqueue time
int m_DelayedCycles; // my delayed cycles
};
// Output operator declaration
std::ostream& operator<<(std::ostream& out, const Message& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
std::ostream& operator<<(std::ostream& out, const Message& obj)
inline std::ostream&
operator<<(std::ostream& out, const Message& obj)
{
obj.print(out);
out << std::flush;
return out;
obj.print(out);
out << std::flush;
return out;
}
#endif //MESSAGE_H
#endif // __MEM_RUBY_SLICC_INTERFACE_MESSAGE_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,89 +26,69 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NetworkMessage.hh
*
* Description:
*
* $Id$
*
*/
#ifndef __MEM_RUBY_SLICC_INTERFACE_NETWORKMESSAGE_HH__
#define __MEM_RUBY_SLICC_INTERFACE_NETWORKMESSAGE_HH__
#ifndef NetworkMessage_H
#define NetworkMessage_H
#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/RefCnt.hh"
#include "mem/gems_common/RefCountable.hh"
#include "mem/ruby/slicc_interface/Message.hh"
#include "mem/protocol/MessageSizeType.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/NetDest.hh"
#include "mem/ruby/slicc_interface/Message.hh"
class Address;
class NetworkMessage;
typedef RefCnt<NetworkMessage> NetMsgPtr;
class NetworkMessage : public Message {
public:
// Constructors
NetworkMessage()
:Message()
class NetworkMessage : public Message
{
public:
NetworkMessage()
: Message()
{
m_internal_dest_valid = false;
m_internal_dest_valid = false;
}
// Destructor
virtual ~NetworkMessage() { }
virtual ~NetworkMessage() { }
// Public Methods
virtual const NetDest& getDestination() const = 0;
virtual NetDest& getDestination() = 0;
virtual const MessageSizeType& getMessageSize() const = 0;
virtual MessageSizeType& getMessageSize() = 0;
virtual const NetDest& getDestination() const = 0;
virtual NetDest& getDestination() = 0;
virtual const MessageSizeType& getMessageSize() const = 0;
virtual MessageSizeType& getMessageSize() = 0;
// virtual const Address& getAddress() const = 0;
// virtual Address& getAddress() = 0;
const NetDest&
getInternalDestination() const
{
if (m_internal_dest_valid == false)
return getDestination();
const NetDest& getInternalDestination() const {
if (m_internal_dest_valid == false) {
return getDestination();
} else {
return m_internal_dest;
return m_internal_dest;
}
}
NetDest& getInternalDestination() {
if (m_internal_dest_valid == false) {
m_internal_dest = getDestination();
m_internal_dest_valid = true;
NetDest&
getInternalDestination()
{
if (m_internal_dest_valid == false) {
m_internal_dest = getDestination();
m_internal_dest_valid = true;
}
return m_internal_dest;
}
return m_internal_dest;
}
virtual void print(ostream& out) const = 0;
virtual void print(ostream& out) const = 0;
private:
// Private Methods
// Data Members (m_ prefix)
NetDest m_internal_dest;
bool m_internal_dest_valid;
private:
NetDest m_internal_dest;
bool m_internal_dest_valid;
};
// Output operator declaration
ostream& operator<<(ostream& out, const NetworkMessage& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const NetworkMessage& obj)
inline ostream&
operator<<(ostream& out, const NetworkMessage& obj)
{
obj.print(out);
out << flush;
return out;
obj.print(out);
out << flush;
return out;
}
#endif //NetworkMessage_H
#endif // __MEM_RUBY_SLICC_INTERFACE_NETWORKMESSAGE_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,7 +26,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
#include "mem/ruby/system/CacheMemory.hh"

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,22 +26,18 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
#ifndef __MEM_RUBY_SLICC_INTERFACE_RUBYSLICC_COMPONENTMAPPINGS_HH__
#define __MEM_RUBY_SLICC_INTERFACE_RUBYSLICC_COMPONENTMAPPINGS_HH__
#ifndef COMPONENTMAPPINGFNS_H
#define COMPONENTMAPPINGFNS_H
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/ruby/system/MachineID.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Set.hh"
#include "mem/ruby/common/NetDest.hh"
#include "mem/protocol/GenericMachineType.hh"
#include "mem/ruby/system/DirectoryMemory.hh"
#include "mem/protocol/MachineType.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/NetDest.hh"
#include "mem/ruby/common/Set.hh"
#include "mem/ruby/system/DirectoryMemory.hh"
#include "mem/ruby/system/MachineID.hh"
#include "mem/ruby/system/NodeID.hh"
#ifdef MACHINETYPE_L1Cache
#define MACHINETYPE_L1CACHE_ENUM MachineType_L1Cache
@@ -70,88 +65,100 @@
// used to determine the home directory
// returns a value between 0 and total_directories_within_the_system
inline
NodeID map_Address_to_DirectoryNode(const Address& addr)
inline NodeID
map_Address_to_DirectoryNode(const Address& addr)
{
return DirectoryMemory::mapAddressToDirectoryVersion(addr);
return DirectoryMemory::mapAddressToDirectoryVersion(addr);
}
// used to determine the home directory
// returns a value between 0 and total_directories_within_the_system
inline
MachineID map_Address_to_Directory(const Address &addr)
inline MachineID
map_Address_to_Directory(const Address &addr)
{
MachineID mach = {MachineType_Directory, map_Address_to_DirectoryNode(addr)};
return mach;
}
inline
MachineID map_Address_to_DMA(const Address & addr)
{
MachineID dma = {MACHINETYPE_DMA_ENUM, 0};
return dma;
}
inline
NetDest broadcast(MachineType type)
{
NetDest dest;
for (int i=0; i<MachineType_base_count(type); i++) {
MachineID mach = {type, i};
dest.add(mach);
}
return dest;
}
inline
MachineID mapAddressToRange(const Address & addr, MachineType type, int low_bit, int num_bits)
{
MachineID mach = {type, 0};
if (num_bits == 0)
MachineID mach =
{MachineType_Directory, map_Address_to_DirectoryNode(addr)};
return mach;
mach.num = addr.bitSelect(low_bit, low_bit+num_bits-1);
return mach;
}
extern inline NodeID machineIDToNodeID(MachineID machID)
inline MachineID
map_Address_to_DMA(const Address & addr)
{
return machID.num;
MachineID dma = {MACHINETYPE_DMA_ENUM, 0};
return dma;
}
extern inline MachineType machineIDToMachineType(MachineID machID)
inline NetDest
broadcast(MachineType type)
{
return machID.type;
NetDest dest;
for (int i = 0; i < MachineType_base_count(type); i++) {
MachineID mach = {type, i};
dest.add(mach);
}
return dest;
}
extern inline NodeID L1CacheMachIDToProcessorNum(MachineID machID)
inline MachineID
mapAddressToRange(const Address & addr, MachineType type, int low_bit,
int num_bits)
{
assert(machID.type == MachineType_L1Cache);
return machID.num;
MachineID mach = {type, 0};
if (num_bits == 0)
return mach;
mach.num = addr.bitSelect(low_bit, low_bit + num_bits - 1);
return mach;
}
extern inline MachineID getL1MachineID(NodeID L1RubyNode)
inline NodeID
machineIDToNodeID(MachineID machID)
{
MachineID mach = {MACHINETYPE_L1CACHE_ENUM, L1RubyNode};
return mach;
return machID.num;
}
extern inline GenericMachineType ConvertMachToGenericMach(MachineType machType) {
if (machType == MACHINETYPE_L1CACHE_ENUM) {
return GenericMachineType_L1Cache;
} else if (machType == MACHINETYPE_L2CACHE_ENUM) {
return GenericMachineType_L2Cache;
} else if (machType == MACHINETYPE_L3CACHE_ENUM) {
return GenericMachineType_L3Cache;
} else if (machType == MachineType_Directory) {
return GenericMachineType_Directory;
} else {
inline MachineType
machineIDToMachineType(MachineID machID)
{
return machID.type;
}
inline NodeID
L1CacheMachIDToProcessorNum(MachineID machID)
{
assert(machID.type == MachineType_L1Cache);
return machID.num;
}
inline MachineID
getL1MachineID(NodeID L1RubyNode)
{
MachineID mach = {MACHINETYPE_L1CACHE_ENUM, L1RubyNode};
return mach;
}
inline GenericMachineType
ConvertMachToGenericMach(MachineType machType)
{
if (machType == MACHINETYPE_L1CACHE_ENUM)
return GenericMachineType_L1Cache;
if (machType == MACHINETYPE_L2CACHE_ENUM)
return GenericMachineType_L2Cache;
if (machType == MACHINETYPE_L3CACHE_ENUM)
return GenericMachineType_L3Cache;
if (machType == MachineType_Directory)
return GenericMachineType_Directory;
ERROR_MSG("cannot convert to a GenericMachineType");
return GenericMachineType_NULL;
}
}
extern inline int machineCount(MachineType machType) {
inline int
machineCount(MachineType machType)
{
return MachineType_base_count(machType);
}
#endif // COMPONENTMAPPINGFNS_H
#endif // __MEM_RUBY_SLICC_INTERFACE_COMPONENTMAPPINGS_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,66 +26,75 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* slicc_util.cc
*
* Description: See slicc_util.hh
*
* $Id$
*
*/
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/profiler/AddressProfiler.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/slicc_interface/RubySlicc_Profiler_interface.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/AddressProfiler.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
// #include "TransactionInterfaceManager.hh"
#include "mem/ruby/slicc_interface/RubySlicc_Profiler_interface.hh"
#include "mem/ruby/system/System.hh"
void profile_request(int cache_state, Directory_State directory_state, GenericRequestType request_type)
void
profile_request(int cache_state, Directory_State directory_state,
GenericRequestType request_type)
{
string requestStr = L1Cache_State_to_string(L1Cache_State(cache_state))+":"+
Directory_State_to_string(directory_state)+":"+
GenericRequestType_to_string(request_type);
g_system_ptr->getProfiler()->profileRequest(requestStr);
string requestStr = L1Cache_State_to_string(L1Cache_State(cache_state))+
":" +
Directory_State_to_string(directory_state) + ":" +
GenericRequestType_to_string(request_type);
g_system_ptr->getProfiler()->profileRequest(requestStr);
}
void profile_request(const string& L1CacheState, const string& L2CacheState, const string& directoryState, const string& requestType)
void
profile_request(const string& L1CacheState, const string& L2CacheState,
const string& directoryState, const string& requestType)
{
string requestStr = L1CacheState+":"+L2CacheState+":"+directoryState+":"+requestType;
g_system_ptr->getProfiler()->profileRequest(requestStr);
string requestStr = L1CacheState + ":" + L2CacheState + ":" +
directoryState + ":" + requestType;
g_system_ptr->getProfiler()->profileRequest(requestStr);
}
void profile_outstanding_request(int outstanding)
void
profile_outstanding_request(int outstanding)
{
g_system_ptr->getProfiler()->profileOutstandingRequest(outstanding);
g_system_ptr->getProfiler()->profileOutstandingRequest(outstanding);
}
void profile_average_latency_estimate(int latency)
void
profile_average_latency_estimate(int latency)
{
g_system_ptr->getProfiler()->profileAverageLatencyEstimate(latency);
g_system_ptr->getProfiler()->profileAverageLatencyEstimate(latency);
}
void profile_sharing(const Address& addr, AccessType type, NodeID requestor, const Set& sharers, const Set& owner)
void
profile_sharing(const Address& addr, AccessType type, NodeID requestor,
const Set& sharers, const Set& owner)
{
g_system_ptr->getProfiler()->profileSharing(addr, type, requestor, sharers, owner);
g_system_ptr->getProfiler()->
profileSharing(addr, type, requestor, sharers, owner);
}
void profileMsgDelay(int virtualNetwork, int delayCycles)
void
profileMsgDelay(int virtualNetwork, int delayCycles)
{
g_system_ptr->getProfiler()->profileMsgDelay(virtualNetwork, delayCycles);
g_system_ptr->getProfiler()->profileMsgDelay(virtualNetwork, delayCycles);
}
void profileGetX(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor)
void
profileGetX(const Address& datablock, const Address& PC, const Set& owner,
const Set& sharers, NodeID requestor)
{
g_system_ptr->getProfiler()->getAddressProfiler()->profileGetX(datablock, PC, owner, sharers, requestor);
g_system_ptr->getProfiler()->getAddressProfiler()->
profileGetX(datablock, PC, owner, sharers, requestor);
}
void profileGetS(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor)
void
profileGetS(const Address& datablock, const Address& PC, const Set& owner,
const Set& sharers, NodeID requestor)
{
g_system_ptr->getProfiler()->getAddressProfiler()->profileGetS(datablock, PC, owner, sharers, requestor);
g_system_ptr->getProfiler()->getAddressProfiler()->
profileGetS(datablock, PC, owner, sharers, requestor);
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,36 +27,37 @@
*/
/*
* slicc_util.hh
*
* Description: These are the functions that exported to slicc from ruby.
*
* $Id$
*
* These are the functions that exported to slicc from ruby.
*/
#ifndef RUBYSLICC_PROFILER_INTERFACE_H
#define RUBYSLICC_PROFILER_INTERFACE_H
#ifndef __MEM_RUBY_SLICC_INTERFACE_RUBYSLICC_PROFILER_INTERFACE_HH__
#define __MEM_RUBY_SLICC_INTERFACE_RUBYSLICC_PROFILER_INTERFACE_HH__
#include "mem/protocol/AccessType.hh"
#include "mem/protocol/Directory_State.hh"
#include "mem/protocol/GenericRequestType.hh"
#include "mem/protocol/L1Cache_State.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/protocol/L1Cache_State.hh"
#include "mem/protocol/AccessType.hh"
#include "mem/protocol/GenericRequestType.hh"
#include "mem/protocol/Directory_State.hh"
#include "mem/ruby/system/NodeID.hh"
class Set;
void profile_request(int cache_state, Directory_State directory_state, GenericRequestType request_type);
void profile_request(int cache_state, Directory_State directory_state,
GenericRequestType request_type);
void profile_outstanding_persistent_request(int outstanding);
void profile_outstanding_request(int outstanding);
void profile_sharing(const Address& addr, AccessType type, NodeID requestor, const Set& sharers, const Set& owner);
void profile_request(const string& L1CacheStateStr, const string& L2CacheStateStr, const string& directoryStateStr, const string& requestTypeStr);
void profile_sharing(const Address& addr, AccessType type, NodeID requestor,
const Set& sharers, const Set& owner);
void profile_request(const string& L1CacheStateStr,
const string& L2CacheStateStr,
const string& directoryStateStr,
const string& requestTypeStr);
void profile_miss(const CacheMsg& msg, NodeID id);
void profile_L1Cache_miss(const CacheMsg& msg, NodeID id);
void profile_L2Cache_miss(GenericRequestType requestType, AccessModeType type, int msgSize, PrefetchBit pfBit, NodeID l2cacheID);
void profile_L2Cache_miss(GenericRequestType requestType, AccessModeType type,
int msgSize, PrefetchBit pfBit, NodeID l2cacheID);
void profile_token_retry(const Address& addr, AccessType type, int count);
void profile_filter_action(int action);
void profile_persistent_prediction(const Address& addr, AccessType type);
@@ -65,9 +65,11 @@ void profile_average_latency_estimate(int latency);
void profileMsgDelay(int virtualNetwork, int delayCycles);
void profile_multicast_retry(const Address& addr, int count);
void profileGetX(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor);
void profileGetS(const Address& datablock, const Address& PC, const Set& owner, const Set& sharers, NodeID requestor);
void profileGetX(const Address& datablock, const Address& PC, const Set& owner,
const Set& sharers, NodeID requestor);
void profileGetS(const Address& datablock, const Address& PC, const Set& owner,
const Set& sharers, NodeID requestor);
void profileOverflow(const Address & addr, MachineID mach);
#endif // RUBYSLICC_PROFILER_INTERFACE_H
#endif // __MEM_RUBY_SLICC_INTERFACE_RUBYSLICC_PROFILER_INTERFACE_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,149 +27,163 @@
*/
/*
* slicc_util.hh
*
* Description: These are the functions that exported to slicc from ruby.
*
* $Id$
*
* These are the functions that exported to slicc from ruby.
*/
#ifndef SLICC_UTIL_H
#define SLICC_UTIL_H
#ifndef __MEM_RUBY_SLICC_INTERFACE_RUBYSLICCUTIL_HH__
#define __MEM_RUBY_SLICC_INTERFACE_RUBYSLICCUTIL_HH__
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/ruby/system/MachineID.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/GenericRequestType.hh"
#include "mem/protocol/CacheRequestType.hh"
#include "mem/protocol/AccessType.hh"
#include "mem/protocol/MachineType.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/CacheRequestType.hh"
#include "mem/protocol/Directory_State.hh"
#include "mem/protocol/GenericRequestType.hh"
#include "mem/protocol/L1Cache_State.hh"
#include "mem/protocol/MachineType.hh"
#include "mem/protocol/MessageSizeType.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
#include "mem/ruby/system/MachineID.hh"
#include "mem/ruby/system/NodeID.hh"
#include "mem/ruby/system/System.hh"
class Set;
class NetDest;
extern inline int random(int n)
inline int
random(int n)
{
return random() % n;
}
extern inline bool multicast_retry()
inline bool
multicast_retry()
{
if (RubySystem::getRandomization()) {
return (random() & 0x1);
} else {
return true;
}
if (RubySystem::getRandomization()) {
return (random() & 0x1);
} else {
return true;
}
}
extern inline int cache_state_to_int(L1Cache_State state)
inline int
cache_state_to_int(L1Cache_State state)
{
return state;
return state;
}
extern inline Time get_time()
inline Time
get_time()
{
return g_eventQueue_ptr->getTime();
return g_eventQueue_ptr->getTime();
}
extern inline Time zero_time()
inline Time
zero_time()
{
return 0;
}
extern inline NodeID intToID(int nodenum)
{
NodeID id = nodenum;
return id;
}
extern inline int IDToInt(NodeID id)
{
int nodenum = id;
return nodenum;
}
extern inline int addressToInt(Address addr)
{
return (int) addr.getLineAddress();
}
extern inline bool long_enough_ago(Time event)
{
return ((get_time() - event) > 200);
}
extern inline int getAddThenMod(int addend1, int addend2, int modulus)
{
return (addend1 + addend2) % modulus;
}
extern inline Time getTimeModInt(Time time, int modulus)
{
return time % modulus;
}
extern inline Time getTimePlusInt(Time addend1, int addend2)
{
return (Time) addend1 + addend2;
}
extern inline Time getTimeMinusTime(Time t1, Time t2)
{
ASSERT(t1 >= t2);
return t1 - t2;
}
extern inline Time getPreviousDelayedCycles(Time t1, Time t2)
{
if (RubySystem::getRandomization()) { // when randomizing delayed
return 0;
} else {
return getTimeMinusTime(t1, t2);
}
}
extern inline void WARN_ERROR_TIME(Time time)
inline NodeID
intToID(int nodenum)
{
WARN_EXPR(time);
NodeID id = nodenum;
return id;
}
// Return type for time_to_int is "Time" and not "int" so we get a 64-bit integer
extern inline Time time_to_int(Time time)
inline int
IDToInt(NodeID id)
{
return time;
int nodenum = id;
return nodenum;
}
inline int
addressToInt(Address addr)
{
return (int)addr.getLineAddress();
}
inline bool
long_enough_ago(Time event)
{
return ((get_time() - event) > 200);
}
inline int
getAddThenMod(int addend1, int addend2, int modulus)
{
return (addend1 + addend2) % modulus;
}
inline Time
getTimeModInt(Time time, int modulus)
{
return time % modulus;
}
inline Time
getTimePlusInt(Time addend1, int addend2)
{
return (Time) addend1 + addend2;
}
inline Time
getTimeMinusTime(Time t1, Time t2)
{
ASSERT(t1 >= t2);
return t1 - t2;
}
inline Time
getPreviousDelayedCycles(Time t1, Time t2)
{
if (RubySystem::getRandomization()) { // when randomizing delayed
return 0;
} else {
return getTimeMinusTime(t1, t2);
}
}
inline void
WARN_ERROR_TIME(Time time)
{
WARN_EXPR(time);
}
// Return type for time_to_int is "Time" and not "int" so we get a
// 64-bit integer
inline Time
time_to_int(Time time)
{
return time;
}
// Appends an offset to an address
extern inline Address setOffset(Address addr, int offset)
inline Address
setOffset(Address addr, int offset)
{
Address result = addr;
result.setOffset(offset);
return result;
Address result = addr;
result.setOffset(offset);
return result;
}
// Makes an address into a line address
extern inline Address makeLineAddress(Address addr)
inline Address
makeLineAddress(Address addr)
{
Address result = addr;
result.makeLineAddress();
return result;
Address result = addr;
result.makeLineAddress();
return result;
}
extern inline int addressOffset(Address addr)
inline int
addressOffset(Address addr)
{
return addr.getOffset();
return addr.getOffset();
}
#endif //SLICC_UTIL_H
#endif // __MEM_RUBY_SLICC_INTERFACE_RUBYSLICCUTIL_HH__

View File

@@ -26,12 +26,12 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef RUBYSLICC_INCLUDES_H
#define RUBYSLICC_INCLUDES_H
#ifndef __MEM_RUBY_SLICC_INTERFACE_RUBYSLICC_INCLUDES_HH__
#define __MEM_RUBY_SLICC_INTERFACE_RUBYSLICC_INCLUDES_HH__
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
#include "mem/ruby/slicc_interface/RubySlicc_Util.hh"
#include "mem/ruby/slicc_interface/RubySlicc_Profiler_interface.hh"
#endif
#endif // __MEM_RUBY_SLICC_INTERFACE_RUBYSLICC_INCLUDES_HH__

View File

@@ -26,42 +26,34 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* AbstractMemOrCache.hh
*
* Description:
*
*
*/
#ifndef __MEM_RUBY_SYSTEM_ABSTRACTMEMORCACHE_HH__
#define __MEM_RUBY_SYSTEM_ABSTRACTMEMORCACHE_HH__
#ifndef ABSTRACT_MEM_OR_CACHE_H
#define ABSTRACT_MEM_OR_CACHE_H
#include <iosfwd>
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/slicc_interface/Message.hh"
class AbstractMemOrCache {
public:
class Consumer;
class MemoryNode;
class Message;
virtual ~AbstractMemOrCache() {};
virtual void setConsumer(Consumer* consumer_ptr) = 0;
virtual Consumer* getConsumer() = 0;
virtual void enqueue (const MsgPtr& message, int latency ) = 0;
virtual void enqueueMemRef (MemoryNode& memRef) = 0;
virtual void dequeue () = 0;
virtual const Message* peek () = 0;
virtual bool isReady () = 0;
virtual MemoryNode peekNode () = 0;
virtual bool areNSlotsAvailable (int n) = 0;
virtual void printConfig (ostream& out) = 0;
virtual void print (ostream& out) const = 0;
virtual void setDebug (int debugFlag) = 0;
private:
class AbstractMemOrCache
{
public:
virtual ~AbstractMemOrCache() {};
virtual void setConsumer(Consumer* consumer_ptr) = 0;
virtual Consumer* getConsumer() = 0;
virtual void enqueue (const MsgPtr& message, int latency) = 0;
virtual void enqueueMemRef (MemoryNode& memRef) = 0;
virtual void dequeue () = 0;
virtual const Message* peek () = 0;
virtual bool isReady () = 0;
virtual MemoryNode peekNode () = 0;
virtual bool areNSlotsAvailable (int n) = 0;
virtual void printConfig (std::ostream& out) = 0;
virtual void print (std::ostream& out) const = 0;
virtual void setDebug (int debugFlag) = 0;
};
#endif
#endif // __MEM_RUBY_SYSTEM_ABSTRACTMEMORCACHE_HH__

View File

@@ -26,64 +26,64 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ABSTRACTREPLACEMENTPOLICY_H
#define ABSTRACTREPLACEMENTPOLICY_H
#ifndef __MEM_RUBY_SYSTEM_ABSTRACTREPLACEMENTPOLICY_HH__
#define __MEM_RUBY_SYSTEM_ABSTRACTREPLACEMENTPOLICY_HH__
#include "mem/ruby/common/Global.hh"
class AbstractReplacementPolicy {
class AbstractReplacementPolicy
{
public:
AbstractReplacementPolicy(Index num_sets, Index assoc);
virtual ~AbstractReplacementPolicy();
public:
/* touch a block. a.k.a. update timestamp */
virtual void touch(Index set, Index way, Time time) = 0;
AbstractReplacementPolicy(Index num_sets, Index assoc);
virtual ~AbstractReplacementPolicy();
/* returns the way to replace */
virtual Index getVictim(Index set) const = 0;
/* touch a block. a.k.a. update timestamp */
virtual void touch(Index set, Index way, Time time) = 0;
/* get the time of the last access */
Time getLastAccess(Index set, Index way);
/* returns the way to replace */
virtual Index getVictim(Index set) const = 0;
/* get the time of the last access */
Time getLastAccess(Index set, Index way);
protected:
unsigned int m_num_sets; /** total number of sets */
unsigned int m_assoc; /** set associativity */
Time **m_last_ref_ptr; /** timestamp of last reference */
protected:
unsigned m_num_sets; /** total number of sets */
unsigned m_assoc; /** set associativity */
Time **m_last_ref_ptr; /** timestamp of last reference */
};
inline
AbstractReplacementPolicy::AbstractReplacementPolicy(Index num_sets, Index assoc)
AbstractReplacementPolicy::AbstractReplacementPolicy(Index num_sets,
Index assoc)
{
m_num_sets = num_sets;
m_assoc = assoc;
m_last_ref_ptr = new Time*[m_num_sets];
for(unsigned int i = 0; i < m_num_sets; i++){
m_last_ref_ptr[i] = new Time[m_assoc];
for(unsigned int j = 0; j < m_assoc; j++){
m_last_ref_ptr[i][j] = 0;
m_num_sets = num_sets;
m_assoc = assoc;
m_last_ref_ptr = new Time*[m_num_sets];
for(unsigned i = 0; i < m_num_sets; i++){
m_last_ref_ptr[i] = new Time[m_assoc];
for(unsigned j = 0; j < m_assoc; j++){
m_last_ref_ptr[i][j] = 0;
}
}
}
}
inline
AbstractReplacementPolicy::~AbstractReplacementPolicy()
{
if(m_last_ref_ptr != NULL){
for(unsigned int i = 0; i < m_num_sets; i++){
if(m_last_ref_ptr[i] != NULL){
delete[] m_last_ref_ptr[i];
}
if (m_last_ref_ptr != NULL){
for (unsigned i = 0; i < m_num_sets; i++){
if (m_last_ref_ptr[i] != NULL){
delete[] m_last_ref_ptr[i];
}
}
delete[] m_last_ref_ptr;
}
delete[] m_last_ref_ptr;
}
}
inline
Time AbstractReplacementPolicy::getLastAccess(Index set, Index way)
inline Time
AbstractReplacementPolicy::getLastAccess(Index set, Index way)
{
return m_last_ref_ptr[set][way];
return m_last_ref_ptr[set][way];
}
#endif // ABSTRACTREPLACEMENTPOLICY_H
#endif // __MEM_RUBY_SYSTEM_ABSTRACTREPLACEMENTPOLICY_HH__

View File

@@ -28,19 +28,14 @@
#include "mem/ruby/system/CacheMemory.hh"
// ******************* Definitions *******************
// Output operator definition
ostream& operator<<(ostream& out, const CacheMemory& obj)
ostream&
operator<<(ostream& out, const CacheMemory& obj)
{
obj.print(out);
out << flush;
return out;
obj.print(out);
out << flush;
return out;
}
// ****************************************************************
CacheMemory *
RubyCacheParams::create()
{
@@ -57,410 +52,451 @@ CacheMemory::CacheMemory(const Params *p)
m_profiler_ptr = new CacheProfiler(name());
}
void CacheMemory::init()
void
CacheMemory::init()
{
m_cache_num_sets = (m_cache_size / m_cache_assoc) / RubySystem::getBlockSizeBytes();
m_cache_num_sets = (m_cache_size / m_cache_assoc) /
RubySystem::getBlockSizeBytes();
assert(m_cache_num_sets > 1);
m_cache_num_set_bits = log_int(m_cache_num_sets);
assert(m_cache_num_set_bits > 0);
if(m_policy == "PSEUDO_LRU")
m_replacementPolicy_ptr = new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc);
else if (m_policy == "LRU")
m_replacementPolicy_ptr = new LRUPolicy(m_cache_num_sets, m_cache_assoc);
else
assert(false);
m_cache.setSize(m_cache_num_sets);
m_locked.setSize(m_cache_num_sets);
for (int i = 0; i < m_cache_num_sets; i++) {
m_cache[i].setSize(m_cache_assoc);
m_locked[i].setSize(m_cache_assoc);
for (int j = 0; j < m_cache_assoc; j++) {
m_cache[i][j] = NULL;
m_locked[i][j] = -1;
if (m_policy == "PSEUDO_LRU")
m_replacementPolicy_ptr =
new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc);
else if (m_policy == "LRU")
m_replacementPolicy_ptr =
new LRUPolicy(m_cache_num_sets, m_cache_assoc);
else
assert(false);
m_cache.setSize(m_cache_num_sets);
m_locked.setSize(m_cache_num_sets);
for (int i = 0; i < m_cache_num_sets; i++) {
m_cache[i].setSize(m_cache_assoc);
m_locked[i].setSize(m_cache_assoc);
for (int j = 0; j < m_cache_assoc; j++) {
m_cache[i][j] = NULL;
m_locked[i][j] = -1;
}
}
}
}
CacheMemory::~CacheMemory()
{
if(m_replacementPolicy_ptr != NULL)
delete m_replacementPolicy_ptr;
delete m_profiler_ptr;
for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {
delete m_cache[i][j];
if (m_replacementPolicy_ptr != NULL)
delete m_replacementPolicy_ptr;
delete m_profiler_ptr;
for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {
delete m_cache[i][j];
}
}
}
}
void CacheMemory::printConfig(ostream& out)
void
CacheMemory::printConfig(ostream& out)
{
out << "Cache config: " << m_cache_name << endl;
out << " cache_associativity: " << m_cache_assoc << endl;
out << " num_cache_sets_bits: " << m_cache_num_set_bits << endl;
const int cache_num_sets = 1 << m_cache_num_set_bits;
out << " num_cache_sets: " << cache_num_sets << endl;
out << " cache_set_size_bytes: " << cache_num_sets * RubySystem::getBlockSizeBytes() << endl;
out << " cache_set_size_Kbytes: "
<< double(cache_num_sets * RubySystem::getBlockSizeBytes()) / (1<<10) << endl;
out << " cache_set_size_Mbytes: "
<< double(cache_num_sets * RubySystem::getBlockSizeBytes()) / (1<<20) << endl;
out << " cache_size_bytes: "
<< cache_num_sets * RubySystem::getBlockSizeBytes() * m_cache_assoc << endl;
out << " cache_size_Kbytes: "
<< double(cache_num_sets * RubySystem::getBlockSizeBytes() * m_cache_assoc) / (1<<10) << endl;
out << " cache_size_Mbytes: "
<< double(cache_num_sets * RubySystem::getBlockSizeBytes() * m_cache_assoc) / (1<<20) << endl;
}
int block_size = RubySystem::getBlockSizeBytes();
// PRIVATE METHODS
out << "Cache config: " << m_cache_name << endl;
out << " cache_associativity: " << m_cache_assoc << endl;
out << " num_cache_sets_bits: " << m_cache_num_set_bits << endl;
const int cache_num_sets = 1 << m_cache_num_set_bits;
out << " num_cache_sets: " << cache_num_sets << endl;
out << " cache_set_size_bytes: " << cache_num_sets * block_size << endl;
out << " cache_set_size_Kbytes: "
<< double(cache_num_sets * block_size) / (1<<10) << endl;
out << " cache_set_size_Mbytes: "
<< double(cache_num_sets * block_size) / (1<<20) << endl;
out << " cache_size_bytes: "
<< cache_num_sets * block_size * m_cache_assoc << endl;
out << " cache_size_Kbytes: "
<< double(cache_num_sets * block_size * m_cache_assoc) / (1<<10)
<< endl;
out << " cache_size_Mbytes: "
<< double(cache_num_sets * block_size * m_cache_assoc) / (1<<20)
<< endl;
}
// convert a Address to its location in the cache
Index CacheMemory::addressToCacheSet(const Address& address) const
Index
CacheMemory::addressToCacheSet(const Address& address) const
{
assert(address == line_address(address));
return address.bitSelect(RubySystem::getBlockSizeBits(), RubySystem::getBlockSizeBits() + m_cache_num_set_bits-1);
assert(address == line_address(address));
return address.bitSelect(RubySystem::getBlockSizeBits(),
RubySystem::getBlockSizeBits() + m_cache_num_set_bits - 1);
}
// Given a cache index: returns the index of the tag in a set.
// returns -1 if the tag is not found.
int CacheMemory::findTagInSet(Index cacheSet, const Address& tag) const
int
CacheMemory::findTagInSet(Index cacheSet, const Address& tag) const
{
assert(tag == line_address(tag));
// search the set for the tags
m5::hash_map<Address, int>::const_iterator it = m_tag_index.find(tag);
if (it != m_tag_index.end())
if (m_cache[cacheSet][it->second]->m_Permission != AccessPermission_NotPresent)
return it->second;
return -1; // Not found
assert(tag == line_address(tag));
// search the set for the tags
m5::hash_map<Address, int>::const_iterator it = m_tag_index.find(tag);
if (it != m_tag_index.end())
if (m_cache[cacheSet][it->second]->m_Permission !=
AccessPermission_NotPresent)
return it->second;
return -1; // Not found
}
// Given a cache index: returns the index of the tag in a set.
// returns -1 if the tag is not found.
int CacheMemory::findTagInSetIgnorePermissions(Index cacheSet, const Address& tag) const
int
CacheMemory::findTagInSetIgnorePermissions(Index cacheSet,
const Address& tag) const
{
assert(tag == line_address(tag));
// search the set for the tags
m5::hash_map<Address, int>::const_iterator it = m_tag_index.find(tag);
if (it != m_tag_index.end())
return it->second;
return -1; // Not found
assert(tag == line_address(tag));
// search the set for the tags
m5::hash_map<Address, int>::const_iterator it = m_tag_index.find(tag);
if (it != m_tag_index.end())
return it->second;
return -1; // Not found
}
// PUBLIC METHODS
bool CacheMemory::tryCacheAccess(const Address& address,
CacheRequestType type,
DataBlock*& data_ptr)
bool
CacheMemory::tryCacheAccess(const Address& address, CacheRequestType type,
DataBlock*& data_ptr)
{
assert(address == line_address(address));
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if(loc != -1){ // Do we even have a tag match?
AbstractCacheEntry* entry = m_cache[cacheSet][loc];
m_replacementPolicy_ptr->touch(cacheSet, loc, g_eventQueue_ptr->getTime());
data_ptr = &(entry->getDataBlk());
assert(address == line_address(address));
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (loc != -1) {
// Do we even have a tag match?
AbstractCacheEntry* entry = m_cache[cacheSet][loc];
m_replacementPolicy_ptr->
touch(cacheSet, loc, g_eventQueue_ptr->getTime());
data_ptr = &(entry->getDataBlk());
if(entry->m_Permission == AccessPermission_Read_Write) {
return true;
if (entry->m_Permission == AccessPermission_Read_Write) {
return true;
}
if ((entry->m_Permission == AccessPermission_Read_Only) &&
(type == CacheRequestType_LD || type == CacheRequestType_IFETCH)) {
return true;
}
// The line must not be accessible
}
if ((entry->m_Permission == AccessPermission_Read_Only) &&
(type == CacheRequestType_LD || type == CacheRequestType_IFETCH)) {
return true;
}
// The line must not be accessible
}
data_ptr = NULL;
return false;
data_ptr = NULL;
return false;
}
bool CacheMemory::testCacheAccess(const Address& address,
CacheRequestType type,
DataBlock*& data_ptr)
bool
CacheMemory::testCacheAccess(const Address& address, CacheRequestType type,
DataBlock*& data_ptr)
{
assert(address == line_address(address));
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if(loc != -1){ // Do we even have a tag match?
AbstractCacheEntry* entry = m_cache[cacheSet][loc];
m_replacementPolicy_ptr->touch(cacheSet, loc, g_eventQueue_ptr->getTime());
data_ptr = &(entry->getDataBlk());
assert(address == line_address(address));
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
return (m_cache[cacheSet][loc]->m_Permission != AccessPermission_NotPresent);
}
data_ptr = NULL;
return false;
if (loc != -1) {
// Do we even have a tag match?
AbstractCacheEntry* entry = m_cache[cacheSet][loc];
m_replacementPolicy_ptr->
touch(cacheSet, loc, g_eventQueue_ptr->getTime());
data_ptr = &(entry->getDataBlk());
return m_cache[cacheSet][loc]->m_Permission !=
AccessPermission_NotPresent;
}
data_ptr = NULL;
return false;
}
// tests to see if an address is present in the cache
bool CacheMemory::isTagPresent(const Address& address) const
bool
CacheMemory::isTagPresent(const Address& address) const
{
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int location = findTagInSet(cacheSet, address);
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (location == -1) {
// We didn't find the tag
if (loc == -1) {
// We didn't find the tag
DEBUG_EXPR(CACHE_COMP, LowPrio, address);
DEBUG_MSG(CACHE_COMP, LowPrio, "No tag match");
return false;
}
DEBUG_EXPR(CACHE_COMP, LowPrio, address);
DEBUG_MSG(CACHE_COMP, LowPrio, "No tag match");
return false;
}
DEBUG_EXPR(CACHE_COMP, LowPrio, address);
DEBUG_MSG(CACHE_COMP, LowPrio, "found");
return true;
DEBUG_MSG(CACHE_COMP, LowPrio, "found");
return true;
}
// Returns true if there is:
// a) a tag match on this address or there is
// b) an unused line in the same cache "way"
bool CacheMemory::cacheAvail(const Address& address) const
bool
CacheMemory::cacheAvail(const Address& address) const
{
assert(address == line_address(address));
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
Index cacheSet = addressToCacheSet(address);
for (int i=0; i < m_cache_assoc; i++) {
AbstractCacheEntry* entry = m_cache[cacheSet][i];
if (entry != NULL) {
if (entry->m_Address == address || // Already in the cache
entry->m_Permission == AccessPermission_NotPresent) { // We found an empty entry
return true;
}
} else {
return true;
for (int i = 0; i < m_cache_assoc; i++) {
AbstractCacheEntry* entry = m_cache[cacheSet][i];
if (entry != NULL) {
if (entry->m_Address == address ||
entry->m_Permission == AccessPermission_NotPresent) {
// Already in the cache or we found an empty entry
return true;
}
} else {
return true;
}
}
}
return false;
return false;
}
void CacheMemory::allocate(const Address& address, AbstractCacheEntry* entry)
void
CacheMemory::allocate(const Address& address, AbstractCacheEntry* entry)
{
assert(address == line_address(address));
assert(!isTagPresent(address));
assert(cacheAvail(address));
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
assert(address == line_address(address));
assert(!isTagPresent(address));
assert(cacheAvail(address));
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
// Find the first open slot
Index cacheSet = addressToCacheSet(address);
for (int i=0; i < m_cache_assoc; i++) {
if (m_cache[cacheSet][i] == NULL ||
m_cache[cacheSet][i]->m_Permission == AccessPermission_NotPresent) {
m_cache[cacheSet][i] = entry; // Init entry
m_cache[cacheSet][i]->m_Address = address;
m_cache[cacheSet][i]->m_Permission = AccessPermission_Invalid;
DPRINTF(RubyCache, "Allocate clearing lock for addr: %llx\n", address);
m_locked[cacheSet][i] = -1;
m_tag_index[address] = i;
// Find the first open slot
Index cacheSet = addressToCacheSet(address);
Vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
for (int i = 0; i < m_cache_assoc; i++) {
if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
set[i] = entry; // Init entry
set[i]->m_Address = address;
set[i]->m_Permission = AccessPermission_Invalid;
DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n",
address);
m_locked[cacheSet][i] = -1;
m_tag_index[address] = i;
m_replacementPolicy_ptr->touch(cacheSet, i, g_eventQueue_ptr->getTime());
m_replacementPolicy_ptr->
touch(cacheSet, i, g_eventQueue_ptr->getTime());
return;
return;
}
}
}
ERROR_MSG("Allocate didn't find an available entry");
ERROR_MSG("Allocate didn't find an available entry");
}
void CacheMemory::deallocate(const Address& address)
void
CacheMemory::deallocate(const Address& address)
{
assert(address == line_address(address));
assert(isTagPresent(address));
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
Index cacheSet = addressToCacheSet(address);
int location = findTagInSet(cacheSet, address);
if (location != -1){
delete m_cache[cacheSet][location];
m_cache[cacheSet][location] = NULL;
DPRINTF(RubyCache, "Deallocate clearing lock for addr: %llx\n", address);
m_locked[cacheSet][location] = -1;
m_tag_index.erase(address);
}
assert(address == line_address(address));
assert(isTagPresent(address));
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (loc != -1) {
delete m_cache[cacheSet][loc];
m_cache[cacheSet][loc] = NULL;
DPRINTF(RubyCache, "Deallocate clearing lock for addr: %x\n",
address);
m_locked[cacheSet][loc] = -1;
m_tag_index.erase(address);
}
}
// Returns with the physical address of the conflicting cache line
Address CacheMemory::cacheProbe(const Address& address) const
Address
CacheMemory::cacheProbe(const Address& address) const
{
assert(address == line_address(address));
assert(!cacheAvail(address));
assert(address == line_address(address));
assert(!cacheAvail(address));
Index cacheSet = addressToCacheSet(address);
return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]->m_Address;
Index cacheSet = addressToCacheSet(address);
return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]->
m_Address;
}
// looks an address up in the cache
AbstractCacheEntry& CacheMemory::lookup(const Address& address)
AbstractCacheEntry&
CacheMemory::lookup(const Address& address)
{
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
return *m_cache[cacheSet][loc];
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
return *m_cache[cacheSet][loc];
}
// looks an address up in the cache
const AbstractCacheEntry& CacheMemory::lookup(const Address& address) const
const AbstractCacheEntry&
CacheMemory::lookup(const Address& address) const
{
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
return *m_cache[cacheSet][loc];
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
return *m_cache[cacheSet][loc];
}
AccessPermission CacheMemory::getPermission(const Address& address) const
AccessPermission
CacheMemory::getPermission(const Address& address) const
{
assert(address == line_address(address));
return lookup(address).m_Permission;
assert(address == line_address(address));
return lookup(address).m_Permission;
}
void CacheMemory::changePermission(const Address& address, AccessPermission new_perm)
void
CacheMemory::changePermission(const Address& address,
AccessPermission new_perm)
{
assert(address == line_address(address));
lookup(address).m_Permission = new_perm;
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (new_perm != AccessPermission_Read_Write) {
DPRINTF(RubyCache, "Permission clearing lock for addr: %llx\n", address);
m_locked[cacheSet][loc] = -1;
}
assert(getPermission(address) == new_perm);
assert(address == line_address(address));
lookup(address).m_Permission = new_perm;
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (new_perm != AccessPermission_Read_Write) {
DPRINTF(RubyCache, "Permission clearing lock for addr: %x\n", address);
m_locked[cacheSet][loc] = -1;
}
assert(getPermission(address) == new_perm);
}
// Sets the most recently used bit for a cache block
void CacheMemory::setMRU(const Address& address)
void
CacheMemory::setMRU(const Address& address)
{
Index cacheSet;
Index cacheSet;
cacheSet = addressToCacheSet(address);
m_replacementPolicy_ptr->touch(cacheSet,
findTagInSet(cacheSet, address),
g_eventQueue_ptr->getTime());
cacheSet = addressToCacheSet(address);
m_replacementPolicy_ptr->
touch(cacheSet, findTagInSet(cacheSet, address),
g_eventQueue_ptr->getTime());
}
void CacheMemory::profileMiss(const CacheMsg & msg)
void
CacheMemory::profileMiss(const CacheMsg& msg)
{
m_profiler_ptr->addStatSample(msg.getType(), msg.getAccessMode(),
msg.getSize(), msg.getPrefetch());
m_profiler_ptr->addStatSample(msg.getType(), msg.getAccessMode(),
msg.getSize(), msg.getPrefetch());
}
void CacheMemory::recordCacheContents(CacheRecorder& tr) const
void
CacheMemory::recordCacheContents(CacheRecorder& tr) const
{
for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {
AccessPermission perm = m_cache[i][j]->m_Permission;
CacheRequestType request_type = CacheRequestType_NULL;
if (perm == AccessPermission_Read_Only) {
if (m_is_instruction_only_cache) {
request_type = CacheRequestType_IFETCH;
} else {
request_type = CacheRequestType_LD;
for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {
AccessPermission perm = m_cache[i][j]->m_Permission;
CacheRequestType request_type = CacheRequestType_NULL;
if (perm == AccessPermission_Read_Only) {
if (m_is_instruction_only_cache) {
request_type = CacheRequestType_IFETCH;
} else {
request_type = CacheRequestType_LD;
}
} else if (perm == AccessPermission_Read_Write) {
request_type = CacheRequestType_ST;
}
if (request_type != CacheRequestType_NULL) {
#if 0
tr.addRecord(m_chip_ptr->getID(), m_cache[i][j].m_Address,
Address(0), request_type,
m_replacementPolicy_ptr->getLastAccess(i, j));
#endif
}
}
} else if (perm == AccessPermission_Read_Write) {
request_type = CacheRequestType_ST;
}
if (request_type != CacheRequestType_NULL) {
// tr.addRecord(m_chip_ptr->getID(), m_cache[i][j].m_Address,
// Address(0), request_type, m_replacementPolicy_ptr->getLastAccess(i, j));
}
}
}
}
void CacheMemory::print(ostream& out) const
void
CacheMemory::print(ostream& out) const
{
out << "Cache dump: " << m_cache_name << endl;
for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {
if (m_cache[i][j] != NULL) {
out << " Index: " << i
<< " way: " << j
<< " entry: " << *m_cache[i][j] << endl;
} else {
out << " Index: " << i
<< " way: " << j
<< " entry: NULL" << endl;
}
out << "Cache dump: " << m_cache_name << endl;
for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {
if (m_cache[i][j] != NULL) {
out << " Index: " << i
<< " way: " << j
<< " entry: " << *m_cache[i][j] << endl;
} else {
out << " Index: " << i
<< " way: " << j
<< " entry: NULL" << endl;
}
}
}
}
}
void CacheMemory::printData(ostream& out) const
void
CacheMemory::printData(ostream& out) const
{
out << "printData() not supported" << endl;
out << "printData() not supported" << endl;
}
void CacheMemory::clearStats() const
void
CacheMemory::clearStats() const
{
m_profiler_ptr->clearStats();
m_profiler_ptr->clearStats();
}
void CacheMemory::printStats(ostream& out) const
void
CacheMemory::printStats(ostream& out) const
{
m_profiler_ptr->printStats(out);
m_profiler_ptr->printStats(out);
}
void CacheMemory::getMemoryValue(const Address& addr, char* value,
unsigned int size_in_bytes ){
AbstractCacheEntry& entry = lookup(line_address(addr));
unsigned int startByte = addr.getAddress() - line_address(addr).getAddress();
for(unsigned int i=0; i<size_in_bytes; ++i){
value[i] = entry.getDataBlk().getByte(i + startByte);
}
}
void CacheMemory::setMemoryValue(const Address& addr, char* value,
unsigned int size_in_bytes ){
AbstractCacheEntry& entry = lookup(line_address(addr));
unsigned int startByte = addr.getAddress() - line_address(addr).getAddress();
assert(size_in_bytes > 0);
for(unsigned int i=0; i<size_in_bytes; ++i){
entry.getDataBlk().setByte(i + startByte, value[i]);
}
// entry = lookup(line_address(addr));
}
void
CacheMemory::setLocked(const Address& address, int context)
{
DPRINTF(RubyCache,
"Setting Lock for addr: %llx to %d\n",
address,
context);
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
m_locked[cacheSet][loc] = context;
}
void
CacheMemory::clearLocked(const Address& address)
void
CacheMemory::getMemoryValue(const Address& addr, char* value,
unsigned size_in_bytes)
{
DPRINTF(RubyCache, "Clear Lock for addr: %llx\n", address);
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
m_locked[cacheSet][loc] = -1;
AbstractCacheEntry& entry = lookup(line_address(addr));
unsigned startByte = addr.getAddress() - line_address(addr).getAddress();
for (unsigned i = 0; i < size_in_bytes; ++i) {
value[i] = entry.getDataBlk().getByte(i + startByte);
}
}
void
CacheMemory::setMemoryValue(const Address& addr, char* value,
unsigned size_in_bytes)
{
AbstractCacheEntry& entry = lookup(line_address(addr));
unsigned startByte = addr.getAddress() - line_address(addr).getAddress();
assert(size_in_bytes > 0);
for (unsigned i = 0; i < size_in_bytes; ++i) {
entry.getDataBlk().setByte(i + startByte, value[i]);
}
// entry = lookup(line_address(addr));
}
void
CacheMemory::setLocked(const Address& address, int context)
{
DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", address, context);
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
m_locked[cacheSet][loc] = context;
}
void
CacheMemory::clearLocked(const Address& address)
{
DPRINTF(RubyCache, "Clear Lock for addr: %x\n", address);
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
m_locked[cacheSet][loc] = -1;
}
bool
CacheMemory::isLocked(const Address& address, int context)
{
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
DPRINTF(RubyCache,
"Testing Lock for addr: %llx cur %d con %d\n",
address,
m_locked[cacheSet][loc],
context);
return m_locked[cacheSet][loc] == context;
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
address, m_locked[cacheSet][loc], context);
return m_locked[cacheSet][loc] == context;
}

View File

@@ -26,151 +26,144 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* CacheMemory.hh
*
* Description:
*
* $Id: CacheMemory.hh,v 3.7 2004/06/18 20:15:15 beckmann Exp $
*
*/
#ifndef __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__
#define __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__
#ifndef CACHEMEMORY_H
#define CACHEMEMORY_H
#include "sim/sim_object.hh"
#include "params/RubyCache.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/recorder/CacheRecorder.hh"
#include "mem/protocol/CacheRequestType.hh"
#include "mem/gems_common/Vector.hh"
#include "mem/ruby/common/DataBlock.hh"
#include "mem/protocol/MachineType.hh"
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
#include "mem/ruby/system/PseudoLRUPolicy.hh"
#include "mem/ruby/system/LRUPolicy.hh"
#include "mem/ruby/slicc_interface/AbstractCacheEntry.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/profiler/CacheProfiler.hh"
#include "mem/protocol/CacheMsg.hh"
#include "base/hashmap.hh"
#include <vector>
class CacheMemory : public SimObject {
public:
#include "base/hashmap.hh"
#include "mem/gems_common/Vector.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/CacheRequestType.hh"
#include "mem/protocol/MachineType.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/DataBlock.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/CacheProfiler.hh"
#include "mem/ruby/recorder/CacheRecorder.hh"
#include "mem/ruby/slicc_interface/AbstractCacheEntry.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
#include "mem/ruby/system/LRUPolicy.hh"
#include "mem/ruby/system/PseudoLRUPolicy.hh"
#include "mem/ruby/system/System.hh"
#include "params/RubyCache.hh"
#include "sim/sim_object.hh"
class CacheMemory : public SimObject
{
public:
typedef RubyCacheParams Params;
// Constructors
CacheMemory(const Params *p);
// CacheMemory(const string & name);
void init();
CacheMemory(const Params *p);
~CacheMemory();
// Destructor
~CacheMemory();
void init();
// Public Methods
void printConfig(ostream& out);
// Public Methods
void printConfig(ostream& out);
// perform a cache access and see if we hit or not. Return true on a hit.
bool tryCacheAccess(const Address& address, CacheRequestType type, DataBlock*& data_ptr);
// perform a cache access and see if we hit or not. Return true on a hit.
bool tryCacheAccess(const Address& address, CacheRequestType type,
DataBlock*& data_ptr);
// similar to above, but doesn't require full access check
bool testCacheAccess(const Address& address, CacheRequestType type, DataBlock*& data_ptr);
// similar to above, but doesn't require full access check
bool testCacheAccess(const Address& address, CacheRequestType type,
DataBlock*& data_ptr);
// tests to see if an address is present in the cache
bool isTagPresent(const Address& address) const;
// tests to see if an address is present in the cache
bool isTagPresent(const Address& address) const;
// Returns true if there is:
// a) a tag match on this address or there is
// b) an unused line in the same cache "way"
bool cacheAvail(const Address& address) const;
// Returns true if there is:
// a) a tag match on this address or there is
// b) an unused line in the same cache "way"
bool cacheAvail(const Address& address) const;
// find an unused entry and sets the tag appropriate for the address
void allocate(const Address& address, AbstractCacheEntry* new_entry);
// find an unused entry and sets the tag appropriate for the address
void allocate(const Address& address, AbstractCacheEntry* new_entry);
// Explicitly free up this address
void deallocate(const Address& address);
// Explicitly free up this address
void deallocate(const Address& address);
// Returns with the physical address of the conflicting cache line
Address cacheProbe(const Address& address) const;
// Returns with the physical address of the conflicting cache line
Address cacheProbe(const Address& address) const;
// looks an address up in the cache
AbstractCacheEntry& lookup(const Address& address);
const AbstractCacheEntry& lookup(const Address& address) const;
// looks an address up in the cache
AbstractCacheEntry& lookup(const Address& address);
const AbstractCacheEntry& lookup(const Address& address) const;
// Get/Set permission of cache block
AccessPermission getPermission(const Address& address) const;
void changePermission(const Address& address, AccessPermission new_perm);
// Get/Set permission of cache block
AccessPermission getPermission(const Address& address) const;
void changePermission(const Address& address, AccessPermission new_perm);
int getLatency() const { return m_latency; }
int getLatency() const { return m_latency; }
// Hook for checkpointing the contents of the cache
void recordCacheContents(CacheRecorder& tr) const;
void setAsInstructionCache(bool is_icache) { m_is_instruction_only_cache = is_icache; }
// Hook for checkpointing the contents of the cache
void recordCacheContents(CacheRecorder& tr) const;
void
setAsInstructionCache(bool is_icache)
{
m_is_instruction_only_cache = is_icache;
}
// Set this address to most recently used
void setMRU(const Address& address);
// Set this address to most recently used
void setMRU(const Address& address);
void profileMiss(const CacheMsg & msg);
void profileMiss(const CacheMsg & msg);
void getMemoryValue(const Address& addr, char* value,
unsigned int size_in_bytes );
void setMemoryValue(const Address& addr, char* value,
unsigned int size_in_bytes );
void getMemoryValue(const Address& addr, char* value,
unsigned int size_in_bytes);
void setMemoryValue(const Address& addr, char* value,
unsigned int size_in_bytes);
void setLocked (const Address& addr, int context);
void clearLocked (const Address& addr);
bool isLocked (const Address& addr, int context);
// Print cache contents
void print(ostream& out) const;
void printData(ostream& out) const;
void setLocked (const Address& addr, int context);
void clearLocked (const Address& addr);
bool isLocked (const Address& addr, int context);
// Print cache contents
void print(ostream& out) const;
void printData(ostream& out) const;
void clearStats() const;
void printStats(ostream& out) const;
void clearStats() const;
void printStats(ostream& out) const;
private:
// Private Methods
private:
// convert a Address to its location in the cache
Index addressToCacheSet(const Address& address) const;
// convert a Address to its location in the cache
Index addressToCacheSet(const Address& address) const;
// Given a cache tag: returns the index of the tag in a set.
// returns -1 if the tag is not found.
int findTagInSet(Index line, const Address& tag) const;
int findTagInSetIgnorePermissions(Index cacheSet,
const Address& tag) const;
// Given a cache tag: returns the index of the tag in a set.
// returns -1 if the tag is not found.
int findTagInSet(Index line, const Address& tag) const;
int findTagInSetIgnorePermissions(Index cacheSet, const Address& tag) const;
// Private copy constructor and assignment operator
CacheMemory(const CacheMemory& obj);
CacheMemory& operator=(const CacheMemory& obj);
// Private copy constructor and assignment operator
CacheMemory(const CacheMemory& obj);
CacheMemory& operator=(const CacheMemory& obj);
private:
const string m_cache_name;
int m_latency;
private:
const string m_cache_name;
int m_latency;
// Data Members (m_prefix)
bool m_is_instruction_only_cache;
bool m_is_data_only_cache;
// Data Members (m_prefix)
bool m_is_instruction_only_cache;
bool m_is_data_only_cache;
// The first index is the # of cache lines.
// The second index is the the amount associativity.
m5::hash_map<Address, int> m_tag_index;
Vector<Vector<AbstractCacheEntry*> > m_cache;
Vector<Vector<int> > m_locked;
// The first index is the # of cache lines.
// The second index is the the amount associativity.
m5::hash_map<Address, int> m_tag_index;
Vector<Vector<AbstractCacheEntry*> > m_cache;
Vector<Vector<int> > m_locked;
AbstractReplacementPolicy *m_replacementPolicy_ptr;
AbstractReplacementPolicy *m_replacementPolicy_ptr;
CacheProfiler* m_profiler_ptr;
CacheProfiler* m_profiler_ptr;
int m_cache_size;
string m_policy;
int m_cache_num_sets;
int m_cache_num_set_bits;
int m_cache_assoc;
int m_cache_size;
string m_policy;
int m_cache_num_sets;
int m_cache_num_set_bits;
int m_cache_assoc;
};
#endif //CACHEMEMORY_H
#endif // __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__

View File

@@ -26,148 +26,147 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/ruby/system/DMASequencer.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
/* SLICC generated types */
#include "mem/protocol/SequencerMsg.hh"
#include "mem/protocol/SequencerRequestType.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/DMASequencer.hh"
#include "mem/ruby/system/System.hh"
//
// Fix me: This code needs comments!
//
DMASequencer::DMASequencer(const Params *p)
: RubyPort(p)
: RubyPort(p)
{
}
void DMASequencer::init()
void
DMASequencer::init()
{
RubyPort::init();
m_is_busy = false;
m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());
}
RequestStatus DMASequencer::makeRequest(const RubyRequest & request)
{
uint64_t paddr = request.paddr;
uint8_t* data = request.data;
int len = request.len;
bool write = false;
switch(request.type) {
case RubyRequestType_LD:
write = false;
break;
case RubyRequestType_ST:
write = true;
break;
case RubyRequestType_NULL:
case RubyRequestType_IFETCH:
case RubyRequestType_Locked_Read:
case RubyRequestType_Locked_Write:
case RubyRequestType_RMW_Read:
case RubyRequestType_RMW_Write:
case RubyRequestType_NUM:
panic("DMASequencer::makeRequest does not support the RubyRequestType");
return RequestStatus_NULL;
}
assert(!m_is_busy); // only support one outstanding DMA request
m_is_busy = true;
active_request.start_paddr = paddr;
active_request.write = write;
active_request.data = data;
active_request.len = len;
active_request.bytes_completed = 0;
active_request.bytes_issued = 0;
active_request.pkt = request.pkt;
SequencerMsg msg;
msg.getPhysicalAddress() = Address(paddr);
msg.getLineAddress() = line_address(msg.getPhysicalAddress());
msg.getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD;
int offset = paddr & m_data_block_mask;
msg.getLen() = (offset + len) <= RubySystem::getBlockSizeBytes() ?
len :
RubySystem::getBlockSizeBytes() - offset;
if (write) {
msg.getDataBlk().setData(data, offset, msg.getLen());
}
assert(m_mandatory_q_ptr != NULL);
m_mandatory_q_ptr->enqueue(msg);
active_request.bytes_issued += msg.getLen();
return RequestStatus_Issued;
}
void DMASequencer::issueNext()
{
assert(m_is_busy == true);
active_request.bytes_completed = active_request.bytes_issued;
if (active_request.len == active_request.bytes_completed) {
ruby_hit_callback(active_request.pkt);
RubyPort::init();
m_is_busy = false;
return;
}
SequencerMsg msg;
msg.getPhysicalAddress() = Address(active_request.start_paddr +
active_request.bytes_completed);
assert((msg.getPhysicalAddress().getAddress() & m_data_block_mask) == 0);
msg.getLineAddress() = line_address(msg.getPhysicalAddress());
msg.getType() = (active_request.write ? SequencerRequestType_ST :
SequencerRequestType_LD);
msg.getLen() = (active_request.len -
active_request.bytes_completed < RubySystem::getBlockSizeBytes() ?
active_request.len - active_request.bytes_completed :
RubySystem::getBlockSizeBytes());
if (active_request.write) {
msg.getDataBlk().setData(&active_request.data[active_request.bytes_completed],
0, msg.getLen());
msg.getType() = SequencerRequestType_ST;
} else {
msg.getType() = SequencerRequestType_LD;
}
assert(m_mandatory_q_ptr != NULL);
m_mandatory_q_ptr->enqueue(msg);
active_request.bytes_issued += msg.getLen();
m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());
}
void DMASequencer::dataCallback(const DataBlock & dblk)
RequestStatus
DMASequencer::makeRequest(const RubyRequest &request)
{
assert(m_is_busy == true);
int len = active_request.bytes_issued - active_request.bytes_completed;
int offset = 0;
if (active_request.bytes_completed == 0)
offset = active_request.start_paddr & m_data_block_mask;
assert( active_request.write == false );
memcpy(&active_request.data[active_request.bytes_completed],
dblk.getData(offset, len), len);
issueNext();
uint64_t paddr = request.paddr;
uint8_t* data = request.data;
int len = request.len;
bool write = false;
switch(request.type) {
case RubyRequestType_LD:
write = false;
break;
case RubyRequestType_ST:
write = true;
break;
case RubyRequestType_NULL:
case RubyRequestType_IFETCH:
case RubyRequestType_Locked_Read:
case RubyRequestType_Locked_Write:
case RubyRequestType_RMW_Read:
case RubyRequestType_RMW_Write:
case RubyRequestType_NUM:
panic("DMASequencer::makeRequest does not support RubyRequestType");
return RequestStatus_NULL;
}
assert(!m_is_busy); // only support one outstanding DMA request
m_is_busy = true;
active_request.start_paddr = paddr;
active_request.write = write;
active_request.data = data;
active_request.len = len;
active_request.bytes_completed = 0;
active_request.bytes_issued = 0;
active_request.pkt = request.pkt;
SequencerMsg msg;
msg.getPhysicalAddress() = Address(paddr);
msg.getLineAddress() = line_address(msg.getPhysicalAddress());
msg.getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD;
int offset = paddr & m_data_block_mask;
msg.getLen() = (offset + len) <= RubySystem::getBlockSizeBytes() ?
len : RubySystem::getBlockSizeBytes() - offset;
if (write) {
msg.getDataBlk().setData(data, offset, msg.getLen());
}
assert(m_mandatory_q_ptr != NULL);
m_mandatory_q_ptr->enqueue(msg);
active_request.bytes_issued += msg.getLen();
return RequestStatus_Issued;
}
void DMASequencer::ackCallback()
void
DMASequencer::issueNext()
{
issueNext();
assert(m_is_busy == true);
active_request.bytes_completed = active_request.bytes_issued;
if (active_request.len == active_request.bytes_completed) {
ruby_hit_callback(active_request.pkt);
m_is_busy = false;
return;
}
SequencerMsg msg;
msg.getPhysicalAddress() = Address(active_request.start_paddr +
active_request.bytes_completed);
assert((msg.getPhysicalAddress().getAddress() & m_data_block_mask) == 0);
msg.getLineAddress() = line_address(msg.getPhysicalAddress());
msg.getType() = (active_request.write ? SequencerRequestType_ST :
SequencerRequestType_LD);
msg.getLen() =
(active_request.len -
active_request.bytes_completed < RubySystem::getBlockSizeBytes() ?
active_request.len - active_request.bytes_completed :
RubySystem::getBlockSizeBytes());
if (active_request.write) {
msg.getDataBlk().
setData(&active_request.data[active_request.bytes_completed],
0, msg.getLen());
msg.getType() = SequencerRequestType_ST;
} else {
msg.getType() = SequencerRequestType_LD;
}
assert(m_mandatory_q_ptr != NULL);
m_mandatory_q_ptr->enqueue(msg);
active_request.bytes_issued += msg.getLen();
}
void DMASequencer::printConfig(ostream & out)
void
DMASequencer::dataCallback(const DataBlock & dblk)
{
assert(m_is_busy == true);
int len = active_request.bytes_issued - active_request.bytes_completed;
int offset = 0;
if (active_request.bytes_completed == 0)
offset = active_request.start_paddr & m_data_block_mask;
assert(active_request.write == false);
memcpy(&active_request.data[active_request.bytes_completed],
dblk.getData(offset, len), len);
issueNext();
}
void
DMASequencer::ackCallback()
{
issueNext();
}
void
DMASequencer::printConfig(ostream & out)
{
}
DMASequencer *
DMASequencerParams::create()

View File

@@ -26,48 +26,50 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef DMASEQUENCER_H
#define DMASEQUENCER_H
#ifndef __MEM_RUBY_SYSTEM_DMASEQUENCER_HH__
#define __MEM_RUBY_SYSTEM_DMASEQUENCER_HH__
#include <ostream>
#include "mem/ruby/common/DataBlock.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "params/DMASequencer.hh"
struct DMARequest {
uint64_t start_paddr;
int len;
bool write;
int bytes_completed;
int bytes_issued;
uint8* data;
PacketPtr pkt;
struct DMARequest
{
uint64_t start_paddr;
int len;
bool write;
int bytes_completed;
int bytes_issued;
uint8* data;
PacketPtr pkt;
};
class DMASequencer :public RubyPort {
public:
class DMASequencer : public RubyPort
{
public:
typedef DMASequencerParams Params;
DMASequencer(const Params *);
void init();
/* external interface */
RequestStatus makeRequest(const RubyRequest & request);
bool busy() { return m_is_busy;}
DMASequencer(const Params *);
void init();
/* external interface */
RequestStatus makeRequest(const RubyRequest & request);
bool busy() { return m_is_busy;}
/* SLICC callback */
void dataCallback(const DataBlock & dblk);
void ackCallback();
/* SLICC callback */
void dataCallback(const DataBlock & dblk);
void ackCallback();
void printConfig(std::ostream & out);
void printConfig(std::ostream & out);
private:
void issueNext();
private:
void issueNext();
private:
bool m_is_busy;
uint64_t m_data_block_mask;
DMARequest active_request;
int num_active_requests;
private:
bool m_is_busy;
uint64_t m_data_block_mask;
DMARequest active_request;
int num_active_requests;
};
#endif // DMACONTROLLER_H
#endif // __MEM_RUBY_SYSTEM_DMASEQUENCER_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,19 +26,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* DirectoryMemory.cc
*
* Description: See DirectoryMemory.hh
*
* $Id$
*
*/
#include "mem/ruby/system/System.hh"
#include "mem/ruby/system/DirectoryMemory.hh"
#include "mem/ruby/slicc_interface/RubySlicc_Util.hh"
#include "mem/gems_common/util.hh"
#include "mem/ruby/slicc_interface/RubySlicc_Util.hh"
#include "mem/ruby/system/DirectoryMemory.hh"
#include "mem/ruby/system/System.hh"
int DirectoryMemory::m_num_directories = 0;
int DirectoryMemory::m_num_directories_bits = 0;
@@ -58,89 +48,95 @@ DirectoryMemory::DirectoryMemory(const Params *p)
m_numa_high_bit = p->numa_high_bit;
}
void DirectoryMemory::init()
void
DirectoryMemory::init()
{
m_num_entries = m_size_bytes / RubySystem::getBlockSizeBytes();
m_num_entries = m_size_bytes / RubySystem::getBlockSizeBytes();
if (m_use_map) {
int entry_bits = log_int(m_num_entries);
assert(entry_bits >= m_map_levels);
m_sparseMemory = new SparseMemory(entry_bits, m_map_levels);
} else {
m_entries = new Directory_Entry*[m_num_entries];
for (int i=0; i < m_num_entries; i++)
m_entries[i] = NULL;
m_ram = g_system_ptr->getMemoryVector();
}
if (m_use_map) {
int entry_bits = log_int(m_num_entries);
assert(entry_bits >= m_map_levels);
m_sparseMemory = new SparseMemory(entry_bits, m_map_levels);
} else {
m_entries = new Directory_Entry*[m_num_entries];
for (int i = 0; i < m_num_entries; i++)
m_entries[i] = NULL;
m_ram = g_system_ptr->getMemoryVector();
}
m_num_directories++;
m_num_directories_bits = log_int(m_num_directories);
m_total_size_bytes += m_size_bytes;
m_num_directories++;
m_num_directories_bits = log_int(m_num_directories);
m_total_size_bytes += m_size_bytes;
if (m_numa_high_bit == 0) {
m_numa_high_bit = RubySystem::getMemorySizeBits();
}
assert(m_numa_high_bit != 0);
if (m_numa_high_bit == 0) {
m_numa_high_bit = RubySystem::getMemorySizeBits();
}
assert(m_numa_high_bit != 0);
}
DirectoryMemory::~DirectoryMemory()
{
// free up all the directory entries
if (m_entries != NULL) {
for (uint64 i = 0; i < m_num_entries; i++) {
if (m_entries[i] != NULL) {
delete m_entries[i];
}
}
delete [] m_entries;
} else if (m_use_map) {
delete m_sparseMemory;
}
// free up all the directory entries
if (m_entries != NULL) {
for (uint64 i = 0; i < m_num_entries; i++) {
if (m_entries[i] != NULL) {
delete m_entries[i];
}
}
delete [] m_entries;
} else if (m_use_map) {
delete m_sparseMemory;
}
}
void DirectoryMemory::printConfig(ostream& out) const
void
DirectoryMemory::printConfig(ostream& out) const
{
out << "DirectoryMemory module config: " << m_name << endl;
out << " version: " << m_version << endl;
out << " memory_bits: " << m_size_bits << endl;
out << " memory_size_bytes: " << m_size_bytes << endl;
out << " memory_size_Kbytes: " << double(m_size_bytes) / (1<<10) << endl;
out << " memory_size_Mbytes: " << double(m_size_bytes) / (1<<20) << endl;
out << " memory_size_Gbytes: " << double(m_size_bytes) / (1<<30) << endl;
out << "DirectoryMemory module config: " << m_name << endl
<< " version: " << m_version << endl
<< " memory_bits: " << m_size_bits << endl
<< " memory_size_bytes: " << m_size_bytes << endl
<< " memory_size_Kbytes: " << double(m_size_bytes) / (1<<10) << endl
<< " memory_size_Mbytes: " << double(m_size_bytes) / (1<<20) << endl
<< " memory_size_Gbytes: " << double(m_size_bytes) / (1<<30) << endl;
}
// Static method
void DirectoryMemory::printGlobalConfig(ostream & out)
void
DirectoryMemory::printGlobalConfig(ostream & out)
{
out << "DirectoryMemory Global Config: " << endl;
out << " number of directory memories: " << m_num_directories << endl;
if (m_num_directories > 1) {
out << " number of selection bits: " << m_num_directories_bits << endl;
out << " selection bits: " << m_numa_high_bit
<< "-" << m_numa_high_bit-m_num_directories_bits
<< endl;
}
out << " total memory size bytes: " << m_total_size_bytes << endl;
out << " total memory bits: " << log_int(m_total_size_bytes) << endl;
out << "DirectoryMemory Global Config: " << endl;
out << " number of directory memories: " << m_num_directories << endl;
if (m_num_directories > 1) {
out << " number of selection bits: " << m_num_directories_bits << endl
<< " selection bits: " << m_numa_high_bit
<< "-" << m_numa_high_bit-m_num_directories_bits
<< endl;
}
out << " total memory size bytes: " << m_total_size_bytes << endl;
out << " total memory bits: " << log_int(m_total_size_bytes) << endl;
}
uint64 DirectoryMemory::mapAddressToDirectoryVersion(PhysAddress address)
uint64
DirectoryMemory::mapAddressToDirectoryVersion(PhysAddress address)
{
if (m_num_directories_bits == 0) return 0;
uint64 ret = address.bitSelect(m_numa_high_bit - m_num_directories_bits,
m_numa_high_bit);
return ret;
if (m_num_directories_bits == 0)
return 0;
uint64 ret = address.bitSelect(m_numa_high_bit - m_num_directories_bits,
m_numa_high_bit);
return ret;
}
// Public method
bool DirectoryMemory::isPresent(PhysAddress address)
bool
DirectoryMemory::isPresent(PhysAddress address)
{
bool ret = (mapAddressToDirectoryVersion(address) == m_version);
return ret;
bool ret = (mapAddressToDirectoryVersion(address) == m_version);
return ret;
}
uint64 DirectoryMemory::mapAddressToLocalIdx(PhysAddress address)
uint64
DirectoryMemory::mapAddressToLocalIdx(PhysAddress address)
{
uint64 ret = address.bitRemove(m_numa_high_bit - m_num_directories_bits,
m_numa_high_bit)
@@ -148,98 +144,99 @@ uint64 DirectoryMemory::mapAddressToLocalIdx(PhysAddress address)
return ret;
}
Directory_Entry& DirectoryMemory::lookup(PhysAddress address)
Directory_Entry&
DirectoryMemory::lookup(PhysAddress address)
{
assert(isPresent(address));
Directory_Entry* entry;
uint64 idx;
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
if (m_use_map) {
if (m_sparseMemory->exist(address)) {
entry = m_sparseMemory->lookup(address);
assert(entry != NULL);
} else {
//
// Note: SparseMemory internally creates a new Directory Entry
//
m_sparseMemory->add(address);
entry = m_sparseMemory->lookup(address);
}
} else {
idx = mapAddressToLocalIdx(address);
assert(idx < m_num_entries);
entry = m_entries[idx];
if (entry == NULL) {
entry = new Directory_Entry();
entry->getDataBlk().assign(m_ram->getBlockPtr(address));
m_entries[idx] = entry;
}
}
return (*entry);
}
/*
Directory_Entry& DirectoryMemory::lookup(PhysAddress address)
{
assert(isPresent(address));
Index index = address.memoryModuleIndex();
if (index < 0 || index > m_size) {
WARN_EXPR(address.getAddress());
WARN_EXPR(index);
WARN_EXPR(m_size);
ERROR_MSG("Directory Memory Assertion: accessing memory out of range.");
}
Directory_Entry* entry = m_entries[index];
// allocate the directory entry on demand.
if (entry == NULL) {
entry = new Directory_Entry;
entry->getDataBlk().assign(m_ram->getBlockPtr(address));
// store entry to the table
m_entries[index] = entry;
}
return (*entry);
}
*/
void DirectoryMemory::invalidateBlock(PhysAddress address)
{
if (m_use_map) {
assert(m_sparseMemory->exist(address));
m_sparseMemory->remove(address);
}
/*
else {
assert(isPresent(address));
Index index = address.memoryModuleIndex();
if (index < 0 || index > m_size) {
ERROR_MSG("Directory Memory Assertion: accessing memory out of range.");
Directory_Entry* entry;
uint64 idx;
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
if (m_use_map) {
if (m_sparseMemory->exist(address)) {
entry = m_sparseMemory->lookup(address);
assert(entry != NULL);
} else {
// Note: SparseMemory internally creates a new Directory Entry
m_sparseMemory->add(address);
entry = m_sparseMemory->lookup(address);
}
} else {
idx = mapAddressToLocalIdx(address);
assert(idx < m_num_entries);
entry = m_entries[idx];
if (entry == NULL) {
entry = new Directory_Entry();
entry->getDataBlk().assign(m_ram->getBlockPtr(address));
m_entries[idx] = entry;
}
}
if(m_entries[index] != NULL){
delete m_entries[index];
m_entries[index] = NULL;
}
}
*/
return *entry;
}
void DirectoryMemory::print(ostream& out) const
#if 0
Directory_Entry&
DirectoryMemory::lookup(PhysAddress address)
{
assert(isPresent(address));
Index index = address.memoryModuleIndex();
if (index < 0 || index > m_size) {
WARN_EXPR(address.getAddress());
WARN_EXPR(index);
WARN_EXPR(m_size);
ERROR_MSG("Directory Memory Assertion: accessing memory out of range");
}
Directory_Entry* entry = m_entries[index];
// allocate the directory entry on demand.
if (entry == NULL) {
entry = new Directory_Entry;
entry->getDataBlk().assign(m_ram->getBlockPtr(address));
// store entry to the table
m_entries[index] = entry;
}
return *entry;
}
#endif
void
DirectoryMemory::invalidateBlock(PhysAddress address)
{
if (m_use_map) {
assert(m_sparseMemory->exist(address));
m_sparseMemory->remove(address);
}
#if 0
else {
assert(isPresent(address));
Index index = address.memoryModuleIndex();
if (index < 0 || index > m_size) {
ERROR_MSG("Directory Memory Assertion: "
"accessing memory out of range.");
}
if (m_entries[index] != NULL){
delete m_entries[index];
m_entries[index] = NULL;
}
}
#endif
}
void DirectoryMemory::printStats(ostream& out) const
void
DirectoryMemory::print(ostream& out) const
{
}
void
DirectoryMemory::printStats(ostream& out) const
{
if (m_use_map) {
m_sparseMemory->printStats(out);

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,94 +26,74 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* DirectoryMemory.hh
*
* Description:
*
* $Id$
*
*/
#ifndef __MEM_RUBY_SYSTEM_DIRECTORYMEMORY_HH__
#define __MEM_RUBY_SYSTEM_DIRECTORYMEMORY_HH__
#ifndef DIRECTORYMEMORY_H
#define DIRECTORYMEMORY_H
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/system/MemoryVector.hh"
#include "mem/protocol/Directory_Entry.hh"
#include "sim/sim_object.hh"
#include "params/RubyDirectoryMemory.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/MemoryVector.hh"
#include "mem/ruby/system/SparseMemory.hh"
#include "params/RubyDirectoryMemory.hh"
#include "sim/sim_object.hh"
class DirectoryMemory : public SimObject {
public:
// Constructors
class DirectoryMemory : public SimObject
{
public:
typedef RubyDirectoryMemoryParams Params;
DirectoryMemory(const Params *p);
void init();
// DirectoryMemory(int version);
~DirectoryMemory();
// Destructor
~DirectoryMemory();
void init();
uint64 mapAddressToLocalIdx(PhysAddress address);
static uint64 mapAddressToDirectoryVersion(PhysAddress address);
uint64 mapAddressToLocalIdx(PhysAddress address);
static uint64 mapAddressToDirectoryVersion(PhysAddress address);
bool isSparseImplementation() { return m_use_map; }
uint64 getSize() { return m_size_bytes; }
bool isSparseImplementation() { return m_use_map; }
uint64 getSize() { return m_size_bytes; }
// Public Methods
void printConfig(ostream& out) const;
static void printGlobalConfig(ostream & out);
bool isPresent(PhysAddress address);
Directory_Entry& lookup(PhysAddress address);
void printConfig(ostream& out) const;
static void printGlobalConfig(ostream & out);
bool isPresent(PhysAddress address);
Directory_Entry& lookup(PhysAddress address);
void invalidateBlock(PhysAddress address);
void invalidateBlock(PhysAddress address);
void print(ostream& out) const;
void printStats(ostream& out) const;
void print(ostream& out) const;
void printStats(ostream& out) const;
private:
// Private Methods
private:
// Private copy constructor and assignment operator
DirectoryMemory(const DirectoryMemory& obj);
DirectoryMemory& operator=(const DirectoryMemory& obj);
// Private copy constructor and assignment operator
DirectoryMemory(const DirectoryMemory& obj);
DirectoryMemory& operator=(const DirectoryMemory& obj);
private:
const string m_name;
Directory_Entry **m_entries;
// int m_size; // # of memory module blocks this directory is
// responsible for
uint64 m_size_bytes;
uint64 m_size_bits;
uint64 m_num_entries;
int m_version;
private:
const string m_name;
// Data Members (m_ prefix)
Directory_Entry **m_entries;
// int m_size; // # of memory module blocks this directory is responsible for
uint64 m_size_bytes;
uint64 m_size_bits;
uint64 m_num_entries;
int m_version;
static int m_num_directories;
static int m_num_directories_bits;
static uint64_t m_total_size_bytes;
static int m_numa_high_bit;
static int m_num_directories;
static int m_num_directories_bits;
static uint64_t m_total_size_bytes;
static int m_numa_high_bit;
MemoryVector* m_ram;
SparseMemory* m_sparseMemory;
bool m_use_map;
int m_map_levels;
MemoryVector* m_ram;
SparseMemory* m_sparseMemory;
bool m_use_map;
int m_map_levels;
};
// Output operator declaration
ostream& operator<<(ostream& out, const DirectoryMemory& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const DirectoryMemory& obj)
inline ostream&
operator<<(ostream& out, const DirectoryMemory& obj)
{
obj.print(out);
out << flush;
return out;
obj.print(out);
out << flush;
return out;
}
#endif //DIRECTORYMEMORY_H
#endif // __MEM_RUBY_SYSTEM_DIRECTORYMEMORY_HH__

View File

@@ -26,26 +26,26 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LRUPOLICY_H
#define LRUPOLICY_H
#ifndef __MEM_RUBY_SYSTEM_LRUPOLICY_HH__
#define __MEM_RUBY_SYSTEM_LRUPOLICY_HH__
#include "mem/ruby/system/AbstractReplacementPolicy.hh"
/* Simple true LRU replacement policy */
class LRUPolicy : public AbstractReplacementPolicy {
public:
class LRUPolicy : public AbstractReplacementPolicy
{
public:
LRUPolicy(Index num_sets, Index assoc);
~LRUPolicy();
LRUPolicy(Index num_sets, Index assoc);
~LRUPolicy();
void touch(Index set, Index way, Time time);
Index getVictim(Index set) const;
void touch(Index set, Index way, Time time);
Index getVictim(Index set) const;
};
inline
LRUPolicy::LRUPolicy(Index num_sets, Index assoc)
: AbstractReplacementPolicy(num_sets, assoc)
: AbstractReplacementPolicy(num_sets, assoc)
{
}
@@ -54,39 +54,42 @@ LRUPolicy::~LRUPolicy()
{
}
inline
void LRUPolicy::touch(Index set, Index index, Time time){
assert(index >= 0 && index < m_assoc);
assert(set >= 0 && set < m_num_sets);
inline void
LRUPolicy::touch(Index set, Index index, Time time)
{
assert(index >= 0 && index < m_assoc);
assert(set >= 0 && set < m_num_sets);
m_last_ref_ptr[set][index] = time;
m_last_ref_ptr[set][index] = time;
}
inline
Index LRUPolicy::getVictim(Index set) const {
// assert(m_assoc != 0);
Time time, smallest_time;
Index smallest_index;
inline Index
LRUPolicy::getVictim(Index set) const
{
// assert(m_assoc != 0);
Time time, smallest_time;
Index smallest_index;
smallest_index = 0;
smallest_time = m_last_ref_ptr[set][0];
smallest_index = 0;
smallest_time = m_last_ref_ptr[set][0];
for (unsigned int i=0; i < m_assoc; i++) {
time = m_last_ref_ptr[set][i];
//assert(m_cache[cacheSet][i].m_Permission != AccessPermission_NotPresent);
for (unsigned i = 0; i < m_assoc; i++) {
time = m_last_ref_ptr[set][i];
// assert(m_cache[cacheSet][i].m_Permission !=
// AccessPermission_NotPresent);
if (time < smallest_time){
smallest_index = i;
smallest_time = time;
if (time < smallest_time) {
smallest_index = i;
smallest_time = time;
}
}
}
// DEBUG_EXPR(CACHE_COMP, MedPrio, cacheSet);
// DEBUG_EXPR(CACHE_COMP, MedPrio, smallest_index);
// DEBUG_EXPR(CACHE_COMP, MedPrio, m_cache[cacheSet][smallest_index]);
// DEBUG_EXPR(CACHE_COMP, MedPrio, *this);
// DEBUG_EXPR(CACHE_COMP, MedPrio, cacheSet);
// DEBUG_EXPR(CACHE_COMP, MedPrio, smallest_index);
// DEBUG_EXPR(CACHE_COMP, MedPrio, m_cache[cacheSet][smallest_index]);
// DEBUG_EXPR(CACHE_COMP, MedPrio, *this);
return smallest_index;
return smallest_index;
}
#endif // PSEUDOLRUBITS_H
#endif // __MEM_RUBY_SYSTEM_LRUPOLICY_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,66 +26,55 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NodeID.hh
*
* Description:
*
* $Id$
*
*/
#ifndef MACHINEID_H
#define MACHINEID_H
#ifndef __MEM_RUBY_SYSTEM_MACHINEID_HH__
#define __MEM_RUBY_SYSTEM_MACHINEID_HH__
#include <iostream>
#include <string>
#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/util.hh"
#include "mem/protocol/MachineType.hh"
#include "mem/ruby/common/Global.hh"
struct MachineID {
MachineType type;
int num; // range: 0 ... number of this machine's components in the system - 1
struct MachineID
{
MachineType type;
int num; // range: 0 ... number of this machine's components in system - 1
};
extern inline
std::string MachineIDToString (MachineID machine) {
return MachineType_to_string(machine.type)+"_"+int_to_string(machine.num);
inline std::string
MachineIDToString(MachineID machine)
{
return MachineType_to_string(machine.type)+"_"+int_to_string(machine.num);
}
extern inline
bool operator==(const MachineID & obj1, const MachineID & obj2)
inline bool
operator==(const MachineID & obj1, const MachineID & obj2)
{
return (obj1.type == obj2.type && obj1.num == obj2.num);
return (obj1.type == obj2.type && obj1.num == obj2.num);
}
extern inline
bool operator!=(const MachineID & obj1, const MachineID & obj2)
inline bool
operator!=(const MachineID & obj1, const MachineID & obj2)
{
return (obj1.type != obj2.type || obj1.num != obj2.num);
return (obj1.type != obj2.type || obj1.num != obj2.num);
}
// Output operator declaration
std::ostream& operator<<(std::ostream& out, const MachineID& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
std::ostream& operator<<(std::ostream& out, const MachineID& obj)
inline std::ostream&
operator<<(std::ostream& out, const MachineID& obj)
{
if ((obj.type < MachineType_NUM) && (obj.type >= MachineType_FIRST)) {
out << MachineType_to_string(obj.type);
} else {
out << "NULL";
}
out << "-";
out << obj.num;
out << std::flush;
return out;
if ((obj.type < MachineType_NUM) && (obj.type >= MachineType_FIRST)) {
out << MachineType_to_string(obj.type);
} else {
out << "NULL";
}
out << "-";
out << obj.num;
out << std::flush;
return out;
}
#endif //MACHINEID_H
#endif // __MEM_RUBY_SYSTEM_MACHINEID_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,8 +27,6 @@
*/
/*
* MemoryControl.cc
*
* Description: This module simulates a basic DDR-style memory controller
* (and can easily be extended to do FB-DIMM as well).
*
@@ -105,25 +102,21 @@
* then no more than four activates may happen within any 16 cycle window.
* Refreshes are included in the activates.
*
*
* $Id: $
*
*/
#include "mem/ruby/common/Global.hh"
#include <list>
#include "base/cprintf.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
#include "mem/ruby/slicc_interface/NetworkMessage.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/NetworkMessage.hh"
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
#include "mem/ruby/system/MemoryControl.hh"
#include <list>
#include "mem/ruby/system/System.hh"
class Consumer;
@@ -140,11 +133,12 @@ class Consumer;
// Output operator definition
ostream& operator<<(ostream& out, const MemoryControl& obj)
ostream&
operator<<(ostream& out, const MemoryControl& obj)
{
obj.print(out);
out << flush;
return out;
obj.print(out);
out << flush;
return out;
}
@@ -178,471 +172,509 @@ MemoryControl::MemoryControl(const Params *p)
m_dimms_per_channel);
}
void MemoryControl::init()
void
MemoryControl::init()
{
m_msg_counter = 0;
m_msg_counter = 0;
m_debug = 0;
m_debug = 0;
assert(m_tFaw <= 62); // must fit in a uint64 shift register
assert(m_tFaw <= 62); // must fit in a uint64 shift register
m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
m_refresh_period_system = m_refresh_period / m_total_banks;
m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
m_refresh_period_system = m_refresh_period / m_total_banks;
m_bankQueues = new list<MemoryNode> [m_total_banks];
assert(m_bankQueues);
m_bankQueues = new list<MemoryNode> [m_total_banks];
assert(m_bankQueues);
m_bankBusyCounter = new int [m_total_banks];
assert(m_bankBusyCounter);
m_bankBusyCounter = new int [m_total_banks];
assert(m_bankBusyCounter);
m_oldRequest = new int [m_total_banks];
assert(m_oldRequest);
m_oldRequest = new int [m_total_banks];
assert(m_oldRequest);
for (int i=0; i<m_total_banks; i++) {
m_bankBusyCounter[i] = 0;
m_oldRequest[i] = 0;
}
for (int i = 0; i < m_total_banks; i++) {
m_bankBusyCounter[i] = 0;
m_oldRequest[i] = 0;
}
m_busBusyCounter_Basic = 0;
m_busBusyCounter_Write = 0;
m_busBusyCounter_ReadNewRank = 0;
m_busBusy_WhichRank = 0;
m_busBusyCounter_Basic = 0;
m_busBusyCounter_Write = 0;
m_busBusyCounter_ReadNewRank = 0;
m_busBusy_WhichRank = 0;
m_roundRobin = 0;
m_refresh_count = 1;
m_need_refresh = 0;
m_refresh_bank = 0;
m_awakened = 0;
m_idleCount = 0;
m_ageCounter = 0;
m_roundRobin = 0;
m_refresh_count = 1;
m_need_refresh = 0;
m_refresh_bank = 0;
m_awakened = 0;
m_idleCount = 0;
m_ageCounter = 0;
// Each tfaw shift register keeps a moving bit pattern
// which shows when recent activates have occurred.
// m_tfaw_count keeps track of how many 1 bits are set
// in each shift register. When m_tfaw_count is >= 4,
// new activates are not allowed.
m_tfaw_shift = new uint64 [m_total_ranks];
m_tfaw_count = new int [m_total_ranks];
for (int i=0; i<m_total_ranks; i++) {
m_tfaw_shift[i] = 0;
m_tfaw_count[i] = 0;
}
// Each tfaw shift register keeps a moving bit pattern
// which shows when recent activates have occurred.
// m_tfaw_count keeps track of how many 1 bits are set
// in each shift register. When m_tfaw_count is >= 4,
// new activates are not allowed.
m_tfaw_shift = new uint64[m_total_ranks];
m_tfaw_count = new int[m_total_ranks];
for (int i = 0; i < m_total_ranks; i++) {
m_tfaw_shift[i] = 0;
m_tfaw_count[i] = 0;
}
}
// DESTRUCTOR
MemoryControl::~MemoryControl () {
delete [] m_bankQueues;
delete [] m_bankBusyCounter;
delete [] m_oldRequest;
delete m_profiler_ptr;
MemoryControl::~MemoryControl()
{
delete [] m_bankQueues;
delete [] m_bankBusyCounter;
delete [] m_oldRequest;
delete m_profiler_ptr;
}
// PUBLIC METHODS
// enqueue new request from directory
void MemoryControl::enqueue (const MsgPtr& message, int latency) {
Time current_time = g_eventQueue_ptr->getTime();
Time arrival_time = current_time + latency;
const MemoryMsg* memMess = dynamic_cast<const MemoryMsg*>(message.ref());
physical_address_t addr = memMess->getAddress().getAddress();
MemoryRequestType type = memMess->getType();
bool is_mem_read = (type == MemoryRequestType_MEMORY_READ);
MemoryNode thisReq(arrival_time, message, addr, is_mem_read, !is_mem_read);
enqueueMemRef(thisReq);
void
MemoryControl::enqueue(const MsgPtr& message, int latency)
{
Time current_time = g_eventQueue_ptr->getTime();
Time arrival_time = current_time + latency;
const MemoryMsg* memMess = dynamic_cast<const MemoryMsg*>(message.ref());
physical_address_t addr = memMess->getAddress().getAddress();
MemoryRequestType type = memMess->getType();
bool is_mem_read = (type == MemoryRequestType_MEMORY_READ);
MemoryNode thisReq(arrival_time, message, addr, is_mem_read, !is_mem_read);
enqueueMemRef(thisReq);
}
// Alternate entry point used when we already have a MemoryNode structure built.
// Alternate entry point used when we already have a MemoryNode
// structure built.
void
MemoryControl::enqueueMemRef(MemoryNode& memRef)
{
m_msg_counter++;
memRef.m_msg_counter = m_msg_counter;
Time arrival_time = memRef.m_time;
uint64 at = arrival_time;
bool is_mem_read = memRef.m_is_mem_read;
physical_address_t addr = memRef.m_addr;
int bank = getBank(addr);
if (m_debug) {
cprintf("New memory request%7d: %#08x %c arrived at %10d bank = %3x\n",
m_msg_counter, addr, is_mem_read? 'R':'W', at, bank);
}
void MemoryControl::enqueueMemRef (MemoryNode& memRef) {
m_msg_counter++;
memRef.m_msg_counter = m_msg_counter;
Time arrival_time = memRef.m_time;
uint64 at = arrival_time;
bool is_mem_read = memRef.m_is_mem_read;
physical_address_t addr = memRef.m_addr;
int bank = getBank(addr);
if (m_debug) {
printf("New memory request%7d: 0x%08llx %c arrived at %10lld ", m_msg_counter, addr, is_mem_read? 'R':'W', at);
printf("bank =%3x\n", bank);
}
m_profiler_ptr->profileMemReq(bank);
m_input_queue.push_back(memRef);
if (!m_awakened) {
g_eventQueue_ptr->scheduleEvent(this, 1);
m_awakened = 1;
}
m_profiler_ptr->profileMemReq(bank);
m_input_queue.push_back(memRef);
if (!m_awakened) {
g_eventQueue_ptr->scheduleEvent(this, 1);
m_awakened = 1;
}
}
// dequeue, peek, and isReady are used to transfer completed requests
// back to the directory
void MemoryControl::dequeue () {
assert(isReady());
m_response_queue.pop_front();
}
const Message* MemoryControl::peek () {
MemoryNode node = peekNode();
Message* msg_ptr = node.m_msgptr.ref();
assert(msg_ptr != NULL);
return msg_ptr;
}
MemoryNode MemoryControl::peekNode () {
assert(isReady());
MemoryNode req = m_response_queue.front();
uint64 returnTime = req.m_time;
if (m_debug) {
printf("Old memory request%7d: 0x%08llx %c peeked at %10lld\n",
req.m_msg_counter, req.m_addr, req.m_is_mem_read? 'R':'W', returnTime);
}
return req;
}
bool MemoryControl::isReady () {
return ((!m_response_queue.empty()) &&
(m_response_queue.front().m_time <= g_eventQueue_ptr->getTime()));
}
void MemoryControl::setConsumer (Consumer* consumer_ptr) {
m_consumer_ptr = consumer_ptr;
}
void MemoryControl::print (ostream& out) const {
}
void MemoryControl::printConfig (ostream& out) {
out << "Memory Control " << name() << ":" << endl;
out << " Ruby cycles per memory cycle: " << m_mem_bus_cycle_multiplier << endl;
out << " Basic read latency: " << m_mem_ctl_latency << endl;
if (m_mem_fixed_delay) {
out << " Fixed Latency mode: Added cycles = " << m_mem_fixed_delay << endl;
} else {
out << " Bank busy time: " << m_bank_busy_time << " memory cycles" << endl;
out << " Memory channel busy time: " << m_basic_bus_busy_time << endl;
out << " Dead cycles between reads to different ranks: " << m_rank_rank_delay << endl;
out << " Dead cycle between a read and a write: " << m_read_write_delay << endl;
out << " tFaw (four-activate) window: " << m_tFaw << endl;
}
out << " Banks per rank: " << m_banks_per_rank << endl;
out << " Ranks per DIMM: " << m_ranks_per_dimm << endl;
out << " DIMMs per channel: " << m_dimms_per_channel << endl;
out << " LSB of bank field in address: " << m_bank_bit_0 << endl;
out << " LSB of rank field in address: " << m_rank_bit_0 << endl;
out << " LSB of DIMM field in address: " << m_dimm_bit_0 << endl;
out << " Max size of each bank queue: " << m_bank_queue_size << endl;
out << " Refresh period (within one bank): " << m_refresh_period << endl;
out << " Arbitration randomness: " << m_mem_random_arbitrate << endl;
}
void MemoryControl::setDebug (int debugFlag) {
m_debug = debugFlag;
}
void MemoryControl::clearStats() const
void
MemoryControl::dequeue()
{
m_profiler_ptr->clearStats();
assert(isReady());
m_response_queue.pop_front();
}
void MemoryControl::printStats(ostream& out) const
const Message*
MemoryControl::peek()
{
m_profiler_ptr->printStats(out);
MemoryNode node = peekNode();
Message* msg_ptr = node.m_msgptr.ref();
assert(msg_ptr != NULL);
return msg_ptr;
}
MemoryNode
MemoryControl::peekNode()
{
assert(isReady());
MemoryNode req = m_response_queue.front();
uint64 returnTime = req.m_time;
if (m_debug) {
cprintf("Old memory request%7d: %#08x %c peeked at %10d\n",
req.m_msg_counter, req.m_addr, req.m_is_mem_read ? 'R':'W',
returnTime);
}
return req;
}
// ****************************************************************
bool
MemoryControl::isReady()
{
return ((!m_response_queue.empty()) &&
(m_response_queue.front().m_time <= g_eventQueue_ptr->getTime()));
}
// PRIVATE METHODS
void
MemoryControl::setConsumer(Consumer* consumer_ptr)
{
m_consumer_ptr = consumer_ptr;
}
void
MemoryControl::print(ostream& out) const
{
}
void
MemoryControl::printConfig(ostream& out)
{
out << "Memory Control " << name() << ":" << endl;
out << " Ruby cycles per memory cycle: " << m_mem_bus_cycle_multiplier
<< endl;
out << " Basic read latency: " << m_mem_ctl_latency << endl;
if (m_mem_fixed_delay) {
out << " Fixed Latency mode: Added cycles = " << m_mem_fixed_delay
<< endl;
} else {
out << " Bank busy time: " << m_bank_busy_time << " memory cycles"
<< endl;
out << " Memory channel busy time: " << m_basic_bus_busy_time << endl;
out << " Dead cycles between reads to different ranks: "
<< m_rank_rank_delay << endl;
out << " Dead cycle between a read and a write: "
<< m_read_write_delay << endl;
out << " tFaw (four-activate) window: " << m_tFaw << endl;
}
out << " Banks per rank: " << m_banks_per_rank << endl;
out << " Ranks per DIMM: " << m_ranks_per_dimm << endl;
out << " DIMMs per channel: " << m_dimms_per_channel << endl;
out << " LSB of bank field in address: " << m_bank_bit_0 << endl;
out << " LSB of rank field in address: " << m_rank_bit_0 << endl;
out << " LSB of DIMM field in address: " << m_dimm_bit_0 << endl;
out << " Max size of each bank queue: " << m_bank_queue_size << endl;
out << " Refresh period (within one bank): " << m_refresh_period << endl;
out << " Arbitration randomness: " << m_mem_random_arbitrate << endl;
}
void
MemoryControl::setDebug(int debugFlag)
{
m_debug = debugFlag;
}
void
MemoryControl::clearStats() const
{
m_profiler_ptr->clearStats();
}
void
MemoryControl::printStats(ostream& out) const
{
m_profiler_ptr->printStats(out);
}
// Queue up a completed request to send back to directory
void
MemoryControl::enqueueToDirectory(MemoryNode req, int latency)
{
Time arrival_time = g_eventQueue_ptr->getTime()
+ (latency * m_mem_bus_cycle_multiplier);
req.m_time = arrival_time;
m_response_queue.push_back(req);
void MemoryControl::enqueueToDirectory (MemoryNode req, int latency) {
Time arrival_time = g_eventQueue_ptr->getTime()
+ (latency * m_mem_bus_cycle_multiplier);
req.m_time = arrival_time;
m_response_queue.push_back(req);
// schedule the wake up
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, arrival_time);
// schedule the wake up
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, arrival_time);
}
// getBank returns an integer that is unique for each
// bank across this memory controller.
int MemoryControl::getBank (physical_address_t addr) {
int dimm = (addr >> m_dimm_bit_0) & (m_dimms_per_channel - 1);
int rank = (addr >> m_rank_bit_0) & (m_ranks_per_dimm - 1);
int bank = (addr >> m_bank_bit_0) & (m_banks_per_rank - 1);
return (dimm * m_ranks_per_dimm * m_banks_per_rank)
+ (rank * m_banks_per_rank)
+ bank;
int
MemoryControl::getBank(physical_address_t addr)
{
int dimm = (addr >> m_dimm_bit_0) & (m_dimms_per_channel - 1);
int rank = (addr >> m_rank_bit_0) & (m_ranks_per_dimm - 1);
int bank = (addr >> m_bank_bit_0) & (m_banks_per_rank - 1);
return (dimm * m_ranks_per_dimm * m_banks_per_rank)
+ (rank * m_banks_per_rank)
+ bank;
}
// getRank returns an integer that is unique for each rank
// and independent of individual bank.
int MemoryControl::getRank (int bank) {
int rank = (bank / m_banks_per_rank);
assert (rank < (m_ranks_per_dimm * m_dimms_per_channel));
return rank;
int
MemoryControl::getRank(int bank)
{
int rank = (bank / m_banks_per_rank);
assert (rank < (m_ranks_per_dimm * m_dimms_per_channel));
return rank;
}
// queueReady determines if the head item in a bank queue
// can be issued this cycle
bool MemoryControl::queueReady (int bank) {
if ((m_bankBusyCounter[bank] > 0) && !m_mem_fixed_delay) {
m_profiler_ptr->profileMemBankBusy();
//if (m_debug) printf(" bank %x busy %d\n", bank, m_bankBusyCounter[bank]);
return false;
}
if (m_mem_random_arbitrate >= 2) {
if ((random() % 100) < m_mem_random_arbitrate) {
m_profiler_ptr->profileMemRandBusy();
return false;
bool
MemoryControl::queueReady(int bank)
{
if ((m_bankBusyCounter[bank] > 0) && !m_mem_fixed_delay) {
m_profiler_ptr->profileMemBankBusy();
#if 0
if (m_debug)
printf(" bank %x busy %d\n", bank, m_bankBusyCounter[bank]);
#endif
return false;
}
}
if (m_mem_fixed_delay) return true;
if ((m_ageCounter > (2 * m_bank_busy_time)) && !m_oldRequest[bank]) {
m_profiler_ptr->profileMemNotOld();
return false;
}
if (m_busBusyCounter_Basic == m_basic_bus_busy_time) {
// Another bank must have issued this same cycle.
// For profiling, we count this as an arb wait rather than
// a bus wait. This is a little inaccurate since it MIGHT
// have also been blocked waiting for a read-write or a
// read-read instead, but it's pretty close.
m_profiler_ptr->profileMemArbWait(1);
return false;
}
if (m_busBusyCounter_Basic > 0) {
m_profiler_ptr->profileMemBusBusy();
return false;
}
int rank = getRank(bank);
if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) {
m_profiler_ptr->profileMemTfawBusy();
return false;
}
bool write = !m_bankQueues[bank].front().m_is_mem_read;
if (write && (m_busBusyCounter_Write > 0)) {
m_profiler_ptr->profileMemReadWriteBusy();
return false;
}
if (!write && (rank != m_busBusy_WhichRank)
&& (m_busBusyCounter_ReadNewRank > 0)) {
m_profiler_ptr->profileMemDataBusBusy();
return false;
}
return true;
}
if (m_mem_random_arbitrate >= 2) {
if ((random() % 100) < m_mem_random_arbitrate) {
m_profiler_ptr->profileMemRandBusy();
return false;
}
}
if (m_mem_fixed_delay)
return true;
if ((m_ageCounter > (2 * m_bank_busy_time)) && !m_oldRequest[bank]) {
m_profiler_ptr->profileMemNotOld();
return false;
}
if (m_busBusyCounter_Basic == m_basic_bus_busy_time) {
// Another bank must have issued this same cycle. For
// profiling, we count this as an arb wait rather than a bus
// wait. This is a little inaccurate since it MIGHT have also
// been blocked waiting for a read-write or a read-read
// instead, but it's pretty close.
m_profiler_ptr->profileMemArbWait(1);
return false;
}
if (m_busBusyCounter_Basic > 0) {
m_profiler_ptr->profileMemBusBusy();
return false;
}
int rank = getRank(bank);
if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) {
m_profiler_ptr->profileMemTfawBusy();
return false;
}
bool write = !m_bankQueues[bank].front().m_is_mem_read;
if (write && (m_busBusyCounter_Write > 0)) {
m_profiler_ptr->profileMemReadWriteBusy();
return false;
}
if (!write && (rank != m_busBusy_WhichRank)
&& (m_busBusyCounter_ReadNewRank > 0)) {
m_profiler_ptr->profileMemDataBusBusy();
return false;
}
return true;
}
// issueRefresh checks to see if this bank has a refresh scheduled
// and, if so, does the refresh and returns true
bool
MemoryControl::issueRefresh(int bank)
{
if (!m_need_refresh || (m_refresh_bank != bank))
return false;
if (m_bankBusyCounter[bank] > 0)
return false;
// Note that m_busBusyCounter will prevent multiple issues during
// the same cycle, as well as on different but close cycles:
if (m_busBusyCounter_Basic > 0)
return false;
int rank = getRank(bank);
if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW)
return false;
bool MemoryControl::issueRefresh (int bank) {
if (!m_need_refresh || (m_refresh_bank != bank)) return false;
if (m_bankBusyCounter[bank] > 0) return false;
// Note that m_busBusyCounter will prevent multiple issues during
// the same cycle, as well as on different but close cycles:
if (m_busBusyCounter_Basic > 0) return false;
int rank = getRank(bank);
if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) return false;
// Issue it:
#if 0
if (m_debug) {
uint64 current_time = g_eventQueue_ptr->getTime();
printf(" Refresh bank %3x at %lld\n", bank, current_time);
}
#endif
// Issue it:
//if (m_debug) {
//uint64 current_time = g_eventQueue_ptr->getTime();
//printf(" Refresh bank %3x at %lld\n", bank, current_time);
//}
m_profiler_ptr->profileMemRefresh();
m_need_refresh--;
m_refresh_bank++;
if (m_refresh_bank >= m_total_banks) m_refresh_bank = 0;
m_bankBusyCounter[bank] = m_bank_busy_time;
m_busBusyCounter_Basic = m_basic_bus_busy_time;
m_busBusyCounter_Write = m_basic_bus_busy_time;
m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
markTfaw(rank);
return true;
}
// Mark the activate in the tFaw shift register
void MemoryControl::markTfaw (int rank) {
if (m_tFaw) {
m_tfaw_shift[rank] |= (1 << (m_tFaw-1));
m_tfaw_count[rank]++;
}
}
// Issue a memory request: Activate the bank,
// reserve the address and data buses, and queue
// the request for return to the requesting
// processor after a fixed latency.
void MemoryControl::issueRequest (int bank) {
int rank = getRank(bank);
MemoryNode req = m_bankQueues[bank].front();
m_bankQueues[bank].pop_front();
if (m_debug) {
uint64 current_time = g_eventQueue_ptr->getTime();
printf(" Mem issue request%7d: 0x%08llx %c at %10lld bank =%3x\n",
req.m_msg_counter, req.m_addr, req.m_is_mem_read? 'R':'W', current_time, bank);
}
if (req.m_msgptr.ref() != NULL) { // don't enqueue L3 writebacks
enqueueToDirectory(req, m_mem_ctl_latency + m_mem_fixed_delay);
}
m_oldRequest[bank] = 0;
markTfaw(rank);
m_bankBusyCounter[bank] = m_bank_busy_time;
m_busBusy_WhichRank = rank;
if (req.m_is_mem_read) {
m_profiler_ptr->profileMemRead();
m_busBusyCounter_Basic = m_basic_bus_busy_time;
m_busBusyCounter_Write = m_basic_bus_busy_time + m_read_write_delay;
m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time + m_rank_rank_delay;
} else {
m_profiler_ptr->profileMemWrite();
m_profiler_ptr->profileMemRefresh();
m_need_refresh--;
m_refresh_bank++;
if (m_refresh_bank >= m_total_banks)
m_refresh_bank = 0;
m_bankBusyCounter[bank] = m_bank_busy_time;
m_busBusyCounter_Basic = m_basic_bus_busy_time;
m_busBusyCounter_Write = m_basic_bus_busy_time;
m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
}
markTfaw(rank);
return true;
}
// Mark the activate in the tFaw shift register
void
MemoryControl::markTfaw(int rank)
{
if (m_tFaw) {
m_tfaw_shift[rank] |= (1 << (m_tFaw-1));
m_tfaw_count[rank]++;
}
}
// Issue a memory request: Activate the bank, reserve the address and
// data buses, and queue the request for return to the requesting
// processor after a fixed latency.
void
MemoryControl::issueRequest(int bank)
{
int rank = getRank(bank);
MemoryNode req = m_bankQueues[bank].front();
m_bankQueues[bank].pop_front();
if (m_debug) {
uint64 current_time = g_eventQueue_ptr->getTime();
cprintf(" Mem issue request%7d: %#08x %c at %10d "
"bank=%3x\n",
req.m_msg_counter, req.m_addr, req.m_is_mem_read? 'R':'W',
current_time, bank);
}
if (req.m_msgptr.ref() != NULL) { // don't enqueue L3 writebacks
enqueueToDirectory(req, m_mem_ctl_latency + m_mem_fixed_delay);
}
m_oldRequest[bank] = 0;
markTfaw(rank);
m_bankBusyCounter[bank] = m_bank_busy_time;
m_busBusy_WhichRank = rank;
if (req.m_is_mem_read) {
m_profiler_ptr->profileMemRead();
m_busBusyCounter_Basic = m_basic_bus_busy_time;
m_busBusyCounter_Write = m_basic_bus_busy_time + m_read_write_delay;
m_busBusyCounter_ReadNewRank =
m_basic_bus_busy_time + m_rank_rank_delay;
} else {
m_profiler_ptr->profileMemWrite();
m_busBusyCounter_Basic = m_basic_bus_busy_time;
m_busBusyCounter_Write = m_basic_bus_busy_time;
m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
}
}
// executeCycle: This function is called once per memory clock cycle
// to simulate all the periodic hardware.
void MemoryControl::executeCycle () {
// Keep track of time by counting down the busy counters:
for (int bank=0; bank < m_total_banks; bank++) {
if (m_bankBusyCounter[bank] > 0) m_bankBusyCounter[bank]--;
}
if (m_busBusyCounter_Write > 0) m_busBusyCounter_Write--;
if (m_busBusyCounter_ReadNewRank > 0) m_busBusyCounter_ReadNewRank--;
if (m_busBusyCounter_Basic > 0) m_busBusyCounter_Basic--;
// Count down the tFAW shift registers:
for (int rank=0; rank < m_total_ranks; rank++) {
if (m_tfaw_shift[rank] & 1) m_tfaw_count[rank]--;
m_tfaw_shift[rank] >>= 1;
}
// After time period expires, latch an indication that we need a refresh.
// Disable refresh if in mem_fixed_delay mode.
if (!m_mem_fixed_delay) m_refresh_count--;
if (m_refresh_count == 0) {
m_refresh_count = m_refresh_period_system;
assert (m_need_refresh < 10); // Are we overrunning our ability to refresh?
m_need_refresh++;
}
// If this batch of requests is all done, make a new batch:
m_ageCounter++;
int anyOld = 0;
for (int bank=0; bank < m_total_banks; bank++) {
anyOld |= m_oldRequest[bank];
}
if (!anyOld) {
void
MemoryControl::executeCycle()
{
// Keep track of time by counting down the busy counters:
for (int bank=0; bank < m_total_banks; bank++) {
if (!m_bankQueues[bank].empty()) m_oldRequest[bank] = 1;
if (m_bankBusyCounter[bank] > 0) m_bankBusyCounter[bank]--;
}
m_ageCounter = 0;
}
if (m_busBusyCounter_Write > 0)
m_busBusyCounter_Write--;
if (m_busBusyCounter_ReadNewRank > 0)
m_busBusyCounter_ReadNewRank--;
if (m_busBusyCounter_Basic > 0)
m_busBusyCounter_Basic--;
// If randomness desired, re-randomize round-robin position each cycle
if (m_mem_random_arbitrate) {
m_roundRobin = random() % m_total_banks;
}
// For each channel, scan round-robin, and pick an old, ready
// request and issue it. Treat a refresh request as if it
// were at the head of its bank queue. After we issue something,
// keep scanning the queues just to gather statistics about
// how many are waiting. If in mem_fixed_delay mode, we can issue
// more than one request per cycle.
int queueHeads = 0;
int banksIssued = 0;
for (int i = 0; i < m_total_banks; i++) {
m_roundRobin++;
if (m_roundRobin >= m_total_banks) m_roundRobin = 0;
issueRefresh(m_roundRobin);
int qs = m_bankQueues[m_roundRobin].size();
if (qs > 1) {
m_profiler_ptr->profileMemBankQ(qs-1);
// Count down the tFAW shift registers:
for (int rank=0; rank < m_total_ranks; rank++) {
if (m_tfaw_shift[rank] & 1) m_tfaw_count[rank]--;
m_tfaw_shift[rank] >>= 1;
}
if (qs > 0) {
m_idleCount = IDLECOUNT_MAX_VALUE; // we're not idle if anything is queued
queueHeads++;
if (queueReady(m_roundRobin)) {
issueRequest(m_roundRobin);
banksIssued++;
if (m_mem_fixed_delay) {
m_profiler_ptr->profileMemWaitCycles(m_mem_fixed_delay);
// After time period expires, latch an indication that we need a refresh.
// Disable refresh if in mem_fixed_delay mode.
if (!m_mem_fixed_delay) m_refresh_count--;
if (m_refresh_count == 0) {
m_refresh_count = m_refresh_period_system;
// Are we overrunning our ability to refresh?
assert(m_need_refresh < 10);
m_need_refresh++;
}
// If this batch of requests is all done, make a new batch:
m_ageCounter++;
int anyOld = 0;
for (int bank=0; bank < m_total_banks; bank++) {
anyOld |= m_oldRequest[bank];
}
if (!anyOld) {
for (int bank=0; bank < m_total_banks; bank++) {
if (!m_bankQueues[bank].empty()) m_oldRequest[bank] = 1;
}
}
m_ageCounter = 0;
}
}
// memWaitCycles is a redundant catch-all for the specific counters in queueReady
m_profiler_ptr->profileMemWaitCycles(queueHeads - banksIssued);
// Check input queue and move anything to bank queues if not full.
// Since this is done here at the end of the cycle, there will always
// be at least one cycle of latency in the bank queue.
// We deliberately move at most one request per cycle (to simulate
// typical hardware). Note that if one bank queue fills up, other
// requests can get stuck behind it here.
if (!m_input_queue.empty()) {
m_idleCount = IDLECOUNT_MAX_VALUE; // we're not idle if anything is pending
MemoryNode req = m_input_queue.front();
int bank = getBank(req.m_addr);
if (m_bankQueues[bank].size() < m_bank_queue_size) {
m_input_queue.pop_front();
m_bankQueues[bank].push_back(req);
// If randomness desired, re-randomize round-robin position each cycle
if (m_mem_random_arbitrate) {
m_roundRobin = random() % m_total_banks;
}
// For each channel, scan round-robin, and pick an old, ready
// request and issue it. Treat a refresh request as if it were at
// the head of its bank queue. After we issue something, keep
// scanning the queues just to gather statistics about how many
// are waiting. If in mem_fixed_delay mode, we can issue more
// than one request per cycle.
int queueHeads = 0;
int banksIssued = 0;
for (int i = 0; i < m_total_banks; i++) {
m_roundRobin++;
if (m_roundRobin >= m_total_banks) m_roundRobin = 0;
issueRefresh(m_roundRobin);
int qs = m_bankQueues[m_roundRobin].size();
if (qs > 1) {
m_profiler_ptr->profileMemBankQ(qs-1);
}
if (qs > 0) {
// we're not idle if anything is queued
m_idleCount = IDLECOUNT_MAX_VALUE;
queueHeads++;
if (queueReady(m_roundRobin)) {
issueRequest(m_roundRobin);
banksIssued++;
if (m_mem_fixed_delay) {
m_profiler_ptr->profileMemWaitCycles(m_mem_fixed_delay);
}
}
}
}
// memWaitCycles is a redundant catch-all for the specific
// counters in queueReady
m_profiler_ptr->profileMemWaitCycles(queueHeads - banksIssued);
// Check input queue and move anything to bank queues if not full.
// Since this is done here at the end of the cycle, there will
// always be at least one cycle of latency in the bank queue. We
// deliberately move at most one request per cycle (to simulate
// typical hardware). Note that if one bank queue fills up, other
// requests can get stuck behind it here.
if (!m_input_queue.empty()) {
// we're not idle if anything is pending
m_idleCount = IDLECOUNT_MAX_VALUE;
MemoryNode req = m_input_queue.front();
int bank = getBank(req.m_addr);
if (m_bankQueues[bank].size() < m_bank_queue_size) {
m_input_queue.pop_front();
m_bankQueues[bank].push_back(req);
}
m_profiler_ptr->profileMemInputQ(m_input_queue.size());
}
m_profiler_ptr->profileMemInputQ(m_input_queue.size());
}
}
// wakeup: This function is called once per memory controller clock cycle.
void
MemoryControl::wakeup()
{
// execute everything
executeCycle();
void MemoryControl::wakeup () {
// execute everything
executeCycle();
m_idleCount--;
if (m_idleCount <= 0) {
m_awakened = 0;
} else {
// Reschedule ourselves so that we run every memory cycle:
g_eventQueue_ptr->scheduleEvent(this, m_mem_bus_cycle_multiplier);
}
m_idleCount--;
if (m_idleCount <= 0) {
m_awakened = 0;
} else {
// Reschedule ourselves so that we run every memory cycle:
g_eventQueue_ptr->scheduleEvent(this, m_mem_bus_cycle_multiplier);
}
}
MemoryControl *

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,35 +26,25 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* MemoryControl.hh
*
* Description: See MemoryControl.cc
*
* $Id: $
*
*/
#ifndef MEMORY_CONTROL_H
#define MEMORY_CONTROL_H
#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/profiler/MemCntrlProfiler.hh"
#include "mem/ruby/system/System.hh"
#include "mem/ruby/slicc_interface/Message.hh"
#include "mem/gems_common/util.hh"
#include "mem/ruby/system/MemoryNode.hh"
// Note that "MemoryMsg" is in the "generated" directory:
#include "mem/protocol/MemoryMsg.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/system/AbstractMemOrCache.hh"
#include "sim/sim_object.hh"
#include "params/RubyMemoryControl.hh"
#ifndef __MEM_RUBY_SYSTEM_MEMORY_CONTROL_HH__
#define __MEM_RUBY_SYSTEM_MEMORY_CONTROL_HH__
#include <list>
#include "mem/gems_common/Map.hh"
#include "mem/gems_common/util.hh"
#include "mem/protocol/MemoryMsg.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/MemCntrlProfiler.hh"
#include "mem/ruby/slicc_interface/Message.hh"
#include "mem/ruby/system/AbstractMemOrCache.hh"
#include "mem/ruby/system/MemoryNode.hh"
#include "mem/ruby/system/System.hh"
#include "params/RubyMemoryControl.hh"
#include "sim/sim_object.hh"
// This constant is part of the definition of tFAW; see
// the comments in header to MemoryControl.cc
#define ACTIVATE_PER_TFAW 4
@@ -64,124 +53,117 @@
class Consumer;
class MemoryControl : public SimObject, public Consumer, public AbstractMemOrCache {
public:
// Constructors
class MemoryControl :
public SimObject, public Consumer, public AbstractMemOrCache
{
public:
typedef RubyMemoryControlParams Params;
MemoryControl(const Params *p);
void init();
void init();
// Destructor
~MemoryControl ();
~MemoryControl();
// Public Methods
void wakeup();
void wakeup() ;
void setConsumer(Consumer* consumer_ptr);
Consumer* getConsumer() { return m_consumer_ptr; };
void setDescription(const string& name) { m_description = name; };
string getDescription() { return m_description; };
void setConsumer (Consumer* consumer_ptr);
Consumer* getConsumer () { return m_consumer_ptr; };
void setDescription (const string& name) { m_description = name; };
string getDescription () { return m_description; };
// Called from the directory:
void enqueue(const MsgPtr& message, int latency );
void enqueueMemRef(MemoryNode& memRef);
void dequeue();
const Message* peek();
MemoryNode peekNode();
bool isReady();
bool areNSlotsAvailable(int n) { return true; }; // infinite queue length
// Called from the directory:
void enqueue (const MsgPtr& message, int latency );
void enqueueMemRef (MemoryNode& memRef);
void dequeue ();
const Message* peek ();
MemoryNode peekNode ();
bool isReady();
bool areNSlotsAvailable (int n) { return true; }; // infinite queue length
//// Called from L3 cache:
//void writeBack(physical_address_t addr);
//// Called from L3 cache:
//void writeBack(physical_address_t addr);
void printConfig(ostream& out);
void print(ostream& out) const;
void setDebug(int debugFlag);
void clearStats() const;
void printStats(ostream& out) const;
void printConfig (ostream& out);
void print (ostream& out) const;
void setDebug (int debugFlag);
void clearStats() const;
void printStats(ostream& out) const;
//added by SS
int getBanksPerRank() { return m_banks_per_rank; };
int getRanksPerDimm() { return m_ranks_per_dimm; };
int getDimmsPerChannel() { return m_dimms_per_channel; }
private:
void enqueueToDirectory(MemoryNode req, int latency);
int getBank(physical_address_t addr);
int getRank(int bank);
bool queueReady(int bank);
void issueRequest(int bank);
bool issueRefresh(int bank);
void markTfaw(int rank);
void executeCycle();
//added by SS
int getBanksPerRank() { return m_banks_per_rank; };
int getRanksPerDimm() { return m_ranks_per_dimm; };
int getDimmsPerChannel() { return m_dimms_per_channel; }
// Private copy constructor and assignment operator
MemoryControl (const MemoryControl& obj);
MemoryControl& operator=(const MemoryControl& obj);
private:
// data members
Consumer* m_consumer_ptr; // Consumer to signal a wakeup()
string m_description;
int m_msg_counter;
int m_awakened;
void enqueueToDirectory (MemoryNode req, int latency);
int getBank (physical_address_t addr);
int getRank (int bank);
bool queueReady (int bank);
void issueRequest (int bank);
bool issueRefresh (int bank);
void markTfaw (int rank);
void executeCycle ();
int m_mem_bus_cycle_multiplier;
int m_banks_per_rank;
int m_ranks_per_dimm;
int m_dimms_per_channel;
int m_bank_bit_0;
int m_rank_bit_0;
int m_dimm_bit_0;
unsigned int m_bank_queue_size;
int m_bank_busy_time;
int m_rank_rank_delay;
int m_read_write_delay;
int m_basic_bus_busy_time;
int m_mem_ctl_latency;
int m_refresh_period;
int m_mem_random_arbitrate;
int m_tFaw;
int m_mem_fixed_delay;
// Private copy constructor and assignment operator
MemoryControl (const MemoryControl& obj);
MemoryControl& operator=(const MemoryControl& obj);
int m_total_banks;
int m_total_ranks;
int m_refresh_period_system;
// data members
Consumer* m_consumer_ptr; // Consumer to signal a wakeup()
string m_description;
int m_msg_counter;
int m_awakened;
// queues where memory requests live
list<MemoryNode> m_response_queue;
list<MemoryNode> m_input_queue;
list<MemoryNode>* m_bankQueues;
int m_mem_bus_cycle_multiplier;
int m_banks_per_rank;
int m_ranks_per_dimm;
int m_dimms_per_channel;
int m_bank_bit_0;
int m_rank_bit_0;
int m_dimm_bit_0;
unsigned int m_bank_queue_size;
int m_bank_busy_time;
int m_rank_rank_delay;
int m_read_write_delay;
int m_basic_bus_busy_time;
int m_mem_ctl_latency;
int m_refresh_period;
int m_mem_random_arbitrate;
int m_tFaw;
int m_mem_fixed_delay;
// Each entry indicates number of address-bus cycles until bank
// is reschedulable:
int* m_bankBusyCounter;
int* m_oldRequest;
int m_total_banks;
int m_total_ranks;
int m_refresh_period_system;
uint64* m_tfaw_shift;
int* m_tfaw_count;
// queues where memory requests live
// Each of these indicates number of address-bus cycles until
// we can issue a new request of the corresponding type:
int m_busBusyCounter_Write;
int m_busBusyCounter_ReadNewRank;
int m_busBusyCounter_Basic;
list<MemoryNode> m_response_queue;
list<MemoryNode> m_input_queue;
list<MemoryNode>* m_bankQueues;
int m_busBusy_WhichRank; // which rank last granted
int m_roundRobin; // which bank queue was last granted
int m_refresh_count; // cycles until next refresh
int m_need_refresh; // set whenever m_refresh_count goes to zero
int m_refresh_bank; // which bank to refresh next
int m_ageCounter; // age of old requests; to detect starvation
int m_idleCount; // watchdog timer for shutting down
int m_debug; // turn on printf's
// Each entry indicates number of address-bus cycles until bank
// is reschedulable:
int* m_bankBusyCounter;
int* m_oldRequest;
uint64* m_tfaw_shift;
int* m_tfaw_count;
// Each of these indicates number of address-bus cycles until
// we can issue a new request of the corresponding type:
int m_busBusyCounter_Write;
int m_busBusyCounter_ReadNewRank;
int m_busBusyCounter_Basic;
int m_busBusy_WhichRank; // which rank last granted
int m_roundRobin; // which bank queue was last granted
int m_refresh_count; // cycles until next refresh
int m_need_refresh; // set whenever m_refresh_count goes to zero
int m_refresh_bank; // which bank to refresh next
int m_ageCounter; // age of old requests; to detect starvation
int m_idleCount; // watchdog timer for shutting down
int m_debug; // turn on printf's
MemCntrlProfiler* m_profiler_ptr;
MemCntrlProfiler* m_profiler_ptr;
};
#endif // MEMORY_CONTROL_H
#endif // __MEM_RUBY_SYSTEM_MEMORY_CONTROL_HH__

View File

@@ -30,11 +30,12 @@
using namespace std;
void MemoryNode::print(ostream& out) const
void
MemoryNode::print(ostream& out) const
{
out << "[";
out << m_time << ", ";
out << m_msg_counter << ", ";
out << m_msgptr << "; ";
out << "]";
out << "[";
out << m_time << ", ";
out << m_msg_counter << ", ";
out << m_msgptr << "; ";
out << "]";
}

View File

@@ -35,68 +35,59 @@
* message is enqueued to be sent back to the directory.
*/
#ifndef MEMORYNODE_H
#define MEMORYNODE_H
#ifndef __MEM_RUBY_SYSTEM_MEMORYNODE_HH__
#define __MEM_RUBY_SYSTEM_MEMORYNODE_HH__
#include <iostream>
#include "mem/protocol/MemoryRequestType.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/slicc_interface/Message.hh"
#include "mem/protocol/MemoryRequestType.hh"
class MemoryNode {
class MemoryNode
{
public:
// old constructor
MemoryNode(const Time& time, int counter, const MsgPtr& msgptr,
const physical_address_t addr, const bool is_mem_read)
{
m_time = time;
m_msg_counter = counter;
m_msgptr = msgptr;
m_addr = addr;
m_is_mem_read = is_mem_read;
m_is_dirty_wb = !is_mem_read;
}
public:
// Constructors
// new constructor
MemoryNode(const Time& time, const MsgPtr& msgptr,
const physical_address_t addr, const bool is_mem_read,
const bool is_dirty_wb)
{
m_time = time;
m_msg_counter = 0;
m_msgptr = msgptr;
m_addr = addr;
m_is_mem_read = is_mem_read;
m_is_dirty_wb = is_dirty_wb;
}
// old one:
MemoryNode(const Time& time, int counter, const MsgPtr& msgptr, const physical_address_t addr, const bool is_mem_read) {
m_time = time;
m_msg_counter = counter;
m_msgptr = msgptr;
m_addr = addr;
m_is_mem_read = is_mem_read;
m_is_dirty_wb = !is_mem_read;
}
void print(std::ostream& out) const;
// new one:
MemoryNode(const Time& time, const MsgPtr& msgptr, const physical_address_t addr, const bool is_mem_read, const bool is_dirty_wb) {
m_time = time;
m_msg_counter = 0;
m_msgptr = msgptr;
m_addr = addr;
m_is_mem_read = is_mem_read;
m_is_dirty_wb = is_dirty_wb;
}
// Destructor
~MemoryNode() {};
// Public Methods
void print(std::ostream& out) const;
// Data Members (m_ prefix) (all public -- this is really more a struct)
Time m_time;
int m_msg_counter;
MsgPtr m_msgptr;
physical_address_t m_addr;
bool m_is_mem_read;
bool m_is_dirty_wb;
Time m_time;
int m_msg_counter;
MsgPtr m_msgptr;
physical_address_t m_addr;
bool m_is_mem_read;
bool m_is_dirty_wb;
};
// Output operator declaration
std::ostream& operator<<(std::ostream& out, const MemoryNode& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
std::ostream& operator<<(std::ostream& out, const MemoryNode& obj)
inline std::ostream&
operator<<(std::ostream& out, const MemoryNode& obj)
{
obj.print(out);
out << std::flush;
return out;
obj.print(out);
out << std::flush;
return out;
}
#endif //MEMORYNODE_H
#endif // __MEM_RUBY_SYSTEM_MEMORYNODE_HH__

View File

@@ -26,8 +26,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MEMORYVECTOR_H
#define MEMORYVECTOR_H
#ifndef __MEM_RUBY_SYSTEM_MEMORYVECTOR_HH__
#define __MEM_RUBY_SYSTEM_MEMORYVECTOR_HH__
#include "mem/ruby/common/Address.hh"
@@ -36,117 +36,121 @@ class DirectoryMemory;
/**
* MemoryVector holds memory data (DRAM only)
*/
class MemoryVector {
public:
MemoryVector();
MemoryVector(uint32 size);
~MemoryVector();
friend class DirectoryMemory;
class MemoryVector
{
public:
MemoryVector();
MemoryVector(uint32 size);
~MemoryVector();
friend class DirectoryMemory;
void setSize(uint32 size); // destructive
void setSize(uint32 size); // destructive
void write(const Address & paddr, uint8* data, int len);
uint8* read(const Address & paddr, uint8* data, int len);
void write(const Address & paddr, uint8* data, int len);
uint8* read(const Address & paddr, uint8* data, int len);
private:
uint8* getBlockPtr(const PhysAddress & addr);
private:
uint8* getBlockPtr(const PhysAddress & addr);
uint32 m_size;
uint8** m_pages;
uint32 m_num_pages;
const uint32 m_page_offset_mask;
uint32 m_size;
uint8** m_pages;
uint32 m_num_pages;
const uint32 m_page_offset_mask;
};
inline
MemoryVector::MemoryVector()
: m_page_offset_mask(4095)
: m_page_offset_mask(4095)
{
m_size = 0;
m_num_pages = 0;
m_pages = NULL;
m_size = 0;
m_num_pages = 0;
m_pages = NULL;
}
inline
MemoryVector::MemoryVector(uint32 size)
: m_page_offset_mask(4095)
: m_page_offset_mask(4095)
{
setSize(size);
setSize(size);
}
inline
MemoryVector::~MemoryVector()
{
for (int i=0; i<m_num_pages; i++) {
if (m_pages[i] != 0) {
delete [] m_pages[i];
}
}
delete [] m_pages;
}
inline
void MemoryVector::setSize(uint32 size)
{
if (m_pages != NULL){
for (int i=0; i<m_num_pages; i++) {
if (m_pages[i] != 0) {
delete [] m_pages[i];
}
for (int i = 0; i < m_num_pages; i++) {
if (m_pages[i] != 0) {
delete [] m_pages[i];
}
}
delete [] m_pages;
}
m_size = size;
assert(size%4096 == 0);
m_num_pages = size >> 12;
m_pages = new uint8*[m_num_pages];
memset(m_pages, 0, m_num_pages * sizeof(uint8*));
}
inline
void MemoryVector::write(const Address & paddr, uint8* data, int len)
inline void
MemoryVector::setSize(uint32 size)
{
assert(paddr.getAddress() + len <= m_size);
uint32 page_num = paddr.getAddress() >> 12;
if (m_pages[page_num] == 0) {
bool all_zeros = true;
for (int i=0;i<len;i++) {
if (data[i] != 0) {
all_zeros = false;
break;
}
if (m_pages != NULL){
for (int i = 0; i < m_num_pages; i++) {
if (m_pages[i] != 0) {
delete [] m_pages[i];
}
}
delete [] m_pages;
}
if (all_zeros) return;
m_pages[page_num] = new uint8[4096];
memset(m_pages[page_num], 0, 4096);
uint32 offset = paddr.getAddress() & m_page_offset_mask;
memcpy(&m_pages[page_num][offset], data, len);
} else {
memcpy(&m_pages[page_num][paddr.getAddress()&m_page_offset_mask], data, len);
}
m_size = size;
assert(size%4096 == 0);
m_num_pages = size >> 12;
m_pages = new uint8*[m_num_pages];
memset(m_pages, 0, m_num_pages * sizeof(uint8*));
}
inline
uint8* MemoryVector::read(const Address & paddr, uint8* data, int len)
inline void
MemoryVector::write(const Address & paddr, uint8* data, int len)
{
assert(paddr.getAddress() + len <= m_size);
uint32 page_num = paddr.getAddress() >> 12;
if (m_pages[page_num] == 0) {
memset(data, 0, len);
} else {
memcpy(data, &m_pages[page_num][paddr.getAddress()&m_page_offset_mask], len);
}
return data;
assert(paddr.getAddress() + len <= m_size);
uint32 page_num = paddr.getAddress() >> 12;
if (m_pages[page_num] == 0) {
bool all_zeros = true;
for (int i = 0; i < len;i++) {
if (data[i] != 0) {
all_zeros = false;
break;
}
}
if (all_zeros)
return;
m_pages[page_num] = new uint8[4096];
memset(m_pages[page_num], 0, 4096);
uint32 offset = paddr.getAddress() & m_page_offset_mask;
memcpy(&m_pages[page_num][offset], data, len);
} else {
memcpy(&m_pages[page_num][paddr.getAddress()&m_page_offset_mask],
data, len);
}
}
inline
uint8* MemoryVector::getBlockPtr(const PhysAddress & paddr)
inline uint8*
MemoryVector::read(const Address & paddr, uint8* data, int len)
{
uint32 page_num = paddr.getAddress() >> 12;
if (m_pages[page_num] == 0) {
m_pages[page_num] = new uint8[4096];
memset(m_pages[page_num], 0, 4096);
}
return &m_pages[page_num][paddr.getAddress()&m_page_offset_mask];
assert(paddr.getAddress() + len <= m_size);
uint32 page_num = paddr.getAddress() >> 12;
if (m_pages[page_num] == 0) {
memset(data, 0, len);
} else {
memcpy(data, &m_pages[page_num][paddr.getAddress()&m_page_offset_mask],
len);
}
return data;
}
#endif // MEMORYVECTOR_H
inline uint8*
MemoryVector::getBlockPtr(const PhysAddress & paddr)
{
uint32 page_num = paddr.getAddress() >> 12;
if (m_pages[page_num] == 0) {
m_pages[page_num] = new uint8[4096];
memset(m_pages[page_num], 0, 4096);
}
return &m_pages[page_num][paddr.getAddress()&m_page_offset_mask];
}
#endif // __MEM_RUBY_SYSTEM_MEMORYVECTOR_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,26 +26,20 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NodeID.hh
*
* Description:
*
* $Id: NodeID.hh,v 3.3 2003/12/04 15:01:39 xu Exp $
*
*/
#ifndef NODEID_H
#define NODEID_H
#ifndef __MEM_RUBY_SYSTEM_NODEID_HH__
#define __MEM_RUBY_SYSTEM_NODEID_HH__
#include <string>
#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/util.hh"
#include "mem/ruby/common/Global.hh"
typedef int NodeID;
extern inline
std::string NodeIDToString (NodeID node) { return int_to_string(node); }
inline std::string
NodeIDToString(NodeID node)
{
return int_to_string(node);
}
#endif //NODEID_H
#endif // __MEM_RUBY_SYSTEM_NODEID_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,215 +26,188 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* PerfectCacheMemory.hh
*
* Description:
*
* $Id$
*
*/
#ifndef __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__
#define __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__
#ifndef PERFECTCACHEMEMORY_H
#define PERFECTCACHEMEMORY_H
#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Map.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
template<class ENTRY>
class PerfectCacheLineState {
public:
PerfectCacheLineState() { m_permission = AccessPermission_NUM; }
AccessPermission m_permission;
ENTRY m_entry;
struct PerfectCacheLineState
{
PerfectCacheLineState() { m_permission = AccessPermission_NUM; }
AccessPermission m_permission;
ENTRY m_entry;
};
template<class ENTRY>
extern inline
ostream& operator<<(ostream& out, const PerfectCacheLineState<ENTRY>& obj)
inline ostream&
operator<<(ostream& out, const PerfectCacheLineState<ENTRY>& obj)
{
return out;
return out;
}
template<class ENTRY>
class PerfectCacheMemory {
public:
class PerfectCacheMemory
{
public:
PerfectCacheMemory();
// Constructors
PerfectCacheMemory();
static void printConfig(ostream& out);
// Destructor
//~PerfectCacheMemory();
// perform a cache access and see if we hit or not. Return true
// on a hit.
bool tryCacheAccess(const CacheMsg& msg, bool& block_stc, ENTRY*& entry);
// Public Methods
// tests to see if an address is present in the cache
bool isTagPresent(const Address& address) const;
static void printConfig(ostream& out);
// Returns true if there is:
// a) a tag match on this address or there is
// b) an Invalid line in the same cache "way"
bool cacheAvail(const Address& address) const;
// perform a cache access and see if we hit or not. Return true on
// a hit.
bool tryCacheAccess(const CacheMsg& msg, bool& block_stc, ENTRY*& entry);
// find an Invalid entry and sets the tag appropriate for the address
void allocate(const Address& address);
// tests to see if an address is present in the cache
bool isTagPresent(const Address& address) const;
void deallocate(const Address& address);
// Returns true if there is:
// a) a tag match on this address or there is
// b) an Invalid line in the same cache "way"
bool cacheAvail(const Address& address) const;
// Returns with the physical address of the conflicting cache line
Address cacheProbe(const Address& newAddress) const;
// find an Invalid entry and sets the tag appropriate for the address
void allocate(const Address& address);
// looks an address up in the cache
ENTRY& lookup(const Address& address);
const ENTRY& lookup(const Address& address) const;
void deallocate(const Address& address);
// Get/Set permission of cache block
AccessPermission getPermission(const Address& address) const;
void changePermission(const Address& address, AccessPermission new_perm);
// Returns with the physical address of the conflicting cache line
Address cacheProbe(const Address& newAddress) const;
// Print cache contents
void print(ostream& out) const;
// looks an address up in the cache
ENTRY& lookup(const Address& address);
const ENTRY& lookup(const Address& address) const;
private:
// Private copy constructor and assignment operator
PerfectCacheMemory(const PerfectCacheMemory& obj);
PerfectCacheMemory& operator=(const PerfectCacheMemory& obj);
// Get/Set permission of cache block
AccessPermission getPermission(const Address& address) const;
void changePermission(const Address& address, AccessPermission new_perm);
// Print cache contents
void print(ostream& out) const;
private:
// Private Methods
// Private copy constructor and assignment operator
PerfectCacheMemory(const PerfectCacheMemory& obj);
PerfectCacheMemory& operator=(const PerfectCacheMemory& obj);
// Data Members (m_prefix)
Map<Address, PerfectCacheLineState<ENTRY> > m_map;
// Data Members (m_prefix)
Map<Address, PerfectCacheLineState<ENTRY> > m_map;
};
// Output operator declaration
//ostream& operator<<(ostream& out, const PerfectCacheMemory<ENTRY>& obj);
// ******************* Definitions *******************
// Output operator definition
template<class ENTRY>
extern inline
ostream& operator<<(ostream& out, const PerfectCacheMemory<ENTRY>& obj)
inline ostream&
operator<<(ostream& out, const PerfectCacheMemory<ENTRY>& obj)
{
obj.print(out);
out << flush;
return out;
obj.print(out);
out << flush;
return out;
}
// ****************************************************************
template<class ENTRY>
extern inline
inline
PerfectCacheMemory<ENTRY>::PerfectCacheMemory()
{
}
// STATIC METHODS
template<class ENTRY>
extern inline
void PerfectCacheMemory<ENTRY>::printConfig(ostream& out)
inline void
PerfectCacheMemory<ENTRY>::printConfig(ostream& out)
{
}
// PUBLIC METHODS
template<class ENTRY>
extern inline
bool PerfectCacheMemory<ENTRY>::tryCacheAccess(const CacheMsg& msg, bool& block_stc, ENTRY*& entry)
inline bool
PerfectCacheMemory<ENTRY>::tryCacheAccess(const CacheMsg& msg,
bool& block_stc, ENTRY*& entry)
{
ERROR_MSG("not implemented");
ERROR_MSG("not implemented");
}
// tests to see if an address is present in the cache
template<class ENTRY>
extern inline
bool PerfectCacheMemory<ENTRY>::isTagPresent(const Address& address) const
inline bool
PerfectCacheMemory<ENTRY>::isTagPresent(const Address& address) const
{
return m_map.exist(line_address(address));
return m_map.exist(line_address(address));
}
template<class ENTRY>
extern inline
bool PerfectCacheMemory<ENTRY>::cacheAvail(const Address& address) const
inline bool
PerfectCacheMemory<ENTRY>::cacheAvail(const Address& address) const
{
return true;
return true;
}
// find an Invalid or already allocated entry and sets the tag
// appropriate for the address
template<class ENTRY>
extern inline
void PerfectCacheMemory<ENTRY>::allocate(const Address& address)
inline void
PerfectCacheMemory<ENTRY>::allocate(const Address& address)
{
PerfectCacheLineState<ENTRY> line_state;
line_state.m_permission = AccessPermission_Busy;
line_state.m_entry = ENTRY();
m_map.add(line_address(address), line_state);
PerfectCacheLineState<ENTRY> line_state;
line_state.m_permission = AccessPermission_Busy;
line_state.m_entry = ENTRY();
m_map.add(line_address(address), line_state);
}
// deallocate entry
template<class ENTRY>
extern inline
void PerfectCacheMemory<ENTRY>::deallocate(const Address& address)
inline void
PerfectCacheMemory<ENTRY>::deallocate(const Address& address)
{
m_map.erase(line_address(address));
m_map.erase(line_address(address));
}
// Returns with the physical address of the conflicting cache line
template<class ENTRY>
extern inline
Address PerfectCacheMemory<ENTRY>::cacheProbe(const Address& newAddress) const
inline Address
PerfectCacheMemory<ENTRY>::cacheProbe(const Address& newAddress) const
{
ERROR_MSG("cacheProbe called in perfect cache");
ERROR_MSG("cacheProbe called in perfect cache");
}
// looks an address up in the cache
template<class ENTRY>
extern inline
ENTRY& PerfectCacheMemory<ENTRY>::lookup(const Address& address)
inline ENTRY&
PerfectCacheMemory<ENTRY>::lookup(const Address& address)
{
return m_map.lookup(line_address(address)).m_entry;
return m_map.lookup(line_address(address)).m_entry;
}
// looks an address up in the cache
template<class ENTRY>
extern inline
const ENTRY& PerfectCacheMemory<ENTRY>::lookup(const Address& address) const
inline const ENTRY&
PerfectCacheMemory<ENTRY>::lookup(const Address& address) const
{
return m_map.lookup(line_address(address)).m_entry;
return m_map.lookup(line_address(address)).m_entry;
}
template<class ENTRY>
extern inline
AccessPermission PerfectCacheMemory<ENTRY>::getPermission(const Address& address) const
inline AccessPermission
PerfectCacheMemory<ENTRY>::getPermission(const Address& address) const
{
return m_map.lookup(line_address(address)).m_permission;
return m_map.lookup(line_address(address)).m_permission;
}
template<class ENTRY>
extern inline
void PerfectCacheMemory<ENTRY>::changePermission(const Address& address, AccessPermission new_perm)
inline void
PerfectCacheMemory<ENTRY>::changePermission(const Address& address,
AccessPermission new_perm)
{
Address line_address = address;
line_address.makeLineAddress();
PerfectCacheLineState<ENTRY>& line_state = m_map.lookup(line_address);
AccessPermission old_perm = line_state.m_permission;
line_state.m_permission = new_perm;
Address line_address = address;
line_address.makeLineAddress();
PerfectCacheLineState<ENTRY>& line_state = m_map.lookup(line_address);
AccessPermission old_perm = line_state.m_permission;
line_state.m_permission = new_perm;
}
template<class ENTRY>
extern inline
void PerfectCacheMemory<ENTRY>::print(ostream& out) const
inline void
PerfectCacheMemory<ENTRY>::print(ostream& out) const
{
}
#endif //PERFECTCACHEMEMORY_H
#endif // __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,178 +26,187 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/ruby/system/PersistentTable.hh"
#include "mem/gems_common/util.hh"
#include "mem/ruby/system/PersistentTable.hh"
// randomize so that handoffs are not locality-aware
// int persistent_randomize[] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15};
// int persistent_randomize[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
#if 0
int persistent_randomize[] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6,
10, 14, 3, 7, 11, 15};
int persistent_randomize[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15};
#endif
PersistentTable::PersistentTable()
{
m_map_ptr = new Map<Address, PersistentTableEntry>;
m_map_ptr = new Map<Address, PersistentTableEntry>;
}
PersistentTable::~PersistentTable()
{
delete m_map_ptr;
m_map_ptr = NULL;
delete m_map_ptr;
m_map_ptr = NULL;
}
void PersistentTable::persistentRequestLock(const Address& address,
MachineID locker,
AccessType type)
void
PersistentTable::persistentRequestLock(const Address& address,
MachineID locker,
AccessType type)
{
#if 0
if (locker == m_chip_ptr->getID())
cout << "Chip " << m_chip_ptr->getID() << ": " << llocker
<< " requesting lock for " << address << endl;
// if (locker == m_chip_ptr->getID() )
// cout << "Chip " << m_chip_ptr->getID() << ": " << llocker
// << " requesting lock for " << address << endl;
MachineID locker = (MachineID) persistent_randomize[llocker];
#endif
// MachineID locker = (MachineID) persistent_randomize[llocker];
assert(address == line_address(address));
if (!m_map_ptr->exist(address)) {
// Allocate if not present
PersistentTableEntry entry;
entry.m_starving.add(locker);
if (type == AccessType_Write) {
entry.m_request_to_write.add(locker);
assert(address == line_address(address));
if (!m_map_ptr->exist(address)) {
// Allocate if not present
PersistentTableEntry entry;
entry.m_starving.add(locker);
if (type == AccessType_Write) {
entry.m_request_to_write.add(locker);
}
m_map_ptr->add(address, entry);
} else {
PersistentTableEntry& entry = m_map_ptr->lookup(address);
//
// Make sure we're not already in the locked set
//
assert(!(entry.m_starving.isElement(locker)));
entry.m_starving.add(locker);
if (type == AccessType_Write) {
entry.m_request_to_write.add(locker);
}
assert(entry.m_marked.isSubset(entry.m_starving));
}
m_map_ptr->add(address, entry);
} else {
}
void
PersistentTable::persistentRequestUnlock(const Address& address,
MachineID unlocker)
{
#if 0
if (unlocker == m_chip_ptr->getID())
cout << "Chip " << m_chip_ptr->getID() << ": " << uunlocker
<< " requesting unlock for " << address << endl;
MachineID unlocker = (MachineID) persistent_randomize[uunlocker];
#endif
assert(address == line_address(address));
assert(m_map_ptr->exist(address));
PersistentTableEntry& entry = m_map_ptr->lookup(address);
//
// Make sure we're not already in the locked set
// Make sure we're in the locked set
//
assert(!(entry.m_starving.isElement(locker)));
entry.m_starving.add(locker);
if (type == AccessType_Write) {
entry.m_request_to_write.add(locker);
}
assert(entry.m_starving.isElement(unlocker));
assert(entry.m_marked.isSubset(entry.m_starving));
}
entry.m_starving.remove(unlocker);
entry.m_marked.remove(unlocker);
entry.m_request_to_write.remove(unlocker);
assert(entry.m_marked.isSubset(entry.m_starving));
// Deallocate if empty
if (entry.m_starving.isEmpty()) {
assert(entry.m_marked.isEmpty());
m_map_ptr->erase(address);
}
}
void PersistentTable::persistentRequestUnlock(const Address& address,
MachineID unlocker)
bool
PersistentTable::okToIssueStarving(const Address& address,
MachineID machId) const
{
// if (unlocker == m_chip_ptr->getID() )
// cout << "Chip " << m_chip_ptr->getID() << ": " << uunlocker
// << " requesting unlock for " << address << endl;
// MachineID unlocker = (MachineID) persistent_randomize[uunlocker];
assert(address == line_address(address));
assert(m_map_ptr->exist(address));
PersistentTableEntry& entry = m_map_ptr->lookup(address);
//
// Make sure we're in the locked set
//
assert(entry.m_starving.isElement(unlocker));
assert(entry.m_marked.isSubset(entry.m_starving));
entry.m_starving.remove(unlocker);
entry.m_marked.remove(unlocker);
entry.m_request_to_write.remove(unlocker);
assert(entry.m_marked.isSubset(entry.m_starving));
// Deallocate if empty
if (entry.m_starving.isEmpty()) {
assert(entry.m_marked.isEmpty());
m_map_ptr->erase(address);
}
assert(address == line_address(address));
if (!m_map_ptr->exist(address)) {
// No entry present
return true;
} else if (m_map_ptr->lookup(address).m_starving.isElement(machId)) {
// We can't issue another lockdown until are previous unlock
// has occurred
return false;
} else {
return m_map_ptr->lookup(address).m_marked.isEmpty();
}
}
bool PersistentTable::okToIssueStarving(const Address& address,
MachineID machId) const
MachineID
PersistentTable::findSmallest(const Address& address) const
{
assert(address == line_address(address));
if (!m_map_ptr->exist(address)) {
//
// No entry present
//
return true;
} else if (m_map_ptr->lookup(address).m_starving.isElement(machId)) {
//
// We can't issue another lockdown until are previous unlock has occurred
//
return false;
} else {
return (m_map_ptr->lookup(address).m_marked.isEmpty());
}
assert(address == line_address(address));
assert(m_map_ptr->exist(address));
const PersistentTableEntry& entry = m_map_ptr->lookup(address);
return entry.m_starving.smallestElement();
}
MachineID PersistentTable::findSmallest(const Address& address) const
AccessType
PersistentTable::typeOfSmallest(const Address& address) const
{
assert(address == line_address(address));
assert(m_map_ptr->exist(address));
const PersistentTableEntry& entry = m_map_ptr->lookup(address);
return entry.m_starving.smallestElement();
assert(address == line_address(address));
assert(m_map_ptr->exist(address));
const PersistentTableEntry& entry = m_map_ptr->lookup(address);
if (entry.m_request_to_write.
isElement(entry.m_starving.smallestElement())) {
return AccessType_Write;
} else {
return AccessType_Read;
}
}
AccessType PersistentTable::typeOfSmallest(const Address& address) const
void
PersistentTable::markEntries(const Address& address)
{
assert(address == line_address(address));
assert(m_map_ptr->exist(address));
const PersistentTableEntry& entry = m_map_ptr->lookup(address);
if (entry.m_request_to_write.isElement(entry.m_starving.smallestElement())) {
return AccessType_Write;
} else {
return AccessType_Read;
}
assert(address == line_address(address));
if (m_map_ptr->exist(address)) {
PersistentTableEntry& entry = m_map_ptr->lookup(address);
// None should be marked
assert(entry.m_marked.isEmpty());
// Mark all the nodes currently in the table
entry.m_marked = entry.m_starving;
}
}
void PersistentTable::markEntries(const Address& address)
bool
PersistentTable::isLocked(const Address& address) const
{
assert(address == line_address(address));
if (m_map_ptr->exist(address)) {
PersistentTableEntry& entry = m_map_ptr->lookup(address);
assert(address == line_address(address));
//
// None should be marked
//
assert(entry.m_marked.isEmpty());
//
// Mark all the nodes currently in the table
//
entry.m_marked = entry.m_starving;
}
// If an entry is present, it must be locked
return m_map_ptr->exist(address);
}
bool PersistentTable::isLocked(const Address& address) const
int
PersistentTable::countStarvingForAddress(const Address& address) const
{
assert(address == line_address(address));
// If an entry is present, it must be locked
return (m_map_ptr->exist(address));
if (m_map_ptr->exist(address)) {
PersistentTableEntry& entry = m_map_ptr->lookup(address);
return (entry.m_starving.count());
} else {
return 0;
}
}
int PersistentTable::countStarvingForAddress(const Address& address) const
int
PersistentTable::countReadStarvingForAddress(const Address& address) const
{
if (m_map_ptr->exist(address)) {
PersistentTableEntry& entry = m_map_ptr->lookup(address);
return (entry.m_starving.count());
}
else {
return 0;
}
if (m_map_ptr->exist(address)) {
PersistentTableEntry& entry = m_map_ptr->lookup(address);
return (entry.m_starving.count() - entry.m_request_to_write.count());
} else {
return 0;
}
}
int PersistentTable::countReadStarvingForAddress(const Address& address) const
{
if (m_map_ptr->exist(address)) {
PersistentTableEntry& entry = m_map_ptr->lookup(address);
return (entry.m_starving.count() - entry.m_request_to_write.count());
}
else {
return 0;
}
}
void PersistentTable::print(ostream& out) const
void
PersistentTable::print(ostream& out) const
{
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,76 +26,74 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PersistentTable_H
#define PersistentTable_H
#ifndef __MEM_RUBY_SYSTEM_PERSISTENTTABLE_HH__
#define __MEM_RUBY_SYSTEM_PERSISTENTTABLE_HH__
#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/system/MachineID.hh"
#include "mem/protocol/AccessType.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/NetDest.hh"
#include "mem/ruby/system/MachineID.hh"
class PersistentTableEntry {
public:
void print(ostream& out) const {}
class PersistentTableEntry
{
public:
void print(ostream& out) const {}
NetDest m_starving;
NetDest m_marked;
NetDest m_request_to_write;
NetDest m_starving;
NetDest m_marked;
NetDest m_request_to_write;
};
class PersistentTable {
public:
// Constructors
PersistentTable();
class PersistentTable
{
public:
// Constructors
PersistentTable();
// Destructor
~PersistentTable();
// Destructor
~PersistentTable();
// Public Methods
void persistentRequestLock(const Address& address, MachineID locker, AccessType type);
void persistentRequestUnlock(const Address& address, MachineID unlocker);
bool okToIssueStarving(const Address& address, MachineID machID) const;
MachineID findSmallest(const Address& address) const;
AccessType typeOfSmallest(const Address& address) const;
void markEntries(const Address& address);
bool isLocked(const Address& addr) const;
int countStarvingForAddress(const Address& addr) const;
int countReadStarvingForAddress(const Address& addr) const;
// Public Methods
void persistentRequestLock(const Address& address, MachineID locker,
AccessType type);
void persistentRequestUnlock(const Address& address, MachineID unlocker);
bool okToIssueStarving(const Address& address, MachineID machID) const;
MachineID findSmallest(const Address& address) const;
AccessType typeOfSmallest(const Address& address) const;
void markEntries(const Address& address);
bool isLocked(const Address& addr) const;
int countStarvingForAddress(const Address& addr) const;
int countReadStarvingForAddress(const Address& addr) const;
static void printConfig(ostream& out) {}
static void printConfig(ostream& out) {}
void print(ostream& out) const;
private:
// Private Methods
void print(ostream& out) const;
// Private copy constructor and assignment operator
PersistentTable(const PersistentTable& obj);
PersistentTable& operator=(const PersistentTable& obj);
private:
// Private copy constructor and assignment operator
PersistentTable(const PersistentTable& obj);
PersistentTable& operator=(const PersistentTable& obj);
// Data Members (m_prefix)
Map<Address, PersistentTableEntry>* m_map_ptr;
// Data Members (m_prefix)
Map<Address, PersistentTableEntry>* m_map_ptr;
};
// ******************* Definitions *******************
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const PersistentTable& obj)
inline ostream&
operator<<(ostream& out, const PersistentTable& obj)
{
obj.print(out);
out << flush;
return out;
obj.print(out);
out << flush;
return out;
}
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const PersistentTableEntry& obj)
inline ostream&
operator<<(ostream& out, const PersistentTableEntry& obj)
{
obj.print(out);
out << flush;
return out;
obj.print(out);
out << flush;
return out;
}
#endif //PersistentTable_H
#endif // __MEM_RUBY_SYSTEM_PERSISTENTTABLE_HH__

View File

@@ -26,8 +26,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PSEUDOLRUPOLICY_H
#define PSEUDOLRUPOLICY_H
#ifndef __MEM_RUBY_SYSTEM_PSEUDOLRUPOLICY_HH__
#define __MEM_RUBY_SYSTEM_PSEUDOLRUPOLICY_HH__
#include "mem/ruby/system/AbstractReplacementPolicy.hh"
@@ -44,94 +44,97 @@
* 2 is one below the associativy, and most fair when it is one above.
*/
class PseudoLRUPolicy : public AbstractReplacementPolicy {
public:
class PseudoLRUPolicy : public AbstractReplacementPolicy
{
public:
PseudoLRUPolicy(Index num_sets, Index assoc);
~PseudoLRUPolicy();
PseudoLRUPolicy(Index num_sets, Index assoc);
~PseudoLRUPolicy();
void touch(Index set, Index way, Time time);
Index getVictim(Index set) const;
void touch(Index set, Index way, Time time);
Index getVictim(Index set) const;
private:
unsigned int m_effective_assoc; /** nearest (to ceiling) power of 2 */
unsigned int m_num_levels; /** number of levels in the tree */
uint64* m_trees; /** bit representation of the trees, one for each set */
private:
unsigned int m_effective_assoc; /** nearest (to ceiling) power of 2 */
unsigned int m_num_levels; /** number of levels in the tree */
uint64* m_trees; /** bit representation of the
* trees, one for each set */
};
inline
PseudoLRUPolicy::PseudoLRUPolicy(Index num_sets, Index assoc)
: AbstractReplacementPolicy(num_sets, assoc)
: AbstractReplacementPolicy(num_sets, assoc)
{
int num_tree_nodes;
int num_tree_nodes;
// associativity cannot exceed capacity of tree representation
assert(num_sets > 0 && assoc > 1 && assoc <= (Index) sizeof(uint64)*4);
// associativity cannot exceed capacity of tree representation
assert(num_sets > 0 && assoc > 1 && assoc <= (Index) sizeof(uint64)*4);
m_trees = NULL;
m_num_levels = 0;
m_trees = NULL;
m_num_levels = 0;
m_effective_assoc = 1;
while(m_effective_assoc < assoc){
m_effective_assoc <<= 1; // effective associativity is ceiling power of 2
}
assoc = m_effective_assoc;
while(true){
assoc /= 2;
if(!assoc) break;
m_num_levels++;
}
assert(m_num_levels < sizeof(unsigned int)*4);
num_tree_nodes = (1 << m_num_levels) - 1;
m_trees = new uint64[m_num_sets];
for(unsigned int i=0; i< m_num_sets; i++){
m_trees[i] = 0;
}
m_effective_assoc = 1;
while (m_effective_assoc < assoc) {
// effective associativity is ceiling power of 2
m_effective_assoc <<= 1;
}
assoc = m_effective_assoc;
while (true) {
assoc /= 2;
if(!assoc) break;
m_num_levels++;
}
assert(m_num_levels < sizeof(unsigned int)*4);
num_tree_nodes = (1 << m_num_levels) - 1;
m_trees = new uint64[m_num_sets];
for (unsigned i = 0; i < m_num_sets; i++) {
m_trees[i] = 0;
}
}
inline
PseudoLRUPolicy::~PseudoLRUPolicy()
{
if(m_trees != NULL)
delete[] m_trees;
if (m_trees != NULL)
delete[] m_trees;
}
inline
void PseudoLRUPolicy::touch(Index set, Index index, Time time){
assert(index >= 0 && index < m_assoc);
assert(set >= 0 && set < m_num_sets);
inline void
PseudoLRUPolicy::touch(Index set, Index index, Time time)
{
assert(index >= 0 && index < m_assoc);
assert(set >= 0 && set < m_num_sets);
int tree_index = 0;
int node_val;
for(int i=m_num_levels -1; i>=0; i--){
node_val = (index >> i)&1;
if(node_val)
m_trees[set] |= node_val << tree_index;
else
m_trees[set] &= ~(1 << tree_index);
tree_index = node_val ? (tree_index*2)+2 : (tree_index*2)+1;
}
m_last_ref_ptr[set][index] = time;
int tree_index = 0;
int node_val;
for (int i = m_num_levels - 1; i >= 0; i--) {
node_val = (index >> i)&1;
if (node_val)
m_trees[set] |= node_val << tree_index;
else
m_trees[set] &= ~(1 << tree_index);
tree_index = node_val ? (tree_index*2)+2 : (tree_index*2)+1;
}
m_last_ref_ptr[set][index] = time;
}
inline
Index PseudoLRUPolicy::getVictim(Index set) const {
// assert(m_assoc != 0);
inline Index
PseudoLRUPolicy::getVictim(Index set) const
{
// assert(m_assoc != 0);
Index index = 0;
Index index = 0;
int tree_index = 0;
int node_val;
for (unsigned i = 0; i < m_num_levels; i++){
node_val = (m_trees[set] >> tree_index) & 1;
index += node_val ? 0 : (m_effective_assoc >> (i + 1));
tree_index = node_val ? (tree_index * 2) + 1 : (tree_index * 2) + 2;
}
assert(index >= 0 && index < m_effective_assoc);
int tree_index = 0;
int node_val;
for(unsigned int i=0;i<m_num_levels;i++){
node_val = (m_trees[set]>>tree_index)&1;
index += node_val?0:(m_effective_assoc >> (i+1));
tree_index = node_val? (tree_index*2)+1 : (tree_index*2)+2;
}
assert(index >= 0 && index < m_effective_assoc);
/* return either the found index or the max possible index */
/* NOTE: this is not a fair replacement when assoc is not a power of 2 */
return (index > (m_assoc-1)) ? m_assoc-1:index;
/* return either the found index or the max possible index */
/* NOTE: this is not a fair replacement when assoc is not a power of 2 */
return (index > (m_assoc - 1)) ? m_assoc - 1 : index;
}
#endif // PSEUDOLRUPOLICY_H
#endif // __MEM_RUBY_SYSTEM_PSEUDOLRUPOLICY_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 2009 Advanced Micro Devices, Inc.
* All rights reserved.
@@ -27,10 +26,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/physical.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "cpu/rubytest/RubyTester.hh"
#include "mem/physical.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/RubyPort.hh"
RubyPort::RubyPort(const Params *p)
: MemObject(p)
@@ -39,7 +38,7 @@ RubyPort::RubyPort(const Params *p)
assert(m_version != -1);
physmem = p->physmem;
m_controller = NULL;
m_mandatory_q_ptr = NULL;
@@ -48,7 +47,8 @@ RubyPort::RubyPort(const Params *p)
physMemPort = NULL;
}
void RubyPort::init()
void
RubyPort::init()
{
assert(m_controller != NULL);
m_mandatory_q_ptr = m_controller->getMandatoryQueue();
@@ -59,38 +59,38 @@ RubyPort::getPort(const std::string &if_name, int idx)
{
if (if_name == "port") {
return new M5Port(csprintf("%s-port%d", name(), idx), this);
} else if (if_name == "pio_port") {
//
}
if (if_name == "pio_port") {
// ensure there is only one pio port
//
assert(pio_port == NULL);
pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx),
this);
pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx), this);
return pio_port;
} else if (if_name == "physMemPort") {
//
}
if (if_name == "physMemPort") {
// RubyPort should only have one port to physical memory
//
assert (physMemPort == NULL);
physMemPort = new M5Port(csprintf("%s-physMemPort", name()),
this);
physMemPort = new M5Port(csprintf("%s-physMemPort", name()), this);
return physMemPort;
} else if (if_name == "functional") {
//
// Calls for the functional port only want to access functional memory.
// Therefore, directly pass these calls ports to physmem.
//
}
if (if_name == "functional") {
// Calls for the functional port only want to access
// functional memory. Therefore, directly pass these calls
// ports to physmem.
assert(physmem != NULL);
return physmem->getPort(if_name, idx);
}
return NULL;
}
RubyPort::PioPort::PioPort(const std::string &_name,
RubyPort::PioPort::PioPort(const std::string &_name,
RubyPort *_port)
: SimpleTimingPort(_name, _port)
{
@@ -98,7 +98,7 @@ RubyPort::PioPort::PioPort(const std::string &_name,
ruby_port = _port;
}
RubyPort::M5Port::M5Port(const std::string &_name,
RubyPort::M5Port::M5Port(const std::string &_name,
RubyPort *_port)
: SimpleTimingPort(_name, _port)
{
@@ -113,7 +113,6 @@ RubyPort::PioPort::recvAtomic(PacketPtr pkt)
return 0;
}
Tick
RubyPort::M5Port::recvAtomic(PacketPtr pkt)
{
@@ -125,48 +124,39 @@ RubyPort::M5Port::recvAtomic(PacketPtr pkt)
bool
RubyPort::PioPort::recvTiming(PacketPtr pkt)
{
//
// In FS mode, ruby memory will receive pio responses from devices and
// it must forward these responses back to the particular CPU.
//
DPRINTF(MemoryAccess,
"Pio response for address %#x\n",
pkt->getAddr());
// In FS mode, ruby memory will receive pio responses from devices
// and it must forward these responses back to the particular CPU.
DPRINTF(MemoryAccess, "Pio response for address %#x\n", pkt->getAddr());
assert(pkt->isResponse());
//
// First we must retrieve the request port from the sender State
//
RubyPort::SenderState *senderState =
RubyPort::SenderState *senderState =
safe_cast<RubyPort::SenderState *>(pkt->senderState);
M5Port *port = senderState->port;
assert(port != NULL);
// pop the sender state from the packet
pkt->senderState = senderState->saved;
delete senderState;
port->sendTiming(pkt);
return true;
}
bool
RubyPort::M5Port::recvTiming(PacketPtr pkt)
{
DPRINTF(MemoryAccess,
"Timing access caught for address %#x\n",
pkt->getAddr());
DPRINTF(MemoryAccess,
"Timing access caught for address %#x\n", pkt->getAddr());
//dsm: based on SimpleTimingPort::recvTiming(pkt);
//
// The received packets should only be M5 requests, which should never
// get nacked. There used to be code to hanldle nacks here, but
// I'm pretty sure it didn't work correctly with the drain code,
// The received packets should only be M5 requests, which should never
// get nacked. There used to be code to hanldle nacks here, but
// I'm pretty sure it didn't work correctly with the drain code,
// so that would need to be fixed if we ever added it back.
//
assert(pkt->isRequest());
if (pkt->memInhibitAsserted()) {
@@ -177,34 +167,26 @@ RubyPort::M5Port::recvTiming(PacketPtr pkt)
return true;
}
//
// Save the port in the sender state object to be used later to
// route the response
//
pkt->senderState = new SenderState(this, pkt->senderState);
//
// Check for pio requests and directly send them to the dedicated
// pio port.
//
if (!isPhysMemAddress(pkt->getAddr())) {
assert(ruby_port->pio_port != NULL);
DPRINTF(MemoryAccess,
DPRINTF(MemoryAccess,
"Request for address 0x%#x is assumed to be a pio request\n",
pkt->getAddr());
return ruby_port->pio_port->sendTiming(pkt);
}
//
// For DMA and CPU requests, translate them to ruby requests before
// sending them to our assigned ruby port.
//
RubyRequestType type = RubyRequestType_NULL;
//
// If valid, copy the pc to the ruby request
//
Addr pc = 0;
if (pkt->req->hasPC()) {
pc = pkt->req->getPC();
@@ -224,47 +206,38 @@ RubyPort::M5Port::recvTiming(PacketPtr pkt)
if (pkt->req->isInstFetch()) {
type = RubyRequestType_IFETCH;
} else {
type = RubyRequestType_LD;
type = RubyRequestType_LD;
}
} else if (pkt->isWrite()) {
type = RubyRequestType_ST;
} else if (pkt->isReadWrite()) {
//
// Fix me. This conditional will never be executed because
// isReadWrite() is just an OR of isRead() and isWrite().
// Furthermore, just because the packet is a read/write request does
// not necessary mean it is a read-modify-write atomic operation.
//
// Fix me. This conditional will never be executed
// because isReadWrite() is just an OR of isRead() and
// isWrite(). Furthermore, just because the packet is a
// read/write request does not necessary mean it is a
// read-modify-write atomic operation.
type = RubyRequestType_RMW_Write;
} else {
panic("Unsupported ruby packet type\n");
}
}
RubyRequest ruby_request(pkt->getAddr(),
pkt->getPtr<uint8_t>(),
pkt->getSize(),
pc,
type,
RubyAccessMode_Supervisor,
pkt);
RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(),
pkt->getSize(), pc, type,
RubyAccessMode_Supervisor, pkt);
// Submit the ruby request
RequestStatus requestStatus = ruby_port->makeRequest(ruby_request);
//
// If the request successfully issued or the SC request completed because
// exclusive permission was lost, then we should return true.
// Otherwise, we need to delete the senderStatus we just created and return
// false.
//
if ((requestStatus == RequestStatus_Issued) ||
(requestStatus == RequestStatus_LlscFailed)) {
//
// The communicate to M5 whether the SC command succeeded by seting the
// packet's extra data.
//
if (pkt->isLLSC() && pkt->isWrite()) {
if (requestStatus == RequestStatus_LlscFailed) {
DPRINTF(MemoryAccess, "SC failed and request completed\n");
@@ -276,11 +249,10 @@ RubyPort::M5Port::recvTiming(PacketPtr pkt)
return true;
}
DPRINTF(MemoryAccess,
DPRINTF(MemoryAccess,
"Request for address #x did not issue because %s\n",
pkt->getAddr(),
RequestStatus_to_string(requestStatus));
pkt->getAddr(), RequestStatus_to_string(requestStatus));
SenderState* senderState = safe_cast<SenderState*>(pkt->senderState);
pkt->senderState = senderState->saved;
delete senderState;
@@ -290,14 +262,12 @@ RubyPort::M5Port::recvTiming(PacketPtr pkt)
void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
//
// Retrieve the request port from the sender State
//
RubyPort::SenderState *senderState =
RubyPort::SenderState *senderState =
safe_cast<RubyPort::SenderState *>(pkt->senderState);
M5Port *port = senderState->port;
assert(port != NULL);
// pop the sender state from the packet
pkt->senderState = senderState->saved;
delete senderState;
@@ -308,11 +278,9 @@ RubyPort::ruby_hit_callback(PacketPtr pkt)
void
RubyPort::M5Port::hitCallback(PacketPtr pkt)
{
bool needsResponse = pkt->needsResponse();
DPRINTF(MemoryAccess, "Hit callback needs response %d\n",
needsResponse);
DPRINTF(MemoryAccess, "Hit callback needs response %d\n", needsResponse);
ruby_port->physMemPort->sendAtomic(pkt);
@@ -349,9 +317,9 @@ RubyPort::M5Port::isPhysMemAddress(Addr addr)
AddrRangeList physMemAddrList;
bool snoop = false;
ruby_port->physMemPort->getPeerAddressRanges(physMemAddrList, snoop);
for(AddrRangeIter iter = physMemAddrList.begin();
iter != physMemAddrList.end();
iter++) {
for (AddrRangeIter iter = physMemAddrList.begin();
iter != physMemAddrList.end();
iter++) {
if (addr >= iter->start && addr <= iter->end) {
DPRINTF(MemoryAccess, "Request found in %#llx - %#llx range\n",
iter->start, iter->end);

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 2009 Advanced Micro Devices, Inc.
* All rights reserved.
@@ -27,18 +26,17 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef RUBYPORT_H
#define RUBYPORT_H
#ifndef __MEM_RUBY_SYSTEM_RUBYPORT_HH__
#define __MEM_RUBY_SYSTEM_RUBYPORT_HH__
#include "mem/ruby/libruby.hh"
#include <cassert>
#include <string>
#include <assert.h>
#include "mem/mem_object.hh"
#include "mem/tport.hh"
#include "mem/physical.hh"
#include "mem/protocol/RequestStatus.hh"
#include "mem/ruby/libruby.hh"
#include "mem/tport.hh"
#include "params/RubyPort.hh"
using namespace std;
@@ -46,17 +44,16 @@ using namespace std;
class MessageBuffer;
class AbstractController;
class RubyPort : public MemObject {
public:
class RubyPort : public MemObject
{
public:
class M5Port : public SimpleTimingPort
{
private:
RubyPort *ruby_port;
public:
M5Port(const std::string &_name,
RubyPort *_port);
M5Port(const std::string &_name, RubyPort *_port);
bool sendTiming(PacketPtr pkt);
void hitCallback(PacketPtr pkt);
@@ -72,12 +69,11 @@ public:
class PioPort : public SimpleTimingPort
{
private:
RubyPort *ruby_port;
public:
PioPort(const std::string &_name,
RubyPort *_port);
PioPort(const std::string &_name, RubyPort *_port);
bool sendTiming(PacketPtr pkt);
protected:
@@ -92,8 +88,7 @@ public:
M5Port* port;
Packet::SenderState *saved;
SenderState(M5Port* _port,
Packet::SenderState *sender_state = NULL)
SenderState(M5Port* _port, Packet::SenderState *sender_state = NULL)
: port(_port), saved(sender_state)
{}
};
@@ -114,17 +109,17 @@ public:
//
void setController(AbstractController* _cntrl) { m_controller = _cntrl; }
protected:
const string m_name;
void ruby_hit_callback(PacketPtr pkt);
void hit(PacketPtr pkt);
protected:
const string m_name;
void ruby_hit_callback(PacketPtr pkt);
void hit(PacketPtr pkt);
int m_version;
AbstractController* m_controller;
MessageBuffer* m_mandatory_q_ptr;
int m_version;
AbstractController* m_controller;
MessageBuffer* m_mandatory_q_ptr;
PioPort* pio_port;
private:
private:
uint16_t m_port_id;
uint64_t m_request_cnt;
@@ -133,4 +128,4 @@ private:
PhysicalMemory* physmem;
};
#endif
#endif // __MEM_RUBY_SYSTEM_RUBYPORT_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,22 +26,21 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/ruby/libruby.hh"
#include "cpu/rubytest/RubyTester.hh"
#include "mem/gems_common/Map.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/libruby.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "cpu/rubytest/RubyTester.hh"
#include "params/RubySequencer.hh"
Sequencer *
@@ -50,7 +48,7 @@ RubySequencerParams::create()
{
return new Sequencer(this);
}
Sequencer::Sequencer(const Params *p)
: RubyPort(p), deadlockCheckEvent(this)
{
@@ -58,7 +56,7 @@ Sequencer::Sequencer(const Params *p)
m_store_waiting_on_store_cycles = 0;
m_load_waiting_on_store_cycles = 0;
m_load_waiting_on_load_cycles = 0;
m_outstanding_count = 0;
m_max_outstanding_requests = 0;
@@ -78,478 +76,524 @@ Sequencer::Sequencer(const Params *p)
assert(m_dataCache_ptr != NULL);
}
Sequencer::~Sequencer() {
Sequencer::~Sequencer()
{
}
void Sequencer::wakeup() {
// Check for deadlock of any of the requests
Time current_time = g_eventQueue_ptr->getTime();
void
Sequencer::wakeup()
{
// Check for deadlock of any of the requests
Time current_time = g_eventQueue_ptr->getTime();
// Check across all outstanding requests
int total_outstanding = 0;
// Check across all outstanding requests
int total_outstanding = 0;
Vector<Address> keys = m_readRequestTable.keys();
for (int i=0; i<keys.size(); i++) {
SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
if (current_time - request->issue_time >= m_deadlock_threshold) {
WARN_MSG("Possible Deadlock detected");
WARN_EXPR(request);
WARN_EXPR(m_version);
WARN_EXPR(request->ruby_request.paddr);
WARN_EXPR(keys.size());
WARN_EXPR(current_time);
WARN_EXPR(request->issue_time);
WARN_EXPR(current_time - request->issue_time);
ERROR_MSG("Aborting");
Vector<Address> keys = m_readRequestTable.keys();
for (int i = 0; i < keys.size(); i++) {
SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
if (current_time - request->issue_time >= m_deadlock_threshold) {
WARN_MSG("Possible Deadlock detected");
WARN_EXPR(request);
WARN_EXPR(m_version);
WARN_EXPR(request->ruby_request.paddr);
WARN_EXPR(keys.size());
WARN_EXPR(current_time);
WARN_EXPR(request->issue_time);
WARN_EXPR(current_time - request->issue_time);
ERROR_MSG("Aborting");
}
}
}
keys = m_writeRequestTable.keys();
for (int i=0; i<keys.size(); i++) {
SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
if (current_time - request->issue_time >= m_deadlock_threshold) {
WARN_MSG("Possible Deadlock detected");
WARN_EXPR(request);
WARN_EXPR(m_version);
WARN_EXPR(current_time);
WARN_EXPR(request->issue_time);
WARN_EXPR(current_time - request->issue_time);
WARN_EXPR(keys.size());
ERROR_MSG("Aborting");
keys = m_writeRequestTable.keys();
for (int i = 0; i < keys.size(); i++) {
SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
if (current_time - request->issue_time >= m_deadlock_threshold) {
WARN_MSG("Possible Deadlock detected");
WARN_EXPR(request);
WARN_EXPR(m_version);
WARN_EXPR(current_time);
WARN_EXPR(request->issue_time);
WARN_EXPR(current_time - request->issue_time);
WARN_EXPR(keys.size());
ERROR_MSG("Aborting");
}
}
}
total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();
assert(m_outstanding_count == total_outstanding);
total_outstanding += m_writeRequestTable.size();
total_outstanding += m_readRequestTable.size();
if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
schedule(deadlockCheckEvent,
(m_deadlock_threshold * g_eventQueue_ptr->getClock()) + curTick);
}
assert(m_outstanding_count == total_outstanding);
if (m_outstanding_count > 0) {
// If there are still outstanding requests, keep checking
schedule(deadlockCheckEvent,
m_deadlock_threshold * g_eventQueue_ptr->getClock() +
curTick);
}
}
void Sequencer::printStats(ostream & out) const {
out << "Sequencer: " << m_name << endl;
out << " store_waiting_on_load_cycles: " << m_store_waiting_on_load_cycles << endl;
out << " store_waiting_on_store_cycles: " << m_store_waiting_on_store_cycles << endl;
out << " load_waiting_on_load_cycles: " << m_load_waiting_on_load_cycles << endl;
out << " load_waiting_on_store_cycles: " << m_load_waiting_on_store_cycles << endl;
void
Sequencer::printStats(ostream & out) const
{
out << "Sequencer: " << m_name << endl
<< " store_waiting_on_load_cycles: "
<< m_store_waiting_on_load_cycles << endl
<< " store_waiting_on_store_cycles: "
<< m_store_waiting_on_store_cycles << endl
<< " load_waiting_on_load_cycles: "
<< m_load_waiting_on_load_cycles << endl
<< " load_waiting_on_store_cycles: "
<< m_load_waiting_on_store_cycles << endl;
}
void Sequencer::printProgress(ostream& out) const{
/*
int total_demand = 0;
out << "Sequencer Stats Version " << m_version << endl;
out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
out << "---------------" << endl;
out << "outstanding requests" << endl;
void
Sequencer::printProgress(ostream& out) const
{
#if 0
int total_demand = 0;
out << "Sequencer Stats Version " << m_version << endl;
out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
out << "---------------" << endl;
out << "outstanding requests" << endl;
Vector<Address> rkeys = m_readRequestTable.keys();
int read_size = rkeys.size();
out << "proc " << m_version << " Read Requests = " << read_size << endl;
// print the request table
for(int i=0; i < read_size; ++i){
SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i] << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
total_demand++;
}
Vector<Address> rkeys = m_readRequestTable.keys();
int read_size = rkeys.size();
out << "proc " << m_version << " Read Requests = " << read_size << endl;
Vector<Address> wkeys = m_writeRequestTable.keys();
int write_size = wkeys.size();
out << "proc " << m_version << " Write Requests = " << write_size << endl;
// print the request table
for(int i=0; i < write_size; ++i){
CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
if( request.getPrefetch() == PrefetchBit_No ){
// print the request table
for (int i = 0; i < read_size; ++i) {
SequencerRequest *request = m_readRequestTable.lookup(rkeys[i]);
out << "\tRequest[ " << i << " ] = " << request->type
<< " Address " << rkeys[i]
<< " Posted " << request->issue_time
<< " PF " << PrefetchBit_No << endl;
total_demand++;
}
}
}
out << endl;
Vector<Address> wkeys = m_writeRequestTable.keys();
int write_size = wkeys.size();
out << "proc " << m_version << " Write Requests = " << write_size << endl;
out << "Total Number Outstanding: " << m_outstanding_count << endl;
out << "Total Number Demand : " << total_demand << endl;
out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
out << endl;
out << endl;
*/
// print the request table
for (int i = 0; i < write_size; ++i){
CacheMsg &request = m_writeRequestTable.lookup(wkeys[i]);
out << "\tRequest[ " << i << " ] = " << request.getType()
<< " Address " << wkeys[i]
<< " Posted " << request.getTime()
<< " PF " << request.getPrefetch() << endl;
if (request.getPrefetch() == PrefetchBit_No) {
total_demand++;
}
}
out << endl;
out << "Total Number Outstanding: " << m_outstanding_count << endl
<< "Total Number Demand : " << total_demand << endl
<< "Total Number Prefetches : " << m_outstanding_count - total_demand
<< endl << endl << endl;
#endif
}
void Sequencer::printConfig(ostream& out) const {
out << "Seqeuncer config: " << m_name << endl;
out << " controller: " << m_controller->getName() << endl;
out << " version: " << m_version << endl;
out << " max_outstanding_requests: " << m_max_outstanding_requests << endl;
out << " deadlock_threshold: " << m_deadlock_threshold << endl;
void
Sequencer::printConfig(ostream& out) const
{
out << "Seqeuncer config: " << m_name << endl
<< " controller: " << m_controller->getName() << endl
<< " version: " << m_version << endl
<< " max_outstanding_requests: " << m_max_outstanding_requests << endl
<< " deadlock_threshold: " << m_deadlock_threshold << endl;
}
// Insert the request on the correct request table. Return true if
// the entry was already present.
bool Sequencer::insertRequest(SequencerRequest* request) {
int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
bool
Sequencer::insertRequest(SequencerRequest* request)
{
int total_outstanding =
m_writeRequestTable.size() + m_readRequestTable.size();
assert(m_outstanding_count == total_outstanding);
assert(m_outstanding_count == total_outstanding);
// See if we should schedule a deadlock check
if (deadlockCheckEvent.scheduled() == false) {
schedule(deadlockCheckEvent, m_deadlock_threshold + curTick);
}
Address line_addr(request->ruby_request.paddr);
line_addr.makeLineAddress();
if ((request->ruby_request.type == RubyRequestType_ST) ||
(request->ruby_request.type == RubyRequestType_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_RMW_Write) ||
(request->ruby_request.type == RubyRequestType_Locked_Read) ||
(request->ruby_request.type == RubyRequestType_Locked_Write)) {
if (m_writeRequestTable.exist(line_addr)) {
m_writeRequestTable.lookup(line_addr) = request;
// return true;
assert(0); // drh5: isn't this an error? do you lose the initial request?
// See if we should schedule a deadlock check
if (deadlockCheckEvent.scheduled() == false) {
schedule(deadlockCheckEvent, m_deadlock_threshold + curTick);
}
m_writeRequestTable.allocate(line_addr);
m_writeRequestTable.lookup(line_addr) = request;
m_outstanding_count++;
} else {
if (m_readRequestTable.exist(line_addr)) {
m_readRequestTable.lookup(line_addr) = request;
// return true;
assert(0); // drh5: isn't this an error? do you lose the initial request?
}
m_readRequestTable.allocate(line_addr);
m_readRequestTable.lookup(line_addr) = request;
m_outstanding_count++;
}
g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
Address line_addr(request->ruby_request.paddr);
line_addr.makeLineAddress();
if ((request->ruby_request.type == RubyRequestType_ST) ||
(request->ruby_request.type == RubyRequestType_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_RMW_Write) ||
(request->ruby_request.type == RubyRequestType_Locked_Read) ||
(request->ruby_request.type == RubyRequestType_Locked_Write)) {
if (m_writeRequestTable.exist(line_addr)) {
m_writeRequestTable.lookup(line_addr) = request;
// return true;
total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
assert(m_outstanding_count == total_outstanding);
return false;
}
void Sequencer::removeRequest(SequencerRequest* srequest) {
assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
const RubyRequest & ruby_request = srequest->ruby_request;
Address line_addr(ruby_request.paddr);
line_addr.makeLineAddress();
if ((ruby_request.type == RubyRequestType_ST) ||
(ruby_request.type == RubyRequestType_RMW_Read) ||
(ruby_request.type == RubyRequestType_RMW_Write) ||
(ruby_request.type == RubyRequestType_Locked_Read) ||
(ruby_request.type == RubyRequestType_Locked_Write)) {
m_writeRequestTable.deallocate(line_addr);
} else {
m_readRequestTable.deallocate(line_addr);
}
m_outstanding_count--;
assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}
void Sequencer::writeCallback(const Address& address, DataBlock& data) {
assert(address == line_address(address));
assert(m_writeRequestTable.exist(line_address(address)));
SequencerRequest* request = m_writeRequestTable.lookup(address);
removeRequest(request);
assert((request->ruby_request.type == RubyRequestType_ST) ||
(request->ruby_request.type == RubyRequestType_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_RMW_Write) ||
(request->ruby_request.type == RubyRequestType_Locked_Read) ||
(request->ruby_request.type == RubyRequestType_Locked_Write));
if (request->ruby_request.type == RubyRequestType_Locked_Read) {
m_dataCache_ptr->setLocked(address, m_version);
}
else if (request->ruby_request.type == RubyRequestType_RMW_Read) {
m_controller->blockOnQueue(address, m_mandatory_q_ptr);
}
else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
m_controller->unblock(address);
}
hitCallback(request, data);
}
void Sequencer::readCallback(const Address& address, DataBlock& data) {
assert(address == line_address(address));
assert(m_readRequestTable.exist(line_address(address)));
SequencerRequest* request = m_readRequestTable.lookup(address);
removeRequest(request);
assert((request->ruby_request.type == RubyRequestType_LD) ||
(request->ruby_request.type == RubyRequestType_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_IFETCH));
hitCallback(request, data);
}
void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
const RubyRequest & ruby_request = srequest->ruby_request;
Address request_address(ruby_request.paddr);
Address request_line_address(ruby_request.paddr);
request_line_address.makeLineAddress();
RubyRequestType type = ruby_request.type;
Time issued_time = srequest->issue_time;
// Set this cache entry to the most recently used
if (type == RubyRequestType_IFETCH) {
if (m_instCache_ptr->isTagPresent(request_line_address) )
m_instCache_ptr->setMRU(request_line_address);
} else {
if (m_dataCache_ptr->isTagPresent(request_line_address) )
m_dataCache_ptr->setMRU(request_line_address);
}
assert(g_eventQueue_ptr->getTime() >= issued_time);
Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;
// Profile the miss latency for all non-zero demand misses
if (miss_latency != 0) {
g_system_ptr->getProfiler()->missLatency(miss_latency, type);
if (Debug::getProtocolTrace()) {
g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
"", "Done", "", int_to_string(miss_latency)+" cycles");
}
}
/*
if (request.getPrefetch() == PrefetchBit_Yes) {
return; // Ignore the prefetch
}
*/
// update the data
if (ruby_request.data != NULL) {
if ((type == RubyRequestType_LD) ||
(type == RubyRequestType_IFETCH) ||
(type == RubyRequestType_RMW_Read) ||
(type == RubyRequestType_Locked_Read)) {
memcpy(ruby_request.data,
data.getData(request_address.getOffset(), ruby_request.len),
ruby_request.len);
// drh5: isn't this an error? do you lose the initial request?
assert(0);
}
m_writeRequestTable.allocate(line_addr);
m_writeRequestTable.lookup(line_addr) = request;
m_outstanding_count++;
} else {
if (m_readRequestTable.exist(line_addr)) {
m_readRequestTable.lookup(line_addr) = request;
// return true;
data.setData(ruby_request.data,
request_address.getOffset(),
ruby_request.len);
// drh5: isn't this an error? do you lose the initial request?
assert(0);
}
m_readRequestTable.allocate(line_addr);
m_readRequestTable.lookup(line_addr) = request;
m_outstanding_count++;
}
} else {
DPRINTF(MemoryAccess,
"WARNING. Data not transfered from Ruby to M5 for type %s\n",
RubyRequestType_to_string(type));
}
//
// If using the RubyTester, update the RubyTester sender state's subBlock
// with the recieved data. The tester will later access this state.
// Note: RubyPort will access it's sender state before the RubyTester.
//
if (m_usingRubyTester) {
RubyTester::SenderState* testerSenderState;
testerSenderState = safe_cast<RubyTester::SenderState*>( \
safe_cast<RubyPort::SenderState*>(ruby_request.pkt->senderState)->saved);
testerSenderState->subBlock->mergeFrom(data);
}
g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
ruby_hit_callback(ruby_request.pkt);
delete srequest;
total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
assert(m_outstanding_count == total_outstanding);
return false;
}
void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    // The global count and the two alias tables must agree before we
    // touch anything.
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    const RubyRequest & req = srequest->ruby_request;
    Address line_addr(req.paddr);
    line_addr.makeLineAddress();

    // Stores, both halves of an RMW, and LL/SC accesses live in the
    // write table; everything else (loads, ifetches) is in the read
    // table.
    bool is_write =
        req.type == RubyRequestType_ST ||
        req.type == RubyRequestType_RMW_Read ||
        req.type == RubyRequestType_RMW_Write ||
        req.type == RubyRequestType_Locked_Read ||
        req.type == RubyRequestType_Locked_Write;

    if (is_write)
        m_writeRequestTable.deallocate(line_addr);
    else
        m_readRequestTable.deallocate(line_addr);

    m_outstanding_count--;

    // Re-check the invariant after the removal.
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}
void
Sequencer::writeCallback(const Address& address, DataBlock& data)
{
assert(address == line_address(address));
assert(m_writeRequestTable.exist(line_address(address)));
SequencerRequest* request = m_writeRequestTable.lookup(address);
removeRequest(request);
assert((request->ruby_request.type == RubyRequestType_ST) ||
(request->ruby_request.type == RubyRequestType_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_RMW_Write) ||
(request->ruby_request.type == RubyRequestType_Locked_Read) ||
(request->ruby_request.type == RubyRequestType_Locked_Write));
if (request->ruby_request.type == RubyRequestType_Locked_Read) {
m_dataCache_ptr->setLocked(address, m_version);
} else if (request->ruby_request.type == RubyRequestType_RMW_Read) {
m_controller->blockOnQueue(address, m_mandatory_q_ptr);
} else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
m_controller->unblock(address);
}
hitCallback(request, data);
}
void
Sequencer::readCallback(const Address& address, DataBlock& data)
{
assert(address == line_address(address));
assert(m_readRequestTable.exist(line_address(address)));
SequencerRequest* request = m_readRequestTable.lookup(address);
removeRequest(request);
assert((request->ruby_request.type == RubyRequestType_LD) ||
(request->ruby_request.type == RubyRequestType_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_IFETCH));
hitCallback(request, data);
}
void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data)
{
    const RubyRequest & req = srequest->ruby_request;
    Address request_address(req.paddr);
    Address request_line_address(req.paddr);
    request_line_address.makeLineAddress();
    RubyRequestType type = req.type;
    Time issued_time = srequest->issue_time;

    // Touch the line so the replacement policy sees this access as the
    // most recently used.
    CacheMemory* cache =
        (type == RubyRequestType_IFETCH) ? m_instCache_ptr : m_dataCache_ptr;
    if (cache->isTagPresent(request_line_address))
        cache->setMRU(request_line_address);

    assert(g_eventQueue_ptr->getTime() >= issued_time);
    Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

    // Profile the miss latency for all non-zero demand misses
    if (miss_latency != 0) {
        g_system_ptr->getProfiler()->missLatency(miss_latency, type);

        if (Debug::getProtocolTrace()) {
            g_system_ptr->getProfiler()->
                profileTransition("Seq", m_version,
                                  Address(req.paddr), "", "Done", "",
                                  csprintf("%d cycles", miss_latency));
        }
    }

#if 0
    if (request.getPrefetch() == PrefetchBit_Yes) {
        return; // Ignore the prefetch
    }
#endif

    // Copy data between the requester's buffer and the cache line:
    // reads copy out of the block, writes copy into it.
    if (req.data != NULL) {
        bool is_read =
            type == RubyRequestType_LD ||
            type == RubyRequestType_IFETCH ||
            type == RubyRequestType_RMW_Read ||
            type == RubyRequestType_Locked_Read;

        if (is_read) {
            memcpy(req.data,
                   data.getData(request_address.getOffset(), req.len),
                   req.len);
        } else {
            data.setData(req.data, request_address.getOffset(), req.len);
        }
    } else {
        DPRINTF(MemoryAccess,
                "WARNING. Data not transfered from Ruby to M5 for type %s\n",
                RubyRequestType_to_string(type));
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data so the tester can later check it.
    // Note: RubyPort will access its sender state before the RubyTester.
    if (m_usingRubyTester) {
        RubyPort::SenderState *requestSenderState =
            safe_cast<RubyPort::SenderState*>(req.pkt->senderState);
        RubyTester::SenderState* testerSenderState =
            safe_cast<RubyTester::SenderState*>(requestSenderState->saved);
        testerSenderState->subBlock->mergeFrom(data);
    }

    ruby_hit_callback(req.pkt);
    delete srequest;
}
// Returns Aliased if the sequencer already has a load or store
// outstanding to this cache line, BufferFull if the outstanding-request
// limit is reached, and Ready otherwise.
RequestStatus
Sequencer::getRequestStatus(const RubyRequest& request)
{
    Address line_addr = line_address(Address(request.paddr));
    bool outstanding_store = m_writeRequestTable.exist(line_addr);
    bool outstanding_load = m_readRequestTable.exist(line_addr);

    if (outstanding_store) {
        // Record what kind of access collided with the pending store.
        if (request.type == RubyRequestType_LD ||
            request.type == RubyRequestType_IFETCH ||
            request.type == RubyRequestType_RMW_Read) {
            m_store_waiting_on_load_cycles++;
        } else {
            m_store_waiting_on_store_cycles++;
        }
        return RequestStatus_Aliased;
    }

    if (outstanding_load) {
        // Record what kind of access collided with the pending load.
        if (request.type == RubyRequestType_ST ||
            request.type == RubyRequestType_RMW_Write) {
            m_load_waiting_on_store_cycles++;
        } else {
            m_load_waiting_on_load_cycles++;
        }
        return RequestStatus_Aliased;
    }

    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    return RequestStatus_Ready;
}
// True when no load or store is outstanding in either alias table.
bool
Sequencer::empty() const
{
    return m_writeRequestTable.size() == 0 &&
        m_readRequestTable.size() == 0;
}
// Returns Aliased if the sequencer already has a load or store
// outstanding to the same cache line, BufferFull if the
// outstanding-request limit is reached, and Ready otherwise.
//
// NOTE(review): this span of the file was corrupted diff residue that
// interleaved fragments of the old makeRequest() with the new
// getRequestStatus(), leaving unbalanced braces; it has been
// reconstructed as the clean getRequestStatus() definition.
RequestStatus
Sequencer::getRequestStatus(const RubyRequest& request)
{
    bool is_outstanding_store =
        m_writeRequestTable.exist(line_address(Address(request.paddr)));
    bool is_outstanding_load =
        m_readRequestTable.exist(line_address(Address(request.paddr)));
    if (is_outstanding_store) {
        // Count the kind of access that aliased with the pending store.
        if ((request.type == RubyRequestType_LD) ||
            (request.type == RubyRequestType_IFETCH) ||
            (request.type == RubyRequestType_RMW_Read)) {
            m_store_waiting_on_load_cycles++;
        } else {
            m_store_waiting_on_store_cycles++;
        }
        return RequestStatus_Aliased;
    } else if (is_outstanding_load) {
        // Count the kind of access that aliased with the pending load.
        if ((request.type == RubyRequestType_ST) ||
            (request.type == RubyRequestType_RMW_Write)) {
            m_load_waiting_on_store_cycles++;
        } else {
            m_load_waiting_on_load_cycles++;
        }
        return RequestStatus_Aliased;
    }

    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    return RequestStatus_Ready;
}
bool
Sequencer::empty() const
{
    // Nothing outstanding when both alias tables are empty.
    return (m_writeRequestTable.size() == 0) &&
           (m_readRequestTable.size() == 0);
}
// Accept a new memory request: record it in the alias tables and issue
// it to the cache controller.  Returns Issued on success, the blocking
// status from getRequestStatus() if the request cannot be accepted, or
// LlscFailed when a store-conditional loses its lock.
//
// Fix: removed a stray "} else { return status; }" fragment (leftover
// diff residue from the pre-style-pass version) that left the function
// with unbalanced braces.
RequestStatus
Sequencer::makeRequest(const RubyRequest &request)
{
    // Requests must not straddle a cache-line boundary.
    assert(Address(request.paddr).getOffset() + request.len <=
           RubySystem::getBlockSizeBytes());

    RequestStatus status = getRequestStatus(request);
    if (status != RequestStatus_Ready)
        return status;

    SequencerRequest *srequest =
        new SequencerRequest(request, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (found) {
        panic("Sequencer::makeRequest should never be called if the "
              "request is already outstanding\n");
        return RequestStatus_NULL;
    }

    if (request.type == RubyRequestType_Locked_Write) {
        // NOTE: it is OK to check the locked flag here as the
        // mandatory queue will be checked first ensuring that nothing
        // comes between checking the flag and servicing the store.

        Address line_addr = line_address(Address(request.paddr));
        if (!m_dataCache_ptr->isLocked(line_addr, m_version)) {
            // SC fails: undo the table insertion and report failure.
            removeRequest(srequest);
            if (Debug::getProtocolTrace()) {
                g_system_ptr->getProfiler()->
                    profileTransition("Seq", m_version,
                                      Address(request.paddr),
                                      "", "SC Fail", "",
                                      RubyRequestType_to_string(request.type));
            }
            return RequestStatus_LlscFailed;
        } else {
            m_dataCache_ptr->clearLocked(line_addr);
        }
    }

    issueRequest(request);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}
void Sequencer::issueRequest(const RubyRequest& request) {
void
Sequencer::issueRequest(const RubyRequest& request)
{
// TODO: get rid of CacheMsg, CacheRequestType, and
// AccessModeTYpe, & have SLICC use RubyRequest and subtypes
// natively
CacheRequestType ctype;
switch(request.type) {
case RubyRequestType_IFETCH:
ctype = CacheRequestType_IFETCH;
break;
case RubyRequestType_LD:
ctype = CacheRequestType_LD;
break;
case RubyRequestType_ST:
ctype = CacheRequestType_ST;
break;
case RubyRequestType_Locked_Read:
case RubyRequestType_Locked_Write:
ctype = CacheRequestType_ATOMIC;
break;
case RubyRequestType_RMW_Read:
ctype = CacheRequestType_ATOMIC;
break;
case RubyRequestType_RMW_Write:
ctype = CacheRequestType_ATOMIC;
break;
default:
assert(0);
}
// TODO: get rid of CacheMsg, CacheRequestType, and AccessModeTYpe, & have SLICC use RubyRequest and subtypes natively
CacheRequestType ctype;
switch(request.type) {
case RubyRequestType_IFETCH:
ctype = CacheRequestType_IFETCH;
break;
case RubyRequestType_LD:
ctype = CacheRequestType_LD;
break;
case RubyRequestType_ST:
ctype = CacheRequestType_ST;
break;
case RubyRequestType_Locked_Read:
case RubyRequestType_Locked_Write:
ctype = CacheRequestType_ATOMIC;
break;
case RubyRequestType_RMW_Read:
ctype = CacheRequestType_ATOMIC;
break;
case RubyRequestType_RMW_Write:
ctype = CacheRequestType_ATOMIC;
break;
default:
assert(0);
}
AccessModeType amtype;
switch(request.access_mode){
case RubyAccessMode_User:
amtype = AccessModeType_UserMode;
break;
case RubyAccessMode_Supervisor:
amtype = AccessModeType_SupervisorMode;
break;
case RubyAccessMode_Device:
amtype = AccessModeType_UserMode;
break;
default:
assert(0);
}
Address line_addr(request.paddr);
line_addr.makeLineAddress();
CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No, request.proc_id);
AccessModeType amtype;
switch(request.access_mode){
case RubyAccessMode_User:
amtype = AccessModeType_UserMode;
break;
case RubyAccessMode_Supervisor:
amtype = AccessModeType_SupervisorMode;
break;
case RubyAccessMode_Device:
amtype = AccessModeType_UserMode;
break;
default:
assert(0);
}
if (Debug::getProtocolTrace()) {
g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
"", "Begin", "", RubyRequestType_to_string(request.type));
}
Address line_addr(request.paddr);
line_addr.makeLineAddress();
CacheMsg msg(line_addr, Address(request.paddr), ctype,
Address(request.pc), amtype, request.len, PrefetchBit_No,
request.proc_id);
if (g_system_ptr->getTracer()->traceEnabled()) {
g_system_ptr->getTracer()->traceRequest(this, line_addr, Address(request.pc),
request.type, g_eventQueue_ptr->getTime());
}
if (Debug::getProtocolTrace()) {
g_system_ptr->getProfiler()->
profileTransition("Seq", m_version, Address(request.paddr),
"", "Begin", "",
RubyRequestType_to_string(request.type));
}
Time latency = 0; // initialzed to an null value
if (g_system_ptr->getTracer()->traceEnabled()) {
g_system_ptr->getTracer()->
traceRequest(this, line_addr, Address(request.pc),
request.type, g_eventQueue_ptr->getTime());
}
if (request.type == RubyRequestType_IFETCH)
latency = m_instCache_ptr->getLatency();
else
latency = m_dataCache_ptr->getLatency();
Time latency = 0; // initialzed to an null value
// Send the message to the cache controller
assert(latency > 0);
if (request.type == RubyRequestType_IFETCH)
latency = m_instCache_ptr->getLatency();
else
latency = m_dataCache_ptr->getLatency();
assert(m_mandatory_q_ptr != NULL);
m_mandatory_q_ptr->enqueue(msg, latency);
}
/*
bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
AccessModeType access_mode,
int size, DataBlock*& data_ptr) {
if (type == CacheRequestType_IFETCH) {
return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
} else {
return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
}
}
*/
// Send the message to the cache controller
assert(latency > 0);
void Sequencer::print(ostream& out) const {
out << "[Sequencer: " << m_version
<< ", outstanding requests: " << m_outstanding_count;
out << ", read request table: " << m_readRequestTable
<< ", write request table: " << m_writeRequestTable;
out << "]";
assert(m_mandatory_q_ptr != NULL);
m_mandatory_q_ptr->enqueue(msg, latency);
}
// this can be called from setState whenever coherence permissions are upgraded
// when invoked, coherence violations will be checked for the given block
void Sequencer::checkCoherence(const Address& addr) {
#if 0
// Disabled helper: would probe the I- or D-cache directly (bypassing
// the protocol) for the given line address.  Kept for reference only;
// NOTE(review): confirm callers before re-enabling — access_mode and
// size are accepted but unused here.
bool
Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                          AccessModeType access_mode,
                          int size, DataBlock*& data_ptr)
{
    CacheMemory *cache =
        (type == CacheRequestType_IFETCH) ? m_instCache_ptr : m_dataCache_ptr;

    return cache->tryCacheAccess(line_address(addr), type, data_ptr);
}
#endif
void
Sequencer::print(ostream& out) const
{
    // One-line summary used by the debug stream operator.
    out << "[Sequencer: " << m_version;
    out << ", outstanding requests: " << m_outstanding_count;
    out << ", read request table: " << m_readRequestTable;
    out << ", write request table: " << m_writeRequestTable;
    out << "]";
}
// this can be called from setState whenever coherence permissions are
// upgraded when invoked, coherence violations will be checked for the
// given block
//
// Fix: the invariant check was duplicated inside the #ifdef (diff
// residue), doubling the work under CHECK_COHERENCE builds.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
    g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,25 +26,18 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id: Sequencer.hh 1.70 2006/09/27 14:56:41-05:00 bobba@s1-01.cs.wisc.edu $
*
* Description:
*
*/
#ifndef __MEM_RUBY_SYSTEM_SEQUENCER_HH__
#define __MEM_RUBY_SYSTEM_SEQUENCER_HH__
#ifndef SEQUENCER_H
#define SEQUENCER_H
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/protocol/CacheRequestType.hh"
#include "mem/gems_common/Map.hh"
#include "mem/protocol/AccessModeType.hh"
#include "mem/protocol/CacheRequestType.hh"
#include "mem/protocol/GenericMachineType.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/RubyPort.hh"
class DataBlock;
class CacheMsg;
@@ -54,109 +46,100 @@ class CacheMemory;
class RubySequencerParams;
struct SequencerRequest {
RubyRequest ruby_request;
Time issue_time;
struct SequencerRequest
{
RubyRequest ruby_request;
Time issue_time;
SequencerRequest(const RubyRequest & _ruby_request,
Time _issue_time)
: ruby_request(_ruby_request),
issue_time(_issue_time)
{}
SequencerRequest(const RubyRequest & _ruby_request, Time _issue_time)
: ruby_request(_ruby_request), issue_time(_issue_time)
{}
};
std::ostream& operator<<(std::ostream& out, const SequencerRequest& obj);
class Sequencer : public RubyPort, public Consumer {
public:
class Sequencer : public RubyPort, public Consumer
{
public:
typedef RubySequencerParams Params;
// Constructors
Sequencer(const Params *);
Sequencer(const Params *);
~Sequencer();
// Destructor
~Sequencer();
// Public Methods
void wakeup(); // Used only for deadlock detection
// Public Methods
void wakeup(); // Used only for deadlock detection
void printConfig(ostream& out) const;
void printConfig(ostream& out) const;
void printProgress(ostream& out) const;
void printProgress(ostream& out) const;
void writeCallback(const Address& address, DataBlock& data);
void readCallback(const Address& address, DataBlock& data);
void writeCallback(const Address& address, DataBlock& data);
void readCallback(const Address& address, DataBlock& data);
RequestStatus makeRequest(const RubyRequest & request);
RequestStatus getRequestStatus(const RubyRequest& request);
bool empty() const;
RequestStatus makeRequest(const RubyRequest & request);
RequestStatus getRequestStatus(const RubyRequest& request);
bool empty() const;
void print(ostream& out) const;
void printStats(ostream & out) const;
void checkCoherence(const Address& address);
void print(ostream& out) const;
void printStats(ostream & out) const;
void checkCoherence(const Address& address);
void removeRequest(SequencerRequest* request);
// bool getRubyMemoryValue(const Address& addr, char* value, unsigned int size_in_bytes);
// bool setRubyMemoryValue(const Address& addr, char *value, unsigned int size_in_bytes);
private:
bool tryCacheAccess(const Address& addr, CacheRequestType type,
const Address& pc, AccessModeType access_mode,
int size, DataBlock*& data_ptr);
void issueRequest(const RubyRequest& request);
void removeRequest(SequencerRequest* request);
private:
// Private Methods
bool tryCacheAccess(const Address& addr, CacheRequestType type, const Address& pc, AccessModeType access_mode, int size, DataBlock*& data_ptr);
void issueRequest(const RubyRequest& request);
void hitCallback(SequencerRequest* request, DataBlock& data);
bool insertRequest(SequencerRequest* request);
void hitCallback(SequencerRequest* request, DataBlock& data);
bool insertRequest(SequencerRequest* request);
// Private copy constructor and assignment operator
Sequencer(const Sequencer& obj);
Sequencer& operator=(const Sequencer& obj);
// Private copy constructor and assignment operator
Sequencer(const Sequencer& obj);
Sequencer& operator=(const Sequencer& obj);
private:
int m_max_outstanding_requests;
int m_deadlock_threshold;
private:
int m_max_outstanding_requests;
int m_deadlock_threshold;
CacheMemory* m_dataCache_ptr;
CacheMemory* m_instCache_ptr;
CacheMemory* m_dataCache_ptr;
CacheMemory* m_instCache_ptr;
Map<Address, SequencerRequest*> m_writeRequestTable;
Map<Address, SequencerRequest*> m_readRequestTable;
// Global outstanding request count, across all request tables
int m_outstanding_count;
bool m_deadlock_check_scheduled;
Map<Address, SequencerRequest*> m_writeRequestTable;
Map<Address, SequencerRequest*> m_readRequestTable;
// Global outstanding request count, across all request tables
int m_outstanding_count;
bool m_deadlock_check_scheduled;
int m_store_waiting_on_load_cycles;
int m_store_waiting_on_store_cycles;
int m_load_waiting_on_store_cycles;
int m_load_waiting_on_load_cycles;
int m_store_waiting_on_load_cycles;
int m_store_waiting_on_store_cycles;
int m_load_waiting_on_store_cycles;
int m_load_waiting_on_load_cycles;
bool m_usingRubyTester;
bool m_usingRubyTester;
class SequencerWakeupEvent : public Event
{
Sequencer *m_sequencer_ptr;
class SequencerWakeupEvent : public Event
{
private:
Sequencer *m_sequencer_ptr;
public:
SequencerWakeupEvent(Sequencer *_seq) : m_sequencer_ptr(_seq) {}
void process() { m_sequencer_ptr->wakeup(); }
const char *description() const { return "Sequencer deadlock check"; }
};
public:
SequencerWakeupEvent(Sequencer *_seq) : m_sequencer_ptr(_seq) {}
void process() { m_sequencer_ptr->wakeup(); }
const char *description() const { return "Sequencer deadlock check"; }
};
SequencerWakeupEvent deadlockCheckEvent;
SequencerWakeupEvent deadlockCheckEvent;
};
// Output operator declaration
ostream& operator<<(ostream& out, const Sequencer& obj);
// ******************* Definitions *******************
// Output operator definition
// Output operator definition: delegates to Sequencer::print().
//
// Fix: this span held two interleaved signatures ("extern inline" old
// style and plain "inline" new style) over duplicated body statements,
// which would not compile and would have printed the object twice.
inline ostream&
operator<<(ostream& out, const Sequencer& obj)
{
    obj.print(out);
    out << flush;
    return out;
}
#endif //SEQUENCER_H
#endif // __MEM_RUBY_SYSTEM_SEQUENCER_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 2009 Advanced Micro Devices, Inc.
* All rights reserved.
@@ -29,10 +28,6 @@
#include "mem/ruby/system/SparseMemory.hh"
// ****************************************************************
SparseMemory::SparseMemory(int number_of_bits, int number_of_levels)
{
int even_level_bits;
@@ -81,22 +76,16 @@ SparseMemory::recursivelyRemoveTables(SparseMapType* curTable, int curLevel)
SparseMapType::iterator iter;
for (iter = curTable->begin(); iter != curTable->end(); iter++) {
SparseMemEntry_t* entryStruct = &((*iter).second);
SparseMemEntry* entryStruct = &((*iter).second);
if (curLevel != (m_number_of_levels - 1)) {
//
// If the not at the last level, analyze those lower level tables first,
// then delete those next tables
//
SparseMapType* nextTable;
nextTable = (SparseMapType*)(entryStruct->entry);
// If the not at the last level, analyze those lower level
// tables first, then delete those next tables
SparseMapType* nextTable = (SparseMapType*)(entryStruct->entry);
recursivelyRemoveTables(nextTable, (curLevel + 1));
delete nextTable;
} else {
//
// If at the last level, delete the directory entry
//
Directory_Entry* dirEntry;
dirEntry = (Directory_Entry*)(entryStruct->entry);
delete dirEntry;
@@ -104,15 +93,10 @@ SparseMemory::recursivelyRemoveTables(SparseMapType* curTable, int curLevel)
entryStruct->entry = NULL;
}
//
// Once all entries have been deleted, erase the entries
//
curTable->erase(curTable->begin(), curTable->end());
}
// PUBLIC METHODS
// tests to see if an address is present in the memory
bool
SparseMemory::exist(const Address& address) const
@@ -120,22 +104,19 @@ SparseMemory::exist(const Address& address) const
SparseMapType* curTable = m_map_head;
Address curAddress;
//
// Initiallize the high bit to be the total number of bits plus the block
// offset. However the highest bit index is one less than this value.
//
// Initiallize the high bit to be the total number of bits plus
// the block offset. However the highest bit index is one less
// than this value.
int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
int lowBit;
assert(address == line_address(address));
DEBUG_EXPR(CACHE_COMP, HighPrio, address);
for (int level = 0; level < m_number_of_levels; level++) {
//
// Create the appropriate sub address for this level
// Note: that set Address is inclusive of the specified range, thus the
// high bit is one less than the total number of bits used to create the
// address.
//
// Note: that set Address is inclusive of the specified range,
// thus the high bit is one less than the total number of bits
// used to create the address.
lowBit = highBit - m_number_of_bits_per_level[level];
curAddress.setAddress(address.bitSelect(lowBit, highBit - 1));
@@ -144,15 +125,11 @@ SparseMemory::exist(const Address& address) const
DEBUG_EXPR(CACHE_COMP, HighPrio, highBit - 1);
DEBUG_EXPR(CACHE_COMP, HighPrio, curAddress);
//
// Adjust the highBit value for the next level
//
highBit -= m_number_of_bits_per_level[level];
//
// If the address is found, move on to the next level. Otherwise,
// return not found
//
// If the address is found, move on to the next level.
// Otherwise, return not found
if (curTable->count(curAddress) != 0) {
curTable = (SparseMapType*)(((*curTable)[curAddress]).entry);
} else {
@@ -176,43 +153,34 @@ SparseMemory::add(const Address& address)
Address curAddress;
SparseMapType* curTable = m_map_head;
SparseMemEntry_t* entryStruct = NULL;
SparseMemEntry* entryStruct = NULL;
//
// Initiallize the high bit to be the total number of bits plus the block
// offset. However the highest bit index is one less than this value.
//
// Initiallize the high bit to be the total number of bits plus
// the block offset. However the highest bit index is one less
// than this value.
int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
int lowBit;
void* newEntry = NULL;
for (int level = 0; level < m_number_of_levels; level++) {
//
// create the appropriate address for this level
// Note: that set Address is inclusive of the specified range, thus the
// high bit is one less than the total number of bits used to create the
// address.
//
// Note: that set Address is inclusive of the specified range,
// thus the high bit is one less than the total number of bits
// used to create the address.
lowBit = highBit - m_number_of_bits_per_level[level];
curAddress.setAddress(address.bitSelect(lowBit, highBit - 1));
//
// Adjust the highBit value for the next level
//
highBit -= m_number_of_bits_per_level[level];
//
// if the address exists in the cur table, move on. Otherwise
// create a new table.
//
if (curTable->count(curAddress) != 0) {
curTable = (SparseMapType*)(((*curTable)[curAddress]).entry);
} else {
} else {
m_adds_per_level[level]++;
//
// if the last level, add a directory entry. Otherwise add a map.
//
if (level == (m_number_of_levels - 1)) {
Directory_Entry* tempDirEntry = new Directory_Entry();
tempDirEntry->getDataBlk().clear();
@@ -222,17 +190,13 @@ SparseMemory::add(const Address& address)
newEntry = (void*)(tempMap);
}
//
// Create the pointer container SparseMemEntry_t and add it to the
// table.
//
entryStruct = new SparseMemEntry_t;
// Create the pointer container SparseMemEntry and add it
// to the table.
entryStruct = new SparseMemEntry;
entryStruct->entry = newEntry;
(*curTable)[curAddress] = *entryStruct;
//
// Move to the next level of the heirarchy
//
curTable = (SparseMapType*)newEntry;
}
}
@@ -244,20 +208,17 @@ SparseMemory::add(const Address& address)
// recursively search table hierarchy for the lowest level table.
// remove the lowest entry and any empty tables above it.
int
SparseMemory::recursivelyRemoveLevels(
const Address& address,
curNextInfo& curInfo)
SparseMemory::recursivelyRemoveLevels(const Address& address,
CurNextInfo& curInfo)
{
Address curAddress;
curNextInfo nextInfo;
SparseMemEntry_t* entryStruct;
CurNextInfo nextInfo;
SparseMemEntry* entryStruct;
//
// create the appropriate address for this level
// Note: that set Address is inclusive of the specified range, thus the
// high bit is one less than the total number of bits used to create the
// address.
//
// Note: that set Address is inclusive of the specified range,
// thus the high bit is one less than the total number of bits
// used to create the address.
curAddress.setAddress(address.bitSelect(curInfo.lowBit,
curInfo.highBit - 1));
@@ -272,9 +233,7 @@ SparseMemory::recursivelyRemoveLevels(
entryStruct = &((*(curInfo.curTable))[curAddress]);
if (curInfo.level < (m_number_of_levels - 1)) {
//
// set up next level's info
//
nextInfo.curTable = (SparseMapType*)(entryStruct->entry);
nextInfo.level = curInfo.level + 1;
@@ -284,15 +243,11 @@ SparseMemory::recursivelyRemoveLevels(
nextInfo.lowBit = curInfo.lowBit -
m_number_of_bits_per_level[curInfo.level + 1];
//
// recursively search the table hierarchy
//
int tableSize = recursivelyRemoveLevels(address, nextInfo);
//
// If this table below is now empty, we must delete it and erase it from
// our table.
//
// If this table below is now empty, we must delete it and
// erase it from our table.
if (tableSize == 0) {
m_removes_per_level[curInfo.level]++;
delete nextInfo.curTable;
@@ -300,10 +255,9 @@ SparseMemory::recursivelyRemoveLevels(
curInfo.curTable->erase(curAddress);
}
} else {
//
// if this is the last level, we have reached the Directory Entry and thus
// we should delete it including the SparseMemEntry container struct.
//
// if this is the last level, we have reached the Directory
// Entry and thus we should delete it including the
// SparseMemEntry container struct.
Directory_Entry* dirEntry;
dirEntry = (Directory_Entry*)(entryStruct->entry);
entryStruct->entry = NULL;
@@ -323,26 +277,21 @@ SparseMemory::remove(const Address& address)
m_total_removes++;
curNextInfo nextInfo;
CurNextInfo nextInfo;
//
// Initialize table pointer and level value
//
nextInfo.curTable = m_map_head;
nextInfo.level = 0;
//
// Initiallize the high bit to be the total number of bits plus the block
// offset. However the highest bit index is one less than this value.
//
// Initiallize the high bit to be the total number of bits plus
// the block offset. However the highest bit index is one less
// than this value.
nextInfo.highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
nextInfo.lowBit = nextInfo.highBit - m_number_of_bits_per_level[0];;
//
// recursively search the table hierarchy for empty tables starting from the
// level 0. Note we do not check the return value because the head table is
// never deleted;
//
// recursively search the table hierarchy for empty tables
// starting from the level 0. Note we do not check the return
// value because the head table is never deleted;
recursivelyRemoveLevels(address, nextInfo);
assert(!exist(address));
@@ -362,20 +311,17 @@ SparseMemory::lookup(const Address& address)
SparseMapType* curTable = m_map_head;
Directory_Entry* entry = NULL;
//
// Initiallize the high bit to be the total number of bits plus the block
// offset. However the highest bit index is one less than this value.
//
// Initiallize the high bit to be the total number of bits plus
// the block offset. However the highest bit index is one less
// than this value.
int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
int lowBit;
for (int level = 0; level < m_number_of_levels; level++) {
//
// create the appropriate address for this level
// Note: that set Address is inclusive of the specified range, thus the
// high bit is one less than the total number of bits used to create the
// address.
//
// Note: that set Address is inclusive of the specified range,
// thus the high bit is one less than the total number of bits
// used to create the address.
lowBit = highBit - m_number_of_bits_per_level[level];
curAddress.setAddress(address.bitSelect(lowBit, highBit - 1));
@@ -384,21 +330,15 @@ SparseMemory::lookup(const Address& address)
DEBUG_EXPR(CACHE_COMP, HighPrio, highBit - 1);
DEBUG_EXPR(CACHE_COMP, HighPrio, curAddress);
//
// Adjust the highBit value for the next level
//
highBit -= m_number_of_bits_per_level[level];
//
// The entry should be in the table and valid
//
curTable = (SparseMapType*)(((*curTable)[curAddress]).entry);
assert(curTable != NULL);
}
//
// The last entry actually points to the Directory entry not a table
//
entry = (Directory_Entry*)curTable;
return entry;

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 2009 Advanced Micro Devices, Inc.
* All rights reserved.
@@ -27,39 +26,35 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __MEM_RUBY_SYSTEM_SPARSEMEMORY_HH__
#define __MEM_RUBY_SYSTEM_SPARSEMEMORY_HH__
#ifndef SPARSEMEMORY_H
#define SPARSEMEMORY_H
#include "mem/ruby/common/Global.hh"
#include "base/hashmap.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/protocol/Directory_Entry.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
typedef struct SparseMemEntry {
struct SparseMemEntry
{
void* entry;
} SparseMemEntry_t;
};
typedef m5::hash_map<Address, SparseMemEntry_t> SparseMapType;
typedef m5::hash_map<Address, SparseMemEntry> SparseMapType;
typedef struct curNextInfo {
struct CurNextInfo
{
SparseMapType* curTable;
int level;
int highBit;
int lowBit;
};
class SparseMemory {
class SparseMemory
{
public:
// Constructors
SparseMemory(int number_of_bits, int number_of_levels);
// Destructor
~SparseMemory();
// Public Methods
void printConfig(ostream& out) { }
bool exist(const Address& address) const;
@@ -83,7 +78,7 @@ class SparseMemory {
void recursivelyRemoveTables(SparseMapType* currentTable, int level);
// recursive search for address and remove associated entries
int recursivelyRemoveLevels(const Address& address, curNextInfo& curInfo);
int recursivelyRemoveLevels(const Address& address, CurNextInfo& curInfo);
// Data Members (m_prefix)
SparseMapType* m_map_head;
@@ -98,17 +93,12 @@ class SparseMemory {
uint64_t* m_removes_per_level;
};
// Output operator declaration
ostream& operator<<(ostream& out, const SparseMemEntry& obj);
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const SparseMemEntry& obj)
inline ostream&
operator<<(ostream& out, const SparseMemEntry& obj)
{
out << "SparseMemEntry";
out << flush;
return out;
}
#endif //SPARSEMEMORY_H
#endif // __MEM_RUBY_SYSTEM_SPARSEMEMORY_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,25 +26,15 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* RubySystem.cc
*
* Description: See System.hh
*
* $Id$
*
*/
#include "mem/ruby/system/System.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/system/MemoryVector.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "base/output.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/MemoryVector.hh"
#include "mem/ruby/system/System.hh"
int RubySystem::m_random_seed;
bool RubySystem::m_randomization;
@@ -103,68 +92,68 @@ RubySystem::RubySystem(const Params *p)
registerExitCallback(rubyExitCB);
}
void RubySystem::init()
void
RubySystem::init()
{
m_profiler_ptr->clearStats();
m_profiler_ptr->clearStats();
}
RubySystem::~RubySystem()
{
delete m_network_ptr;
delete m_profiler_ptr;
delete m_tracer_ptr;
if (m_mem_vec_ptr != NULL) {
delete m_mem_vec_ptr;
}
delete m_network_ptr;
delete m_profiler_ptr;
delete m_tracer_ptr;
if (m_mem_vec_ptr)
delete m_mem_vec_ptr;
}
void RubySystem::printSystemConfig(ostream & out)
void
RubySystem::printSystemConfig(ostream & out)
{
out << "RubySystem config:" << endl;
out << " random_seed: " << m_random_seed << endl;
out << " randomization: " << m_randomization << endl;
out << " cycle_period: " << m_clock << endl;
out << " block_size_bytes: " << m_block_size_bytes << endl;
out << " block_size_bits: " << m_block_size_bits << endl;
out << " memory_size_bytes: " << m_memory_size_bytes << endl;
out << " memory_size_bits: " << m_memory_size_bits << endl;
out << "RubySystem config:" << endl
<< " random_seed: " << m_random_seed << endl
<< " randomization: " << m_randomization << endl
<< " cycle_period: " << m_clock << endl
<< " block_size_bytes: " << m_block_size_bytes << endl
<< " block_size_bits: " << m_block_size_bits << endl
<< " memory_size_bytes: " << m_memory_size_bytes << endl
<< " memory_size_bits: " << m_memory_size_bits << endl;
}
void RubySystem::printConfig(ostream& out)
void
RubySystem::printConfig(ostream& out)
{
out << "\n================ Begin RubySystem Configuration Print ================\n\n";
printSystemConfig(out);
m_network_ptr->printConfig(out);
m_profiler_ptr->printConfig(out);
out << "\n================ End RubySystem Configuration Print ================\n\n";
out << "\n================ Begin RubySystem Configuration Print ================\n\n";
printSystemConfig(out);
m_network_ptr->printConfig(out);
m_profiler_ptr->printConfig(out);
out << "\n================ End RubySystem Configuration Print ================\n\n";
}
void RubySystem::printStats(ostream& out)
void
RubySystem::printStats(ostream& out)
{
const time_t T = time(NULL);
tm *localTime = localtime(&T);
char buf[100];
strftime(buf, 100, "%b/%d/%Y %H:%M:%S", localTime);
const time_t T = time(NULL);
tm *localTime = localtime(&T);
char buf[100];
strftime(buf, 100, "%b/%d/%Y %H:%M:%S", localTime);
out << "Real time: " << buf << endl;
out << "Real time: " << buf << endl;
m_profiler_ptr->printStats(out);
m_network_ptr->printStats(out);
m_profiler_ptr->printStats(out);
m_network_ptr->printStats(out);
}
void RubySystem::clearStats() const
void
RubySystem::clearStats() const
{
m_profiler_ptr->clearStats();
m_network_ptr->clearStats();
m_profiler_ptr->clearStats();
m_network_ptr->clearStats();
}
void RubySystem::recordCacheContents(CacheRecorder& tr) const
void
RubySystem::recordCacheContents(CacheRecorder& tr) const
{
}
#ifdef CHECK_COHERENCE
@@ -176,48 +165,46 @@ void RubySystem::recordCacheContents(CacheRecorder& tr) const
// in setState. The SLICC spec must also define methods "isBlockShared"
// and "isBlockExclusive" that are specific to that protocol
//
void RubySystem::checkGlobalCoherenceInvariant(const Address& addr ) {
/*
NodeID exclusive = -1;
bool sharedDetected = false;
NodeID lastShared = -1;
void
RubySystem::checkGlobalCoherenceInvariant(const Address& addr)
{
#if 0
NodeID exclusive = -1;
bool sharedDetected = false;
NodeID lastShared = -1;
for (int i = 0; i < m_chip_vector.size(); i++) {
for (int i = 0; i < m_chip_vector.size(); i++) {
if (m_chip_vector[i]->isBlockExclusive(addr)) {
if (exclusive != -1) {
// coherence violation
WARN_EXPR(exclusive);
WARN_EXPR(m_chip_vector[i]->getID());
WARN_EXPR(addr);
WARN_EXPR(g_eventQueue_ptr->getTime());
ERROR_MSG("Coherence Violation Detected -- 2 exclusive chips");
} else if (sharedDetected) {
WARN_EXPR(lastShared);
WARN_EXPR(m_chip_vector[i]->getID());
WARN_EXPR(addr);
WARN_EXPR(g_eventQueue_ptr->getTime());
ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
} else {
exclusive = m_chip_vector[i]->getID();
}
} else if (m_chip_vector[i]->isBlockShared(addr)) {
sharedDetected = true;
lastShared = m_chip_vector[i]->getID();
if (m_chip_vector[i]->isBlockExclusive(addr)) {
if (exclusive != -1) {
// coherence violation
WARN_EXPR(exclusive);
WARN_EXPR(m_chip_vector[i]->getID());
WARN_EXPR(addr);
WARN_EXPR(g_eventQueue_ptr->getTime());
ERROR_MSG("Coherence Violation Detected -- 2 exclusive chips");
}
else if (sharedDetected) {
WARN_EXPR(lastShared);
WARN_EXPR(m_chip_vector[i]->getID());
WARN_EXPR(addr);
WARN_EXPR(g_eventQueue_ptr->getTime());
ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
}
else {
exclusive = m_chip_vector[i]->getID();
}
if (exclusive != -1) {
WARN_EXPR(lastShared);
WARN_EXPR(exclusive);
WARN_EXPR(addr);
WARN_EXPR(g_eventQueue_ptr->getTime());
ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
}
}
}
else if (m_chip_vector[i]->isBlockShared(addr)) {
sharedDetected = true;
lastShared = m_chip_vector[i]->getID();
if (exclusive != -1) {
WARN_EXPR(lastShared);
WARN_EXPR(exclusive);
WARN_EXPR(addr);
WARN_EXPR(g_eventQueue_ptr->getTime());
ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
}
}
}
*/
#endif
}
#endif
@@ -231,7 +218,8 @@ RubySystemParams::create()
* virtual process function that is invoked when the callback
* queue is executed.
*/
void RubyExitCallback::process()
void
RubyExitCallback::process()
{
std::ostream *os = simout.create(stats_filename);
RubySystem::printConfig(*os);

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -28,32 +27,27 @@
*/
/*
* System.hh
*
* Description: Contains all of the various parts of the system we are
* simulating. Performs allocation, deallocation, and setup of all
* the major components of the system
*
* $Id$
*
* Contains all of the various parts of the system we are simulating.
* Performs allocation, deallocation, and setup of all the major
* components of the system
*/
#ifndef SYSTEM_H
#define SYSTEM_H
#ifndef __MEM_RUBY_SYSTEM_SYSTEM_HH__
#define __MEM_RUBY_SYSTEM_SYSTEM_HH__
#include "mem/ruby/system/RubyPort.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Vector.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "sim/sim_object.hh"
#include "params/RubySystem.hh"
#include "base/callback.hh"
#include "mem/gems_common/Vector.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "params/RubySystem.hh"
#include "sim/sim_object.hh"
class Profiler;
class Network;
class CacheRecorder;
class Tracer;
class MemoryVector;
class Network;
class Profiler;
class Tracer;
/*
* This defines the number of longs (32-bits on 32 bit machines,
@@ -68,83 +62,103 @@ class MemoryVector;
*/
const int NUMBER_WORDS_PER_SET = 1;
class RubySystem : public SimObject {
public:
class RubySystem : public SimObject
{
public:
typedef RubySystemParams Params;
RubySystem(const Params *p);
// Destructor
~RubySystem();
~RubySystem();
// config accessors
static int getRandomSeed() { return m_random_seed; }
static int getRandomization() { return m_randomization; }
static int getBlockSizeBytes() { return m_block_size_bytes; }
static int getBlockSizeBits() { return m_block_size_bits; }
static uint64 getMemorySizeBytes() { return m_memory_size_bytes; }
static int getMemorySizeBits() { return m_memory_size_bits; }
// config accessors
static int getRandomSeed() { return m_random_seed; }
static int getRandomization() { return m_randomization; }
static int getBlockSizeBytes() { return m_block_size_bytes; }
static int getBlockSizeBits() { return m_block_size_bits; }
static uint64 getMemorySizeBytes() { return m_memory_size_bytes; }
static int getMemorySizeBits() { return m_memory_size_bits; }
// Public Methods
static Network* getNetwork() { assert(m_network_ptr != NULL); return m_network_ptr; }
static RubyEventQueue* getEventQueue() { return g_eventQueue_ptr; }
Profiler* getProfiler() {assert(m_profiler_ptr != NULL); return m_profiler_ptr; }
static Tracer* getTracer() { assert(m_tracer_ptr != NULL); return m_tracer_ptr; }
static MemoryVector* getMemoryVector() { assert(m_mem_vec_ptr != NULL); return m_mem_vec_ptr;}
// Public Methods
static Network*
getNetwork()
{
assert(m_network_ptr != NULL);
return m_network_ptr;
}
void recordCacheContents(CacheRecorder& tr) const;
static void printConfig(ostream& out);
static void printStats(ostream& out);
void clearStats() const;
static RubyEventQueue*
getEventQueue()
{
return g_eventQueue_ptr;
}
uint64 getInstructionCount(int thread) { return 1; }
static uint64 getCycleCount(int thread) { return g_eventQueue_ptr->getTime(); }
Profiler*
getProfiler()
{
assert(m_profiler_ptr != NULL);
return m_profiler_ptr;
}
void print(ostream& out) const;
/*
#ifdef CHECK_COHERENCE
void checkGlobalCoherenceInvariant(const Address& addr);
#endif
*/
static Tracer*
getTracer()
{
assert(m_tracer_ptr != NULL);
return m_tracer_ptr;
}
private:
// Private copy constructor and assignment operator
RubySystem(const RubySystem& obj);
RubySystem& operator=(const RubySystem& obj);
static MemoryVector*
getMemoryVector()
{
assert(m_mem_vec_ptr != NULL);
return m_mem_vec_ptr;
}
void init();
void recordCacheContents(CacheRecorder& tr) const;
static void printConfig(ostream& out);
static void printStats(ostream& out);
void clearStats() const;
static void printSystemConfig(ostream& out);
uint64 getInstructionCount(int thread) { return 1; }
static uint64
getCycleCount(int thread)
{
return g_eventQueue_ptr->getTime();
}
private:
// configuration parameters
static int m_random_seed;
static bool m_randomization;
static Tick m_clock;
static int m_block_size_bytes;
static int m_block_size_bits;
static uint64 m_memory_size_bytes;
static int m_memory_size_bits;
void print(ostream& out) const;
// Data Members (m_ prefix)
static Network* m_network_ptr;
private:
// Private copy constructor and assignment operator
RubySystem(const RubySystem& obj);
RubySystem& operator=(const RubySystem& obj);
public:
static Profiler* m_profiler_ptr;
static Tracer* m_tracer_ptr;
static MemoryVector* m_mem_vec_ptr;
void init();
static void printSystemConfig(ostream& out);
private:
// configuration parameters
static int m_random_seed;
static bool m_randomization;
static Tick m_clock;
static int m_block_size_bytes;
static int m_block_size_bits;
static uint64 m_memory_size_bytes;
static int m_memory_size_bits;
static Network* m_network_ptr;
public:
static Profiler* m_profiler_ptr;
static Tracer* m_tracer_ptr;
static MemoryVector* m_mem_vec_ptr;
};
// Output operator declaration
ostream& operator<<(ostream& out, const RubySystem& obj);
// ******************* Definitions *******************
// Output operator definition
inline
ostream& operator<<(ostream& out, const RubySystem& obj)
inline ostream&
operator<<(ostream& out, const RubySystem& obj)
{
// obj.print(out);
out << flush;
return out;
//obj.print(out);
out << flush;
return out;
}
class RubyExitCallback : public Callback
@@ -153,22 +167,17 @@ class RubyExitCallback : public Callback
string stats_filename;
public:
/**
* virtualize the destructor to make sure that the correct one
* gets called.
*/
virtual ~RubyExitCallback() {}
RubyExitCallback(const string& _stats_filename)
{
stats_filename = _stats_filename;
stats_filename = _stats_filename;
}
virtual void process();
};
#endif //SYSTEM_H
#endif // __MEM_RUBY_SYSTEM_SYSTEM_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,141 +26,115 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* TBETable.hh
*
* Description:
*
* $Id$
*
*/
#ifndef __MEM_RUBY_SYSTEM_TBETABLE_HH__
#define __MEM_RUBY_SYSTEM_TBETABLE_HH__
#ifndef TBETABLE_H
#define TBETABLE_H
#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/System.hh"
template<class ENTRY>
class TBETable {
public:
class TBETable
{
public:
TBETable(int number_of_TBEs)
: m_number_of_TBEs(number_of_TBEs)
{
}
// Constructors
TBETable(int number_of_TBEs);
void
printConfig(ostream& out)
{
out << "TBEs_per_TBETable: " << m_number_of_TBEs << endl;
}
bool isPresent(const Address& address) const;
void allocate(const Address& address);
void deallocate(const Address& address);
bool
areNSlotsAvailable(int n) const
{
return (m_number_of_TBEs - m_map.size()) >= n;
}
// Destructor
//~TBETable();
ENTRY& lookup(const Address& address);
const ENTRY& lookup(const Address& address) const;
// Public Methods
// Print cache contents
void print(ostream& out) const;
void printConfig(ostream& out) { out << "TBEs_per_TBETable: " << m_number_of_TBEs << endl; }
private:
// Private copy constructor and assignment operator
TBETable(const TBETable& obj);
TBETable& operator=(const TBETable& obj);
bool isPresent(const Address& address) const;
void allocate(const Address& address);
void deallocate(const Address& address);
bool areNSlotsAvailable(int n) const { return (m_number_of_TBEs - m_map.size()) >= n; }
// Data Members (m_prefix)
Map<Address, ENTRY> m_map;
ENTRY& lookup(const Address& address);
const ENTRY& lookup(const Address& address) const;
// Print cache contents
void print(ostream& out) const;
private:
// Private Methods
// Private copy constructor and assignment operator
TBETable(const TBETable& obj);
TBETable& operator=(const TBETable& obj);
// Data Members (m_prefix)
Map<Address, ENTRY> m_map;
private:
int m_number_of_TBEs;
private:
int m_number_of_TBEs;
};
// Output operator declaration
//ostream& operator<<(ostream& out, const TBETable<ENTRY>& obj);
// ******************* Definitions *******************
// Output operator definition
template<class ENTRY>
extern inline
ostream& operator<<(ostream& out, const TBETable<ENTRY>& obj)
inline ostream&
operator<<(ostream& out, const TBETable<ENTRY>& obj)
{
obj.print(out);
out << flush;
return out;
}
// ****************************************************************
template<class ENTRY>
extern inline
TBETable<ENTRY>::TBETable(int number_of_TBEs)
{
m_number_of_TBEs = number_of_TBEs;
}
// PUBLIC METHODS
// tests to see if an address is present in the cache
template<class ENTRY>
extern inline
bool TBETable<ENTRY>::isPresent(const Address& address) const
{
assert(address == line_address(address));
assert(m_map.size() <= m_number_of_TBEs);
return m_map.exist(address);
obj.print(out);
out << flush;
return out;
}
template<class ENTRY>
extern inline
void TBETable<ENTRY>::allocate(const Address& address)
inline bool
TBETable<ENTRY>::isPresent(const Address& address) const
{
assert(isPresent(address) == false);
assert(m_map.size() < m_number_of_TBEs);
m_map.add(address, ENTRY());
assert(address == line_address(address));
assert(m_map.size() <= m_number_of_TBEs);
return m_map.exist(address);
}
template<class ENTRY>
extern inline
void TBETable<ENTRY>::deallocate(const Address& address)
inline void
TBETable<ENTRY>::allocate(const Address& address)
{
assert(isPresent(address) == true);
assert(m_map.size() > 0);
m_map.erase(address);
assert(isPresent(address) == false);
assert(m_map.size() < m_number_of_TBEs);
m_map.add(address, ENTRY());
}
template<class ENTRY>
inline void
TBETable<ENTRY>::deallocate(const Address& address)
{
assert(isPresent(address) == true);
assert(m_map.size() > 0);
m_map.erase(address);
}
// looks an address up in the cache
template<class ENTRY>
extern inline
ENTRY& TBETable<ENTRY>::lookup(const Address& address)
inline ENTRY&
TBETable<ENTRY>::lookup(const Address& address)
{
assert(isPresent(address) == true);
return m_map.lookup(address);
assert(isPresent(address) == true);
return m_map.lookup(address);
}
// looks an address up in the cache
template<class ENTRY>
extern inline
const ENTRY& TBETable<ENTRY>::lookup(const Address& address) const
inline const ENTRY&
TBETable<ENTRY>::lookup(const Address& address) const
{
assert(isPresent(address) == true);
return m_map.lookup(address);
assert(isPresent(address) == true);
return m_map.lookup(address);
}
template<class ENTRY>
extern inline
void TBETable<ENTRY>::print(ostream& out) const
inline void
TBETable<ENTRY>::print(ostream& out) const
{
}
#endif //TBETABLE_H
#endif // __MEM_RUBY_SYSTEM_TBETABLE_HH__

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,101 +26,101 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/TimerTable.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
#include "mem/ruby/system/TimerTable.hh"
TimerTable::TimerTable()
{
m_consumer_ptr = NULL;
m_next_valid = false;
m_next_address = Address(0);
m_next_time = 0;
}
bool TimerTable::isReady() const
{
if (m_map.size() == 0) {
return false;
}
if (!m_next_valid) {
updateNext();
}
assert(m_next_valid);
return (g_eventQueue_ptr->getTime() >= m_next_time);
}
const Address& TimerTable::readyAddress() const
{
assert(isReady());
if (!m_next_valid) {
updateNext();
}
assert(m_next_valid);
return m_next_address;
}
void TimerTable::set(const Address& address, Time relative_latency)
{
assert(address == line_address(address));
assert(relative_latency > 0);
assert(m_map.exist(address) == false);
Time ready_time = g_eventQueue_ptr->getTime() + relative_latency;
m_map.add(address, ready_time);
assert(m_consumer_ptr != NULL);
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, ready_time);
m_next_valid = false;
// Don't always recalculate the next ready address
if (ready_time <= m_next_time) {
m_consumer_ptr = NULL;
m_next_valid = false;
}
m_next_address = Address(0);
m_next_time = 0;
}
void TimerTable::unset(const Address& address)
bool
TimerTable::isReady() const
{
assert(address == line_address(address));
assert(m_map.exist(address) == true);
m_map.remove(address);
// Don't always recalculate the next ready address
if (address == m_next_address) {
m_next_valid = false;
}
}
void TimerTable::print(ostream& out) const
{
}
void TimerTable::updateNext() const
{
if (m_map.size() == 0) {
assert(m_next_valid == false);
return;
}
Vector<Address> addresses = m_map.keys();
m_next_address = addresses[0];
m_next_time = m_map.lookup(m_next_address);
// Search for the minimum time
int size = addresses.size();
for (int i=1; i<size; i++) {
Address maybe_next_address = addresses[i];
Time maybe_next_time = m_map.lookup(maybe_next_address);
if (maybe_next_time < m_next_time) {
m_next_time = maybe_next_time;
m_next_address= maybe_next_address;
if (m_map.size() == 0) {
return false;
}
}
m_next_valid = true;
if (!m_next_valid) {
updateNext();
}
assert(m_next_valid);
return (g_eventQueue_ptr->getTime() >= m_next_time);
}
const Address&
TimerTable::readyAddress() const
{
assert(isReady());
if (!m_next_valid) {
updateNext();
}
assert(m_next_valid);
return m_next_address;
}
void
TimerTable::set(const Address& address, Time relative_latency)
{
assert(address == line_address(address));
assert(relative_latency > 0);
assert(m_map.exist(address) == false);
Time ready_time = g_eventQueue_ptr->getTime() + relative_latency;
m_map.add(address, ready_time);
assert(m_consumer_ptr != NULL);
g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, ready_time);
m_next_valid = false;
// Don't always recalculate the next ready address
if (ready_time <= m_next_time) {
m_next_valid = false;
}
}
void
TimerTable::unset(const Address& address)
{
assert(address == line_address(address));
assert(m_map.exist(address) == true);
m_map.remove(address);
// Don't always recalculate the next ready address
if (address == m_next_address) {
m_next_valid = false;
}
}
void
TimerTable::print(ostream& out) const
{
}
void
TimerTable::updateNext() const
{
if (m_map.size() == 0) {
assert(m_next_valid == false);
return;
}
Vector<Address> addresses = m_map.keys();
m_next_address = addresses[0];
m_next_time = m_map.lookup(m_next_address);
// Search for the minimum time
int size = addresses.size();
for (int i=1; i<size; i++) {
Address maybe_next_address = addresses[i];
Time maybe_next_time = m_map.lookup(maybe_next_address);
if (maybe_next_time < m_next_time) {
m_next_time = maybe_next_time;
m_next_address= maybe_next_address;
}
}
m_next_valid = true;
}

View File

@@ -1,4 +1,3 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
@@ -27,70 +26,69 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* TimerTable.hh
*
* Description:
*
* $Id$
*
*/
#ifndef __MEM_RUBY_SYSTEM_TIMERTABLE_HH__
#define __MEM_RUBY_SYSTEM_TIMERTABLE_HH__
#ifndef TIMERTABLE_H
#define TIMERTABLE_H
#include <cassert>
#include "mem/ruby/common/Global.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
class Consumer;
class TimerTable {
public:
class TimerTable
{
public:
TimerTable();
// Constructors
TimerTable();
static void printConfig(ostream& out) {}
// Destructor
//~TimerTable();
void
setConsumer(Consumer* consumer_ptr)
{
assert(m_consumer_ptr == NULL);
m_consumer_ptr = consumer_ptr;
}
// Class Methods
static void printConfig(ostream& out) {}
void
setDescription(const string& name)
{
m_name = name;
}
// Public Methods
void setConsumer(Consumer* consumer_ptr) { ASSERT(m_consumer_ptr==NULL); m_consumer_ptr = consumer_ptr; }
void setDescription(const string& name) { m_name = name; }
bool isReady() const;
const Address& readyAddress() const;
bool isSet(const Address& address) const { return m_map.exist(address); }
void set(const Address& address, Time relative_latency);
void unset(const Address& address);
void print(ostream& out) const;
bool isReady() const;
const Address& readyAddress() const;
bool isSet(const Address& address) const { return m_map.exist(address); }
void set(const Address& address, Time relative_latency);
void unset(const Address& address);
void print(ostream& out) const;
private:
// Private Methods
void updateNext() const;
private:
void updateNext() const;
// Private copy constructor and assignment operator
TimerTable(const TimerTable& obj);
TimerTable& operator=(const TimerTable& obj);
// Private copy constructor and assignment operator
TimerTable(const TimerTable& obj);
TimerTable& operator=(const TimerTable& obj);
// Data Members (m_prefix)
Map<Address, Time> m_map;
mutable bool m_next_valid;
mutable Time m_next_time; // Only valid if m_next_valid is true
mutable Address m_next_address; // Only valid if m_next_valid is true
Consumer* m_consumer_ptr; // Consumer to signal a wakeup()
string m_name;
// Data Members (m_prefix)
Map<Address, Time> m_map;
mutable bool m_next_valid;
mutable Time m_next_time; // Only valid if m_next_valid is true
mutable Address m_next_address; // Only valid if m_next_valid is true
Consumer* m_consumer_ptr; // Consumer to signal a wakeup()
string m_name;
};
// ******************* Definitions *******************
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const TimerTable& obj)
inline ostream&
operator<<(ostream& out, const TimerTable& obj)
{
obj.print(out);
out << flush;
return out;
obj.print(out);
out << flush;
return out;
}
#endif //TIMERTABLE_H
#endif // __MEM_RUBY_SYSTEM_TIMERTABLE_HH__