SMT atomics modifications:
don't allow enqueuing from other threads if servicing an atomic for a thread
This commit is contained in:
@@ -31,10 +31,11 @@ struct RubyRequest {
|
||||
uint64_t pc;
|
||||
RubyRequestType type;
|
||||
RubyAccessMode access_mode;
|
||||
unsigned proc_id;
|
||||
|
||||
RubyRequest() {}
|
||||
RubyRequest(uint64_t _paddr, uint8_t* _data, int _len, uint64_t _pc, RubyRequestType _type, RubyAccessMode _access_mode)
|
||||
: paddr(_paddr), data(_data), len(_len), pc(_pc), type(_type), access_mode(_access_mode)
|
||||
RubyRequest(uint64_t _paddr, uint8_t* _data, int _len, uint64_t _pc, RubyRequestType _type, RubyAccessMode _access_mode, unsigned _proc_id = 0)
|
||||
: paddr(_paddr), data(_data), len(_len), pc(_pc), type(_type), access_mode(_access_mode), proc_id(_proc_id)
|
||||
{}
|
||||
};
|
||||
|
||||
|
||||
@@ -61,6 +61,8 @@ void Sequencer::init(const vector<string> & argv)
|
||||
m_instCache_ptr = NULL;
|
||||
m_dataCache_ptr = NULL;
|
||||
m_controller = NULL;
|
||||
m_servicing_atomic = -1;
|
||||
m_atomics_counter = 0;
|
||||
for (size_t i=0; i<argv.size(); i+=2) {
|
||||
if ( argv[i] == "controller") {
|
||||
m_controller = RubySystem::getController(argv[i+1]); // args[i] = "L1Cache"
|
||||
@@ -342,7 +344,7 @@ void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
|
||||
}
|
||||
|
||||
// Returns true if the sequencer already has a load or store outstanding
|
||||
bool Sequencer::isReady(const RubyRequest& request) const {
|
||||
bool Sequencer::isReady(const RubyRequest& request) {
|
||||
// POLINA: check if we are currently flushing the write buffer, if so Ruby is returned as not ready
|
||||
// to simulate stalling of the front-end
|
||||
// Do we stall all the sequencers? If it is atomic instruction - yes!
|
||||
@@ -357,6 +359,31 @@ bool Sequencer::isReady(const RubyRequest& request) const {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (m_servicing_atomic != -1 && m_servicing_atomic != (int)request.proc_id) {
|
||||
assert(m_atomics_counter > 0);
|
||||
return false;
|
||||
}
|
||||
else {
|
||||
if (request.type == RubyRequestType_RMW_Read) {
|
||||
if (m_servicing_atomic == -1) {
|
||||
assert(m_atomics_counter == 0);
|
||||
m_servicing_atomic = (int)request.proc_id;
|
||||
}
|
||||
else {
|
||||
assert(m_servicing_atomic == (int)request.proc_id);
|
||||
}
|
||||
m_atomics_counter++;
|
||||
}
|
||||
else if (request.type == RubyRequestType_RMW_Write) {
|
||||
assert(m_servicing_atomic == (int)request.proc_id);
|
||||
assert(m_atomics_counter > 0);
|
||||
m_atomics_counter--;
|
||||
if (m_atomics_counter == 0) {
|
||||
m_servicing_atomic = -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -438,7 +465,7 @@ void Sequencer::issueRequest(const RubyRequest& request) {
|
||||
}
|
||||
Address line_addr(request.paddr);
|
||||
line_addr.makeLineAddress();
|
||||
CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No);
|
||||
CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No, request.proc_id);
|
||||
|
||||
if (Debug::getProtocolTrace()) {
|
||||
g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
|
||||
|
||||
@@ -84,7 +84,7 @@ public:
|
||||
|
||||
// called by Tester or Simics
|
||||
int64_t makeRequest(const RubyRequest & request);
|
||||
bool isReady(const RubyRequest& request) const;
|
||||
bool isReady(const RubyRequest& request);
|
||||
bool empty() const;
|
||||
|
||||
void print(ostream& out) const;
|
||||
@@ -125,7 +125,8 @@ private:
|
||||
// Global outstanding request count, across all request tables
|
||||
int m_outstanding_count;
|
||||
bool m_deadlock_check_scheduled;
|
||||
|
||||
int m_servicing_atomic;
|
||||
int m_atomics_counter;
|
||||
};
|
||||
|
||||
// Output operator declaration
|
||||
|
||||
Reference in New Issue
Block a user