mem-ruby: Fix segfault in pa_performAtomics in GPU_VIPER-TCC.sm

When the cache is performing an atomic operation and receives data, it performs
pa_performAtomic. This action peeks into the coreRequest queue to check
the message type. This queue, however, is already dequeued in the
transition that precedes the one that contains pa_performAtomic. When
pa_performAtomic is called, the simulation crashes. This commit fixes
the crash by using the TBE entry information instead of peeking when TBE
entry exists, and peeking when it doesn't.
This commit is contained in:
Vishnu Ramadas
2024-12-06 18:41:03 -06:00
committed by Bobby R. Bruce
parent 93b58fbf64
commit 6aa9db28f1

View File

@@ -933,14 +933,18 @@ machine(MachineType:TCC, "TCC Cache")
}
action(pa_performAtomic, "pa", desc="Perform atomic") {
peek(coreRequestNetwork_in, CPURequestMsg) {
if ((is_valid(tbe) && tbe.atomicDataReturn) || in_msg.Type == CoherenceRequestType:AtomicReturn) {
cache_entry.DataBlk.atomicPartial(cache_entry.DataBlk, cache_entry.writeMask, false);
} else {
// Set the isAtomicNoReturn flag to ensure that logs are not
// generated erroneously
assert((is_valid(tbe) && tbe.atomicDataNoReturn) || in_msg.Type == CoherenceRequestType:AtomicNoReturn);
cache_entry.DataBlk.atomicPartial(cache_entry.DataBlk, cache_entry.writeMask, true);
if (is_valid(tbe) && tbe.atomicDataReturn) {
cache_entry.DataBlk.atomicPartial(cache_entry.DataBlk, cache_entry.writeMask, false);
} else if (is_valid(tbe) && tbe.atomicDataNoReturn) {
cache_entry.DataBlk.atomicPartial(cache_entry.DataBlk, cache_entry.writeMask, true);
} else {
peek(coreRequestNetwork_in, CPURequestMsg) {
if (in_msg.Type == CoherenceRequestType:AtomicReturn) {
cache_entry.DataBlk.atomicPartial(cache_entry.DataBlk, cache_entry.writeMask, false);
} else {
assert(in_msg.Type == CoherenceRequestType:AtomicNoReturn);
cache_entry.DataBlk.atomicPartial(cache_entry.DataBlk, cache_entry.writeMask, true);
}
}
}
}