arch-vega: Pass s_memtime through smem pipe (#1350)

The Vega ISA's s_memtime instruction is used to obtain a cycle value
from the GPU. Previously, this was implemented to obtain the cycle count
when the memtime instruction reached the execute stage of the GPU
pipeline. However, from microbenchmarking we have found that this
under-reports the latency for memtime instructions relative to real hardware.
Thus, we changed its behavior to go through the scalar memory pipeline
and obtain a latency value from the SQC (L1 I$). This mirrors the
suggestion of the AMD Vega ISA manual that s_memtime should be treated
like a s_load_dwordx2.

The default latency was set based on microbenchmarking.

Change-Id: I5e251dde28c06fe1c492aea4abf9f34f05784420
This commit is contained in:
Marco Kurzynski
2024-08-26 22:47:04 -04:00
committed by GitHub
parent 9bd79bc160
commit a8447b7fc0
25 changed files with 397 additions and 56 deletions

View File

@@ -295,6 +295,14 @@ parser.add_argument(
help="Latency for scalar responses from ruby to the cu.",
)
parser.add_argument(
"--memtime-latency",
type=int,
# Set to a default of 41 from micro-benchmarks
default=41,
help="Latency for memtimes in scalar memory pipeline.",
)
parser.add_argument("--TLB-prefetch", type=int, help="prefetch depth for TLBs")
parser.add_argument(
"--pf-type",
@@ -539,6 +547,7 @@ for i in range(n_cu):
mem_resp_latency=args.mem_resp_latency,
scalar_mem_req_latency=args.scalar_mem_req_latency,
scalar_mem_resp_latency=args.scalar_mem_resp_latency,
memtime_latency=args.memtime_latency,
localDataStore=LdsState(
banks=args.numLdsBanks,
bankConflictPenalty=args.ldsBankConflictPenalty,

View File

@@ -253,3 +253,10 @@ def addAmdGPUOptions(parser):
default=0,
help="number of registers in cache",
)
parser.add_argument(
"--memtime-latency",
type=int,
# Set to a default of 41 from micro-benchmarks
default=41,
help="Latency for memtimes in scalar memory pipeline.",
)

View File

@@ -75,6 +75,7 @@ def createGPU(system, args):
execPolicy=args.CUExecPolicy,
localMemBarrier=args.LocalMemBarrier,
countPages=args.countPages,
memtime_latency=args.memtime_latency,
localDataStore=LdsState(
banks=args.numLdsBanks,
bankConflictPenalty=args.ldsBankConflictPenalty,

View File

@@ -254,7 +254,7 @@ class SQCCntrl(SQC_Controller, CntrlBase):
self.L1cache.create(options)
self.L1cache.resourceStalls = options.no_resource_stalls
self.sequencer = RubySequencer()
self.sequencer = VIPERSequencer()
self.sequencer.version = self.seqCount()
self.sequencer.dcache = self.L1cache

View File

@@ -379,7 +379,11 @@ namespace X86ISA
assert(seg != segment_idx::Ms);
Addr vaddr = req->getVaddr();
DPRINTF(GPUTLB, "TLB Lookup for vaddr %#x.\n", vaddr);
if (req->hasNoAddr()) {
return true;
} else {
DPRINTF(GPUTLB, "TLB Lookup for vaddr %#x.\n", vaddr);
}
HandyM5Reg m5Reg = tc->readMiscRegNoEffect(misc_reg::M5Reg);
if (m5Reg.prot) {
@@ -693,13 +697,19 @@ namespace X86ISA
if (success) {
lookup_outcome = TLB_HIT;
// Put the entry in SenderState
TlbEntry *entry = lookup(tmp_req->getVaddr(), false);
assert(entry);
auto p = sender_state->tc->getProcessPtr();
sender_state->tlbEntry =
new TlbEntry(p->pid(), entry->vaddr, entry->paddr,
false, false);
if (pkt->req->hasNoAddr()) {
sender_state->tlbEntry =
new TlbEntry(p->pid(), 0, 0,
false, false);
} else {
TlbEntry *entry = lookup(tmp_req->getVaddr(), false);
assert(entry);
sender_state->tlbEntry =
new TlbEntry(p->pid(), entry->vaddr, entry->paddr,
false, false);
}
if (update_stats) {
// the reqCnt has an entry per level, so its size tells us

View File

@@ -204,11 +204,17 @@ initMemReqScalarHelper(GPUDynInstPtr gpuDynInst, MemCmd mem_req_type)
* than the address of the first byte then we have a misaligned
* access.
*/
bool misaligned_acc = split_addr > vaddr;
bool misaligned_acc = split_addr > vaddr &&
!gpuDynInst->staticInstruction()->hasNoAddr();
RequestPtr req = std::make_shared<Request>(vaddr, req_size, 0,
gpuDynInst->computeUnit()->requestorId(), 0,
gpuDynInst->wfDynId);
Request::Flags flags;
if (gpuDynInst->staticInstruction()->hasNoAddr()) {
flags.set(Request::HAS_NO_ADDR);
}
RequestPtr req = std::make_shared<Request>(
vaddr, req_size, std::move(flags),
gpuDynInst->computeUnit()->requestorId(), 0,
gpuDynInst->wfDynId);
if (misaligned_acc) {
RequestPtr req1, req2;

View File

@@ -6107,6 +6107,8 @@ namespace VegaISA
}
} // getOperandSize
void initiateAcc(GPUDynInstPtr gpuDynInst) override;
void completeAcc(GPUDynInstPtr gpuDynInst) override;
void execute(GPUDynInstPtr) override;
}; // Inst_SMEM__S_MEMTIME

View File

@@ -937,8 +937,9 @@ namespace VegaISA
Inst_SMEM__S_MEMTIME::Inst_SMEM__S_MEMTIME(InFmt_SMEM *iFmt)
: Inst_SMEM(iFmt, "s_memtime")
{
// s_memtime does not issue a memory request
setFlag(ALU);
setFlag(NoAddr);
setFlag(MemoryRef);
setFlag(Load);
} // Inst_SMEM__S_MEMTIME
Inst_SMEM__S_MEMTIME::~Inst_SMEM__S_MEMTIME()
@@ -950,10 +951,26 @@ namespace VegaISA
void
Inst_SMEM__S_MEMTIME::execute(GPUDynInstPtr gpuDynInst)
{
ScalarOperandU64 sdst(gpuDynInst, instData.SDATA);
sdst = (ScalarRegU64)gpuDynInst->computeUnit()->curCycle();
sdst.write();
Wavefront *wf = gpuDynInst->wavefront();
gpuDynInst->execUnitId = wf->execUnitId;
gpuDynInst->latency.init(gpuDynInst->computeUnit());
gpuDynInst->latency.set(gpuDynInst->computeUnit()->memtime_latency);
gpuDynInst->scalarAddr = 0;
gpuDynInst->computeUnit()->scalarMemoryPipe.issueRequest(gpuDynInst);
} // execute
// Issue the memory access for s_memtime: a 2-dword scalar read, so the
// instruction travels the scalar memory pipeline like an s_load_dwordx2
// (the request carries the NoAddr flag set in the constructor, so no
// real address is read).
void Inst_SMEM__S_MEMTIME::initiateAcc(GPUDynInstPtr gpuDynInst)
{
    initMemRead<2>(gpuDynInst);
} // initiateAcc
// Completion of the s_memtime load: write the destination SGPR pair.
// The timestamp was placed in the response packet's data by
// VIPERSequencer::hitCallback; presumably the load-completion path copies
// it into the operand buffer before this runs — TODO confirm.
void
Inst_SMEM__S_MEMTIME::completeAcc(GPUDynInstPtr gpuDynInst)
{
    // use U64 because 2 requests, each size 32
    ScalarOperandU64 sdst(gpuDynInst, instData.SDATA);
    sdst.write();
} // completeAcc
// --- Inst_SMEM__S_MEMREALTIME class methods ---
Inst_SMEM__S_MEMREALTIME::Inst_SMEM__S_MEMREALTIME(InFmt_SMEM *iFmt)

View File

@@ -259,6 +259,9 @@ GpuTLB::demapPage(Addr va, uint64_t asn)
VegaTlbEntry *
GpuTLB::tlbLookup(const RequestPtr &req, bool update_stats)
{
if (req->hasNoAddr()) {
return NULL;
}
Addr vaddr = req->getVaddr();
Addr alignedVaddr = pageAlign(vaddr);
DPRINTF(GPUTLB, "TLB Lookup for vaddr %#x.\n", vaddr);
@@ -342,20 +345,27 @@ GpuTLB::issueTLBLookup(PacketPtr pkt)
// Access the TLB and figure out if it's a hit or a miss.
auto entry = tlbLookup(tmp_req, update_stats);
if (entry) {
lookup_outcome = TLB_HIT;
if (entry || pkt->req->hasNoAddr()) {
// Put the entry in SenderState
VegaTlbEntry *entry = lookup(virt_page_addr, false);
assert(entry);
lookup_outcome = TLB_HIT;
if (pkt->req->hasNoAddr()) {
sender_state->tlbEntry =
new VegaTlbEntry(1 /* VMID */, 0, 0, 0, 0);
// set false because we shouldn't go to
// host memory for a memtime request
pkt->req->setSystemReq(false);
} else {
VegaTlbEntry *entry = lookup(virt_page_addr, false);
assert(entry);
// Set if this is a system request
pkt->req->setSystemReq(entry->pte.s);
// Set if this is a system request
pkt->req->setSystemReq(entry->pte.s);
Addr alignedPaddr = pageAlign(entry->paddr);
sender_state->tlbEntry =
new VegaTlbEntry(1 /* VMID */, virt_page_addr, alignedPaddr,
entry->logBytes, entry->pte);
Addr alignedPaddr = pageAlign(entry->paddr);
sender_state->tlbEntry =
new VegaTlbEntry(1 /* VMID */, virt_page_addr, alignedPaddr,
entry->logBytes, entry->pte);
}
if (update_stats) {
// the reqCnt has an entry per level, so its size tells us

View File

@@ -204,6 +204,11 @@ class ComputeUnit(ClockedObject):
"TCP and cu as well as TCP data array "
"access. Specified in GPU clock cycles",
)
memtime_latency = Param.Int(
41,
"Latency for memtimes in scalar memory pipeline. "
"Specified in GPU clock cycles",
)
system = Param.System(Parent.any, "system object")
cu_id = Param.Int("CU id")
vrf_to_coalescer_bus_width = Param.Int(

View File

@@ -108,4 +108,5 @@ class GPUStaticInstFlags(Enum):
"MAC", # MAC
"MAD", # MAD
"MFMA", # MFMA
"NoAddr", # Request has no address but goes to SQC for timing
]

View File

@@ -105,6 +105,7 @@ ComputeUnit::ComputeUnit(const Params &p) : ClockedObject(p),
p.scalar_mem_req_latency * p.clk_domain->clockPeriod()),
scalar_resp_tick_latency(
p.scalar_mem_resp_latency * p.clk_domain->clockPeriod()),
memtime_latency(p.memtime_latency * p.clk_domain->clockPeriod()),
_requestorId(p.system->getRequestorId(this, "ComputeUnit")),
lds(*p.localDataStore), gmTokenPort(name() + ".gmTokenPort", this),
ldsPort(csprintf("%s-port", name()), this),

View File

@@ -362,6 +362,8 @@ class ComputeUnit : public ClockedObject
Tick scalar_req_tick_latency;
Tick scalar_resp_tick_latency;
Tick memtime_latency;
/**
* Number of WFs to schedule to each SIMD. This vector is populated
* by hasDispResources(), and consumed by the subsequent call to

View File

@@ -222,6 +222,8 @@ class GPUStaticInst : public GPUStaticInstFlags
bool isMAD() const { return _flags[MAD]; }
bool isMFMA() const { return _flags[MFMA]; }
bool hasNoAddr() const { return _flags[NoAddr]; }
virtual int instSize() const = 0;
// only used for memory instructions

View File

@@ -255,7 +255,10 @@ class Request : public Extensible<Request>
* These flags are *not* cleared when a Request object is
* reused (assigned a new address).
*/
STICKY_FLAGS = INST_FETCH
STICKY_FLAGS = INST_FETCH,
/** TLBI_EXT_SYNC_COMP seems to be the largest value
of FlagsType, so HAS_NO_ADDR's value is that << 1 */
HAS_NO_ADDR = 0x0001000000000000,
};
static const FlagsType STORE_NO_DATA = CACHE_BLOCK_ZERO |
CLEAN | INVALIDATE;
@@ -1013,6 +1016,7 @@ class Request : public Extensible<Request>
bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
bool hasNoAddr() const { return _flags.isSet(HAS_NO_ADDR); }
bool
isPrefetch() const
{

View File

@@ -129,3 +129,4 @@ MakeInclude('system/Sequencer.hh')
MakeInclude('system/GPUCoalescer.hh')
MakeInclude('system/HTMSequencer.hh')
MakeInclude('system/VIPERCoalescer.hh')
MakeInclude('system/VIPERSequencer.hh')

View File

@@ -243,12 +243,17 @@ machine(MachineType:SQC, "GPU SQC (L1 I Cache)")
if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
Entry cache_entry := getCacheEntry(in_msg.LineAddress);
TBE tbe := TBEs.lookup(in_msg.LineAddress);
DPRINTF(RubySlicc, "%s\n", in_msg);
if (in_msg.Type == RubyRequestType:REPLACEMENT) {
trigger(Event:Evict, in_msg.LineAddress, cache_entry, tbe);
if (in_msg.Type == RubyRequestType:hasNoAddr) {
sequencer.readCallback(in_msg.LineAddress, cache_entry.DataBlk, true, MachineType:L1Cache);
mandatoryQueue_in.dequeue(clockEdge());
} else {
trigger(Event:Fetch, in_msg.LineAddress, cache_entry, tbe);
TBE tbe := TBEs.lookup(in_msg.LineAddress);
DPRINTF(RubySlicc, "%s\n", in_msg);
if (in_msg.Type == RubyRequestType:REPLACEMENT) {
trigger(Event:Evict, in_msg.LineAddress, cache_entry, tbe);
} else {
trigger(Event:Fetch, in_msg.LineAddress, cache_entry, tbe);
}
}
}
}

View File

@@ -193,6 +193,7 @@ enumeration(RubyRequestType, desc="...", default="RubyRequestType_NULL") {
TLBI_SYNC, desc="TLB Invalidation Sync operation - Potential initiation";
TLBI_EXT_SYNC, desc="TLB Invalidation Sync operation - External Sync has been requested";
TLBI_EXT_SYNC_COMP, desc="TLB Invalidation Sync operation - External Sync has been completed";
hasNoAddr, desc="Request for timing purposes in VIPERSequencer hitCallback and processReadCallback but reads no address";
}
bool isWriteRequest(RubyRequestType type);

View File

@@ -268,7 +268,7 @@ RubyPort::MemResponsePort::recvTimingReq(PacketPtr pkt)
}
// Check for pio requests and directly send them to the dedicated
// pio port.
if (pkt->cmd != MemCmd::MemSyncReq) {
if (pkt->cmd != MemCmd::MemSyncReq && !pkt->req->hasNoAddr()) {
if (!pkt->req->isMemMgmt() && !isPhysMemAddress(pkt)) {
assert(owner.memRequestPort.isConnected());
DPRINTF(RubyPort, "Request address %#x assumed to be a "
@@ -456,7 +456,9 @@ RubyPort::ruby_hit_callback(PacketPtr pkt)
// The packet was destined for memory and has not yet been turned
// into a response
assert(system->isMemAddr(pkt->getAddr()) || system->isDeviceMemAddr(pkt));
assert(system->isMemAddr(pkt->getAddr()) ||
system->isDeviceMemAddr(pkt) ||
pkt->req->hasNoAddr());
assert(pkt->isRequest());
// First we must retrieve the request port from the sender State
@@ -613,7 +615,7 @@ RubyPort::MemResponsePort::hitCallback(PacketPtr pkt)
// Flush, acquire, release requests don't access physical memory
if (pkt->isFlush() || pkt->cmd == MemCmd::MemSyncReq
|| pkt->cmd == MemCmd::WriteCompleteResp) {
|| pkt->cmd == MemCmd::WriteCompleteResp || pkt->req->hasNoAddr()) {
accessPhysMem = false;
}

View File

@@ -56,6 +56,7 @@ SimObject('Sequencer.py', sim_objects=[
'DMASequencer'])
if env['CONF']['BUILD_GPU']:
SimObject('VIPERCoalescer.py', sim_objects=['VIPERCoalescer'])
SimObject('VIPERSequencer.py', sim_objects=['VIPERSequencer'])
Source('CacheRecorder.cc')
Source('DMASequencer.cc')
@@ -68,3 +69,4 @@ Source('RubySystem.cc')
Source('Sequencer.cc')
if env['CONF']['BUILD_GPU']:
Source('VIPERCoalescer.cc')
Source('VIPERSequencer.cc')

View File

@@ -562,6 +562,33 @@ Sequencer::writeCallback(Addr address, DataBlock& data,
}
}
// Hook factored out of readCallback() so subclasses (e.g. VIPERSequencer)
// can intercept per-request handling. Returns true when the request was
// NOT a read and has been reissued to the cache hierarchy, telling the
// caller to stop draining the request list; returns false when the
// request is a read and normal read completion should proceed.
// The data/timing parameters are unused here but are part of the virtual
// signature for overriders.
bool
Sequencer::processReadCallback(SequencerRequest &seq_req,
                               DataBlock& data,
                               const bool ruby_request,
                               bool externalHit,
                               const MachineType mach,
                               Cycles initialRequestTime,
                               Cycles forwardRequestTime,
                               Cycles firstResponseTime)
{
    // The first (ruby) request on the list must itself be a read.
    if (ruby_request) {
        assert((seq_req.m_type == RubyRequestType_LD) ||
               (seq_req.m_type == RubyRequestType_Load_Linked) ||
               (seq_req.m_type == RubyRequestType_IFETCH));
    }
    if ((seq_req.m_type != RubyRequestType_LD) &&
        (seq_req.m_type != RubyRequestType_Load_Linked) &&
        (seq_req.m_type != RubyRequestType_IFETCH) &&
        (seq_req.m_type != RubyRequestType_REPLACEMENT)) {
        // Write request: reissue request to the cache hierarchy
        issueRequest(seq_req.pkt, seq_req.m_second_type);
        return true;
    }
    return false;
}
void
Sequencer::readCallback(Addr address, DataBlock& data,
bool externalHit, const MachineType mach,
@@ -583,17 +610,9 @@ Sequencer::readCallback(Addr address, DataBlock& data,
bool ruby_request = true;
while (!seq_req_list.empty()) {
SequencerRequest &seq_req = seq_req_list.front();
if (ruby_request) {
assert((seq_req.m_type == RubyRequestType_LD) ||
(seq_req.m_type == RubyRequestType_Load_Linked) ||
(seq_req.m_type == RubyRequestType_IFETCH));
}
if ((seq_req.m_type != RubyRequestType_LD) &&
(seq_req.m_type != RubyRequestType_Load_Linked) &&
(seq_req.m_type != RubyRequestType_IFETCH) &&
(seq_req.m_type != RubyRequestType_REPLACEMENT)) {
// Write request: reissue request to the cache hierarchy
issueRequest(seq_req.pkt, seq_req.m_second_type);
if (processReadCallback(seq_req, data, ruby_request, externalHit, mach,
initialRequestTime, forwardRequestTime,
firstResponseTime)) {
break;
}
if (ruby_request) {
@@ -983,6 +1002,8 @@ Sequencer::makeRequest(PacketPtr pkt)
}
#endif
} else if (pkt->req->hasNoAddr()) {
primary_type = secondary_type = RubyRequestType_hasNoAddr;
} else {
//
// To support SwapReq, we need to check isWrite() first: a SwapReq

View File

@@ -45,6 +45,7 @@
#include <list>
#include <unordered_map>
#include "cpu/testers/rubytest/RubyTester.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/protocol/MachineType.hh"
#include "mem/ruby/protocol/RubyRequestType.hh"
@@ -210,16 +211,24 @@ class Sequencer : public RubyPort
statistics::Counter getIncompleteTimes(const MachineType t) const
{ return m_IncompleteTimes[t]; }
private:
protected:
void issueRequest(PacketPtr pkt, RubyRequestType type);
virtual void hitCallback(SequencerRequest* srequest, DataBlock& data,
bool llscSuccess,
const MachineType mach, const bool externalHit,
const Cycles initialRequestTime,
const Cycles forwardRequestTime,
const Cycles firstResponseTime,
const bool was_coalesced);
void hitCallback(SequencerRequest* srequest, DataBlock& data,
bool llscSuccess,
const MachineType mach, const bool externalHit,
const Cycles initialRequestTime,
const Cycles forwardRequestTime,
const Cycles firstResponseTime,
const bool was_coalesced);
virtual bool processReadCallback(SequencerRequest &seq_req,
DataBlock& data,
const bool rubyRequest,
bool externalHit,
const MachineType mach,
Cycles initialRequestTime,
Cycles forwardRequestTime,
Cycles firstResponseTime);
void recordMissLatency(SequencerRequest* srequest, bool llscSuccess,
const MachineType respondingMach,
@@ -227,6 +236,7 @@ class Sequencer : public RubyPort
Cycles forwardRequestTime,
Cycles firstResponseTime);
private:
// Private copy constructor and assignment operator
Sequencer(const Sequencer& obj);
Sequencer& operator=(const Sequencer& obj);

View File

@@ -0,0 +1,109 @@
/*
* Copyright (c) 2024 The University of Wisconsin
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/ruby/system/VIPERSequencer.hh"
#include "debug/RubyHitMiss.hh"
#include "debug/RubySequencer.hh"
#include "gpu-compute/gpu_dyn_inst.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "params/VIPERSequencer.hh"
namespace gem5
{
namespace ruby
{
// Construct a VIPERSequencer; all state lives in the base Sequencer.
VIPERSequencer::VIPERSequencer(const Params &p)
    : Sequencer(p)
{
}
// Nothing to release beyond what the base Sequencer cleans up.
VIPERSequencer::~VIPERSequencer()
{
}
// Completes a request at this sequencer. hasNoAddr requests (the Vega
// s_memtime instruction) are handled here: instead of returning cache
// data, the response packet is filled with the issuing CU's current
// cycle count. All other request types fall through to the base
// Sequencer::hitCallback.
void
VIPERSequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                            bool llscSuccess,
                            const MachineType mach, const bool externalHit,
                            const Cycles initialRequestTime,
                            const Cycles forwardRequestTime,
                            const Cycles firstResponseTime,
                            const bool was_coalesced)
{
    if (srequest->m_type != RubyRequestType_hasNoAddr) {
        return Sequencer::hitCallback(
            srequest, data, llscSuccess, mach, externalHit, initialRequestTime,
            forwardRequestTime, firstResponseTime, was_coalesced);
    }

    PacketPtr pkt = srequest->pkt;
    // hasNoAddr requests are issued one per instruction and are never
    // coalesced with other accesses.
    assert(!was_coalesced);

    DPRINTF(RubySequencer, "Setting hasNoAddr ticks\n");
    // Walk the packet's sender-state chain to reach the GPU dynamic
    // instruction, and copy its CU's current cycle count into the
    // response payload. completeAcc on the SMEM instruction will later
    // write this value to the destination SGPRs.
    Cycles curCycle =
        pkt->findNextSenderState
            <ComputeUnit::ScalarDataPort::SenderState>()
                ->_gpuDynInst->computeUnit()->curCycle();
    pkt->setData((const uint8_t *)&curCycle);

    // hasNoAddr requests are not expected from the RubyTester, nor
    // during cache warmup/cooldown, so the subBlock/functional-access
    // handling the base hitCallback performs is skipped entirely.
    assert(!m_usingRubyTester);
    assert(!RubySystem::getWarmupEnabled());
    assert(!RubySystem::getCooldownEnabled());

    ruby_hit_callback(pkt);
    testDrainComplete();
}
// readCallback hook: hasNoAddr (s_memtime) requests never need to be
// reissued, so report "keep draining" for them; every other request
// type gets the base Sequencer's handling.
bool
VIPERSequencer::processReadCallback(SequencerRequest &seq_req,
                                    DataBlock& data,
                                    const bool ruby_request,
                                    bool externalHit,
                                    const MachineType mach,
                                    Cycles initialRequestTime,
                                    Cycles forwardRequestTime,
                                    Cycles firstResponseTime)
{
    if (seq_req.m_type == RubyRequestType_hasNoAddr) {
        return false;
    }
    return Sequencer::processReadCallback(
        seq_req, data, ruby_request, externalHit, mach, initialRequestTime,
        forwardRequestTime, firstResponseTime);
}
} // namespace ruby
} // namespace gem5

View File

@@ -0,0 +1,76 @@
/*
* Copyright (c) 2024 The University of Wisconsin
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __MEM_RUBY_SYSTEM_VIPERSEQUENCER_HH__
#define __MEM_RUBY_SYSTEM_VIPERSEQUENCER_HH__
#include <iostream>
#include "mem/ruby/system/Sequencer.hh"
namespace gem5
{
struct VIPERSequencerParams;
namespace ruby
{
class VIPERSequencer : public Sequencer
{
public:
typedef VIPERSequencerParams Params;
VIPERSequencer(const Params &p);
~VIPERSequencer();
private:
void hitCallback(SequencerRequest* srequest, DataBlock& data,
bool llscSuccess,
const MachineType mach, const bool externalHit,
const Cycles initialRequestTime,
const Cycles forwardRequestTime,
const Cycles firstResponseTime,
const bool was_coalesced);
bool processReadCallback(SequencerRequest &seq_req,
DataBlock& data,
const bool rubyRequest,
bool externalHit,
const MachineType mach,
Cycles initialRequestTime,
Cycles forwardRequestTime,
Cycles firstResponseTime);
};
} // namespace ruby
} // namespace gem5
#endif //__MEM_RUBY_SYSTEM_VIPERSEQUENCER_HH__

View File

@@ -0,0 +1,37 @@
# Copyright (c) 2024 The University of Wisconsin
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from m5.objects.Sequencer import *
from m5.params import *
from m5.proxy import *
class VIPERSequencer(RubySequencer):
    """SimObject binding for gem5::ruby::VIPERSequencer, a Sequencer
    variant that services no-address (s_memtime) requests at the GPU SQC.
    No new parameters are added; it inherits everything from
    RubySequencer."""

    type = "VIPERSequencer"
    cxx_class = "gem5::ruby::VIPERSequencer"
    cxx_header = "mem/ruby/system/VIPERSequencer.hh"