misc: Merge branch 'release-staging-v21-0' into develop
Change-Id: I0ad043ded56fb848e045057a1e7a56ea39797906
This commit is contained in:
@@ -114,9 +114,11 @@ MakeInclude('slicc_interface/RubyRequest.hh')
|
||||
MakeInclude('common/Address.hh')
|
||||
MakeInclude('common/BoolVec.hh')
|
||||
MakeInclude('common/DataBlock.hh')
|
||||
MakeInclude('common/ExpectedMap.hh')
|
||||
MakeInclude('common/IntVec.hh')
|
||||
MakeInclude('common/MachineID.hh')
|
||||
MakeInclude('common/NetDest.hh')
|
||||
MakeInclude('common/TriggerQueue.hh')
|
||||
MakeInclude('common/Set.hh')
|
||||
MakeInclude('common/WriteMask.hh')
|
||||
MakeInclude('network/MessageBuffer.hh')
|
||||
|
||||
232
src/mem/ruby/common/ExpectedMap.hh
Normal file
232
src/mem/ruby/common/ExpectedMap.hh
Normal file
@@ -0,0 +1,232 @@
|
||||
/*
|
||||
* Copyright (c) 2021 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_COMMON_EXPECTEDMAP_HH__
|
||||
#define __MEM_RUBY_COMMON_EXPECTEDMAP_HH__
|
||||
|
||||
#include <cassert>
|
||||
#include <iostream>
|
||||
#include <unordered_map>
|
||||
|
||||
// ExpectedMap helper class is used to facilitate tracking of pending
|
||||
// response and data messages in the CHI protocol. It offers additional
|
||||
// functionality when compared to plain counters:
|
||||
// - tracks the expected type for received messages
|
||||
// - tracks segmented data messages (i.e. when a line transfer is split in
|
||||
// multiple messages)
|
||||
|
||||
template<typename RespType, typename DataType>
class ExpectedMap
{
  private:
    // Tracks one category of expected messages (responses or data):
    // which message types are acceptable, which have been seen, and how
    // many complete messages have arrived so far.
    template<typename Type>
    struct ExpectedState
    {
        // Explicit hasher so enum class keys work with std::unordered_map
        // (required pre-C++14); maps the enumerator to its integral value.
        struct EnumClassHash
        {
            std::size_t operator()(Type t) const
            {
                return static_cast<std::size_t>(t);
            }
        };

      private:
        // chunks is the number of segmented messages we expect to receive
        // before incrementing numReceived. This is typically always 1 for
        // all non-data messages.
        int chunks;
        int currChunk;
        int numReceived;
        // Maps each registered expected type to whether it has been received.
        std::unordered_map<Type, bool, EnumClassHash> expectedTypes;

      public:
        ExpectedState()
            :chunks(1), currChunk(0), numReceived(0)
        {}

        // Reset all tracking state; msg_chunks is the number of chunks
        // that make up one complete message.
        void
        clear(int msg_chunks)
        {
            chunks = msg_chunks;
            currChunk = 0;
            numReceived = 0;
            expectedTypes.clear();
        }

        // Register val as an acceptable incoming message type.
        void
        addExpectedType(const Type &val)
        {
            expectedTypes[val] = false;
        }

        // Number of complete messages received so far.
        int received() const { return numReceived; }

        // Account for one received chunk of type val.
        // Returns false if val was never registered as expected; otherwise
        // marks the type as seen and increments numReceived once all
        // chunks of the current message have arrived.
        bool
        increaseReceived(const Type &val)
        {
            if (expectedTypes.find(val) == expectedTypes.end())
                return false;

            expectedTypes[val] = true;
            ++currChunk;
            if (currChunk == chunks) {
                ++numReceived;
                currChunk = 0;
            }

            return true;
        }

        // Has at least one chunk of type val been received ?
        bool
        receivedType(const Type &val) const
        {
            auto i = expectedTypes.find(val);
            if (i != expectedTypes.end())
                return i->second;
            else
                return false;
        }
    };

    ExpectedState<DataType> expectedData;
    ExpectedState<RespType> expectedResp;
    // Total number of complete messages (data + responses) we expect.
    int totalExpected;

  public:
    ExpectedMap()
        :expectedData(), expectedResp(), totalExpected(0)
    {}

    // Clear the tracking state and specify the number of chunks required
    // to receive a complete data message
    void
    clear(int dataChunks)
    {
        expectedData.clear(dataChunks);
        expectedResp.clear(1);
        totalExpected = 0;
    }

    // Register an expected response message type
    void
    addExpectedRespType(const RespType &val)
    {
        expectedResp.addExpectedType(val);
    }

    // Register an expected data message type
    void
    addExpectedDataType(const DataType &val)
    {
        expectedData.addExpectedType(val);
    }

    // Set the number of expected messages
    void setExpectedCount(int val) { totalExpected = val; }

    // Increase the number of expected messages by val
    void addExpectedCount(int val) { totalExpected += val; }

    // Returns the number of messages received.
    // Notice that a data message counts as received only after all of
    // its chunks are received.
    int
    received() const
    {
        return expectedData.received() + expectedResp.received();
    }

    // Returns the remaining number of expected messages
    int expected() const { return totalExpected - received(); }

    // Has any expected message ?
    bool hasExpected() const { return expected() != 0; }

    // Has received any data ?
    bool hasReceivedData() const { return expectedData.received() != 0; }

    // Has received any response ?
    bool hasReceivedResp() const { return expectedResp.received() != 0; }

    // Notifies that a response message was received; returns false if the
    // response type was not registered as expected.
    bool
    receiveResp(const RespType &val)
    {
        assert(received() < totalExpected);
        return expectedResp.increaseReceived(val);
    }

    // Notifies that a data message chunk was received; returns false if the
    // data type was not registered as expected.
    bool
    receiveData(const DataType &val)
    {
        assert(received() <= totalExpected);
        return expectedData.increaseReceived(val);
    }

    // Has received any data of the given type ?
    bool
    receivedDataType(const DataType &val) const
    {
        return expectedData.receivedType(val);
    }

    // Has received any response of the given type ?
    bool
    receivedRespType(const RespType &val) const
    {
        return expectedResp.receivedType(val);
    }

    // Prints the remaining number of expected messages.
    void
    print(std::ostream& out) const
    {
        out << expected();
    }
};
|
||||
|
||||
template<typename RespType, typename DataType>
|
||||
inline std::ostream&
|
||||
operator<<(std::ostream& out, const ExpectedMap<RespType,DataType>& obj)
|
||||
{
|
||||
obj.print(out);
|
||||
return out;
|
||||
}
|
||||
|
||||
|
||||
#endif // __MEM_RUBY_COMMON_EXPECTEDMAP_HH__
|
||||
125
src/mem/ruby/common/TriggerQueue.hh
Normal file
125
src/mem/ruby/common/TriggerQueue.hh
Normal file
@@ -0,0 +1,125 @@
|
||||
/*
|
||||
* Copyright (c) 2021 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef __MEM_RUBY_COMMON_QUEUE_HH__
|
||||
#define __MEM_RUBY_COMMON_QUEUE_HH__
|
||||
|
||||
#include <deque>
|
||||
#include <iostream>
|
||||
|
||||
// TriggerQueue helper class is used keep a list of events that trigger the
|
||||
// actions that need to be executed before an ouststanding transaction
|
||||
// completes in the CHI protocol. When a transaction no longer has pending
|
||||
// respose or data messages, this queue is checked and the event at the head
|
||||
// of the queue is triggered. If the queue is empty, the transactions is
|
||||
// finalized. Events can be marked as NB (non-blocking). NB are triggered by
|
||||
// the protocol even if the transactions has pending data/responses.
|
||||
|
||||
template<typename T>
class TriggerQueue
{
  private:
    // A queued event plus a flag saying whether it is non-blocking, i.e.
    // whether the protocol may fire it while the transaction still has
    // pending data/response messages.
    struct ValType {
        T val;
        bool non_blocking;
    };
    std::deque<ValType> queue;

  public:
    // Returns the head of the queue
    const T& front() const { return queue.front().val; }

    // Returns the head of the queue
    // NOTE: SLICC won't allow to reuse front() for different
    // values of the template parameter, thus we use an additional
    // def. to workaround that
    const T& next() const { return queue.front().val; }

    // Returns the end of the queue
    const T& back() const { return queue.back().val; }

    // Is the head event non-blocking ?
    bool frontNB() const { return queue.front().non_blocking; }

    // Is the last event non-blocking ?
    bool backNB() const { return queue.back().non_blocking; }

    // Is the queue empty ?
    bool empty() const { return queue.empty(); }

    // put an event at the end of the queue
    void push(const T &elem) { queue.push_back({elem,false}); }

    // emplace an event at the end of the queue
    template<typename... Ts>
    void
    emplace(Ts&&... args)
    {
        queue.push_back({T(std::forward<Ts>(args)...),false});
    }

    // put an event at the head of the queue
    void pushFront(const T &elem) { queue.push_front({elem,false}); }

    // put a non-blocking event at the end of the queue
    void pushNB(const T &elem) { queue.push_back({elem,true}); }

    // put a non-blocking event at the head of the queue
    void pushFrontNB(const T &elem) { queue.push_front({elem,true}); }

    // pop the head of the queue
    void pop() { queue.pop_front(); }

    void print(std::ostream& out) const;
};
|
||||
|
||||
template<class T>
|
||||
inline std::ostream&
|
||||
operator<<(std::ostream& out, const TriggerQueue<T>& obj)
|
||||
{
|
||||
obj.print(out);
|
||||
out << std::flush;
|
||||
return out;
|
||||
}
|
||||
|
||||
template<class T>
|
||||
inline void
|
||||
TriggerQueue<T>::print(std::ostream& out) const
|
||||
{
|
||||
}
|
||||
|
||||
#endif // __MEM_RUBY_COMMON_QUEUE_HH__
|
||||
@@ -262,7 +262,9 @@ enumeration(MachineType, desc="...", default="MachineType_NULL") {
|
||||
TCCdir, desc="Directory at the GPU L2 Cache (TCC)";
|
||||
SQC, desc="GPU L1 Instr Cache (Sequencer Cache)";
|
||||
RegionDir, desc="Region-granular directory";
|
||||
RegionBuffer,desc="Region buffer for CPU and GPU";
|
||||
RegionBuffer, desc="Region buffer for CPU and GPU";
|
||||
Cache, desc="Generic coherent cache controller";
|
||||
Memory, desc="Memory controller interface";
|
||||
NULL, desc="null mach type";
|
||||
}
|
||||
|
||||
|
||||
3057
src/mem/ruby/protocol/chi/CHI-cache-actions.sm
Normal file
3057
src/mem/ruby/protocol/chi/CHI-cache-actions.sm
Normal file
File diff suppressed because it is too large
Load Diff
1255
src/mem/ruby/protocol/chi/CHI-cache-funcs.sm
Normal file
1255
src/mem/ruby/protocol/chi/CHI-cache-funcs.sm
Normal file
File diff suppressed because it is too large
Load Diff
398
src/mem/ruby/protocol/chi/CHI-cache-ports.sm
Normal file
398
src/mem/ruby/protocol/chi/CHI-cache-ports.sm
Normal file
@@ -0,0 +1,398 @@
|
||||
/*
|
||||
* Copyright (c) 2021 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
// Outbound port definitions

out_port(reqOutPort, CHIRequestMsg, reqOut);
out_port(snpOutPort, CHIRequestMsg, snpOut);
out_port(rspOutPort, CHIResponseMsg, rspOut);
out_port(datOutPort, CHIDataMsg, datOut);
out_port(triggerOutPort, TriggerMsg, triggerQueue);
out_port(retryTriggerOutPort, RetryTriggerMsg, retryTriggerQueue);
out_port(replTriggerOutPort, TriggerMsg, replTriggerQueue);
out_port(reqRdyOutPort, CHIRequestMsg, reqRdy);
out_port(snpRdyOutPort, CHIRequestMsg, snpRdy);


// Include helper functions here. Some of them require the outports to be
// already defined.
// Notice 'processNextState' and 'wakeupPending*' functions are defined after
// the required input ports. Currently the SLICC compiler does not support
// separate declaration and definition of functions in the .sm files.
include "CHI-cache-funcs.sm";
|
||||
|
||||
|
||||
// Inbound port definitions and internal triggers queues
|
||||
// Notice we never stall input ports connected to the network
|
||||
// Incoming data and responses are always consumed.
|
||||
// Incoming requests/snoop are moved to the respective internal rdy queue
|
||||
// if a TBE can be allocated, or retried otherwise.
|
||||
|
||||
// Trigger events from the UD_T state
in_port(useTimerTable_in, Addr, useTimerTable, rank=11) {
  if (useTimerTable_in.isReady(clockEdge())) {
    Addr readyAddress := useTimerTable.nextAddress();
    trigger(Event:UseTimeout, readyAddress, getCacheEntry(readyAddress),
            getCurrentActiveTBE(readyAddress));
  }
}
|
||||
|
||||
|
||||
// Response
in_port(rspInPort, CHIResponseMsg, rspIn, rank=10,
        rsc_stall_handler=rspInPort_rsc_stall_handler) {
  if (rspInPort.isReady(clockEdge())) {
    printResources();
    peek(rspInPort, CHIResponseMsg) {
      TBE tbe := getCurrentActiveTBE(in_msg.addr);
      trigger(respToEvent(in_msg.type, tbe), in_msg.addr,
              getCacheEntry(in_msg.addr), tbe);
    }
  }
}
bool rspInPort_rsc_stall_handler() {
  error("rspInPort must never stall\n");
  return false;
}
|
||||
|
||||
|
||||
// Data
in_port(datInPort, CHIDataMsg, datIn, rank=9,
        rsc_stall_handler=datInPort_rsc_stall_handler) {
  if (datInPort.isReady(clockEdge())) {
    printResources();
    peek(datInPort, CHIDataMsg) {
      // sanity check: a data message must carry between 1 and
      // data_channel_size valid bytes
      int received := in_msg.bitMask.count();
      assert((received <= data_channel_size) && (received > 0));
      trigger(dataToEvent(in_msg.type), in_msg.addr,
              getCacheEntry(in_msg.addr), getCurrentActiveTBE(in_msg.addr));
    }
  }
}
bool datInPort_rsc_stall_handler() {
  error("datInPort must never stall\n");
  return false;
}
|
||||
|
||||
|
||||
// Snoops with an allocated TBE
in_port(snpRdyPort, CHIRequestMsg, snpRdy, rank=8,
        rsc_stall_handler=snpRdyPort_rsc_stall_handler) {
  if (snpRdyPort.isReady(clockEdge())) {
    printResources();
    peek(snpRdyPort, CHIRequestMsg) {
      assert(in_msg.allowRetry == false);
      TBE tbe := getCurrentActiveTBE(in_msg.addr);
      if (is_valid(tbe) && tbe.hasUseTimeout) {
        // we may be in the BUSY_INTR waiting for a cache block, but if
        // the timeout is set the snoop must still wait, so trigger the
        // stall from here to prevent creating other states
        trigger(Event:SnpStalled, in_msg.addr,
                getCacheEntry(in_msg.addr), tbe);
      } else {
        trigger(snpToEvent(in_msg.type), in_msg.addr,
                getCacheEntry(in_msg.addr), tbe);
      }
    }
  }
}
bool snpRdyPort_rsc_stall_handler() {
  error("snpRdyPort must never stall\n");
  return false;
}
void wakeupPendingSnps(TBE tbe) {
  if (tbe.wakeup_pending_snp) {
    Addr addr := tbe.addr;
    wakeup_port(snpRdyPort, addr);
    tbe.wakeup_pending_snp := false;
  }
}
|
||||
|
||||
|
||||
// Incoming snoops
// Note: snoops are not retried, so the snoop channel is stalled if no
// Snp TBEs available
in_port(snpInPort, CHIRequestMsg, snpIn, rank=7) {
  if (snpInPort.isReady(clockEdge())) {
    assert(is_HN == false);
    printResources();
    peek(snpInPort, CHIRequestMsg) {
      assert(in_msg.allowRetry == false);
      trigger(Event:AllocSnoop, in_msg.addr,
              getCacheEntry(in_msg.addr), getCurrentActiveTBE(in_msg.addr));
    }
  }
}
|
||||
|
||||
|
||||
// Retry action triggers
// These are handled before other triggers since a retried request should
// be enqueued ahead of a new request
// TODO: consider moving DoRetry to the triggerQueue
in_port(retryTriggerInPort, RetryTriggerMsg, retryTriggerQueue, rank=6,
        rsc_stall_handler=retryTriggerInPort_rsc_stall_handler) {
  if (retryTriggerInPort.isReady(clockEdge())) {
    printResources();
    peek(retryTriggerInPort, RetryTriggerMsg) {
      Event ev := in_msg.event;
      TBE tbe := getCurrentActiveTBE(in_msg.addr);
      assert((ev == Event:SendRetryAck) || (ev == Event:SendPCrdGrant) ||
             (ev == Event:DoRetry));
      if (ev == Event:DoRetry) {
        assert(is_valid(tbe));
        // a retry issued while handling a hazard uses a dedicated event
        if (tbe.is_req_hazard || tbe.is_repl_hazard) {
          ev := Event:DoRetry_Hazard;
        }
      }
      trigger(ev, in_msg.addr, getCacheEntry(in_msg.addr), tbe);
    }
  }
}
bool retryTriggerInPort_rsc_stall_handler() {
  DPRINTF(RubySlicc, "Retry trigger queue resource stall\n");
  retryTriggerInPort.recycle(clockEdge(), cyclesToTicks(stall_recycle_lat));
  return true;
}
|
||||
|
||||
|
||||
// Action triggers
in_port(triggerInPort, TriggerMsg, triggerQueue, rank=5,
        rsc_stall_handler=triggerInPort_rsc_stall_handler) {
  if (triggerInPort.isReady(clockEdge())) {
    printResources();
    peek(triggerInPort, TriggerMsg) {
      TBE tbe := getCurrentActiveTBE(in_msg.addr);
      assert(is_valid(tbe));
      if (in_msg.from_hazard != (tbe.is_req_hazard || tbe.is_repl_hazard)) {
        // possible when handling a snoop hazard and an action from the
        // initial transaction got woken up. Stall the action until the
        // hazard ends
        assert(in_msg.from_hazard == false);
        assert(tbe.is_req_hazard || tbe.is_repl_hazard);
        trigger(Event:ActionStalledOnHazard, in_msg.addr,
                getCacheEntry(in_msg.addr), tbe);
      } else {
        trigger(tbe.pendAction, in_msg.addr, getCacheEntry(in_msg.addr), tbe);
      }
    }
  }
}
bool triggerInPort_rsc_stall_handler() {
  DPRINTF(RubySlicc, "Trigger queue resource stall\n");
  triggerInPort.recycle(clockEdge(), cyclesToTicks(stall_recycle_lat));
  return true;
}
void wakeupPendingTgrs(TBE tbe) {
  if (tbe.wakeup_pending_tgr) {
    Addr addr := tbe.addr;
    wakeup_port(triggerInPort, addr);
    tbe.wakeup_pending_tgr := false;
  }
}
|
||||
|
||||
|
||||
// internally triggered evictions
// no stall handler for this one since it doesn't make sense to try the next
// request when out of TBEs
in_port(replTriggerInPort, ReplacementMsg, replTriggerQueue, rank=4) {
  if (replTriggerInPort.isReady(clockEdge())) {
    printResources();
    peek(replTriggerInPort, ReplacementMsg) {
      TBE tbe := getCurrentActiveTBE(in_msg.addr);
      CacheEntry cache_entry := getCacheEntry(in_msg.addr);
      Event trigger := Event:null;
      // evict upstream copies too when configured to back-invalidate
      if (is_valid(cache_entry) &&
          ((upstreamHasUnique(cache_entry.state) && dealloc_backinv_unique) ||
           (upstreamHasShared(cache_entry.state) && dealloc_backinv_shared))) {
        trigger := Event:Global_Eviction;
      } else {
        if (is_HN) {
          trigger := Event:LocalHN_Eviction;
        } else {
          trigger := Event:Local_Eviction;
        }
      }
      trigger(trigger, in_msg.addr, cache_entry, tbe);
    }
  }
}
|
||||
|
||||
|
||||
// Requests with an allocated TBE
in_port(reqRdyPort, CHIRequestMsg, reqRdy, rank=3,
        rsc_stall_handler=reqRdyPort_rsc_stall_handler) {
  if (reqRdyPort.isReady(clockEdge())) {
    printResources();
    peek(reqRdyPort, CHIRequestMsg) {
      CacheEntry cache_entry := getCacheEntry(in_msg.addr);
      TBE tbe := getCurrentActiveTBE(in_msg.addr);

      DirEntry dir_entry := getDirEntry(in_msg.addr);

      // Special case for possibly stale writebacks or evicts
      // (note trigger() ends the port evaluation, so a stale trigger
      // prevents the normal request path below from firing)
      if (in_msg.type == CHIRequestType:WriteBackFull) {
        if (is_invalid(dir_entry) || (dir_entry.ownerExists == false) ||
            (dir_entry.owner != in_msg.requestor)) {
          trigger(Event:WriteBackFull_Stale, in_msg.addr, cache_entry, tbe);
        }
      } else if (in_msg.type == CHIRequestType:WriteEvictFull) {
        if (is_invalid(dir_entry) || (dir_entry.ownerExists == false) ||
            (dir_entry.ownerIsExcl == false) || (dir_entry.owner != in_msg.requestor)) {
          trigger(Event:WriteEvictFull_Stale, in_msg.addr, cache_entry, tbe);
        }
      } else if (in_msg.type == CHIRequestType:WriteCleanFull) {
        if (is_invalid(dir_entry) || (dir_entry.ownerExists == false) ||
            (dir_entry.ownerIsExcl == false) || (dir_entry.owner != in_msg.requestor)) {
          trigger(Event:WriteCleanFull_Stale, in_msg.addr, cache_entry, tbe);
        }
      } else if (in_msg.type == CHIRequestType:Evict) {
        if (is_invalid(dir_entry) ||
            (dir_entry.sharers.isElement(in_msg.requestor) == false)) {
          trigger(Event:Evict_Stale, in_msg.addr, cache_entry, tbe);
        }
      }

      // Normal request path
      trigger(reqToEvent(in_msg.type, in_msg.is_local_pf), in_msg.addr, cache_entry, tbe);
    }
  }
}
bool reqRdyPort_rsc_stall_handler() {
  DPRINTF(RubySlicc, "ReqRdy queue resource stall\n");
  reqRdyPort.recycle(clockEdge(), cyclesToTicks(stall_recycle_lat));
  return true;
}
void wakeupPendingReqs(TBE tbe) {
  if (tbe.wakeup_pending_req) {
    Addr addr := tbe.addr;
    wakeup_port(reqRdyPort, addr);
    tbe.wakeup_pending_req := false;
  }
}
|
||||
|
||||
|
||||
// Incoming new requests
in_port(reqInPort, CHIRequestMsg, reqIn, rank=2,
        rsc_stall_handler=reqInPort_rsc_stall_handler) {
  if (reqInPort.isReady(clockEdge())) {
    printResources();
    peek(reqInPort, CHIRequestMsg) {
      if (in_msg.allowRetry) {
        trigger(Event:AllocRequest, in_msg.addr,
                getCacheEntry(in_msg.addr), getCurrentActiveTBE(in_msg.addr));
      } else {
        // requester already holds a credit, so this request cannot be
        // refused
        trigger(Event:AllocRequestWithCredit, in_msg.addr,
                getCacheEntry(in_msg.addr), getCurrentActiveTBE(in_msg.addr));
      }
    }
  }
}
bool reqInPort_rsc_stall_handler() {
  error("reqInPort must never stall\n");
  return false;
}
|
||||
|
||||
|
||||
// Incoming new sequencer requests
in_port(seqInPort, RubyRequest, mandatoryQueue, rank=1) {
  if (seqInPort.isReady(clockEdge())) {
    printResources();
    peek(seqInPort, RubyRequest) {
      trigger(Event:AllocSeqRequest, in_msg.LineAddress,
              getCacheEntry(in_msg.LineAddress),
              getCurrentActiveTBE(in_msg.LineAddress));
    }
  }
}
|
||||
|
||||
|
||||
// Incoming new prefetch requests
in_port(pfInPort, RubyRequest, prefetchQueue, rank=0) {
  if (pfInPort.isReady(clockEdge())) {
    printResources();
    peek(pfInPort, RubyRequest) {
      trigger(Event:AllocPfRequest, in_msg.LineAddress,
              getCacheEntry(in_msg.LineAddress),
              getCurrentActiveTBE(in_msg.LineAddress));
    }
  }
}
|
||||
|
||||
// Advance the transaction in tbe: when nothing is pending (or the next
// queued action is non-blocking), pop the next action from tbe.actions
// and schedule it on the trigger queue; an empty action queue schedules
// Event:Final instead.
void processNextState(Addr address, TBE tbe, CacheEntry cache_entry) {
  assert(is_valid(tbe));
  DPRINTF(RubySlicc, "GoToNextState expected_req_resp=%d expected_snp_resp=%d snd_pendEv=%d snd_pendBytes=%d\n",
          tbe.expected_req_resp.expected(),
          tbe.expected_snp_resp.expected(),
          tbe.snd_pendEv, tbe.snd_pendBytes.count());

  // if no pending trigger and not expecting to receive anything, enqueue
  // next
  bool has_nb_trigger := (tbe.actions.empty() == false) &&
                         tbe.actions.frontNB() &&
                         (tbe.snd_pendEv == false);
  int expected_msgs := tbe.expected_req_resp.expected() +
                       tbe.expected_snp_resp.expected() +
                       tbe.snd_pendBytes.count();
  if ((tbe.pendAction == Event:null) && ((expected_msgs == 0) || has_nb_trigger)) {
    // honor any requested delay before firing the next action
    Cycles trigger_latency := intToCycles(0);
    if (tbe.delayNextAction > curTick()) {
      trigger_latency := ticksToCycles(tbe.delayNextAction) -
                         ticksToCycles(curTick());
      tbe.delayNextAction := intToTick(0);
    }

    tbe.pendAction := Event:null;
    if (tbe.actions.empty()) {
      // time to go to the final state
      tbe.pendAction := Event:Final;
    } else {
      tbe.pendAction := tbe.actions.front();
      tbe.actions.pop();
    }
    assert(tbe.pendAction != Event:null);
    enqueue(triggerOutPort, TriggerMsg, trigger_latency) {
      out_msg.addr := tbe.addr;
      out_msg.from_hazard := tbe.is_req_hazard || tbe.is_repl_hazard;
    }
  }

  printTBEState(tbe);

  // we might be going to BUSY_INTERRUPTABLE so wakeup pending snoops
  // if any
  wakeupPendingSnps(tbe);
}
|
||||
1218
src/mem/ruby/protocol/chi/CHI-cache-transitions.sm
Normal file
1218
src/mem/ruby/protocol/chi/CHI-cache-transitions.sm
Normal file
File diff suppressed because it is too large
Load Diff
775
src/mem/ruby/protocol/chi/CHI-cache.sm
Normal file
775
src/mem/ruby/protocol/chi/CHI-cache.sm
Normal file
@@ -0,0 +1,775 @@
|
||||
/*
|
||||
* Copyright (c) 2021 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
|
||||
// Generic CHI cache controller. The same machine is used for every cache
// level (e.g. L1/L2/HNF); behavior is selected through the parameters below.
machine(MachineType:Cache, "Cache coherency protocol") :
  // Sequencer to insert Load/Store requests.
  // May be null if this is not a L1 cache
  Sequencer * sequencer;

  // Cache for storing local lines.
  // NOTE: it is assumed that a cache tag and directory lookups and updates
  // happen in parallel. The cache tag latency is used for both cases.
  CacheMemory * cache;

  // Additional pipeline latency modeling for the different request types
  // When defined, these are applied after the initial tag array read and
  // sending necessary snoops.
  Cycles read_hit_latency := 0;
  Cycles read_miss_latency := 0;
  Cycles write_fe_latency := 0; // Front-end: Rcv req -> Snd req
  Cycles write_be_latency := 0; // Back-end: Rcv ack -> Snd data
  Cycles fill_latency := 0;     // Fill latency
  Cycles snp_latency := 0;      // Applied before handling any snoop
  Cycles snp_inv_latency := 0;  // Additional latency for invalidating snoops

  // Waits for cache data array write to complete before executing next action
  // Note a new write will always block if bank stalls are enabled in the cache
  bool wait_for_cache_wr := "False";

  // Request TBE allocation latency
  Cycles allocation_latency := 0;

  // Enqueue latencies for outgoing messages
  // NOTE: should remove this and only use parameters above?
  Cycles request_latency := 1;
  Cycles response_latency := 1;
  Cycles snoop_latency := 1;
  Cycles data_latency := 1;

  // When an SC fails, unique lines are locked to this controller for a period
  // proportional to the number of consecutive failed SC requests. See
  // the usage of sc_lock_multiplier and llscCheckMonitor for details
  int sc_lock_base_latency_cy := 4;
  int sc_lock_multiplier_inc := 4;
  int sc_lock_multiplier_decay := 1;
  int sc_lock_multiplier_max := 256;
  bool sc_lock_enabled;

  // Recycle latency on resource stalls
  Cycles stall_recycle_lat := 1;

  // Notify the sequencer when a line is evicted. This should be set if the
  // sequencer is not null and handles LL/SC request types.
  bool send_evictions;

  // Number of entries in the snoop and replacement TBE tables
  // notice the "number_of_TBEs" parameter is defined by AbstractController
  int number_of_snoop_TBEs;
  int number_of_repl_TBEs;

  // replacements use the same TBE slot as the request that triggered it
  // in this case the number_of_repl_TBEs parameter is ignored
  bool unify_repl_TBEs;

  // wait for the final tag update to complete before deallocating TBE and
  // going to final stable state
  bool dealloc_wait_for_tag := "False";

  // Width of the data channel. Data transfer are split in multiple messages
  // at the protocol level when this is less than the cache line size.
  int data_channel_size;

  // Set when this is used as the home node and point of coherency of the
  // system. Must be false for every other cache level.
  bool is_HN;

  // Enables direct memory transfers between SNs and RNs when the data is
  // not cached in the HN.
  bool enable_DMT;

  // Use ReadNoSnpSep instead of ReadNoSnp for DMT requests, which allows
  // the TBE to be deallocated at HNFs before the requester receives the data
  bool enable_DMT_early_dealloc := "False";

  // Enables direct cache transfers, i.e., use forwarding snoops whenever
  // possible.
  bool enable_DCT;

  // Use separate Comp/DBIDResp responses for WriteUnique
  bool comp_wu := "False";
  // additional latency for the WU Comp response
  Cycles comp_wu_latency := 0;

  // Controls cache clusivity for different request types.
  // set all alloc_on* to false to completely disable caching
  bool alloc_on_readshared;
  bool alloc_on_readunique;
  bool alloc_on_readonce;
  bool alloc_on_writeback;
  bool alloc_on_seq_acc;
  bool alloc_on_seq_line_write;
  // Controls if the clusivity is strict.
  bool dealloc_on_unique;
  bool dealloc_on_shared;
  bool dealloc_backinv_unique;
  bool dealloc_backinv_shared;

  // If the responder has the line in UC or UD state, propagate this state
  // on a ReadShared. Notice data won't be deallocated if dealloc_on_unique is
  // set
  bool fwd_unique_on_readshared := "False";

  // Allow receiving data in SD state.
  bool allow_SD;

  // stall new requests to destinations with a pending retry
  bool throttle_req_on_retry := "True";

  // Use prefetcher
  bool use_prefetcher, default="false";

  // Message Queues

  // Interface to the network
  // Note vnet_type is used by Garnet only. "response" type is assumed to
  // have data, so use it for data channels and "none" for the rest.
  // network="To" for outbound queue; network="From" for inbound
  // virtual networks: 0=request, 1=snoop, 2=response, 3=data

  MessageBuffer * reqOut, network="To", virtual_network="0", vnet_type="none";
  MessageBuffer * snpOut, network="To", virtual_network="1", vnet_type="none";
  MessageBuffer * rspOut, network="To", virtual_network="2", vnet_type="none";
  MessageBuffer * datOut, network="To", virtual_network="3", vnet_type="response";

  MessageBuffer * reqIn, network="From", virtual_network="0", vnet_type="none";
  MessageBuffer * snpIn, network="From", virtual_network="1", vnet_type="none";
  MessageBuffer * rspIn, network="From", virtual_network="2", vnet_type="none";
  MessageBuffer * datIn, network="From", virtual_network="3", vnet_type="response";

  // Mandatory queue for receiving requests from the sequencer
  MessageBuffer * mandatoryQueue;

  // Internal queue for trigger events
  MessageBuffer * triggerQueue;

  // Internal queue for retry trigger events
  MessageBuffer * retryTriggerQueue;

  // Internal queue for accepted requests
  MessageBuffer * reqRdy;

  // Internal queue for accepted snoops
  MessageBuffer * snpRdy;

  // Internal queue for eviction requests
  MessageBuffer * replTriggerQueue;

  // Prefetch queue for receiving prefetch requests from prefetcher
  MessageBuffer * prefetchQueue;

  // Requests that originated from a prefetch in a upstream cache are treated
  // as demand access in this cache. Notice the demand access stats are still
  // updated only on true demand requests.
  bool upstream_prefetch_trains_prefetcher := "False";

{
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// States
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
state_declaration(State, default="Cache_State_null") {
|
||||
// Stable states
|
||||
|
||||
I, AccessPermission:Invalid, desk="Invalid / not present locally or upstream";
|
||||
|
||||
// States when block is present in local cache only
|
||||
SC, AccessPermission:Read_Only, desc="Shared Clean";
|
||||
UC, AccessPermission:Read_Write, desc="Unique Clean";
|
||||
SD, AccessPermission:Read_Only, desc="Shared Dirty";
|
||||
UD, AccessPermission:Read_Write, desc="Unique Dirty";
|
||||
UD_T, AccessPermission:Read_Write, desc="UD with use timeout";
|
||||
|
||||
// Invalid in local cache but present in upstream caches
|
||||
RU, AccessPermission:Invalid, desk="Upstream requester has line in UD/UC";
|
||||
RSC, AccessPermission:Invalid, desk="Upstream requester has line in SC";
|
||||
RSD, AccessPermission:Invalid, desk="Upstream requester has line in SD and maybe SC";
|
||||
RUSC, AccessPermission:Invalid, desk="RSC + this node stills has exclusive access";
|
||||
RUSD, AccessPermission:Invalid, desk="RSD + this node stills has exclusive access";
|
||||
|
||||
// Both in local and upstream caches. In some cases local maybe stale
|
||||
SC_RSC, AccessPermission:Read_Only, desk="SC + RSC";
|
||||
SD_RSC, AccessPermission:Read_Only, desk="SD + RSC";
|
||||
SD_RSD, AccessPermission:Read_Only, desk="SD + RSD";
|
||||
UC_RSC, AccessPermission:Read_Write, desk="UC + RSC";
|
||||
UC_RU, AccessPermission:Invalid, desk="UC + RU";
|
||||
UD_RU, AccessPermission:Invalid, desk="UD + RU";
|
||||
UD_RSD, AccessPermission:Read_Write, desk="UD + RSD";
|
||||
UD_RSC, AccessPermission:Read_Write, desk="UD + RSC";
|
||||
|
||||
// Generic transient state
|
||||
// There is only a transient "BUSY" state. The actions taken at this state
|
||||
// and the final stable state are defined by information in the TBE.
|
||||
// While on BUSY_INTR, we will reply to incoming snoops and the
|
||||
// state of the cache line may change. While on BUSY_BLKD snoops
|
||||
// are blocked
|
||||
BUSY_INTR, AccessPermission:Busy, desc="Waiting for data and/or ack";
|
||||
BUSY_BLKD, AccessPermission:Busy, desc="Waiting for data and/or ack; blocks snoops";
|
||||
|
||||
// Null state for debugging
|
||||
null, AccessPermission:Invalid, desc="Null state";
|
||||
}
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// Events
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
  // All events handled by this controller: external requests/snoops,
  // responses, and the internal trigger actions queued in tbe.actions.
  enumeration(Event) {
    // Events triggered by incoming requests. Allocate TBE and move
    // request or snoop to the ready queue
    AllocRequest, desc="Allocates a TBE for a request. Triggers a retry if table is full";
    AllocRequestWithCredit, desc="Allocates a TBE for a request. Always succeeds.";
    AllocSeqRequest, desc="Allocates a TBE for a sequencer request. Stalls requests if table is full";
    AllocPfRequest, desc="Allocates a TBE for a prefetch request. Stalls requests if table is full";
    AllocSnoop, desc="Allocates a TBE for a snoop. Stalls snoop if table is full";

    // Events triggered by sequencer requests or snoops in the rdy queue
    // See CHIRequestType in CHI-msg.sm for descriptions
    Load;
    Store;
    Prefetch;
    ReadShared;
    ReadNotSharedDirty;
    ReadUnique;
    ReadUnique_PoC;
    ReadOnce;
    CleanUnique;
    Evict;
    WriteBackFull;
    WriteEvictFull;
    WriteCleanFull;
    WriteUnique;
    WriteUniquePtl_PoC;
    WriteUniqueFull_PoC;
    WriteUniqueFull_PoC_Alloc;
    SnpCleanInvalid;
    SnpShared;
    SnpSharedFwd;
    SnpNotSharedDirtyFwd;
    SnpUnique;
    SnpUniqueFwd;
    SnpOnce;
    SnpOnceFwd;
    SnpStalled; // A snoop stall triggered from the inport

    // Events triggered by incoming response messages
    // See CHIResponseType in CHI-msg.sm for descriptions
    CompAck;
    Comp_I;
    Comp_UC;
    Comp_SC;
    CompDBIDResp;
    DBIDResp;
    Comp;
    ReadReceipt;
    RespSepData;
    SnpResp_I;
    SnpResp_I_Fwded_UC;
    SnpResp_I_Fwded_UD_PD;
    SnpResp_SC;
    SnpResp_SC_Fwded_SC;
    SnpResp_SC_Fwded_SD_PD;
    SnpResp_UC_Fwded_I;
    SnpResp_UD_Fwded_I;
    SnpResp_SC_Fwded_I;
    SnpResp_SD_Fwded_I;
    RetryAck;
    RetryAck_PoC;
    PCrdGrant;
    PCrdGrant_PoC;
    RetryAck_Hazard;
    RetryAck_PoC_Hazard;
    PCrdGrant_Hazard;
    PCrdGrant_PoC_Hazard;

    // Events triggered by incoming data response messages
    // See CHIDataType in CHI-msg.sm for descriptions
    CompData_I;
    CompData_UC;
    CompData_SC;
    CompData_UD_PD;
    CompData_SD_PD;
    DataSepResp_UC;
    CBWrData_I;
    CBWrData_UC;
    CBWrData_SC;
    CBWrData_UD_PD;
    CBWrData_SD_PD;
    NCBWrData;
    SnpRespData_I;
    SnpRespData_I_PD;
    SnpRespData_SC;
    SnpRespData_SC_PD;
    SnpRespData_SD;
    SnpRespData_UC;
    SnpRespData_UD;
    SnpRespData_SC_Fwded_SC;
    SnpRespData_SC_Fwded_SD_PD;
    SnpRespData_SC_PD_Fwded_SC;
    SnpRespData_I_Fwded_SD_PD;
    SnpRespData_I_PD_Fwded_SC;
    SnpRespData_I_Fwded_SC;

    // We use special events for requests that we detect to be stale. This is
    // done for debugging only. We sent a stale response so the requester can
    // confirm the request is indeed stale and this is not a protocol bug.
    // A Write or Evict becomes stale when the requester receives a snoop that
    // changes the state of the data while the request was pending.
    // Actual CHI implementations don't have this check.
    Evict_Stale;
    WriteBackFull_Stale;
    WriteEvictFull_Stale;
    WriteCleanFull_Stale;

    // Cache fill handling
    CheckCacheFill, desc="Check if need to write or update the cache and trigger any necessary allocation and evictions";

    // Internal requests generated to evict or writeback a local copy
    // to free-up cache space
    Local_Eviction, desc="Evicts/WB the local copy of the line";
    LocalHN_Eviction, desc="Local_Eviction triggered when is HN";
    Global_Eviction, desc="Local_Eviction + back-invalidate line in all upstream requesters";

    // Events triggered from tbe.actions
    // In general, for each event we define a single transition from
    // BUSY_BLKD and/or BUSY_INTR.
    // See processNextState functions and Initiate_* actions.
    // All triggered transitions execute in the same cycle until it has to wait
    // for pending responses or data (set by expected_req_resp and
    // expected_snp_resp). Triggers queued with pushNB are executed even if
    // there are pending messages.

    // Cache/directory access events. Notice these only model the latency.
    TagArrayRead, desc="Read the cache and directory tag array";
    TagArrayWrite, desc="Write the cache and directory tag array";
    DataArrayRead, desc="Read the cache data array";
    DataArrayWrite, desc="Write the cache data array";
    DataArrayWriteOnFill, desc="Write the cache data array (cache fill)";

    // Events for modeling the pipeline latency
    ReadHitPipe, desc="Latency of reads served from local cache";
    ReadMissPipe, desc="Latency of reads not served from local cache";
    WriteFEPipe, desc="Front-end latency of write requests";
    WriteBEPipe, desc="Back-end latency of write requests";
    FillPipe, desc="Cache fill latency";
    SnpSharedPipe, desc="Latency for SnpShared requests";
    SnpInvPipe, desc="Latency for SnpUnique and SnpCleanInv requests";
    SnpOncePipe, desc="Latency for SnpOnce requests";

    // Send a read request downstream.
    SendReadShared, desc="Send a ReadShared or ReadNotSharedDirty is allow_SD is false";
    SendReadOnce, desc="Send a ReadOnce";
    SendReadNoSnp, desc="Send a SendReadNoSnp";
    SendReadNoSnpDMT, desc="Send a SendReadNoSnp using DMT";
    SendReadUnique, desc="Send a ReadUnique";
    SendCompAck, desc="Send CompAck";
    // Read handling at the completer
    SendCompData, desc="Send CompData";
    WaitCompAck, desc="Expect to receive CompAck";
    SendRespSepData, desc="Send RespSepData for a DMT request";

    // Send a write request downstream.
    SendWriteBackOrWriteEvict, desc="Send a WriteBackFull (if line is UD or SD) or WriteEvictFull (if UC)";
    SendWriteClean, desc="Send a WriteCleanFull";
    SendWriteNoSnp, desc="Send a WriteNoSnp for a full line";
    SendWriteNoSnpPartial, desc="Send a WriteNoSnpPtl";
    SendWriteUnique, desc="Send a WriteUniquePtl";
    SendWBData, desc="Send writeback data";
    SendWUData, desc="Send write unique data";
    SendWUDataCB, desc="Send write unique data from a sequencer callback";
    // Write handling at the completer
    SendCompDBIDResp, desc="Ack WB with CompDBIDResp";
    SendCompDBIDRespStale, desc="Ack stale WB with CompDBIDResp";
    SendCompDBIDResp_WU, desc="Ack WU with CompDBIDResp and set expected data";
    SendDBIDResp_WU, desc="Ack WU with DBIDResp and set expected data";
    SendComp_WU, desc="Ack WU completion";

    // Dataless requests
    SendEvict, desc="Send a Evict";
    SendCompIResp, desc="Ack Evict with Comp_I";
    SendCleanUnique,desc="Send a CleanUnique";
    SendCompUCResp, desc="Ack CleanUnique with Comp_UC";

    // Checks if an upgrade using a CleanUnique was successful
    CheckUpgrade_FromStore, desc="Upgrade needed by a Store";
    CheckUpgrade_FromCU, desc="Upgrade needed by an upstream CleanUnique";
    CheckUpgrade_FromRU, desc="Upgrade needed by an upstream ReadUnique";

    // Snoop requests
    // SnpNotSharedDirty are sent instead of SnpShared for ReadNotSharedDirty
    SendSnpShared, desc="Send a SnpShared/SnpNotSharedDirty to sharer in UC,UD, or SD state";
    SendSnpSharedFwdToOwner, desc="Send a SnpSharedFwd/SnpNotSharedDirtyFwd to sharer in UC,UD, or SD state";
    SendSnpSharedFwdToSharer, desc="Send a SnpSharedFwd/SnpNotSharedDirtyFwd to a sharer in SC state";
    SendSnpOnce, desc="Send a SnpOnce to a sharer";
    SendSnpOnceFwd, desc="Send a SnpOnceFwd to a sharer";
    SendSnpUnique, desc="Send a SnpUnique to all sharers";
    SendSnpUniqueRetToSrc, desc="Send a SnpUnique to all sharers. Sets RetToSrc for only one sharer.";
    SendSnpUniqueFwd, desc="Send a SnpUniqueFwd to a single sharer";
    SendSnpCleanInvalid, desc="Send a SnpCleanInvalid to all sharers";
    SendSnpCleanInvalidNoReq, desc="Send a SnpCleanInvalid to all sharers except requestor";
    // Snoop responses
    SendSnpData, desc="Send SnpRespData as snoop reply";
    SendSnpIResp, desc="Send SnpResp_I as snoop reply";
    SendInvSnpResp, desc="Check data state and queue either SendSnpIResp or SendSnpData";
    SendSnpUniqueFwdCompData, desc="Send CompData to SnpUniqueFwd target and queue either SendSnpFwdedData or SendSnpFwdedResp";
    SendSnpSharedFwdCompData, desc="Send CompData to SnpUniqueFwd target and queue either SendSnpFwdedData or SendSnpFwdedResp";
    SendSnpNotSharedDirtyFwdCompData, desc="Send CompData to SnpNotSharedDirtyFwd target and queue either SendSnpFwdedData or SendSnpFwdedResp";
    SendSnpOnceFwdCompData, desc="Send CompData to SnpOnceFwd target and queue either SendSnpFwdedData or SendSnpFwdedResp";
    SendSnpFwdedData, desc="Send SnpResp for a forwarding snoop";
    SendSnpFwdedResp, desc="Send SnpRespData for a forwarding snoop";

    // Retry handling
    SendRetryAck, desc="Send RetryAck";
    SendPCrdGrant, desc="Send PCrdGrant";
    DoRetry, desc="Resend the current pending request";
    DoRetry_Hazard, desc="DoRetry during a hazard";

    // Misc triggers
    LoadHit, desc="Complete a load hit";
    StoreHit, desc="Complete a store hit";
    UseTimeout, desc="Transition from UD_T -> UD";
    RestoreFromHazard, desc="Restore from a snoop hazard";
    TX_Data, desc="Transmit pending data messages";
    MaintainCoherence, desc="Queues a WriteBack or Evict before droping the only valid copy of the block";
    FinishCleanUnique, desc="Sends acks and perform any writeback after a CleanUnique";
    ActionStalledOnHazard, desc="Stall a trigger action because until finish handling snoop hazard";

    // This is triggered once a transaction doesn't have
    // any queued action and is not expecting responses/data. The transaction
    // is finalized and the next stable state is stored in the cache/directory
    // See the processNextState and makeFinalState functions
    Final;

    null;
  }
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// Data structures
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
  // Cache block size
  int blockSize, default="RubySystem::getBlockSizeBytes()";

  // CacheEntry: per-line state stored in the local cache array.
  structure(CacheEntry, interface="AbstractCacheEntry") {
    State state, desc="SLICC line state";
    DataBlock DataBlk, desc="data for the block";
    bool HWPrefetched, default="false", desc="Set if this cache entry was prefetched";
  }
|
||||
|
||||
  // Directory entry: tracks which upstream controllers hold the line and
  // which one (if any) owns it. Kept in the PerfectCacheMemory "directory".
  structure(DirEntry, interface="AbstractCacheEntry", main="false") {
    NetDest sharers, desc="All upstream controllers that have this line (includes ownwer)";
    MachineID owner, desc="Controller that has the line in UD,UC, or SD state";
    bool ownerExists, default="false", desc="true if owner exists";
    bool ownerIsExcl, default="false", desc="true if owner is UD or UC";
    State state, desc="SLICC line state";
  }
|
||||
|
||||
  // Helper class for tracking expected response and data messages
  // (implemented in C++, see common/ExpectedMap.hh). Used via the TBE's
  // expected_req_resp / expected_snp_resp fields; processNextState waits
  // until expected() drops to zero before triggering the next action.
  structure(ExpectedMap, external ="yes") {
    // Reset all counts; dataChunks is the number of data messages that make
    // up one full line transfer (see data_channel_size).
    void clear(int dataChunks);
    // Register which response/data message types are acceptable.
    void addExpectedRespType(CHIResponseType);
    void addExpectedDataType(CHIDataType);
    // Set/increase how many messages are expected.
    void setExpectedCount(int val);
    void addExpectedCount(int val);
    bool hasExpected();
    bool hasReceivedResp();
    bool hasReceivedData();
    int expected();
    int received();
    // Record an arrival; returns a bool (presumably whether the type was
    // expected -- confirm against ExpectedMap.hh).
    bool receiveResp(CHIResponseType);
    bool receiveData(CHIDataType);
    // Query whether a given type has already been received.
    bool receivedDataType(CHIDataType);
    bool receivedRespType(CHIResponseType);
  }
|
||||
|
||||
  // Tracks a pending retry: the destination waiting for a PCrdGrant once a
  // TBE slot frees up (stored in retryQueue).
  structure(RetryQueueEntry) {
    Addr addr, desc="Line address";
    MachineID retryDest, desc="Retry destination";
  }
|
||||
|
||||
  // Queue for event triggers. Used to specify a list of actions that need
  // to be performed across multiple transitions.
  // This class is also used to track pending retries
  // (implemented in C++, see common/TriggerQueue.hh).
  structure(TriggerQueue, external ="yes") {
    Event front();
    Event back();
    // *NB() variants report whether the front/back entry is non-blocking,
    // i.e. it was queued with pushNB/pushFrontNB and may execute even with
    // responses outstanding (see processNextState).
    bool frontNB();
    bool backNB();
    bool empty();
    void push(Event);
    void pushNB(Event);
    void pushFront(Event);
    void pushFrontNB(Event);
    void pop();
    // For the retry queue
    void emplace(Addr,MachineID);
    RetryQueueEntry next(); //SLICC won't allow to reuse front()
  }
|
||||
|
||||
  // TBE fields: all transient per-transaction state. A TBE lives from
  // AllocRequest/AllocSnoop until the Final trigger stores the resulting
  // stable state back into the cache/directory.
  structure(TBE, desc="Transaction buffer entry definition") {
    // in which table was this allocated
    bool is_req_tbe, desc="Allocated in the request table";
    bool is_snp_tbe, desc="Allocated in the snoop table";
    bool is_repl_tbe, desc="Allocated in the replacements table";

    int storSlot, desc="Slot in the storage tracker occupied by this entry";

    // Transaction info mostly extracted from the request message
    Addr addr, desc="Line address for this TBE";
    Addr accAddr, desc="Access address for Load/Store/WriteUniquePtl; otherwisse == addr";
    int accSize, desc="Access size for Load/Store/WriteUniquePtl; otherwisse == blockSize";
    CHIRequestType reqType, desc="Request type that initiated this transaction";
    MachineID requestor, desc="Requestor ID";
    MachineID fwdRequestor, desc="Requestor to receive data on fwding snoops";
    bool use_DMT, desc="Use DMT for this transaction";
    bool use_DCT, desc="Use DCT for this transaction";

    // if either is set prefetchers are not notified on miss/hit/fill and
    // demand hit/miss stats are not incremented
    bool is_local_pf, desc="Request generated by a local prefetcher";
    bool is_remote_pf, desc="Request generated a prefetcher in another cache";

    // NOTE: seqReq is a smart pointer pointing to original CPU request object
    // that triggers transactions associated with this TBE. seqReq carries some
    // information (e.g., PC of requesting instruction, virtual address of this
    // request, etc.). Not all transactions have this field set if they are not
    // triggered directly by a demand request from CPU.
    RequestPtr seqReq, default="nullptr", desc="Pointer to original request from CPU/sequencer";
    bool isSeqReqValid, default="false", desc="Set if seqReq is valid (not nullptr)";

    // Transaction state information
    State state, desc="SLICC line state";

    // Transient state information. These are set at the beginning of a
    // transactions and updated as data and responses are received. After
    // finalizing the transactions these are used to create the next SLICC
    // stable state.
    bool hasUseTimeout, desc="Line is locked under store/use timeout";
    DataBlock dataBlk, desc="Local copy of the line";
    WriteMask dataBlkValid, desc="Marks which bytes in the DataBlock are valid";
    bool dataValid, desc="Local copy is valid";
    bool dataDirty, desc="Local copy is dirtry";
    bool dataMaybeDirtyUpstream, desc="Line maybe dirty upstream";
    bool dataUnique, desc="Line is unique either locally or upsatream";
    bool dataToBeInvalid, desc="Local copy will be invalidated at the end of transaction";
    bool dataToBeSharedClean, desc="Local copy will become SC at the end of transaction";
    NetDest dir_sharers, desc="Upstream controllers that have the line (includes owner)";
    MachineID dir_owner, desc="Owner ID";
    bool dir_ownerExists, desc="Owner ID is valid";
    bool dir_ownerIsExcl, desc="Owner is UD or UC; SD otherwise";
    bool doCacheFill, desc="Write valid data to the cache when completing transaction";
    // NOTE: dataMaybeDirtyUpstream and dir_ownerExists are the same except
    // when we had just sent dirty data upstream and are waiting for ack to set
    // dir_ownerExists

    // Helper structures to track expected events and additional transient
    // state info

    // List of actions to be performed while on a transient state
    // See the processNextState function for details
    TriggerQueue actions, template="<Cache_Event>", desc="List of actions";
    Event pendAction, desc="Current pending action";
    Tick delayNextAction, desc="Delay next action until given tick";
    State finalState, desc="Final state; set when pendAction==Final";

    // List of expected responses and data. Checks the type of data against the
    // expected ones for debugging purposes
    // See the processNextState function for details
    ExpectedMap expected_req_resp, template="<CHIResponseType,CHIDataType>";
    ExpectedMap expected_snp_resp, template="<CHIResponseType,CHIDataType>";
    bool defer_expected_comp; // expect to receive Comp before the end of transaction
    CHIResponseType slicchack1; // fix compiler not including headers
    CHIDataType slicchack2; // fix compiler not including headers

    // Tracks pending data messages that need to be generated when sending
    // a line
    bool snd_pendEv, desc="Is there a pending tx event ?";
    WriteMask snd_pendBytes, desc="Which bytes are pending transmission";
    CHIDataType snd_msgType, desc="Type of message being sent";
    MachineID snd_destination, desc="Data destination";

    // Tracks how to update the directory when receiving a CompAck
    bool updateDirOnCompAck, desc="Update directory on CompAck";
    bool requestorToBeOwner, desc="Sets dir_ownerExists";
    bool requestorToBeExclusiveOwner, desc="Sets dir_ownerIsExcl";
    // NOTE: requestor always added to dir_sharers if updateDirOnCompAck is set

    // Set for incoming snoop requests
    bool snpNeedsData, desc="Set if snoop requires data as response";
    State fwdedState, desc="State of CompData sent due to a forwarding snoop";
    bool is_req_hazard, desc="Snoop hazard with an outstanding request";
    bool is_repl_hazard, desc="Snoop hazard with an outstanding writeback request";
    bool is_stale, desc="Request is now stale because of a snoop hazard";

    // Tracks requests sent downstream
    CHIRequestType pendReqType, desc="Sent request type";
    bool pendReqAllowRetry, desc="Sent request can be retried";
    bool rcvdRetryAck, desc="Received a RetryAck";
    bool rcvdRetryCredit, desc="Received a PCrdGrant";
    // NOTE: the message is retried only after receiving both RetryAck and
    // PCrdGrant. A request can be retried only once.
    // These are a copy of the retry msg fields in case we need to retry
    Addr pendReqAccAddr;
    int pendReqAccSize;
    NetDest pendReqDest;
    bool pendReqD2OrigReq;
    bool pendReqRetToSrc;

    // This TBE stalled a message and thus we need to call wakeUpBuffers
    // at some point
    bool wakeup_pending_req;
    bool wakeup_pending_snp;
    bool wakeup_pending_tgr;
  }
|
||||
|
||||
  // TBE table definition (C++-implemented address -> TBE map).
  structure(TBETable, external ="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }
|
||||
|
||||
  // Tracks slot occupancy/reservations for a TBE table so multiple tables
  // can share capacity (C++-implemented; see the stor*TBEs members below).
  structure(TBEStorage, external ="yes") {
    int size();
    int capacity();
    int reserved();
    int slotsAvailable();
    bool areNSlotsAvailable(int n);
    void incrementReserved();
    void decrementReserved();
    int addEntryToNewSlot();
    void addEntryToSlot(int slot);
    void removeEntryFromSlot(int slot);
  }
|
||||
|
||||
// Directory memory definition
|
||||
structure(PerfectCacheMemory, external = "yes") {
|
||||
void allocate(Addr);
|
||||
void deallocate(Addr);
|
||||
DirEntry lookup(Addr);
|
||||
bool isTagPresent(Addr);
|
||||
}
|
||||
|
||||
// Directory
|
||||
PerfectCacheMemory directory, template="<Cache_DirEntry>";
|
||||
|
||||
// Tracks unique lines locked after a store miss
|
||||
TimerTable useTimerTable;
|
||||
|
||||
// Multiplies sc_lock_base_latency to obtain the lock timeout.
|
||||
// This is incremented at Profile_Eviction and decays on
|
||||
// store miss completion
|
||||
int sc_lock_multiplier, default="0";
|
||||
|
||||
// Definitions of the TBE tables
|
||||
|
||||
// Main TBE table used for incoming requests
|
||||
TBETable TBEs, template="<Cache_TBE>", constructor="m_number_of_TBEs";
|
||||
TBEStorage storTBEs, constructor="this, m_number_of_TBEs";
|
||||
|
||||
// TBE table for WriteBack/Evict requests generated by a replacement
|
||||
// Notice storTBEs will be used when unify_repl_TBEs is set
|
||||
TBETable replTBEs, template="<Cache_TBE>", constructor="m_unify_repl_TBEs ? m_number_of_TBEs : m_number_of_repl_TBEs";
|
||||
TBEStorage storReplTBEs, constructor="this, m_number_of_repl_TBEs";
|
||||
|
||||
// TBE table for incoming snoops
|
||||
TBETable snpTBEs, template="<Cache_TBE>", constructor="m_number_of_snoop_TBEs";
|
||||
TBEStorage storSnpTBEs, constructor="this, m_number_of_snoop_TBEs";
|
||||
|
||||
// Retry handling
|
||||
|
||||
// Destinations that will be sent PCrdGrant when a TBE becomes available
|
||||
TriggerQueue retryQueue, template="<Cache_RetryQueueEntry>";
|
||||
|
||||
|
||||
// Pending RetryAck/PCrdGrant/DoRetry
|
||||
// Internal trigger message carrying a pending RetryAck/PCrdGrant/DoRetry
// event. Not visible in functional accesses (all functional* return false).
structure(RetryTriggerMsg, interface="Message") {
  Addr addr;            // line address the retry event refers to
  Event event;          // which retry-related event to trigger
  MachineID retryDest;  // destination for the RetryAck/PCrdGrant

  bool functionalRead(Packet *pkt) { return false; }
  bool functionalRead(Packet *pkt, WriteMask &mask) { return false; }
  bool functionalWrite(Packet *pkt) { return false; }
}
|
||||
|
||||
// Destinations from which we received a RetryAck. Sending new requests to these
|
||||
// destinations will be blocked until a PCrdGrant is received if
|
||||
// throttle_req_on_retry is set
|
||||
NetDest destsWaitingRetry;
|
||||
|
||||
// Pending transaction actions (generated by TBE:actions)
|
||||
// Internal trigger message for pending transaction actions (generated by
// TBE:actions). Carries no data, so functional accesses never match it.
structure(TriggerMsg, interface="Message") {
  Addr addr;
  bool from_hazard; // this action was generated during a snoop hazard
  bool functionalRead(Packet *pkt) { return false; }
  bool functionalRead(Packet *pkt, WriteMask &mask) { return false; }
  bool functionalWrite(Packet *pkt) { return false; }
}
|
||||
|
||||
// Internal replacement request
|
||||
// Internal replacement request generated when a victim line must be
// written back or evicted. Carries no data payload.
structure(ReplacementMsg, interface="Message") {
  Addr addr;       // address of the victim line being replaced
  Addr from_addr;  // address of the allocation that triggered the replacement
  int slot; // set only when unify_repl_TBEs is set
  bool functionalRead(Packet *pkt) { return false; }
  bool functionalRead(Packet *pkt, WriteMask &mask) { return false; }
  bool functionalWrite(Packet *pkt) { return false; }
}
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// Input/output port definitions
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
include "CHI-cache-ports.sm";
|
||||
// CHI-cache-ports.sm also includes CHI-cache-funcs.sm
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// Actions and transitions
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
include "CHI-cache-actions.sm";
|
||||
include "CHI-cache-transitions.sm";
|
||||
}
|
||||
792
src/mem/ruby/protocol/chi/CHI-mem.sm
Normal file
792
src/mem/ruby/protocol/chi/CHI-mem.sm
Normal file
@@ -0,0 +1,792 @@
|
||||
/*
|
||||
* Copyright (c) 2021 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
|
||||
machine(MachineType:Memory, "Memory controller interface") :
|
||||
|
||||
// no explicit modeling of allocation latency like the Caches, so add one
|
||||
// cycle to the response enqueue latency as default
|
||||
Cycles response_latency := 2;
|
||||
Cycles data_latency := 1;
|
||||
Cycles to_memory_controller_latency := 1;
|
||||
|
||||
int data_channel_size;
|
||||
|
||||
// Interface to the network
|
||||
// Note vnet_type is used by Garnet only. "response" type is assumed to
|
||||
// have data, so use it for data channels and "none" for the rest.
|
||||
// network="To" for outbound queue; network="From" for inbound
|
||||
// virtual networks: 0=request, 1=snoop, 2=response, 3=data
|
||||
|
||||
MessageBuffer * reqOut, network="To", virtual_network="0", vnet_type="none";
|
||||
MessageBuffer * snpOut, network="To", virtual_network="1", vnet_type="none";
|
||||
MessageBuffer * rspOut, network="To", virtual_network="2", vnet_type="none";
|
||||
MessageBuffer * datOut, network="To", virtual_network="3", vnet_type="response";
|
||||
|
||||
MessageBuffer * reqIn, network="From", virtual_network="0", vnet_type="none";
|
||||
MessageBuffer * snpIn, network="From", virtual_network="1", vnet_type="none";
|
||||
MessageBuffer * rspIn, network="From", virtual_network="2", vnet_type="none";
|
||||
MessageBuffer * datIn, network="From", virtual_network="3", vnet_type="response";
|
||||
|
||||
// Requests that can allocate a TBE
|
||||
MessageBuffer * reqRdy;
|
||||
|
||||
// Data/ack to/from memory
|
||||
MessageBuffer * requestToMemory;
|
||||
MessageBuffer * responseFromMemory;
|
||||
|
||||
// Trigger queue for internal events
|
||||
MessageBuffer * triggerQueue;
|
||||
|
||||
{
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// States
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Transaction states for the memory controller interface. The stable state
// is READY (the backing store owns the line when no TBE is active); all
// transient states use Backing_Store_Busy permission.
// NOTE: fixed a typo on the READY line — the attribute keyword is "desc",
// not "desk" (as used by every other state below).
state_declaration(State, desc="Transaction states", default="Memory_State_READY") {
  // We don't know if the line is cached, so the memory copy is maybe stable
  READY, AccessPermission:Backing_Store, desc="Ready to transfer the line";

  WAITING_NET_DATA, AccessPermission:Backing_Store_Busy, desc="Waiting data from the network";
  SENDING_NET_DATA, AccessPermission:Backing_Store_Busy, desc="Sending data to the network";
  READING_MEM, AccessPermission:Backing_Store_Busy, desc="Waiting data from memory";

  // Null state for debugging; allow writes
  null, AccessPermission:Backing_Store, desc="Null state";
}
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// Events
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
enumeration(Event, desc="Memory events") {
  // Checks if a request can allocate a TBE and be moved to reqRdy
  CheckAllocTBE;
  CheckAllocTBE_WithCredit;

  // Requests
  WriteNoSnpPtl;
  WriteNoSnp;
  ReadNoSnp;
  ReadNoSnpSep;

  // Data
  WriteData;

  // Memory side
  MemoryData;
  MemoryAck;

  // Internal event triggers
  Trigger_Send;         // send next data chunk to the network
  Trigger_SendDone;     // all data chunks sent
  Trigger_ReceiveDone;  // all write data received
  Trigger_SendRetry;    // send RetryAck (no TBE available)
  Trigger_SendPCrdGrant;  // send credit to a previously retried requestor
}
|
||||
|
||||
|
||||
// Is there a less tedious way to convert messages to events ??
|
||||
|
||||
// Maps an incoming CHI request type to the corresponding Event.
// Fails with a runtime error on any type this controller does not handle.
Event reqToEvent (CHIRequestType type) {
  if (type == CHIRequestType:WriteNoSnpPtl) {
    return Event:WriteNoSnpPtl;
  } else if (type == CHIRequestType:WriteNoSnp) {
    return Event:WriteNoSnp;
  } else if (type == CHIRequestType:ReadNoSnp) {
    return Event:ReadNoSnp;
  } else if (type == CHIRequestType:ReadNoSnpSep) {
    return Event:ReadNoSnpSep;
  } else {
    error("Invalid CHIRequestType");
  }
}
|
||||
|
||||
// The memory controller never expects response messages, so any call
// is an error (kept for interface symmetry with reqToEvent/dataToEvent).
Event respToEvent (CHIResponseType type) {
  error("Invalid CHIResponseType");
}
|
||||
|
||||
// Maps an incoming CHI data message type to an Event. Only non-coherent
// write data (NCBWrData) is accepted by this controller.
Event dataToEvent (CHIDataType type) {
  if (type == CHIDataType:NCBWrData) {
    return Event:WriteData;
  } else {
    error("Invalid CHIDataType");
  }
}
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// Data structures
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Cache block size
|
||||
int blockSize, default="RubySystem::getBlockSizeBytes()";
|
||||
|
||||
// TBE fields
|
||||
structure(TBE, desc="...") {
|
||||
int storSlot, desc="Slot in the storage tracker occupied by this entry";
|
||||
Addr addr, desc="Line address for this TBE";
|
||||
Addr accAddr, desc="Original access address. Set only for Write*Ptl";
|
||||
int accSize, desc="Access size. Set only for Write*Ptl";
|
||||
State state, desc="Current line state";
|
||||
DataBlock dataBlk, desc="Transaction data";
|
||||
WriteMask dataBlkValid, desc="valid bytes in dataBlk";
|
||||
int rxtxBytes, desc="Bytes sent or received";
|
||||
MachineID requestor, desc="Requestor that originated this request";
|
||||
MachineID destination, desc="Where we are sending data";
|
||||
bool useDataSepResp, desc="Replies with DataSepResp instead of CompData";
|
||||
}
|
||||
|
||||
structure(TBETable, external ="yes") {
|
||||
TBE lookup(Addr);
|
||||
void allocate(Addr);
|
||||
void deallocate(Addr);
|
||||
bool isPresent(Addr);
|
||||
bool areNSlotsAvailable(int n, Tick curTime);
|
||||
}
|
||||
|
||||
structure(TBEStorage, external ="yes") {
|
||||
int size();
|
||||
int capacity();
|
||||
int reserved();
|
||||
int slotsAvailable();
|
||||
bool areNSlotsAvailable(int n);
|
||||
void incrementReserved();
|
||||
void decrementReserved();
|
||||
int addEntryToNewSlot();
|
||||
void removeEntryFromSlot(int slot);
|
||||
}
|
||||
|
||||
TBETable TBEs, template="<Memory_TBE>", constructor="m_number_of_TBEs";
|
||||
TBEStorage storTBEs, constructor="this, m_number_of_TBEs";
|
||||
|
||||
// Tracks all pending MemoryAcks (debug purposes only)
|
||||
int pendingWrites, default="0";
|
||||
|
||||
// Internal trigger message; carries the event to fire and, for retry
// events, the destination of the RetryAck/PCrdGrant.
structure(TriggerMsg, desc="...", interface="Message") {
  Addr addr;
  Event event;
  MachineID retryDest;

  bool functionalRead(Packet *pkt) { return false; }
  bool functionalRead(Packet *pkt, WriteMask &mask) { return false; }
  bool functionalWrite(Packet *pkt) { return false; }
}

// Tracks a pending credit request from a retry
structure(RetryQueueEntry) {
  Addr addr, desc="Line address";
  MachineID retryDest, desc="Retry destination";
}

// FIFO of requestors waiting for a PCrdGrant (implemented in C++).
structure(TriggerQueue, external ="yes") {
  void pop();
  bool empty();
  void emplace(Addr,MachineID);
  RetryQueueEntry next();  // front entry without popping
}

TriggerQueue retryQueue, template="<Memory_RetryQueueEntry>";
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// External functions
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
Tick clockEdge();
|
||||
Tick curTick();
|
||||
Tick cyclesToTicks(Cycles c);
|
||||
void set_tbe(TBE b);
|
||||
void unset_tbe();
|
||||
void wakeUpAllBuffers(Addr a);
|
||||
bool respondsTo(Addr addr);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// Interface functions required by SLICC
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Returns the transaction state for addr: the TBE's state when one is
// active, otherwise the stable READY state (memory always has the line).
State getState(TBE tbe, Addr addr) {
  if (is_valid(tbe)) {
    assert(tbe.addr == addr);
    return tbe.state;
  } else {
    return State:READY;
  }
}
|
||||
|
||||
// Records the new transaction state in the TBE. No-op without a TBE:
// lines with no active transaction are implicitly READY (see getState).
void setState(TBE tbe, Addr addr, State state) {
  if (is_valid(tbe)) {
    assert(tbe.addr == addr);
    tbe.state := state;
  }
}
|
||||
|
||||
// Access permission for functional/debug accesses: Backing_Store when this
// controller responds for addr and no transaction is in flight, the
// state-derived permission during a transaction, NotPresent otherwise.
AccessPermission getAccessPermission(Addr addr) {
  if (respondsTo(addr)) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      DPRINTF(RubySlicc, "%x %s,%s\n", addr, tbe.state, Memory_State_to_permission(tbe.state));
      return Memory_State_to_permission(tbe.state);
    } else {
      DPRINTF(RubySlicc, "%x %s\n", addr, AccessPermission:Backing_Store);
      return AccessPermission:Backing_Store;
    }
  } else {
    DPRINTF(RubySlicc, "%x %s\n", addr, AccessPermission:NotPresent);
    return AccessPermission:NotPresent;
  }
}
|
||||
|
||||
void setAccessPermission(Addr addr, State state) {
|
||||
}
|
||||
|
||||
// Functional (debug) read: fills the packet from backing memory first,
// then overlays any in-flight write data held in the TBE so the reader
// sees the most up-to-date bytes. 'mask' tracks which bytes were served.
void functionalRead(Addr addr, Packet *pkt, WriteMask &mask) {
  if (respondsTo(addr)) {
    DPRINTF(RubySlicc, "functionalRead %x\n", addr);
    TBE tbe := TBEs[addr];

    // Serve from memory only if no earlier responder filled the packet.
    if (mask.isEmpty()) {
      functionalMemoryRead(pkt);
      mask.fillMask();
      DPRINTF(RubySlicc, "functionalRead mem %x %s\n", addr, mask);
    }

    // Update with any transient data
    //TODO additional handling of partial data ??
    if (is_valid(tbe)) {
      // Only bytes within the access window AND already received from the
      // network are newer than the memory copy.
      WriteMask read_mask;
      read_mask.setMask(addressOffset(tbe.accAddr, tbe.addr), tbe.accSize);
      read_mask.andMask(tbe.dataBlkValid);
      if (read_mask.isEmpty() == false) {
        testAndReadMask(addr, tbe.dataBlk, read_mask, pkt);
        DPRINTF(RubySlicc, "functionalRead tbe %x %s %s %s\n", addr, tbe.dataBlk, read_mask, mask);
        mask.orMask(read_mask);
      }
    }
  }
}
|
||||
|
||||
// Functional (debug) write: patches both the in-flight TBE copy (if any)
// and backing memory, so no stale transient data survives the write.
// Returns the number of successful functional writes performed.
int functionalWrite(Addr addr, Packet *pkt) {
  if(respondsTo(addr)) {
    int num_functional_writes := 0;
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, tbe.dataBlk, pkt);
      DPRINTF(RubySlicc, "functionalWrite tbe %x %s\n", addr, tbe.dataBlk);
    }
    num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
    DPRINTF(RubySlicc, "functionalWrite mem %x\n", addr);
    return num_functional_writes;
  } else {
    return 0;
  }
}
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// Helper functions
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Debug helper: dumps TBE occupancy and the queue sizes of every
// inbound/outbound buffer. Called from every in_port before triggering.
void printResources() {
  DPRINTF(RubySlicc, "Resources(avail/max): TBEs=%d/%d\n",
                storTBEs.size(), storTBEs.capacity());
  DPRINTF(RubySlicc, "Resources(in/out size): rdy=%d req=%d/%d rsp=%d/%d dat=%d/%d snp=%d/%d\n",
                reqRdy.getSize(curTick()),
                reqIn.getSize(curTick()), reqOut.getSize(curTick()),
                rspIn.getSize(curTick()), rspOut.getSize(curTick()),
                datIn.getSize(curTick()), datOut.getSize(curTick()),
                snpIn.getSize(curTick()), snpOut.getSize(curTick()));
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// Input/output port definitions
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Outbound port definitions
|
||||
|
||||
out_port(reqOutPort, CHIRequestMsg, reqOut);
|
||||
out_port(snpOutPort, CHIRequestMsg, snpOut);
|
||||
out_port(rspOutPort, CHIResponseMsg, rspOut);
|
||||
out_port(datOutPort, CHIDataMsg, datOut);
|
||||
out_port(triggerOutPort, TriggerMsg, triggerQueue);
|
||||
out_port(memQueue_out, MemoryMsg, requestToMemory);
|
||||
out_port(reqRdyOutPort, CHIRequestMsg, reqRdy);
|
||||
|
||||
// Inbound port definitions
|
||||
|
||||
// Response
|
||||
in_port(rspInPort, CHIResponseMsg, rspIn, rank=6) {
|
||||
if (rspInPort.isReady(clockEdge())) {
|
||||
printResources();
|
||||
peek(rspInPort, CHIResponseMsg) {
|
||||
error("Unexpected message");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Data
|
||||
in_port(datInPort, CHIDataMsg, datIn, rank=5) {
|
||||
if (datInPort.isReady(clockEdge())) {
|
||||
printResources();
|
||||
peek(datInPort, CHIDataMsg) {
|
||||
int received := in_msg.bitMask.count();
|
||||
assert((received <= data_channel_size) && (received > 0));
|
||||
trigger(dataToEvent(in_msg.type), in_msg.addr, TBEs[in_msg.addr]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Data/Ack from memory
|
||||
|
||||
in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=4) {
|
||||
if (memQueue_in.isReady(clockEdge())) {
|
||||
printResources();
|
||||
peek(memQueue_in, MemoryMsg) {
|
||||
Addr addr := makeLineAddress(in_msg.addr);
|
||||
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
||||
trigger(Event:MemoryData, addr, TBEs[addr]);
|
||||
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
||||
trigger(Event:MemoryAck, addr, TBEs[addr]);
|
||||
} else {
|
||||
error("Invalid message");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Trigger
|
||||
in_port(triggerInPort, TriggerMsg, triggerQueue, rank=3) {
|
||||
if (triggerInPort.isReady(clockEdge())) {
|
||||
printResources();
|
||||
peek(triggerInPort, TriggerMsg) {
|
||||
trigger(in_msg.event, in_msg.addr, TBEs[in_msg.addr]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Snoops
|
||||
in_port(snpInPort, CHIRequestMsg, snpIn, rank=2) {
|
||||
if (snpInPort.isReady(clockEdge())) {
|
||||
printResources();
|
||||
peek(snpInPort, CHIRequestMsg) {
|
||||
error("Unexpected message");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Requests
|
||||
in_port(reqRdyInPort, CHIRequestMsg, reqRdy, rank=1) {
|
||||
if (reqRdyInPort.isReady(clockEdge())) {
|
||||
printResources();
|
||||
peek(reqRdyInPort, CHIRequestMsg) {
|
||||
trigger(reqToEvent(in_msg.type), in_msg.addr, TBEs[in_msg.addr]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
in_port(reqInPort, CHIRequestMsg, reqIn, rank=0) {
|
||||
if (reqInPort.isReady(clockEdge())) {
|
||||
printResources();
|
||||
peek(reqInPort, CHIRequestMsg) {
|
||||
if (in_msg.allowRetry) {
|
||||
trigger(Event:CheckAllocTBE, in_msg.addr, TBEs[in_msg.addr]);
|
||||
} else {
|
||||
// Only expected requests that do not allow retry are the ones that
|
||||
// are being retried after receiving credit
|
||||
trigger(Event:CheckAllocTBE_WithCredit,
|
||||
in_msg.addr, TBEs[in_msg.addr]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// Actions
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Admission control for new requests: if a TBE slot is free, reserve it
// and forward the request to reqRdy; otherwise answer with a RetryAck
// (via trigger) and remember the requestor for a later PCrdGrant.
action(checkAllocateTBE, desc="") {
  // Move to reqRdy if resources available, otherwise send retry
  if (storTBEs.areNSlotsAvailable(1)) {
    // reserve a slot for this request
    storTBEs.incrementReserved();

    peek(reqInPort, CHIRequestMsg) {
      enqueue(reqRdyOutPort, CHIRequestMsg, 0) {
        out_msg := in_msg;
      }
    }

  } else {
    peek(reqInPort, CHIRequestMsg) {
      // only first-attempt requests may be retried (see reqInPort)
      assert(in_msg.allowRetry);
      enqueue(triggerOutPort, TriggerMsg, 0) {
        out_msg.addr := in_msg.addr;
        out_msg.event := Event:Trigger_SendRetry;
        out_msg.retryDest := in_msg.requestor;
        // queue the requestor so deallocateTBE can grant it a credit later
        retryQueue.emplace(in_msg.addr,in_msg.requestor);
      }
    }
  }
  reqInPort.dequeue(clockEdge());
}
|
||||
|
||||
// Admits a request that is being retried after a PCrdGrant: a slot was
// already reserved when the credit was issued, so just forward to reqRdy.
action(checkAllocateTBE_withCredit, desc="") {
  // We must have reserved resources for this request
  peek(reqInPort, CHIRequestMsg) {
    assert(in_msg.allowRetry == false);
    enqueue(reqRdyOutPort, CHIRequestMsg, 0) {
      out_msg := in_msg;
    }
  }
  reqInPort.dequeue(clockEdge());
}
|
||||
|
||||
// Converts the reservation made at admission time into an actual TBE
// for 'address' and initializes the bookkeeping fields.
action(allocateTBE, "atbe", desc="Allocate TBEs for a miss") {
  // We must have reserved resources for this allocation
  storTBEs.decrementReserved();
  assert(storTBEs.areNSlotsAvailable(1));

  TBEs.allocate(address);
  set_tbe(TBEs[address]);
  tbe.storSlot := storTBEs.addEntryToNewSlot();
  tbe.addr := address;
  tbe.rxtxBytes := 0;
  tbe.useDataSepResp := false;
}
|
||||
|
||||
// Copies requestor/destination and access window from the admitted
// request into the TBE. Data goes to fwdRequestor when the request asks
// for forwarding (DMT-style), otherwise back to the original requestor.
action(initializeFromReqTBE, "itbe", desc="Initialize TBE fields") {
  peek(reqRdyInPort, CHIRequestMsg) {
    tbe.requestor := in_msg.requestor;
    if (in_msg.dataToFwdRequestor) {
      tbe.destination := in_msg.fwdRequestor;
    } else {
      tbe.destination := in_msg.requestor;
    }
    tbe.accAddr := in_msg.accAddr;
    tbe.accSize := in_msg.accSize;
  }
}
|
||||
|
||||
// Accounts for a completed memory write; pendingWrites is a sanity
// counter only (incremented in sendMemoryWrite).
action(decWritePending, "dwp", desc="Decrement pending writes") {
  assert(pendingWrites >= 1);
  pendingWrites := pendingWrites - 1;
}
|
||||
|
||||
// Releases the TBE and, if some requestor was previously retried, uses
// the freed slot to issue it a PCrdGrant (reserving the slot again so it
// cannot be stolen before the retried request arrives).
action(deallocateTBE, "dtbe", desc="Deallocate TBEs") {
  assert(is_valid(tbe));
  storTBEs.removeEntryFromSlot(tbe.storSlot);
  TBEs.deallocate(address);
  unset_tbe();
  // send credit if requestor waiting for it
  if (retryQueue.empty() == false) {
    assert(storTBEs.areNSlotsAvailable(1));
    storTBEs.incrementReserved();
    RetryQueueEntry e := retryQueue.next();
    retryQueue.pop();
    enqueue(triggerOutPort, TriggerMsg, 0) {
      out_msg.addr := e.addr;
      out_msg.retryDest := e.retryDest;
      out_msg.event := Event:Trigger_SendPCrdGrant;
    }
  }
}
|
||||
|
||||
// ReadNoSnpSep handling: acknowledge the request with a ReadReceipt and
// mark the TBE so the data is later sent as DataSepResp (separate
// response) instead of CompData.
action(sendReadReceipt, "sRR", desc="Send receipt to requestor") {
  assert(is_valid(tbe));
  enqueue(rspOutPort, CHIResponseMsg, response_latency) {
    out_msg.addr := address;
    out_msg.type := CHIResponseType:ReadReceipt;
    out_msg.responder := machineID;
    out_msg.Destination.add(tbe.requestor);
  }
  // also send different type of data when ready
  tbe.useDataSepResp := true;
}
|
||||
|
||||
// Acknowledges a write request with CompDBIDResp, granting the requestor
// permission to send its write data.
action(sendCompDBIDResp, "sCbid", desc="Send ack to requestor") {
  assert(is_valid(tbe));
  enqueue(rspOutPort, CHIResponseMsg, response_latency) {
    out_msg.addr := address;
    out_msg.type := CHIResponseType:CompDBIDResp;
    out_msg.responder := machineID;
    out_msg.Destination.add(tbe.requestor);
  }
}
|
||||
|
||||
// Issues a full-line read to the memory controller; the reply arrives on
// responseFromMemory as a MEMORY_READ MemoryMsg (see memQueue_in).
action(sendMemoryRead, "smr", desc="Send request to memory") {
  assert(is_valid(tbe));
  enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
    out_msg.addr := address;
    out_msg.Type := MemoryRequestType:MEMORY_READ;
    out_msg.Sender := tbe.requestor;
    out_msg.MessageSize := MessageSizeType:Request_Control;
    out_msg.Len := 0;
  }
}
|
||||
|
||||
// Writes the received data back to memory, covering only the original
// access window (accAddr/accSize). The ack arrives later as MemoryAck;
// pendingWrites tracks how many are outstanding (sanity check only).
action(sendMemoryWrite, "smw", desc="Send request to memory") {
  assert(is_valid(tbe));
  enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
    out_msg.addr := tbe.accAddr;
    out_msg.Type := MemoryRequestType:MEMORY_WB;
    out_msg.Sender := tbe.requestor;
    out_msg.MessageSize := MessageSizeType:Writeback_Data;
    out_msg.DataBlk := tbe.dataBlk;
    out_msg.Len := tbe.accSize;
  }
  // the TBE copy is no longer the authoritative version
  tbe.dataBlkValid.clear();
  pendingWrites := pendingWrites + 1;
}
|
||||
|
||||
// Copies the line returned by memory into the TBE and resets the
// transmit byte counter; marks the access window bytes as valid.
action(prepareSend, "ps", desc="Copies received memory data to TBE") {
  assert(is_valid(tbe));
  peek(memQueue_in, MemoryMsg) {
    tbe.dataBlk := in_msg.DataBlk;
  }
  tbe.rxtxBytes := 0;
  tbe.dataBlkValid.setMask(addressOffset(tbe.accAddr, tbe.addr), tbe.accSize);
}
|
||||
|
||||
// Merges one chunk of network write data into the TBE, tracking which
// bytes are valid and how many bytes have been received so far.
action(copyWriteDataToTBE, "cpWDat", desc="Copies received net data to TBE") {
  peek(datInPort, CHIDataMsg) {
    assert(is_valid(tbe));
    tbe.dataBlk.copyPartial(in_msg.dataBlk, in_msg.bitMask);
    tbe.dataBlkValid.orMask(in_msg.bitMask);
    tbe.rxtxBytes := tbe.rxtxBytes + in_msg.bitMask.count();
  }
}
|
||||
|
||||
// Sends one data_channel_size chunk of the line to the destination and
// schedules either the next chunk (next cycle) or completion. rxtxBytes
// doubles as the offset of the chunk being sent, since chunks go in order.
action(sendDataAndCheck, "sd", desc="Send received data to requestor") {
  assert(is_valid(tbe));
  assert(tbe.rxtxBytes < blockSize);
  enqueue(datOutPort, CHIDataMsg, data_latency) {
    out_msg.addr := tbe.addr;
    // DataSepResp when a ReadReceipt was sent (ReadNoSnpSep), else CompData
    if (tbe.useDataSepResp) {
      out_msg.type := CHIDataType:DataSepResp_UC;
    } else {
      out_msg.type := CHIDataType:CompData_UC;
    }
    out_msg.dataBlk := tbe.dataBlk;
    // Called in order for the whole block so use rxtxBytes as offset
    out_msg.bitMask.setMask(tbe.rxtxBytes, data_channel_size);
    out_msg.Destination.add(tbe.destination);
  }

  //DPRINTF(RubySlicc, "rxtxBytes=%d\n", tbe.rxtxBytes);

  tbe.rxtxBytes := tbe.rxtxBytes + data_channel_size;

  // end or send next chunk next cycle
  Event next := Event:Trigger_SendDone;
  Cycles delay := intToCycles(0);
  if (tbe.rxtxBytes < blockSize) {
    next := Event:Trigger_Send;
    delay := intToCycles(1);
  }
  enqueue(triggerOutPort, TriggerMsg, delay) {
    out_msg.addr := address;
    out_msg.event := next;
  }
}
|
||||
|
||||
// After each received chunk, fires Trigger_ReceiveDone once the whole
// access window (accSize bytes) has arrived; sanity-checks the valid mask.
action(checkForReceiveCompletion, "cWc", desc="Check if all data is received") {
  assert(is_valid(tbe));
  DPRINTF(RubySlicc, "rxtxBytes=%d\n", tbe.rxtxBytes);
  assert((tbe.rxtxBytes <= tbe.accSize) && (tbe.rxtxBytes > 0));
  if (tbe.rxtxBytes == tbe.accSize) {
    enqueue(triggerOutPort, TriggerMsg, 0) {
      out_msg.addr := address;
      out_msg.event := Event:Trigger_ReceiveDone;
    }
    tbe.rxtxBytes := 0;
    assert(tbe.dataBlkValid.getMask(addressOffset(tbe.accAddr, tbe.addr), tbe.accSize));
  }
}
|
||||
|
||||
// Dequeue helpers, one per inbound buffer consumed by the transitions.
action(popReqInQueue, "preq", desc="Pop request queue.") {
  reqRdyInPort.dequeue(clockEdge());
}

action(popDataInQueue, "pdata", desc="Pop data queue.") {
  datInPort.dequeue(clockEdge());
}

action(popTriggerQueue, "ptrigger", desc="Pop trigger queue.") {
  triggerInPort.dequeue(clockEdge());
}

action(popMemoryQueue, "pmem", desc="Pop memory queue.") {
  memQueue_in.dequeue(clockEdge());
}
|
||||
|
||||
// Stall/wake-up only used for requests that arrive when we are on the
|
||||
// WAITING_NET_DATA state. For all other case the line should be either
|
||||
// ready or we can overlap
|
||||
// Stall/wake-up only used for requests that arrive when we are on the
// WAITING_NET_DATA state. For all other case the line should be either
// ready or we can overlap
action(stallRequestQueue, "str", desc="Stall and wait on the address") {
  peek(reqRdyInPort, CHIRequestMsg){
    stall_and_wait(reqRdyInPort, address);
  }
}
action(wakeUpStalled, "wa", desc="Wake up any requests waiting for this address") {
  wakeUpAllBuffers(address);
}
|
||||
|
||||
// Sends a RetryAck to the requestor recorded in the trigger message
// (queued by checkAllocateTBE when no TBE slot was available).
action(sendRetryAck, desc="") {
  peek(triggerInPort, TriggerMsg) {
    enqueue(rspOutPort, CHIResponseMsg, response_latency) {
      out_msg.addr := in_msg.addr;
      out_msg.type := CHIResponseType:RetryAck;
      out_msg.responder := machineID;
      out_msg.Destination.add(in_msg.retryDest);
    }
  }
}
|
||||
|
||||
// Sends a PCrdGrant (credit) to a previously retried requestor; queued
// by deallocateTBE when a slot becomes free.
action(sendPCrdGrant, desc="") {
  peek(triggerInPort, TriggerMsg) {
    enqueue(rspOutPort, CHIResponseMsg, response_latency) {
      out_msg.addr := in_msg.addr;
      out_msg.type := CHIResponseType:PCrdGrant;
      out_msg.responder := machineID;
      out_msg.Destination.add(in_msg.retryDest);
    }
  }
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// Transitions
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
transition(READY, ReadNoSnp, READING_MEM) {
|
||||
allocateTBE;
|
||||
initializeFromReqTBE;
|
||||
sendMemoryRead;
|
||||
popReqInQueue;
|
||||
}
|
||||
|
||||
transition(READY, ReadNoSnpSep, READING_MEM) {
|
||||
allocateTBE;
|
||||
initializeFromReqTBE;
|
||||
sendMemoryRead;
|
||||
sendReadReceipt;
|
||||
popReqInQueue;
|
||||
}
|
||||
|
||||
transition(READING_MEM, MemoryData, SENDING_NET_DATA) {
|
||||
prepareSend;
|
||||
sendDataAndCheck;
|
||||
popMemoryQueue;
|
||||
}
|
||||
|
||||
transition(SENDING_NET_DATA, Trigger_Send) {
|
||||
sendDataAndCheck;
|
||||
popTriggerQueue;
|
||||
}
|
||||
|
||||
transition(READY, WriteNoSnpPtl, WAITING_NET_DATA) {
|
||||
allocateTBE;
|
||||
initializeFromReqTBE;
|
||||
sendCompDBIDResp;
|
||||
popReqInQueue;
|
||||
}
|
||||
|
||||
transition(READY, WriteNoSnp, WAITING_NET_DATA) {
|
||||
allocateTBE;
|
||||
initializeFromReqTBE;
|
||||
sendCompDBIDResp;
|
||||
popReqInQueue;
|
||||
}
|
||||
|
||||
transition(WAITING_NET_DATA, WriteData) {
|
||||
copyWriteDataToTBE;
|
||||
checkForReceiveCompletion;
|
||||
popDataInQueue;
|
||||
}
|
||||
|
||||
transition(WAITING_NET_DATA, Trigger_ReceiveDone, READY) {
|
||||
sendMemoryWrite;
|
||||
deallocateTBE;
|
||||
wakeUpStalled;
|
||||
popTriggerQueue;
|
||||
}
|
||||
|
||||
transition(SENDING_NET_DATA, Trigger_SendDone, READY) {
|
||||
deallocateTBE;
|
||||
wakeUpStalled;
|
||||
popTriggerQueue;
|
||||
}
|
||||
|
||||
// Just sanity check against counter of pending acks
|
||||
transition({READING_MEM,WAITING_NET_DATA,SENDING_NET_DATA,READY},
|
||||
MemoryAck) {
|
||||
decWritePending;
|
||||
popMemoryQueue;
|
||||
}
|
||||
|
||||
// Notice we only use this here and call wakeUp when leaving this state
|
||||
transition({READING_MEM,WAITING_NET_DATA,SENDING_NET_DATA},
|
||||
{ReadNoSnp, ReadNoSnpSep, WriteNoSnpPtl}) {
|
||||
stallRequestQueue;
|
||||
}
|
||||
|
||||
transition({READING_MEM,WAITING_NET_DATA,SENDING_NET_DATA,READY},
|
||||
Trigger_SendRetry) {
|
||||
sendRetryAck;
|
||||
popTriggerQueue;
|
||||
}
|
||||
|
||||
transition({READING_MEM,WAITING_NET_DATA,SENDING_NET_DATA,READY},
|
||||
Trigger_SendPCrdGrant) {
|
||||
sendPCrdGrant;
|
||||
popTriggerQueue;
|
||||
}
|
||||
|
||||
transition({READING_MEM,WAITING_NET_DATA,SENDING_NET_DATA,READY},
|
||||
CheckAllocTBE) {
|
||||
checkAllocateTBE;
|
||||
}
|
||||
|
||||
transition({READING_MEM,WAITING_NET_DATA,SENDING_NET_DATA,READY},
|
||||
CheckAllocTBE_WithCredit) {
|
||||
checkAllocateTBE_withCredit;
|
||||
}
|
||||
|
||||
}
|
||||
234
src/mem/ruby/protocol/chi/CHI-msg.sm
Normal file
234
src/mem/ruby/protocol/chi/CHI-msg.sm
Normal file
@@ -0,0 +1,234 @@
|
||||
/*
|
||||
* Copyright (c) 2021 ARM Limited
|
||||
* All rights reserved
|
||||
*
|
||||
* The license below extends only to copyright in the software and shall
|
||||
* not be construed as granting a license to any other intellectual
|
||||
* property including but not limited to intellectual property relating
|
||||
* to a hardware implementation of the functionality of the software
|
||||
* licensed hereunder. You may use the software subject to the license
|
||||
* terms below provided that you ensure that this notice is replicated
|
||||
* unmodified and in its entirety in all distributions of the software,
|
||||
* modified or unmodified, in source code or in binary form.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met: redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
* redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution;
|
||||
* neither the name of the copyright holders nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
// All CHI request and response types match the name style in the standard doc.
|
||||
// For a description of a specific message type, refer to the Arm's AMBA 5
|
||||
// CHI specification (issue D):
|
||||
// https://static.docs.arm.com/ihi0050/d/
|
||||
// IHI0050D_amba_5_chi_architecture_spec.pdf
|
||||
|
||||
enumeration(CHIRequestType, desc="") {
|
||||
// Incoming requests generated by the sequencer
|
||||
Load;
|
||||
Store;
|
||||
StoreLine;
|
||||
|
||||
// CHI request types
|
||||
ReadShared;
|
||||
ReadNotSharedDirty;
|
||||
ReadUnique;
|
||||
ReadOnce;
|
||||
CleanUnique;
|
||||
|
||||
Evict;
|
||||
|
||||
WriteBackFull;
|
||||
WriteCleanFull;
|
||||
WriteEvictFull;
|
||||
WriteUniquePtl;
|
||||
WriteUniqueFull;
|
||||
|
||||
SnpSharedFwd;
|
||||
SnpNotSharedDirtyFwd;
|
||||
SnpUniqueFwd;
|
||||
SnpOnceFwd;
|
||||
SnpOnce;
|
||||
SnpShared;
|
||||
SnpUnique;
|
||||
SnpCleanInvalid;
|
||||
|
||||
WriteNoSnpPtl;
|
||||
WriteNoSnp;
|
||||
ReadNoSnp;
|
||||
ReadNoSnpSep;
|
||||
|
||||
null;
|
||||
}
|
||||
|
||||
structure(CHIRequestMsg, desc="", interface="Message") {
|
||||
Addr addr, desc="Request line address";
|
||||
Addr accAddr, desc="Original access address. Set for Write*Ptl and requests from the sequencer";
|
||||
int accSize, desc="Access size. Set for Write*Ptl and requests from the sequencer";
|
||||
CHIRequestType type, desc="Request type";
|
||||
MachineID requestor, desc="Requestor ID";
|
||||
MachineID fwdRequestor, desc="Where to send data for DMT/DCT requests";
|
||||
bool dataToFwdRequestor, desc="Data has to be forwarded to fwdRequestor";
|
||||
bool retToSrc, desc="Affects whether or not a snoop resp returns data";
|
||||
bool allowRetry, desc="This request can be retried";
|
||||
NetDest Destination, desc="Message destination";
|
||||
|
||||
RequestPtr seqReq, default="nullptr", desc="Pointer to original request from CPU/sequencer (nullptr if not valid)";
|
||||
bool isSeqReqValid, default="false", desc="Set if seqReq is valid (not nullptr)";
|
||||
|
||||
bool is_local_pf, desc="Request generated by a local prefetcher";
|
||||
bool is_remote_pf, desc="Request generated a prefetcher in another cache";
|
||||
|
||||
MessageSizeType MessageSize, default="MessageSizeType_Control";
|
||||
|
||||
// No data for functional access
|
||||
bool functionalRead(Packet *pkt) { return false; }
|
||||
bool functionalRead(Packet *pkt, WriteMask &mask) { return false; }
|
||||
bool functionalWrite(Packet *pkt) { return false; }
|
||||
}
|
||||
|
||||
enumeration(CHIResponseType, desc="...") {
|
||||
// CHI response types
|
||||
Comp_I;
|
||||
Comp_UC;
|
||||
Comp_SC;
|
||||
CompAck;
|
||||
CompDBIDResp;
|
||||
DBIDResp;
|
||||
Comp;
|
||||
ReadReceipt;
|
||||
RespSepData;
|
||||
|
||||
SnpResp_I;
|
||||
SnpResp_I_Fwded_UC;
|
||||
SnpResp_I_Fwded_UD_PD;
|
||||
SnpResp_SC;
|
||||
SnpResp_SC_Fwded_SC;
|
||||
SnpResp_SC_Fwded_SD_PD;
|
||||
SnpResp_UC_Fwded_I;
|
||||
SnpResp_UD_Fwded_I;
|
||||
SnpResp_SC_Fwded_I;
|
||||
SnpResp_SD_Fwded_I;
|
||||
|
||||
RetryAck;
|
||||
PCrdGrant;
|
||||
|
||||
null;
|
||||
}
|
||||
|
||||
structure(CHIResponseMsg, desc="", interface="Message") {
|
||||
Addr addr, desc="Line address";
|
||||
CHIResponseType type, desc="Response type";
|
||||
MachineID responder, desc="Responder ID";
|
||||
NetDest Destination, desc="Response destination";
|
||||
bool stale, desc="Response to a stale request";
|
||||
//NOTE: not in CHI and for debuging only
|
||||
|
||||
MessageSizeType MessageSize, default="MessageSizeType_Control";
|
||||
|
||||
// No data for functional access
|
||||
bool functionalRead(Packet *pkt) { return false; }
|
||||
bool functionalRead(Packet *pkt, WriteMask &mask) { return false; }
|
||||
bool functionalWrite(Packet *pkt) { return false; }
|
||||
}
|
||||
|
||||
enumeration(CHIDataType, desc="...") {
|
||||
// CHI data response types
|
||||
CompData_I;
|
||||
CompData_UC;
|
||||
CompData_SC;
|
||||
CompData_UD_PD;
|
||||
CompData_SD_PD;
|
||||
DataSepResp_UC;
|
||||
CBWrData_UC;
|
||||
CBWrData_SC;
|
||||
CBWrData_UD_PD;
|
||||
CBWrData_SD_PD;
|
||||
CBWrData_I;
|
||||
NCBWrData;
|
||||
SnpRespData_I;
|
||||
SnpRespData_I_PD;
|
||||
SnpRespData_SC;
|
||||
SnpRespData_SC_PD;
|
||||
SnpRespData_SD;
|
||||
SnpRespData_UC;
|
||||
SnpRespData_UD;
|
||||
SnpRespData_SC_Fwded_SC;
|
||||
SnpRespData_SC_Fwded_SD_PD;
|
||||
SnpRespData_SC_PD_Fwded_SC;
|
||||
SnpRespData_I_Fwded_SD_PD;
|
||||
SnpRespData_I_PD_Fwded_SC;
|
||||
SnpRespData_I_Fwded_SC;
|
||||
null;
|
||||
}
|
||||
|
||||
structure(CHIDataMsg, desc="", interface="Message") {
|
||||
Addr addr, desc="Line address";
|
||||
CHIDataType type, desc="Response type";
|
||||
MachineID responder, desc="Responder ID";
|
||||
NetDest Destination, desc="Response destination";
|
||||
DataBlock dataBlk, desc="Line data";
|
||||
WriteMask bitMask, desc="Which bytes in the data block are valid";
|
||||
|
||||
|
||||
MessageSizeType MessageSize, default="MessageSizeType_Data";
|
||||
|
||||
bool functionalRead(Packet *pkt) {
|
||||
if(bitMask.isFull()) {
|
||||
return testAndRead(addr, dataBlk, pkt);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool functionalRead(Packet *pkt, WriteMask &mask) {
|
||||
// read if bitmask has bytes not in mask or if data is dirty
|
||||
bool is_dirty := (type == CHIDataType:CompData_UD_PD) ||
|
||||
(type == CHIDataType:CompData_SD_PD) ||
|
||||
(type == CHIDataType:CBWrData_UD_PD) ||
|
||||
(type == CHIDataType:CBWrData_SD_PD) ||
|
||||
(type == CHIDataType:NCBWrData) ||
|
||||
(type == CHIDataType:SnpRespData_I_PD) ||
|
||||
(type == CHIDataType:SnpRespData_SC_PD) ||
|
||||
(type == CHIDataType:SnpRespData_SD) ||
|
||||
(type == CHIDataType:SnpRespData_UD) ||
|
||||
(type == CHIDataType:SnpRespData_SC_Fwded_SD_PD) ||
|
||||
(type == CHIDataType:SnpRespData_SC_PD_Fwded_SC) ||
|
||||
(type == CHIDataType:SnpRespData_I_Fwded_SD_PD) ||
|
||||
(type == CHIDataType:SnpRespData_I_PD_Fwded_SC);
|
||||
assert(bitMask.isEmpty() == false);
|
||||
WriteMask test_mask := mask;
|
||||
test_mask.orMask(bitMask);
|
||||
if ((test_mask.cmpMask(mask) == false) || is_dirty) {
|
||||
if (testAndReadMask(addr, dataBlk, bitMask, pkt)) {
|
||||
mask.orMask(bitMask);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool functionalWrite(Packet *pkt) {
|
||||
return testAndWrite(addr, dataBlk, pkt);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
6
src/mem/ruby/protocol/chi/CHI.slicc
Normal file
6
src/mem/ruby/protocol/chi/CHI.slicc
Normal file
@@ -0,0 +1,6 @@
|
||||
protocol "CHI";
|
||||
|
||||
include "RubySlicc_interfaces.slicc";
|
||||
include "CHI-msg.sm";
|
||||
include "CHI-cache.sm";
|
||||
include "CHI-mem.sm";
|
||||
47
src/mem/ruby/protocol/chi/SConsopts
Normal file
47
src/mem/ruby/protocol/chi/SConsopts
Normal file
@@ -0,0 +1,47 @@
|
||||
# -*- mode:python -*-
|
||||
|
||||
# Copyright (c) 2021 ARM Limited
|
||||
# All rights reserved.
|
||||
#
|
||||
# The license below extends only to copyright in the software and shall
|
||||
# not be construed as granting a license to any other intellectual
|
||||
# property including but not limited to intellectual property relating
|
||||
# to a hardware implementation of the functionality of the software
|
||||
# licensed hereunder. You may use the software subject to the license
|
||||
# terms below provided that you ensure that this notice is replicated
|
||||
# unmodified and in its entirety in all distributions of the software,
|
||||
# modified or unmodified, in source code or in binary form.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met: redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer;
|
||||
# redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution;
|
||||
# neither the name of the copyright holders nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
Import('*')
|
||||
|
||||
# Register this protocol with gem5/SCons
|
||||
|
||||
all_protocols.append('CHI')
|
||||
|
||||
# CHI requires Ruby's inerface to support partial functional reads
|
||||
need_partial_func_reads.append('CHI')
|
||||
|
||||
protocol_dirs.append(Dir('.').abspath)
|
||||
@@ -45,9 +45,6 @@ if env['PROTOCOL'] == 'None':
|
||||
|
||||
env.Append(CPPDEFINES=['PROTOCOL_' + env['PROTOCOL']])
|
||||
|
||||
# list of protocols that require the partial functional read interface
|
||||
need_partial_func_reads = []
|
||||
|
||||
if env['PROTOCOL'] in need_partial_func_reads:
|
||||
env.Append(CPPDEFINES=['PARTIAL_FUNC_READS'])
|
||||
|
||||
|
||||
Reference in New Issue
Block a user