This patch is the final patch in a series of patches. The aim of the series is to make Ruby more configurable than it was. More specifically, the connections between controllers were not configurable at all (unless one was ready to make significant changes to the coherence protocol). Moreover, the buffers themselves were magically connected to the network inside the SLICC code; these connections were not part of the configuration file. This patch makes changes so that these connections are now made in the Python configuration files associated with the protocols. This requires each state machine to expose the message buffers it uses for input and output, so the patch makes these buffers configurable members of the machines. The patch drops the SLICC code that used to connect these buffers to the network. Now these buffers are exposed to the Python configuration system as Master and Slave ports. In the configuration files, any master port can be connected to any slave port. The file pyobject.cc has been modified to take care of allocating the actual message buffer. This is in line with how other port connections work.
664 lines
21 KiB
Plaintext
664 lines
21 KiB
Plaintext
/*
 * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Directory controller for the MI-example protocol.
 *
 * Tracks a single exclusive owner per block (no sharer list is ever
 * populated, despite the Sharers field existing in the entry).  Services
 * processor GETS/GETX/PUTX requests and DMA reads/writes, using an
 * off-chip MemoryControl (memBuffer) for fetches and writebacks.
 */
machine(Directory, "Directory protocol")
    : DirectoryMemory * directory;        // backing directory state per block
      MemoryControl * memBuffer;          // interface to off-chip memory
      Cycles directory_latency := 12;     // latency applied to forwarded/control messages

      // Outgoing (To network) buffers.
      MessageBuffer * forwardFromDir, network="To", virtual_network="3",
            ordered="false", vnet_type="forward";
      MessageBuffer * responseFromDir, network="To", virtual_network="4",
            ordered="false", vnet_type="response";
      MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
            ordered="true", vnet_type="response";

      // Incoming (From network) buffers.
      MessageBuffer * requestToDir, network="From", virtual_network="2",
            ordered="true", vnet_type="request";
      MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
            ordered="true", vnet_type="request";
{
    // STATES
    state_declaration(State, desc="Directory states", default="Directory_State_I") {
        // Base states
        I, AccessPermission:Read_Write, desc="Invalid";
        M, AccessPermission:Invalid, desc="Modified";

        // Blocked waiting for the owner's PUTX after a DMA-triggered invalidation.
        M_DRD, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA read";
        M_DWR, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA write";

        // Waiting for the memory writeback ack before returning to I.
        M_DWRI, AccessPermission:Busy, desc="Intermediate state M_DWR-->I";
        M_DRDI, AccessPermission:Busy, desc="Intermediate state M_DRD-->I";

        IM, AccessPermission:Busy, desc="Intermediate state I-->M";
        MI, AccessPermission:Busy, desc="Intermediate state M-->I";
        ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
        ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
    }

    // Events
    enumeration(Event, desc="Directory events") {
        // processor requests
        GETX, desc="A GETX arrives";
        GETS, desc="A GETS arrives";
        PUTX, desc="A PUTX arrives";
        PUTX_NotOwner, desc="A PUTX arrives";   // PUTX from a machine that is not the recorded owner

        // DMA requests
        DMA_READ, desc="A DMA Read memory request";
        DMA_WRITE, desc="A DMA Write memory request";

        // Memory Controller
        Memory_Data, desc="Fetched data from memory arrives";
        Memory_Ack, desc="Writeback Ack from memory arrives";
    }

    // TYPES

    // DirectoryEntry
    structure(Entry, desc="...", interface="AbstractEntry") {
        State DirectoryState, desc="Directory state";
        DataBlock DataBlk, desc="data for the block";
        NetDest Sharers, desc="Sharers for this block";
        NetDest Owner, desc="Owner of this block";
    }

    // TBE entries for DMA requests
    structure(TBE, desc="TBE entries for outstanding DMA requests") {
        Address PhysicalAddress, desc="physical address";
        State TBEState, desc="Transient State";
        DataBlock DataBlk, desc="Data to be written (DMA write only)";
        int Len, desc="...";
        MachineID DmaRequestor, desc="DMA requestor";
    }

    // External TBE table implemented in C++ (see TBETable template).
    structure(TBETable, external="yes") {
        TBE lookup(Address);
        void allocate(Address);
        void deallocate(Address);
        bool isPresent(Address);
    }

    // ** OBJECTS **
    TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

    void set_tbe(TBE b);
    void unset_tbe();

    // Look up the directory entry for addr, allocating a fresh one on a miss
    // so callers always receive a valid pointer.
    Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
        Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

        if (is_valid(dir_entry)) {
            return dir_entry;
        }

        dir_entry := static_cast(Entry, "pointer",
                                 directory.allocate(addr, new Entry));
        return dir_entry;
    }

    // Transient state (TBE) takes precedence over the stable directory state;
    // an absent block is reported as I.
    State getState(TBE tbe, Address addr) {
        if (is_valid(tbe)) {
            return tbe.TBEState;
        } else if (directory.isPresent(addr)) {
            return getDirectoryEntry(addr).DirectoryState;
        } else {
            return State:I;
        }
    }

    void setState(TBE tbe, Address addr, State state) {
        if (is_valid(tbe)) {
            tbe.TBEState := state;
        }

        if (directory.isPresent(addr)) {
            // M requires exactly one owner and no sharers.
            if (state == State:M) {
                assert(getDirectoryEntry(addr).Owner.count() == 1);
                assert(getDirectoryEntry(addr).Sharers.count() == 0);
            }

            getDirectoryEntry(addr).DirectoryState := state;

            // Entering I means nobody holds the block, so the entry can be freed.
            if (state == State:I) {
                assert(getDirectoryEntry(addr).Owner.count() == 0);
                assert(getDirectoryEntry(addr).Sharers.count() == 0);
                directory.invalidateBlock(addr);
            }
        }
    }

    AccessPermission getAccessPermission(Address addr) {
        TBE tbe := TBEs[addr];
        if(is_valid(tbe)) {
            return Directory_State_to_permission(tbe.TBEState);
        }

        if(directory.isPresent(addr)) {
            return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
        }

        return AccessPermission:NotPresent;
    }

    void setAccessPermission(Address addr, State state) {
        if (directory.isPresent(addr)) {
            getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
        }
    }

    // Functional-access hook: prefer in-flight TBE data over the directory copy.
    DataBlock getDataBlock(Address addr), return_by_ref="yes" {
        TBE tbe := TBEs[addr];
        if(is_valid(tbe)) {
            return tbe.DataBlk;
        }

        return getDirectoryEntry(addr).DataBlk;
    }

    // ** OUT_PORTS **
    out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
    out_port(responseNetwork_out, ResponseMsg, responseFromDir);
    out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
    out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);

    //added by SS
    out_port(memQueue_out, MemoryMsg, memBuffer);

    // ** IN_PORTS **

    in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
        if (dmaRequestQueue_in.isReady()) {
            peek(dmaRequestQueue_in, DMARequestMsg) {
                TBE tbe := TBEs[in_msg.LineAddress];
                if (in_msg.Type == DMARequestType:READ) {
                    trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
                } else if (in_msg.Type == DMARequestType:WRITE) {
                    trigger(Event:DMA_WRITE, in_msg.LineAddress, tbe);
                } else {
                    error("Invalid message");
                }
            }
        }
    }

    in_port(requestQueue_in, RequestMsg, requestToDir) {
        if (requestQueue_in.isReady()) {
            peek(requestQueue_in, RequestMsg) {
                TBE tbe := TBEs[in_msg.Addr];
                if (in_msg.Type == CoherenceRequestType:GETS) {
                    trigger(Event:GETS, in_msg.Addr, tbe);
                } else if (in_msg.Type == CoherenceRequestType:GETX) {
                    trigger(Event:GETX, in_msg.Addr, tbe);
                } else if (in_msg.Type == CoherenceRequestType:PUTX) {
                    // Distinguish a legitimate owner writeback from a stale PUTX.
                    if (getDirectoryEntry(in_msg.Addr).Owner.isElement(in_msg.Requestor)) {
                        trigger(Event:PUTX, in_msg.Addr, tbe);
                    } else {
                        trigger(Event:PUTX_NotOwner, in_msg.Addr, tbe);
                    }
                } else {
                    error("Invalid message");
                }
            }
        }
    }

    //added by SS
    // off-chip memory request/response is done
    in_port(memQueue_in, MemoryMsg, memBuffer) {
        if (memQueue_in.isReady()) {
            peek(memQueue_in, MemoryMsg) {
                TBE tbe := TBEs[in_msg.Addr];
                if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
                    trigger(Event:Memory_Data, in_msg.Addr, tbe);
                } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
                    trigger(Event:Memory_Ack, in_msg.Addr, tbe);
                } else {
                    DPRINTF(RubySlicc,"%s\n", in_msg.Type);
                    error("Invalid message");
                }
            }
        }
    }

    // Actions

    action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
        peek(requestQueue_in, RequestMsg) {
            enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
                out_msg.Addr := address;
                out_msg.Type := CoherenceRequestType:WB_ACK;
                out_msg.Requestor := in_msg.Requestor;
                out_msg.Destination.add(in_msg.Requestor);
                out_msg.MessageSize := MessageSizeType:Writeback_Control;
            }
        }
    }

    // Like a_sendWriteBackAck, but issued off the memory queue: the original
    // requestor is recovered from the memory message, not the request queue.
    action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
        peek(memQueue_in, MemoryMsg) {
            enqueue(forwardNetwork_out, RequestMsg, 1) {
                out_msg.Addr := address;
                out_msg.Type := CoherenceRequestType:WB_ACK;
                out_msg.Requestor := in_msg.OriginalRequestorMachId;
                out_msg.Destination.add(in_msg.OriginalRequestorMachId);
                out_msg.MessageSize := MessageSizeType:Writeback_Control;
            }
        }
    }

    action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
        peek(requestQueue_in, RequestMsg) {
            enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
                out_msg.Addr := address;
                out_msg.Type := CoherenceRequestType:WB_NACK;
                out_msg.Requestor := in_msg.Requestor;
                out_msg.Destination.add(in_msg.Requestor);
                out_msg.MessageSize := MessageSizeType:Writeback_Control;
            }
        }
    }

    action(c_clearOwner, "c", desc="Clear the owner field") {
        getDirectoryEntry(address).Owner.clear();
    }

    // Forward data fetched from memory to the processor that requested it.
    action(d_sendData, "d", desc="Send data to requestor") {
        peek(memQueue_in, MemoryMsg) {
            enqueue(responseNetwork_out, ResponseMsg, 1) {
                out_msg.Addr := address;
                out_msg.Type := CoherenceResponseType:DATA;
                out_msg.Sender := machineID;
                out_msg.Destination.add(in_msg.OriginalRequestorMachId);
                out_msg.DataBlk := in_msg.DataBlk;
                out_msg.MessageSize := MessageSizeType:Response_Data;
            }
        }
    }

    action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
        peek(memQueue_in, MemoryMsg) {
            enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
                assert(is_valid(tbe));
                out_msg.PhysicalAddress := address;
                out_msg.LineAddress := address;
                out_msg.Type := DMAResponseType:DATA;
                out_msg.DataBlk := in_msg.DataBlk;   // we send the entire data block and rely on the dma controller to split it up if need be
                out_msg.Destination.add(tbe.DmaRequestor);
                out_msg.MessageSize := MessageSizeType:Response_Data;
            }
        }
    }

    action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
        peek(requestQueue_in, RequestMsg) {
            enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
                assert(is_valid(tbe));
                out_msg.PhysicalAddress := address;
                out_msg.LineAddress := address;
                out_msg.Type := DMAResponseType:DATA;
                out_msg.DataBlk := in_msg.DataBlk;   // we send the entire data block and rely on the dma controller to split it up if need be
                out_msg.Destination.add(tbe.DmaRequestor);
                out_msg.MessageSize := MessageSizeType:Response_Data;
            }
        }
    }

    action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
        enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
            assert(is_valid(tbe));
            out_msg.PhysicalAddress := address;
            out_msg.LineAddress := address;
            out_msg.Type := DMAResponseType:ACK;
            out_msg.Destination.add(tbe.DmaRequestor);
            out_msg.MessageSize := MessageSizeType:Writeback_Control;
        }
    }

    // Record the requestor as the sole owner (MI protocol: at most one owner).
    action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
        peek(requestQueue_in, RequestMsg) {
            getDirectoryEntry(address).Owner.clear();
            getDirectoryEntry(address).Owner.add(in_msg.Requestor);
        }
    }

    action(f_forwardRequest, "f", desc="Forward request to owner") {
        peek(requestQueue_in, RequestMsg) {
            APPEND_TRANSITION_COMMENT("Own: ");
            APPEND_TRANSITION_COMMENT(getDirectoryEntry(in_msg.Addr).Owner);
            APPEND_TRANSITION_COMMENT("Req: ");
            APPEND_TRANSITION_COMMENT(in_msg.Requestor);
            enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
                out_msg.Addr := address;
                out_msg.Type := in_msg.Type;
                out_msg.Requestor := in_msg.Requestor;
                out_msg.Destination := getDirectoryEntry(in_msg.Addr).Owner;
                out_msg.MessageSize := MessageSizeType:Writeback_Control;
            }
        }
    }

    // Invalidate the owner's copy so a DMA access can proceed; the owner will
    // respond with a PUTX carrying the dirty data.
    action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
        peek(dmaRequestQueue_in, DMARequestMsg) {
            enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
                out_msg.Addr := address;
                out_msg.Type := CoherenceRequestType:INV;
                out_msg.Requestor := machineID;
                out_msg.Destination := getDirectoryEntry(in_msg.PhysicalAddress).Owner;
                out_msg.MessageSize := MessageSizeType:Writeback_Control;
            }
        }
    }

    action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
        requestQueue_in.dequeue();
    }

    action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
        dmaRequestQueue_in.dequeue();
    }

    action(l_writeDataToMemory, "pl", desc="Write PUTX data to memory") {
        peek(requestQueue_in, RequestMsg) {
            // assert(in_msg.Dirty);
            // assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
            getDirectoryEntry(in_msg.Addr).DataBlk := in_msg.DataBlk;
            //getDirectoryEntry(in_msg.Addr).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Addr), in_msg.Len);
        }
    }

    // Merge only the bytes the DMA write covered (offset/Len from the TBE).
    action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
        assert(is_valid(tbe));
        getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
    }

    action(v_allocateTBE, "v", desc="Allocate TBE") {
        peek(dmaRequestQueue_in, DMARequestMsg) {
            TBEs.allocate(address);
            set_tbe(TBEs[address]);
            tbe.DataBlk := in_msg.DataBlk;
            tbe.PhysicalAddress := in_msg.PhysicalAddress;
            tbe.Len := in_msg.Len;
            tbe.DmaRequestor := in_msg.Requestor;
        }
    }

    // DMA reads only need to remember who to reply to; no data/len is stored.
    action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
        peek(dmaRequestQueue_in, DMARequestMsg) {
            TBEs.allocate(address);
            set_tbe(TBEs[address]);
            tbe.DmaRequestor := in_msg.Requestor;
        }
    }

    action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
        peek(requestQueue_in, RequestMsg) {
            TBEs.allocate(address);
            set_tbe(TBEs[address]);
            tbe.DataBlk := in_msg.DataBlk;
        }
    }

    action(w_deallocateTBE, "w", desc="Deallocate TBE") {
        TBEs.deallocate(address);
        unset_tbe();
    }

    action(z_recycleRequestQueue, "z", desc="recycle request queue") {
        requestQueue_in.recycle();
    }

    action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
        dmaRequestQueue_in.recycle();
    }

    action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
        peek(requestQueue_in, RequestMsg) {
            enqueue(memQueue_out, MemoryMsg, 1) {
                out_msg.Addr := address;
                out_msg.Type := MemoryRequestType:MEMORY_READ;
                out_msg.Sender := machineID;
                out_msg.OriginalRequestorMachId := in_msg.Requestor;
                out_msg.MessageSize := in_msg.MessageSize;
                out_msg.DataBlk := getDirectoryEntry(in_msg.Addr).DataBlk;
                DPRINTF(RubySlicc,"%s\n", out_msg);
            }
        }
    }

    action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
        peek(dmaRequestQueue_in, DMARequestMsg) {
            enqueue(memQueue_out, MemoryMsg, 1) {
                out_msg.Addr := address;
                out_msg.Type := MemoryRequestType:MEMORY_READ;
                out_msg.Sender := machineID;
                //out_msg.OriginalRequestorMachId := machineID;
                out_msg.MessageSize := in_msg.MessageSize;
                out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
                DPRINTF(RubySlicc,"%s\n", out_msg);
            }
        }
    }

    // Writeback for a DMA write that hit in state I: only the bytes covered
    // by the DMA request are merged into the outgoing block.
    action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
        peek(dmaRequestQueue_in, DMARequestMsg) {
            enqueue(memQueue_out, MemoryMsg, 1) {
                out_msg.Addr := address;
                out_msg.Type := MemoryRequestType:MEMORY_WB;
                //out_msg.OriginalRequestorMachId := machineID;
                //out_msg.DataBlk := in_msg.DataBlk;
                out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.PhysicalAddress), in_msg.Len);
                out_msg.MessageSize := in_msg.MessageSize;
                //out_msg.Prefetch := in_msg.Prefetch;

                DPRINTF(RubySlicc,"%s\n", out_msg);
            }
        }
    }

    // Writeback for a DMA write that had to invalidate an owner: partial data
    // comes from the TBE (saved at DMA_WRITE time), requestor from the PUTX.
    action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
        peek(requestQueue_in, RequestMsg) {
            enqueue(memQueue_out, MemoryMsg, 1) {
                assert(is_valid(tbe));
                out_msg.Addr := address;
                out_msg.Type := MemoryRequestType:MEMORY_WB;
                out_msg.OriginalRequestorMachId := in_msg.Requestor;
                // get incoming data
                // out_msg.DataBlk := in_msg.DataBlk;
                out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
                out_msg.MessageSize := in_msg.MessageSize;
                //out_msg.Prefetch := in_msg.Prefetch;

                DPRINTF(RubySlicc,"%s\n", out_msg);
            }
        }
    }

    action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
        peek(requestQueue_in, RequestMsg) {
            enqueue(memQueue_out, MemoryMsg, 1) {
                out_msg.Addr := address;
                out_msg.Type := MemoryRequestType:MEMORY_WB;
                out_msg.Sender := machineID;
                out_msg.OriginalRequestorMachId := in_msg.Requestor;
                out_msg.DataBlk := in_msg.DataBlk;
                out_msg.MessageSize := in_msg.MessageSize;

                DPRINTF(RubySlicc,"%s\n", out_msg);
            }
        }
    }

    action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
        memQueue_in.dequeue();
    }

    action(w_writeDataToMemoryFromTBE, "\w", desc="Write date to directory memory from TBE") {
        assert(is_valid(tbe));
        // NOTE(review): TBEs[address] is the same entry as 'tbe' here (the
        // assert above guarantees it is valid) — 'tbe.DataBlk' would read
        // identically; confirm before simplifying.
        getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
    }

    // TRANSITIONS

    // Stall processor requests while a DMA operation is mid-flight.
    transition({M_DRD, M_DWR, M_DWRI, M_DRDI}, GETX) {
        z_recycleRequestQueue;
    }

    // Stall everything while waiting on memory (transient states).
    transition({IM, MI, ID, ID_W}, {GETX, GETS, PUTX, PUTX_NotOwner} ) {
        z_recycleRequestQueue;
    }

    transition({IM, MI, ID, ID_W}, {DMA_READ, DMA_WRITE} ) {
        y_recycleDMARequestQueue;
    }

    // Processor GETX on an idle block: fetch from memory, record new owner.
    transition(I, GETX, IM) {
        //d_sendData;
        qf_queueMemoryFetchRequest;
        e_ownerIsRequestor;
        i_popIncomingRequestQueue;
    }

    transition(IM, Memory_Data, M) {
        d_sendData;
        //e_ownerIsRequestor;
        l_popMemQueue;
    }

    // DMA read on an idle block: memory has the only copy, fetch and reply.
    transition(I, DMA_READ, ID) {
        //dr_sendDMAData;
        r_allocateTbeForDmaRead;
        qf_queueMemoryFetchRequestDMA;
        p_popIncomingDMARequestQueue;
    }

    transition(ID, Memory_Data, I) {
        dr_sendDMAData;
        //p_popIncomingDMARequestQueue;
        w_deallocateTBE;
        l_popMemQueue;
    }

    // DMA write on an idle block: write the covered bytes straight to memory.
    transition(I, DMA_WRITE, ID_W) {
        v_allocateTBE;
        qw_queueMemoryWBRequest_partial;
        p_popIncomingDMARequestQueue;
    }

    transition(ID_W, Memory_Ack, I) {
        dwt_writeDMADataFromTBE;
        da_sendDMAAck;
        w_deallocateTBE;
        l_popMemQueue;
    }

    // DMA read on an owned block: invalidate the owner first, then serve the
    // DMA from the PUTX data while also writing it back to memory.
    transition(M, DMA_READ, M_DRD) {
        v_allocateTBE;
        inv_sendCacheInvalidate;
        p_popIncomingDMARequestQueue;
    }

    transition(M_DRD, PUTX, M_DRDI) {
        l_writeDataToMemory;
        drp_sendDMAData;
        c_clearOwner;
        l_queueMemoryWBRequest;
        i_popIncomingRequestQueue;
    }

    transition(M_DRDI, Memory_Ack, I) {
        l_sendWriteBackAck;
        w_deallocateTBE;
        l_popMemQueue;
    }

    // DMA write on an owned block: invalidate, merge DMA bytes over the
    // owner's PUTX data, write back, then ack both parties.
    transition(M, DMA_WRITE, M_DWR) {
        v_allocateTBE;
        inv_sendCacheInvalidate;
        p_popIncomingDMARequestQueue;
    }

    transition(M_DWR, PUTX, M_DWRI) {
        l_writeDataToMemory;
        qw_queueMemoryWBRequest_partialTBE;
        c_clearOwner;
        i_popIncomingRequestQueue;
    }

    transition(M_DWRI, Memory_Ack, I) {
        w_writeDataToMemoryFromTBE;
        l_sendWriteBackAck;
        da_sendDMAAck;
        w_deallocateTBE;
        l_popMemQueue;
    }

    // Owner-to-owner transfer: forward the GETX to the current owner and
    // immediately record the requestor as the new owner.
    transition(M, GETX, M) {
        f_forwardRequest;
        e_ownerIsRequestor;
        i_popIncomingRequestQueue;
    }

    // Owner writes back voluntarily: save its data, write to memory, ack later.
    transition(M, PUTX, MI) {
        c_clearOwner;
        v_allocateTBEFromRequestNet;
        l_queueMemoryWBRequest;
        i_popIncomingRequestQueue;
    }

    transition(MI, Memory_Ack, I) {
        w_writeDataToMemoryFromTBE;
        l_sendWriteBackAck;
        w_deallocateTBE;
        l_popMemQueue;
    }

    // Stale PUTX (sender lost ownership in flight): nack it.
    transition(M, PUTX_NotOwner, M) {
        b_sendWriteBackNack;
        i_popIncomingRequestQueue;
    }

    transition(I, PUTX_NotOwner, I) {
        b_sendWriteBackNack;
        i_popIncomingRequestQueue;
    }

}