dev-amdgpu: Track outstanding chunks in mem manager

Requests sent using the GPU memory manager are not guaranteed to be
ordered. As a result, the last chunk created by the chunk generator
could complete before all of the previous chunks are done. This triggers
the final callback early and may cause an SDMA/PM4/etc. packet that is
waiting on that completion to resume before the data is ready.

This is likely a fix for verification failures in many applications.
Currently this is tested on MatrixTranspose from the HIP cookbook, which
now passes its verification step. It could also potentially fix other
race conditions between memory reads and writes, such as a PTE or PDE
being used before it is written.

Change-Id: Id6fb342d899db6bd0b86c80056ecf91eeb3026f5
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/62714
Reviewed-by: Matt Sinclair <mattdsinclair@gmail.com>
Reviewed-by: Jason Lowe-Power <power.jg@gmail.com>
Maintainer: Jason Lowe-Power <power.jg@gmail.com>
Tested-by: kokoro <noreply+kokoro@google.com>
Maintainer: Matt Sinclair <mattdsinclair@gmail.com>

commit 404aa34855
parent 432329c853
Author: Matthew Poremba
Date:   2022-08-25 16:03:17 -07:00

2 changed files with 73 additions and 18 deletions
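
The tracking scheme the patch introduces can be illustrated standalone,
roughly as follows. This is a minimal hypothetical sketch, not the gem5
code: the names ChunkTracker, start(), issued(), and completed() are
invented for illustration, while the real change keeps the equivalent
state in RequestStatus/requestStatus and the GPUMemPort sender state
shown in the diff below.

// Hypothetical, simplified illustration (not the gem5 code): per-request
// bookkeeping that fires the completion callback only after every chunk
// of a multi-chunk DMA has returned, regardless of completion order.
#include <cstdint>
#include <functional>
#include <unordered_map>

struct ChunkTracker
{
    struct Status
    {
        uint64_t outstandingChunks = 0;
        bool sentLastChunk = false;
        std::function<void()> callback;
    };

    uint64_t nextId = 0;
    std::unordered_map<uint64_t, Status> requests;

    // Called once per request, before it is split into chunks.
    uint64_t start(std::function<void()> cb)
    {
        uint64_t id = nextId++;
        requests[id].callback = std::move(cb);
        return id;
    }

    // Called for every chunk issued; the last chunk is marked explicitly.
    void issued(uint64_t id, bool last)
    {
        auto &s = requests.at(id);
        s.outstandingChunks++;
        if (last)
            s.sentLastChunk = true;
    }

    // Called when any chunk's response arrives, in any order. The callback
    // runs only after the final outstanding chunk of a fully issued request.
    void completed(uint64_t id)
    {
        auto &s = requests.at(id);
        if (--s.outstandingChunks == 0 && s.sentLastChunk) {
            if (s.callback)
                s.callback();
            requests.erase(id);
        }
    }
};

With this bookkeeping, a response for the last-issued chunk that happens
to arrive before earlier chunks have returned no longer fires the callback
early; the callback runs only once the outstanding count reaches zero after
the final chunk has been issued.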

src/dev/amdgpu/memory_manager.cc

@@ -43,7 +43,7 @@ namespace gem5
 {
 
 AMDGPUMemoryManager::AMDGPUMemoryManager(const AMDGPUMemoryManagerParams &p)
-    : ClockedObject(p), _gpuMemPort(csprintf("%s-port", name()), this),
+    : ClockedObject(p), _gpuMemPort(csprintf("%s-port", name()), *this),
       cacheLineSize(p.system->cacheLineSize()),
       _requestorId(p.system->getRequestorId(this))
 {
@@ -55,6 +55,14 @@ AMDGPUMemoryManager::writeRequest(Addr addr, uint8_t *data, int size,
 {
     assert(data);
 
+    // Requests may return out of order, so we should track how many chunks
+    // are outstanding and if the last chunk was sent. Give each status struct
+    // a unique ID so that DMAs to the same address may occur at the same time
+    requestStatus.emplace(std::piecewise_construct,
+                          std::forward_as_tuple(requestId), std::tuple<>{});
+    DPRINTF(AMDGPUMem, "Created status for write request %ld\n", requestId);
+
     ChunkGenerator gen(addr, size, cacheLineSize);
     for (; !gen.done(); gen.next()) {
         RequestPtr req = std::make_shared<Request>(gen.addr(), gen.size(),
@@ -66,11 +74,11 @@ AMDGPUMemoryManager::writeRequest(Addr addr, uint8_t *data, int size,
                     gen.size());
         pkt->dataDynamic<uint8_t>(dataPtr);
 
         // We only want to issue the callback on the last request completing.
+        pkt->pushSenderState(
+            new GPUMemPort::SenderState(callback, addr, requestId));
+        requestStatus.at(requestId).outstandingChunks++;
         if (gen.last()) {
-            pkt->pushSenderState(new GPUMemPort::SenderState(callback, addr));
-        } else {
-            pkt->pushSenderState(new GPUMemPort::SenderState(nullptr, addr));
+            requestStatus.at(requestId).sentLastChunk = true;
         }
 
         if (!_gpuMemPort.sendTimingReq(pkt)) {
@@ -80,6 +88,8 @@ AMDGPUMemoryManager::writeRequest(Addr addr, uint8_t *data, int size,
             DPRINTF(AMDGPUMem, "Write request to %#lx sent\n", gen.addr());
         }
     }
+
+    requestId++;
 }
 
 void
@@ -89,6 +99,14 @@ AMDGPUMemoryManager::readRequest(Addr addr, uint8_t *data, int size,
     assert(data);
 
     uint8_t *dataPtr = data;
 
+    // Requests may return out of order, so we should track how many chunks
+    // are outstanding and if the last chunk was sent. Give each status struct
+    // a unique ID so that DMAs to the same address may occur at the same time
+    requestStatus.emplace(std::piecewise_construct,
+                          std::forward_as_tuple(requestId), std::tuple<>{});
+    DPRINTF(AMDGPUMem, "Created status for read request %ld\n", requestId);
+
     ChunkGenerator gen(addr, size, cacheLineSize);
     for (; !gen.done(); gen.next()) {
         RequestPtr req = std::make_shared<Request>(gen.addr(), gen.size(),
@@ -98,9 +116,12 @@ AMDGPUMemoryManager::readRequest(Addr addr, uint8_t *data, int size,
         pkt->dataStatic<uint8_t>(dataPtr);
         dataPtr += gen.size();
 
         // Only issue the callback on the last request completing
-        pkt->pushSenderState(new GPUMemPort::SenderState(
-            gen.last() ? callback : nullptr, addr));
+        pkt->pushSenderState(
+            new GPUMemPort::SenderState(callback, addr, requestId));
+        requestStatus.at(requestId).outstandingChunks++;
+        if (gen.last()) {
+            requestStatus.at(requestId).sentLastChunk = true;
+        }
 
         if (!_gpuMemPort.sendTimingReq(pkt)) {
             DPRINTF(AMDGPUMem, "Request to %#lx needs retry\n", gen.addr());
@@ -109,6 +130,8 @@ AMDGPUMemoryManager::readRequest(Addr addr, uint8_t *data, int size,
             DPRINTF(AMDGPUMem, "Read request to %#lx sent\n", gen.addr());
         }
     }
+
+    requestId++;
 }
 
 bool
@@ -118,12 +141,29 @@ AMDGPUMemoryManager::GPUMemPort::recvTimingResp(PacketPtr pkt)
     [[maybe_unused]] SenderState *sender_state =
         safe_cast<SenderState*>(pkt->senderState);
 
-    DPRINTF(AMDGPUMem, "Recveived Response for %#x\n", sender_state->_addr);
+    // Check if all chunks have completed, the last chunk was sent, and there
+    // is a callback, call the callback now.
+    assert(gpu_mem.requestStatus.count(sender_state->_requestId));
+    auto& status = gpu_mem.requestStatus.at(sender_state->_requestId);
 
-    // Check if there is a callback event and if so call it
-    if (sender_state->_callback) {
-        sender_state->_callback->process();
-        delete sender_state->_callback;
+    assert(status.outstandingChunks != 0);
+    status.outstandingChunks--;
+    DPRINTF(AMDGPUMem, "Received Response for %#x. %d chunks remain, sent "
+            "last = %d, requestId = %ld\n", sender_state->_addr,
+            status.outstandingChunks, status.sentLastChunk,
+            sender_state->_requestId);
+
+    if (!status.outstandingChunks && status.sentLastChunk) {
+        // Call and free the callback if there is one
+        if (sender_state->_callback) {
+            DPRINTF(AMDGPUMem, "Calling callback for request %ld\n",
+                    sender_state->_requestId);
+            sender_state->_callback->process();
+            delete sender_state->_callback;
+        }
+
+        DPRINTF(AMDGPUMem, "Deleting status for request %ld\n",
+                sender_state->_requestId);
+        gpu_mem.requestStatus.erase(sender_state->_requestId);
     }
 
     delete pkt->senderState;

src/dev/amdgpu/memory_manager.hh

@@ -33,6 +33,7 @@
 #define __DEV_AMDGPU_MEMORY_MANAGER_HH__
 
 #include <deque>
+#include <unordered_map>
 
 #include "base/callback.hh"
 #include "mem/port.hh"
@@ -46,9 +47,9 @@ class AMDGPUMemoryManager : public ClockedObject
 {
     class GPUMemPort : public MasterPort
     {
       public:
-        GPUMemPort(const std::string &_name, AMDGPUMemoryManager *_gpuMemMgr)
-            : MasterPort(_name, _gpuMemMgr)
+        GPUMemPort(const std::string &_name, AMDGPUMemoryManager &_gpuMemMgr)
+            : MasterPort(_name, &_gpuMemMgr), gpu_mem(_gpuMemMgr)
         {
         }
@@ -57,21 +58,35 @@ class AMDGPUMemoryManager : public ClockedObject
         struct SenderState : public Packet::SenderState
         {
-            SenderState(Event *callback, Addr addr)
-                : _callback(callback), _addr(addr)
+            SenderState(Event *callback, Addr addr, uint64_t requestId)
+                : _callback(callback), _addr(addr), _requestId(requestId)
             {}
 
             Event *_callback;
             Addr _addr;
+            uint64_t _requestId;
         };
 
         std::deque<PacketPtr> retries;
+        AMDGPUMemoryManager &gpu_mem;
     };
 
     GPUMemPort _gpuMemPort;
     const int cacheLineSize;
     const RequestorID _requestorId;
 
+    struct RequestStatus
+    {
+        RequestStatus() : outstandingChunks(0), sentLastChunk(false)
+        { }
+
+        uint64_t outstandingChunks;
+        bool sentLastChunk;
+    };
+
+    uint64_t requestId = 0;
+    std::unordered_map<uint64_t, RequestStatus> requestStatus;
+
   public:
     AMDGPUMemoryManager(const AMDGPUMemoryManagerParams &p);
     ~AMDGPUMemoryManager() {};