memtest: Memtester support for DMA

This patch adds DMA testing to the Memtester and inherits many changes from
Polina's old tester_dma_extension patch.  Since Ruby does not work in atomic
mode, the atomic mode options are removed.
Brad Beckmann
2010-08-20 11:46:12 -07:00
parent 64b2205992
commit 808701a10c
11 changed files with 114 additions and 32 deletions

View File

@@ -38,6 +38,7 @@ class MemTest(MemObject):
    percent_dest_unaligned = Param.Percent(50,
        "percent of copy dest address that are unaligned")
    percent_reads = Param.Percent(65, "target read percentage")
    issue_dmas = Param.Bool(False, "this memtester should issue dma requests")
    percent_source_unaligned = Param.Percent(50,
        "percent of copy source address that are unaligned")
    percent_functional = Param.Percent(50, "percent of access that are functional")
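
For context, the new issue_dmas parameter is all a config script needs in
order to turn a MemTest instance into a DMA traffic generator. Below is a
minimal sketch, not part of this commit: the variable name and values are
illustrative, and how the tester's port gets wired up to a Ruby DMASequencer
is system-dependent and omitted here.

    from m5.objects import MemTest

    # Hypothetical config snippet; issue_dmas is the parameter added by
    # this patch, the rest are existing MemTest parameters.
    dma_tester = MemTest(issue_dmas=True,
                         percent_reads=65,
                         percent_uncacheable=0,
                         max_loads=1000000)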

View File

@@ -109,8 +109,20 @@ MemTest::sendPkt(PacketPtr pkt) {
        completeRequest(pkt);
    }
    else if (!cachePort.sendTiming(pkt)) {
        DPRINTF(MemTest, "accessRetry setting to true\n");

        //
        // dma requests should never be retried
        //
        if (issueDmas) {
            panic("Nacked DMA requests are not supported\n");
        }

        accessRetry = true;
        retryPkt = pkt;
    } else {
        if (issueDmas) {
            dmaOutstanding = true;
        }
    }
}
@@ -127,6 +139,7 @@ MemTest::MemTest(const Params *p)
      percentReads(p->percent_reads),
      percentFunctional(p->percent_functional),
      percentUncacheable(p->percent_uncacheable),
      issueDmas(p->issue_dmas),
      progressInterval(p->progress_interval),
      nextProgressMessage(p->progress_interval),
      percentSourceUnaligned(p->percent_source_unaligned),
@@ -134,6 +147,7 @@ MemTest::MemTest(const Params *p)
      maxLoads(p->max_loads),
      atomic(p->atomic)
{
    vector<string> cmd;
    cmd.push_back("/bin/ls");
    vector<string> null_vec;
@@ -143,6 +157,8 @@ MemTest::MemTest(const Params *p)
    cachePort.snoopRangeSent = false;
    funcPort.snoopRangeSent = true;

    id = TESTER_ALLOCATOR++;

    // Needs to be masked off once we know the block size.
    traceBlockAddr = p->trace_addr;
    baseAddr1 = 0x100000;
@@ -154,9 +170,8 @@ MemTest::MemTest(const Params *p)
    numReads = 0;
    schedule(tickEvent, 0);

    accessRetry = false;
    dmaOutstanding = false;
}
Port *
@@ -188,6 +203,10 @@ MemTest::completeRequest(PacketPtr pkt)
{
    Request *req = pkt->req;

    if (issueDmas) {
        dmaOutstanding = false;
    }

    DPRINTF(MemTest, "completing %s at address %x (blk %x)\n",
            pkt->isWrite() ? "write" : "read",
            req->getPaddr(), blockAddr(req->getPaddr()));
@@ -265,11 +284,15 @@ MemTest::tick()
    schedule(tickEvent, curTick + ticks(1));

    if (++noResponseCycles >= 500000) {
        if (issueDmas) {
            cerr << "DMA tester ";
        }
        cerr << name() << ": deadlocked at cycle " << curTick << endl;
        fatal("");
    }

    if (accessRetry || (issueDmas && dmaOutstanding)) {
        DPRINTF(MemTest, "MemTester waiting on accessRetry or DMA response\n");
        return;
    }
@@ -281,6 +304,8 @@ MemTest::tick()
    unsigned access_size = random() % 4;
    bool uncacheable = (random() % 100) < percentUncacheable;

    unsigned dma_access_size = random() % 4;

    //If we aren't doing copies, use id as offset, and do a false sharing
    //mem tester
    //We can eliminate the lower bits of the offset, and then use the id
@@ -288,6 +313,7 @@ MemTest::tick()
    offset = blockAddr(offset);
    offset += id;
    access_size = 0;
    dma_access_size = 0;

    Request *req = new Request();
    Request::Flags flags;
@@ -296,14 +322,21 @@ MemTest::tick()
    if (uncacheable) {
        flags.set(Request::UNCACHEABLE);
        paddr = uncacheAddr + offset;
    } else {
        paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
    }
    bool probe = (random() % 100 < percentFunctional) && !uncacheable;

    if (issueDmas) {
        paddr &= ~((1 << dma_access_size) - 1);
        req->setPhys(paddr, 1 << dma_access_size, flags);
        req->setThreadContext(id,0);
    } else {
        paddr &= ~((1 << access_size) - 1);
        req->setPhys(paddr, 1 << access_size, flags);
        req->setThreadContext(id,0);
    }
    assert(req->getSize() == 1);
uint8_t *result = new uint8_t[8];
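
The two tick() hunks above decide each request's size and alignment: a
separate dma_access_size is rolled for DMA testers, and the physical address
is masked down to the chosen power-of-two size. A rough Python model of that
selection logic, purely illustrative (the helper name and region size are
made up, not part of the patch):

    import random

    def gen_request(issue_dmas, base_addr, uncache_addr, uncacheable_pct):
        access_size = random.randrange(4)       # power-of-two sizes, 1-8 bytes
        dma_access_size = random.randrange(4)
        offset = random.randrange(0x10000)      # hypothetical region size
        uncacheable = random.randrange(100) < uncacheable_pct
        paddr = (uncache_addr if uncacheable else base_addr) + offset
        size = 1 << (dma_access_size if issue_dmas else access_size)
        paddr &= ~(size - 1)                    # align address to the size
        return paddr, size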
@@ -325,8 +358,8 @@ MemTest::tick()
        funcPort.readBlob(req->getPaddr(), result, req->getSize());

        DPRINTF(MemTest,
                "id %d initiating read at address %x (blk %x) expecting %x\n",
                id, req->getPaddr(), blockAddr(req->getPaddr()), *result);

        PacketPtr pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        pkt->setSrc(0);
@@ -380,6 +413,7 @@ void
MemTest::doRetry()
{
    if (cachePort.sendTiming(retryPkt)) {
        DPRINTF(MemTest, "accessRetry setting to false\n");
        accessRetry = false;
        retryPkt = NULL;
    }
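
Taken together, the memtest.cc changes enforce a single outstanding DMA per
tester: sendPkt() sets dmaOutstanding once a DMA is accepted (and panics on a
nack, since DMA retries are unsupported), tick() refuses to issue while the
flag is set, and completeRequest() clears it when the response arrives. A toy
Python model of that handshake, purely illustrative and not gem5 code:

    class DmaTesterModel:
        def __init__(self):
            self.dma_outstanding = False

        def tick(self):
            if self.dma_outstanding:
                return              # mirrors "issueDmas && dmaOutstanding"
            self.send_pkt()

        def send_pkt(self):
            self.dma_outstanding = True   # one DMA now in flight

        def complete_request(self):
            self.dma_outstanding = False  # response arrived; may issue again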

View File

@@ -132,6 +132,11 @@ class MemTest : public MemObject
    PacketPtr retryPkt;
    bool accessRetry;

    //
    // The dmaOutstanding flag enforces that only one DMA request is
    // outstanding at a time
    //
    bool dmaOutstanding;

    unsigned size; // size of testing memory region
@@ -139,6 +144,8 @@ class MemTest : public MemObject
    unsigned percentFunctional; // target percentage of functional accesses
    unsigned percentUncacheable;
    bool issueDmas;

    int id;
    std::set<unsigned> outstandingAddrs;

View File

@@ -49,6 +49,10 @@ DMASequencer::init()
RequestStatus
DMASequencer::makeRequest(const RubyRequest &request)
{
    if (m_is_busy) {
        return RequestStatus_BufferFull;
    }

    uint64_t paddr = request.paddr;
    uint8_t* data = request.data;
    int len = request.len;
@@ -108,6 +112,7 @@ DMASequencer::issueNext()
    assert(m_is_busy == true);
    active_request.bytes_completed = active_request.bytes_issued;
    if (active_request.len == active_request.bytes_completed) {
        DPRINTF(RubyDma, "DMA request completed\n");
        ruby_hit_callback(active_request.pkt);
        m_is_busy = false;
        return;
@@ -141,6 +146,7 @@ DMASequencer::issueNext()
    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg);
    active_request.bytes_issued += msg->getLen();
    DPRINTF(RubyDma, "Next DMA segment issued to the DMA cntrl\n");
}

void
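
On the Ruby side, makeRequest() now rejects new requests with
RequestStatus_BufferFull while a DMA is in flight, and issueNext() walks the
request one segment at a time, bumping bytes_issued per mandatory-queue
message and firing ruby_hit_callback() once bytes_completed reaches len. A
rough Python sketch of that segmentation, assuming cache-block-bounded
segments (an assumption; the real block size comes from the Ruby
configuration):

    def dma_segments(paddr, length, block_bytes=64):
        # yield (address, size) pairs, one per mandatory-queue message
        issued = 0
        while issued < length:
            addr = paddr + issued
            seg = min(block_bytes - addr % block_bytes, length - issued)
            yield (addr, seg)
            issued += seg

    # e.g. a 100-byte DMA starting 8 bytes into a 64-byte block:
    # list(dma_segments(0x108, 100)) -> [(0x108, 56), (0x140, 44)]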

View File

@@ -52,3 +52,4 @@ Source('System.cc')
Source('TimerTable.cc')
TraceFlag('RubyCache')
TraceFlag('RubyDma')