From e4e359135eb5c2f456800f212fe0c3ef52a43770 Mon Sep 17 00:00:00 2001 From: wmin0 Date: Tue, 6 Feb 2024 03:08:20 +0800 Subject: [PATCH] systemc: Reduce unnecessary backdoor request in atomic transaction (#795) The backdoor request in b_transport is only used for hinting the dmi capability. Since most traffic patterns are continuous, we can cache the previous backdoor request result to spare the backdoor inspection of the next request. Change-Id: I53c47226f949dd0be19d52cad0650fcfd62eebbc --- src/systemc/tlm_bridge/tlm_to_gem5.cc | 54 ++++++++++++++++++++------- src/systemc/tlm_bridge/tlm_to_gem5.hh | 2 + 2 files changed, 43 insertions(+), 13 deletions(-) diff --git a/src/systemc/tlm_bridge/tlm_to_gem5.cc b/src/systemc/tlm_bridge/tlm_to_gem5.cc index 036ca738ca..0830fc841e 100644 --- a/src/systemc/tlm_bridge/tlm_to_gem5.cc +++ b/src/systemc/tlm_bridge/tlm_to_gem5.cc @@ -299,6 +299,24 @@ TlmToGem5Bridge::destroyPacket(PacketPtr pkt) delete pkt; } +template +void +TlmToGem5Bridge::cacheBackdoor(gem5::MemBackdoorPtr backdoor) +{ + if (backdoor == nullptr) return; + + // We only need to register the callback at the first time. + if (requestedBackdoors.find(backdoor) == requestedBackdoors.end()) { + backdoor->addInvalidationCallback( + [this](const MemBackdoor &backdoor) + { + invalidateDmi(backdoor); + } + ); + requestedBackdoors.emplace(backdoor); + } +} + template void TlmToGem5Bridge::invalidateDmi(const gem5::MemBackdoor &backdoor) @@ -360,9 +378,29 @@ TlmToGem5Bridge::b_transport(tlm::tlm_generic_payload &trans, pkt->pushSenderState(new Gem5SystemC::TlmSenderState(trans)); MemBackdoorPtr backdoor = nullptr; - Tick ticks = bmp.sendAtomicBackdoor(pkt, backdoor); - if (backdoor) + Tick ticks = 0; + + // Check if we have a backdoor that meets the request. If yes, we can just hint + the requestor that DMI is supported. 
+ for (auto& b : requestedBackdoors) { + if (pkt->getAddrRange().isSubset(b->range()) && + ((!pkt->isWrite() && b->readable()) || + (pkt->isWrite() && b->writeable()))) { + backdoor = b; + } + } + + if (backdoor) { + ticks = bmp.sendAtomic(pkt); + } else { + ticks = bmp.sendAtomicBackdoor(pkt, backdoor); + } + + // Hint the requestor that DMI is supported. + if (backdoor) { trans.set_dmi_allowed(true); + cacheBackdoor(backdoor); + } // send an atomic request to gem5 panic_if(pkt->needsResponse() && !pkt->isResponse(), @@ -450,17 +488,7 @@ TlmToGem5Bridge::get_direct_mem_ptr(tlm::tlm_generic_payload &trans, if (backdoor->writeable()) access = (access_t)(access | tlm::tlm_dmi::DMI_ACCESS_WRITE); dmi_data.set_granted_access(access); - - // We only need to register the callback at the first time. - if (requestedBackdoors.find(backdoor) == requestedBackdoors.end()) { - backdoor->addInvalidationCallback( - [this](const MemBackdoor &backdoor) - { - invalidateDmi(backdoor); - } - ); - requestedBackdoors.emplace(backdoor); - } + cacheBackdoor(backdoor); } trans.set_response_status(tlm::TLM_OK_RESPONSE); diff --git a/src/systemc/tlm_bridge/tlm_to_gem5.hh b/src/systemc/tlm_bridge/tlm_to_gem5.hh index 32c477e6f2..baa58be768 100644 --- a/src/systemc/tlm_bridge/tlm_to_gem5.hh +++ b/src/systemc/tlm_bridge/tlm_to_gem5.hh @@ -143,6 +143,8 @@ class TlmToGem5Bridge : public TlmToGem5BridgeBase void invalidateDmi(const gem5::MemBackdoor &backdoor); + void cacheBackdoor(gem5::MemBackdoorPtr backdoor); + protected: // payload event call back void peq_cb(tlm::tlm_generic_payload &trans, const tlm::tlm_phase &phase);