mem: Split memory controller into base MemCtrl and HeteroMemCtrl

This change splits the default gem5 memory controller into two
memory controllers: MemCtrl (base memory controller which can be
used with only a single memory interface dram/nvm), and
HeteroMemCtrl (heterogeneous memory controller which inherits from
MemCtrl and requires a dram and an nvm memory interface).
New arguments are added to many of the base class (MemCtrl) functions
(for example, the memory interface to use that function for), which
makes these functions easier to reuse in the inherited class (HeteroMemCtrl).

Change-Id: Ifa4e9f9f1560c47063d1a8159a8c94add2e670bb
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/59731
Tested-by: kokoro <noreply+kokoro@google.com>
Maintainer: Bobby Bruce <bbruce@ucdavis.edu>
Maintainer: Jason Lowe-Power <power.jg@gmail.com>
Reviewed-by: Bobby Bruce <bbruce@ucdavis.edu>
This commit is contained in:
Maryam Babaie
2022-05-13 18:03:37 -07:00
committed by Bobby Bruce
parent c7c11c5661
commit b0fd05dd3d
11 changed files with 1018 additions and 343 deletions

View File

@@ -235,7 +235,7 @@ def config_mem(options, system):
# Create a controller if not sharing a channel with DRAM
# in which case the controller has already been created
if not opt_hybrid_channel:
mem_ctrl = m5.objects.MemCtrl()
mem_ctrl = m5.objects.HeteroMemCtrl()
mem_ctrl.nvm = nvm_intf
mem_ctrls.append(mem_ctrl)

View File

@@ -106,14 +106,14 @@ MemConfig.config_mem(args, system)
# controller with an NVM interface, check to be sure
if not isinstance(system.mem_ctrls[0], m5.objects.MemCtrl):
fatal("This script assumes the controller is a MemCtrl subclass")
if not isinstance(system.mem_ctrls[0].nvm, m5.objects.NVMInterface):
if not isinstance(system.mem_ctrls[0].dram, m5.objects.NVMInterface):
fatal("This script assumes the memory is a NVMInterface class")
# there is no point slowing things down by saving any data
system.mem_ctrls[0].nvm.null = True
system.mem_ctrls[0].dram.null = True
# Set the address mapping based on input argument
system.mem_ctrls[0].nvm.addr_mapping = args.addr_map
system.mem_ctrls[0].dram.addr_mapping = args.addr_map
# stay in each state for 0.25 ms, long enough to warm things up, and
# short enough to avoid hitting a refresh
@@ -124,21 +124,21 @@ period = 250000000
# the DRAM maximum bandwidth to ensure that it is saturated
# get the number of regions
nbr_banks = system.mem_ctrls[0].nvm.banks_per_rank.value
nbr_banks = system.mem_ctrls[0].dram.banks_per_rank.value
# determine the burst length in bytes
burst_size = int((system.mem_ctrls[0].nvm.devices_per_rank.value *
system.mem_ctrls[0].nvm.device_bus_width.value *
system.mem_ctrls[0].nvm.burst_length.value) / 8)
burst_size = int((system.mem_ctrls[0].dram.devices_per_rank.value *
system.mem_ctrls[0].dram.device_bus_width.value *
system.mem_ctrls[0].dram.burst_length.value) / 8)
# next, get the page size in bytes
buffer_size = system.mem_ctrls[0].nvm.devices_per_rank.value * \
system.mem_ctrls[0].nvm.device_rowbuffer_size.value
buffer_size = system.mem_ctrls[0].dram.devices_per_rank.value * \
system.mem_ctrls[0].dram.device_rowbuffer_size.value
# match the maximum bandwidth of the memory, the parameter is in seconds
# and we need it in ticks (ps)
itt = system.mem_ctrls[0].nvm.tBURST.value * 1000000000000
itt = system.mem_ctrls[0].dram.tBURST.value * 1000000000000
# assume we start at 0
max_addr = mem_range.end
@@ -179,7 +179,7 @@ def trace():
0, max_addr, burst_size, int(itt), int(itt),
args.rd_perc, 0,
num_seq_pkts, buffer_size, nbr_banks, bank,
addr_map, args.nvm_ranks)
addr_map, args.dram_ranks)
yield system.tgen.createExit(0)
system.tgen.start(trace())

View File

@@ -117,8 +117,8 @@ MemConfig.config_mem(args, system)
# the following assumes that we are using the native controller
# with NVM and DRAM interfaces, check to be sure
if not isinstance(system.mem_ctrls[0], m5.objects.MemCtrl):
fatal("This script assumes the controller is a MemCtrl subclass")
if not isinstance(system.mem_ctrls[0], m5.objects.HeteroMemCtrl):
fatal("This script assumes the controller is a HeteroMemCtrl subclass")
if not isinstance(system.mem_ctrls[0].dram, m5.objects.DRAMInterface):
fatal("This script assumes the first memory is a DRAMInterface subclass")
if not isinstance(system.mem_ctrls[0].nvm, m5.objects.NVMInterface):

56
src/mem/HeteroMemCtrl.py Normal file
View File

@@ -0,0 +1,56 @@
# Copyright (c) 2012-2020 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2013 Amin Farmahini-Farahani
# Copyright (c) 2015 University of Kaiserslautern
# Copyright (c) 2015 The University of Bologna
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.params import *
from m5.proxy import *
from m5.objects.MemCtrl import *
# HeteroMemCtrl controls a dram and an nvm interface
# Both memory interfaces share the data and command bus
class HeteroMemCtrl(MemCtrl):
    # C++ identity of this SimObject: implemented by
    # gem5::memory::HeteroMemCtrl in mem/hetero_mem_ctrl.hh
    type = 'HeteroMemCtrl'
    cxx_header = "mem/hetero_mem_ctrl.hh"
    cxx_class = 'gem5::memory::HeteroMemCtrl'

    # Interface to nvm memory media
    # The dram interface `dram` used by HeteroMemCtrl is defined in
    # the MemCtrl
    nvm = Param.NVMInterface("NVM memory interface to use")

View File

@@ -59,11 +59,9 @@ class MemCtrl(QoSMemCtrl):
# bus in front of the controller for multiple ports
port = ResponsePort("This port responds to memory requests")
# Interface to volatile, DRAM media
dram = Param.DRAMInterface(NULL, "DRAM interface")
# Interface to non-volatile media
nvm = Param.NVMInterface(NULL, "NVM interface")
# Interface to memory media
dram = Param.MemInterface("Memory interface, can be a DRAM"
"or an NVM interface ")
# read and write buffer depths are set in the interface
# the controller will read these values when instantiated

View File

@@ -73,7 +73,7 @@ class NVMInterface(MemInterface):
the current interface.
"""
controller = MemCtrl()
controller.nvm = self
controller.dram = self
return controller
# NVM delays and device architecture defined to mimic PCM like memory.

View File

@@ -48,7 +48,9 @@ SimObject('AddrMapper.py', sim_objects=['AddrMapper', 'RangeAddrMapper'])
SimObject('Bridge.py', sim_objects=['Bridge'])
SimObject('SysBridge.py', sim_objects=['SysBridge'])
DebugFlag('SysBridge')
SimObject('MemCtrl.py', sim_objects=['MemCtrl'], enums=['MemSched'])
SimObject('MemCtrl.py', sim_objects=['MemCtrl'],
enums=['MemSched'])
SimObject('HeteroMemCtrl.py', sim_objects=['HeteroMemCtrl'])
SimObject('MemInterface.py', sim_objects=['MemInterface'], enums=['AddrMap'])
SimObject('DRAMInterface.py', sim_objects=['DRAMInterface'],
enums=['PageManage'])
@@ -74,6 +76,7 @@ Source('drampower.cc')
Source('external_master.cc')
Source('external_slave.cc')
Source('mem_ctrl.cc')
Source('hetero_mem_ctrl.cc')
Source('mem_interface.cc')
Source('dram_interface.cc')
Source('nvm_interface.cc')

462
src/mem/hetero_mem_ctrl.cc Normal file
View File

@@ -0,0 +1,462 @@
/*
* Copyright (c) 2010-2020 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2013 Amin Farmahini-Farahani
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/hetero_mem_ctrl.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/Drain.hh"
#include "debug/MemCtrl.hh"
#include "debug/NVM.hh"
#include "debug/QOS.hh"
#include "mem/dram_interface.hh"
#include "mem/mem_interface.hh"
#include "mem/nvm_interface.hh"
#include "sim/system.hh"
namespace gem5
{
namespace memory
{
// Construct a heterogeneous memory controller driving one DRAM and one
// NVM interface that share the command and data bus.
HeteroMemCtrl::HeteroMemCtrl(const HeteroMemCtrlParams &p) :
MemCtrl(p),
nvm(p.nvm)
{
DPRINTF(MemCtrl, "Setting up controller\n");
// one read queue and one write queue per QoS priority level
readQueue.resize(p.qos_priorities);
writeQueue.resize(p.qos_priorities);
// The base-class 'dram' param is a generic MemInterface; this
// controller requires a real DRAMInterface there and a real
// NVMInterface on 'nvm'.
fatal_if(dynamic_cast<DRAMInterface*>(dram) == nullptr,
"HeteroMemCtrl's dram interface must be of type DRAMInterface.\n");
fatal_if(dynamic_cast<NVMInterface*>(nvm) == nullptr,
"HeteroMemCtrl's nvm interface must be of type NVMInterface.\n");
// hook up interfaces to the controller
dram->setCtrl(this, commandWindow);
nvm->setCtrl(this, commandWindow);
// combined buffer capacity spans both interfaces; the write
// thresholds are recomputed from the combined size
readBufferSize = dram->readBufferSize + nvm->readBufferSize;
writeBufferSize = dram->writeBufferSize + nvm->writeBufferSize;
writeHighThreshold = writeBufferSize * p.write_high_thresh_perc / 100.0;
writeLowThreshold = writeBufferSize * p.write_low_thresh_perc / 100.0;
// perform a basic check of the write thresholds
if (p.write_low_thresh_perc >= p.write_high_thresh_perc)
fatal("Write buffer low threshold %d must be smaller than the "
"high threshold %d\n", p.write_low_thresh_perc,
p.write_high_thresh_perc);
}
Tick
HeteroMemCtrl::recvAtomic(PacketPtr pkt)
{
    // Steer the atomic access to whichever interface owns the address
    // and let the base controller perform the actual access.
    const Addr addr = pkt->getAddr();

    if (dram->getAddrRange().contains(addr)) {
        return MemCtrl::recvAtomicLogic(pkt, dram);
    }

    if (nvm->getAddrRange().contains(addr)) {
        return MemCtrl::recvAtomicLogic(pkt, nvm);
    }

    panic("Can't handle address range for packet %s\n", pkt->print());
}
// Accept (or reject for retry) a timing-mode request, steering it to
// the dram or nvm interface based on the packet's address.
// Returns false (and remembers to retry the port) when the relevant
// queue is full.
bool
HeteroMemCtrl::recvTimingReq(PacketPtr pkt)
{
// This is where we enter from the outside world
DPRINTF(MemCtrl, "recvTimingReq: request %s addr %#x size %d\n",
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
panic_if(pkt->cacheResponding(), "Should not see packets where cache "
"is responding");
panic_if(!(pkt->isRead() || pkt->isWrite()),
"Should only see read and writes at memory controller\n");
// Calc avg gap between requests
if (prevArrival != 0) {
stats.totGap += curTick() - prevArrival;
}
prevArrival = curTick();
// What type of media does this packet access?
bool is_dram;
if (dram->getAddrRange().contains(pkt->getAddr())) {
is_dram = true;
} else if (nvm->getAddrRange().contains(pkt->getAddr())) {
is_dram = false;
} else {
panic("Can't handle address range for packet %s\n",
pkt->print());
}
// Find out how many memory packets a pkt translates to
// If the burst size is equal or larger than the pkt size, then a pkt
// translates to only one memory packet. Otherwise, a pkt translates to
// multiple memory packets
unsigned size = pkt->getSize();
// burst size of the medium that will service this packet
uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
nvm->bytesPerBurst();
unsigned offset = pkt->getAddr() & (burst_size - 1);
unsigned int pkt_count = divCeil(offset + size, burst_size);
// run the QoS scheduler and assign a QoS priority value to the packet
qosSchedule( { &readQueue, &writeQueue }, burst_size, pkt);
// check local buffers and do not accept if full
if (pkt->isWrite()) {
assert(size != 0);
if (writeQueueFull(pkt_count)) {
DPRINTF(MemCtrl, "Write queue full, not accepting\n");
// remember that we have to retry this port
retryWrReq = true;
stats.numWrRetry++;
return false;
} else {
addToWriteQueue(pkt, pkt_count, is_dram ? dram : nvm);
// If we are not already scheduled to get a request out of the
// queue, do so now
if (!nextReqEvent.scheduled()) {
DPRINTF(MemCtrl, "Request scheduled immediately\n");
schedule(nextReqEvent, curTick());
}
stats.writeReqs++;
stats.bytesWrittenSys += size;
}
} else {
assert(pkt->isRead());
assert(size != 0);
if (readQueueFull(pkt_count)) {
DPRINTF(MemCtrl, "Read queue full, not accepting\n");
// remember that we have to retry this port
retryRdReq = true;
stats.numRdRetry++;
return false;
} else {
// addToReadQueue returns true when every burst was serviced
// directly from the write queue; only schedule the request
// event when something was actually queued
if (!addToReadQueue(pkt, pkt_count, is_dram ? dram : nvm)) {
// If we are not already scheduled to get a request out of the
// queue, do so now
if (!nextReqEvent.scheduled()) {
DPRINTF(MemCtrl, "Request scheduled immediately\n");
schedule(nextReqEvent, curTick());
}
}
stats.readReqs++;
stats.bytesReadSys += size;
}
}
return true;
}
void
HeteroMemCtrl::processRespondEvent(MemInterface* mem_intr,
                                   MemPacketQueue& queue,
                                   EventFunctionWrapper& resp_event)
{
    DPRINTF(MemCtrl,
            "processRespondEvent(): Some req has reached its readyTime\n");

    // Dispatch to the base implementation with the interface that
    // actually serviced the packet at the head of the response queue;
    // the mem_intr argument (dram by default) is ignored here.
    MemInterface* owner = queue.front()->isDram() ? dram : nvm;
    MemCtrl::processRespondEvent(owner, queue, resp_event);
}
// Arbitrate between the queued packets according to the configured
// scheduling policy, returning an iterator to the chosen packet or
// queue.end() when nothing can issue.
MemPacketQueue::iterator
HeteroMemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay,
MemInterface* mem_int)
{
// This method does the arbitration between requests.
MemPacketQueue::iterator ret = queue.end();
if (!queue.empty()) {
if (queue.size() == 1) {
// available rank corresponds to state refresh idle
MemPacket* mem_pkt = *(queue.begin());
// readiness is checked against the medium that owns the packet
if (packetReady(mem_pkt, mem_pkt->isDram()? dram : nvm)) {
ret = queue.begin();
DPRINTF(MemCtrl, "Single request, going to a free rank\n");
} else {
DPRINTF(MemCtrl, "Single request, going to a busy rank\n");
}
} else if (memSchedPolicy == enums::fcfs) {
// check if there is a packet going to a free rank
for (auto i = queue.begin(); i != queue.end(); ++i) {
MemPacket* mem_pkt = *i;
if (packetReady(mem_pkt, mem_pkt->isDram()? dram : nvm)) {
ret = i;
break;
}
}
} else if (memSchedPolicy == enums::frfcfs) {
// first-ready, first-come-first-served across both media
Tick col_allowed_at;
std::tie(ret, col_allowed_at)
= chooseNextFRFCFS(queue, extra_col_delay, mem_int);
} else {
panic("No scheduling policy chosen\n");
}
}
return ret;
}
std::pair<MemPacketQueue::iterator, Tick>
HeteroMemCtrl::chooseNextFRFCFS(MemPacketQueue& queue, Tick extra_col_delay,
                                MemInterface* mem_intr)
{
    // Ask the base FRFCFS scheduler for the best candidate on each
    // medium and keep whichever can issue its column command sooner.
    auto [dram_it, dram_allowed_at] =
        MemCtrl::chooseNextFRFCFS(queue, extra_col_delay, dram);
    auto [nvm_it, nvm_allowed_at] =
        MemCtrl::chooseNextFRFCFS(queue, extra_col_delay, nvm);

    // Prefer NVM only when it is strictly earlier than the DRAM pick
    if (nvm_allowed_at < dram_allowed_at) {
        return std::make_pair(nvm_it, nvm_allowed_at);
    }
    return std::make_pair(dram_it, dram_allowed_at);
}
// Issue a burst access on the medium that owns mem_pkt and propagate
// the resulting bus timing to the *other* medium's ranks, since both
// interfaces share the command and data bus.
Tick
HeteroMemCtrl::doBurstAccess(MemPacket* mem_pkt, MemInterface* mem_intr)
{
// mem_intr will be dram by default in HeteroMemCtrl
// When was command issued?
Tick cmd_at;
if (mem_pkt->isDram()) {
cmd_at = MemCtrl::doBurstAccess(mem_pkt, mem_intr);
// Update timing for NVM ranks if NVM is configured on this channel
nvm->addRankToRankDelay(cmd_at);
} else {
cmd_at = MemCtrl::doBurstAccess(mem_pkt, nvm);
// Update timing for DRAM ranks sharing this channel, since the
// NVM burst occupied the common bus
dram->addRankToRankDelay(cmd_at);
}
return cmd_at;
}
/**
 * Check whether both memory interfaces are currently busy
 * (e.g. all ranks refreshing), in which case the request state
 * machine must stall and not schedule a new nextReqEvent.
 *
 * @param mem_intr the dram interface (always, in HeteroMemCtrl)
 * @return true when both dram and nvm report busy
 */
bool
HeteroMemCtrl::memBusy(MemInterface* mem_intr)
{
    // check ranks for refresh/wakeup - uses busStateNext, so done after
    // turnaround decisions

    // DRAM: no special read-queue-empty / all-writes information needed
    const bool dram_busy = mem_intr->isBusy(false, false);

    // NVM: tell the interface whether the read queue is empty and
    // whether every queued write targets NVM, so it can make its
    // wakeup decision
    const bool read_queue_empty = totalReadQueueSize == 0;
    const bool all_writes_nvm = nvm->numWritesQueued == totalWriteQueueSize;
    const bool nvm_busy = nvm->isBusy(read_queue_empty, all_writes_nvm);

    // Simply AND the busy signals to determine if the system is busy;
    // if all ranks are refreshing, wait for them to finish and stall
    // this state machine without taking any further action
    return dram_busy && nvm_busy;
}
// Select and issue non-deterministic reads; these only exist on the
// nvm medium, so the dram interface passed by the base class is ignored.
void
HeteroMemCtrl::nonDetermReads(MemInterface* mem_intr)
{
// mem_intr by default points to dram in case
// of HeteroMemCtrl, therefore, calling nonDetermReads
// from MemCtrl using the nvm interface
MemCtrl::nonDetermReads(nvm);
}
// Report whether writes are blocked on the nvm medium (all queued
// writes target nvm and its write response queue is full); the dram
// interface passed by the base class is ignored.
bool
HeteroMemCtrl::nvmWriteBlock(MemInterface* mem_intr)
{
// mem_intr by default points to dram in case
// of HeteroMemCtrl, therefore, calling nvmWriteBlock
// from MemCtrl using nvm interface
return MemCtrl::nvmWriteBlock(nvm);
}
Tick
HeteroMemCtrl::minReadToWriteDataGap()
{
    // The shared bus must honour the smaller of the two media's
    // read-to-write turnaround constraints.
    const Tick dram_gap = dram->minReadToWriteDataGap();
    const Tick nvm_gap = nvm->minReadToWriteDataGap();
    return std::min(dram_gap, nvm_gap);
}
Tick
HeteroMemCtrl::minWriteToReadDataGap()
{
    // The shared bus must honour the smaller of the two media's
    // write-to-read turnaround constraints.
    const Tick dram_gap = dram->minWriteToReadDataGap();
    const Tick nvm_gap = nvm->minWriteToReadDataGap();
    return std::min(dram_gap, nvm_gap);
}
Addr
HeteroMemCtrl::burstAlign(Addr addr, MemInterface* mem_intr) const
{
    // mem_intr is the dram interface here; fall back to nvm when the
    // address is outside the dram range.
    MemInterface* owner = mem_intr;
    if (!mem_intr->getAddrRange().contains(addr)) {
        assert(nvm->getAddrRange().contains(addr));
        owner = nvm;
    }
    // align down to the owning medium's burst boundary
    return addr & ~(Addr(owner->bytesPerBurst() - 1));
}
bool
HeteroMemCtrl::pktSizeCheck(MemPacket* mem_pkt, MemInterface* mem_intr) const
{
    // Compare against the burst size of whichever medium owns the
    // packet; mem_intr is the dram interface in HeteroMemCtrl.
    const auto burst = mem_pkt->isDram() ? mem_intr->bytesPerBurst()
                                         : nvm->bytesPerBurst();
    return mem_pkt->size <= burst;
}
void
HeteroMemCtrl::recvFunctional(PacketPtr pkt)
{
    // Try the dram interface first, then nvm; exactly one of them
    // must own the packet's address range.
    if (MemCtrl::recvFunctionalLogic(pkt, dram)) {
        return;
    }
    if (MemCtrl::recvFunctionalLogic(pkt, nvm)) {
        return;
    }
    panic("Can't handle address range for packet %s\n", pkt->print());
}
bool
HeteroMemCtrl::allIntfDrained() const
{
    // Drained only when the dram ranks are in power-down / refresh-idle
    // states and the nvm interface has no outstanding writes. All other
    // queues are verified as needed by the calling logic.
    const bool dram_done = dram->allRanksDrained();
    const bool nvm_done = nvm->allRanksDrained();
    return dram_done && nvm_done;
}
// Report Drained when all queues are empty and both interfaces have
// drained; otherwise start draining and kick the write queue if needed.
DrainState
HeteroMemCtrl::drain()
{
// if there is anything in any of our internal queues, keep track
// of that as well
if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() &&
allIntfDrained())) {
DPRINTF(Drain, "Memory controller not drained, write: %d, read: %d,"
" resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
respQueue.size());
// the only queue that is not drained automatically over time
// is the write queue, thus kick things into action if needed
if (!totalWriteQueueSize && !nextReqEvent.scheduled()) {
schedule(nextReqEvent, curTick());
}
// NOTE(review): only the dram ranks are told to drain here;
// presumably nvm needs no rank-level drain — confirm
dram->drainRanks();
return DrainState::Draining;
} else {
return DrainState::Drained;
}
}
// Restart or suspend activity when the simulation mode changes after a
// drain (e.g. switching between timing and atomic/KVM modes).
void
HeteroMemCtrl::drainResume()
{
if (!isTimingMode && system()->isTimingMode()) {
// if we switched to timing mode, kick things into action,
// and behave as if we restored from a checkpoint
startup();
// NOTE(review): only dram is started/suspended here; presumably
// the nvm interface has no refresh machinery to manage — confirm
dram->startup();
} else if (isTimingMode && !system()->isTimingMode()) {
// if we switch from timing mode, stop the refresh events to
// not cause issues with KVM
dram->suspend();
}
// update the mode
isTimingMode = system()->isTimingMode();
}
AddrRangeList
HeteroMemCtrl::getAddrRanges()
{
    // This controller responds for both media's address ranges.
    return AddrRangeList{dram->getAddrRange(), nvm->getAddrRange()};
}
} // namespace memory
} // namespace gem5

142
src/mem/hetero_mem_ctrl.hh Normal file
View File

@@ -0,0 +1,142 @@
/*
* Copyright (c) 2012-2020 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2013 Amin Farmahini-Farahani
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* HeteroMemCtrl declaration
*/
#ifndef __HETERO_MEM_CTRL_HH__
#define __HETERO_MEM_CTRL_HH__
#include "mem/mem_ctrl.hh"
#include "params/HeteroMemCtrl.hh"
namespace gem5
{
namespace memory
{
/**
 * Heterogeneous memory controller: extends MemCtrl (which drives the
 * 'dram' interface) with a second, nvm interface; both media share the
 * command and data bus.
 */
class HeteroMemCtrl : public MemCtrl
{
private:
/**
* Create pointer to interface of the actual nvm media when connected.
*/
NVMInterface* nvm;
/**
* Arbitrate between queued packets according to the configured
* scheduling policy, considering both media.
*/
MemPacketQueue::iterator chooseNext(MemPacketQueue& queue,
Tick extra_col_delay, MemInterface* mem_int) override;
/**
* FRFCFS arbitration across both media: pick whichever medium's
* candidate can issue its column command sooner.
*/
virtual std::pair<MemPacketQueue::iterator, Tick>
chooseNextFRFCFS(MemPacketQueue& queue, Tick extra_col_delay,
MemInterface* mem_intr) override;
/**
* Issue a burst on the owning medium and propagate bus timing to the
* other medium's ranks; returns the tick the command was issued at.
*/
Tick doBurstAccess(MemPacket* mem_pkt, MemInterface* mem_int) override;
/** Minimum read-to-write turnaround across both media. */
Tick minReadToWriteDataGap() override;
/** Minimum write-to-read turnaround across both media. */
Tick minWriteToReadDataGap() override;
/** Address ranges of both the dram and the nvm interface. */
AddrRangeList getAddrRanges() override;
/**
* Burst-align an address.
*
* @param addr The potentially unaligned address
* @param mem_intr The DRAM memory interface this packet belongs to
*
* @return An address aligned to a memory burst
*/
virtual Addr burstAlign(Addr addr, MemInterface* mem_intr) const override;
/**
* Check if mem pkt's size is sane
*
* @param mem_pkt memory packet
* @param mem_intr memory interface
* @return a boolean indicating if the mem pkt size is less than
* the burst size of the related mem interface
*/
virtual bool
pktSizeCheck(MemPacket* mem_pkt, MemInterface* mem_intr) const override;
/**
* Forward the response at the head of the queue to the base class,
* using the interface that actually serviced it.
*/
virtual void processRespondEvent(MemInterface* mem_intr,
MemPacketQueue& queue,
EventFunctionWrapper& resp_event) override;
/**
* Checks if the memory interface is already busy
*
* @param mem_intr memory interface to check
* @return a boolean indicating if memory is busy
*/
virtual bool memBusy(MemInterface* mem_intr) override;
/**
* Will access nvm memory interface and select non-deterministic
* reads to issue
*/
virtual void nonDetermReads(MemInterface* mem_intr) override;
/**
* Will check if all writes are for nvm interface
* and nvm's write resp queue is full.
*
* @param mem_intr memory interface to use
* @return a boolean showing if nvm is blocked with writes
*/
virtual bool nvmWriteBlock(MemInterface* mem_intr) override;
public:
/** Construct from generated params; requires both dram and nvm. */
HeteroMemCtrl(const HeteroMemCtrlParams &p);
/** True when both interfaces report all ranks drained. */
bool allIntfDrained() const override;
DrainState drain() override;
void drainResume() override;
protected:
Tick recvAtomic(PacketPtr pkt) override;
void recvFunctional(PacketPtr pkt) override;
bool recvTimingReq(PacketPtr pkt) override;
};
} // namespace memory
} // namespace gem5
#endif //__HETERO_MEM_CTRL_HH__

View File

@@ -61,13 +61,13 @@ MemCtrl::MemCtrl(const MemCtrlParams &p) :
qos::MemCtrl(p),
port(name() + ".port", *this), isTimingMode(false),
retryRdReq(false), retryWrReq(false),
nextReqEvent([this]{ processNextReqEvent(); }, name()),
respondEvent([this]{ processRespondEvent(); }, name()),
dram(p.dram), nvm(p.nvm),
readBufferSize((dram ? dram->readBufferSize : 0) +
(nvm ? nvm->readBufferSize : 0)),
writeBufferSize((dram ? dram->writeBufferSize : 0) +
(nvm ? nvm->writeBufferSize : 0)),
nextReqEvent([this] {processNextReqEvent(dram, respQueue,
respondEvent, nextReqEvent);}, name()),
respondEvent([this] {processRespondEvent(dram, respQueue,
respondEvent); }, name()),
dram(p.dram),
readBufferSize(dram->readBufferSize),
writeBufferSize(dram->writeBufferSize),
writeHighThreshold(writeBufferSize * p.write_high_thresh_perc / 100.0),
writeLowThreshold(writeBufferSize * p.write_low_thresh_perc / 100.0),
minWritesPerSwitch(p.min_writes_per_switch),
@@ -81,16 +81,11 @@ MemCtrl::MemCtrl(const MemCtrlParams &p) :
stats(*this)
{
DPRINTF(MemCtrl, "Setting up controller\n");
readQueue.resize(p.qos_priorities);
writeQueue.resize(p.qos_priorities);
// Hook up interfaces to the controller
if (dram)
dram->setCtrl(this, commandWindow);
if (nvm)
nvm->setCtrl(this, commandWindow);
fatal_if(!dram && !nvm, "Memory controller must have an interface");
dram->setCtrl(this, commandWindow);
// perform a basic check of the write thresholds
if (p.write_low_thresh_perc >= p.write_high_thresh_perc)
@@ -120,13 +115,23 @@ MemCtrl::startup()
// have to worry about negative values when computing the time for
// the next request, this will add an insignificant bubble at the
// start of simulation
nextBurstAt = curTick() + (dram ? dram->commandOffset() :
nvm->commandOffset());
nextBurstAt = curTick() + dram->commandOffset();
}
}
Tick
MemCtrl::recvAtomic(PacketPtr pkt)
{
if (!dram->getAddrRange().contains(pkt->getAddr())) {
panic("Can't handle address range for packet %s\n", pkt->print());
}
return recvAtomicLogic(pkt, dram);
}
Tick
MemCtrl::recvAtomicLogic(PacketPtr pkt, MemInterface* mem_intr)
{
DPRINTF(MemCtrl, "recvAtomic: %s 0x%x\n",
pkt->cmdString(), pkt->getAddr());
@@ -134,41 +139,24 @@ MemCtrl::recvAtomic(PacketPtr pkt)
panic_if(pkt->cacheResponding(), "Should not see packets where cache "
"is responding");
Tick latency = 0;
// do the actual memory access and turn the packet into a response
if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
dram->access(pkt);
mem_intr->access(pkt);
if (pkt->hasData()) {
// this value is not supposed to be accurate, just enough to
// keep things going, mimic a closed page
latency = dram->accessLatency();
}
} else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
nvm->access(pkt);
if (pkt->hasData()) {
// this value is not supposed to be accurate, just enough to
// keep things going, mimic a closed page
latency = nvm->accessLatency();
}
} else {
panic("Can't handle address range for packet %s\n",
pkt->print());
if (pkt->hasData()) {
// this value is not supposed to be accurate, just enough to
// keep things going, mimic a closed page
// also this latency can't be 0
return mem_intr->accessLatency();
}
return latency;
return 0;
}
Tick
MemCtrl::recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor)
{
Tick latency = recvAtomic(pkt);
if (dram) {
dram->getBackdoor(backdoor);
} else if (nvm) {
nvm->getBackdoor(backdoor);
}
dram->getBackdoor(backdoor);
return latency;
}
@@ -195,8 +183,9 @@ MemCtrl::writeQueueFull(unsigned int neededEntries) const
return wrsize_new > writeBufferSize;
}
void
MemCtrl::addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
bool
MemCtrl::addToReadQueue(PacketPtr pkt,
unsigned int pkt_count, MemInterface* mem_intr)
{
// only add to the read queue here. whenever the request is
// eventually done, set the readyTime, and call schedule()
@@ -215,8 +204,8 @@ MemCtrl::addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
unsigned pktsServicedByWrQ = 0;
BurstHelper* burst_helper = NULL;
uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
nvm->bytesPerBurst();
uint32_t burst_size = mem_intr->bytesPerBurst();
for (int cnt = 0; cnt < pkt_count; ++cnt) {
unsigned size = std::min((addr | (burst_size - 1)) + 1,
base_addr + pkt->getSize()) - addr;
@@ -227,7 +216,7 @@ MemCtrl::addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
// First check write buffer to see if the data is already at
// the controller
bool foundInWrQ = false;
Addr burst_addr = burstAlign(addr, is_dram);
Addr burst_addr = burstAlign(addr, mem_intr);
// if the burst address is not present then there is no need
// looking any further
if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
@@ -264,17 +253,13 @@ MemCtrl::addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
}
MemPacket* mem_pkt;
if (is_dram) {
mem_pkt = dram->decodePacket(pkt, addr, size, true);
// increment read entries of the rank
dram->setupRank(mem_pkt->rank, true);
} else {
mem_pkt = nvm->decodePacket(pkt, addr, size, true);
// Increment count to trigger issue of non-deterministic read
nvm->setupRank(mem_pkt->rank, true);
// Default readyTime to Max; will be reset once read is issued
mem_pkt->readyTime = MaxTick;
}
mem_pkt = mem_intr->decodePacket(pkt, addr, size, true);
// Increment read entries of the rank (dram)
// Increment count to trigger issue of non-deterministic read (nvm)
mem_intr->setupRank(mem_pkt->rank, true);
// Default readyTime to Max; will be reset once read is issued
mem_pkt->readyTime = MaxTick;
mem_pkt->burstHelper = burst_helper;
assert(!readQueueFull(1));
@@ -285,8 +270,8 @@ MemCtrl::addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
readQueue[mem_pkt->qosValue()].push_back(mem_pkt);
// log packet
logRequest(MemCtrl::READ, pkt->requestorId(), pkt->qosValue(),
mem_pkt->addr, 1);
logRequest(MemCtrl::READ, pkt->requestorId(),
pkt->qosValue(), mem_pkt->addr, 1);
// Update stats
stats.avgRdQLen = totalReadQueueSize + respQueue.size();
@@ -298,24 +283,21 @@ MemCtrl::addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
// If all packets are serviced by write queue, we send the response back
if (pktsServicedByWrQ == pkt_count) {
accessAndRespond(pkt, frontendLatency);
return;
accessAndRespond(pkt, frontendLatency, mem_intr);
return true;
}
// Update how many split packets are serviced by write queue
if (burst_helper != NULL)
burst_helper->burstsServiced = pktsServicedByWrQ;
// If we are not already scheduled to get a request out of the
// queue, do so now
if (!nextReqEvent.scheduled()) {
DPRINTF(MemCtrl, "Request scheduled immediately\n");
schedule(nextReqEvent, curTick());
}
// not all/any packets serviced by the write queue
return false;
}
void
MemCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
MemCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count,
MemInterface* mem_intr)
{
// only add to the write queue here. whenever the request is
// eventually done, set the readyTime, and call schedule()
@@ -325,8 +307,8 @@ MemCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
// multiple packets
const Addr base_addr = pkt->getAddr();
Addr addr = base_addr;
uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
nvm->bytesPerBurst();
uint32_t burst_size = mem_intr->bytesPerBurst();
for (int cnt = 0; cnt < pkt_count; ++cnt) {
unsigned size = std::min((addr | (burst_size - 1)) + 1,
base_addr + pkt->getSize()) - addr;
@@ -336,31 +318,31 @@ MemCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
// see if we can merge with an existing item in the write
// queue and keep track of whether we have merged or not
bool merged = isInWriteQueue.find(burstAlign(addr, is_dram)) !=
bool merged = isInWriteQueue.find(burstAlign(addr, mem_intr)) !=
isInWriteQueue.end();
// if the item was not merged we need to create a new write
// and enqueue it
if (!merged) {
MemPacket* mem_pkt;
if (is_dram) {
mem_pkt = dram->decodePacket(pkt, addr, size, false);
dram->setupRank(mem_pkt->rank, false);
} else {
mem_pkt = nvm->decodePacket(pkt, addr, size, false);
nvm->setupRank(mem_pkt->rank, false);
}
mem_pkt = mem_intr->decodePacket(pkt, addr, size, false);
// Default readyTime to Max if nvm interface;
//will be reset once read is issued
mem_pkt->readyTime = MaxTick;
mem_intr->setupRank(mem_pkt->rank, false);
assert(totalWriteQueueSize < writeBufferSize);
stats.wrQLenPdf[totalWriteQueueSize]++;
DPRINTF(MemCtrl, "Adding to write queue\n");
writeQueue[mem_pkt->qosValue()].push_back(mem_pkt);
isInWriteQueue.insert(burstAlign(addr, is_dram));
isInWriteQueue.insert(burstAlign(addr, mem_intr));
// log packet
logRequest(MemCtrl::WRITE, pkt->requestorId(), pkt->qosValue(),
mem_pkt->addr, 1);
logRequest(MemCtrl::WRITE, pkt->requestorId(),
pkt->qosValue(), mem_pkt->addr, 1);
assert(totalWriteQueueSize == isInWriteQueue.size());
@@ -385,14 +367,7 @@ MemCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
// snoop the write queue for any upcoming reads
// @todo, if a pkt size is larger than burst size, we might need a
// different front end latency
accessAndRespond(pkt, frontendLatency);
// If we are not already scheduled to get a request out of the
// queue, do so now
if (!nextReqEvent.scheduled()) {
DPRINTF(MemCtrl, "Request scheduled immediately\n");
schedule(nextReqEvent, curTick());
}
accessAndRespond(pkt, frontendLatency, mem_intr);
}
void
@@ -439,25 +414,16 @@ MemCtrl::recvTimingReq(PacketPtr pkt)
}
prevArrival = curTick();
// What type of media does this packet access?
bool is_dram;
if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
is_dram = true;
} else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
is_dram = false;
} else {
panic("Can't handle address range for packet %s\n",
pkt->print());
}
panic_if(!(dram->getAddrRange().contains(pkt->getAddr())),
"Can't handle address range for packet %s\n", pkt->print());
// Find out how many memory packets a pkt translates to
// If the burst size is equal or larger than the pkt size, then a pkt
// translates to only one memory packet. Otherwise, a pkt translates to
// multiple memory packets
unsigned size = pkt->getSize();
uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
nvm->bytesPerBurst();
uint32_t burst_size = dram->bytesPerBurst();
unsigned offset = pkt->getAddr() & (burst_size - 1);
unsigned int pkt_count = divCeil(offset + size, burst_size);
@@ -474,7 +440,13 @@ MemCtrl::recvTimingReq(PacketPtr pkt)
stats.numWrRetry++;
return false;
} else {
addToWriteQueue(pkt, pkt_count, is_dram);
addToWriteQueue(pkt, pkt_count, dram);
// If we are not already scheduled to get a request out of the
// queue, do so now
if (!nextReqEvent.scheduled()) {
DPRINTF(MemCtrl, "Request scheduled immediately\n");
schedule(nextReqEvent, curTick());
}
stats.writeReqs++;
stats.bytesWrittenSys += size;
}
@@ -488,7 +460,14 @@ MemCtrl::recvTimingReq(PacketPtr pkt)
stats.numRdRetry++;
return false;
} else {
addToReadQueue(pkt, pkt_count, is_dram);
if (!addToReadQueue(pkt, pkt_count, dram)) {
// If we are not already scheduled to get a request out of the
// queue, do so now
if (!nextReqEvent.scheduled()) {
DPRINTF(MemCtrl, "Request scheduled immediately\n");
schedule(nextReqEvent, curTick());
}
}
stats.readReqs++;
stats.bytesReadSys += size;
}
@@ -498,17 +477,19 @@ MemCtrl::recvTimingReq(PacketPtr pkt)
}
void
MemCtrl::processRespondEvent()
MemCtrl::processRespondEvent(MemInterface* mem_intr,
MemPacketQueue& queue,
EventFunctionWrapper& resp_event)
{
DPRINTF(MemCtrl,
"processRespondEvent(): Some req has reached its readyTime\n");
MemPacket* mem_pkt = respQueue.front();
MemPacket* mem_pkt = queue.front();
if (mem_pkt->isDram()) {
// media specific checks and functions when read response is complete
dram->respondEvent(mem_pkt->rank);
}
// media specific checks and functions when read response is complete
// DRAM only
mem_intr->respondEvent(mem_pkt->rank);
if (mem_pkt->burstHelper) {
// it is a split packet
@@ -519,21 +500,23 @@ MemCtrl::processRespondEvent()
// so we can now respond to the requestor
// @todo we probably want to have a different front end and back
// end latency for split packets
accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency);
accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency,
mem_intr);
delete mem_pkt->burstHelper;
mem_pkt->burstHelper = NULL;
}
} else {
// it is not a split packet
accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency);
accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency,
mem_intr);
}
respQueue.pop_front();
queue.pop_front();
if (!respQueue.empty()) {
assert(respQueue.front()->readyTime >= curTick());
assert(!respondEvent.scheduled());
schedule(respondEvent, respQueue.front()->readyTime);
if (!queue.empty()) {
assert(queue.front()->readyTime >= curTick());
assert(!resp_event.scheduled());
schedule(resp_event, queue.front()->readyTime);
} else {
// if there is nothing left in any queue, signal a drain
if (drainState() == DrainState::Draining &&
@@ -542,11 +525,12 @@ MemCtrl::processRespondEvent()
DPRINTF(Drain, "Controller done draining\n");
signalDrainDone();
} else if (mem_pkt->isDram()) {
} else {
// check the refresh state and kick the refresh event loop
// into action again if banks already closed and just waiting
// for read to complete
dram->checkRefreshState(mem_pkt->rank);
// DRAM only
mem_intr->checkRefreshState(mem_pkt->rank);
}
}
@@ -561,7 +545,8 @@ MemCtrl::processRespondEvent()
}
MemPacketQueue::iterator
MemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay)
MemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay,
MemInterface* mem_intr)
{
// This method does the arbitration between requests.
@@ -571,7 +556,7 @@ MemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay)
if (queue.size() == 1) {
// available rank corresponds to state refresh idle
MemPacket* mem_pkt = *(queue.begin());
if (packetReady(mem_pkt)) {
if (packetReady(mem_pkt, dram)) {
ret = queue.begin();
DPRINTF(MemCtrl, "Single request, going to a free rank\n");
} else {
@@ -581,13 +566,15 @@ MemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay)
// check if there is a packet going to a free rank
for (auto i = queue.begin(); i != queue.end(); ++i) {
MemPacket* mem_pkt = *i;
if (packetReady(mem_pkt)) {
if (packetReady(mem_pkt, dram)) {
ret = i;
break;
}
}
} else if (memSchedPolicy == enums::frfcfs) {
ret = chooseNextFRFCFS(queue, extra_col_delay);
Tick col_allowed_at;
std::tie(ret, col_allowed_at)
= chooseNextFRFCFS(queue, extra_col_delay, mem_intr);
} else {
panic("No scheduling policy chosen\n");
}
@@ -595,8 +582,9 @@ MemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay)
return ret;
}
MemPacketQueue::iterator
MemCtrl::chooseNextFRFCFS(MemPacketQueue& queue, Tick extra_col_delay)
std::pair<MemPacketQueue::iterator, Tick>
MemCtrl::chooseNextFRFCFS(MemPacketQueue& queue, Tick extra_col_delay,
MemInterface* mem_intr)
{
auto selected_pkt_it = queue.end();
Tick col_allowed_at = MaxTick;
@@ -604,55 +592,28 @@ MemCtrl::chooseNextFRFCFS(MemPacketQueue& queue, Tick extra_col_delay)
// time we need to issue a column command to be seamless
const Tick min_col_at = std::max(nextBurstAt + extra_col_delay, curTick());
// find optimal packet for each interface
if (dram && nvm) {
// create 2nd set of parameters for NVM
auto nvm_pkt_it = queue.end();
Tick nvm_col_at = MaxTick;
// Select packet by default to give priority if both
// can issue at the same time or seamlessly
std::tie(selected_pkt_it, col_allowed_at) =
dram->chooseNextFRFCFS(queue, min_col_at);
std::tie(nvm_pkt_it, nvm_col_at) =
nvm->chooseNextFRFCFS(queue, min_col_at);
// Compare DRAM and NVM and select NVM if it can issue
// earlier than the DRAM packet
if (col_allowed_at > nvm_col_at) {
selected_pkt_it = nvm_pkt_it;
}
} else if (dram) {
std::tie(selected_pkt_it, col_allowed_at) =
dram->chooseNextFRFCFS(queue, min_col_at);
} else if (nvm) {
std::tie(selected_pkt_it, col_allowed_at) =
nvm->chooseNextFRFCFS(queue, min_col_at);
}
std::tie(selected_pkt_it, col_allowed_at) =
mem_intr->chooseNextFRFCFS(queue, min_col_at);
if (selected_pkt_it == queue.end()) {
DPRINTF(MemCtrl, "%s no available packets found\n", __func__);
}
return selected_pkt_it;
return std::make_pair(selected_pkt_it, col_allowed_at);
}
void
MemCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
MemCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency,
MemInterface* mem_intr)
{
DPRINTF(MemCtrl, "Responding to Address %#x.. \n", pkt->getAddr());
bool needsResponse = pkt->needsResponse();
// do the actual memory access which also turns the packet into a
// response
if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
dram->access(pkt);
} else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
nvm->access(pkt);
} else {
panic("Can't handle address range for packet %s\n",
pkt->print());
}
panic_if(!mem_intr->getAddrRange().contains(pkt->getAddr()),
"Can't handle address range for packet %s\n", pkt->print());
mem_intr->access(pkt);
// turn packet around to go back to requestor if response expected
if (needsResponse) {
@@ -815,8 +776,8 @@ MemCtrl::inWriteBusState(bool next_state) const
}
}
void
MemCtrl::doBurstAccess(MemPacket* mem_pkt)
Tick
MemCtrl::doBurstAccess(MemPacket* mem_pkt, MemInterface* mem_intr)
{
// first clean up the burstTick set, removing old entries
// before adding new entries for next burst
@@ -828,23 +789,8 @@ MemCtrl::doBurstAccess(MemPacket* mem_pkt)
// Issue the next burst and update bus state to reflect
// when previous command was issued
std::vector<MemPacketQueue>& queue = selQueue(mem_pkt->isRead());
if (mem_pkt->isDram()) {
std::tie(cmd_at, nextBurstAt) =
dram->doBurstAccess(mem_pkt, nextBurstAt, queue);
// Update timing for NVM ranks if NVM is configured on this channel
if (nvm)
nvm->addRankToRankDelay(cmd_at);
} else {
std::tie(cmd_at, nextBurstAt) =
nvm->doBurstAccess(mem_pkt, nextBurstAt, queue);
// Update timing for NVM ranks if NVM is configured on this channel
if (dram)
dram->addRankToRankDelay(cmd_at);
}
std::tie(cmd_at, nextBurstAt) =
mem_intr->doBurstAccess(mem_pkt, nextBurstAt, queue);
DPRINTF(MemCtrl, "Access to %#x, ready at %lld next burst at %lld.\n",
mem_pkt->addr, mem_pkt->readyTime, nextBurstAt);
@@ -853,9 +799,7 @@ MemCtrl::doBurstAccess(MemPacket* mem_pkt)
// conservative estimate of when we have to schedule the next
// request to not introduce any unnecessary bubbles. In most cases
// we will wake up sooner than we have to.
nextReqTime = nextBurstAt - (dram ? dram->commandOffset() :
nvm->commandOffset());
nextReqTime = nextBurstAt - dram->commandOffset();
// Update the common bus stats
if (mem_pkt->isRead()) {
@@ -870,11 +814,58 @@ MemCtrl::doBurstAccess(MemPacket* mem_pkt)
stats.requestorWriteTotalLat[mem_pkt->requestorId()] +=
mem_pkt->readyTime - mem_pkt->entryTime;
}
return cmd_at;
}
bool
MemCtrl::memBusy(MemInterface* mem_intr) {
    // check ranks for refresh/wakeup - uses busStateNext, so done after
    // turnaround decisions
    bool all_writes_nvm = mem_intr->numWritesQueued == totalWriteQueueSize;
    bool read_queue_empty = totalReadQueueSize == 0;
    // If the interface reports busy (e.g. all ranks are refreshing),
    // the caller must stall this state machine without taking any
    // further action, and must not schedule a new nextReqEvent.
    return mem_intr->isBusy(read_queue_empty, all_writes_nvm);
}
bool
MemCtrl::nvmWriteBlock(MemInterface* mem_intr) {
    // Writes are blocked when the interface's write response queue is
    // full and every queued write targets this interface. For a DRAM
    // interface writeRespQueueFull() eventually returns false, so this
    // only ever reports true for an NVM interface.
    const bool only_this_intr_writes =
        (mem_intr->numWritesQueued == totalWriteQueueSize);
    return mem_intr->writeRespQueueFull() && only_this_intr_writes;
}
void
MemCtrl::processNextReqEvent()
{
// Issue any waiting non-deterministic reads on this interface,
// scanning the per-priority read queues. Only an NVM interface has
// such reads (readsWaitingToIssue() is presumably always false for a
// DRAM interface -- confirm against the interface implementation).
// NOTE(review): iteration uses rbegin/rend -- presumably so the
// highest-priority queue is considered first; confirm against the
// QoS queue ordering.
MemCtrl::nonDetermReads(MemInterface* mem_intr) {
    for (auto queue = readQueue.rbegin();
        queue != readQueue.rend(); ++queue) {
        // assume that we have the command bandwidth to issue this
        // along with an additional RD/WR burst with needed bank
        // operations
        if (mem_intr->readsWaitingToIssue()) {
            // select non-deterministic NVM read to issue
            mem_intr->chooseRead(*queue);
        }
    }
}
void
MemCtrl::processNextReqEvent(MemInterface* mem_intr,
MemPacketQueue& resp_queue,
EventFunctionWrapper& resp_event,
EventFunctionWrapper& next_req_event) {
// transition is handled by QoS algorithm if enabled
if (turnPolicy) {
// select bus state - only done if QoS algorithms are in use
@@ -909,36 +900,9 @@ MemCtrl::processNextReqEvent()
// updates current state
busState = busStateNext;
if (nvm) {
for (auto queue = readQueue.rbegin();
queue != readQueue.rend(); ++queue) {
// select non-deterministic NVM read to issue
// assume that we have the command bandwidth to issue this along
// with additional RD/WR burst with needed bank operations
if (nvm->readsWaitingToIssue()) {
// select non-deterministic NVM read to issue
nvm->chooseRead(*queue);
}
}
}
nonDetermReads(mem_intr);
// check ranks for refresh/wakeup - uses busStateNext, so done after
// turnaround decisions
// Default to busy status and update based on interface specifics
bool dram_busy = dram ? dram->isBusy(false, false) : true;
bool nvm_busy = true;
bool all_writes_nvm = false;
if (nvm) {
all_writes_nvm = nvm->numWritesQueued == totalWriteQueueSize;
bool read_queue_empty = totalReadQueueSize == 0;
nvm_busy = nvm->isBusy(read_queue_empty, all_writes_nvm);
}
// Default state of unused interface is 'true'
// Simply AND the busy signals to determine if system is busy
if (dram_busy && nvm_busy) {
// if all ranks are refreshing wait for them to finish
// and stall this state machine without taking any further
// action, and do not schedule a new nextReqEvent
if (memBusy(mem_intr)) {
return;
}
@@ -965,7 +929,7 @@ MemCtrl::processNextReqEvent()
// ensuring all banks are closed and
// have exited low power states
if (drainState() == DrainState::Draining &&
respQueue.empty() && allIntfDrained()) {
respQEmpty() && allIntfDrained()) {
DPRINTF(Drain, "MemCtrl controller done draining\n");
signalDrainDone();
@@ -994,7 +958,7 @@ MemCtrl::processNextReqEvent()
// If we are changing command type, incorporate the minimum
// bus turnaround delay which will be rank to rank delay
to_read = chooseNext((*queue), switched_cmd_type ?
minWriteToReadDataGap() : 0);
minWriteToReadDataGap() : 0, mem_intr);
if (to_read != queue->end()) {
// candidate read found
@@ -1015,12 +979,13 @@ MemCtrl::processNextReqEvent()
auto mem_pkt = *to_read;
doBurstAccess(mem_pkt);
Tick cmd_at = doBurstAccess(mem_pkt, mem_intr);
DPRINTF(MemCtrl,
"Command for %#x, issued at %lld.\n", mem_pkt->addr, cmd_at);
// sanity check
assert(mem_pkt->size <= (mem_pkt->isDram() ?
dram->bytesPerBurst() :
nvm->bytesPerBurst()) );
assert(pktSizeCheck(mem_pkt, mem_intr));
assert(mem_pkt->readyTime >= curTick());
// log the response
@@ -1031,21 +996,21 @@ MemCtrl::processNextReqEvent()
// Insert into response queue. It will be sent back to the
// requestor at its readyTime
if (respQueue.empty()) {
assert(!respondEvent.scheduled());
schedule(respondEvent, mem_pkt->readyTime);
if (resp_queue.empty()) {
assert(!resp_event.scheduled());
schedule(resp_event, mem_pkt->readyTime);
} else {
assert(respQueue.back()->readyTime <= mem_pkt->readyTime);
assert(respondEvent.scheduled());
assert(resp_queue.back()->readyTime <= mem_pkt->readyTime);
assert(resp_event.scheduled());
}
respQueue.push_back(mem_pkt);
resp_queue.push_back(mem_pkt);
// we have so many writes that we have to transition
// don't transition if the writeRespQueue is full and
// there are no other writes that can issue
if ((totalWriteQueueSize > writeHighThreshold) &&
!(nvm && all_writes_nvm && nvm->writeRespQueueFull())) {
!(nvmWriteBlock(mem_intr))) {
switch_to_writes = true;
}
@@ -1079,7 +1044,7 @@ MemCtrl::processNextReqEvent()
// If we are changing command type, incorporate the minimum
// bus turnaround delay
to_write = chooseNext((*queue),
switched_cmd_type ? minReadToWriteDataGap() : 0);
switched_cmd_type ? minReadToWriteDataGap() : 0, mem_intr);
if (to_write != queue->end()) {
write_found = true;
@@ -1100,13 +1065,13 @@ MemCtrl::processNextReqEvent()
auto mem_pkt = *to_write;
// sanity check
assert(mem_pkt->size <= (mem_pkt->isDram() ?
dram->bytesPerBurst() :
nvm->bytesPerBurst()) );
assert(pktSizeCheck(mem_pkt, mem_intr));
doBurstAccess(mem_pkt);
Tick cmd_at = doBurstAccess(mem_pkt, mem_intr);
DPRINTF(MemCtrl,
"Command for %#x, issued at %lld.\n", mem_pkt->addr, cmd_at);
isInWriteQueue.erase(burstAlign(mem_pkt->addr, mem_pkt->isDram()));
isInWriteQueue.erase(burstAlign(mem_pkt->addr, mem_intr));
// log the response
logResponse(MemCtrl::WRITE, mem_pkt->requestorId(),
@@ -1131,8 +1096,7 @@ MemCtrl::processNextReqEvent()
if (totalWriteQueueSize == 0 ||
(below_threshold && drainState() != DrainState::Draining) ||
(totalReadQueueSize && writesThisTime >= minWritesPerSwitch) ||
(totalReadQueueSize && nvm && nvm->writeRespQueueFull() &&
all_writes_nvm)) {
(totalReadQueueSize && (nvmWriteBlock(mem_intr)))) {
// turn the bus back around for reads again
busStateNext = MemCtrl::READ;
@@ -1146,12 +1110,8 @@ MemCtrl::processNextReqEvent()
// It is possible that a refresh to another rank kicks things back into
// action before reaching this point.
if (!nextReqEvent.scheduled())
schedule(nextReqEvent, std::max(nextReqTime, curTick()));
schedule(next_req_event, std::max(nextReqTime, curTick()));
// If there is space available and we have writes waiting then let
// them retry. This is done here to ensure that the retry does not
// cause a nextReqEvent to be scheduled before we do so as part of
// the next request processing
if (retryWrReq && totalWriteQueueSize < writeBufferSize) {
retryWrReq = false;
port.sendRetryReq();
@@ -1159,35 +1119,33 @@ MemCtrl::processNextReqEvent()
}
bool
MemCtrl::packetReady(MemPacket* pkt)
MemCtrl::packetReady(MemPacket* pkt, MemInterface* mem_intr)
{
return (pkt->isDram() ?
dram->burstReady(pkt) : nvm->burstReady(pkt));
return mem_intr->burstReady(pkt);
}
Tick
MemCtrl::minReadToWriteDataGap()
{
Tick dram_min = dram ? dram->minReadToWriteDataGap() : MaxTick;
Tick nvm_min = nvm ? nvm->minReadToWriteDataGap() : MaxTick;
return std::min(dram_min, nvm_min);
return dram->minReadToWriteDataGap();
}
Tick
MemCtrl::minWriteToReadDataGap()
{
Tick dram_min = dram ? dram->minWriteToReadDataGap() : MaxTick;
Tick nvm_min = nvm ? nvm->minWriteToReadDataGap() : MaxTick;
return std::min(dram_min, nvm_min);
return dram->minWriteToReadDataGap();
}
Addr
MemCtrl::burstAlign(Addr addr, bool is_dram) const
MemCtrl::burstAlign(Addr addr, MemInterface* mem_intr) const
{
if (is_dram)
return (addr & ~(Addr(dram->bytesPerBurst() - 1)));
else
return (addr & ~(Addr(nvm->bytesPerBurst() - 1)));
return (addr & ~(Addr(mem_intr->bytesPerBurst() - 1)));
}
bool
MemCtrl::pktSizeCheck(MemPacket* mem_pkt, MemInterface* mem_intr) const
{
    // A memory packet is sane if it fits within a single burst of the
    // interface it targets.
    const uint32_t burst_bytes = mem_intr->bytesPerBurst();
    return mem_pkt->size <= burst_bytes;
}
MemCtrl::CtrlStats::CtrlStats(MemCtrl &_ctrl)
@@ -1254,7 +1212,8 @@ MemCtrl::CtrlStats::CtrlStats(MemCtrl &_ctrl)
statistics::units::Byte, statistics::units::Second>::get(),
"Average system write bandwidth in Byte/s"),
ADD_STAT(totGap, statistics::units::Tick::get(), "Total gap between requests"),
ADD_STAT(totGap, statistics::units::Tick::get(),
"Total gap between requests"),
ADD_STAT(avgGap, statistics::units::Rate<
statistics::units::Tick, statistics::units::Count>::get(),
"Average gap between requests"),
@@ -1385,16 +1344,22 @@ MemCtrl::CtrlStats::regStats()
void
MemCtrl::recvFunctional(PacketPtr pkt)
{
if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
bool found = recvFunctionalLogic(pkt, dram);
panic_if(!found, "Can't handle address range for packet %s\n",
pkt->print());
}
bool
MemCtrl::recvFunctionalLogic(PacketPtr pkt, MemInterface* mem_intr)
{
if (mem_intr->getAddrRange().contains(pkt->getAddr())) {
// rely on the abstract memory
dram->functionalAccess(pkt);
} else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
// rely on the abstract memory
nvm->functionalAccess(pkt);
} else {
panic("Can't handle address range for packet %s\n",
pkt->print());
}
mem_intr->functionalAccess(pkt);
return true;
} else {
return false;
}
}
Port &
@@ -1410,12 +1375,10 @@ MemCtrl::getPort(const std::string &if_name, PortID idx)
bool
MemCtrl::allIntfDrained() const
{
// ensure dram is in power down and refresh IDLE states
bool dram_drained = !dram || dram->allRanksDrained();
// No outstanding NVM writes
// All other queues verified as needed with calling logic
bool nvm_drained = !nvm || nvm->allRanksDrained();
return (dram_drained && nvm_drained);
// DRAM: ensure dram is in power down and refresh IDLE states
// NVM: No outstanding NVM writes
// NVM: All other queues verified as needed with calling logic
return dram->allRanksDrained();
}
DrainState
@@ -1436,8 +1399,7 @@ MemCtrl::drain()
schedule(nextReqEvent, curTick());
}
if (dram)
dram->drainRanks();
dram->drainRanks();
return DrainState::Draining;
} else {
@@ -1456,15 +1418,23 @@ MemCtrl::drainResume()
} else if (isTimingMode && !system()->isTimingMode()) {
// if we switch from timing mode, stop the refresh events to
// not cause issues with KVM
if (dram)
dram->suspend();
dram->suspend();
}
// update the mode
isTimingMode = system()->isTimingMode();
}
MemCtrl::MemoryPort::MemoryPort(const std::string& name, MemCtrl& _ctrl)
AddrRangeList
MemCtrl::getAddrRanges()
{
    // The base controller owns a single interface (held in 'dram'),
    // so the port exposes exactly that interface's address range.
    return AddrRangeList{dram->getAddrRange()};
}
MemCtrl::MemoryPort::
MemoryPort(const std::string& name, MemCtrl& _ctrl)
: QueuedResponsePort(name, &_ctrl, queue), queue(_ctrl, *this, true),
ctrl(_ctrl)
{ }
@@ -1472,16 +1442,7 @@ MemCtrl::MemoryPort::MemoryPort(const std::string& name, MemCtrl& _ctrl)
AddrRangeList
MemCtrl::MemoryPort::getAddrRanges() const
{
AddrRangeList ranges;
if (ctrl.dram) {
DPRINTF(DRAM, "Pushing DRAM ranges to port\n");
ranges.push_back(ctrl.dram->getAddrRange());
}
if (ctrl.nvm) {
DPRINTF(NVM, "Pushing NVM ranges to port\n");
ranges.push_back(ctrl.nvm->getAddrRange());
}
return ranges;
return ctrl.getAddrRanges();
}
void

View File

@@ -242,7 +242,7 @@ typedef std::deque<MemPacket*> MemPacketQueue;
*/
class MemCtrl : public qos::MemCtrl
{
private:
protected:
// For now, make use of a queued response port to avoid dealing with
// flow control for the responses being sent back
@@ -293,10 +293,15 @@ class MemCtrl : public qos::MemCtrl
* processRespondEvent is called; no parameters are allowed
* in these methods
*/
void processNextReqEvent();
virtual void processNextReqEvent(MemInterface* mem_intr,
MemPacketQueue& resp_queue,
EventFunctionWrapper& resp_event,
EventFunctionWrapper& next_req_event);
EventFunctionWrapper nextReqEvent;
void processRespondEvent();
virtual void processRespondEvent(MemInterface* mem_intr,
MemPacketQueue& queue,
EventFunctionWrapper& resp_event);
EventFunctionWrapper respondEvent;
/**
@@ -326,11 +331,12 @@ class MemCtrl : public qos::MemCtrl
*
* @param pkt The request packet from the outside world
* @param pkt_count The number of memory bursts the pkt
* @param is_dram Does this packet access DRAM?
* translate to. If pkt size is larger than one full burst,
* then pkt_count is greater than one.
* @param mem_intr The memory interface this pkt will
* eventually go to
* @return if all the read pkts are already serviced by wrQ
*/
void addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram);
bool addToReadQueue(PacketPtr pkt, unsigned int pkt_count,
MemInterface* mem_intr);
/**
* Decode the incoming pkt, create a mem_pkt and push to the
@@ -340,19 +346,22 @@ class MemCtrl : public qos::MemCtrl
*
* @param pkt The request packet from the outside world
* @param pkt_count The number of memory bursts the pkt
* @param is_dram Does this packet access DRAM?
* translate to. If pkt size is larger than one full burst,
* then pkt_count is greater than one.
* @param mem_intr The memory interface this pkt will
* eventually go to
*/
void addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram);
void addToWriteQueue(PacketPtr pkt, unsigned int pkt_count,
MemInterface* mem_intr);
/**
* Actually do the burst based on media specific access function.
* Update bus statistics when complete.
*
* @param mem_pkt The memory packet created from the outside world pkt
* @param mem_intr The memory interface to access
* @return Time when the command was issued
*
*/
void doBurstAccess(MemPacket* mem_pkt);
virtual Tick doBurstAccess(MemPacket* mem_pkt, MemInterface* mem_intr);
/**
* When a packet reaches its "readyTime" in the response Q,
@@ -362,29 +371,31 @@ class MemCtrl : public qos::MemCtrl
*
* @param pkt The packet from the outside world
* @param static_latency Static latency to add before sending the packet
* @param mem_intr the memory interface to access
*/
void accessAndRespond(PacketPtr pkt, Tick static_latency);
virtual void accessAndRespond(PacketPtr pkt, Tick static_latency,
MemInterface* mem_intr);
/**
* Determine if there is a packet that can issue.
*
* @param pkt The packet to evaluate
*/
bool packetReady(MemPacket* pkt);
virtual bool packetReady(MemPacket* pkt, MemInterface* mem_intr);
/**
* Calculate the minimum delay used when scheduling a read-to-write
* transition.
* @return minimum delay
*/
Tick minReadToWriteDataGap();
virtual Tick minReadToWriteDataGap();
/**
* Calculate the minimum delay used when scheduling a write-to-read
* transition.
* @return minimum delay
*/
Tick minWriteToReadDataGap();
virtual Tick minWriteToReadDataGap();
/**
* The memory scheduler/arbiter - picks which request needs to
@@ -395,10 +406,11 @@ class MemCtrl : public qos::MemCtrl
*
* @param queue Queued requests to consider
* @param extra_col_delay Any extra delay due to a read/write switch
* @param mem_intr the memory interface to choose from
* @return an iterator to the selected packet, else queue.end()
*/
MemPacketQueue::iterator chooseNext(MemPacketQueue& queue,
Tick extra_col_delay);
virtual MemPacketQueue::iterator chooseNext(MemPacketQueue& queue,
Tick extra_col_delay, MemInterface* mem_intr);
/**
* For FR-FCFS policy reorder the read/write queue depending on row buffer
@@ -408,8 +420,9 @@ class MemCtrl : public qos::MemCtrl
* @param extra_col_delay Any extra delay due to a read/write switch
* @return an iterator to the selected packet, else queue.end()
*/
MemPacketQueue::iterator chooseNextFRFCFS(MemPacketQueue& queue,
Tick extra_col_delay);
virtual std::pair<MemPacketQueue::iterator, Tick>
chooseNextFRFCFS(MemPacketQueue& queue, Tick extra_col_delay,
MemInterface* mem_intr);
/**
* Calculate burst window aligned tick
@@ -428,11 +441,21 @@ class MemCtrl : public qos::MemCtrl
* Burst-align an address.
*
* @param addr The potentially unaligned address
* @param is_dram Does this packet access DRAM?
* @param mem_intr The DRAM interface this pkt belongs to
*
* @return An address aligned to a memory burst
*/
Addr burstAlign(Addr addr, bool is_dram) const;
virtual Addr burstAlign(Addr addr, MemInterface* mem_intr) const;
/**
* Check if mem pkt's size is sane
*
* @param mem_pkt memory packet
* @param mem_intr memory interface
* @return true if the packet size fits within one burst of the interface
*/
virtual bool pktSizeCheck(MemPacket* mem_pkt,
MemInterface* mem_intr) const;
/**
* The controller's main read and write queues,
@@ -468,14 +491,11 @@ class MemCtrl : public qos::MemCtrl
std::unordered_multiset<Tick> burstTicks;
/**
* Create pointer to interface of the actual dram media when connected
*/
DRAMInterface* const dram;
+ * Create pointer to interface of the actual memory media when connected
+ */
MemInterface* dram;
/**
* Create pointer to interface of the actual nvm media when connected
*/
NVMInterface* const nvm;
virtual AddrRangeList getAddrRanges();
/**
* The following are basic design parameters of the memory
@@ -483,10 +503,10 @@ class MemCtrl : public qos::MemCtrl
* The rowsPerBank is determined based on the capacity, number of
* ranks and banks, the burst size, and the row buffer size.
*/
const uint32_t readBufferSize;
const uint32_t writeBufferSize;
const uint32_t writeHighThreshold;
const uint32_t writeLowThreshold;
uint32_t readBufferSize;
uint32_t writeBufferSize;
uint32_t writeHighThreshold;
uint32_t writeLowThreshold;
const uint32_t minWritesPerSwitch;
uint32_t writesThisTime;
uint32_t readsThisTime;
@@ -611,6 +631,36 @@ class MemCtrl : public qos::MemCtrl
return (is_read ? readQueue : writeQueue);
};
virtual bool respQEmpty()
{
return respQueue.empty();
}
/**
* Checks if the memory interface is already busy
*
* @param mem_intr memory interface to check
* @return a boolean indicating if memory is busy
*/
virtual bool memBusy(MemInterface* mem_intr);
/**
* Will access memory interface and select non-deterministic
* reads to issue
* @param mem_intr memory interface to use
*/
virtual void nonDetermReads(MemInterface* mem_intr);
/**
* Will check if all writes are for nvm interface
* and nvm's write resp queue is full. The generic mem_intr is
* used as the same function can be called for a dram interface,
* in which case dram functions will eventually return false
* @param mem_intr memory interface to use
* @return a boolean showing if nvm is blocked with writes
*/
virtual bool nvmWriteBlock(MemInterface* mem_intr);
/**
* Remove commands that have already issued from burstTicks
*/
@@ -625,7 +675,7 @@ class MemCtrl : public qos::MemCtrl
*
* @return bool flag, set once drain complete
*/
bool allIntfDrained() const;
virtual bool allIntfDrained() const;
DrainState drain() override;
@@ -709,10 +759,13 @@ class MemCtrl : public qos::MemCtrl
protected:
Tick recvAtomic(PacketPtr pkt);
Tick recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor);
void recvFunctional(PacketPtr pkt);
bool recvTimingReq(PacketPtr pkt);
virtual Tick recvAtomic(PacketPtr pkt);
virtual Tick recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor);
virtual void recvFunctional(PacketPtr pkt);
virtual bool recvTimingReq(PacketPtr pkt);
bool recvFunctionalLogic(PacketPtr pkt, MemInterface* mem_intr);
Tick recvAtomicLogic(PacketPtr pkt, MemInterface* mem_intr);
};