mem: Make MemCtrl a ClockedObject

Made DRAMCtrl a ClockedObject, with DRAMInterface
defined as an AbstractMemory. The address
ranges are now defined per interface. Currently
the model only includes a DRAMInterface but this
can be expanded for other media types.

The controller object includes a parameter to the
interface, which is setup when gem5 is configured.

Change-Id: I6a368b845d574a713c7196c5671188ca8c1dc5e8
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/28968
Reviewed-by: Jason Lowe-Power <power.jg@gmail.com>
Maintainer: Jason Lowe-Power <power.jg@gmail.com>
Tested-by: kokoro <noreply+kokoro@google.com>
This commit is contained in:
Wendy Elsasser
2020-02-07 18:00:57 -06:00
committed by Jason Lowe-Power
parent 518e79ad2d
commit 4acc419b6f
26 changed files with 1913 additions and 1736 deletions

View File

@@ -40,7 +40,7 @@ import m5.objects
from common import ObjectList
from common import HMC
def create_mem_ctrl(cls, r, i, nbr_mem_ctrls, intlv_bits, intlv_size,\
def create_mem_intf(intf, r, i, nbr_mem_ctrls, intlv_bits, intlv_size,
xor_low_bit):
"""
Helper function for creating a single memory controller from the given
@@ -63,32 +63,32 @@ def create_mem_ctrl(cls, r, i, nbr_mem_ctrls, intlv_bits, intlv_size,\
# Create an instance so we can figure out the address
# mapping and row-buffer size
ctrl = cls()
interface = intf()
# Only do this for DRAMs
if issubclass(cls, m5.objects.DRAMCtrl):
if issubclass(intf, m5.objects.DRAMInterface):
# If the channel bits are appearing after the column
# bits, we need to add the appropriate number of bits
# for the row buffer size
if ctrl.addr_mapping.value == 'RoRaBaChCo':
if interface.addr_mapping.value == 'RoRaBaChCo':
# This computation only really needs to happen
# once, but as we rely on having an instance we
# end up having to repeat it for each and every
# one
rowbuffer_size = ctrl.device_rowbuffer_size.value * \
ctrl.devices_per_rank.value
rowbuffer_size = interface.device_rowbuffer_size.value * \
interface.devices_per_rank.value
intlv_low_bit = int(math.log(rowbuffer_size, 2))
# We got all we need to configure the appropriate address
# range
ctrl.range = m5.objects.AddrRange(r.start, size = r.size(),
interface.range = m5.objects.AddrRange(r.start, size = r.size(),
intlvHighBit = \
intlv_low_bit + intlv_bits - 1,
xorHighBit = xor_high_bit,
intlvBits = intlv_bits,
intlvMatch = i)
return ctrl
return interface
def config_mem(options, system):
"""
@@ -148,10 +148,10 @@ def config_mem(options, system):
if 2 ** intlv_bits != nbr_mem_ctrls:
fatal("Number of memory channels must be a power of 2")
cls = ObjectList.mem_list.get(opt_mem_type)
intf = ObjectList.mem_list.get(opt_mem_type)
mem_ctrls = []
if opt_elastic_trace_en and not issubclass(cls, m5.objects.SimpleMemory):
if opt_elastic_trace_en and not issubclass(intf, m5.objects.SimpleMemory):
fatal("When elastic trace is enabled, configure mem-type as "
"simple-mem.")
@@ -162,36 +162,53 @@ def config_mem(options, system):
intlv_size = max(opt_mem_channels_intlv, system.cache_line_size.value)
# For every range (most systems will only have one), create an
# array of controllers and set their parameters to match their
# address mapping in the case of a DRAM
# array of memory interfaces and set their parameters to match
# their address mapping in the case of a DRAM
for r in system.mem_ranges:
for i in range(nbr_mem_ctrls):
mem_ctrl = create_mem_ctrl(cls, r, i, nbr_mem_ctrls, intlv_bits,
# Create the DRAM interface
dram_intf = create_mem_intf(intf, r, i, nbr_mem_ctrls, intlv_bits,
intlv_size, opt_xor_low_bit)
# Set the number of ranks based on the command-line
# options if it was explicitly set
if issubclass(cls, m5.objects.DRAMCtrl) and opt_mem_ranks:
mem_ctrl.ranks_per_channel = opt_mem_ranks
if issubclass(intf, m5.objects.DRAMInterface) and opt_mem_ranks:
dram_intf.ranks_per_channel = opt_mem_ranks
# Enable low-power DRAM states if option is set
if issubclass(cls, m5.objects.DRAMCtrl):
mem_ctrl.enable_dram_powerdown = opt_dram_powerdown
if issubclass(intf, m5.objects.DRAMInterface):
dram_intf.enable_dram_powerdown = opt_dram_powerdown
if opt_elastic_trace_en:
mem_ctrl.latency = '1ns'
dram_intf.latency = '1ns'
print("For elastic trace, over-riding Simple Memory "
"latency to 1ns.")
# Create the controller that will drive the interface
if opt_mem_type == "HMC_2500_1x32":
# The static latency of the vault controllers is estimated
# to be smaller than a full DRAM channel controller
mem_ctrl = m5.objects.DRAMCtrl(min_writes_per_switch = 8,
static_backend_latency = '4ns',
static_frontend_latency = '4ns')
else:
mem_ctrl = m5.objects.DRAMCtrl()
# Hookup the controller to the interface and add to the list
mem_ctrl.dram = dram_intf
mem_ctrls.append(mem_ctrl)
# Create a controller and connect the interfaces to a controller
for i in range(len(mem_ctrls)):
if opt_mem_type == "HMC_2500_1x32":
# Connect the controllers to the membus
mem_ctrls[i].port = xbar[i/4].master
# Set memory device size. There is an independent controller for
# each vault. All vaults are same size.
mem_ctrls[i].dram.device_size = options.hmc_dev_vault_size
else:
# Connect the controllers to the membus
mem_ctrls[i].port = xbar.master
subsystem.mem_ctrls = mem_ctrls
# Connect the controllers to the membus
for i in range(len(subsystem.mem_ctrls)):
if opt_mem_type == "HMC_2500_1x32":
subsystem.mem_ctrls[i].port = xbar[i/4].master
# Set memory device size. There is an independent controller for
# each vault. All vaults are same size.
subsystem.mem_ctrls[i].device_size = options.hmc_dev_vault_size
else:
subsystem.mem_ctrls[i].port = xbar.master

View File

@@ -111,14 +111,19 @@ MemConfig.config_mem(args, system)
# Sanity check for memory controller class.
if not isinstance(system.mem_ctrls[0], m5.objects.DRAMCtrl):
fatal("This script assumes the memory is a DRAMCtrl subclass")
fatal("This script assumes the controller is a DRAMCtrl subclass")
if not isinstance(system.mem_ctrls[0].dram, m5.objects.DRAMInterface):
fatal("This script assumes the memory is a DRAMInterface subclass")
# There is no point slowing things down by saving any data.
system.mem_ctrls[0].null = True
system.mem_ctrls[0].dram.null = True
# enable DRAM low power states
system.mem_ctrls[0].dram.enable_dram_powerdown = True
# Set the address mapping based on input argument
system.mem_ctrls[0].addr_mapping = args.addr_map
system.mem_ctrls[0].page_policy = args.page_policy
system.mem_ctrls[0].dram.addr_mapping = args.addr_map
system.mem_ctrls[0].dram.page_policy = args.page_policy
# We create a traffic generator state for each param combination we want to
# test. Each traffic generator state is specified in the config file and the
@@ -132,22 +137,22 @@ cfg_file_path = os.path.dirname(__file__) + "/" +cfg_file_name
cfg_file = open(cfg_file_path, 'w')
# Get the number of banks
nbr_banks = int(system.mem_ctrls[0].banks_per_rank.value)
nbr_banks = int(system.mem_ctrls[0].dram.banks_per_rank.value)
# determine the burst size in bytes
burst_size = int((system.mem_ctrls[0].devices_per_rank.value *
system.mem_ctrls[0].device_bus_width.value *
system.mem_ctrls[0].burst_length.value) / 8)
burst_size = int((system.mem_ctrls[0].dram.devices_per_rank.value *
system.mem_ctrls[0].dram.device_bus_width.value *
system.mem_ctrls[0].dram.burst_length.value) / 8)
# next, get the page size in bytes (the rowbuffer size is already in bytes)
page_size = system.mem_ctrls[0].devices_per_rank.value * \
system.mem_ctrls[0].device_rowbuffer_size.value
page_size = system.mem_ctrls[0].dram.devices_per_rank.value * \
system.mem_ctrls[0].dram.device_rowbuffer_size.value
# Inter-request delay should be such that we can hit as many transitions
# to/from low power states as possible to. We provide a min and max itt to the
# traffic generator and it randomises in the range. The parameter is in
# seconds and we need it in ticks (ps).
itt_min = system.mem_ctrls[0].tBURST.value * 1000000000000
itt_min = system.mem_ctrls[0].dram.tBURST.value * 1000000000000
#The itt value when set to (tRAS + tRP + tCK) covers the case where
# a read command is delayed beyond the delay from ACT to PRE_PDN entry of the
@@ -155,9 +160,9 @@ itt_min = system.mem_ctrls[0].tBURST.value * 1000000000000
# between a write and power down entry will be tRCD + tCL + tWR + tRP + tCK.
# As we use this delay as a unit and create multiples of it as bigger delays
# for the sweep, this parameter works for reads, writes and mix of them.
pd_entry_time = (system.mem_ctrls[0].tRAS.value +
system.mem_ctrls[0].tRP.value +
system.mem_ctrls[0].tCK.value) * 1000000000000
pd_entry_time = (system.mem_ctrls[0].dram.tRAS.value +
system.mem_ctrls[0].dram.tRP.value +
system.mem_ctrls[0].dram.tCK.value) * 1000000000000
# We sweep itt max using the multipliers specified by the user.
itt_max_str = args.itt_list.strip().split()

View File

@@ -116,13 +116,15 @@ MemConfig.config_mem(options, system)
# the following assumes that we are using the native DRAM
# controller, check to be sure
if not isinstance(system.mem_ctrls[0], m5.objects.DRAMCtrl):
fatal("This script assumes the memory is a DRAMCtrl subclass")
fatal("This script assumes the controller is a DRAMCtrl subclass")
if not isinstance(system.mem_ctrls[0].dram, m5.objects.DRAMInterface):
fatal("This script assumes the memory is a DRAMInterface subclass")
# there is no point slowing things down by saving any data
system.mem_ctrls[0].null = True
system.mem_ctrls[0].dram.null = True
# Set the address mapping based on input argument
system.mem_ctrls[0].addr_mapping = options.addr_map
system.mem_ctrls[0].dram.addr_mapping = options.addr_map
# stay in each state for 0.25 ms, long enough to warm things up, and
# short enough to avoid hitting a refresh
@@ -133,21 +135,21 @@ period = 250000000
# the DRAM maximum bandwidth to ensure that it is saturated
# get the number of banks
nbr_banks = system.mem_ctrls[0].banks_per_rank.value
nbr_banks = system.mem_ctrls[0].dram.banks_per_rank.value
# determine the burst length in bytes
burst_size = int((system.mem_ctrls[0].devices_per_rank.value *
system.mem_ctrls[0].device_bus_width.value *
system.mem_ctrls[0].burst_length.value) / 8)
burst_size = int((system.mem_ctrls[0].dram.devices_per_rank.value *
system.mem_ctrls[0].dram.device_bus_width.value *
system.mem_ctrls[0].dram.burst_length.value) / 8)
# next, get the page size in bytes
page_size = system.mem_ctrls[0].devices_per_rank.value * \
system.mem_ctrls[0].device_rowbuffer_size.value
page_size = system.mem_ctrls[0].dram.devices_per_rank.value * \
system.mem_ctrls[0].dram.device_rowbuffer_size.value
# match the maximum bandwidth of the memory, the parameter is in seconds
# and we need it in ticks (ps)
itt = getattr(system.mem_ctrls[0].tBURST_MIN, 'value',
system.mem_ctrls[0].tBURST.value) * 1000000000000
itt = getattr(system.mem_ctrls[0].dram.tBURST_MIN, 'value',
system.mem_ctrls[0].dram.tBURST.value) * 1000000000000
# assume we start at 0
max_addr = mem_range.end

View File

@@ -217,7 +217,7 @@ cfg_file.close()
proto_tester = TrafficGen(config_file = cfg_file_path)
# Set up the system along with a DRAM controller
system = System(physmem = DDR3_1600_8x8())
system = System(physmem = DRAMCtrl(dram = DDR3_1600_8x8()))
system.voltage_domain = VoltageDomain(voltage = '1V')

View File

@@ -77,8 +77,9 @@ if m5.defines.buildEnv['TARGET_ISA'] == "x86":
system.cpu.interrupts[0].int_slave = system.membus.master
# Create a DDR3 memory controller and connect it to the membus
system.mem_ctrl = DDR3_1600_8x8()
system.mem_ctrl.range = system.mem_ranges[0]
system.mem_ctrl = DRAMCtrl()
system.mem_ctrl.dram = DDR3_1600_8x8()
system.mem_ctrl.dram.range = system.mem_ranges[0]
system.mem_ctrl.port = system.membus.master
# Connect the system up to the membus

View File

@@ -132,8 +132,9 @@ if m5.defines.buildEnv['TARGET_ISA'] == "x86":
system.system_port = system.membus.slave
# Create a DDR3 memory controller
system.mem_ctrl = DDR3_1600_8x8()
system.mem_ctrl.range = system.mem_ranges[0]
system.mem_ctrl = DRAMCtrl()
system.mem_ctrl.dram = DDR3_1600_8x8()
system.mem_ctrl.dram.range = system.mem_ranges[0]
system.mem_ctrl.port = system.membus.master
# Create a process for a simple "Hello World" application

View File

@@ -76,8 +76,9 @@ system.cpu.interrupts[0].int_master = system.membus.slave
system.cpu.interrupts[0].int_slave = system.membus.master
# Create a DDR3 memory controller and connect it to the membus
system.mem_ctrl = DDR3_1600_8x8()
system.mem_ctrl.range = system.mem_ranges[0]
system.mem_ctrl = DRAMCtrl()
system.mem_ctrl.dram = DDR3_1600_8x8()
system.mem_ctrl.dram.range = system.mem_ranges[0]
system.mem_ctrl.port = system.membus.master
# Connect the system up to the membus

View File

@@ -74,8 +74,9 @@ system.cpu.interrupts[0].int_master = system.membus.slave
system.cpu.interrupts[0].int_slave = system.membus.master
# Create a DDR3 memory controller and connect it to the membus
system.mem_ctrl = DDR3_1600_8x8()
system.mem_ctrl.range = system.mem_ranges[0]
system.mem_ctrl = DRAMCtrl()
system.mem_ctrl.dram = DDR3_1600_8x8()
system.mem_ctrl.dram.range = system.mem_ranges[0]
system.mem_ctrl.port = system.membus.master
# Connect the system up to the membus

View File

@@ -68,8 +68,9 @@ system.mem_ranges = [AddrRange('512MB')] # Create an address range
system.cpu = [TimingSimpleCPU() for i in range(2)]
# Create a DDR3 memory controller and connect it to the membus
system.mem_ctrl = DDR3_1600_8x8()
system.mem_ctrl.range = system.mem_ranges[0]
system.mem_ctrl = DRAMCtrl()
system.mem_ctrl.dram = DDR3_1600_8x8()
system.mem_ctrl.dram.range = system.mem_ranges[0]
# create the interrupt controller for the CPU and connect to the membus
for cpu in system.cpu:

View File

@@ -130,15 +130,16 @@ def setup_memory_controllers(system, ruby, dir_cntrls, options):
dir_ranges = []
for r in system.mem_ranges:
mem_type = ObjectList.mem_list.get(options.mem_type)
mem_ctrl = MemConfig.create_mem_ctrl(mem_type, r, index,
dram_intf = MemConfig.create_mem_intf(mem_type, r, index,
options.num_dirs, int(math.log(options.num_dirs, 2)),
intlv_size, options.xor_low_bit)
mem_ctrl = m5.objects.DRAMCtrl(dram = dram_intf)
if options.access_backing_store:
mem_ctrl.kvm_map=False
mem_ctrls.append(mem_ctrl)
dir_ranges.append(mem_ctrl.range)
dir_ranges.append(mem_ctrl.dram.range)
if crossbar != None:
mem_ctrl.port = crossbar.master

File diff suppressed because it is too large Load Diff

1473
src/mem/DRAMInterface.py Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,6 @@
# -*- mode:python -*-
#
# Copyright (c) 2018-2019 ARM Limited
# Copyright (c) 2018-2020 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
@@ -47,6 +47,7 @@ SimObject('AbstractMemory.py')
SimObject('AddrMapper.py')
SimObject('Bridge.py')
SimObject('DRAMCtrl.py')
SimObject('DRAMInterface.py')
SimObject('ExternalMaster.py')
SimObject('ExternalSlave.py')
SimObject('MemObject.py')

View File

@@ -47,6 +47,7 @@
#include "debug/DRAMState.hh"
#include "debug/Drain.hh"
#include "debug/QOS.hh"
#include "params/DRAMInterface.hh"
#include "sim/system.hh"
using namespace std;
@@ -58,12 +59,13 @@ DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
retryRdReq(false), retryWrReq(false),
nextReqEvent([this]{ processNextReqEvent(); }, name()),
respondEvent([this]{ processRespondEvent(); }, name()),
readBufferSize(p->read_buffer_size),
writeBufferSize(p->write_buffer_size),
dram(p->dram),
readBufferSize(dram->readBufferSize),
writeBufferSize(dram->writeBufferSize),
writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
minWritesPerSwitch(p->min_writes_per_switch),
writesThisTime(0), readsThisTime(0), tCS(p->tCS),
writesThisTime(0), readsThisTime(0),
memSchedPolicy(p->mem_sched_policy),
frontendLatency(p->static_frontend_latency),
backendLatency(p->static_backend_latency),
@@ -74,37 +76,23 @@ DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
readQueue.resize(p->qos_priorities);
writeQueue.resize(p->qos_priorities);
dram->setCtrl(this);
// perform a basic check of the write thresholds
if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
fatal("Write buffer low threshold %d must be smaller than the "
"high threshold %d\n", p->write_low_thresh_perc,
p->write_high_thresh_perc);
// determine the rows per bank by looking at the total capacity
uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());
DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
AbstractMemory::size());
// create a DRAM interface
// will only populate the ranks if DRAM is configured
dram = new DRAMInterface(*this, p, capacity, range);
DPRINTF(DRAM, "Created DRAM interface \n");
}
void
DRAMCtrl::init()
{
MemCtrl::init();
if (!port.isConnected()) {
fatal("DRAMCtrl %s is unconnected!\n", name());
} else {
port.sendRangeChange();
}
dram->init(range);
}
void
@@ -114,8 +102,6 @@ DRAMCtrl::startup()
isTimingMode = system()->isTimingMode();
if (isTimingMode) {
dram->startupRanks();
// shift the bus busy time sufficiently far ahead that we never
// have to worry about negative values when computing the time for
// the next request, this will add an insignificant bubble at the
@@ -133,7 +119,7 @@ DRAMCtrl::recvAtomic(PacketPtr pkt)
"is responding");
// do the actual memory access and turn the packet into a response
access(pkt);
dram->access(pkt);
Tick latency = 0;
if (pkt->hasData()) {
@@ -263,7 +249,7 @@ DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
// address of first DRAM packet is kept unaligned. Subsequent DRAM packets
// are aligned to burst size boundaries. This is to ensure we accurately
// check read packets against packets in write queue.
const Addr base_addr = getCtrlAddr(pkt->getAddr());
const Addr base_addr = dram->getCtrlAddr(pkt->getAddr());
Addr addr = base_addr;
unsigned pktsServicedByWrQ = 0;
BurstHelper* burst_helper = NULL;
@@ -363,7 +349,7 @@ DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
// if the request size is larger than burst size, the pkt is split into
// multiple DRAM packets
const Addr base_addr = getCtrlAddr(pkt->getAddr());
const Addr base_addr = dram->getCtrlAddr(pkt->getAddr());
Addr addr = base_addr;
uint32_t burstSize = dram->bytesPerBurst();
for (int cnt = 0; cnt < pktCount; ++cnt) {
@@ -526,7 +512,7 @@ DRAMCtrl::processRespondEvent()
DRAMPacket* dram_pkt = respQueue.front();
// media specific checks and functions when read response is complete
dram->respondEventDRAM(dram_pkt->rank);
dram->respondEvent(dram_pkt->rank);
if (dram_pkt->burstHelper) {
// it is a split packet
@@ -727,12 +713,12 @@ DRAMCtrl::chooseNextFRFCFS(DRAMPacketQueue& queue, Tick extra_col_delay)
void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
DPRINTF(DRAM, "Responding to Address %lld.. ",pkt->getAddr());
DPRINTF(DRAM, "Responding to Address %lld.. \n",pkt->getAddr());
bool needsResponse = pkt->needsResponse();
// do the actual memory access which also turns the packet into a
// response
access(pkt);
dram->access(pkt);
// turn packet around to go back to requester if response expected
if (needsResponse) {
@@ -877,9 +863,9 @@ DRAMInterface::activateBank(Rank& rank_ref, Bank& bank_ref,
// if not, shift to next burst window
Tick act_at;
if (twoCycleActivate)
act_at = ctrl.verifyMultiCmd(act_tick, tAAD);
act_at = ctrl->verifyMultiCmd(act_tick, tAAD);
else
act_at = ctrl.verifySingleCmd(act_tick);
act_at = ctrl->verifySingleCmd(act_tick);
DPRINTF(DRAM, "Activate at tick %d\n", act_at);
@@ -997,7 +983,7 @@ DRAMInterface::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_tick,
// Issuing an explicit PRE command
// Verify that we have command bandwidth to issue the precharge
// if not, shift to next burst window
pre_at = ctrl.verifySingleCmd(pre_tick);
pre_at = ctrl->verifySingleCmd(pre_tick);
// enforce tPPD
for (int i = 0; i < banksPerRank; i++) {
rank_ref.banks[i].preAllowedAt = std::max(pre_at + tPPD,
@@ -1096,9 +1082,9 @@ DRAMInterface::doBurstAccess(DRAMPacket* dram_pkt, Tick next_burst_at,
// verify that we have command bandwidth to issue the burst
// if not, shift to next burst window
if (dataClockSync && ((cmd_at - rank_ref.lastBurstTick) > clkResyncDelay))
cmd_at = ctrl.verifyMultiCmd(cmd_at, tCK);
cmd_at = ctrl->verifyMultiCmd(cmd_at, tCK);
else
cmd_at = ctrl.verifySingleCmd(cmd_at);
cmd_at = ctrl->verifySingleCmd(cmd_at);
// if we are interleaving bursts, ensure that
// 1) we don't double interleave on next burst issue
@@ -1196,7 +1182,7 @@ DRAMInterface::doBurstAccess(DRAMPacket* dram_pkt, Tick next_burst_at,
bool got_more_hits = false;
bool got_bank_conflict = false;
for (uint8_t i = 0; i < ctrl.numPriorities(); ++i) {
for (uint8_t i = 0; i < ctrl->numPriorities(); ++i) {
auto p = queue[i].begin();
// keep on looking until we find a hit or reach the end of the
// queue
@@ -1267,6 +1253,7 @@ DRAMInterface::doBurstAccess(DRAMPacket* dram_pkt, Tick next_burst_at,
// Update latency stats
stats.totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
stats.totQLat += cmd_at - dram_pkt->entryTime;
stats.totBusLat += tBURST;
} else {
// Schedule write done event to decrement event count
// after the readyTime has been reached
@@ -1350,13 +1337,9 @@ DRAMCtrl::doBurstAccess(DRAMPacket* dram_pkt)
// Update latency stats
stats.masterReadTotalLat[dram_pkt->masterId()] +=
dram_pkt->readyTime - dram_pkt->entryTime;
stats.bytesRead += dram->bytesPerBurst();
stats.totBusLat += dram->burstDelay();
stats.masterReadBytes[dram_pkt->masterId()] += dram_pkt->size;
} else {
++writesThisTime;
stats.bytesWritten += dram->bytesPerBurst();
stats.masterWriteBytes[dram_pkt->masterId()] += dram_pkt->size;
stats.masterWriteTotalLat[dram_pkt->masterId()] +=
dram_pkt->readyTime - dram_pkt->entryTime;
@@ -1458,8 +1441,9 @@ DRAMCtrl::processNextReqEvent()
// Figure out which read request goes next
// If we are changing command type, incorporate the minimum
// bus turnaround delay which will be tCS (different rank) case
to_read = chooseNext((*queue), switched_cmd_type ? tCS : 0);
// bus turnaround delay which will be rank to rank delay
to_read = chooseNext((*queue), switched_cmd_type ?
dram->rankDelay() : 0);
if (to_read != queue->end()) {
// candidate read found
@@ -1538,7 +1522,8 @@ DRAMCtrl::processNextReqEvent()
// If we are changing command type, incorporate the minimum
// bus turnaround delay
to_write = chooseNext((*queue),
switched_cmd_type ? std::min(dram->minRdToWr(), tCS) : 0);
switched_cmd_type ? std::min(dram->minRdToWr(),
dram->rankDelay()) : 0);
if (to_write != queue->end()) {
write_found = true;
@@ -1611,11 +1596,8 @@ DRAMCtrl::processNextReqEvent()
}
}
DRAMInterface::DRAMInterface(DRAMCtrl& _ctrl,
const DRAMCtrlParams* _p,
const uint64_t capacity,
const AddrRange range)
: SimObject(_p), ctrl(_ctrl),
DRAMInterface::DRAMInterface(const DRAMInterfaceParams* _p)
: AbstractMemory(_p),
addrMapping(_p->addr_mapping),
burstSize((_p->devices_per_rank * _p->burst_length *
_p->device_bus_width) / 8),
@@ -1630,7 +1612,7 @@ DRAMInterface::DRAMInterface(DRAMCtrl& _ctrl,
bankGroupsPerRank(_p->bank_groups_per_rank),
bankGroupArch(_p->bank_groups_per_rank > 0),
banksPerRank(_p->banks_per_rank), rowsPerBank(0),
tCK(_p->tCK), tCL(_p->tCL), tBURST(_p->tBURST),
tCK(_p->tCK), tCS(_p->tCS), tCL(_p->tCL), tBURST(_p->tBURST),
tBURST_MIN(_p->tBURST_MIN), tBURST_MAX(_p->tBURST_MAX), tRTW(_p->tRTW),
tCCD_L_WR(_p->tCCD_L_WR), tCCD_L(_p->tCCD_L), tRCD(_p->tRCD),
tRP(_p->tRP), tRAS(_p->tRAS), tWR(_p->tWR), tRTP(_p->tRTP),
@@ -1646,13 +1628,15 @@ DRAMInterface::DRAMInterface(DRAMCtrl& _ctrl,
wrToRdDly(tCL + tBURST + _p->tWTR), rdToWrDly(tBURST + tRTW),
wrToRdDlySameBG(tCL + _p->tBURST_MAX + _p->tWTR_L),
rdToWrDlySameBG(tRTW + _p->tBURST_MAX),
rankToRankDly(ctrl.rankDelay() + tBURST),
rankToRankDly(tCS + tBURST),
pageMgmt(_p->page_policy),
maxAccessesPerRow(_p->max_accesses_per_row),
timeStampOffset(0), activeRank(0),
enableDRAMPowerdown(_p->enable_dram_powerdown),
lastStatsResetTick(0),
stats(_ctrl, *this)
stats(*this),
readBufferSize(_p->read_buffer_size),
writeBufferSize(_p->write_buffer_size)
{
fatal_if(!isPowerOf2(burstSize), "DRAM burst size %d is not allowed, "
"must be a power of two\n", burstSize);
@@ -1664,7 +1648,7 @@ DRAMInterface::DRAMInterface(DRAMCtrl& _ctrl,
for (int i = 0; i < ranksPerChannel; i++) {
DPRINTF(DRAM, "Creating DRAM rank %d \n", i);
Rank* rank = new Rank(ctrl, _p, i, *this);
Rank* rank = new Rank(_p, i, *this);
ranks.push_back(rank);
}
@@ -1672,6 +1656,11 @@ DRAMInterface::DRAMInterface(DRAMCtrl& _ctrl,
uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank *
ranksPerChannel;
uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());
DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
AbstractMemory::size());
// if actual DRAM size does not match memory capacity in system warn!
if (deviceCapacity != capacity / (1024 * 1024))
warn("DRAM device capacity (%d Mbytes) does not match the "
@@ -1726,8 +1715,10 @@ DRAMInterface::DRAMInterface(DRAMCtrl& _ctrl,
}
void
DRAMInterface::init(AddrRange range)
DRAMInterface::init()
{
AbstractMemory::init();
// a bit of sanity checks on the interleaving, save it for here to
// ensure that the system pointer is initialised
if (range.interleaved()) {
@@ -1749,7 +1740,7 @@ DRAMInterface::init(AddrRange range)
// channel striping has to be done at a granularity that
// is equal or larger to a cache line
if (ctrl.system()->cacheLineSize() > range.granularity()) {
if (system()->cacheLineSize() > range.granularity()) {
fatal("Channel interleaving of %s must be at least as large "
"as the cache line size\n", name());
}
@@ -1766,10 +1757,12 @@ DRAMInterface::init(AddrRange range)
}
void
DRAMInterface::startupRanks()
DRAMInterface::startup()
{
// timestamp offset should be in clock cycles for DRAMPower
timeStampOffset = divCeil(curTick(), tCK);
if (system()->isTimingMode()) {
// timestamp offset should be in clock cycles for DRAMPower
timeStampOffset = divCeil(curTick(), tCK);
}
for (auto r : ranks) {
r->startup(curTick() + tREFI - tRP);
@@ -1815,7 +1808,7 @@ DRAMInterface::isBusy()
}
void
DRAMInterface::respondEventDRAM(uint8_t rank)
DRAMInterface::respondEvent(uint8_t rank)
{
Rank& rank_ref = *ranks[rank];
@@ -1956,7 +1949,7 @@ DRAMInterface::minBankPrep(const DRAMPacketQueue& queue,
std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP;
// When is the earliest the R/W burst can issue?
const Tick col_allowed_at = ctrl.inReadBusState(false) ?
const Tick col_allowed_at = ctrl->inReadBusState(false) ?
ranks[i]->banks[j].rdAllowedAt :
ranks[i]->banks[j].wrAllowedAt;
Tick col_at = std::max(col_allowed_at, act_at + tRCD);
@@ -1996,9 +1989,15 @@ DRAMInterface::minBankPrep(const DRAMPacketQueue& queue,
return make_pair(bank_mask, hidden_bank_prep);
}
DRAMInterface::Rank::Rank(DRAMCtrl& _ctrl, const DRAMCtrlParams* _p, int _rank,
DRAMInterface& _dram)
: EventManager(&_ctrl), ctrl(_ctrl), dram(_dram),
DRAMInterface*
DRAMInterfaceParams::create()
{
return new DRAMInterface(this);
}
DRAMInterface::Rank::Rank(const DRAMInterfaceParams* _p,
int _rank, DRAMInterface& _dram)
: EventManager(&_dram), dram(_dram),
pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE),
pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE),
refreshState(REF_IDLE), inLowPowerState(false), rank(_rank),
@@ -2011,7 +2010,7 @@ DRAMInterface::Rank::Rank(DRAMCtrl& _ctrl, const DRAMCtrlParams* _p, int _rank,
refreshEvent([this]{ processRefreshEvent(); }, name()),
powerEvent([this]{ processPowerEvent(); }, name()),
wakeUpEvent([this]{ processWakeUpEvent(); }, name()),
stats(_ctrl, *this)
stats(_dram, *this)
{
for (int b = 0; b < _p->banks_per_rank; b++) {
banks[b].bank = b;
@@ -2062,8 +2061,10 @@ bool
DRAMInterface::Rank::isQueueEmpty() const
{
// check commands in Q based on current bus direction
bool no_queued_cmds = (ctrl.inReadBusState(true) && (readEntries == 0))
|| (ctrl.inWriteBusState(true) && (writeEntries == 0));
bool no_queued_cmds = (dram.ctrl->inReadBusState(true) &&
(readEntries == 0))
|| (dram.ctrl->inWriteBusState(true) &&
(writeEntries == 0));
return no_queued_cmds;
}
@@ -2187,7 +2188,7 @@ DRAMInterface::Rank::processRefreshEvent()
// if a request is at the moment being handled and this request is
// accessing the current rank then wait for it to finish
if ((rank == dram.activeRank)
&& (ctrl.requestEventScheduled())) {
&& (dram.ctrl->requestEventScheduled())) {
// hand control over to the request loop until it is
// evaluated next
DPRINTF(DRAM, "Refresh awaiting draining\n");
@@ -2262,7 +2263,7 @@ DRAMInterface::Rank::processRefreshEvent()
// or have outstanding ACT,RD/WR,Auto-PRE sequence scheduled
// should have outstanding precharge or read response event
assert(prechargeEvent.scheduled() ||
ctrl.respondEventScheduled());
dram.ctrl->respondEventScheduled());
// will start refresh when pwrState transitions to IDLE
}
@@ -2322,8 +2323,8 @@ DRAMInterface::Rank::processRefreshEvent()
assert(!powerEvent.scheduled());
if ((ctrl.drainState() == DrainState::Draining) ||
(ctrl.drainState() == DrainState::Drained)) {
if ((dram.ctrl->drainState() == DrainState::Draining) ||
(dram.ctrl->drainState() == DrainState::Drained)) {
// if draining, do not re-enter low-power mode.
// simply go to IDLE and wait
schedulePowerEvent(PWR_IDLE, curTick());
@@ -2548,10 +2549,10 @@ DRAMInterface::Rank::processPowerEvent()
}
// completed refresh event, ensure next request is scheduled
if (!ctrl.requestEventScheduled()) {
if (!dram.ctrl->requestEventScheduled()) {
DPRINTF(DRAM, "Scheduling next request after refreshing"
" rank %d\n", rank);
ctrl.restartScheduler(curTick());
dram.ctrl->restartScheduler(curTick());
}
}
@@ -2610,8 +2611,8 @@ DRAMInterface::Rank::processPowerEvent()
// bypass auto-refresh and go straight to SREF, where memory
// will issue refresh immediately upon entry
if (pwrStatePostRefresh == PWR_PRE_PDN && isQueueEmpty() &&
(ctrl.drainState() != DrainState::Draining) &&
(ctrl.drainState() != DrainState::Drained) &&
(dram.ctrl->drainState() != DrainState::Draining) &&
(dram.ctrl->drainState() != DrainState::Drained) &&
dram.enableDRAMPowerdown) {
DPRINTF(DRAMState, "Rank %d bypassing refresh and transitioning "
"to self refresh at %11u tick\n", rank, curTick());
@@ -2712,7 +2713,7 @@ DRAMInterface::Rank::resetStats() {
bool
DRAMInterface::Rank::forceSelfRefreshExit() const {
return (readEntries != 0) ||
(ctrl.inWriteBusState(true) && (writeEntries != 0));
(dram.ctrl->inWriteBusState(true) && (writeEntries != 0));
}
DRAMCtrl::CtrlStats::CtrlStats(DRAMCtrl &_ctrl)
@@ -2723,15 +2724,15 @@ DRAMCtrl::CtrlStats::CtrlStats(DRAMCtrl &_ctrl)
ADD_STAT(writeReqs, "Number of write requests accepted"),
ADD_STAT(readBursts,
"Number of DRAM read bursts, "
"Number of controller read bursts, "
"including those serviced by the write queue"),
ADD_STAT(writeBursts,
"Number of DRAM write bursts, "
"Number of controller write bursts, "
"including those merged in the write queue"),
ADD_STAT(servicedByWrQ,
"Number of DRAM read bursts serviced by the write queue"),
"Number of controller read bursts serviced by the write queue"),
ADD_STAT(mergedWrBursts,
"Number of DRAM write bursts merged with an existing one"),
"Number of controller write bursts merged with an existing one"),
ADD_STAT(neitherReadNorWriteReqs,
"Number of requests that are neither read nor write"),
@@ -2739,9 +2740,6 @@ DRAMCtrl::CtrlStats::CtrlStats(DRAMCtrl &_ctrl)
ADD_STAT(avgRdQLen, "Average read queue length when enqueuing"),
ADD_STAT(avgWrQLen, "Average write queue length when enqueuing"),
ADD_STAT(totBusLat, "Total ticks spent in databus transfers"),
ADD_STAT(avgBusLat, "Average bus latency per DRAM burst"),
ADD_STAT(numRdRetry, "Number of times read queue was full causing retry"),
ADD_STAT(numWrRetry, "Number of times write queue was full causing retry"),
@@ -2756,22 +2754,13 @@ DRAMCtrl::CtrlStats::CtrlStats(DRAMCtrl &_ctrl)
ADD_STAT(wrPerTurnAround,
"Writes before turning the bus around for reads"),
ADD_STAT(bytesRead, "Total number of bytes read from memory"),
ADD_STAT(bytesReadWrQ, "Total number of bytes read from write queue"),
ADD_STAT(bytesWritten, "Total number of bytes written to DRAM"),
ADD_STAT(bytesReadSys, "Total read bytes from the system interface side"),
ADD_STAT(bytesWrittenSys,
"Total written bytes from the system interface side"),
ADD_STAT(avgRdBW, "Average DRAM read bandwidth in MiByte/s"),
ADD_STAT(avgWrBW, "Average achieved write bandwidth in MiByte/s"),
ADD_STAT(avgRdBWSys, "Average system read bandwidth in MiByte/s"),
ADD_STAT(avgWrBWSys, "Average system write bandwidth in MiByte/s"),
ADD_STAT(peakBW, "Theoretical peak bandwidth in MiByte/s"),
ADD_STAT(busUtil, "Data bus utilization in percentage"),
ADD_STAT(busUtilRead, "Data bus utilization in percentage for reads"),
ADD_STAT(busUtilWrite, "Data bus utilization in percentage for writes"),
ADD_STAT(totGap, "Total gap between requests"),
ADD_STAT(avgGap, "Average gap between requests"),
@@ -2803,12 +2792,11 @@ DRAMCtrl::CtrlStats::regStats()
{
using namespace Stats;
assert(ctrl._system);
const auto max_masters = ctrl._system->maxMasters();
assert(ctrl.system());
const auto max_masters = ctrl.system()->maxMasters();
avgRdQLen.precision(2);
avgWrQLen.precision(2);
avgBusLat.precision(2);
readPktSize.init(ceilLog2(ctrl.dram->bytesPerBurst()) + 1);
writePktSize.init(ceilLog2(ctrl.dram->bytesPerBurst()) + 1);
@@ -2823,14 +2811,9 @@ DRAMCtrl::CtrlStats::regStats()
.init(ctrl.writeBufferSize)
.flags(nozero);
avgRdBW.precision(2);
avgWrBW.precision(2);
avgRdBWSys.precision(2);
avgWrBWSys.precision(2);
peakBW.precision(2);
busUtil.precision(2);
avgGap.precision(2);
busUtilWrite.precision(2);
// per-master bytes read and written to memory
masterReadBytes
@@ -2862,9 +2845,6 @@ DRAMCtrl::CtrlStats::regStats()
.flags(nonan)
.precision(2);
busUtilRead
.precision(2);
masterWriteRate
.flags(nozero | nonan)
.precision(12);
@@ -2878,7 +2858,7 @@ DRAMCtrl::CtrlStats::regStats()
.precision(2);
for (int i = 0; i < max_masters; i++) {
const std::string master = ctrl._system->getMasterName(i);
const std::string master = ctrl.system()->getMasterName(i);
masterReadBytes.subname(i, master);
masterReadRate.subname(i, master);
masterWriteBytes.subname(i, master);
@@ -2892,22 +2872,11 @@ DRAMCtrl::CtrlStats::regStats()
}
// Formula stats
avgBusLat = totBusLat / (readBursts - servicedByWrQ);
avgRdBW = (bytesRead / 1000000) / simSeconds;
avgWrBW = (bytesWritten / 1000000) / simSeconds;
avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;
avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;
peakBW = (SimClock::Frequency / ctrl.dram->burstDataDelay()) *
ctrl.dram->bytesPerBurst() / 1000000;
busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
avgGap = totGap / (readReqs + writeReqs);
busUtilRead = avgRdBW / peakBW * 100;
busUtilWrite = avgWrBW / peakBW * 100;
masterReadRate = masterReadBytes / simSeconds;
masterWriteRate = masterWriteBytes / simSeconds;
masterReadAvgLat = masterReadTotalLat / masterReadAccesses;
@@ -2920,8 +2889,8 @@ DRAMInterface::DRAMStats::resetStats()
dram.lastStatsResetTick = curTick();
}
DRAMInterface::DRAMStats::DRAMStats(DRAMCtrl &_ctrl, DRAMInterface &_dram)
: Stats::Group(&_ctrl, csprintf("dram").c_str()),
DRAMInterface::DRAMStats::DRAMStats(DRAMInterface &_dram)
: Stats::Group(&_dram),
dram(_dram),
ADD_STAT(readBursts, "Number of DRAM read bursts"),
@@ -2931,10 +2900,13 @@ DRAMInterface::DRAMStats::DRAMStats(DRAMCtrl &_ctrl, DRAMInterface &_dram)
ADD_STAT(perBankWrBursts, "Per bank write bursts"),
ADD_STAT(totQLat, "Total ticks spent queuing"),
ADD_STAT(totBusLat, "Total ticks spent in databus transfers"),
ADD_STAT(totMemAccLat,
"Total ticks spent from burst creation until serviced "
"by the DRAM"),
ADD_STAT(avgQLat, "Average queueing delay per DRAM burst"),
ADD_STAT(avgBusLat, "Average bus latency per DRAM burst"),
ADD_STAT(avgMemAccLat, "Average memory access latency per DRAM burst"),
ADD_STAT(readRowHits, "Number of row buffer hits during reads"),
@@ -2947,6 +2919,12 @@ DRAMInterface::DRAMStats::DRAMStats(DRAMCtrl &_ctrl, DRAMInterface &_dram)
ADD_STAT(bytesWritten, "Total number of bytes written to DRAM"),
ADD_STAT(avgRdBW, "Average DRAM read bandwidth in MiBytes/s"),
ADD_STAT(avgWrBW, "Average DRAM write bandwidth in MiBytes/s"),
ADD_STAT(peakBW, "Theoretical peak bandwidth in MiByte/s"),
ADD_STAT(busUtil, "Data bus utilization in percentage"),
ADD_STAT(busUtilRead, "Data bus utilization in percentage for reads"),
ADD_STAT(busUtilWrite, "Data bus utilization in percentage for writes"),
ADD_STAT(pageHitRate, "Row buffer hit rate, read and write combined")
{
@@ -2958,6 +2936,7 @@ DRAMInterface::DRAMStats::regStats()
using namespace Stats;
avgQLat.precision(2);
avgBusLat.precision(2);
avgMemAccLat.precision(2);
readRowHitRate.precision(2);
@@ -2971,10 +2950,16 @@ DRAMInterface::DRAMStats::regStats()
dram.maxAccessesPerRow : dram.rowBufferSize)
.flags(nozero);
peakBW.precision(2);
busUtil.precision(2);
busUtilWrite.precision(2);
busUtilRead.precision(2);
pageHitRate.precision(2);
// Formula stats
avgQLat = totQLat / readBursts;
avgBusLat = totBusLat / readBursts;
avgMemAccLat = totMemAccLat / readBursts;
readRowHitRate = (readRowHits / readBursts) * 100;
@@ -2982,13 +2967,19 @@ DRAMInterface::DRAMStats::regStats()
avgRdBW = (bytesRead / 1000000) / simSeconds;
avgWrBW = (bytesWritten / 1000000) / simSeconds;
peakBW = (SimClock::Frequency / dram.burstDataDelay()) *
dram.bytesPerBurst() / 1000000;
busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
busUtilRead = avgRdBW / peakBW * 100;
busUtilWrite = avgWrBW / peakBW * 100;
pageHitRate = (writeRowHits + readRowHits) /
(writeBursts + readBursts) * 100;
}
DRAMInterface::RankStats::RankStats(DRAMCtrl &_ctrl, Rank &_rank)
: Stats::Group(&_ctrl, csprintf("dram_rank%d", _rank.rank).c_str()),
DRAMInterface::RankStats::RankStats(DRAMInterface &_dram, Rank &_rank)
: Stats::Group(&_dram, csprintf("rank%d", _rank.rank).c_str()),
rank(_rank),
ADD_STAT(actEnergy, "Energy for activate commands per rank (pJ)"),
@@ -3047,7 +3038,7 @@ void
DRAMCtrl::recvFunctional(PacketPtr pkt)
{
// rely on the abstract memory
functionalAccess(pkt);
dram->functionalAccess(pkt);
}
Port &
@@ -3093,6 +3084,7 @@ DRAMCtrl::drainResume()
// if we switched to timing mode, kick things into action,
// and behave as if we restored from a checkpoint
startup();
dram->startup();
} else if (isTimingMode && !system()->isTimingMode()) {
// if we switch from timing mode, stop the refresh events to
// not cause issues with KVM
@@ -3112,7 +3104,7 @@ AddrRangeList
DRAMCtrl::MemoryPort::getAddrRanges() const
{
AddrRangeList ranges;
ranges.push_back(ctrl.getAddrRange());
ranges.push_back(ctrl.dram->getAddrRange());
return ranges;
}

View File

@@ -55,12 +55,15 @@
#include "enums/AddrMap.hh"
#include "enums/MemSched.hh"
#include "enums/PageManage.hh"
#include "mem/abstract_mem.hh"
#include "mem/drampower.hh"
#include "mem/qos/mem_ctrl.hh"
#include "mem/qport.hh"
#include "params/DRAMCtrl.hh"
#include "sim/eventq.hh"
class DRAMInterfaceParams;
/**
* A basic class to track the bank state, i.e. what row is
* currently open (if any), when is the bank free to accept a new
@@ -242,7 +245,7 @@ typedef std::deque<DRAMPacket*> DRAMPacketQueue;
* The DRAMInterface includes a class for individual ranks
* and per rank functions.
*/
class DRAMInterface : public SimObject
class DRAMInterface : public AbstractMemory
{
private:
/**
@@ -342,7 +345,7 @@ class DRAMInterface : public SimObject
class Rank;
struct RankStats : public Stats::Group
{
RankStats(DRAMCtrl &ctrl, Rank &rank);
RankStats(DRAMInterface &dram, Rank &rank);
void regStats() override;
void resetStats() override;
@@ -408,13 +411,6 @@ class DRAMInterface : public SimObject
*/
class Rank : public EventManager
{
protected:
/**
* A reference to the parent DRAMCtrl instance
*/
DRAMCtrl& ctrl;
private:
/**
@@ -534,10 +530,10 @@ class DRAMInterface : public SimObject
*/
Tick lastBurstTick;
Rank(DRAMCtrl& _ctrl, const DRAMCtrlParams* _p, int _rank,
Rank(const DRAMInterfaceParams* _p, int _rank,
DRAMInterface& _dram);
const std::string name() const { return csprintf("dram_%d", rank); }
const std::string name() const { return csprintf("%d", rank); }
/**
* Kick off accounting for power and refresh states and
@@ -659,15 +655,16 @@ class DRAMInterface : public SimObject
* @param next Memory Command
* @return true if timeStamp of Command 1 < timeStamp of Command 2
*/
static bool sortTime(const Command& cmd, const Command& cmd_next)
static bool
sortTime(const Command& cmd, const Command& cmd_next)
{
return cmd.timeStamp < cmd_next.timeStamp;
};
}
/**
* A reference to the parent DRAMCtrl instance
* A pointer to the parent DRAMCtrl instance
*/
DRAMCtrl& ctrl;
DRAMCtrl* ctrl;
/**
* Memory controller configuration initialized based on parameter
@@ -698,6 +695,7 @@ class DRAMInterface : public SimObject
* DRAM timing requirements
*/
const Tick M5_CLASS_VAR_USED tCK;
const Tick tCS;
const Tick tCL;
const Tick tBURST;
const Tick tBURST_MIN;
@@ -781,7 +779,7 @@ class DRAMInterface : public SimObject
struct DRAMStats : public Stats::Group
{
DRAMStats(DRAMCtrl &ctrl, DRAMInterface &dram);
DRAMStats(DRAMInterface &dram);
void regStats() override;
void resetStats() override;
@@ -798,10 +796,12 @@ class DRAMInterface : public SimObject
// Latencies summed over all requests
Stats::Scalar totQLat;
Stats::Scalar totBusLat;
Stats::Scalar totMemAccLat;
// Average latencies per request
Stats::Formula avgQLat;
Stats::Formula avgBusLat;
Stats::Formula avgMemAccLat;
// Row hit count and rate
@@ -817,6 +817,11 @@ class DRAMInterface : public SimObject
// Average bandwidth
Stats::Formula avgRdBW;
Stats::Formula avgWrBW;
Stats::Formula peakBW;
// bus utilization
Stats::Formula busUtil;
Stats::Formula busUtilRead;
Stats::Formula busUtilWrite;
Stats::Formula pageHitRate;
};
@@ -828,16 +833,28 @@ class DRAMInterface : public SimObject
std::vector<Rank*> ranks;
public:
/**
* Buffer sizes for read and write queues in the controller
* These are passed to the controller on instantiation
* Defining them here allows for buffers to be resized based
* on memory type / configuration.
*/
const uint32_t readBufferSize;
const uint32_t writeBufferSize;
/** Setting a pointer to the controller */
void setCtrl(DRAMCtrl* _ctrl) { ctrl = _ctrl; }
/**
* Initialize the DRAM interface and verify parameters
* @param range is the address range for this interface
*/
void init(AddrRange range);
void init() override;
/**
* Iterate through dram ranks and instantiate per rank startup routine
*/
void startupRanks();
void startup() override;
/**
* Iterate through dram ranks to exit self-refresh in order to drain
@@ -860,16 +877,27 @@ class DRAMInterface : public SimObject
*/
void suspend();
/**
* Get an address in a dense range which starts from 0. The input
* address is the physical address of the request in an address
* space that contains other SimObjects apart from this
* controller.
*
* @param addr The input address which should be in the addrRange
* @return An address in the continuous range [0, max)
*/
Addr getCtrlAddr(Addr addr) { return range.getOffset(addr); }
/**
* @return number of bytes in a burst for this interface
*/
uint32_t bytesPerBurst() const { return burstSize; };
uint32_t bytesPerBurst() const { return burstSize; }
/**
*
* @return number of ranks per channel for this interface
*/
uint32_t numRanks() const { return ranksPerChannel; };
uint32_t numRanks() const { return ranksPerChannel; }
/*
* @return time to send a burst of data
@@ -879,7 +907,8 @@ class DRAMInterface : public SimObject
/*
* @return time to send a burst of data without gaps
*/
Tick burstDataDelay() const
Tick
burstDataDelay() const
{
return (burstInterleave ? tBURST_MAX / 2 : tBURST);
}
@@ -893,7 +922,14 @@ class DRAMInterface : public SimObject
*
* @return additional bus turnaround required for read-to-write
*/
Tick minRdToWr() const { return tRTW; };
Tick minRdToWr() const { return tRTW; }
/**
* Determine the required delay for an access to a different rank
*
* @return required rank to rank delay
*/
Tick rankDelay() const { return tCS; }
/*
* Function to calulate RAS cycle time for use within and
@@ -957,7 +993,8 @@ class DRAMInterface : public SimObject
* This requires the DRAM to be in the
* REF IDLE state
*/
bool burstReady(uint8_t rank) const
bool
burstReady(uint8_t rank) const
{
return ranks[rank]->inRefIdleState();
}
@@ -979,7 +1016,7 @@ class DRAMInterface : public SimObject
*
* @param rank Specifies rank associated with read burst
*/
void respondEventDRAM(uint8_t rank);
void respondEvent(uint8_t rank);
/**
* Check the refresh state to determine if refresh needs
@@ -989,8 +1026,7 @@ class DRAMInterface : public SimObject
*/
void checkRefreshState(uint8_t rank);
DRAMInterface(DRAMCtrl& _ctrl, const DRAMCtrlParams* _p,
uint64_t capacity, AddrRange range);
DRAMInterface(const DRAMInterfaceParams* _p);
};
/**
@@ -1140,20 +1176,6 @@ class DRAMCtrl : public QoS::MemCtrl
*/
void accessAndRespond(PacketPtr pkt, Tick static_latency);
/**
* Get an address in a dense range which starts from 0. The input
* address is the physical address of the request in an address
* space that contains other SimObjects apart from this
* controller.
*
* @param addr The input address which should be in the addrRange
* @return An address in the continuous range [0, max)
*/
Addr getCtrlAddr(Addr addr)
{
return range.getOffset(addr);
}
/**
* The memory schduler/arbiter - picks which request needs to
* go next, based on the specified policy such as FCFS or FR-FCFS
@@ -1236,6 +1258,11 @@ class DRAMCtrl : public QoS::MemCtrl
*/
std::unordered_multiset<Tick> burstTicks;
/**
* Create pointer to interface of the actual dram media
*/
DRAMInterface* const dram;
/**
* The following are basic design parameters of the memory
* controller, and are initialized based on parameter values.
@@ -1250,12 +1277,6 @@ class DRAMCtrl : public QoS::MemCtrl
uint32_t writesThisTime;
uint32_t readsThisTime;
/**
* Basic memory timing parameters initialized based on parameter
* values. These will be used across memory interfaces.
*/
const Tick tCS;
/**
* Memory controller configuration initialized based on parameter
* values.
@@ -1310,10 +1331,6 @@ class DRAMCtrl : public QoS::MemCtrl
// Average queue lengths
Stats::Average avgRdQLen;
Stats::Average avgWrQLen;
// Latencies summed over all requests
Stats::Scalar totBusLat;
// Average latencies per request
Stats::Formula avgBusLat;
Stats::Scalar numRdRetry;
Stats::Scalar numWrRetry;
@@ -1324,21 +1341,12 @@ class DRAMCtrl : public QoS::MemCtrl
Stats::Histogram rdPerTurnAround;
Stats::Histogram wrPerTurnAround;
Stats::Scalar bytesRead;
Stats::Scalar bytesReadWrQ;
Stats::Scalar bytesWritten;
Stats::Scalar bytesReadSys;
Stats::Scalar bytesWrittenSys;
// Average bandwidth
Stats::Formula avgRdBW;
Stats::Formula avgWrBW;
Stats::Formula avgRdBWSys;
Stats::Formula avgWrBWSys;
Stats::Formula peakBW;
// bus utilization
Stats::Formula busUtil;
Stats::Formula busUtilRead;
Stats::Formula busUtilWrite;
Stats::Scalar totGap;
Stats::Formula avgGap;
@@ -1366,11 +1374,6 @@ class DRAMCtrl : public QoS::MemCtrl
CtrlStats stats;
/**
* Create pointer to interface to the actual media
*/
DRAMInterface* dram;
/**
* Upstream caches need this packet until true is returned, so
* hold it for deletion until a subsequent call
@@ -1448,13 +1451,6 @@ class DRAMCtrl : public QoS::MemCtrl
*/
void restartScheduler(Tick tick) { schedule(nextReqEvent, tick); }
/**
* Determine the required delay for an access to a different rank
*
* @return required rank to rank delay
*/
Tick rankDelay() const { return tCS; }
/**
* Check the current direction of the memory channel
*

View File

@@ -40,13 +40,13 @@
#include "base/intmath.hh"
#include "sim/core.hh"
DRAMPower::DRAMPower(const DRAMCtrlParams* p, bool include_io) :
DRAMPower::DRAMPower(const DRAMInterfaceParams* p, bool include_io) :
powerlib(libDRAMPower(getMemSpec(p), include_io))
{
}
Data::MemArchitectureSpec
DRAMPower::getArchParams(const DRAMCtrlParams* p)
DRAMPower::getArchParams(const DRAMInterfaceParams* p)
{
Data::MemArchitectureSpec archSpec;
archSpec.burstLength = p->burst_length;
@@ -68,7 +68,7 @@ DRAMPower::getArchParams(const DRAMCtrlParams* p)
}
Data::MemTimingSpec
DRAMPower::getTimingParams(const DRAMCtrlParams* p)
DRAMPower::getTimingParams(const DRAMInterfaceParams* p)
{
// Set the values that are used for power calculations and ignore
// the ones only used by the controller functionality in DRAMPower
@@ -100,7 +100,7 @@ DRAMPower::getTimingParams(const DRAMCtrlParams* p)
}
Data::MemPowerSpec
DRAMPower::getPowerParams(const DRAMCtrlParams* p)
DRAMPower::getPowerParams(const DRAMInterfaceParams* p)
{
// All DRAMPower currents are in mA
Data::MemPowerSpec powerSpec;
@@ -132,7 +132,7 @@ DRAMPower::getPowerParams(const DRAMCtrlParams* p)
}
Data::MemorySpecification
DRAMPower::getMemSpec(const DRAMCtrlParams* p)
DRAMPower::getMemSpec(const DRAMInterfaceParams* p)
{
Data::MemorySpecification memSpec;
memSpec.memArchSpec = getArchParams(p);
@@ -142,7 +142,18 @@ DRAMPower::getMemSpec(const DRAMCtrlParams* p)
}
bool
DRAMPower::hasTwoVDD(const DRAMCtrlParams* p)
DRAMPower::hasTwoVDD(const DRAMInterfaceParams* p)
{
return p->VDD2 == 0 ? false : true;
}
uint8_t
DRAMPower::getDataRate(const DRAMInterfaceParams* p)
{
    // Data rate is the number of data beats per clock cycle: the burst
    // length divided by the number of clock cycles a maximum-length
    // burst occupies on the bus.
    uint32_t burst_cycles = divCeil(p->tBURST_MAX, p->tCK);
    uint8_t data_rate = p->burst_length / burst_cycles;
    // 4 for GDDR5
    if (data_rate != 1 && data_rate != 2 && data_rate != 4 && data_rate != 8)
        // Bug fix: the original call omitted the argument for %d, so the
        // message printed garbage instead of the offending rate.
        fatal("Got unexpected data rate %d, should be 1 or 2 or 4 or 8\n",
              data_rate);
    return data_rate;
}

View File

@@ -44,7 +44,7 @@
#define __MEM_DRAM_POWER_HH__
#include "libdrampower/LibDRAMPower.h"
#include "params/DRAMCtrl.hh"
#include "params/DRAMInterface.hh"
/**
* DRAMPower is a standalone tool which calculates the power consumed by a
@@ -57,38 +57,44 @@ class DRAMPower
/**
* Transform the architechture parameters defined in
* DRAMCtrlParams to the memSpec of DRAMPower
* DRAMInterfaceParams to the memSpec of DRAMPower
*/
static Data::MemArchitectureSpec getArchParams(const DRAMCtrlParams* p);
static Data::MemArchitectureSpec getArchParams(
const DRAMInterfaceParams* p);
/**
* Transforms the timing parameters defined in DRAMCtrlParams to
* Transforms the timing parameters defined in DRAMInterfaceParams to
* the memSpec of DRAMPower
*/
static Data::MemTimingSpec getTimingParams(const DRAMCtrlParams* p);
static Data::MemTimingSpec getTimingParams(const DRAMInterfaceParams* p);
/**
* Transforms the power and current parameters defined in
* DRAMCtrlParam to the memSpec of DRAMPower
* DRAMInterfaceParams to the memSpec of DRAMPower
*/
static Data::MemPowerSpec getPowerParams(const DRAMCtrlParams* p);
static Data::MemPowerSpec getPowerParams(const DRAMInterfaceParams* p);
/**
* Determine data rate, either one or two.
*/
static uint8_t getDataRate(const DRAMInterfaceParams* p);
/**
* Determine if DRAM has two voltage domains (or one)
*/
static bool hasTwoVDD(const DRAMCtrlParams* p);
static bool hasTwoVDD(const DRAMInterfaceParams* p);
/**
* Return an instance of MemSpec based on the DRAMCtrlParams
* Return an instance of MemSpec based on the DRAMInterfaceParams
*/
static Data::MemorySpecification getMemSpec(const DRAMCtrlParams* p);
static Data::MemorySpecification getMemSpec(const DRAMInterfaceParams* p);
public:
// Instance of DRAMPower Library
libDRAMPower powerlib;
DRAMPower(const DRAMCtrlParams* p, bool include_io);
DRAMPower(const DRAMInterfaceParams* p, bool include_io);
};

View File

@@ -1,4 +1,4 @@
# Copyright (c) 2018 ARM Limited
# Copyright (c) 2018-2020 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
@@ -34,18 +34,21 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.params import *
from m5.objects.AbstractMemory import AbstractMemory
from m5.proxy import *
from m5.objects.ClockedObject import ClockedObject
from m5.objects.QoSTurnaround import *
# QoS Queue Selection policy used to select packets among same-QoS queues
class QoSQPolicy(Enum): vals = ["fifo", "lifo", "lrg"]
class QoSMemCtrl(AbstractMemory):
class QoSMemCtrl(ClockedObject):
type = 'QoSMemCtrl'
cxx_header = "mem/qos/mem_ctrl.hh"
cxx_class = 'QoS::MemCtrl'
abstract = True
system = Param.System(Parent.any, "System that the controller belongs to.")
##### QoS support parameters ####
# Number of priorities in the system

View File

@@ -1,4 +1,4 @@
# Copyright (c) 2018 ARM Limited
# Copyright (c) 2018-2020 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
@@ -37,6 +37,7 @@
from m5.params import *
from m5.objects.QoSMemCtrl import *
from m5.objects.QoSMemSinkInterface import *
class QoSMemSinkCtrl(QoSMemCtrl):
type = 'QoSMemSinkCtrl'
@@ -44,6 +45,10 @@ class QoSMemSinkCtrl(QoSMemCtrl):
cxx_class = "QoS::MemSinkCtrl"
port = ResponsePort("Response ports")
interface = Param.QoSMemSinkInterface(QoSMemSinkInterface(),
"Interface to memory")
# the basic configuration of the controller architecture, note
# that each entry corresponds to a burst for the specific DRAM
# configuration (e.g. x32 with burst length 8 is 32 bytes) and not
@@ -59,5 +64,3 @@ class QoSMemSinkCtrl(QoSMemCtrl):
# response latency - time to issue a response once a request is serviced
response_latency = Param.Latency("20ns", "Memory response latency")

View File

@@ -0,0 +1,40 @@
# Copyright (c) 2020 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.objects.AbstractMemory import AbstractMemory
class QoSMemSinkInterface(AbstractMemory):
    """Python declaration of the QoS memory sink interface: the backing
    AbstractMemory used by a QoSMemSinkCtrl controller."""
    type = 'QoSMemSinkInterface'
    cxx_header = "mem/qos/mem_sink.hh"

View File

@@ -1,4 +1,4 @@
# Copyright (c) 2018 ARM Limited
# Copyright (c) 2018-2020 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
@@ -37,6 +37,7 @@ Import('*')
SimObject('QoSMemCtrl.py')
SimObject('QoSMemSinkCtrl.py')
SimObject('QoSMemSinkInterface.py')
SimObject('QoSPolicy.py')
SimObject('QoSTurnaround.py')

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2019 ARM Limited
* Copyright (c) 2017-2020 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
@@ -42,7 +42,7 @@
namespace QoS {
MemCtrl::MemCtrl(const QoSMemCtrlParams * p)
: AbstractMemory(p),
: ClockedObject(p),
policy(p->qos_policy),
turnPolicy(p->qos_turnaround_policy),
queuePolicy(QueuePolicy::create(p)),
@@ -51,7 +51,8 @@ MemCtrl::MemCtrl(const QoSMemCtrlParams * p)
qosSyncroScheduler(p->qos_syncro_scheduler),
totalReadQueueSize(0), totalWriteQueueSize(0),
busState(READ), busStateNext(READ),
stats(*this)
stats(*this),
_system(p->system)
{
// Set the priority policy
if (policy) {
@@ -76,12 +77,6 @@ MemCtrl::MemCtrl(const QoSMemCtrlParams * p)
MemCtrl::~MemCtrl()
{}
void
MemCtrl::init()
{
AbstractMemory::init();
}
void
MemCtrl::logRequest(BusState dir, MasterID m_id, uint8_t qos,
Addr addr, uint64_t entries)

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019 ARM Limited
* Copyright (c) 2020 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
@@ -36,10 +36,10 @@
*/
#include "debug/QOS.hh"
#include "mem/abstract_mem.hh"
#include "mem/qos/q_policy.hh"
#include "mem/qos/policy.hh"
#include "mem/qos/q_policy.hh"
#include "params/QoSMemCtrl.hh"
#include "sim/clocked_object.hh"
#include "sim/system.hh"
#include <unordered_map>
@@ -56,7 +56,7 @@ namespace QoS {
* which support QoS - it provides access to a set of QoS
* scheduling policies
*/
class MemCtrl: public AbstractMemory
class MemCtrl : public ClockedObject
{
public:
/** Bus Direction */
@@ -151,6 +151,9 @@ class MemCtrl: public AbstractMemory
Stats::Scalar numStayWriteState;
} stats;
/** Pointer to the System object */
System* _system;
/**
* Initializes dynamically counters and
* statistics for a given Master
@@ -265,11 +268,6 @@ class MemCtrl: public AbstractMemory
virtual ~MemCtrl();
/**
* Initializes this object
*/
void init() override;
/**
* Gets the current bus state
*
@@ -346,6 +344,10 @@ class MemCtrl: public AbstractMemory
* @return total number of priority levels
*/
uint8_t numPriorities() const { return _numPriorities; }
/** read the system pointer
* @return pointer to the system object */
System* system() const { return _system; }
};
template<typename Queues>

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018 ARM Limited
* Copyright (c) 2018-2020 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
@@ -40,6 +40,7 @@
#include "debug/Drain.hh"
#include "debug/QOS.hh"
#include "mem_sink.hh"
#include "params/QoSMemSinkInterface.hh"
#include "sim/system.hh"
namespace QoS {
@@ -50,12 +51,15 @@ MemSinkCtrl::MemSinkCtrl(const QoSMemSinkCtrlParams* p)
memoryPacketSize(p->memory_packet_size),
readBufferSize(p->read_buffer_size),
writeBufferSize(p->write_buffer_size), port(name() + ".port", *this),
interface(p->interface),
retryRdReq(false), retryWrReq(false), nextRequest(0), nextReqEvent(this)
{
// Resize read and write queue to allocate space
// for configured QoS priorities
readQueue.resize(numPriorities());
writeQueue.resize(numPriorities());
interface->setMemCtrl(this);
}
MemSinkCtrl::~MemSinkCtrl()
@@ -92,7 +96,7 @@ MemSinkCtrl::recvAtomic(PacketPtr pkt)
"%s Should not see packets where cache is responding\n",
__func__);
access(pkt);
interface->access(pkt);
return responseLatency;
}
@@ -101,7 +105,7 @@ MemSinkCtrl::recvFunctional(PacketPtr pkt)
{
pkt->pushLabel(name());
functionalAccess(pkt);
interface->functionalAccess(pkt);
pkt->popLabel();
}
@@ -279,7 +283,7 @@ MemSinkCtrl::processNextReqEvent()
// Do the actual memory access which also turns the packet
// into a response
access(pkt);
interface->access(pkt);
// Log the response
logResponse(pkt->isRead()? READ : WRITE,
@@ -351,7 +355,7 @@ AddrRangeList
MemSinkCtrl::MemoryPort::getAddrRanges() const
{
AddrRangeList ranges;
ranges.push_back(memory.getAddrRange());
ranges.push_back(memory.interface->getAddrRange());
return ranges;
}
@@ -390,3 +394,13 @@ QoSMemSinkCtrlParams::create()
return new QoS::MemSinkCtrl(this);
}
// Construct the memory sink interface; all storage behaviour is
// delegated to the AbstractMemory base class.
QoSMemSinkInterface::QoSMemSinkInterface(const QoSMemSinkInterfaceParams* _p)
    : AbstractMemory(_p)
{
}
// Params factory hook: instantiate the SimObject from its
// generated parameter struct.
QoSMemSinkInterface*
QoSMemSinkInterfaceParams::create()
{
    return new QoSMemSinkInterface(this);
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018 ARM Limited
* Copyright (c) 2018-2020 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
@@ -41,10 +41,14 @@
#ifndef __MEM_QOS_MEM_SINK_HH__
#define __MEM_QOS_MEM_SINK_HH__
#include "mem/abstract_mem.hh"
#include "mem/qos/mem_ctrl.hh"
#include "mem/qport.hh"
#include "params/QoSMemSinkCtrl.hh"
class QoSMemSinkInterfaceParams;
class QoSMemSinkInterface;
namespace QoS {
/**
@@ -163,6 +167,11 @@ class MemSinkCtrl : public MemCtrl
/** Memory slave port */
MemoryPort port;
/**
* Create pointer to interface of actual media
*/
QoSMemSinkInterface* const interface;
/** Read request pending */
bool retryRdReq;
@@ -244,4 +253,17 @@ class MemSinkCtrl : public MemCtrl
} // namespace QoS
/**
 * The QoS memory sink interface: models the actual backing storage as
 * an AbstractMemory. The owning QoS::MemSinkCtrl registers itself here
 * via setMemCtrl() so the interface holds a back-pointer to it.
 */
class QoSMemSinkInterface : public AbstractMemory
{
public:
/** Set the back-pointer to the owning memory sink controller */
void setMemCtrl(QoS::MemSinkCtrl* _ctrl) { ctrl = _ctrl; };
/** Pointer to the controller that owns this interface */
QoS::MemSinkCtrl* ctrl;
QoSMemSinkInterface(const QoSMemSinkInterfaceParams* _p);
};
#endif /* __MEM_QOS_MEM_SINK_HH__ */

View File

@@ -1,4 +1,4 @@
# Copyright (c) 2012-2013, 2017-2018 ARM Limited
# Copyright (c) 2012-2013, 2017-2018, 2020 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
@@ -220,7 +220,12 @@ class BaseSESystem(BaseSystem):
super(BaseSESystem, self).init_system(system)
def create_system(self):
system = System(physmem = self.mem_class(),
if issubclass(self.mem_class, m5.objects.DRAMInterface):
mem_ctrl = DRAMCtrl()
mem_ctrl.dram = self.mem_class()
else:
mem_ctrl = self.mem_class()
system = System(physmem = mem_ctrl,
membus = SystemXBar(),
mem_mode = self.mem_mode,
multi_thread = (self.num_threads > 1))
@@ -272,8 +277,16 @@ class BaseFSSystem(BaseSystem):
else:
# create the memory controllers and connect them, stick with
# the physmem name to avoid bumping all the reference stats
system.physmem = [self.mem_class(range = r)
for r in system.mem_ranges]
if issubclass(self.mem_class, m5.objects.DRAMInterface):
mem_ctrls = []
for r in system.mem_ranges:
mem_ctrl = DRAMCtrl()
mem_ctrl.dram = self.mem_class(range = r)
mem_ctrls.append(mem_ctrl)
system.physmem = mem_ctrls
else:
system.physmem = [self.mem_class(range = r)
for r in system.mem_ranges]
for i in range(len(system.physmem)):
system.physmem[i].port = system.membus.master