mem: Add NVM interface

Add NVM interface to memory controller.

This interface can be used either alongside or instead of the
existing DRAM interface. Therefore, a single controller can
interface to either DRAM or NVM, or both.

Specifically, a memory channel can be configured as:
- Memory controller interfacing to DRAM only
- Memory controller interfacing to NVM only
- Memory controller interfacing to both DRAM and NVM

How data is placed or migrated between media types is outside
of the scope of this change.

The NVM interface incorporates new static delay parameters
for read and write completion. The interface defines a two-stage
read to manage non-deterministic read delays while
enabling deterministic data transfer, similar to NVDIMM-P.
The NVM interface also includes parameters to define
read and write buffers on the media side (on-DIMM). These are
utilized to quickly offload commands and write data, mitigating
the effects of the media's higher latency and lower bandwidth
characteristics.

Change-Id: I6b22ddb495877f88d161f0bd74ade32cc8fdcbcc
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/29027
Reviewed-by: Jason Lowe-Power <power.jg@gmail.com>
Reviewed-by: Wendy Elsasser <wendy.elsasser@arm.com>
Tested-by: kokoro <noreply+kokoro@google.com>
Maintainer: Jason Lowe-Power <power.jg@gmail.com>
This commit is contained in:
Wendy Elsasser
2020-04-01 12:39:42 -05:00
committed by Jason Lowe-Power
parent 4acc419b6f
commit dab7c78eca
20 changed files with 3522 additions and 521 deletions

View File

@@ -80,6 +80,21 @@ def create_mem_intf(intf, r, i, nbr_mem_ctrls, intlv_bits, intlv_size,
intlv_low_bit = int(math.log(rowbuffer_size, 2))
# Also adjust interleaving bits for NVM attached as memory
# Will have separate range defined with unique interleaving
if issubclass(intf, m5.objects.NVMInterface):
# If the channel bits are appearing after the low order
# address bits (buffer bits), we need to add the appropriate
# number of bits for the buffer size
if interface.addr_mapping.value == 'RoRaBaChCo':
# This computation only really needs to happen
# once, but as we rely on having an instance we
# end up having to repeat it for each and every
# one
buffer_size = interface.per_bank_buffer_size.value
intlv_low_bit = int(math.log(buffer_size, 2))
# We got all we need to configure the appropriate address
# range
interface.range = m5.objects.AddrRange(r.start, size = r.size(),
@@ -102,15 +117,23 @@ def config_mem(options, system):
"""
# Mandatory options
opt_mem_type = options.mem_type
opt_mem_channels = options.mem_channels
# Semi-optional options
# Must have either mem_type or nvm_type or both
opt_mem_type = getattr(options, "mem_type", None)
opt_nvm_type = getattr(options, "nvm_type", None)
if not opt_mem_type and not opt_nvm_type:
fatal("Must have option for either mem-type or nvm-type, or both")
# Optional options
opt_tlm_memory = getattr(options, "tlm_memory", None)
opt_external_memory_system = getattr(options, "external_memory_system",
None)
opt_elastic_trace_en = getattr(options, "elastic_trace_en", False)
opt_mem_ranks = getattr(options, "mem_ranks", None)
opt_nvm_ranks = getattr(options, "nvm_ranks", None)
opt_hybrid_channel = getattr(options, "hybrid_channel", False)
opt_dram_powerdown = getattr(options, "enable_dram_powerdown", None)
opt_mem_channels_intlv = getattr(options, "mem_channels_intlv", 128)
opt_xor_low_bit = getattr(options, "xor_low_bit", 0)
@@ -142,13 +165,19 @@ def config_mem(options, system):
return
nbr_mem_ctrls = opt_mem_channels
import math
from m5.util import fatal
intlv_bits = int(math.log(nbr_mem_ctrls, 2))
if 2 ** intlv_bits != nbr_mem_ctrls:
fatal("Number of memory channels must be a power of 2")
intf = ObjectList.mem_list.get(opt_mem_type)
if opt_mem_type:
intf = ObjectList.mem_list.get(opt_mem_type)
if opt_nvm_type:
n_intf = ObjectList.mem_list.get(opt_nvm_type)
nvm_intfs = []
mem_ctrls = []
if opt_elastic_trace_en and not issubclass(intf, m5.objects.SimpleMemory):
@@ -164,51 +193,80 @@ def config_mem(options, system):
# For every range (most systems will only have one), create an
# array of memory interfaces and set their parameters to match
# their address mapping in the case of a DRAM
range_iter = 0
for r in system.mem_ranges:
# As the loops iterates across ranges, assign them alternatively
# to DRAM and NVM if both configured, starting with DRAM
range_iter += 1
for i in range(nbr_mem_ctrls):
# Create the DRAM interface
dram_intf = create_mem_intf(intf, r, i, nbr_mem_ctrls, intlv_bits,
intlv_size, opt_xor_low_bit)
if opt_mem_type and (not opt_nvm_type or range_iter % 2 != 0):
# Create the DRAM interface
dram_intf = create_mem_intf(intf, r, i, nbr_mem_ctrls,
intlv_bits, intlv_size, opt_xor_low_bit)
# Set the number of ranks based on the command-line
# options if it was explicitly set
if issubclass(intf, m5.objects.DRAMInterface) and opt_mem_ranks:
dram_intf.ranks_per_channel = opt_mem_ranks
# Set the number of ranks based on the command-line
# options if it was explicitly set
if issubclass(intf, m5.objects.DRAMInterface) and \
opt_mem_ranks:
dram_intf.ranks_per_channel = opt_mem_ranks
# Enable low-power DRAM states if option is set
if issubclass(intf, m5.objects.DRAMInterface):
dram_intf.enable_dram_powerdown = opt_dram_powerdown
# Enable low-power DRAM states if option is set
if issubclass(intf, m5.objects.DRAMInterface):
dram_intf.enable_dram_powerdown = opt_dram_powerdown
if opt_elastic_trace_en:
dram_intf.latency = '1ns'
print("For elastic trace, over-riding Simple Memory "
"latency to 1ns.")
if opt_elastic_trace_en:
dram_intf.latency = '1ns'
print("For elastic trace, over-riding Simple Memory "
"latency to 1ns.")
# Create the controller that will drive the interface
if opt_mem_type == "HMC_2500_1x32":
# The static latency of the vault controllers is estimated
# to be smaller than a full DRAM channel controller
mem_ctrl = m5.objects.DRAMCtrl(min_writes_per_switch = 8,
static_backend_latency = '4ns',
static_frontend_latency = '4ns')
else:
mem_ctrl = m5.objects.DRAMCtrl()
# Create the controller that will drive the interface
if opt_mem_type == "HMC_2500_1x32":
# The static latency of the vault controllers is estimated
# to be smaller than a full DRAM channel controller
mem_ctrl = m5.objects.DRAMCtrl(min_writes_per_switch = 8,
static_backend_latency = '4ns',
static_frontend_latency = '4ns')
else:
mem_ctrl = m5.objects.DRAMCtrl()
# Hookup the controller to the interface and add to the list
mem_ctrl.dram = dram_intf
mem_ctrls.append(mem_ctrl)
# Hookup the controller to the interface and add to the list
mem_ctrl.dram = dram_intf
mem_ctrls.append(mem_ctrl)
# Create a controller and connect the interfaces to a controller
elif opt_nvm_type and (not opt_mem_type or range_iter % 2 == 0):
nvm_intf = create_mem_intf(n_intf, r, i, nbr_mem_ctrls,
intlv_bits, intlv_size)
# Set the number of ranks based on the command-line
# options if it was explicitly set
if issubclass(n_intf, m5.objects.NVMInterface) and \
opt_nvm_ranks:
nvm_intf.ranks_per_channel = opt_nvm_ranks
# Create a controller if not sharing a channel with DRAM
# in which case the controller has already been created
if not opt_hybrid_channel:
mem_ctrl = m5.objects.DRAMCtrl()
mem_ctrl.nvm = nvm_intf
mem_ctrls.append(mem_ctrl)
else:
nvm_intfs.append(nvm_intf)
# hook up NVM interface when channel is shared with DRAM + NVM
for i in range(len(nvm_intfs)):
mem_ctrls[i].nvm = nvm_intfs[i];
# Connect the controller to the xbar port
for i in range(len(mem_ctrls)):
if opt_mem_type == "HMC_2500_1x32":
# Connect the controllers to the membus
mem_ctrls[i].port = xbar[i/4].master
# Set memory device size. There is an independent controller for
# each vault. All vaults are same size.
# Set memory device size. There is an independent controller
# for each vault. All vaults are same size.
mem_ctrls[i].dram.device_size = options.hmc_dev_vault_size
else:
# Connect the controllers to the membus
mem_ctrls[i].port = xbar.master
subsystem.mem_ctrls = mem_ctrls