This PR changes memory and cache sizes in various parts of the gem5 codebase to use binary units (e.g. KiB) instead of metric units (e.g. kB). This makes the codebase more consistent, as gem5 automatically converts memory and cache sizes that are in metric units to binary units. This PR also adds a warning message to let users know when an auto-conversion from base 10 to base 2 units occurs. There were a few places in configs and in the comments of various files where I didn't change the metric units, as I couldn't figure out where the parameters with those units were being used.
250 lines
8.4 KiB
Python
250 lines
8.4 KiB
Python
# Copyright (c) 2016-2017, 2022-2024 Arm Limited
|
|
# All rights reserved.
|
|
#
|
|
# The license below extends only to copyright in the software and shall
|
|
# not be construed as granting a license to any other intellectual
|
|
# property including but not limited to intellectual property relating
|
|
# to a hardware implementation of the functionality of the software
|
|
# licensed hereunder. You may use the software subject to the license
|
|
# terms below provided that you ensure that this notice is replicated
|
|
# unmodified and in its entirety in all distributions of the software,
|
|
# modified or unmodified, in source code or in binary form.
|
|
#
|
|
# Redistribution and use in source and binary forms, with or without
|
|
# modification, are permitted provided that the following conditions are
|
|
# met: redistributions of source code must retain the above copyright
|
|
# notice, this list of conditions and the following disclaimer;
|
|
# redistributions in binary form must reproduce the above copyright
|
|
# notice, this list of conditions and the following disclaimer in the
|
|
# documentation and/or other materials provided with the distribution;
|
|
# neither the name of the copyright holders nor the names of its
|
|
# contributors may be used to endorse or promote products derived from
|
|
# this software without specific prior written permission.
|
|
#
|
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
"""This script is the syscall emulation example script from the ARM
|
|
Research Starter Kit on System Modeling. More information can be found
|
|
at: http://www.arm.com/ResearchEnablement/SystemModeling
|
|
"""
|
|
|
|
import argparse
import os
import shlex
import sys

import m5
from m5.objects import *
from m5.util import addToPath

m5.util.addToPath("../..")

import devices
from common import (
    MemConfig,
    ObjectList,
)
from common.cores.arm import (
    HPI,
    O3_ARM_v7a,
)
|
|
|
|
# Pre-defined CPU configurations, keyed by the value accepted by the
# --cpu command-line option. Each tuple must be ordered as:
# (cpu_class, l1_icache_class, l1_dcache_class, l2_cache_class). Any of
# the cache classes may be 'None' if the particular cache is not present
# (e.g. the atomic CPU runs without any cache models).
cpu_types = {
    "atomic": (AtomicSimpleCPU, None, None, None),
    "minor": (MinorCPU, devices.L1I, devices.L1D, devices.L2),
    "hpi": (HPI.HPI, HPI.HPI_ICache, HPI.HPI_DCache, HPI.HPI_L2),
    "o3": (
        O3_ARM_v7a.O3_ARM_v7a_3,
        O3_ARM_v7a.O3_ARM_v7a_ICache,
        O3_ARM_v7a.O3_ARM_v7a_DCache,
        O3_ARM_v7a.O3_ARM_v7aL2,
    ),
}
|
|
|
|
|
|
def get_processes(cmd):
    """Interprets commands to run and returns a list of processes"""

    work_dir = os.getcwd()
    processes = []
    for seq, command_line in enumerate(cmd):
        # Split the command string into an argv-style list, honouring
        # shell-like quoting.
        arg_vector = shlex.split(command_line)

        proc = Process(
            pid=100 + seq,
            cwd=work_dir,
            cmd=arg_vector,
            executable=arg_vector[0],
        )
        proc.gid = os.getgid()

        print("info: %d. command and arguments: %s" % (seq + 1, proc.cmd))
        processes.append(proc)

    return processes
|
|
|
|
|
|
def create(args):
    """Create and configure the system object.

    Builds a syscall-emulation (SE) system containing one CPU cluster,
    optional caches (timing CPUs only), and an off-chip memory system,
    then maps one workload process onto each CPU core.

    Arguments:
        args -- parsed command-line namespace produced by main().

    Returns:
        The configured devices.SimpleSeSystem instance.
    """

    cpu_class = cpu_types[args.cpu][0]
    mem_mode = cpu_class.memory_mode()
    # Only simulate caches when using a timing CPU (e.g., the HPI
    # model); atomic CPUs bypass the cache models entirely.
    want_caches = mem_mode == "timing"

    system = devices.SimpleSeSystem(
        mem_mode=mem_mode,
    )

    # Add CPUs to the system. A cluster of CPUs typically have
    # private L1 caches and a shared L2 cache.
    system.cpu_cluster = devices.ArmCpuCluster(
        system,
        args.num_cores,
        args.cpu_freq,
        "1.2V",
        *cpu_types[args.cpu],
        tarmac_gen=args.tarmac_gen,
        tarmac_dest=args.tarmac_dest,
    )

    # Create a cache hierarchy for the cluster. We are assuming that
    # clusters have core-private L1 caches and an L2 that's shared
    # within the cluster.
    system.addCaches(want_caches, last_cache_level=2)

    # Tell components about the expected physical memory ranges. This
    # is, for example, used by the MemConfig helper to determine where
    # to map DRAMs in the physical address space.
    system.mem_ranges = [AddrRange(start=0, size=args.mem_size)]

    # Configure the off-chip memory system.
    MemConfig.config_mem(args, system)

    # Wire up the system's memory system
    system.connect()

    # Parse the command line and get a list of Processes instances
    # that we can pass to gem5.
    processes = get_processes(args.commands_to_run)
    if len(processes) != args.num_cores:
        # One workload per core is required; bail out early rather
        # than failing obscurely inside the simulator.
        print(
            "Error: Cannot map %d command(s) onto %d CPU(s)"
            % (len(processes), args.num_cores)
        )
        sys.exit(1)

    system.workload = SEWorkload.init_compatible(processes[0].executable)

    # Assign one workload to each CPU
    for cpu, workload in zip(system.cpu_cluster.cpus, processes):
        cpu.workload = workload

    return system
|
|
|
|
|
|
def main():
    """Parse command-line options, build the system, and run it.

    Sets up the argparse interface, creates the gem5 root and system
    objects, instantiates the C++ hierarchy, and runs the simulation
    until the first exit event, whose cause is then printed.
    """
    parser = argparse.ArgumentParser(epilog=__doc__)

    parser.add_argument(
        "commands_to_run",
        metavar="command(s)",
        nargs="*",
        help="Command(s) to run",
    )
    parser.add_argument(
        "--cpu",
        type=str,
        choices=list(cpu_types.keys()),
        default="atomic",
        help="CPU model to use",
    )
    parser.add_argument("--cpu-freq", type=str, default="4GHz")
    parser.add_argument(
        "--num-cores", type=int, default=1, help="Number of CPU cores"
    )
    parser.add_argument(
        "--mem-type",
        default="DDR3_1600_8x8",
        choices=ObjectList.mem_list.get_names(),
        help="type of memory to use",
    )
    parser.add_argument(
        "--mem-channels", type=int, default=2, help="number of memory channels"
    )
    parser.add_argument(
        "--mem-ranks",
        type=int,
        default=None,
        help="number of memory ranks per channel",
    )
    parser.add_argument(
        "--mem-size",
        action="store",
        type=str,
        default="2GiB",
        help="Specify the physical memory size",
    )
    parser.add_argument(
        "--tarmac-gen",
        action="store_true",
        help="Write a Tarmac trace.",
    )
    parser.add_argument(
        "--tarmac-dest",
        choices=TarmacDump.vals,
        default="stdoutput",
        help="Destination for the Tarmac trace output. [Default: stdoutput]",
    )
    parser.add_argument(
        "-P",
        "--param",
        action="append",
        default=[],
        help="Set a SimObject parameter relative to the root node. "
        "An extended Python multi range slicing syntax can be used "
        "for arrays. For example: "
        "'system.cpu[0,1,3:8:2].max_insts_all_threads = 42' "
        "sets max_insts_all_threads for cpus 0, 1, 3, 5 and 7 "
        "Direct parameters of the root object are not accessible, "
        "only parameters of its children.",
    )

    args = parser.parse_args()

    # Create a single root node for gem5's object hierarchy. There can
    # only exist one root node in the simulator at any given
    # time. Tell gem5 that we want to use syscall emulation mode
    # instead of full system mode.
    root = Root(full_system=False)

    # Populate the root node with a system. A system corresponds to a
    # single node with shared memory.
    root.system = create(args)
    # Apply any -P/--param overrides relative to the root node.
    root.apply_config(args.param)

    # Instantiate the C++ object hierarchy. After this point,
    # SimObjects can't be instantiated anymore.
    m5.instantiate()

    # Start the simulator. This gives control to the C++ world and
    # starts the simulator. The returned event tells the simulation
    # script why the simulator exited.
    event = m5.simulate()

    # Print the reason for the simulation exit. Some exit codes are
    # requests for service (e.g., checkpoints) from the simulation
    # script. We'll just ignore them here and exit.
    print(f"{event.getCause()} ({event.getCode()}) @ {m5.curTick()}")
|
|
|
|
|
|
# gem5 runs configuration scripts with __name__ set to "__m5_main__"
# (not the usual "__main__"), so this guard is the entry point when
# the script is executed under gem5.
if __name__ == "__m5_main__":
    main()
|