python: Apply Black formatter to Python files

The command executed was `black src configs tests util`.

Change-Id: I8dfaa6ab04658fea37618127d6ac19270028d771
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/47024
Maintainer: Bobby Bruce <bbruce@ucdavis.edu>
Reviewed-by: Jason Lowe-Power <power.jg@gmail.com>
Reviewed-by: Giacomo Travaglini <giacomo.travaglini@arm.com>
Tested-by: kokoro <noreply+kokoro@google.com>
This commit is contained in:
Bobby R. Bruce
2022-07-05 11:02:25 -07:00
committed by Giacomo Travaglini
parent 1cfaa8da83
commit 787204c92d
980 changed files with 35668 additions and 22233 deletions

View File

@@ -42,4 +42,4 @@ class CoherenceProtocol(Enum):
MOESI_AMD_BASE = 8
MI_EXAMPLE = 9
GPU_VIPER = 10
CHI = 11
CHI = 11

View File

@@ -168,11 +168,13 @@ class AbstractBoard:
This function is used by the Simulator module to setup the simulation
correctly.
"""
if self._is_fs == None:
raise Exception("The workload for this board not yet to be set. "
"Whether the board is to be executed in FS or SE "
"mode is determined by which 'set workload' "
"function is run.")
if self._is_fs == None:
raise Exception(
"The workload for this board not yet to be set. "
"Whether the board is to be executed in FS or SE "
"mode is determined by which 'set workload' "
"function is run."
)
return self._is_fs
@abstractmethod

View File

@@ -30,6 +30,7 @@ from .abstract_board import AbstractBoard
from m5.objects import System
class AbstractSystemBoard(System, AbstractBoard):
"""
@@ -37,6 +38,7 @@ class AbstractSystemBoard(System, AbstractBoard):
"""
__metaclass__ = ABCMeta
def __init__(
self,
clk_freq: str,
@@ -51,4 +53,4 @@ class AbstractSystemBoard(System, AbstractBoard):
processor=processor,
memory=memory,
cache_hierarchy=cache_hierarchy,
)
)

View File

@@ -62,6 +62,7 @@ from ..processors.abstract_processor import AbstractProcessor
from ..memory.abstract_memory_system import AbstractMemorySystem
from ..cachehierarchies.abstract_cache_hierarchy import AbstractCacheHierarchy
class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload):
"""
A board capable of full system simulation for ARM instructions. It is based
@@ -77,6 +78,7 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload):
* stage2 walker ports are ignored.
* This version does not support SECURITY extension.
"""
__metaclass__ = ABCMeta
def __init__(
@@ -86,20 +88,20 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload):
memory: AbstractMemorySystem,
cache_hierarchy: AbstractCacheHierarchy,
platform: VExpress_GEM5_Base = VExpress_GEM5_Foundation(),
release: ArmRelease = ArmDefaultRelease()
release: ArmRelease = ArmDefaultRelease(),
) -> None:
super().__init__()
AbstractBoard.__init__(
self,
clk_freq = clk_freq,
processor = processor,
memory = memory,
cache_hierarchy = cache_hierarchy,
clk_freq=clk_freq,
processor=processor,
memory=memory,
cache_hierarchy=cache_hierarchy,
)
# This board requires ARM ISA to work.
requires(isa_required = ISA.ARM)
requires(isa_required=ISA.ARM)
# Setting the voltage domain here.
@@ -179,9 +181,7 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload):
self.iobridge = Bridge(delay="50ns")
self.iobridge.mem_side_port = self.iobus.cpu_side_ports
self.iobridge.cpu_side_port = (
self.cache_hierarchy.get_mem_side_port()
)
self.iobridge.cpu_side_port = self.cache_hierarchy.get_mem_side_port()
# We either have iocache or dmabridge depending upon the
# cache_hierarchy. If we have "NoCache", then we use the dmabridge.
@@ -205,9 +205,7 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload):
# bridge in this case. Parameters of this bridge are also taken
# from the common/example/arm/devices.py file.
self.dmabridge = Bridge(
delay="50ns", ranges=self.mem_ranges
)
self.dmabridge = Bridge(delay="50ns", ranges=self.mem_ranges)
self.dmabridge.mem_side_port = self.get_dma_ports()[0]
self.dmabridge.cpu_side_port = self.get_dma_ports()[1]
@@ -271,7 +269,7 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload):
def get_dma_ports(self) -> List[Port]:
return [
self.cache_hierarchy.get_cpu_side_port(),
self.iobus.mem_side_ports
self.iobus.mem_side_ports,
]
@overrides(AbstractBoard)
@@ -292,9 +290,7 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload):
)
self.pci_devices = [PciVirtIO(vio=VirtIOBlock(image=image))]
self.realview.attachPciDevice(
self.pci_devices[0], self.iobus
)
self.realview.attachPciDevice(self.pci_devices[0], self.iobus)
# Now that the disk and workload are set, we can generate the device
# tree file. We will generate the dtb file every time the board is
@@ -309,7 +305,7 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload):
# Specifying the dtb file location to the workload.
self.workload.dtb_filename = os.path.join(
m5.options.outdir, "device.dtb"
m5.options.outdir, "device.dtb"
)
# Calling generateDtb from class ArmSystem to add memory information to
@@ -322,7 +318,8 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload):
# the kernel file and the bootloader file(s).
self.realview.setupBootLoader(
self, self.workload.dtb_filename, self._bootloader)
self, self.workload.dtb_filename, self._bootloader
)
def _get_memory_ranges(self, mem_size) -> list:
"""
@@ -334,7 +331,7 @@ class ArmBoard(ArmSystem, AbstractBoard, KernelDiskWorkload):
for mem_range in self.realview._mem_regions:
size_in_range = min(mem_size, mem_range.size())
mem_ranges.append(
AddrRange(start = mem_range.start, size = size_in_range)
AddrRange(start=mem_range.start, size=size_in_range)
)
mem_size -= size_in_range

View File

@@ -72,6 +72,7 @@ from m5.util.fdthelper import (
FdtState,
)
class LupvBoard(AbstractSystemBoard, KernelDiskWorkload):
"""
A board capable of full system simulation for RISC-V.
@@ -94,9 +95,11 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload):
raise EnvironmentError("RiscvBoard is not compatible with Ruby")
if processor.get_isa() != ISA.RISCV:
raise Exception("The LupvBoard requires a processor using the "
raise Exception(
"The LupvBoard requires a processor using the "
"RISCV ISA. Current processor "
f"ISA: '{processor.get_isa().name}'.")
f"ISA: '{processor.get_isa().name}'."
)
super().__init__(clk_freq, processor, memory, cache_hierarchy)
@@ -107,76 +110,77 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload):
# Initialize all the devices that we want to use on this board
# Interrupt IDS for PIC Device
self._excep_code = { 'INT_SOFT_SUPER': 1, 'INT_SOFT_MACHINE': 3,
'INT_TIMER_SUPER': 5, 'INT_TIMER_MACHINE': 7,
'INT_EXT_SUPER': 9, 'INT_EXT_MACHINE': 11 }
self._int_ids = { 'TTY': 0, 'BLK': 1, 'RNG': 2}
self._excep_code = {
"INT_SOFT_SUPER": 1,
"INT_SOFT_MACHINE": 3,
"INT_TIMER_SUPER": 5,
"INT_TIMER_MACHINE": 7,
"INT_EXT_SUPER": 9,
"INT_EXT_MACHINE": 11,
}
self._int_ids = {"TTY": 0, "BLK": 1, "RNG": 2}
# CLINT
self.clint = Clint(pio_addr=0x2000000)
# PLIC
self.pic = Plic(pio_addr=0xc000000)
self.pic = Plic(pio_addr=0xC000000)
# LUPIO IPI
self.lupio_ipi = LupioIPI(
pio_addr=0x20001000,
int_type=self._excep_code['INT_SOFT_SUPER'],
num_threads = self.processor.get_num_cores()
int_type=self._excep_code["INT_SOFT_SUPER"],
num_threads=self.processor.get_num_cores(),
)
# LUPIO PIC
self.lupio_pic = LupioPIC(
pio_addr=0x20002000,
int_type = self._excep_code['INT_EXT_SUPER'],
num_threads = self.processor.get_num_cores()
int_type=self._excep_code["INT_EXT_SUPER"],
num_threads=self.processor.get_num_cores(),
)
#LupV Platform
self.lupv = LupV(
pic = self.lupio_pic,
uart_int_id = self._int_ids['TTY']
)
# LupV Platform
self.lupv = LupV(pic=self.lupio_pic, uart_int_id=self._int_ids["TTY"])
# LUPIO BLK
self.lupio_blk = LupioBLK(
pio_addr=0x20000000,
platform = self.lupv,
int_id = self._int_ids['BLK']
platform=self.lupv,
int_id=self._int_ids["BLK"],
)
# LUPIO RNG
self.lupio_rng = LupioRNG(
pio_addr=0x20003000,
platform = self.lupv,
int_id = self._int_ids['RNG']
platform=self.lupv,
int_id=self._int_ids["RNG"],
)
# LUPIO RTC
self.lupio_rtc = LupioRTC(pio_addr=0x20004000)
#LUPIO SYS
self.lupio_sys = LupioSYS(pio_addr= 0x20005000)
# LUPIO SYS
self.lupio_sys = LupioSYS(pio_addr=0x20005000)
# LUPIO TMR
self.lupio_tmr = LupioTMR(
pio_addr=0x20006000,
int_type = self._excep_code['INT_TIMER_SUPER'],
num_threads = self.processor.get_num_cores()
int_type=self._excep_code["INT_TIMER_SUPER"],
num_threads=self.processor.get_num_cores(),
)
# LUPIO TTY
self.lupio_tty = LupioTTY(
pio_addr=0x20007000,
platform = self.lupv,
int_id = self._int_ids['TTY']
platform=self.lupv,
int_id=self._int_ids["TTY"],
)
self.terminal = Terminal()
pic_srcs = [
self._int_ids['TTY'],
self._int_ids['BLK'],
self._int_ids['RNG']
self._int_ids["TTY"],
self._int_ids["BLK"],
self._int_ids["RNG"],
]
# Set the number of sources to the PIC as 0 because we've removed the
@@ -205,14 +209,14 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload):
self.pic,
self.lupio_ipi,
self.lupio_pic,
self.lupio_tmr
self.lupio_tmr,
]
self._off_chip_devices = [
self.lupio_blk,
self.lupio_tty,
self.lupio_sys,
self.lupio_rng,
self.lupio_rtc
self.lupio_rtc,
]
def _setup_io_devices(self) -> None:
@@ -349,9 +353,9 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload):
for i, core in enumerate(self.get_processor().get_cores()):
phandle = state.phandle(f"cpu@{i}.int_state")
int_extended.append(phandle)
int_extended.append(self._excep_code['INT_SOFT_MACHINE'])
int_extended.append(self._excep_code["INT_SOFT_MACHINE"])
int_extended.append(phandle)
int_extended.append(self._excep_code['INT_TIMER_MACHINE'])
int_extended.append(self._excep_code["INT_TIMER_MACHINE"])
clint_node.append(
FdtPropertyWords("interrupts-extended", int_extended)
)
@@ -369,18 +373,19 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload):
# LupioTMR
lupio_tmr = self.lupio_tmr
lupio_tmr_node = lupio_tmr.generateBasicPioDeviceNode(soc_state,
"lupio-tmr", lupio_tmr.pio_addr,
lupio_tmr.pio_size)
lupio_tmr_node = lupio_tmr.generateBasicPioDeviceNode(
soc_state, "lupio-tmr", lupio_tmr.pio_addr, lupio_tmr.pio_size
)
int_state = FdtState(addr_cells=0, interrupt_cells=1)
lupio_tmr_node.append(FdtPropertyWords("clocks", [clk_phandle]))
int_extended = list()
for i, core in enumerate(self.get_processor().get_cores()):
phandle = state.phandle(f"cpu@{i}.int_state")
int_extended.append(phandle)
int_extended.append(self._excep_code['INT_TIMER_SUPER'])
int_extended.append(self._excep_code["INT_TIMER_SUPER"])
lupio_tmr_node.append(
FdtPropertyWords("interrupts-extended", int_extended))
FdtPropertyWords("interrupts-extended", int_extended)
)
lupio_tmr_node.appendCompatible(["lupio,tmr"])
soc_node.append(lupio_tmr_node)
@@ -403,7 +408,7 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload):
for i, core in enumerate(self.get_processor().get_cores()):
phandle = state.phandle(f"cpu@{i}.int_state")
int_extended.append(phandle)
int_extended.append(self._excep_code['INT_EXT_MACHINE'])
int_extended.append(self._excep_code["INT_EXT_MACHINE"])
plic_node.append(FdtPropertyWords("interrupts-extended", int_extended))
plic_node.append(FdtProperty("interrupt-controller"))
@@ -413,25 +418,26 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload):
# LupioIPI Device
lupio_ipi = self.lupio_ipi
lupio_ipi_node = lupio_ipi.generateBasicPioDeviceNode(soc_state,
"lupio-ipi", lupio_ipi.pio_addr,
lupio_ipi.pio_size)
lupio_ipi_node = lupio_ipi.generateBasicPioDeviceNode(
soc_state, "lupio-ipi", lupio_ipi.pio_addr, lupio_ipi.pio_size
)
int_extended = list()
for i, core in enumerate(self.get_processor().get_cores()):
phandle = state.phandle(f"cpu@{i}.int_state")
int_extended.append(phandle)
int_extended.append(self._excep_code['INT_SOFT_SUPER'])
int_extended.append(self._excep_code["INT_SOFT_SUPER"])
lupio_ipi_node.append(
FdtPropertyWords("interrupts-extended", int_extended))
FdtPropertyWords("interrupts-extended", int_extended)
)
lupio_ipi_node.append(FdtProperty("interrupt-controller"))
lupio_ipi_node.appendCompatible(["lupio,ipi"])
soc_node.append(lupio_ipi_node)
# LupioPIC Device
lupio_pic = self.lupio_pic
lupio_pic_node = lupio_pic.generateBasicPioDeviceNode(soc_state,
"lupio-pic", lupio_pic.pio_addr,
lupio_pic.pio_size)
lupio_pic_node = lupio_pic.generateBasicPioDeviceNode(
soc_state, "lupio-pic", lupio_pic.pio_addr, lupio_pic.pio_size
)
int_state = FdtState(interrupt_cells=1)
lupio_pic_node.append(int_state.interruptCellsProperty())
phandle = state.phandle(lupio_pic)
@@ -440,36 +446,47 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload):
for i, core in enumerate(self.get_processor().get_cores()):
phandle = state.phandle(f"cpu@{i}.int_state")
int_extended.append(phandle)
int_extended.append(self._excep_code['INT_EXT_SUPER'])
int_extended.append(self._excep_code["INT_EXT_SUPER"])
lupio_pic_node.append(
FdtPropertyWords("interrupts-extended", int_extended))
FdtPropertyWords("interrupts-extended", int_extended)
)
lupio_pic_node.append(FdtProperty("interrupt-controller"))
lupio_pic_node.appendCompatible(["lupio,pic"])
soc_node.append(lupio_pic_node)
# LupioBLK Device
lupio_blk = self.lupio_blk
lupio_blk_node = lupio_blk.generateBasicPioDeviceNode(soc_state,
"lupio-blk", lupio_blk.pio_addr,
lupio_blk.pio_size)
lupio_blk_node = lupio_blk.generateBasicPioDeviceNode(
soc_state, "lupio-blk", lupio_blk.pio_addr, lupio_blk.pio_size
)
lupio_blk_node.appendCompatible(["lupio,blk"])
lupio_blk_node.append(FdtPropertyWords("interrupts-extended",
[state.phandle(self.lupio_pic), self.lupio_blk.int_id]))
lupio_blk_node.append(
FdtPropertyWords(
"interrupts-extended",
[state.phandle(self.lupio_pic), self.lupio_blk.int_id],
)
)
soc_node.append(lupio_blk_node)
# LupioRNG Device
lupio_rng = self.lupio_rng
lupio_rng_node = lupio_rng.generateBasicPioDeviceNode(soc_state,
"lupio-rng", lupio_rng.pio_addr,lupio_rng.pio_size)
lupio_rng_node = lupio_rng.generateBasicPioDeviceNode(
soc_state, "lupio-rng", lupio_rng.pio_addr, lupio_rng.pio_size
)
lupio_rng_node.appendCompatible(["lupio,rng"])
lupio_rng_node.append(FdtPropertyWords("interrupts-extended",
[state.phandle(self.lupio_pic), self.lupio_rng.int_id]))
lupio_rng_node.append(
FdtPropertyWords(
"interrupts-extended",
[state.phandle(self.lupio_pic), self.lupio_rng.int_id],
)
)
soc_node.append(lupio_rng_node)
#LupioSYS Device
# LupioSYS Device
lupio_sys = self.lupio_sys
lupio_sys_node = lupio_sys.generateBasicPioDeviceNode(soc_state,
"lupio-sys", lupio_sys.pio_addr, lupio_sys.pio_size)
lupio_sys_node = lupio_sys.generateBasicPioDeviceNode(
soc_state, "lupio-sys", lupio_sys.pio_addr, lupio_sys.pio_size
)
lupio_sys_node.appendCompatible(["syscon"])
sys_phandle = state.phandle(self.lupio_sys)
lupio_sys_node.append(FdtPropertyWords("phandle", [sys_phandle]))
@@ -499,11 +516,16 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload):
# LupioTTY Device
lupio_tty = self.lupio_tty
lupio_tty_node = lupio_tty.generateBasicPioDeviceNode(soc_state,
"lupio-tty", lupio_tty.pio_addr, lupio_tty.pio_size)
lupio_tty_node = lupio_tty.generateBasicPioDeviceNode(
soc_state, "lupio-tty", lupio_tty.pio_addr, lupio_tty.pio_size
)
lupio_tty_node.appendCompatible(["lupio,tty"])
lupio_tty_node.append(FdtPropertyWords("interrupts-extended",
[state.phandle(self.lupio_pic), self.lupio_tty.int_id]))
lupio_tty_node.append(
FdtPropertyWords(
"interrupts-extended",
[state.phandle(self.lupio_pic), self.lupio_tty.int_id],
)
)
soc_node.append(lupio_tty_node)
root.append(soc_node)
@@ -525,10 +547,9 @@ class LupvBoard(AbstractSystemBoard, KernelDiskWorkload):
# Note: This must be called after set_workload because it looks for an
# attribute named "disk" and connects
# Set the disk image for the block device to use
# Set the disk image for the block device to use
image = CowDiskImage(
child=RawDiskImage(read_only=True),
read_only=False
child=RawDiskImage(read_only=True), read_only=False
)
image.child.image_file = disk_image.get_local_path()
self.lupio_blk.image = image

View File

@@ -34,6 +34,7 @@ import os
import m5
class KernelDiskWorkload:
"""
The purpose of this abstract class is to enable a full-system boot
@@ -162,7 +163,7 @@ class KernelDiskWorkload:
# We assume that this is in a multiple-inheritance setup with an
# Abstract board. This function will not work otherwise.
assert(isinstance(self,AbstractBoard))
assert isinstance(self, AbstractBoard)
# If we are setting a workload of this type, we need to run as a
# full-system simulation.

View File

@@ -93,9 +93,11 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload):
super().__init__(clk_freq, processor, memory, cache_hierarchy)
if processor.get_isa() != ISA.RISCV:
raise Exception("The RISCVBoard requires a processor using the"
raise Exception(
"The RISCVBoard requires a processor using the"
"RISCV ISA. Current processor ISA: "
f"'{processor.get_isa().name}'.")
f"'{processor.get_isa().name}'."
)
@overrides(AbstractSystemBoard)
def _setup_board(self) -> None:
@@ -141,16 +143,17 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload):
def _setup_io_devices(self) -> None:
"""Connect the I/O devices to the I/O bus"""
#Add PCI
# Add PCI
self.platform.pci_host.pio = self.iobus.mem_side_ports
#Add Ethernet card
self.ethernet = IGbE_e1000(pci_bus=0, pci_dev=0, pci_func=0,
InterruptLine=1, InterruptPin=1)
# Add Ethernet card
self.ethernet = IGbE_e1000(
pci_bus=0, pci_dev=0, pci_func=0, InterruptLine=1, InterruptPin=1
)
self.ethernet.host = self.platform.pci_host
self.ethernet.pio = self.iobus.mem_side_ports
self.ethernet.dma = self.iobus.cpu_side_ports
self.ethernet.pio = self.iobus.mem_side_ports
self.ethernet.dma = self.iobus.cpu_side_ports
if self.get_cache_hierarchy().is_ruby():
for device in self._off_chip_devices + self._on_chip_devices:
@@ -172,10 +175,10 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload):
for dev in self._off_chip_devices
]
#PCI
self.bridge.ranges.append(AddrRange(0x2F000000, size='16MB'))
self.bridge.ranges.append(AddrRange(0x30000000, size='256MB'))
self.bridge.ranges.append(AddrRange(0x40000000, size='512MB'))
# PCI
self.bridge.ranges.append(AddrRange(0x2F000000, size="16MB"))
self.bridge.ranges.append(AddrRange(0x30000000, size="256MB"))
self.bridge.ranges.append(AddrRange(0x40000000, size="512MB"))
def _setup_pma(self) -> None:
"""Set the PMA devices on each core"""
@@ -185,10 +188,10 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload):
for dev in self._on_chip_devices + self._off_chip_devices
]
#PCI
uncacheable_range.append(AddrRange(0x2F000000, size='16MB'))
uncacheable_range.append(AddrRange(0x30000000, size='256MB'))
uncacheable_range.append(AddrRange(0x40000000, size='512MB'))
# PCI
uncacheable_range.append(AddrRange(0x2F000000, size="16MB"))
uncacheable_range.append(AddrRange(0x30000000, size="256MB"))
uncacheable_range.append(AddrRange(0x40000000, size="512MB"))
# TODO: Not sure if this should be done per-core like in the example
for cpu in self.get_processor().get_cores():
@@ -346,8 +349,9 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload):
soc_node.append(plic_node)
# PCI
pci_state = FdtState(addr_cells=3, size_cells=2,
cpu_cells=1, interrupt_cells=1)
pci_state = FdtState(
addr_cells=3, size_cells=2, cpu_cells=1, interrupt_cells=1
)
pci_node = FdtNode("pci")
if int(self.platform.pci_host.conf_device_bits) == 8:
@@ -364,9 +368,13 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload):
pci_node.append(pci_state.sizeCellsProperty())
pci_node.append(pci_state.interruptCellsProperty())
# PCI address for CPU
pci_node.append(FdtPropertyWords("reg",
soc_state.addrCells(self.platform.pci_host.conf_base) +
soc_state.sizeCells(self.platform.pci_host.conf_size) ))
pci_node.append(
FdtPropertyWords(
"reg",
soc_state.addrCells(self.platform.pci_host.conf_base)
+ soc_state.sizeCells(self.platform.pci_host.conf_size),
)
)
# Ranges mapping
# For now some of this is hard coded, because the PCI module does not
@@ -382,18 +390,19 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload):
# AXI memory address range
ranges += self.platform.pci_host.pciFdtAddr(space=2, addr=0)
ranges += soc_state.addrCells(self.platform.pci_host.pci_mem_base)
ranges += pci_state.sizeCells(0x40000000) # Fixed size
ranges += pci_state.sizeCells(0x40000000) # Fixed size
pci_node.append(FdtPropertyWords("ranges", ranges))
# Interrupt mapping
plic_handle = int_state.phandle(plic)
int_base = self.platform.pci_host.int_base
int_base = self.platform.pci_host.int_base
interrupts = []
for i in range(int(self.platform.pci_host.int_count)):
interrupts += self.platform.pci_host.pciFdtAddr(device=i,
addr=0) + [int(i) + 1, plic_handle, int(int_base) + i]
interrupts += self.platform.pci_host.pciFdtAddr(
device=i, addr=0
) + [int(i) + 1, plic_handle, int(int_base) + i]
pci_node.append(FdtPropertyWords("interrupt-map", interrupts))
@@ -401,8 +410,9 @@ class RiscvBoard(AbstractSystemBoard, KernelDiskWorkload):
if int_count & (int_count - 1):
fatal("PCI interrupt count should be power of 2")
intmask = self.platform.pci_host.pciFdtAddr(device=int_count - 1,
addr=0) + [0x0]
intmask = self.platform.pci_host.pciFdtAddr(
device=int_count - 1, addr=0
) + [0x0]
pci_node.append(FdtPropertyWords("interrupt-map-mask", intmask))
if self.platform.pci_host._dma_coherent:

View File

@@ -31,6 +31,7 @@ from m5.objects import SEWorkload, Process
from typing import Optional, List
class SEBinaryWorkload:
"""
This class is used to enable simple Syscall-Execution (SE) mode execution
@@ -64,7 +65,7 @@ class SEBinaryWorkload:
# We assume that this is in a multiple-inheritance setup with an
# Abstract board. This function will not work otherwise.
assert(isinstance(self,AbstractBoard))
assert isinstance(self, AbstractBoard)
# If we are setting a workload of this type, we need to run as a
# SE-mode simulation.
@@ -77,7 +78,7 @@ class SEBinaryWorkload:
process.executable = binary_path
process.cmd = [binary_path] + arguments
if stdin_file is not None:
process.input = stdin_file.get_local_path()
process.input = stdin_file.get_local_path()
self.get_processor().get_cores()[0].set_workload(process)

View File

@@ -24,11 +24,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.objects import (
AddrRange,
IOXBar,
Port,
)
from m5.objects import AddrRange, IOXBar, Port
from .abstract_system_board import AbstractSystemBoard
from .se_binary_workload import SEBinaryWorkload

View File

@@ -24,11 +24,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.objects import (
Port,
IOXBar,
AddrRange,
)
from m5.objects import Port, IOXBar, AddrRange
from .mem_mode import MemMode, mem_mode_to_string
from ...utils.override import overrides

View File

@@ -85,8 +85,10 @@ class X86Board(AbstractSystemBoard, KernelDiskWorkload):
)
if self.get_processor().get_isa() != ISA.X86:
raise Exception("The X86Board requires a processor using the X86 "
f"ISA. Current processor ISA: '{processor.get_isa().name}'.")
raise Exception(
"The X86Board requires a processor using the X86 "
f"ISA. Current processor ISA: '{processor.get_isa().name}'."
)
@overrides(AbstractSystemBoard)
def _setup_board(self) -> None:
@@ -100,7 +102,7 @@ class X86Board(AbstractSystemBoard, KernelDiskWorkload):
# Set up all of the I/O.
self._setup_io_devices()
self.m5ops_base = 0xffff0000
self.m5ops_base = 0xFFFF0000
def _setup_io_devices(self):
""" Sets up the x86 IO devices.

View File

@@ -33,24 +33,29 @@ from m5.objects import Cache_Controller, MessageBuffer, RubyNetwork
import math
class TriggerMessageBuffer(MessageBuffer):
'''
"""
MessageBuffer for triggering internal controller events.
These buffers should not be affected by the Ruby tester randomization
and allow poping messages enqueued in the same cycle.
'''
randomization = 'disabled'
"""
randomization = "disabled"
allow_zero_latency = True
class OrderedTriggerMessageBuffer(TriggerMessageBuffer):
ordered = True
class AbstractNode(Cache_Controller):
"""A node is the abstract unit for caches in the CHI protocol.
You can extend the AbstractNode to create caches (private or shared) and
directories with or without data caches.
"""
_version = 0
@classmethod
@@ -128,5 +133,3 @@ class AbstractNode(Cache_Controller):
self.rspIn.in_port = network.out_port
self.snpIn.in_port = network.out_port
self.datIn.in_port = network.out_port

View File

@@ -26,12 +26,8 @@
from .abstract_node import AbstractNode
from m5.objects import (
ClockDomain,
NULL,
RubyCache,
RubyNetwork,
)
from m5.objects import ClockDomain, NULL, RubyCache, RubyNetwork
class SimpleDirectory(AbstractNode):
"""A directory or home node (HNF)
@@ -39,6 +35,7 @@ class SimpleDirectory(AbstractNode):
This simple directory has no cache. It forwards all requests as directly
as possible.
"""
def __init__(
self,
network: RubyNetwork,
@@ -49,10 +46,7 @@ class SimpleDirectory(AbstractNode):
# Dummy cache
self.cache = RubyCache(
dataAccessLatency = 0,
tagAccessLatency = 1,
size = "128",
assoc = 1
dataAccessLatency=0, tagAccessLatency=1, size="128", assoc=1
)
self.clk_domain = clk_domain

View File

@@ -29,26 +29,16 @@ from gem5.isas import ISA
from .abstract_node import AbstractNode
from m5.objects import (
ClockDomain,
RubyCache,
)
from m5.objects import ClockDomain, RubyCache
class DMARequestor(AbstractNode):
def __init__(
self,
network,
cache_line_size,
clk_domain: ClockDomain,
):
def __init__(self, network, cache_line_size, clk_domain: ClockDomain):
super().__init__(network, cache_line_size)
# Dummy cache
self.cache = RubyCache(
dataAccessLatency = 0,
tagAccessLatency = 1,
size = "128",
assoc = 1
dataAccessLatency=0, tagAccessLatency=1, size="128", assoc=1
)
self.clk_domain = clk_domain
@@ -76,5 +66,5 @@ class DMARequestor(AbstractNode):
# Some reasonable default TBE params
self.number_of_TBEs = 16
self.number_of_repl_TBEs = 1
self.number_of_snoop_TBEs = 1 # Should never receive snoops
self.number_of_snoop_TBEs = 1 # Should never receive snoops
self.unify_repl_TBEs = False

View File

@@ -36,9 +36,11 @@ from m5.objects import (
from .abstract_node import TriggerMessageBuffer
class MemoryController(Memory_Controller):
"""A controller that connects to memory
"""
_version = 0
@classmethod
@@ -47,10 +49,7 @@ class MemoryController(Memory_Controller):
return cls._version - 1
def __init__(
self,
network: RubyNetwork,
ranges: List[AddrRange],
port: Port
self, network: RubyNetwork, ranges: List[AddrRange], port: Port
):
super().__init__()
@@ -65,7 +64,7 @@ class MemoryController(Memory_Controller):
def connectQueues(self, network):
self.triggerQueue = TriggerMessageBuffer()
self.responseFromMemory = MessageBuffer()
self.requestToMemory = MessageBuffer(ordered = True)
self.requestToMemory = MessageBuffer(ordered=True)
self.reqRdy = TriggerMessageBuffer()
self.reqOut = MessageBuffer()

View File

@@ -29,11 +29,7 @@ from gem5.isas import ISA
from .abstract_node import AbstractNode
from m5.objects import (
ClockDomain,
RubyCache,
RubyNetwork,
)
from m5.objects import ClockDomain, RubyCache, RubyNetwork
class PrivateL1MOESICache(AbstractNode):
@@ -69,7 +65,7 @@ class PrivateL1MOESICache(AbstractNode):
self.alloc_on_readshared = True
self.alloc_on_readunique = True
self.alloc_on_readonce = True
self.alloc_on_writeback = False # Should never happen in an L1
self.alloc_on_writeback = False # Should never happen in an L1
self.dealloc_on_unique = False
self.dealloc_on_shared = False
self.dealloc_backinv_unique = True

View File

@@ -28,8 +28,9 @@ from itertools import chain
from typing import List
from m5.objects.SubSystem import SubSystem
from gem5.components.cachehierarchies.ruby.abstract_ruby_cache_hierarchy \
import AbstractRubyCacheHierarchy
from gem5.components.cachehierarchies.ruby.abstract_ruby_cache_hierarchy import (
AbstractRubyCacheHierarchy,
)
from gem5.components.cachehierarchies.abstract_cache_hierarchy import (
AbstractCacheHierarchy,
)
@@ -49,12 +50,7 @@ from .nodes.dma_requestor import DMARequestor
from .nodes.directory import SimpleDirectory
from .nodes.memory_controller import MemoryController
from m5.objects import (
NULL,
RubySystem,
RubySequencer,
RubyPortProxy,
)
from m5.objects import NULL, RubySystem, RubySequencer, RubyPortProxy
class PrivateL1CacheHierarchy(AbstractRubyCacheHierarchy):
@@ -113,14 +109,15 @@ class PrivateL1CacheHierarchy(AbstractRubyCacheHierarchy):
# Create the DMA Controllers, if required.
if board.has_dma_ports():
self.dma_controllers = self._create_dma_controllers(board)
self.ruby_system.num_of_sequencers = len(self.core_clusters) * 2 \
+ len(self.dma_controllers)
self.ruby_system.num_of_sequencers = len(
self.core_clusters
) * 2 + len(self.dma_controllers)
else:
self.ruby_system.num_of_sequencers = len(self.core_clusters) * 2
self.ruby_system.network.connectControllers(
list(
chain.from_iterable( # Grab the controllers from each cluster
chain.from_iterable( # Grab the controllers from each cluster
[
(cluster.dcache, cluster.icache)
for cluster in self.core_clusters
@@ -139,10 +136,8 @@ class PrivateL1CacheHierarchy(AbstractRubyCacheHierarchy):
self.ruby_system.sys_port_proxy = RubyPortProxy()
board.connect_system_port(self.ruby_system.sys_port_proxy.in_ports)
def _create_core_cluster(self,
core: AbstractCore,
core_num: int,
board: AbstractBoard
def _create_core_cluster(
self, core: AbstractCore, core_num: int, board: AbstractBoard
) -> SubSystem:
"""Given the core and the core number this function creates a cluster
for the core with a split I/D cache
@@ -168,9 +163,7 @@ class PrivateL1CacheHierarchy(AbstractRubyCacheHierarchy):
)
cluster.icache.sequencer = RubySequencer(
version=core_num,
dcache=NULL,
clk_domain=cluster.icache.clk_domain,
version=core_num, dcache=NULL, clk_domain=cluster.icache.clk_domain
)
cluster.dcache.sequencer = RubySequencer(
version=core_num,
@@ -206,23 +199,17 @@ class PrivateL1CacheHierarchy(AbstractRubyCacheHierarchy):
return cluster
def _create_memory_controllers(
self,
board: AbstractBoard
self, board: AbstractBoard
) -> List[MemoryController]:
memory_controllers = []
for rng, port in board.get_memory().get_mem_ports():
mc = MemoryController(
self.ruby_system.network,
rng,
port,
)
mc = MemoryController(self.ruby_system.network, rng, port)
mc.ruby_system = self.ruby_system
memory_controllers.append(mc)
return memory_controllers
def _create_dma_controllers(
self,
board: AbstractBoard
self, board: AbstractBoard
) -> List[DMARequestor]:
dma_controllers = []
for i, port in enumerate(board.get_dma_ports()):
@@ -232,10 +219,7 @@ class PrivateL1CacheHierarchy(AbstractRubyCacheHierarchy):
board.get_clock_domain(),
)
version = len(board.get_processor().get_cores()) + i
ctrl.sequencer = RubySequencer(
version=version,
in_ports=port
)
ctrl.sequencer = RubySequencer(version=version, in_ports=port)
ctrl.sequencer.dcache = NULL
ctrl.ruby_system = self.ruby_system

View File

@@ -28,6 +28,7 @@ from .....utils.override import *
from m5.objects import Cache, BasePrefetcher, StridePrefetcher
class MMUCache(Cache):
"""
A simple Memory Management Unit (MMU) cache with default values.

View File

@@ -36,6 +36,7 @@ from m5.objects import Cache, BaseXBar, SystemXBar, BadAddr, Port
from ....utils.override import *
class PrivateL1CacheHierarchy(AbstractClassicCacheHierarchy):
"""
A cache setup where each core has a private L1 data and instruction Cache.
@@ -103,11 +104,13 @@ class PrivateL1CacheHierarchy(AbstractClassicCacheHierarchy):
]
# ITLB Page walk caches
self.iptw_caches = [
MMUCache(size="8KiB") for _ in range(board.get_processor().get_num_cores())
MMUCache(size="8KiB")
for _ in range(board.get_processor().get_num_cores())
]
# DTLB Page walk caches
self.dptw_caches = [
MMUCache(size="8KiB") for _ in range(board.get_processor().get_num_cores())
MMUCache(size="8KiB")
for _ in range(board.get_processor().get_num_cores())
]
if board.has_coherent_io():

View File

@@ -37,6 +37,7 @@ from m5.objects import Cache, L2XBar, BaseXBar, SystemXBar, BadAddr, Port
from ....utils.override import *
class PrivateL1PrivateL2CacheHierarchy(
AbstractClassicCacheHierarchy, AbstractTwoLevelCacheHierarchy
):
@@ -134,12 +135,12 @@ class PrivateL1PrivateL2CacheHierarchy(
]
# ITLB Page walk caches
self.iptw_caches = [
MMUCache(size='8KiB')
MMUCache(size="8KiB")
for _ in range(board.get_processor().get_num_cores())
]
# DTLB Page walk caches
self.dptw_caches = [
MMUCache(size='8KiB')
MMUCache(size="8KiB")
for _ in range(board.get_processor().get_num_cores())
]

View File

@@ -27,10 +27,7 @@
from ......utils.override import overrides
from ..abstract_directory import AbstractDirectory
from m5.objects import (
MessageBuffer,
RubyDirectoryMemory,
)
from m5.objects import MessageBuffer, RubyDirectoryMemory
class Directory(AbstractDirectory):

View File

@@ -29,12 +29,7 @@ from ......isas import ISA
from ..abstract_l1_cache import AbstractL1Cache
from ......utils.override import *
from m5.objects import (
MessageBuffer,
RubyPrefetcher,
RubyCache,
ClockDomain,
)
from m5.objects import MessageBuffer, RubyPrefetcher, RubyCache, ClockDomain
import math

View File

@@ -28,10 +28,7 @@ from ..abstract_directory import AbstractDirectory
from ......utils.override import overrides
from m5.objects import (
MessageBuffer,
RubyDirectoryMemory,
)
from m5.objects import MessageBuffer, RubyDirectoryMemory
class Directory(AbstractDirectory):

View File

@@ -29,11 +29,7 @@ from .....processors.abstract_core import AbstractCore
from ......isas import ISA
from ..abstract_l1_cache import AbstractL1Cache
from m5.objects import (
MessageBuffer,
RubyCache,
ClockDomain,
)
from m5.objects import MessageBuffer, RubyCache, ClockDomain
class L1Cache(AbstractL1Cache):

View File

@@ -38,12 +38,7 @@ from .caches.mesi_two_level.l2_cache import L2Cache
from .caches.mesi_two_level.directory import Directory
from .caches.mesi_two_level.dma_controller import DMAController
from m5.objects import (
RubySystem,
RubySequencer,
DMASequencer,
RubyPortProxy,
)
from m5.objects import RubySystem, RubySequencer, DMASequencer, RubyPortProxy
class MESITwoLevelCacheHierarchy(
@@ -110,9 +105,7 @@ class MESITwoLevelCacheHierarchy(
)
cache.sequencer = RubySequencer(
version=i,
dcache=cache.L1Dcache,
clk_domain=cache.clk_domain,
version=i, dcache=cache.L1Dcache, clk_domain=cache.clk_domain
)
if board.has_io_bus():

View File

@@ -37,12 +37,7 @@ from ....utils.override import overrides
from ....utils.requires import requires
from m5.objects import (
RubySystem,
RubySequencer,
DMASequencer,
RubyPortProxy,
)
from m5.objects import RubySystem, RubySequencer, DMASequencer, RubyPortProxy
class MIExampleCacheHierarchy(AbstractRubyCacheHierarchy):
@@ -51,11 +46,7 @@ class MIExampleCacheHierarchy(AbstractRubyCacheHierarchy):
simple point-to-point topology.
"""
def __init__(
self,
size: str,
assoc: str,
):
def __init__(self, size: str, assoc: str):
"""
:param size: The size of each cache in the heirarchy.
:param assoc: The associativity of each cache.

View File

@@ -128,9 +128,7 @@ class SingleChannel(AbstractMemorySystem):
self.mem_ctrl.range = ranges[0]
def SingleChannelDDR3_1600(
size: Optional[str] = "2048MB",
) -> SingleChannel:
def SingleChannelDDR3_1600(size: Optional[str] = "2048MB",) -> SingleChannel:
"""
A single channel DDR3_1600.

View File

@@ -42,6 +42,7 @@ def _try_convert(val, cls):
except:
raise Exception(f"Could not convert {val} to {cls}")
def _isPow2(num):
log_num = int(log(num, 2))
if 2 ** log_num != num:
@@ -49,12 +50,14 @@ def _isPow2(num):
else:
return True
class ChanneledMemory(AbstractMemorySystem):
"""A class to implement multi-channel memory system
This class can take a DRAM Interface as a parameter to model a multi
channel DDR DRAM memory system.
"""
def __init__(
self,
dram_interface_class: Type[DRAMInterface],
@@ -181,5 +184,3 @@ class ChanneledMemory(AbstractMemorySystem):
)
self._mem_range = ranges[0]
self._interleave_addresses()

View File

@@ -34,63 +34,34 @@ from .dram_interfaces.lpddr3 import LPDDR3_1600_1x32
from .dram_interfaces.hbm import HBM_1000_4H_1x64
def DualChannelDDR3_1600(
size: Optional[str] = None,
) -> AbstractMemorySystem:
def DualChannelDDR3_1600(size: Optional[str] = None,) -> AbstractMemorySystem:
"""
A dual channel memory system using DDR3_1600_8x8 based DIMM
"""
return ChanneledMemory(
DDR3_1600_8x8,
2,
64,
size=size,
)
return ChanneledMemory(DDR3_1600_8x8, 2, 64, size=size)
def DualChannelDDR3_2133(
size: Optional[str] = None,
) -> AbstractMemorySystem:
def DualChannelDDR3_2133(size: Optional[str] = None,) -> AbstractMemorySystem:
"""
A dual channel memory system using DDR3_2133_8x8 based DIMM
"""
return ChanneledMemory(
DDR3_2133_8x8,
2,
64,
size=size,
)
return ChanneledMemory(DDR3_2133_8x8, 2, 64, size=size)
def DualChannelDDR4_2400(
size: Optional[str] = None,
) -> AbstractMemorySystem:
def DualChannelDDR4_2400(size: Optional[str] = None,) -> AbstractMemorySystem:
"""
A dual channel memory system using DDR4_2400_8x8 based DIMM
"""
return ChanneledMemory(
DDR4_2400_8x8,
2,
64,
size=size,
)
return ChanneledMemory(DDR4_2400_8x8, 2, 64, size=size)
def DualChannelLPDDR3_1600(
size: Optional[str] = None,
) -> AbstractMemorySystem:
return ChanneledMemory(
LPDDR3_1600_1x32,
2,
64,
size=size,
)
return ChanneledMemory(LPDDR3_1600_1x32, 2, 64, size=size)
def HBM2Stack(
size: Optional[str] = None,
) -> AbstractMemorySystem:
def HBM2Stack(size: Optional[str] = None,) -> AbstractMemorySystem:
if not size:
size = "4GiB"
return ChanneledMemory(
HBM_1000_4H_1x64,
16,
64,
size=size,
)
return ChanneledMemory(HBM_1000_4H_1x64, 16, 64, size=size)

View File

@@ -34,6 +34,7 @@ from ..boards.abstract_board import AbstractBoard
from .abstract_memory_system import AbstractMemorySystem
from m5.objects import AddrRange, MemCtrl, Port, SimpleMemory
class SingleChannelSimpleMemory(AbstractMemorySystem):
"""A class to implement single channel memory system using SimpleMemory

View File

@@ -41,12 +41,8 @@ def SingleChannelDDR3_1600(
"""
A single channel memory system using DDR3_1600_8x8 based DIMM
"""
return ChanneledMemory(
DDR3_1600_8x8,
1,
64,
size=size,
)
return ChanneledMemory(DDR3_1600_8x8, 1, 64, size=size)
def SingleChannelDDR3_2133(
size: Optional[str] = None,
@@ -54,12 +50,8 @@ def SingleChannelDDR3_2133(
"""
A single channel memory system using DDR3_2133_8x8 based DIMM
"""
return ChanneledMemory(
DDR3_2133_8x8,
1,
64,
size=size,
)
return ChanneledMemory(DDR3_2133_8x8, 1, 64, size=size)
def SingleChannelDDR4_2400(
size: Optional[str] = None,
@@ -67,31 +59,16 @@ def SingleChannelDDR4_2400(
"""
A single channel memory system using DDR4_2400_8x8 based DIMM
"""
return ChanneledMemory(
DDR4_2400_8x8,
1,
64,
size=size,
)
return ChanneledMemory(DDR4_2400_8x8, 1, 64, size=size)
def SingleChannelLPDDR3_1600(
size: Optional[str] = None,
) -> AbstractMemorySystem:
return ChanneledMemory(
LPDDR3_1600_1x32,
1,
64,
size=size,
)
return ChanneledMemory(LPDDR3_1600_1x32, 1, 64, size=size)
def SingleChannelHBM(
size: Optional[str] = None,
) -> AbstractMemorySystem:
def SingleChannelHBM(size: Optional[str] = None,) -> AbstractMemorySystem:
if not size:
size = "256MiB"
return ChanneledMemory(
HBM_1000_4H_1x128,
1,
64,
size=size
)
return ChanneledMemory(HBM_1000_4H_1x128, 1, 64, size=size)

View File

@@ -35,6 +35,7 @@ from ...utils.requires import requires
from m5.objects import BaseMMU, Port, SubSystem
class AbstractCore(SubSystem):
__metaclass__ = ABCMeta
@@ -92,8 +93,9 @@ class AbstractCore(SubSystem):
@abstractmethod
def connect_interrupt(
self, interrupt_requestor: Optional[Port] = None,
interrupt_responce: Optional[Port] = None
self,
interrupt_requestor: Optional[Port] = None,
interrupt_responce: Optional[Port] = None,
) -> None:
""" Connect the core interrupts to the interrupt controller
@@ -124,29 +126,31 @@ class AbstractCore(SubSystem):
requires(isa_required=isa)
_isa_string_map = {
ISA.X86 : "X86",
ISA.ARM : "Arm",
ISA.RISCV : "Riscv",
ISA.SPARC : "Sparc",
ISA.POWER : "Power",
ISA.MIPS : "Mips",
ISA.X86: "X86",
ISA.ARM: "Arm",
ISA.RISCV: "Riscv",
ISA.SPARC: "Sparc",
ISA.POWER: "Power",
ISA.MIPS: "Mips",
}
_cpu_types_string_map = {
CPUTypes.ATOMIC : "AtomicSimpleCPU",
CPUTypes.O3 : "O3CPU",
CPUTypes.TIMING : "TimingSimpleCPU",
CPUTypes.KVM : "KvmCPU",
CPUTypes.MINOR : "MinorCPU",
CPUTypes.ATOMIC: "AtomicSimpleCPU",
CPUTypes.O3: "O3CPU",
CPUTypes.TIMING: "TimingSimpleCPU",
CPUTypes.KVM: "KvmCPU",
CPUTypes.MINOR: "MinorCPU",
}
if isa not in _isa_string_map:
raise NotImplementedError(f"ISA '{isa.name}' does not have an"
raise NotImplementedError(
f"ISA '{isa.name}' does not have an"
"entry in `AbstractCore.cpu_simobject_factory._isa_string_map`"
)
if cpu_type not in _cpu_types_string_map:
raise NotImplementedError(f"CPUType '{cpu_type.name}' "
raise NotImplementedError(
f"CPUType '{cpu_type.name}' "
"does not have an entry in "
"`AbstractCore.cpu_simobject_factory._cpu_types_string_map`"
)
@@ -162,19 +166,24 @@ class AbstractCore(SubSystem):
# : ArmKvmCPU and ArmV8KvmCPU for 32 bit (Armv7l) and 64 bit (Armv8)
# respectively.
if isa.name == "ARM" and \
cpu_type == CPUTypes.KVM and \
platform.architecture()[0] == "64bit":
cpu_class_str = f"{_isa_string_map[isa]}V8"\
f"{_cpu_types_string_map[cpu_type]}"
if (
isa.name == "ARM"
and cpu_type == CPUTypes.KVM
and platform.architecture()[0] == "64bit"
):
cpu_class_str = (
f"{_isa_string_map[isa]}V8"
f"{_cpu_types_string_map[cpu_type]}"
)
else:
cpu_class_str = f"{_isa_string_map[isa]}"\
f"{_cpu_types_string_map[cpu_type]}"
cpu_class_str = (
f"{_isa_string_map[isa]}" f"{_cpu_types_string_map[cpu_type]}"
)
try:
to_return_cls = getattr(importlib.import_module(module_str),
cpu_class_str
)
to_return_cls = getattr(
importlib.import_module(module_str), cpu_class_str
)
except ImportError:
raise Exception(
f"Cannot find CPU type '{cpu_type.name}' for '{isa.name}' "

View File

@@ -28,6 +28,7 @@ from enum import Enum
from typing import Set
import os
class CPUTypes(Enum):
ATOMIC = "atomic"
KVM = "kvm"
@@ -35,12 +36,14 @@ class CPUTypes(Enum):
TIMING = "timing"
MINOR = "minor"
def get_cpu_types_str_set() -> Set[CPUTypes]:
"""
Returns a set of all the CPU types as strings.
"""
return {cpu_type.value for cpu_type in CPUTypes}
def get_cpu_type_from_str(input: str) -> CPUTypes:
"""
Will return the correct enum given the input string. This is matched on
@@ -57,7 +60,7 @@ def get_cpu_type_from_str(input: str) -> CPUTypes:
if input.lower() == cpu_type.value:
return cpu_type
valid_cpu_types_list_str =str()
valid_cpu_types_list_str = str()
for cpu_type_str in get_cpu_types_str_set():
valid_cpu_types_list_str += f"{os.linesep}{cpu_type_str}"

View File

@@ -44,18 +44,14 @@ class GUPSGeneratorCore(AbstractGeneratorCore):
"""
super().__init__()
self.generator = GUPSGen(
start_addr=start_addr,
mem_size=mem_size,
update_limit=update_limit,
)
start_addr=start_addr, mem_size=mem_size, update_limit=update_limit
)
if clk_freq:
clock_domain = SrcClockDomain(
clock=clk_freq, voltage_domain=VoltageDomain()
)
self.generator.clk_domain = clock_domain
@overrides(AbstractGeneratorCore)
def connect_dcache(self, port: Port) -> None:
self.generator.port = port

View File

@@ -34,6 +34,7 @@ from .abstract_processor import AbstractProcessor
from ..boards.abstract_board import AbstractBoard
from .gups_generator_core import GUPSGeneratorCore
class GUPSGeneratorEP(AbstractProcessor):
def __init__(
self,
@@ -85,7 +86,7 @@ class GUPSGeneratorEP(AbstractProcessor):
start_addr=start_addr + i * chunk_size,
mem_size=table_size,
update_limit=update_limit,
clk_freq=clk_freq
clk_freq=clk_freq,
)
for i in range(num_cores)
]

View File

@@ -33,6 +33,8 @@ from ..boards.mem_mode import MemMode
from .abstract_processor import AbstractProcessor
from ..boards.abstract_board import AbstractBoard
from .gups_generator_core import GUPSGeneratorCore
class GUPSGeneratorPAR(AbstractProcessor):
def __init__(
self,

View File

@@ -33,21 +33,12 @@ from ...isas import ISA
from ...runtime import get_runtime_isa
from ...utils.override import overrides
from m5.objects import (
BaseMMU,
Port,
BaseCPU,
Process,
)
from m5.objects import BaseMMU, Port, BaseCPU, Process
class SimpleCore(AbstractCore):
def __init__(
self,
cpu_type: CPUTypes,
core_id: int,
isa: Optional[ISA]= None
self, cpu_type: CPUTypes, core_id: int, isa: Optional[ISA] = None
):
super().__init__(cpu_type=cpu_type)
if isa:
@@ -56,9 +47,7 @@ class SimpleCore(AbstractCore):
else:
self._isa = get_runtime_isa()
self.core = AbstractCore.cpu_simobject_factory(
isa=self._isa,
cpu_type=cpu_type,
core_id=core_id
isa=self._isa, cpu_type=cpu_type, core_id=core_id
)
self.core.createThreads()
@@ -102,8 +91,9 @@ class SimpleCore(AbstractCore):
@overrides(AbstractCore)
def connect_interrupt(
self, interrupt_requestor: Optional[Port] = None,
interrupt_responce: Optional[Port] = None
self,
interrupt_requestor: Optional[Port] = None,
interrupt_responce: Optional[Port] = None,
) -> None:
# TODO: This model assumes that we will only create an interrupt

View File

@@ -45,10 +45,7 @@ class SimpleProcessor(AbstractProcessor):
"""
def __init__(
self,
cpu_type: CPUTypes,
num_cores: int,
isa: Optional[ISA] = None,
self, cpu_type: CPUTypes, num_cores: int, isa: Optional[ISA] = None
) -> None:
"""
param cpu_type: The CPU type for each type in the processor.
@@ -63,9 +60,7 @@ class SimpleProcessor(AbstractProcessor):
"""
super().__init__(
cores=self._create_cores(
cpu_type=cpu_type,
num_cores=num_cores,
isa = isa,
cpu_type=cpu_type, num_cores=num_cores, isa=isa
)
)
@@ -76,14 +71,11 @@ class SimpleProcessor(AbstractProcessor):
self.kvm_vm = KvmVM()
def _create_cores(
self,
cpu_type: CPUTypes,
num_cores: int,
isa: Optional[ISA]
self, cpu_type: CPUTypes, num_cores: int, isa: Optional[ISA]
):
return [
SimpleCore(cpu_type=cpu_type, core_id=i, isa=isa,) \
for i in range(num_cores)
SimpleCore(cpu_type=cpu_type, core_id=i, isa=isa)
for i in range(num_cores)
]
@overrides(AbstractProcessor)

View File

@@ -93,8 +93,7 @@ class SimpleSwitchableProcessor(SwitchableProcessor):
}
super().__init__(
switchable_cores=switchable_cores,
starting_cores=self._start_key,
switchable_cores=switchable_cores, starting_cores=self._start_key
)
@overrides(SwitchableProcessor)

View File

@@ -150,8 +150,7 @@ class SwitchableProcessor(AbstractProcessor):
# Switch the CPUs
m5.switchCpus(
self._board,
list(zip(current_core_simobj, to_switch_simobj)),
self._board, list(zip(current_core_simobj, to_switch_simobj))
)
# Ensure the current processor is updated.

View File

@@ -32,6 +32,7 @@ import os
from enum import Enum
from typing import Set
class ISA(Enum):
"""
The ISA Enums which may be used in the gem5 stdlib to specify ISAs.
@@ -46,6 +47,7 @@ class ISA(Enum):
...
```
"""
X86 = "x86"
RISCV = "riscv"
ARM = "arm"
@@ -54,12 +56,14 @@ class ISA(Enum):
SPARC = "sparc"
NULL = "null"
def get_isas_str_set() -> Set[ISA]:
"""
Returns a set of all the ISA as strings.
"""
return {isa.value for isa in ISA}
def get_isa_from_str(input: str) -> ISA:
"""
Will return the correct enum given the input string. This is matched on
@@ -76,7 +80,7 @@ def get_isa_from_str(input: str) -> ISA:
if input.lower() == isa.value:
return isa
valid_isas_str_list =str()
valid_isas_str_list = str()
for isa_str in get_isa_from_str():
valid_isas_str_list += f"{os.linesep}{isa_str}"

View File

@@ -30,8 +30,9 @@ from ...components.processors.cpu_types import CPUTypes
from ...components.boards.x86_board import X86Board
from ...components.memory.single_channel import SingleChannelDDR3_1600
from ...components.processors.simple_processor import SimpleProcessor
from ...components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy \
import MESITwoLevelCacheHierarchy
from ...components.cachehierarchies.ruby.mesi_two_level_cache_hierarchy import (
MESITwoLevelCacheHierarchy,
)
from ...coherence_protocol import CoherenceProtocol
from ...isas import ISA
from ...utils.requires import requires
@@ -70,15 +71,15 @@ class X86DemoBoard(X86Board):
coherence_protocol_required=CoherenceProtocol.MESI_TWO_LEVEL,
)
warn("The X86DemoBoard is solely for demonstration purposes. "
"This board is not known to be be representative of any "
"real-world system. Use with caution.")
warn(
"The X86DemoBoard is solely for demonstration purposes. "
"This board is not known to be be representative of any "
"real-world system. Use with caution."
)
memory = SingleChannelDDR3_1600(size="2GB")
processor = SimpleProcessor(
cpu_type=CPUTypes.TIMING,
isa=ISA.X86,
num_cores=4
cpu_type=CPUTypes.TIMING, isa=ISA.X86, num_cores=4
)
cache_hierarchy = MESITwoLevelCacheHierarchy(
l1d_size="32kB",

View File

@@ -50,15 +50,18 @@ This Python module contains functions used to download, list, and obtain
information about resources from resources.gem5.org.
"""
def _resources_json_version_required() -> str:
"""
Specifies the version of resources.json to obtain.
"""
return "develop"
def _get_resources_json_uri() -> str:
return "https://resources.gem5.org/resources.json"
def _url_validator(url):
try:
result = urllib.parse.urlparse(url)
@@ -66,8 +69,9 @@ def _url_validator(url):
except:
return False
def _get_resources_json_at_path(path: str, use_caching: bool = True) -> Dict:
'''
"""
Returns a resource JSON, in the form of a Python Dict. The location
of the JSON must be specified.
@@ -78,7 +82,7 @@ def _get_resources_json_at_path(path: str, use_caching: bool = True) -> Dict:
:param use_caching: True if a cached file is to be used (up to an hour),
otherwise the file will be retrieved from the URL regardless. True by
default. Only valid in cases where a URL is passed.
'''
"""
# If a local valid path is passed, just load it.
if Path(path).is_file():
@@ -116,9 +120,12 @@ def _get_resources_json_at_path(path: str, use_caching: bool = True) -> Dict:
# time of the file. This is the most portable solution as other ideas,
# like "file creation time", are not always the same concept between
# operating systems.
if not use_caching or not os.path.exists(download_path) or \
(time.time() - os.path.getmtime(download_path)) > 3600:
_download(path, download_path)
if (
not use_caching
or not os.path.exists(download_path)
or (time.time() - os.path.getmtime(download_path)) > 3600
):
_download(path, download_path)
with open(download_path) as f:
file_contents = f.read()
@@ -134,6 +141,7 @@ def _get_resources_json_at_path(path: str, use_caching: bool = True) -> Dict:
return to_return
def _get_resources_json() -> Dict:
"""
Gets the Resources JSON.
@@ -142,7 +150,7 @@ def _get_resources_json() -> Dict:
"""
path = os.getenv("GEM5_RESOURCE_JSON", _get_resources_json_uri())
to_return = _get_resources_json_at_path(path = path)
to_return = _get_resources_json_at_path(path=path)
# If the current version pulled is not correct, look up the
# "previous-versions" field to find the correct one.
@@ -150,17 +158,18 @@ def _get_resources_json() -> Dict:
if to_return["version"] != version:
if version in to_return["previous-versions"].keys():
to_return = _get_resources_json_at_path(
path = to_return["previous-versions"][version]
path=to_return["previous-versions"][version]
)
else:
# This should never happen, but we thrown an exception to explain
# that we can't find the version.
raise Exception(
f"Version '{version}' of resources.json cannot be found."
)
)
return to_return
def _get_url_base() -> str:
"""
Obtains the "url_base" string from the resources.json file.
@@ -217,11 +226,8 @@ def _get_resources(resources_group: Dict) -> Dict[str, Dict]:
return to_return
def _download(
url: str,
download_to: str,
max_attempts: int = 6,
) -> None:
def _download(url: str, download_to: str, max_attempts: int = 6) -> None:
"""
Downloads a file.
@@ -240,7 +246,6 @@ def _download(
# TODO: This whole setup will only work for single files we can get via
# wget. We also need to support git clones going forward.
attempt = 0
while True:
# The loop will be broken on a successful download, via a `return`, or
@@ -268,7 +273,6 @@ def _download(
raise e
def list_resources() -> List[str]:
"""
Lists all available resources by name.
@@ -377,14 +381,16 @@ def get_resource(
else:
raise Exception(
"The resource.json entry for '{}' has a value for the "
"'is_zipped' field which is neither a string or a boolean."
.format(
"'is_zipped' field which is neither a string or a boolean.".format(
resource_name
)
)
run_tar_extract = untar and "is_tar_archive" in resource_json and \
resource_json["is_tar_archive"]
run_tar_extract = (
untar
and "is_tar_archive" in resource_json
and resource_json["is_tar_archive"]
)
tar_extension = ".tar"
if run_tar_extract:
@@ -397,8 +403,7 @@ def get_resource(
# TODO: Might be nice to have some kind of download status bar here.
# TODO: There might be a case where this should be silenced.
print(
"Resource '{}' was not found locally. Downloading to '{}'..."
.format(
"Resource '{}' was not found locally. Downloading to '{}'...".format(
resource_name, download_dest
)
)
@@ -416,7 +421,7 @@ def get_resource(
resource_name, download_dest
)
)
unzip_to = download_dest[:-len(zip_extension)]
unzip_to = download_dest[: -len(zip_extension)]
with gzip.open(download_dest, "rb") as f:
with open(unzip_to, "wb") as o:
shutil.copyfileobj(f, o)
@@ -431,7 +436,7 @@ def get_resource(
f"Unpacking the the resource '{resource_name}' "
f"('{download_dest}')"
)
unpack_to = download_dest[:-len(tar_extension)]
unpack_to = download_dest[: -len(tar_extension)]
with tarfile.open(download_dest) as f:
f.extractall(unpack_to)
os.remove(download_dest)

View File

@@ -28,14 +28,16 @@ from pathlib import Path
import hashlib
from _hashlib import HASH as Hash
def _md5_update_from_file(filename: Path, hash: Hash) -> Hash:
def _md5_update_from_file(filename: Path, hash: Hash) -> Hash:
assert filename.is_file()
with open(str(filename), "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash.update(chunk)
return hash
def _md5_update_from_dir(directory: Path, hash: Hash) -> Hash:
def _md5_update_from_dir(directory: Path, hash: Hash) -> Hash:
assert directory.is_dir()
for path in sorted(directory.iterdir(), key=lambda p: str(p).lower()):
hash.update(path.name.encode())
@@ -45,6 +47,7 @@ def _md5_update_from_dir(directory: Path, hash: Hash) -> Hash:
hash = _md5_update_from_dir(path, hash)
return hash
def md5(path: Path) -> str:
"""
Gets the md5 value of a file or directory. `md5_file` is used if the path
@@ -60,7 +63,8 @@ def md5(path: Path) -> str:
else:
raise Exception(f"Path '{path}' is not a valid file or directory.")
def md5_file(filename: Path) -> str:
def md5_file(filename: Path) -> str:
"""
Gives the md5 hash of a file
@@ -68,6 +72,7 @@ def md5_file(filename: Path) -> str:
"""
return str(_md5_update_from_file(filename, hashlib.md5()).hexdigest())
def md5_dir(directory: Path) -> str:
"""
Gives the md5 value of a directory.

View File

@@ -80,6 +80,7 @@ class CustomResource(AbstractResource):
"""
super().__init__(local_path=local_path, metadata=metadata)
class CustomDiskImageResource(CustomResource):
"""
A custom disk image gem5 resource. It can be used to specify a custom,
@@ -111,6 +112,7 @@ class CustomDiskImageResource(CustomResource):
super().__init__(local_path=local_path, metadata=metadata)
class Resource(AbstractResource):
"""
An official gem5 resources as hosted within our gem5 resources repository
@@ -163,15 +165,14 @@ class Resource(AbstractResource):
to_path = os.path.join(resource_directory, resource_name)
super().__init__(
local_path=to_path,
metadata=get_resources_json_obj(resource_name))
local_path=to_path, metadata=get_resources_json_obj(resource_name)
)
get_resource(
resource_name=resource_name,
to_path=to_path,
download_md5_mismatch=download_md5_mismatch
download_md5_mismatch=download_md5_mismatch,
)
def _get_default_resource_dir(cls) -> str:
"""
Obtain the default gem5 resources directory on the host system. This
@@ -188,14 +189,16 @@ class Resource(AbstractResource):
]
for path in test_list:
if os.path.exists(path): # If the path already exists...
if os.path.isdir(path): # Check to see the path is a directory.
return path # If so, the path is valid and can be used.
else: # If the path does not exist, try to create it.
if os.path.exists(path): # If the path already exists...
if os.path.isdir(
path
): # Check to see the path is a directory.
return path # If so, the path is valid and can be used.
else: # If the path does not exist, try to create it.
try:
os.makedirs(path, exist_ok=False)
return path
except OSError:
continue # If the path cannot be created, then try another.
continue # If the path cannot be created, then try another.
raise Exception("Cannot find a valid location to download resources")

View File

@@ -35,6 +35,7 @@ from .isas import ISA, get_isa_from_str, get_isas_str_set
from .coherence_protocol import CoherenceProtocol
from typing import Set
def get_supported_isas() -> Set[ISA]:
"""
Returns the set of all the ISAs compiled into the current binary.
@@ -51,7 +52,6 @@ def get_supported_isas() -> Set[ISA]:
return supported_isas
def get_runtime_isa() -> ISA:
"""
Returns a single target ISA at runtime.
@@ -68,8 +68,10 @@ def get_runtime_isa() -> ISA:
:returns: The target ISA.
"""
warn("The `get_runtime_isa` function is deprecated. Please migrate away "
"from using this function.")
warn(
"The `get_runtime_isa` function is deprecated. Please migrate away "
"from using this function."
)
if "TARGET_ISA" in buildEnv.keys():
return get_isa_from_str(buildEnv["TARGET_ISA"])
@@ -79,9 +81,12 @@ def get_runtime_isa() -> ISA:
if len(supported_isas) == 1:
return next(iter(supported_isas))
raise Exception("Cannot determine the the runtime ISA. Either the "
"'TARGET_ISA' parameter must be set or the binary only "
"compiled to one ISA.")
raise Exception(
"Cannot determine the the runtime ISA. Either the "
"'TARGET_ISA' parameter must be set or the binary only "
"compiled to one ISA."
)
def get_runtime_coherence_protocol() -> CoherenceProtocol:
"""Gets the cache coherence protocol.

View File

@@ -42,8 +42,8 @@ class ExitEvent(Enum):
SWITCHCPU = "switchcpu" # An exit needed to switch CPU cores.
FAIL = "fail" # An exit because the simulation has failed.
CHECKPOINT = "checkpoint" # An exit to load a checkpoint.
MAX_TICK = "max tick" # An exit due to a maximum tick value being met.
USER_INTERRUPT = ( # An exit due to a user interrupt (e.g., cntr + c)
MAX_TICK = "max tick" # An exit due to a maximum tick value being met.
USER_INTERRUPT = ( # An exit due to a user interrupt (e.g., cntr + c)
"user interupt"
)

View File

@@ -32,10 +32,14 @@ from m5.util import warn
"""
In this package we store generators for simulation exit events.
"""
def defaultBehaviorWarning(type, effect):
warn(
"As no behavior was set by the user, default behavior is being carried"
f" out.\n Type: {type} \n Detail: {effect} \n")
f" out.\n Type: {type} \n Detail: {effect} \n"
)
def default_exit_generator():
"""
@@ -45,7 +49,8 @@ def default_exit_generator():
defaultBehaviorWarning(
"default_exit_generator",
"A default generator for an exit event. It will return True, "
"indicating that the Simulator run loop should exit.")
"indicating that the Simulator run loop should exit.",
)
while True:
yield True
@@ -60,7 +65,8 @@ def default_switch_generator(processor: AbstractProcessor):
"default_switch_generator",
"A default generator for a switch exit event.If the processor is a "
"SwitchableProcessor, this generator will switch it. Otherwise nothing"
" will happen.")
" will happen.",
)
is_switchable = isinstance(processor, SwitchableProcessor)
while True:
if is_switchable:
@@ -77,7 +83,8 @@ def default_workbegin_generator():
defaultBehaviorWarning(
"default_workbegin_generator",
"A default generator for a workbegin exit event. It will reset the "
"simulation statistics.")
"simulation statistics.",
)
while True:
m5.stats.reset()
yield False
@@ -91,7 +98,8 @@ def default_workend_generator():
defaultBehaviorWarning(
"default_workend_generator",
"A default generator for a workend exit event. It will dump the "
"simulation statistics.")
"simulation statistics.",
)
while True:
m5.stats.dump()
yield False

View File

@@ -372,4 +372,3 @@ class Simulator:
will be saved.
"""
m5.checkpoint(str(checkpoint_dir))

View File

@@ -39,14 +39,14 @@ def _get_exception_str(msg: str):
# stated. `inspect.stack()[1]` is the `requires` caller method. One above
# this on the stack, `inspect.stack()[2]` should be where `requires` is
# called.
if inspect.stack()[2].function == '<module>':
if inspect.stack()[2].function == "<module>":
# If the caller is a Python module, we use the filename. This is for
# the case where the `requires` function is called outside of a class.
name = inspect.stack()[2].filename
else:
# Otherwise we assume the `requires` is being called by a class, in
# which case we label the exception message with the class name.
name = inspect.stack()[2].frame.f_locals['self'].__class__.__name__
name = inspect.stack()[2].frame.f_locals["self"].__class__.__name__
return "[{}] {}".format(name, msg)
@@ -93,9 +93,10 @@ def requires(
# why the enum did not compare correctly yielded no results. The following
# code works, even though it is verbose and appears functionally equivalent
# to the original code.
if isa_required != None and isa_required.value not in \
(isa.value for isa in supported_isas):
msg=f"The required ISA is '{isa_required.name}'. Supported ISAs: "
if isa_required != None and isa_required.value not in (
isa.value for isa in supported_isas
):
msg = f"The required ISA is '{isa_required.name}'. Supported ISAs: "
for isa in supported_isas:
msg += f"{os.linesep}{isa.name}"
raise Exception(_get_exception_str(msg=msg))
@@ -108,9 +109,9 @@ def requires(
raise Exception(
_get_exception_str(
msg="The current coherence protocol is "
"'{}'. Required: '{}'".format(
runtime_coherence_protocol.name,
coherence_protocol_required.name,
"'{}'. Required: '{}'".format(
runtime_coherence_protocol.name,
coherence_protocol_required.name,
)
)
)
@@ -120,4 +121,4 @@ def requires(
_get_exception_str(
msg="KVM is required but is unavailable on this system"
)
)
)

View File

@@ -29,6 +29,7 @@ import importlib.abc
import importlib.util
import os
class ByteCodeLoader(importlib.abc.Loader):
def __init__(self, code):
super().__init__()
@@ -37,14 +38,15 @@ class ByteCodeLoader(importlib.abc.Loader):
def exec_module(self, module):
exec(self.code, module.__dict__)
# Simple importer that allows python to import data from a dict of
# code objects. The keys are the module path, and the items are the
# filename and bytecode of the file.
class CodeImporter(object):
def __init__(self):
self.modules = {}
override_var = os.environ.get('M5_OVERRIDE_PY_SOURCE', 'false')
self.override = (override_var.lower() in ('true', 'yes'))
override_var = os.environ.get("M5_OVERRIDE_PY_SOURCE", "false")
self.override = override_var.lower() in ("true", "yes")
def add_module(self, abspath, modpath, code):
if modpath in self.modules:
@@ -59,18 +61,19 @@ class CodeImporter(object):
abspath, code = self.modules[fullname]
if self.override and os.path.exists(abspath):
src = open(abspath, 'r').read()
code = compile(src, abspath, 'exec')
src = open(abspath, "r").read()
code = compile(src, abspath, "exec")
is_package = (os.path.basename(abspath) == '__init__.py')
is_package = os.path.basename(abspath) == "__init__.py"
spec = importlib.util.spec_from_loader(
name=fullname, loader=ByteCodeLoader(code),
is_package=is_package)
name=fullname, loader=ByteCodeLoader(code), is_package=is_package
)
spec.loader_state = self.modules.keys()
return spec
# Create an importer and add it to the meta_path so future imports can
# use it. There's currently nothing in the importer, but calls to
# add_module can be used to add code.
@@ -79,6 +82,7 @@ def install():
global add_module
add_module = importer.add_module
import sys
sys.meta_path.insert(0, importer)
# Injected into this module's namespace by the c++ code that loads it.

File diff suppressed because it is too large Load Diff

View File

@@ -46,7 +46,8 @@ if in_gem5:
from . import objects
from . import params
from . import stats
if defines.buildEnv['USE_SYSTEMC']:
if defines.buildEnv["USE_SYSTEMC"]:
from . import systemc
from . import tlm
from . import util
@@ -54,4 +55,3 @@ if in_gem5:
from .event import *
from .main import main
from .simulate import *

View File

@@ -31,30 +31,37 @@ from _m5.debug import SimpleFlag, CompoundFlag
from _m5.debug import schedBreak, setRemoteGDBPort
from m5.util import printList
def help():
sorted_flags = sorted(flags.items(), key=lambda kv: kv[0])
print("Base Flags:")
for name, flag in filter(lambda kv: isinstance(kv[1], SimpleFlag)
and not kv[1].isFormat, sorted_flags):
for name, flag in filter(
lambda kv: isinstance(kv[1], SimpleFlag) and not kv[1].isFormat,
sorted_flags,
):
print(" %s: %s" % (name, flag.desc))
print()
print("Compound Flags:")
for name, flag in filter(lambda kv: isinstance(kv[1], CompoundFlag),
sorted_flags):
for name, flag in filter(
lambda kv: isinstance(kv[1], CompoundFlag), sorted_flags
):
print(" %s: %s" % (name, flag.desc))
# The list of kids for flag "All" is too long, so it is not printed
if name != "All":
printList([ c.name for c in flag.kids() ], indent=8)
printList([c.name for c in flag.kids()], indent=8)
else:
print(" All Base Flags")
print()
print("Formatting Flags:")
for name, flag in filter(lambda kv: isinstance(kv[1], SimpleFlag)
and kv[1].isFormat, sorted_flags):
for name, flag in filter(
lambda kv: isinstance(kv[1], SimpleFlag) and kv[1].isFormat,
sorted_flags,
):
print(" %s: %s" % (name, flag.desc))
print()
class AllFlags(Mapping):
def __init__(self):
self._version = -1
@@ -98,4 +105,5 @@ class AllFlags(Mapping):
self._update()
return self._dict.items()
flags = AllFlags()

View File

@@ -47,6 +47,7 @@ from _m5.event import getEventQueue, setEventQueue
mainq = None
class EventWrapper(Event):
"""Helper class to wrap callable objects in an Event base class"""
@@ -54,8 +55,9 @@ class EventWrapper(Event):
super().__init__(**kwargs)
if not callable(func):
raise RuntimeError("Can't wrap '%s', object is not callable" % \
str(func))
raise RuntimeError(
"Can't wrap '%s', object is not callable" % str(func)
)
self._func = func
@@ -63,7 +65,7 @@ class EventWrapper(Event):
self._func()
def __str__(self):
return "EventWrapper(%s)" % (str(self._func), )
return "EventWrapper(%s)" % (str(self._func),)
class ProgressEvent(Event):
@@ -74,7 +76,7 @@ class ProgressEvent(Event):
self.eventq.schedule(self, m5.curTick() + self.period)
def __call__(self):
print("Progress! Time now %fs" % (m5.curTick()/1e12))
print("Progress! Time now %fs" % (m5.curTick() / 1e12))
self.eventq.schedule(self, m5.curTick() + self.period)
@@ -83,5 +85,12 @@ def create(func, priority=Event.Default_Pri):
return EventWrapper(func, priority=priority)
__all__ = [ 'Event', 'EventWrapper', 'ProgressEvent', 'SimExit',
'mainq', 'create' ]
__all__ = [
"Event",
"EventWrapper",
"ProgressEvent",
"SimExit",
"mainq",
"create",
]

View File

@@ -34,4 +34,3 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

File diff suppressed because it is too large Load Diff

View File

@@ -33,11 +33,11 @@ from .timeconversion import TimeConversion
from .jsonloader import JsonLoader
__all__ = [
"Group",
"SimStat",
"Statistic",
"TimeConversion",
"StorageType",
"JsonSerializable",
"JsonLoader",
]
"Group",
"SimStat",
"Statistic",
"TimeConversion",
"StorageType",
"JsonSerializable",
"JsonLoader",
]

View File

@@ -25,13 +25,22 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from typing import Callable, Dict, Iterator, List, Mapping, Optional, Pattern,\
Union
from typing import (
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Pattern,
Union,
)
from .jsonserializable import JsonSerializable
from .statistic import Scalar, Statistic
from .timeconversion import TimeConversion
class Group(JsonSerializable):
"""
Used to create the heirarchical stats structure. A Group object contains a
@@ -41,10 +50,14 @@ class Group(JsonSerializable):
type: Optional[str]
time_conversion: Optional[TimeConversion]
def __init__(self, type: Optional[str] = None,
time_conversion: Optional[TimeConversion] = None,
**kwargs: Dict[str, Union["Group",Statistic,List["Group"],
List["Statistic"]]]):
def __init__(
self,
type: Optional[str] = None,
time_conversion: Optional[TimeConversion] = None,
**kwargs: Dict[
str, Union["Group", Statistic, List["Group"], List["Statistic"]]
]
):
if type is None:
self.type = "Group"
else:
@@ -52,11 +65,12 @@ class Group(JsonSerializable):
self.time_conversion = time_conversion
for key,value in kwargs.items():
for key, value in kwargs.items():
setattr(self, key, value)
def children(self, predicate: Optional[Callable[[str], bool]] = None
) -> Iterator[Union["Group", Statistic]]:
def children(
self, predicate: Optional[Callable[[str], bool]] = None
) -> Iterator[Union["Group", Statistic]]:
""" Iterate through all of the children, optionally with a predicate
```
@@ -71,7 +85,8 @@ class Group(JsonSerializable):
"""
for attr in self.__dict__:
# Check the provided predicate. If not a match, skip this child
if predicate and not predicate(attr): continue
if predicate and not predicate(attr):
continue
obj = getattr(self, attr)
if isinstance(obj, Group) or isinstance(obj, Statistic):
yield obj
@@ -100,8 +115,9 @@ class Group(JsonSerializable):
"""
yield from self.children(lambda _name: _name in name)
def find_re(self, regex: Union[str, Pattern]
) -> Iterator[Union["Group", Statistic]]:
def find_re(
self, regex: Union[str, Pattern]
) -> Iterator[Union["Group", Statistic]]:
""" Find all stats that match the name
This function searches all of the "children" in this group. It yields
@@ -124,6 +140,7 @@ class Group(JsonSerializable):
pattern = regex
yield from self.children(lambda _name: bool(pattern.search(_name)))
class Vector(Group):
"""
The Vector class is used to store vector information. However, in gem5
@@ -132,5 +149,6 @@ class Vector(Group):
accordance to decisions made in relation to
https://gem5.atlassian.net/browse/GEM5-867.
"""
def __init__(self, scalar_map: Mapping[str,Scalar]):
def __init__(self, scalar_map: Mapping[str, Scalar]):
super().__init__(type="Vector", time_conversion=None, **scalar_map)

View File

@@ -31,6 +31,7 @@ from .group import Group, Vector
import json
from typing import IO, Union
class JsonLoader(json.JSONDecoder):
"""
Subclass of JSONDecoder that overrides 'object_hook'. Converts JSON object
@@ -49,26 +50,26 @@ class JsonLoader(json.JSONDecoder):
def __init__(self):
super().__init__(self, object_hook=self.__json_to_simstat)
def __json_to_simstat(self, d: dict) -> Union[SimStat,Statistic,Group]:
if 'type' in d:
if d['type'] == 'Scalar':
d.pop('type', None)
def __json_to_simstat(self, d: dict) -> Union[SimStat, Statistic, Group]:
if "type" in d:
if d["type"] == "Scalar":
d.pop("type", None)
return Scalar(**d)
elif d['type'] == 'Distribution':
d.pop('type', None)
elif d["type"] == "Distribution":
d.pop("type", None)
return Distribution(**d)
elif d['type'] == 'Accumulator':
d.pop('type', None)
elif d["type"] == "Accumulator":
d.pop("type", None)
return Accumulator(**d)
elif d['type'] == 'Group':
elif d["type"] == "Group":
return Group(**d)
elif d['type'] == 'Vector':
d.pop('type', None)
d.pop('time_conversion', None)
elif d["type"] == "Vector":
d.pop("type", None)
d.pop("time_conversion", None)
return Vector(d)
else:
@@ -78,6 +79,7 @@ class JsonLoader(json.JSONDecoder):
else:
return SimStat(**d)
def load(json_file: IO) -> SimStat:
"""
Wrapper function that provides a cleaner interface for using the
@@ -95,4 +97,3 @@ def load(json_file: IO) -> SimStat:
simstat_object = json.load(json_file, cls=JsonLoader)
return simstat_object

View File

@@ -30,6 +30,7 @@ from typing import Dict, List, Union, Any, IO
from .storagetype import StorageType
class JsonSerializable:
"""
Classes which inherit from JsonSerializable can be translated into JSON
@@ -61,8 +62,9 @@ class JsonSerializable:
model_dct[key] = new_value
return model_dct
def __process_json_value(self,
value: Any) -> Union[str,int,float,Dict,List,None]:
def __process_json_value(
self, value: Any
) -> Union[str, int, float, Dict, List, None]:
"""
Translate values into a value which can be handled by the Python stdlib
JSON package.
@@ -91,7 +93,6 @@ class JsonSerializable:
return None
def dumps(self, **kwargs) -> str:
"""
This function mirrors the Python stdlib JSON module method
@@ -126,8 +127,8 @@ class JsonSerializable:
"""
# Setting the default indentation to something readable.
if 'indent' not in kwargs:
kwargs['indent'] = 4
if "indent" not in kwargs:
kwargs["indent"] = 4
return json.dumps(obj=self.to_json(), **kwargs)
@@ -161,7 +162,7 @@ class JsonSerializable:
"""
# Setting the default indentation to something readable.
if 'indent' not in kwargs:
kwargs['indent'] = 4
if "indent" not in kwargs:
kwargs["indent"] = 4
json.dump(obj=self.to_json(), fp=fp, **kwargs)
json.dump(obj=self.to_json(), fp=fp, **kwargs)

View File

@@ -32,6 +32,7 @@ from .group import Group
from .statistic import Statistic
from .timeconversion import TimeConversion
class SimStat(JsonSerializable):
"""
Contains all the statistics for a given simulation.
@@ -42,15 +43,18 @@ class SimStat(JsonSerializable):
simulated_begin_time: Optional[Union[int, float]]
simulated_end_time: Optional[Union[int, float]]
def __init__(self, creation_time: Optional[datetime] = None,
time_conversion: Optional[TimeConversion] = None,
simulated_begin_time: Optional[Union[int, float]] = None,
simulated_end_time: Optional[Union[int, float]] = None,
**kwargs: Dict[str, Union[Group,Statistic,List[Group]]]):
def __init__(
self,
creation_time: Optional[datetime] = None,
time_conversion: Optional[TimeConversion] = None,
simulated_begin_time: Optional[Union[int, float]] = None,
simulated_end_time: Optional[Union[int, float]] = None,
**kwargs: Dict[str, Union[Group, Statistic, List[Group]]]
):
self.creation_time = creation_time
self.time_conversion = time_conversion
self.simulated_begin_time = simulated_begin_time
self.simulated_end_time = simulated_end_time
for key,value in kwargs.items():
setattr(self, key, value)
for key, value in kwargs.items():
setattr(self, key, value)

View File

@@ -30,6 +30,7 @@ from typing import Any, Iterable, Optional, Union, List
from .jsonserializable import JsonSerializable
from .storagetype import StorageType
class Statistic(ABC, JsonSerializable):
"""
The abstract base class for all Python statistics.
@@ -41,16 +42,21 @@ class Statistic(ABC, JsonSerializable):
description: Optional[str]
datatype: Optional[StorageType]
def __init__(self, value: Any, type: Optional[str] = None,
unit: Optional[str] = None,
description: Optional[str] = None,
datatype: Optional[StorageType] = None):
def __init__(
self,
value: Any,
type: Optional[str] = None,
unit: Optional[str] = None,
description: Optional[str] = None,
datatype: Optional[StorageType] = None,
):
self.value = value
self.type = type
self.unit = unit
self.description = description
self.datatype = datatype
class Scalar(Statistic):
"""
A scalar Python statistic type.
@@ -58,26 +64,44 @@ class Scalar(Statistic):
value: Union[float, int]
def __init__(self, value: Any,
unit: Optional[str] = None,
description: Optional[str] = None,
datatype: Optional[StorageType] = None):
super().__init__(value=value, type="Scalar", unit=unit,
description=description, datatype=datatype)
def __init__(
self,
value: Any,
unit: Optional[str] = None,
description: Optional[str] = None,
datatype: Optional[StorageType] = None,
):
super().__init__(
value=value,
type="Scalar",
unit=unit,
description=description,
datatype=datatype,
)
class BaseScalarVector(Statistic):
"""
An abstract base class for classes containing a vector of Scalar values.
"""
value: List[Union[int,float]]
def __init__(self, value: Iterable[Union[int,float]],
type: Optional[str] = None,
unit: Optional[str] = None,
description: Optional[str] = None,
datatype: Optional[StorageType] = None):
super().__init__(value=list(value), type=type, unit=unit,
description=description, datatype=datatype)
value: List[Union[int, float]]
def __init__(
self,
value: Iterable[Union[int, float]],
type: Optional[str] = None,
unit: Optional[str] = None,
description: Optional[str] = None,
datatype: Optional[StorageType] = None,
):
super().__init__(
value=list(value),
type=type,
unit=unit,
description=description,
datatype=datatype,
)
def mean(self) -> float:
"""
@@ -88,10 +112,11 @@ class BaseScalarVector(Statistic):
float
The mean value across all bins.
"""
assert(self.value != None)
assert(isinstance(self.value, List))
assert self.value != None
assert isinstance(self.value, List)
from statistics import mean as statistics_mean
return statistics_mean(self.value)
def count(self) -> float:
@@ -103,7 +128,7 @@ class BaseScalarVector(Statistic):
float
The sum of all bin values.
"""
assert(self.value != None)
assert self.value != None
return sum(self.value)
@@ -127,21 +152,29 @@ class Distribution(BaseScalarVector):
overflow: Optional[int]
logs: Optional[float]
def __init__(self, value: Iterable[int],
min: Union[float, int],
max: Union[float, int],
num_bins: int,
bin_size: Union[float, int],
sum: Optional[int] = None,
sum_squared: Optional[int] = None,
underflow: Optional[int] = None,
overflow: Optional[int] = None,
logs: Optional[float] = None,
unit: Optional[str] = None,
description: Optional[str] = None,
datatype: Optional[StorageType] = None):
super().__init__(value=value, type="Distribution", unit=unit,
description=description, datatype=datatype)
def __init__(
self,
value: Iterable[int],
min: Union[float, int],
max: Union[float, int],
num_bins: int,
bin_size: Union[float, int],
sum: Optional[int] = None,
sum_squared: Optional[int] = None,
underflow: Optional[int] = None,
overflow: Optional[int] = None,
logs: Optional[float] = None,
unit: Optional[str] = None,
description: Optional[str] = None,
datatype: Optional[StorageType] = None,
):
super().__init__(
value=value,
type="Distribution",
unit=unit,
description=description,
datatype=datatype,
)
self.min = min
self.max = max
@@ -154,8 +187,9 @@ class Distribution(BaseScalarVector):
self.sum_squared = sum_squared
# These check some basic conditions of a distribution.
assert(self.bin_size >= 0)
assert(self.num_bins >= 1)
assert self.bin_size >= 0
assert self.num_bins >= 1
class Accumulator(BaseScalarVector):
"""
@@ -167,16 +201,24 @@ class Accumulator(BaseScalarVector):
max: Union[int, float]
sum_squared: Optional[int]
def __init__(self, value: Iterable[Union[int,float]],
count: int,
min: Union[int, float],
max: Union[int, float],
sum_squared: Optional[int] = None,
unit: Optional[str] = None,
description: Optional[str] = None,
datatype: Optional[StorageType] = None):
super().__init__(value=value, type="Accumulator", unit=unit,
description=description, datatype=datatype)
def __init__(
self,
value: Iterable[Union[int, float]],
count: int,
min: Union[int, float],
max: Union[int, float],
sum_squared: Optional[int] = None,
unit: Optional[str] = None,
description: Optional[str] = None,
datatype: Optional[StorageType] = None,
):
super().__init__(
value=value,
type="Accumulator",
unit=unit,
description=description,
datatype=datatype,
)
self._count = count
self.min = min

View File

@@ -27,6 +27,7 @@
from enum import Enum
from typing import Dict
class StorageType(Enum):
"""
An enum used to declare what C++ data type was used to store a value.
@@ -34,9 +35,10 @@ class StorageType(Enum):
E.g. 's64' indicates a 64 bit signed integer
"""
u32: str = "u32"
u64: str = "u64"
s32: str = "s32"
s64: str = "s64"
f32: str = "f32"
f64: str = "f64"
f64: str = "f64"

View File

@@ -26,14 +26,16 @@
from typing import Optional
class TimeConversion:
"""
A class for specifying a scale factor necessary to translate a simulation
time measurement (e.g. ticks) into seconds.
"""
scale_factor: float
description: Optional[str]
def __init__(self, scale_factor: float, description: Optional[str] = None):
self.scale_factor = scale_factor
self.description = description
self.description = description

View File

@@ -40,5 +40,5 @@ import inspect
import _m5
for name, module in inspect.getmembers(_m5):
if name.startswith('param_') or name.startswith('enum_'):
if name.startswith("param_") or name.startswith("enum_"):
exec("from _m5.%s import *" % name)

View File

@@ -42,14 +42,17 @@ import os
import socket
import sys
__all__ = [ 'options', 'arguments', 'main' ]
__all__ = ["options", "arguments", "main"]
usage="%prog [gem5 options] script.py [script options]"
brief_copyright=\
usage = "%prog [gem5 options] script.py [script options]"
brief_copyright = (
"gem5 is copyrighted software; use the --copyright option for details."
)
def _stats_help(option, opt, value, parser):
import m5
print("A stat file can either be specified as a URI or a plain")
print("path. When specified as a path, gem5 uses the default text ")
print("format.")
@@ -67,95 +70,220 @@ def parse_options():
option = options.add_option
group = options.set_group
listener_modes = ( "on", "off", "auto" )
listener_modes = ("on", "off", "auto")
# Help options
option('-B', "--build-info", action="store_true", default=False,
help="Show build information")
option('-C', "--copyright", action="store_true", default=False,
help="Show full copyright information")
option('-R', "--readme", action="store_true", default=False,
help="Show the readme")
option(
"-B",
"--build-info",
action="store_true",
default=False,
help="Show build information",
)
option(
"-C",
"--copyright",
action="store_true",
default=False,
help="Show full copyright information",
)
option(
"-R",
"--readme",
action="store_true",
default=False,
help="Show the readme",
)
# Options for configuring the base simulator
option('-d', "--outdir", metavar="DIR", default="m5out",
help="Set the output directory to DIR [Default: %default]")
option('-r', "--redirect-stdout", action="store_true", default=False,
help="Redirect stdout (& stderr, without -e) to file")
option('-e', "--redirect-stderr", action="store_true", default=False,
help="Redirect stderr to file")
option("--silent-redirect", action="store_true", default=False,
help="Suppress printing a message when redirecting stdout or stderr")
option("--stdout-file", metavar="FILE", default="simout",
help="Filename for -r redirection [Default: %default]")
option("--stderr-file", metavar="FILE", default="simerr",
help="Filename for -e redirection [Default: %default]")
option("--listener-mode", metavar="{on,off,auto}",
choices=listener_modes, default="auto",
help="Port (e.g., gdb) listener mode (auto: Enable if running " \
"interactively) [Default: %default]")
option("--allow-remote-connections", action="store_true", default=False,
option(
"-d",
"--outdir",
metavar="DIR",
default="m5out",
help="Set the output directory to DIR [Default: %default]",
)
option(
"-r",
"--redirect-stdout",
action="store_true",
default=False,
help="Redirect stdout (& stderr, without -e) to file",
)
option(
"-e",
"--redirect-stderr",
action="store_true",
default=False,
help="Redirect stderr to file",
)
option(
"--silent-redirect",
action="store_true",
default=False,
help="Suppress printing a message when redirecting stdout or stderr",
)
option(
"--stdout-file",
metavar="FILE",
default="simout",
help="Filename for -r redirection [Default: %default]",
)
option(
"--stderr-file",
metavar="FILE",
default="simerr",
help="Filename for -e redirection [Default: %default]",
)
option(
"--listener-mode",
metavar="{on,off,auto}",
choices=listener_modes,
default="auto",
help="Port (e.g., gdb) listener mode (auto: Enable if running "
"interactively) [Default: %default]",
)
option(
"--allow-remote-connections",
action="store_true",
default=False,
help="Port listeners will accept connections from anywhere (0.0.0.0). "
"Default is only localhost.")
option('-i', "--interactive", action="store_true", default=False,
help="Invoke the interactive interpreter after running the script")
option("--pdb", action="store_true", default=False,
help="Invoke the python debugger before running the script")
option('-p', "--path", metavar="PATH[:PATH]", action='append', split=':',
help="Prepend PATH to the system path when invoking the script")
option('-q', "--quiet", action="count", default=0,
help="Reduce verbosity")
option('-v', "--verbose", action="count", default=0,
help="Increase verbosity")
"Default is only localhost.",
)
option(
"-i",
"--interactive",
action="store_true",
default=False,
help="Invoke the interactive interpreter after running the script",
)
option(
"--pdb",
action="store_true",
default=False,
help="Invoke the python debugger before running the script",
)
option(
"-p",
"--path",
metavar="PATH[:PATH]",
action="append",
split=":",
help="Prepend PATH to the system path when invoking the script",
)
option("-q", "--quiet", action="count", default=0, help="Reduce verbosity")
option(
"-v", "--verbose", action="count", default=0, help="Increase verbosity"
)
# Statistics options
group("Statistics Options")
option("--stats-file", metavar="FILE", default="stats.txt",
help="Sets the output file for statistics [Default: %default]")
option("--stats-help",
action="callback", callback=_stats_help,
help="Display documentation for available stat visitors")
option(
"--stats-file",
metavar="FILE",
default="stats.txt",
help="Sets the output file for statistics [Default: %default]",
)
option(
"--stats-help",
action="callback",
callback=_stats_help,
help="Display documentation for available stat visitors",
)
# Configuration Options
group("Configuration Options")
option("--dump-config", metavar="FILE", default="config.ini",
help="Dump configuration output file [Default: %default]")
option("--json-config", metavar="FILE", default="config.json",
help="Create JSON output of the configuration [Default: %default]")
option("--dot-config", metavar="FILE", default="config.dot",
help="Create DOT & pdf outputs of the configuration [Default: %default]")
option("--dot-dvfs-config", metavar="FILE", default=None,
help="Create DOT & pdf outputs of the DVFS configuration" + \
" [Default: %default]")
option(
"--dump-config",
metavar="FILE",
default="config.ini",
help="Dump configuration output file [Default: %default]",
)
option(
"--json-config",
metavar="FILE",
default="config.json",
help="Create JSON output of the configuration [Default: %default]",
)
option(
"--dot-config",
metavar="FILE",
default="config.dot",
help="Create DOT & pdf outputs of the configuration [Default: %default]",
)
option(
"--dot-dvfs-config",
metavar="FILE",
default=None,
help="Create DOT & pdf outputs of the DVFS configuration"
+ " [Default: %default]",
)
# Debugging options
group("Debugging Options")
option("--debug-break", metavar="TICK[,TICK]", action='append', split=',',
help="Create breakpoint(s) at TICK(s) " \
"(kills process if no debugger attached)")
option("--debug-help", action='store_true',
help="Print help on debug flags")
option("--debug-flags", metavar="FLAG[,FLAG]", action='append', split=',',
help="Sets the flags for debug output (-FLAG disables a flag)")
option("--debug-start", metavar="TICK", type='int',
help="Start debug output at TICK")
option("--debug-end", metavar="TICK", type='int',
help="End debug output at TICK")
option("--debug-file", metavar="FILE", default="cout",
option(
"--debug-break",
metavar="TICK[,TICK]",
action="append",
split=",",
help="Create breakpoint(s) at TICK(s) "
"(kills process if no debugger attached)",
)
option(
"--debug-help", action="store_true", help="Print help on debug flags"
)
option(
"--debug-flags",
metavar="FLAG[,FLAG]",
action="append",
split=",",
help="Sets the flags for debug output (-FLAG disables a flag)",
)
option(
"--debug-start",
metavar="TICK",
type="int",
help="Start debug output at TICK",
)
option(
"--debug-end",
metavar="TICK",
type="int",
help="End debug output at TICK",
)
option(
"--debug-file",
metavar="FILE",
default="cout",
help="Sets the output file for debug. Append '.gz' to the name for it"
" to be compressed automatically [Default: %default]")
option("--debug-ignore", metavar="EXPR", action='append', split=':',
help="Ignore EXPR sim objects")
option("--remote-gdb-port", type='int', default=7000,
help="Remote gdb base port (set to 0 to disable listening)")
" to be compressed automatically [Default: %default]",
)
option(
"--debug-ignore",
metavar="EXPR",
action="append",
split=":",
help="Ignore EXPR sim objects",
)
option(
"--remote-gdb-port",
type="int",
default=7000,
help="Remote gdb base port (set to 0 to disable listening)",
)
# Help options
group("Help Options")
option("--list-sim-objects", action='store_true', default=False,
help="List all built-in SimObjects, their params and default values")
option(
"--list-sim-objects",
action="store_true",
default=False,
help="List all built-in SimObjects, their params and default values",
)
arguments = options.parse_args()
return options,arguments
return options, arguments
def interact(scope):
banner = "gem5 Interactive Console"
@@ -172,8 +300,9 @@ def interact(scope):
cfg = Config()
cfg.PromptManager.in_template = prompt_in1
cfg.PromptManager.out_template = prompt_out
ipshell = InteractiveShellEmbed(config=cfg, user_ns=scope,
banner1=banner)
ipshell = InteractiveShellEmbed(
config=cfg, user_ns=scope, banner1=banner
)
except ImportError:
pass
@@ -193,6 +322,7 @@ def _check_tracing():
fatal("Tracing is not enabled. Compile with TRACING_ON")
def main():
import m5
import _m5.core
@@ -235,29 +365,29 @@ def main():
# Now redirect stdout/stderr as desired
if options.redirect_stdout:
redir_fd = os.open(stdout_file, os. O_WRONLY | os.O_CREAT | os.O_TRUNC)
redir_fd = os.open(stdout_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.dup2(redir_fd, sys.stdout.fileno())
if not options.redirect_stderr:
os.dup2(redir_fd, sys.stderr.fileno())
if options.redirect_stderr:
redir_fd = os.open(stderr_file, os. O_WRONLY | os.O_CREAT | os.O_TRUNC)
redir_fd = os.open(stderr_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.dup2(redir_fd, sys.stderr.fileno())
done = False
if options.build_info:
done = True
print('Build information:')
print("Build information:")
print()
print('gem5 version %s' % defines.gem5Version)
print('compiled %s' % defines.compileDate)
print('build options:')
print("gem5 version %s" % defines.gem5Version)
print("compiled %s" % defines.compileDate)
print("build options:")
keys = list(defines.buildEnv.keys())
keys.sort()
for key in keys:
val = defines.buildEnv[key]
print(' %s = %s' % (key, val))
print(" %s = %s" % (key, val))
print()
if options.copyright:
@@ -267,7 +397,7 @@ def main():
if options.readme:
done = True
print('Readme:')
print("Readme:")
print()
print(info.README)
print()
@@ -279,6 +409,7 @@ def main():
if options.list_sim_objects:
from . import SimObject
done = True
print("SimObjects:")
objects = list(SimObject.allClasses.keys())
@@ -291,13 +422,19 @@ def main():
params.sort()
for pname in params:
param = obj._params[pname]
default = getattr(param, 'default', '')
default = getattr(param, "default", "")
print(terminal_formatter.format_output(pname, indent=8))
if default:
print(terminal_formatter.format_output(
str(default), label="default: ", indent=21))
print(terminal_formatter.format_output(
param.desc, label="desc: ", indent=21))
print(
terminal_formatter.format_output(
str(default), label="default: ", indent=21
)
)
print(
terminal_formatter.format_output(
param.desc, label="desc: ", indent=21
)
)
print()
print()
@@ -317,13 +454,17 @@ def main():
print("gem5 version %s" % _m5.core.gem5Version)
print("gem5 compiled %s" % _m5.core.compileDate)
print("gem5 started %s" %
datetime.datetime.now().strftime("%b %e %Y %X"))
print("gem5 executing on %s, pid %d" %
(socket.gethostname(), os.getpid()))
print(
"gem5 started %s" % datetime.datetime.now().strftime("%b %e %Y %X")
)
print(
"gem5 executing on %s, pid %d"
% (socket.gethostname(), os.getpid())
)
# in Python 3 pipes.quote() is moved to shlex.quote()
import pipes
print("command line:", " ".join(map(pipes.quote, sys.argv)))
print()
@@ -371,7 +512,7 @@ def main():
off_flags = []
for flag in options.debug_flags:
off = False
if flag.startswith('-'):
if flag.startswith("-"):
flag = flag[1:]
off = True
@@ -403,13 +544,12 @@ def main():
trace.ignore(ignore)
sys.argv = arguments
sys.path = [ os.path.dirname(sys.argv[0]) ] + sys.path
sys.path = [os.path.dirname(sys.argv[0])] + sys.path
filename = sys.argv[0]
filedata = open(filename, 'r').read()
filecode = compile(filedata, filename, 'exec')
scope = { '__file__' : filename,
'__name__' : '__m5_main__' }
filedata = open(filename, "r").read()
filecode = compile(filedata, filename, "exec")
scope = {"__file__": filename, "__name__": "__m5_main__"}
# if pdb was requested, execfile the thing under pdb, otherwise,
# just do the execfile normally
@@ -421,7 +561,7 @@ def main():
try:
pdb.run(filecode, scope)
except SystemExit:
print("The program exited via sys.exit(). Exit status: ", end=' ')
print("The program exited via sys.exit(). Exit status: ", end=" ")
print(sys.exc_info()[1])
except:
traceback.print_exc()
@@ -429,7 +569,7 @@ def main():
t = sys.exc_info()[2]
while t.tb_next is not None:
t = t.tb_next
pdb.interaction(t.tb_frame,t)
pdb.interaction(t.tb_frame, t)
else:
exec(filecode, scope)

View File

@@ -24,6 +24,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.SimObject import *
# The ByteOrder enum is defined in params. Expose it here so we can declare it
# to SCons, since there's no normal SimObject file to make it a part of.
from m5.params import ByteOrder

View File

@@ -25,5 +25,5 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
for module in __spec__.loader_state:
if module.startswith('m5.objects.'):
if module.startswith("m5.objects."):
exec("from %s import *" % module)

View File

@@ -29,11 +29,15 @@ import sys
from optparse import *
class nodefault(object): pass
class nodefault(object):
pass
class splitter(object):
def __init__(self, split):
self.split = split
def __call__(self, option, opt_str, value, parser):
values = value.split(self.split)
dest = getattr(parser.values, option.dest)
@@ -42,9 +46,10 @@ class splitter(object):
else:
dest.extend(values)
class OptionParser(dict):
def __init__(self, *args, **kwargs):
kwargs.setdefault('formatter', optparse.TitledHelpFormatter())
kwargs.setdefault("formatter", optparse.TitledHelpFormatter())
self._optparse = optparse.OptionParser(*args, **kwargs)
self._optparse.disable_interspersed_args()
@@ -57,24 +62,24 @@ class OptionParser(dict):
return self._optparse.set_defaults(*args, **kwargs)
def set_group(self, *args, **kwargs):
'''set the current option group'''
"""set the current option group"""
if not args and not kwargs:
self._group = self._optparse
else:
self._group = self._optparse.add_option_group(*args, **kwargs)
def add_option(self, *args, **kwargs):
'''add an option to the current option group, or global none set'''
"""add an option to the current option group, or global none set"""
# if action=split, but allows the option arguments
# themselves to be lists separated by the split variable'''
if kwargs.get('action', None) == 'append' and 'split' in kwargs:
split = kwargs.pop('split')
kwargs['default'] = []
kwargs['type'] = 'string'
kwargs['action'] = 'callback'
kwargs['callback'] = splitter(split)
if kwargs.get("action", None) == "append" and "split" in kwargs:
split = kwargs.pop("split")
kwargs["default"] = []
kwargs["type"] = "string"
kwargs["action"] = "callback"
kwargs["callback"] = splitter(split)
option = self._group.add_option(*args, **kwargs)
dest = option.dest
@@ -84,12 +89,12 @@ class OptionParser(dict):
return option
def bool_option(self, name, default, help):
'''add a boolean option called --name and --no-name.
Display help depending on which is the default'''
"""add a boolean option called --name and --no-name.
Display help depending on which is the default"""
tname = '--%s' % name
fname = '--no-%s' % name
dest = name.replace('-', '_')
tname = "--%s" % name
fname = "--no-%s" % name
dest = name.replace("-", "_")
if default:
thelp = optparse.SUPPRESS_HELP
fhelp = help
@@ -97,15 +102,17 @@ class OptionParser(dict):
thelp = help
fhelp = optparse.SUPPRESS_HELP
topt = self.add_option(tname, action="store_true", default=default,
help=thelp)
fopt = self.add_option(fname, action="store_false", dest=dest,
help=fhelp)
topt = self.add_option(
tname, action="store_true", default=default, help=thelp
)
fopt = self.add_option(
fname, action="store_false", dest=dest, help=fhelp
)
return topt,fopt
return topt, fopt
def __getattr__(self, attr):
if attr.startswith('_'):
if attr.startswith("_"):
return super().__getattribute__(attr)
if attr in self:
@@ -114,10 +121,10 @@ class OptionParser(dict):
return super().__getattribute__(attr)
def __setattr__(self, attr, value):
if attr.startswith('_'):
if attr.startswith("_"):
super().__setattr__(attr, value)
elif attr in self._allopts:
defaults = { attr : value }
defaults = {attr: value}
self.set_defaults(**defaults)
if attr in self:
self[attr] = value
@@ -125,9 +132,9 @@ class OptionParser(dict):
super().__setattr__(attr, value)
def parse_args(self):
opts,args = self._optparse.parse_args()
opts, args = self._optparse.parse_args()
for key,val in opts.__dict__.items():
for key, val in opts.__dict__.items():
if val is not None or key not in self:
self[key] = val
@@ -137,4 +144,3 @@ class OptionParser(dict):
self._optparse.print_help()
if exitcode is not None:
sys.exit(exitcode)

File diff suppressed because it is too large Load Diff

View File

@@ -44,6 +44,7 @@
import copy
class BaseProxy(object):
def __init__(self, search_self, search_up):
self._search_self = search_self
@@ -52,49 +53,52 @@ class BaseProxy(object):
def __str__(self):
if self._search_self and not self._search_up:
s = 'Self'
s = "Self"
elif not self._search_self and self._search_up:
s = 'Parent'
s = "Parent"
else:
s = 'ConfusedProxy'
return s + '.' + self.path()
s = "ConfusedProxy"
return s + "." + self.path()
def __setattr__(self, attr, value):
if not attr.startswith('_'):
if not attr.startswith("_"):
raise AttributeError(
"cannot set attribute '%s' on proxy object" % attr)
"cannot set attribute '%s' on proxy object" % attr
)
super().__setattr__(attr, value)
def _gen_op(operation):
def op(self, operand):
if not (isinstance(operand, (int, float)) or \
isproxy(operand)):
if not (isinstance(operand, (int, float)) or isproxy(operand)):
raise TypeError(
"Proxy operand must be a constant or a proxy to a param")
"Proxy operand must be a constant or a proxy to a param"
)
self._ops.append((operation, operand))
return self
return op
# Support for multiplying proxies by either constants or other proxies
__mul__ = _gen_op(lambda operand_a, operand_b : operand_a * operand_b)
__mul__ = _gen_op(lambda operand_a, operand_b: operand_a * operand_b)
__rmul__ = __mul__
# Support for dividing proxies by either constants or other proxies
__truediv__ = _gen_op(lambda operand_a, operand_b :
operand_a / operand_b)
__floordiv__ = _gen_op(lambda operand_a, operand_b :
operand_a // operand_b)
__truediv__ = _gen_op(lambda operand_a, operand_b: operand_a / operand_b)
__floordiv__ = _gen_op(lambda operand_a, operand_b: operand_a // operand_b)
# Support for dividing constants by proxies
__rtruediv__ = _gen_op(lambda operand_a, operand_b :
operand_b / operand_a.getValue())
__rfloordiv__ = _gen_op(lambda operand_a, operand_b :
operand_b // operand_a.getValue())
__rtruediv__ = _gen_op(
lambda operand_a, operand_b: operand_b / operand_a.getValue()
)
__rfloordiv__ = _gen_op(
lambda operand_a, operand_b: operand_b // operand_a.getValue()
)
# After all the operators and operands have been defined, this function
# should be called to perform the actual operation
def _opcheck(self, result, base):
from . import params
for operation, operand in self._ops:
# Get the operand's value
if isproxy(operand):
@@ -132,8 +136,9 @@ class BaseProxy(object):
if not done:
raise AttributeError(
"Can't resolve proxy '%s' of type '%s' from '%s'" % \
(self.path(), self._pdesc.ptype_str, base.path()))
"Can't resolve proxy '%s' of type '%s' from '%s'"
% (self.path(), self._pdesc.ptype_str, base.path())
)
if isinstance(result, BaseProxy):
if result == self:
@@ -153,6 +158,7 @@ class BaseProxy(object):
# if index is 0 and item is not subscriptable, just
# use item itself (so cpu[0] works on uniprocessors)
return obj
getindex = staticmethod(getindex)
# This method should be called once the proxy is assigned to a
@@ -161,6 +167,7 @@ class BaseProxy(object):
def set_param_desc(self, pdesc):
self._pdesc = pdesc
class AttrProxy(BaseProxy):
def __init__(self, search_self, search_up, attr):
super().__init__(search_self, search_up)
@@ -169,11 +176,12 @@ class AttrProxy(BaseProxy):
def __getattr__(self, attr):
# python uses __bases__ internally for inheritance
if attr.startswith('_'):
if attr.startswith("_"):
return super().__getattr__(self, attr)
if hasattr(self, '_pdesc'):
raise AttributeError("Attribute reference on bound proxy "
f"({self}.{attr})")
if hasattr(self, "_pdesc"):
raise AttributeError(
"Attribute reference on bound proxy " f"({self}.{attr})"
)
# Return a copy of self rather than modifying self in place
# since self could be an indirect reference via a variable or
# parameter
@@ -185,7 +193,7 @@ class AttrProxy(BaseProxy):
def __getitem__(self, key):
if not isinstance(key, int):
raise TypeError("Proxy object requires integer index")
if hasattr(self, '_pdesc'):
if hasattr(self, "_pdesc"):
raise AttributeError("Index operation on bound proxy")
new_self = copy.deepcopy(self)
new_self._modifiers.append(key)
@@ -195,8 +203,8 @@ class AttrProxy(BaseProxy):
try:
val = getattr(obj, self._attr)
visited = False
if hasattr(val, '_visited'):
visited = getattr(val, '_visited')
if hasattr(val, "_visited"):
visited = getattr(val, "_visited")
if visited:
return None, False
@@ -217,7 +225,7 @@ class AttrProxy(BaseProxy):
elif isinstance(m, int):
val = val[m]
else:
assert("Item must be string or integer")
assert "Item must be string or integer"
while isproxy(val):
val = val.unproxy(obj)
return val, True
@@ -226,19 +234,21 @@ class AttrProxy(BaseProxy):
p = self._attr
for m in self._modifiers:
if isinstance(m, str):
p += '.%s' % m
p += ".%s" % m
elif isinstance(m, int):
p += '[%d]' % m
p += "[%d]" % m
else:
assert("Item must be string or integer")
assert "Item must be string or integer"
return p
class AnyProxy(BaseProxy):
def find(self, obj):
return obj.find_any(self._pdesc.ptype)
def path(self):
return 'any'
return "any"
# The AllProxy traverses the entire sub-tree (not only the children)
# and adds all objects of a specific type
@@ -247,10 +257,12 @@ class AllProxy(BaseProxy):
return obj.find_all(self._pdesc.ptype)
def path(self):
return 'all'
return "all"
def isproxy(obj):
from . import params
if isinstance(obj, (BaseProxy, params.EthernetAddr)):
return True
elif isinstance(obj, (list, tuple)):
@@ -259,24 +271,26 @@ def isproxy(obj):
return True
return False
class ProxyFactory(object):
def __init__(self, search_self, search_up):
self.search_self = search_self
self.search_up = search_up
def __getattr__(self, attr):
if attr == 'any':
if attr == "any":
return AnyProxy(self.search_self, self.search_up)
elif attr == 'all':
elif attr == "all":
if self.search_up:
assert("Parant.all is not supported")
assert "Parant.all is not supported"
return AllProxy(self.search_self, self.search_up)
else:
return AttrProxy(self.search_self, self.search_up, attr)
# global objects for handling proxies
Parent = ProxyFactory(search_self = False, search_up = True)
Self = ProxyFactory(search_self = True, search_up = False)
Parent = ProxyFactory(search_self=False, search_up=True)
Self = ProxyFactory(search_self=True, search_up=False)
# limit exports on 'from proxy import *'
__all__ = ['Parent', 'Self']
__all__ = ["Parent", "Self"]

View File

@@ -58,11 +58,11 @@ from .util import fatal
from .util import attrdict
# define a MaxTick parameter, unsigned 64 bit
MaxTick = 2**64 - 1
MaxTick = 2 ** 64 - 1
_drain_manager = _m5.drain.DrainManager.instance()
_instantiated = False # Has m5.instantiate() been called?
_instantiated = False # Has m5.instantiate() been called?
# The final call to instantiate the SimObject graph and initialize the
# system.
@@ -85,13 +85,15 @@ def instantiate(ckpt_dir=None):
# Make sure SimObject-valued params are in the configuration
# hierarchy so we catch them with future descendants() walks
for obj in root.descendants(): obj.adoptOrphanParams()
for obj in root.descendants():
obj.adoptOrphanParams()
# Unproxy in sorted order for determinism
for obj in root.descendants(): obj.unproxyParams()
for obj in root.descendants():
obj.unproxyParams()
if options.dump_config:
ini_file = open(os.path.join(options.outdir, options.dump_config), 'w')
ini_file = open(os.path.join(options.outdir, options.dump_config), "w")
# Print ini sections in sorted order for easier diffing
for obj in sorted(root.descendants(), key=lambda o: o.path()):
obj.print_ini(ini_file)
@@ -100,8 +102,10 @@ def instantiate(ckpt_dir=None):
if options.json_config:
try:
import json
json_file = open(
os.path.join(options.outdir, options.json_config), 'w')
os.path.join(options.outdir, options.json_config), "w"
)
d = root.get_config_as_dict()
json.dump(d, json_file, indent=4)
json_file.close()
@@ -116,21 +120,26 @@ def instantiate(ckpt_dir=None):
stats.initSimStats()
# Create the C++ sim objects and connect ports
for obj in root.descendants(): obj.createCCObject()
for obj in root.descendants(): obj.connectPorts()
for obj in root.descendants():
obj.createCCObject()
for obj in root.descendants():
obj.connectPorts()
# Do a second pass to finish initializing the sim objects
for obj in root.descendants(): obj.init()
for obj in root.descendants():
obj.init()
# Do a third pass to initialize statistics
stats._bindStatHierarchy(root)
root.regStats()
# Do a fourth pass to initialize probe points
for obj in root.descendants(): obj.regProbePoints()
for obj in root.descendants():
obj.regProbePoints()
# Do a fifth pass to connect probe listeners
for obj in root.descendants(): obj.regProbeListeners()
for obj in root.descendants():
obj.regProbeListeners()
# We want to generate the DVFS diagram for the system. This can only be
# done once all of the CPP objects have been created and initialised so
@@ -145,15 +154,20 @@ def instantiate(ckpt_dir=None):
if ckpt_dir:
_drain_manager.preCheckpointRestore()
ckpt = _m5.core.getCheckpoint(ckpt_dir)
for obj in root.descendants(): obj.loadState(ckpt)
for obj in root.descendants():
obj.loadState(ckpt)
else:
for obj in root.descendants(): obj.initState()
for obj in root.descendants():
obj.initState()
# Check to see if any of the stat events are in the past after resuming from
# a checkpoint, If so, this call will shift them to be at a valid time.
updateStatEvents()
need_startup = True
def simulate(*args, **kwargs):
global need_startup
global _instantiated
@@ -163,7 +177,8 @@ def simulate(*args, **kwargs):
if need_startup:
root = objects.Root.getInstance()
for obj in root.descendants(): obj.startup()
for obj in root.descendants():
obj.startup()
need_startup = False
# Python exit handlers happen in reverse order.
@@ -189,6 +204,7 @@ def simulate(*args, **kwargs):
return sim_out
def drain():
"""Drain the simulator in preparation of a checkpoint or memory mode
switch.
@@ -212,7 +228,7 @@ def drain():
# WARNING: if a valid exit event occurs while draining, it
# will not get returned to the user script
exit_event = _m5.event.simulate()
while exit_event.getCause() != 'Finished drain':
while exit_event.getCause() != "Finished drain":
exit_event = simulate()
return False
@@ -224,14 +240,17 @@ def drain():
assert _drain_manager.isDrained(), "Drain state inconsistent"
def memWriteback(root):
for obj in root.descendants():
obj.memWriteback()
def memInvalidate(root):
for obj in root.descendants():
obj.memInvalidate()
def checkpoint(dir):
root = objects.Root.getInstance()
if not isinstance(root, objects.Root):
@@ -242,15 +261,19 @@ def checkpoint(dir):
print("Writing checkpoint")
_m5.core.serializeAll(dir)
def _changeMemoryMode(system, mode):
if not isinstance(system, (objects.Root, objects.System)):
raise TypeError("Parameter of type '%s'. Must be type %s or %s." % \
(type(system), objects.Root, objects.System))
raise TypeError(
"Parameter of type '%s'. Must be type %s or %s."
% (type(system), objects.Root, objects.System)
)
if system.getMemoryMode() != mode:
system.setMemoryMode(mode)
else:
print("System already in target mode. Memory mode unchanged.")
def switchCpus(system, cpuList, verbose=True):
"""Switch CPUs in a system.
@@ -283,21 +306,25 @@ def switchCpus(system, cpuList, verbose=True):
raise TypeError("%s is not of type BaseCPU" % new_cpu)
if new_cpu in old_cpu_set:
raise RuntimeError(
"New CPU (%s) is in the list of old CPUs." % (old_cpu,))
"New CPU (%s) is in the list of old CPUs." % (old_cpu,)
)
if not new_cpu.switchedOut():
raise RuntimeError("New CPU (%s) is already active." % (new_cpu,))
if not new_cpu.support_take_over():
raise RuntimeError(
"New CPU (%s) does not support CPU handover." % (old_cpu,))
"New CPU (%s) does not support CPU handover." % (old_cpu,)
)
if new_cpu.memory_mode() != memory_mode_name:
raise RuntimeError(
"%s and %s require different memory modes." % (new_cpu,
new_cpus[0]))
"%s and %s require different memory modes."
% (new_cpu, new_cpus[0])
)
if old_cpu.switchedOut():
raise RuntimeError("Old CPU (%s) is inactive." % (new_cpu,))
if not old_cpu.support_take_over():
raise RuntimeError(
"Old CPU (%s) does not support CPU handover." % (old_cpu,))
"Old CPU (%s) does not support CPU handover." % (old_cpu,)
)
MemoryMode = params.allEnums["MemoryMode"]
try:
@@ -326,11 +353,15 @@ def switchCpus(system, cpuList, verbose=True):
for old_cpu, new_cpu in cpuList:
new_cpu.takeOverFrom(old_cpu)
def notifyFork(root):
for obj in root.descendants():
obj.notifyFork()
fork_count = 0
def fork(simout="%(parent)s.f%(fork_seq)i"):
"""Fork the simulator.
@@ -353,6 +384,7 @@ def fork(simout="%(parent)s.f%(fork_seq)i"):
pid of the child process or 0 if running in the child.
"""
from m5 import options
global fork_count
if not _m5.core.listenersDisabled():
@@ -375,16 +407,17 @@ def fork(simout="%(parent)s.f%(fork_seq)i"):
# Setup a new output directory
parent = options.outdir
options.outdir = simout % {
"parent" : parent,
"fork_seq" : fork_count,
"pid" : os.getpid(),
}
"parent": parent,
"fork_seq": fork_count,
"pid": os.getpid(),
}
_m5.core.setOutputDir(options.outdir)
else:
fork_count += 1
return pid
from _m5.core import disableAllListeners, listenersDisabled
from _m5.core import listenersLoopbackOnly
from _m5.core import curTick

View File

@@ -53,12 +53,13 @@ outputList = []
# Dictionary of stat visitor factories populated by the _url_factory
# visitor.
factories = { }
factories = {}
# List of all factories. Contains tuples of (factory, schemes,
# enabled).
all_factories = []
def _url_factory(schemes, enable=True):
"""Wrap a plain Python function with URL parsing helpers
@@ -101,19 +102,23 @@ def _url_factory(schemes, enable=True):
# values into proper Python types.
def parse_value(key, values):
if len(values) == 0 or (len(values) == 1 and not values[0]):
fatal("%s: '%s' doesn't have a value." % (
url.geturl(), key))
fatal(
"%s: '%s' doesn't have a value." % (url.geturl(), key)
)
elif len(values) > 1:
fatal("%s: '%s' has multiple values." % (
url.geturl(), key))
fatal(
"%s: '%s' has multiple values." % (url.geturl(), key)
)
else:
try:
return key, literal_eval(values[0])
except ValueError:
fatal("%s: %s isn't a valid Python literal" \
% (url.geturl(), values[0]))
fatal(
"%s: %s isn't a valid Python literal"
% (url.geturl(), values[0])
)
kwargs = dict([ parse_value(k, v) for k, v in qs.items() ])
kwargs = dict([parse_value(k, v) for k, v in qs.items()])
try:
return func("%s%s" % (url.netloc, url.path), **kwargs)
@@ -128,7 +133,8 @@ def _url_factory(schemes, enable=True):
return decorator
@_url_factory([ None, "", "text", "file", ])
@_url_factory([None, "", "text", "file"])
def _textFactory(fn, desc=True, spaces=True):
"""Output stats in text format.
@@ -147,7 +153,8 @@ def _textFactory(fn, desc=True, spaces=True):
return _m5.stats.initText(fn, desc, spaces)
@_url_factory([ "h5", ], enable=hasattr(_m5.stats, "initHDF5"))
@_url_factory(["h5"], enable=hasattr(_m5.stats, "initHDF5"))
def _hdf5Factory(fn, chunking=10, desc=True, formulas=True):
"""Output stats in HDF5 format.
@@ -183,6 +190,7 @@ def _hdf5Factory(fn, chunking=10, desc=True, formulas=True):
return _m5.stats.initHDF5(fn, chunking, desc, formulas)
@_url_factory(["json"])
def _jsonFactory(fn):
"""Output stats in JSON format.
@@ -194,6 +202,7 @@ def _jsonFactory(fn):
return JsonOutputVistor(fn)
def addStatVisitor(url):
"""Add a stat visitor specified using a URL string
@@ -225,6 +234,7 @@ def addStatVisitor(url):
outputList.append(factory(parsed))
def printStatVisitorTypes():
"""List available stat visitors and their documentation"""
@@ -235,17 +245,19 @@ def printStatVisitorTypes():
print("| %s" % line)
print()
enabled_visitors = [ x for x in all_factories if x[2] ]
enabled_visitors = [x for x in all_factories if x[2]]
for factory, schemes, _ in enabled_visitors:
print("%s:" % ", ".join(filter(lambda x: x is not None, schemes)))
# Try to extract the factory doc string
print_doc(inspect.getdoc(factory))
def initSimStats():
_m5.stats.initSimStats()
_m5.stats.registerPythonStatsHandlers()
def _visit_groups(visitor, root=None):
if root is None:
root = Root.getInstance()
@@ -253,12 +265,15 @@ def _visit_groups(visitor, root=None):
visitor(group)
_visit_groups(visitor, root=group)
def _visit_stats(visitor, root=None):
def for_each_stat(g):
for stat in g.getStats():
visitor(g, stat)
_visit_groups(for_each_stat, root=root)
def _bindStatHierarchy(root):
def _bind_obj(name, obj):
if isNullPointer(obj):
@@ -278,33 +293,39 @@ def _bindStatHierarchy(root):
if isinstance(obj.getCCObject(), _m5.stats.Group):
parent = root
while parent:
if hasattr(parent, 'addStatGroup'):
if hasattr(parent, "addStatGroup"):
parent.addStatGroup(name, obj.getCCObject())
break
parent = parent.get_parent();
parent = parent.get_parent()
_bindStatHierarchy(obj)
for name, obj in root._children.items():
_bind_obj(name, obj)
names = []
stats_dict = {}
stats_list = []
def enable():
'''Enable the statistics package. Before the statistics package is
"""Enable the statistics package. Before the statistics package is
enabled, all statistics must be created and initialized and once
the package is enabled, no more statistics can be created.'''
the package is enabled, no more statistics can be created."""
def check_stat(group, stat):
if not stat.check() or not stat.baseCheck():
fatal("statistic '%s' (%d) was not properly initialized " \
"by a regStats() function\n", stat.name, stat.id)
fatal(
"statistic '%s' (%d) was not properly initialized "
"by a regStats() function\n",
stat.name,
stat.id,
)
if not (stat.flags & flags.display):
stat.name = "__Stat%06d" % stat.id
# Legacy stat
global stats_list
stats_list = list(_m5.stats.statsList())
@@ -312,21 +333,21 @@ def enable():
for stat in stats_list:
check_stat(None, stat)
stats_list.sort(key=lambda s: s.name.split('.'))
stats_list.sort(key=lambda s: s.name.split("."))
for stat in stats_list:
stats_dict[stat.name] = stat
stat.enable()
# New stats
_visit_stats(check_stat)
_visit_stats(lambda g, s: s.enable())
_m5.stats.enable();
_m5.stats.enable()
def prepare():
'''Prepare all stats for data access. This must be done before
dumping and serialization.'''
"""Prepare all stats for data access. This must be done before
dumping and serialization."""
# Legacy stats
for stat in stats_list:
@@ -335,6 +356,7 @@ def prepare():
# New stats
_visit_stats(lambda g, s: s.prepare())
def _dump_to_visitor(visitor, roots=None):
# New stats
def dump_group(group):
@@ -361,12 +383,14 @@ def _dump_to_visitor(visitor, roots=None):
for stat in stats_list:
stat.visit(visitor)
lastDump = 0
# List[SimObject].
global_dump_roots = []
def dump(roots=None):
'''Dump all statistics data to the registered outputs'''
"""Dump all statistics data to the registered outputs"""
all_roots = []
if roots is not None:
@@ -391,7 +415,7 @@ def dump(roots=None):
# Notify new-style stats group that we are about to dump stats.
sim_root = Root.getInstance()
if sim_root:
sim_root.preDumpStats();
sim_root.preDumpStats()
prepare()
for output in outputList:
@@ -406,8 +430,9 @@ def dump(roots=None):
_dump_to_visitor(output, roots=all_roots)
output.end()
def reset():
'''Reset all statistics to the base state'''
"""Reset all statistics to the base state"""
# call reset stats on all SimObjects
root = Root.getInstance()
@@ -420,14 +445,17 @@ def reset():
_m5.stats.processResetQueue()
flags = attrdict({
'none' : 0x0000,
'init' : 0x0001,
'display' : 0x0002,
'total' : 0x0010,
'pdf' : 0x0020,
'cdf' : 0x0040,
'dist' : 0x0080,
'nozero' : 0x0100,
'nonan' : 0x0200,
})
flags = attrdict(
{
"none": 0x0000,
"init": 0x0001,
"display": 0x0002,
"total": 0x0010,
"pdf": 0x0020,
"cdf": 0x0040,
"dist": 0x0080,
"nozero": 0x0100,
"nonan": 0x0200,
}
)

View File

@@ -39,11 +39,13 @@ from m5.ext.pystats.simstat import *
from m5.ext.pystats.statistic import *
from m5.ext.pystats.storagetype import *
class JsonOutputVistor():
class JsonOutputVistor:
"""
This is a helper vistor class used to include a JSON output via the stats
API (`src/python/m5/stats/__init__.py`).
"""
file: str
json_args: Dict
@@ -77,10 +79,11 @@ class JsonOutputVistor():
The Root, or List of roots, whose stats are are to be dumped JSON.
"""
with open(self.file, 'w') as fp:
with open(self.file, "w") as fp:
simstat = get_simstat(root=roots, prepare_stats=False)
simstat.dump(fp=fp, **self.json_args)
def get_stats_group(group: _m5.stats.Group) -> Group:
"""
Translates a gem5 Group object into a Python stats Group object. A Python
@@ -113,6 +116,7 @@ def get_stats_group(group: _m5.stats.Group) -> Group:
return Group(**stats_dict)
def __get_statistic(statistic: _m5.stats.Info) -> Optional[Statistic]:
"""
Translates a _m5.stats.Info object into a Statistic object, to process
@@ -130,7 +134,7 @@ def __get_statistic(statistic: _m5.stats.Info) -> Optional[Statistic]:
cannot be translated.
"""
assert(isinstance(statistic, _m5.stats.Info))
assert isinstance(statistic, _m5.stats.Info)
statistic.prepare()
if isinstance(statistic, _m5.stats.ScalarInfo):
@@ -146,6 +150,7 @@ def __get_statistic(statistic: _m5.stats.Info) -> Optional[Statistic]:
return None
def __get_scaler(statistic: _m5.stats.ScalarInfo) -> Scalar:
value = statistic.value
unit = statistic.unit
@@ -154,11 +159,9 @@ def __get_scaler(statistic: _m5.stats.ScalarInfo) -> Scalar:
datatype = StorageType["f64"]
return Scalar(
value=value,
unit=unit,
description=description,
datatype=datatype,
)
value=value, unit=unit, description=description, datatype=datatype
)
def __get_distribution(statistic: _m5.stats.DistInfo) -> Distribution:
unit = statistic.unit
@@ -177,20 +180,21 @@ def __get_distribution(statistic: _m5.stats.DistInfo) -> Distribution:
datatype = StorageType["f64"]
return Distribution(
value=value,
min=min,
max=max,
num_bins=num_bins,
bin_size=bin_size,
sum = sum_val,
sum_squared = sum_squared,
underflow = underflow,
overflow = overflow,
logs = logs,
unit=unit,
description=description,
datatype=datatype,
)
value=value,
min=min,
max=max,
num_bins=num_bins,
bin_size=bin_size,
sum=sum_val,
sum_squared=sum_squared,
underflow=underflow,
overflow=overflow,
logs=logs,
unit=unit,
description=description,
datatype=datatype,
)
def __get_vector(statistic: _m5.stats.VectorInfo) -> Vector:
to_add = dict()
@@ -212,14 +216,12 @@ def __get_vector(statistic: _m5.stats.VectorInfo) -> Vector:
index_string = str(index)
to_add[index_string] = Scalar(
value=value,
unit=unit,
description=description,
datatype=datatype,
)
value=value, unit=unit, description=description, datatype=datatype
)
return Vector(scalar_map=to_add)
def _prepare_stats(group: _m5.stats.Group):
"""
Prepares the statistics for dumping.
@@ -234,8 +236,9 @@ def _prepare_stats(group: _m5.stats.Group):
_prepare_stats(child)
def get_simstat(root: Union[SimObject, List[SimObject]],
prepare_stats: bool = True) -> SimStat:
def get_simstat(
root: Union[SimObject, List[SimObject]], prepare_stats: bool = True
) -> SimStat:
"""
This function will return the SimStat object for a simulation given a
SimObject (typically a Root SimObject), or list of SimObjects. The returned
@@ -262,7 +265,7 @@ def get_simstat(root: Union[SimObject, List[SimObject]],
"""
stats_map = {}
creation_time = datetime.now()
time_converstion = None # TODO https://gem5.atlassian.net/browse/GEM5-846
time_converstion = None # TODO https://gem5.atlassian.net/browse/GEM5-846
final_tick = Root.getInstance().resolveStat("finalTick").value
sim_ticks = Root.getInstance().resolveStat("simTicks").value
simulated_begin_time = int(final_tick - sim_ticks)
@@ -284,16 +287,16 @@ def get_simstat(root: Union[SimObject, List[SimObject]],
_prepare_stats(r)
stats_map[r.get_name()] = get_stats_group(r)
else:
raise TypeError("Object (" + str(r) + ") passed is not a "
"SimObject. " + __name__ + " only processes "
"SimObjects, or a list of SimObjects.")
raise TypeError(
"Object (" + str(r) + ") passed is not a "
"SimObject. " + __name__ + " only processes "
"SimObjects, or a list of SimObjects."
)
return SimStat(
creation_time=creation_time,
time_conversion=time_converstion,
simulated_begin_time=simulated_begin_time,
simulated_end_time=simulated_end_time,
**stats_map,
)
creation_time=creation_time,
time_conversion=time_converstion,
simulated_begin_time=simulated_begin_time,
simulated_end_time=simulated_end_time,
**stats_map,
)

View File

@@ -32,8 +32,10 @@ from m5.util import warn
# fix the global frequency
def fixGlobalFrequency():
import _m5.core
_m5.core.fixClockFrequency()
def setGlobalFrequency(ticksPerSecond):
from m5.util import convert
import _m5.core
@@ -46,12 +48,15 @@ def setGlobalFrequency(ticksPerSecond):
tps = round(convert.anyToFrequency(ticksPerSecond))
else:
raise TypeError(
"wrong type '%s' for ticksPerSecond" % type(ticksPerSecond))
"wrong type '%s' for ticksPerSecond" % type(ticksPerSecond)
)
_m5.core.setClockFrequency(int(tps))
# how big does a rounding error need to be before we warn about it?
frequency_tolerance = 0.001 # 0.1%
def fromSeconds(value):
import _m5.core
@@ -62,7 +67,8 @@ def fromSeconds(value):
# had better be fixed
if not _m5.core.clockFrequencyFixed():
raise AttributeError(
"In order to do conversions, the global frequency must be fixed")
"In order to do conversions, the global frequency must be fixed"
)
if value == 0:
return 0
@@ -71,12 +77,21 @@ def fromSeconds(value):
value *= _m5.core.getClockFrequency()
int_value = int(
decimal.Decimal(value).to_integral_value( decimal.ROUND_HALF_UP))
decimal.Decimal(value).to_integral_value(decimal.ROUND_HALF_UP)
)
err = (value - int_value) / value
if err > frequency_tolerance:
warn("rounding error > tolerance\n %f rounded to %d", value,
int_value)
warn(
"rounding error > tolerance\n %f rounded to %d",
value,
int_value,
)
return int_value
__all__ = [ 'setGlobalFrequency', 'fixGlobalFrequency', 'fromSeconds',
'frequency_tolerance' ]
__all__ = [
"setGlobalFrequency",
"fixGlobalFrequency",
"fromSeconds",
"frequency_tolerance",
]

View File

@@ -52,65 +52,77 @@ from .multidict import multidict
# ever happen regardless of what the user does (i.e., an acutal m5
# bug).
def panic(fmt, *args):
print('panic:', fmt % args, file=sys.stderr)
print("panic:", fmt % args, file=sys.stderr)
sys.exit(1)
# fatal() should be called when the simulation cannot continue due to
# some condition that is the user's fault (bad configuration, invalid
# arguments, etc.) and not a simulator bug.
def fatal(fmt, *args):
print('fatal:', fmt % args, file=sys.stderr)
print("fatal:", fmt % args, file=sys.stderr)
sys.exit(1)
# warn() should be called when the user should be warned about some condition
# that may or may not be the user's fault, but that they should be made aware
# of as it may affect the simulation or results.
def warn(fmt, *args):
print('warn:', fmt % args, file=sys.stderr)
print("warn:", fmt % args, file=sys.stderr)
# inform() should be called when the user should be informed about some
# condition that they may be interested in.
def inform(fmt, *args):
print('info:', fmt % args, file=sys.stdout)
print("info:", fmt % args, file=sys.stdout)
def callOnce(func):
"""Decorator that enables to run a given function only once. Subsequent
calls are discarded."""
@wraps(func)
def wrapper(*args, **kwargs):
if not wrapper.has_run:
wrapper.has_run = True
return func(*args, **kwargs)
wrapper.has_run = False
return wrapper
def deprecated(replacement=None, logger=warn):
"""This decorator warns the user about a deprecated function."""
def decorator(func):
@callOnce
def notifyDeprecation():
try:
func_name = lambda f: f.__module__ + '.' + f.__qualname__
message = f'Function {func_name(func)} is deprecated.'
func_name = lambda f: f.__module__ + "." + f.__qualname__
message = f"Function {func_name(func)} is deprecated."
if replacement:
message += f' Prefer {func_name(replacement)} instead.'
message += f" Prefer {func_name(replacement)} instead."
except AttributeError:
message = f'Function {func} is deprecated.'
message = f"Function {func} is deprecated."
if replacement:
message += f' Prefer {replacement} instead.'
message += f" Prefer {replacement} instead."
logger(message)
notifyDeprecation()
return func
return decorator
class Singleton(type):
def __call__(cls, *args, **kwargs):
if hasattr(cls, '_instance'):
if hasattr(cls, "_instance"):
return cls._instance
cls._instance = super().__call__(*args, **kwargs)
return cls._instance
def addToPath(path):
"""Prepend given directory to system module search path. We may not
need this anymore if we can structure our config library more like a
@@ -125,6 +137,7 @@ def addToPath(path):
# so place the new dir right after that.
sys.path.insert(1, path)
def repoPath():
"""
Return the abspath of the gem5 repository.
@@ -132,15 +145,15 @@ def repoPath():
<gem5-repo>/build/<ISA>/gem5.[opt,debug...]
"""
return os.path.dirname(
os.path.dirname(
os.path.dirname(sys.executable)))
return os.path.dirname(os.path.dirname(os.path.dirname(sys.executable)))
# Apply method to object.
# applyMethod(obj, 'meth', <args>) is equivalent to obj.meth(<args>)
def applyMethod(obj, meth, *args, **kwargs):
return getattr(obj, meth)(*args, **kwargs)
# If the first argument is an (non-sequence) object, apply the named
# method with the given arguments. If the first argument is a
# sequence, apply the method to each element of the sequence (a la
@@ -151,6 +164,7 @@ def applyOrMap(objOrSeq, meth, *args, **kwargs):
else:
return [applyMethod(o, meth, *args, **kwargs) for o in objOrSeq]
def crossproduct(items):
if len(items) == 1:
for i in items[0]:
@@ -160,6 +174,7 @@ def crossproduct(items):
for j in crossproduct(items[1:]):
yield (i,) + j
def flatten(items):
while items:
item = items.pop(0)
@@ -168,25 +183,28 @@ def flatten(items):
else:
yield item
# force scalars to one-element lists for uniformity
def makeList(objOrList):
if isinstance(objOrList, list):
return objOrList
return [objOrList]
def printList(items, indent=4):
line = ' ' * indent
for i,item in enumerate(items):
line = " " * indent
for i, item in enumerate(items):
if len(line) + len(item) > 76:
print(line)
line = ' ' * indent
line = " " * indent
if i < len(items) - 1:
line += '%s, ' % item
line += "%s, " % item
else:
line += item
print(line)
def isInteractive():
"""Check if the simulator is run interactively or in a batch environment"""

View File

@@ -24,17 +24,19 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__all__ = [ 'attrdict', 'multiattrdict', 'optiondict' ]
__all__ = ["attrdict", "multiattrdict", "optiondict"]
class attrdict(dict):
"""Wrap dict, so you can use attribute access to get/set elements"""
def __getattr__(self, attr):
if attr in self:
return self.__getitem__(attr)
return super().__getattribute__(attr)
def __setattr__(self, attr, value):
if attr in dir(self) or attr.startswith('_'):
if attr in dir(self) or attr.startswith("_"):
return super().__setattr__(attr, value)
return self.__setitem__(attr, value)
@@ -49,40 +51,45 @@ class attrdict(dict):
def __setstate__(self, state):
self.update(state)
class multiattrdict(attrdict):
"""Wrap attrdict so that nested attribute accesses automatically create
nested dictionaries."""
def __getattr__(self, attr):
try:
return super().__getattr__(attr)
except AttributeError:
if attr.startswith('_'):
if attr.startswith("_"):
raise
d = multiattrdict()
setattr(self, attr, d)
return d
class optiondict(attrdict):
"""Modify attrdict so that a missing attribute just returns None"""
def __getattr__(self, attr):
try:
return super().__getattr__(attr)
except AttributeError:
return None
if __name__ == '__main__':
if __name__ == "__main__":
x = attrdict()
x.y = 1
x['z'] = 2
print(x['y'], x.y)
print(x['z'], x.z)
x["z"] = 2
print(x["y"], x.y)
print(x["z"], x.z)
print(dir(x))
print(x)
print()
del x['y']
del x["y"]
del x.z
print(dir(x))
print(x)

View File

@@ -38,10 +38,10 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# metric prefixes
atto = 1.0e-18
atto = 1.0e-18
femto = 1.0e-15
pico = 1.0e-12
nano = 1.0e-9
pico = 1.0e-12
nano = 1.0e-9
micro = 1.0e-6
milli = 1.0e-3
@@ -50,7 +50,7 @@ mega = 1.0e6
giga = 1.0e9
tera = 1.0e12
peta = 1.0e15
exa = 1.0e18
exa = 1.0e18
# power of 2 prefixes
kibi = 1024
@@ -61,47 +61,49 @@ pebi = tebi * 1024
exbi = pebi * 1024
metric_prefixes = {
'Ei': exbi,
'E': exa,
'Pi': pebi,
'P': peta,
'Ti': tebi,
'T': tera,
'Gi': gibi,
'G': giga,
'M': mega,
'Ki': kibi,
'k': kilo,
'Mi': mebi,
'm': milli,
'u': micro,
'n': nano,
'p': pico,
'f': femto,
'a': atto,
"Ei": exbi,
"E": exa,
"Pi": pebi,
"P": peta,
"Ti": tebi,
"T": tera,
"Gi": gibi,
"G": giga,
"M": mega,
"Ki": kibi,
"k": kilo,
"Mi": mebi,
"m": milli,
"u": micro,
"n": nano,
"p": pico,
"f": femto,
"a": atto,
}
binary_prefixes = {
'Ei': exbi,
'E' : exbi,
'Pi': pebi,
'P' : pebi,
'Ti': tebi,
'T' : tebi,
'Gi': gibi,
'G' : gibi,
'Mi': mebi,
'M' : mebi,
'Ki': kibi,
'k' : kibi,
"Ei": exbi,
"E": exbi,
"Pi": pebi,
"P": pebi,
"Ti": tebi,
"T": tebi,
"Gi": gibi,
"G": gibi,
"Mi": mebi,
"M": mebi,
"Ki": kibi,
"k": kibi,
}
def assertStr(value):
if not isinstance(value, str):
raise TypeError("wrong type '%s' should be str" % type(value))
def _split_suffix(value, suffixes):
'''Split a string based on a suffix from a list of suffixes.
"""Split a string based on a suffix from a list of suffixes.
:param value: String value to test for a matching suffix.
:param suffixes: Container of suffixes to test.
@@ -109,16 +111,15 @@ def _split_suffix(value, suffixes):
:returns: A tuple of (value, suffix). Suffix is the empty string
if there is no match.
'''
matches = [ sfx for sfx in suffixes if value.endswith(sfx) ]
"""
matches = [sfx for sfx in suffixes if value.endswith(sfx)]
assert len(matches) <= 1
return (value[:-len(matches[0])], matches[0]) if matches \
else (value, '')
return (value[: -len(matches[0])], matches[0]) if matches else (value, "")
def toNum(value, target_type, units, prefixes, converter):
'''Convert a string using units and prefixes to (typically) a float or
"""Convert a string using units and prefixes to (typically) a float or
integer.
String values are assumed to either be a naked magnitude without a
@@ -133,7 +134,7 @@ def toNum(value, target_type, units, prefixes, converter):
:returns: Tuple of (converted value, unit)
'''
"""
assertStr(value)
def convert(val):
@@ -141,7 +142,8 @@ def toNum(value, target_type, units, prefixes, converter):
return converter(val)
except ValueError:
raise ValueError(
"cannot convert '%s' to %s" % (value, target_type))
"cannot convert '%s' to %s" % (value, target_type)
)
# Units can be None, the empty string, or a list/tuple. Convert
# to a tuple for consistent handling.
@@ -159,56 +161,67 @@ def toNum(value, target_type, units, prefixes, converter):
magnitude, prefix = _split_suffix(magnitude_prefix, prefixes)
scale = prefixes[prefix] if prefix else 1
else:
magnitude, prefix, scale = magnitude_prefix, '', 1
magnitude, prefix, scale = magnitude_prefix, "", 1
return convert(magnitude) * scale, unit
def toFloat(value, target_type='float', units=None, prefixes=[]):
def toFloat(value, target_type="float", units=None, prefixes=[]):
return toNum(value, target_type, units, prefixes, float)[0]
def toMetricFloat(value, target_type='float', units=None):
def toMetricFloat(value, target_type="float", units=None):
return toFloat(value, target_type, units, metric_prefixes)
def toBinaryFloat(value, target_type='float', units=None):
def toBinaryFloat(value, target_type="float", units=None):
return toFloat(value, target_type, units, binary_prefixes)
def toInteger(value, target_type='integer', units=None, prefixes=[]):
return toNum(value, target_type, units, prefixes,
lambda x: int(x, 0))[0]
def toMetricInteger(value, target_type='integer', units=None):
def toInteger(value, target_type="integer", units=None, prefixes=[]):
return toNum(value, target_type, units, prefixes, lambda x: int(x, 0))[0]
def toMetricInteger(value, target_type="integer", units=None):
return toInteger(value, target_type, units, metric_prefixes)
def toBinaryInteger(value, target_type='integer', units=None):
def toBinaryInteger(value, target_type="integer", units=None):
return toInteger(value, target_type, units, binary_prefixes)
def toBool(value):
assertStr(value)
value = value.lower()
if value in ('true', 't', 'yes', 'y', '1'):
if value in ("true", "t", "yes", "y", "1"):
return True
if value in ('false', 'f', 'no', 'n', '0'):
if value in ("false", "f", "no", "n", "0"):
return False
raise ValueError("cannot convert '%s' to bool" % value)
def toFrequency(value):
return toMetricFloat(value, 'frequency', 'Hz')
return toMetricFloat(value, "frequency", "Hz")
def toLatency(value):
return toMetricFloat(value, 'latency', 's')
return toMetricFloat(value, "latency", "s")
def anyToLatency(value):
"""Convert a magnitude and unit to a clock period."""
magnitude, unit = toNum(value,
target_type='latency',
units=('Hz', 's'),
prefixes=metric_prefixes,
converter=float)
if unit == 's':
magnitude, unit = toNum(
value,
target_type="latency",
units=("Hz", "s"),
prefixes=metric_prefixes,
converter=float,
)
if unit == "s":
return magnitude
elif unit == 'Hz':
elif unit == "Hz":
try:
return 1.0 / magnitude
except ZeroDivisionError:
@@ -216,17 +229,20 @@ def anyToLatency(value):
else:
raise ValueError(f"'{value}' needs a valid unit to be unambiguous.")
def anyToFrequency(value):
"""Convert a magnitude and unit to a clock frequency."""
magnitude, unit = toNum(value,
target_type='frequency',
units=('Hz', 's'),
prefixes=metric_prefixes,
converter=float)
if unit == 'Hz':
magnitude, unit = toNum(
value,
target_type="frequency",
units=("Hz", "s"),
prefixes=metric_prefixes,
converter=float,
)
if unit == "Hz":
return magnitude
elif unit == 's':
elif unit == "s":
try:
return 1.0 / magnitude
except ZeroDivisionError:
@@ -234,40 +250,49 @@ def anyToFrequency(value):
else:
raise ValueError(f"'{value}' needs a valid unit to be unambiguous.")
def toNetworkBandwidth(value):
return toMetricFloat(value, 'network bandwidth', 'bps')
return toMetricFloat(value, "network bandwidth", "bps")
def toMemoryBandwidth(value):
return toBinaryFloat(value, 'memory bandwidth', 'B/s')
return toBinaryFloat(value, "memory bandwidth", "B/s")
def toMemorySize(value):
return toBinaryInteger(value, 'memory size', 'B')
return toBinaryInteger(value, "memory size", "B")
def toIpAddress(value):
if not isinstance(value, str):
raise TypeError("wrong type '%s' should be str" % type(value))
bytes = value.split('.')
bytes = value.split(".")
if len(bytes) != 4:
raise ValueError('invalid ip address %s' % value)
raise ValueError("invalid ip address %s" % value)
for byte in bytes:
if not 0 <= int(byte) <= 0xff:
raise ValueError('invalid ip address %s' % value)
if not 0 <= int(byte) <= 0xFF:
raise ValueError("invalid ip address %s" % value)
return (
(int(bytes[0]) << 24)
| (int(bytes[1]) << 16)
| (int(bytes[2]) << 8)
| (int(bytes[3]) << 0)
)
return (int(bytes[0]) << 24) | (int(bytes[1]) << 16) | \
(int(bytes[2]) << 8) | (int(bytes[3]) << 0)
def toIpNetmask(value):
if not isinstance(value, str):
raise TypeError("wrong type '%s' should be str" % type(value))
(ip, netmask) = value.split('/')
(ip, netmask) = value.split("/")
ip = toIpAddress(ip)
netmaskParts = netmask.split('.')
netmaskParts = netmask.split(".")
if len(netmaskParts) == 1:
if not 0 <= int(netmask) <= 32:
raise ValueError('invalid netmask %s' % netmask)
raise ValueError("invalid netmask %s" % netmask)
return (ip, int(netmask))
elif len(netmaskParts) == 4:
netmaskNum = toIpAddress(netmask)
@@ -275,45 +300,52 @@ def toIpNetmask(value):
return (ip, 0)
testVal = 0
for i in range(32):
testVal |= (1 << (31 - i))
testVal |= 1 << (31 - i)
if testVal == netmaskNum:
return (ip, i + 1)
raise ValueError('invalid netmask %s' % netmask)
raise ValueError("invalid netmask %s" % netmask)
else:
raise ValueError('invalid netmask %s' % netmask)
raise ValueError("invalid netmask %s" % netmask)
def toIpWithPort(value):
if not isinstance(value, str):
raise TypeError("wrong type '%s' should be str" % type(value))
(ip, port) = value.split(':')
(ip, port) = value.split(":")
ip = toIpAddress(ip)
if not 0 <= int(port) <= 0xffff:
raise ValueError('invalid port %s' % port)
if not 0 <= int(port) <= 0xFFFF:
raise ValueError("invalid port %s" % port)
return (ip, int(port))
def toVoltage(value):
return toMetricFloat(value, 'voltage', 'V')
return toMetricFloat(value, "voltage", "V")
def toCurrent(value):
return toMetricFloat(value, 'current', 'A')
return toMetricFloat(value, "current", "A")
def toEnergy(value):
return toMetricFloat(value, 'energy', 'J')
return toMetricFloat(value, "energy", "J")
def toTemperature(value):
"""Convert a string value specified to a temperature in Kelvin"""
magnitude, unit = toNum(value,
target_type='temperature',
units=('K', 'C', 'F'),
prefixes=metric_prefixes,
converter=float)
if unit == 'K':
magnitude, unit = toNum(
value,
target_type="temperature",
units=("K", "C", "F"),
prefixes=metric_prefixes,
converter=float,
)
if unit == "K":
kelvin = magnitude
elif unit == 'C':
elif unit == "C":
kelvin = magnitude + 273.15
elif unit == 'F':
elif unit == "F":
kelvin = (magnitude + 459.67) / 1.8
else:
raise ValueError(f"'{value}' needs a valid temperature unit.")

View File

@@ -57,11 +57,13 @@ import m5, os, re
from m5.SimObject import isRoot, isSimObjectVector
from m5.params import PortRef, isNullPointer
from m5.util import warn
try:
import pydot
except:
pydot = False
def simnode_children(simNode):
for child in simNode._children.values():
if isNullPointer(child):
@@ -73,15 +75,16 @@ def simnode_children(simNode):
else:
yield child
# need to create all nodes (components) before creating edges (memory channels)
def dot_create_nodes(simNode, callgraph):
if isRoot(simNode):
label = "root"
else:
label = simNode._name
full_path = re.sub('\.', '_', simNode.path())
full_path = re.sub("\.", "_", simNode.path())
# add class name under the label
label = "\"" + label + " \\n: " + simNode.__class__.__name__ + "\""
label = '"' + label + " \\n: " + simNode.__class__.__name__ + '"'
# each component is a sub-graph (cluster)
cluster = dot_create_cluster(simNode, full_path, label)
@@ -100,12 +103,13 @@ def dot_create_nodes(simNode, callgraph):
callgraph.add_subgraph(cluster)
# create all edges according to memory hierarchy
def dot_create_edges(simNode, callgraph):
for port_name in simNode._ports.keys():
port = simNode._port_refs.get(port_name, None)
if port != None:
full_path = re.sub('\.', '_', simNode.path())
full_path = re.sub("\.", "_", simNode.path())
full_port_name = full_path + "_" + port_name
port_node = dot_create_node(simNode, full_port_name, port_name)
# create edges
@@ -121,24 +125,25 @@ def dot_create_edges(simNode, callgraph):
for child in simnode_children(simNode):
dot_create_edges(child, callgraph)
def dot_add_edge(simNode, callgraph, full_port_name, port):
peer = port.peer
full_peer_path = re.sub('\.', '_', peer.simobj.path())
full_peer_path = re.sub("\.", "_", peer.simobj.path())
full_peer_port_name = full_peer_path + "_" + peer.name
# Each edge is encountered twice, once for each peer. We only want one
# edge, so we'll arbitrarily chose which peer "wins" based on their names.
if full_peer_port_name < full_port_name:
dir_type = {
(False, False) : 'both',
(True, False) : 'forward',
(False, True) : 'back',
(True, True) : 'none'
}[ (port.is_source,
peer.is_source) ]
(False, False): "both",
(True, False): "forward",
(False, True): "back",
(True, True): "none",
}[(port.is_source, peer.is_source)]
edge = pydot.Edge(full_port_name, full_peer_port_name, dir=dir_type)
callgraph.add_edge(edge)
def dot_create_cluster(simNode, full_path, label):
# get the parameter values of the node and use them as a tooltip
ini_strings = []
@@ -146,42 +151,45 @@ def dot_create_cluster(simNode, full_path, label):
value = simNode._values.get(param)
if value != None:
# parameter name = value in HTML friendly format
ini_strings.append(str(param) + "&#61;" +
simNode._values[param].ini_str())
ini_strings.append(
str(param) + "&#61;" + simNode._values[param].ini_str()
)
# join all the parameters with an HTML newline
# Pydot limit line length to 16384.
# Account for the quotes added later around the tooltip string
tooltip = "&#10;\\".join(ini_strings)
max_tooltip_length = 16384 - 2
if len(tooltip) > max_tooltip_length:
truncated = '... (truncated)'
tooltip = tooltip[:max_tooltip_length-len(truncated)] + truncated
truncated = "... (truncated)"
tooltip = tooltip[: max_tooltip_length - len(truncated)] + truncated
return pydot.Cluster(
full_path,
shape="box",
label=label,
tooltip='"' + tooltip + '"',
style='"rounded, filled"',
color="#000000",
fillcolor=dot_gen_colour(simNode),
fontname="Arial",
fontsize="14",
fontcolor="#000000",
)
return pydot.Cluster( \
full_path, \
shape = "box", \
label = label, \
tooltip = "\"" + tooltip + "\"", \
style = "\"rounded, filled\"", \
color = "#000000", \
fillcolor = dot_gen_colour(simNode), \
fontname = "Arial", \
fontsize = "14", \
fontcolor = "#000000" \
)
def dot_create_node(simNode, full_path, label):
return pydot.Node( \
full_path, \
shape = "box", \
label = label, \
style = "\"rounded, filled\"", \
color = "#000000", \
fillcolor = dot_gen_colour(simNode, True), \
fontname = "Arial", \
fontsize = "14", \
fontcolor = "#000000" \
)
return pydot.Node(
full_path,
shape="box",
label=label,
style='"rounded, filled"',
color="#000000",
fillcolor=dot_gen_colour(simNode, True),
fontname="Arial",
fontsize="14",
fontcolor="#000000",
)
# an enumerator for different kinds of node types, at the moment we
# discern the majority of node types, with the caches being the
@@ -194,17 +202,20 @@ class NodeType:
DEV = 4
OTHER = 5
# based on the sim object, determine the node type
def get_node_type(simNode):
if isinstance(simNode, m5.objects.System):
return NodeType.SYS
# NULL ISA has no BaseCPU or PioDevice, so check if these names
# exists before using them
elif 'BaseCPU' in dir(m5.objects) and \
isinstance(simNode, m5.objects.BaseCPU):
elif "BaseCPU" in dir(m5.objects) and isinstance(
simNode, m5.objects.BaseCPU
):
return NodeType.CPU
elif 'PioDevice' in dir(m5.objects) and \
isinstance(simNode, m5.objects.PioDevice):
elif "PioDevice" in dir(m5.objects) and isinstance(
simNode, m5.objects.PioDevice
):
return NodeType.DEV
elif isinstance(simNode, m5.objects.BaseXBar):
return NodeType.XBAR
@@ -213,6 +224,7 @@ def get_node_type(simNode):
else:
return NodeType.OTHER
# based on the node type, determine the colour as an RGB tuple, the
# palette is rather arbitrary at this point (some coherent natural
# tones), and someone that feels artistic should probably have a look
@@ -231,9 +243,10 @@ def get_type_colour(nodeType):
# use a relatively gray shade
return (186, 182, 174)
# generate colour for a node, either corresponding to a sim object or a
# port
def dot_gen_colour(simNode, isPort = False):
def dot_gen_colour(simNode, isPort=False):
# determine the type of the current node, and also its parent, if
# the node is not the same type as the parent then we use the base
# colour for its type
@@ -269,35 +282,38 @@ def dot_gen_colour(simNode, isPort = False):
return dot_rgb_to_html(r, g, b)
def dot_rgb_to_html(r, g, b):
return "#%.2x%.2x%.2x" % (int(r), int(g), int(b))
# We need to create all of the clock domains. We abuse the alpha channel to get
# the correct domain colouring.
def dot_add_clk_domain(c_dom, v_dom):
label = "\"" + str(c_dom) + "\ :\ " + str(v_dom) + "\""
label = re.sub('\.', '_', str(label))
full_path = re.sub('\.', '_', str(c_dom))
return pydot.Cluster( \
full_path, \
shape = "box", \
label = label, \
style = "\"rounded, filled, dashed\"", \
color = "#000000", \
fillcolor = "#AFC8AF8F", \
fontname = "Arial", \
fontsize = "14", \
fontcolor = "#000000" \
)
label = '"' + str(c_dom) + "\ :\ " + str(v_dom) + '"'
label = re.sub("\.", "_", str(label))
full_path = re.sub("\.", "_", str(c_dom))
return pydot.Cluster(
full_path,
shape="box",
label=label,
style='"rounded, filled, dashed"',
color="#000000",
fillcolor="#AFC8AF8F",
fontname="Arial",
fontsize="14",
fontcolor="#000000",
)
def dot_create_dvfs_nodes(simNode, callgraph, domain=None):
if isRoot(simNode):
label = "root"
else:
label = simNode._name
full_path = re.sub('\.', '_', simNode.path())
full_path = re.sub("\.", "_", simNode.path())
# add class name under the label
label = "\"" + label + " \\n: " + simNode.__class__.__name__ + "\""
label = '"' + label + " \\n: " + simNode.__class__.__name__ + '"'
# each component is a sub-graph (cluster)
cluster = dot_create_cluster(simNode, full_path, label)
@@ -316,12 +332,12 @@ def dot_create_dvfs_nodes(simNode, callgraph, domain=None):
# recurse to children
for child in simnode_children(simNode):
try:
c_dom = child.__getattr__('clk_domain')
v_dom = c_dom.__getattr__('voltage_domain')
c_dom = child.__getattr__("clk_domain")
v_dom = c_dom.__getattr__("voltage_domain")
except AttributeError:
# Just re-use the domain from above
c_dom = domain
v_dom = c_dom.__getattr__('voltage_domain')
v_dom = c_dom.__getattr__("voltage_domain")
pass
if c_dom == domain or c_dom == None:
@@ -339,16 +355,19 @@ def dot_create_dvfs_nodes(simNode, callgraph, domain=None):
callgraph.add_subgraph(cluster)
def do_dot(root, outdir, dotFilename):
if not pydot:
warn("No dot file generated. " +
"Please install pydot to generate the dot file and pdf.")
warn(
"No dot file generated. "
+ "Please install pydot to generate the dot file and pdf."
)
return
# * use ranksep > 1.0 for for vertical separation between nodes
# especially useful if you need to annotate edges using e.g. visio
# which accepts svg format
# * no need for hoizontal separation as nothing moves horizonally
callgraph = pydot.Dot(graph_type='digraph', ranksep='1.3')
callgraph = pydot.Dot(graph_type="digraph", ranksep="1.3")
dot_create_nodes(root, callgraph)
dot_create_edges(root, callgraph)
dot_filename = os.path.join(outdir, dotFilename)
@@ -361,16 +380,19 @@ def do_dot(root, outdir, dotFilename):
except:
warn("failed to generate dot output from %s", dot_filename)
def do_dvfs_dot(root, outdir, dotFilename):
if not pydot:
warn("No dot file generated. " +
"Please install pydot to generate the dot file and pdf.")
warn(
"No dot file generated. "
+ "Please install pydot to generate the dot file and pdf."
)
return
# There is a chance that we are unable to resolve the clock or
# voltage domains. If so, we fail silently.
try:
dvfsgraph = pydot.Dot(graph_type='digraph', ranksep='1.3')
dvfsgraph = pydot.Dot(graph_type="digraph", ranksep="1.3")
dot_create_dvfs_nodes(root, dvfsgraph)
dot_create_edges(root, dvfsgraph)
dot_filename = os.path.join(outdir, dotFilename)

View File

@@ -38,6 +38,7 @@
import os
import m5
from m5.util import warn
try:
import pydot
except:
@@ -47,55 +48,56 @@ except:
def _dot_rgb_to_html(r, g, b):
return "#%.2x%.2x%.2x" % (r, g, b)
def _dot_create_router_node(full_path, label):
return pydot.Node( \
full_path, \
shape = "Mrecord", \
label = label, \
style = "\"rounded, filled\"", \
color = "#000000", \
fillcolor = _dot_rgb_to_html(204, 230, 252), \
fontname = "Arial", \
fontsize = "14", \
fontcolor = "#000000" \
)
return pydot.Node(
full_path,
shape="Mrecord",
label=label,
style='"rounded, filled"',
color="#000000",
fillcolor=_dot_rgb_to_html(204, 230, 252),
fontname="Arial",
fontsize="14",
fontcolor="#000000",
)
def _dot_create_ctrl_node(full_path, label):
return pydot.Node( \
full_path, \
shape = "Mrecord", \
label = label, \
style = "\"rounded, filled\"", \
color = "#000000", \
fillcolor = _dot_rgb_to_html(229, 188, 208), \
fontname = "Arial", \
fontsize = "14", \
fontcolor = "#000000" \
)
return pydot.Node(
full_path,
shape="Mrecord",
label=label,
style='"rounded, filled"',
color="#000000",
fillcolor=_dot_rgb_to_html(229, 188, 208),
fontname="Arial",
fontsize="14",
fontcolor="#000000",
)
def _dot_create_int_edge(src, dst):
return pydot.Edge(src, dst,
weight = .5,
color = '#042d50',
dir = 'forward')
return pydot.Edge(src, dst, weight=0.5, color="#042d50", dir="forward")
def _dot_create_ext_edge(src, dst):
return pydot.Edge(src, dst,
weight = 1.0,
color = '#381526',
dir = 'both')
return pydot.Edge(src, dst, weight=1.0, color="#381526", dir="both")
def _dot_create(network, callgraph):
for r in network.routers:
callgraph.add_node(_dot_create_router_node(r.path(),
'R %d' % r.router_id))
callgraph.add_node(
_dot_create_router_node(r.path(), "R %d" % r.router_id)
)
# One link for each direction but draw one edge only
connected = dict()
for link in network.int_links:
if (link.src_node.path() in connected) and \
(connected[link.src_node.path()] == link.dst_node.path()):
continue
if (link.src_node.path() in connected) and (
connected[link.src_node.path()] == link.dst_node.path()
):
continue
callgraph.add_edge(
_dot_create_int_edge(link.src_node.path(), link.dst_node.path())
)
@@ -106,44 +108,45 @@ def _dot_create(network, callgraph):
rpaths = [link.ext_node.path()[::-1] for link in network.ext_links]
preffix = os.path.commonprefix(paths)
suffix = os.path.commonprefix(rpaths)[::-1]
def strip_right(text, suffix):
if not text.endswith(suffix):
return text
return text[:len(text)-len(suffix)]
return text[: len(text) - len(suffix)]
def strip_left(text, prefix):
if not text.startswith(prefix):
return text
return text[len(prefix):]
return text[len(prefix) :]
for link in network.ext_links:
ctrl = link.ext_node
label = strip_right(strip_left(ctrl.path(), preffix), suffix)
if hasattr(ctrl, '_node_type'):
label += ' (' + ctrl._node_type + ')'
callgraph.add_node(
_dot_create_ctrl_node(ctrl.path(), label)
if hasattr(ctrl, "_node_type"):
label += " (" + ctrl._node_type + ")"
callgraph.add_node(_dot_create_ctrl_node(ctrl.path(), label))
callgraph.add_edge(
_dot_create_ext_edge(link.ext_node.path(), link.int_node.path())
)
callgraph.add_edge(_dot_create_ext_edge(
link.ext_node.path(), link.int_node.path()))
def _do_dot(network, outdir, dotFilename):
callgraph = pydot.Dot(graph_type='graph', rankdir='LR')
callgraph = pydot.Dot(graph_type="graph", rankdir="LR")
_dot_create(network, callgraph)
dot_filename = os.path.join(outdir, dotFilename)
callgraph.write(dot_filename)
try:
# dot crashes if the figure is extremely wide.
# So avoid terminating simulation unnecessarily
callgraph.write_svg(dot_filename + ".svg", prog='neato')
callgraph.write_pdf(dot_filename + ".pdf", prog='neato')
callgraph.write_svg(dot_filename + ".svg", prog="neato")
callgraph.write_pdf(dot_filename + ".pdf", prog="neato")
except:
warn("failed to generate dot output from %s", dot_filename)
def do_ruby_dot(root, outdir, dotFilename):
RubyNetwork = getattr(m5.objects, 'RubyNetwork', None)
RubyNetwork = getattr(m5.objects, "RubyNetwork", None)
if not pydot or not RubyNetwork:
return
@@ -154,6 +157,7 @@ def do_ruby_dot(root, outdir, dotFilename):
for network in filter(is_ruby_network, root.descendants()):
# We assume each ruby system has a single network.
rubydotFilename = dotFilename.replace(".dot",
"." + network.get_parent().path() + ".dot")
rubydotFilename = dotFilename.replace(
".dot", "." + network.get_parent().path() + ".dot"
)
_do_dot(network, outdir, rubydotFilename)

View File

@@ -41,12 +41,16 @@ import os
from m5.SimObject import SimObject
from m5.util import fatal
class FdtProperty(pyfdt.FdtProperty):
"""Create a property without values."""
pass
class FdtPropertyWords(pyfdt.FdtPropertyWords):
"""Create a property with word (32-bit unsigned) values."""
def __init__(self, name, words):
if type(words) != list:
words = [words]
@@ -55,15 +59,19 @@ class FdtPropertyWords(pyfdt.FdtPropertyWords):
words = [int(w, base=0) if type(w) == str else int(w) for w in words]
super().__init__(name, words)
class FdtPropertyStrings(pyfdt.FdtPropertyStrings):
"""Create a property with string values."""
def __init__(self, name, strings):
if type(strings) == str:
strings = [strings]
strings = [str(string) for string in strings] # Make all values strings
strings = [
str(string) for string in strings
] # Make all values strings
super().__init__(name, strings)
class FdtPropertyBytes(pyfdt.FdtPropertyBytes):
"""Create a property with integer (8-bit signed) values."""
@@ -72,10 +80,12 @@ class FdtPropertyBytes(pyfdt.FdtPropertyBytes):
values = [values]
# Make sure all values are ints (use automatic base detection if the
# type is str)
values = [int(v, base=0)
if isinstance(v, str) else int(v) for v in values]
values = [
int(v, base=0) if isinstance(v, str) else int(v) for v in values
]
super().__init__(name, values)
class FdtState(object):
"""Class for maintaining state while recursively generating a flattened
device tree. The state tracks address, size and CPU address cell sizes, and
@@ -88,10 +98,10 @@ class FdtState(object):
"""Instantiate values of this state. The state can only be initialized
once."""
self.addr_cells = kwargs.pop('addr_cells', 0)
self.size_cells = kwargs.pop('size_cells', 0)
self.cpu_cells = kwargs.pop('cpu_cells', 0)
self.interrupt_cells = kwargs.pop('interrupt_cells', 0)
self.addr_cells = kwargs.pop("addr_cells", 0)
self.size_cells = kwargs.pop("size_cells", 0)
self.cpu_cells = kwargs.pop("cpu_cells", 0)
self.interrupt_cells = kwargs.pop("interrupt_cells", 0)
def phandle(self, obj):
"""Return a unique phandle number for a key. The key can be a SimObject
@@ -104,7 +114,7 @@ class FdtState(object):
try:
key = str(obj)
except ValueError:
raise ValueError('Phandle keys must be castable to str')
raise ValueError("Phandle keys must be castable to str")
if not key in FdtState.phandles:
FdtState.phandle_counter += 1
@@ -123,7 +133,9 @@ class FdtState(object):
if (value >> (32 * cells)) != 0:
fatal("Value %d doesn't fit in %d cells" % (value, cells))
return [(value >> 32*(x-1)) & 0xFFFFFFFF for x in range(cells, 0, -1)]
return [
(value >> 32 * (x - 1)) & 0xFFFFFFFF for x in range(cells, 0, -1)
]
def addrCells(self, addr):
"""Format an integer type according to the address_cells value of this
@@ -166,8 +178,10 @@ class FdtState(object):
class FdtNop(pyfdt.FdtNop):
"""Create an empty node."""
pass
class FdtNode(pyfdt.FdtNode):
def __init__(self, name, obj=None):
"""Create a new node and immediately set the phandle property, if obj
@@ -180,7 +194,7 @@ class FdtNode(pyfdt.FdtNode):
"""Change the behavior of the normal append to override if a node with
the same name already exists or merge if the name exists and is a node
type. Can also take a list of subnodes, that each get appended."""
if not hasattr(subnodes, '__iter__'):
if not hasattr(subnodes, "__iter__"):
subnodes = [subnodes]
for subnode in subnodes:
@@ -193,8 +207,9 @@ class FdtNode(pyfdt.FdtNode):
except ValueError:
item = None
if isinstance(item, pyfdt.FdtNode) and \
isinstance(subnode, pyfdt.FdtNode):
if isinstance(item, pyfdt.FdtNode) and isinstance(
subnode, pyfdt.FdtNode
):
item.merge(subnode)
subnode = item
@@ -210,7 +225,7 @@ class FdtNode(pyfdt.FdtNode):
strings."""
if isinstance(compatible, str):
compatible = [compatible]
self.append(FdtPropertyStrings('compatible', compatible))
self.append(FdtPropertyStrings("compatible", compatible))
def appendPhandle(self, obj):
"""Append a phandle property to this node with the phandle of the
@@ -221,6 +236,7 @@ class FdtNode(pyfdt.FdtNode):
phandle = state.phandle(obj)
self.append(FdtPropertyWords("phandle", [phandle]))
class Fdt(pyfdt.Fdt):
def sortNodes(self, node):
"""Move all properties to the beginning and subnodes to the end
@@ -251,7 +267,7 @@ class Fdt(pyfdt.Fdt):
"""Convert the device tree to DTB and write to a file."""
filename = os.path.realpath(filename)
try:
with open(filename, 'wb') as f:
with open(filename, "wb") as f:
f.write(self.to_dtb())
return filename
except IOError:
@@ -261,7 +277,7 @@ class Fdt(pyfdt.Fdt):
"""Convert the device tree to DTS and write to a file."""
filename = os.path.realpath(filename)
try:
with open(filename, 'w') as f:
with open(filename, "w") as f:
f.write(self.to_dts())
return filename
except IOError:

View File

@@ -24,10 +24,11 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__all__ = [ 'multidict' ]
__all__ = ["multidict"]
class multidict(object):
def __init__(self, parent = {}, **kwargs):
def __init__(self, parent={}, **kwargs):
self.local = dict(**kwargs)
self.parent = parent
self.deleted = {}
@@ -67,13 +68,13 @@ class multidict(object):
return len(self.local) + len(self.parent)
def next(self):
for key,value in self.local.items():
yield key,value
for key, value in self.local.items():
yield key, value
if self.parent:
for key,value in self.parent.next():
for key, value in self.parent.next():
if key not in self.local and key not in self.deleted:
yield key,value
yield key, value
def has_key(self, key):
return key in self
@@ -83,11 +84,11 @@ class multidict(object):
yield item
def keys(self):
for key,value in self.next():
for key, value in self.next():
yield key
def values(self):
for key,value in self.next():
for key, value in self.next():
yield value
def get(self, key, default=None):
@@ -105,10 +106,10 @@ class multidict(object):
return default
def _dump(self):
print('multidict dump')
print("multidict dump")
node = self
while isinstance(node, multidict):
print(' ', node.local)
print(" ", node.local)
node = node.parent
def _dumpkey(self, key):
@@ -120,52 +121,53 @@ class multidict(object):
node = node.parent
print(key, values)
if __name__ == '__main__':
if __name__ == "__main__":
test1 = multidict()
test2 = multidict(test1)
test3 = multidict(test2)
test4 = multidict(test3)
test1['a'] = 'test1_a'
test1['b'] = 'test1_b'
test1['c'] = 'test1_c'
test1['d'] = 'test1_d'
test1['e'] = 'test1_e'
test1["a"] = "test1_a"
test1["b"] = "test1_b"
test1["c"] = "test1_c"
test1["d"] = "test1_d"
test1["e"] = "test1_e"
test2['a'] = 'test2_a'
del test2['b']
test2['c'] = 'test2_c'
del test1['a']
test2["a"] = "test2_a"
del test2["b"]
test2["c"] = "test2_c"
del test1["a"]
test2.setdefault('f', multidict)
test2.setdefault("f", multidict)
print('test1>', list(test1.items()))
print('test2>', list(test2.items()))
#print(test1['a'])
print(test1['b'])
print(test1['c'])
print(test1['d'])
print(test1['e'])
print("test1>", list(test1.items()))
print("test2>", list(test2.items()))
# print(test1['a'])
print(test1["b"])
print(test1["c"])
print(test1["d"])
print(test1["e"])
print(test2['a'])
#print(test2['b'])
print(test2['c'])
print(test2['d'])
print(test2['e'])
print(test2["a"])
# print(test2['b'])
print(test2["c"])
print(test2["d"])
print(test2["e"])
for key in test2.keys():
print(key)
test2.get('g', 'foo')
#test2.get('b')
test2.get('b', 'bar')
test2.setdefault('b', 'blah')
test2.get("g", "foo")
# test2.get('b')
test2.get("b", "bar")
test2.setdefault("b", "blah")
print(test1)
print(test2)
print(repr(test2))
print(len(test2))
test3['a'] = [ 0, 1, 2, 3 ]
test3["a"] = [0, 1, 2, 3]
print(test4)

View File

@@ -35,11 +35,13 @@
from abc import *
class PyBindExport(object, metaclass=ABCMeta):
@abstractmethod
def export(self, code, cname):
pass
class PyBindProperty(PyBindExport):
def __init__(self, name, cxx_name=None, writable=True):
self.name = name
@@ -50,14 +52,21 @@ class PyBindProperty(PyBindExport):
export = "def_readwrite" if self.writable else "def_readonly"
code('.${export}("${{self.name}}", &${cname}::${{self.cxx_name}})')
class PyBindMethod(PyBindExport):
def __init__(self, name, cxx_name=None, args=None,
return_value_policy=None, static=False):
def __init__(
self,
name,
cxx_name=None,
args=None,
return_value_policy=None,
static=False,
):
self.name = name
self.cxx_name = cxx_name if cxx_name else name
self.args = args
self.return_value_policy = return_value_policy
self.method_def = 'def_static' if static else 'def'
self.method_def = "def_static" if static else "def"
def _conv_arg(self, value):
if isinstance(value, bool):
@@ -68,18 +77,23 @@ class PyBindMethod(PyBindExport):
raise TypeError("Unsupported PyBind default value type")
def export(self, code, cname):
arguments = [ '"${{self.name}}"', '&${cname}::${{self.cxx_name}}' ]
arguments = ['"${{self.name}}"', "&${cname}::${{self.cxx_name}}"]
if self.return_value_policy:
arguments.append('pybind11::return_value_policy::'
'${{self.return_value_policy}}')
arguments.append(
"pybind11::return_value_policy::"
"${{self.return_value_policy}}"
)
if self.args:
def get_arg_decl(arg):
if isinstance(arg, tuple):
name, default = arg
return 'py::arg("%s") = %s' % (
name, self._conv_arg(default))
name,
self._conv_arg(default),
)
else:
return 'py::arg("%s")' % arg
arguments.extend(list([ get_arg_decl(a) for a in self.args ]))
code('.' + self.method_def + '(' + ', '.join(arguments) + ')')
arguments.extend(list([get_arg_decl(a) for a in self.args]))
code("." + self.method_def + "(" + ", ".join(arguments) + ")")

View File

@@ -52,39 +52,47 @@ color_names = "Black Red Green Yellow Blue Magenta Cyan".split()
# Please feel free to add information about other terminals here.
#
capability_map = {
'Bold': 'bold',
'Dim': 'dim',
'Blink': 'blink',
'Underline': 'smul',
'Reverse': 'rev',
'Standout': 'smso',
'Normal': 'sgr0'
"Bold": "bold",
"Dim": "dim",
"Blink": "blink",
"Underline": "smul",
"Reverse": "rev",
"Standout": "smso",
"Normal": "sgr0",
}
capability_names = list(capability_map.keys())
def null_cap_string(s, *args):
return ''
return ""
try:
import curses
curses.setupterm()
def cap_string(s, *args):
cap = curses.tigetstr(s)
if cap:
return curses.tparm(cap, *args).decode('utf-8')
return curses.tparm(cap, *args).decode("utf-8")
else:
return ''
return ""
except:
cap_string = null_cap_string
class ColorStrings(object):
def __init__(self, cap_string):
for i, c in enumerate(color_names):
setattr(self, c, cap_string('setaf', i))
setattr(self, c, cap_string("setaf", i))
for name, cap in capability_map.items():
setattr(self, name, cap_string(cap))
termcap = ColorStrings(cap_string)
no_termcap = ColorStrings(null_cap_string)
@@ -93,7 +101,8 @@ if sys.stdout.isatty():
else:
tty_termcap = no_termcap
def get_termcap(use_colors = None):
def get_termcap(use_colors=None):
if use_colors:
return termcap
elif use_colors is None:
@@ -102,19 +111,27 @@ def get_termcap(use_colors = None):
else:
return no_termcap
def test_termcap(obj):
for c_name in color_names:
c_str = getattr(obj, c_name)
print(c_str + c_name + obj.Normal)
for attr_name in capability_names:
if attr_name == 'Normal':
if attr_name == "Normal":
continue
attr_str = getattr(obj, attr_name)
print(attr_str + c_str + attr_name + " " + c_name + obj.Normal)
print(obj.Bold + obj.Underline +
c_name + "Bold Underline " + c + obj.Normal)
print(
obj.Bold
+ obj.Underline
+ c_name
+ "Bold Underline "
+ c
+ obj.Normal
)
if __name__ == '__main__':
if __name__ == "__main__":
print("=== termcap enabled ===")
test_termcap(termcap)
print(termcap.Normal)

View File

@@ -26,8 +26,8 @@
import textwrap
class TerminalFormatter:
class TerminalFormatter:
def __init__(self, max_width=80):
# text_width holds the actual width we'll be wrapping to.
# This takes into account the current terminal size.
@@ -35,9 +35,13 @@ class TerminalFormatter:
def __terminal_size(self):
import fcntl, termios, struct
h, w, hp, wp = struct.unpack('HHHH',
fcntl.ioctl(0, termios.TIOCGWINSZ,
struct.pack('HHHH', 0, 0, 0, 0)))
h, w, hp, wp = struct.unpack(
"HHHH",
fcntl.ioctl(
0, termios.TIOCGWINSZ, struct.pack("HHHH", 0, 0, 0, 0)
),
)
return w, h
def __get_paragraphs(self, text, flatten=False):
@@ -74,15 +78,17 @@ class TerminalFormatter:
for line in text.splitlines():
stripped = line.strip()
if not stripped: #I.e. a blank line.
if not stripped: # I.e. a blank line.
paragraphs.append(
{False: "\n", True: " "}[flatten].join(cur_paragraph))
{False: "\n", True: " "}[flatten].join(cur_paragraph)
)
cur_paragraph = []
else:
cur_paragraph.append(stripped)
paragraphs.append(
{False: "\n", True: " "}[flatten].join(cur_paragraph))
{False: "\n", True: " "}[flatten].join(cur_paragraph)
)
return paragraphs
@@ -115,15 +121,19 @@ class TerminalFormatter:
paragraphs = self.__get_paragraphs(text, True)
# Wrap and Indent the paragraphs
wrapper = textwrap.TextWrapper(width =
max((self.__text_width - indent),1))
wrapper = textwrap.TextWrapper(
width=max((self.__text_width - indent), 1)
)
# The first paragraph is special case due to the inclusion of the label
formatted_paragraphs = [' ' * max((indent - len(label)),0) \
+ label + wrapper.wrap(paragraphs[0])[0]]
formatted_paragraphs = [
" " * max((indent - len(label)), 0)
+ label
+ wrapper.wrap(paragraphs[0])[0]
]
for paragraph in paragraphs:
for line in wrapper.wrap(paragraph[1:])[1:]:
formatted_paragraphs.append(' ' * indent + line)
formatted_paragraphs.append('\n')
formatted_paragraphs.append(" " * indent + line)
formatted_paragraphs.append("\n")
# Remove the last line break
return '\n'.join(formatted_paragraphs[:-1])
return "\n".join(formatted_paragraphs[:-1])