Fix some tests and refactor simulation script

Replaces the "clkMhz" frequency field with the clock period "tCK" in the
memspec JSONs and the configuration test, migrates the Python tooling from
pandas to polars, renames Configuration to ConfigTokens, and splits
get_options_from_args() into get_argparser() and get_options().

2024-03-04 12:15:28 +01:00
committed by Derek Christ
parent be1807e9b0
commit ecf9127faa
8 changed files with 104 additions and 137 deletions

View File

@@ -111,7 +111,7 @@
"PRPDEN": 1,
"REFPDEN": 1,
"RTRS": 1,
"clkMhz": 533
"tCK": 1877
}
},
"simconfig": {

View File

@@ -1,22 +1,22 @@
 #!/usr/bin/env python3
-import os
-import matplotlib.pyplot as plt
+from pathlib import Path
 from dataclasses import dataclass
 from simulation import (
 Simulation,
-Configuration,
-get_options_from_args,
+ConfigTokens,
+get_argparser,
+get_options,
 simulation_results,
 )
-script_directory = os.path.dirname(os.path.abspath(__file__))
-base_config = os.path.join(script_directory, "example_ddr3_simulations.json")
+import matplotlib.pyplot as plt
+SCRIPT_DIRECTORY = Path(__file__).parent.resolve()
+DEFAULT_BASE_CONFIG = SCRIPT_DIRECTORY / "example_ddr3_simulations.json"
 simulations: list[Simulation] = []
 @dataclass(frozen=True)
 class SchedulerBuffer:
 buffer: str
@@ -35,7 +35,7 @@ for refresh_policy in refresh_policies:
 for pattern in patterns:
 PAGE_POLICY = "Open" if pattern == "sequential" else "Closed"
-config = Configuration(
+config_tokens = ConfigTokens(
 f"{refresh_policy}_{scheduler_buffer.buffer}_{scheduler_buffer.size}_{pattern}",
 {
 "refresh_policy": refresh_policy,
@@ -46,69 +46,39 @@ for refresh_policy in refresh_policies:
 },
 )
-simulations.append(Simulation(config))
+simulations.append(Simulation(config_tokens))
-options = get_options_from_args()
+parser = get_argparser()
+options = get_options(parser.parse_args())
 if options.base_config is None:
-options.base_config = base_config
+options.base_config = DEFAULT_BASE_CONFIG
-dataframe = simulation_results(options, simulations)
+df = simulation_results(options, simulations)
 # Generate interesting plots!
-for refresh_policy in refresh_policies:
-refresh_policy_filter = dataframe["refresh_policy"] == refresh_policy
-for scheduler_buffer in scheduler_buffers:
-scheduler_buffer_filter = (
-dataframe["scheduler_buffer"] == scheduler_buffer.buffer
-)
-scheduler_buffer_size_filter = (
-dataframe["scheduler_buffer_size"] == scheduler_buffer.size
-)
-filters = (
-refresh_policy_filter
-& scheduler_buffer_filter
-& scheduler_buffer_size_filter
-)
-TEMP_DATAFRAME = dataframe[filters]
-for pattern in patterns:
-pattern_filter = dataframe["pattern"] == pattern
+for name, data in df.group_by("refresh_policy", "scheduler_buffer_size", maintain_order=True):
 plt.figure(figsize=(8.0, 12.0))
-filters = (
-refresh_policy_filter
-& scheduler_buffer_filter
-& scheduler_buffer_size_filter
-)
-TEMP_DATAFRAME = dataframe[filters]
 plt.bar(
 "pattern",
 "bandwidth",
-data=TEMP_DATAFRAME,
+data=data,
 )
-# Plot MAX line
+# Plot MAX bar
 plt.bar(
 "MAX",
 "max_bandwidth",
-data=TEMP_DATAFRAME,
+data=data,
 color="darkgray",
 )
-title = f"{refresh_policy} {scheduler_buffer.buffer} {scheduler_buffer.size}"
-plt.title(title)
+plt.title(name)
 plt.axis(ymin=0, ymax=120)
 plt.xlabel("Access Pattern")
 plt.ylabel("Bandwidth [Gb/s]")
-plt.savefig(f"{options.out_dir}/{title}.png")
+plt.savefig(options.out_dir / f"{name}.png")
 plt.close()
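
Review note: polars' group_by replaces the hand-rolled pandas filter loops above. Iterating a group_by yields one (key, sub-frame) pair per group, and maintain_order=True keeps groups in first-appearance order; the key is a tuple, which is why plt.title(name) now renders a tuple. A minimal sketch with made-up data:

import polars as pl

df = pl.DataFrame({
    "refresh_policy": ["AllBank", "AllBank", "PerBank", "PerBank"],
    "scheduler_buffer_size": [8, 8, 8, 8],
    "pattern": ["sequential", "random", "sequential", "random"],
    "bandwidth": [93.1, 61.4, 88.2, 59.7],
})

# Each iteration yields (key_tuple, sub_frame) for one group.
for name, data in df.group_by("refresh_policy", "scheduler_buffer_size", maintain_order=True):
    print(name, data["bandwidth"].to_list())
# ('AllBank', 8) [93.1, 61.4]
# ('PerBank', 8) [88.2, 59.7]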

View File

@@ -1,3 +1,3 @@
 tqdm
-pandas
+polars
 matplotlib

View File

@@ -1,45 +1,34 @@
 import argparse
-import subprocess
-import sys
-import sqlite3
 import json
 import os
 import re
-from pathlib import Path
-from dataclasses import dataclass, fields
-from typing import Optional
+import sqlite3
+import subprocess
+import sys
+from dataclasses import dataclass, field, fields
 from multiprocessing.pool import ThreadPool
-from tqdm import tqdm
+from pathlib import Path
-import pandas as pd
+import polars as pl
+from tqdm import tqdm
 sys.path.append("extensions/apps/traceAnalyzer/scripts")
-from metrics import (
-average_response_latency_in_ns,
-max_response_latency_in_ns,
-memory_active_in_percent,
-maximum_data_rate,
-)
+from metrics import (average_response_latency_in_ns,
+max_response_latency_in_ns, maximum_data_rate,
+memory_active_in_percent)
 @dataclass
 class Options:
-dramsys: Path
+dramsys: Path | None
 override: bool
 out_dir: Path
 simulate: bool
 metrics: bool
 base_config: Path | None
-resource_dir: Path | None = None
-jobs: int | None = None
-@dataclass(frozen=True)
-class SubConfig:
-name: str
-parameters: dict[str, str]
+resource_dir: Path | None
+jobs: int | None
 @dataclass(frozen=True)
@@ -53,26 +42,32 @@ class Statistics:
 @dataclass(frozen=True)
-class Configuration:
+class ConfigTokens:
 name: str
 tokens: dict[str, str | int]
 @dataclass
 class Simulation:
-config: Configuration
-directory: Optional[str] = None
-statistics: Optional[list[Statistics]] = None
+config_tokens: ConfigTokens
+directory: Path | None = None
+statistics: list[Statistics] = field(default_factory=list)
 def run_dramsys(dramsys: Path, simulation_dir: Path, resource_dir: Path | None):
-with open(f"{simulation_dir}/out.txt", "w", encoding="utf-8") as output_file:
-command = [dramsys.absolute(), "config.json"]
+with open(simulation_dir / "stdout.txt", "w", encoding="utf-8") as output_file:
+command = [dramsys.absolute().as_posix(), "config.json"]
 if resource_dir is not None:
-command.append(resource_dir)
+command.append(resource_dir.absolute().as_posix())
-subprocess.run(command, cwd=simulation_dir, stdout=output_file, check=True)
+subprocess.run(
+command,
+cwd=simulation_dir,
+stdout=output_file,
+stderr=output_file,
+check=True,
+)
 def calculate_simulation_metrics(simulation: Simulation):
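
Review note: switching statistics from Optional[list[Statistics]] = None to field(default_factory=list) removes the None checks and avoids the classic shared-mutable-default pitfall; dataclasses reject a bare list default outright. For illustration:

from dataclasses import dataclass, field

@dataclass
class Simulation:
    statistics: list = field(default_factory=list)  # fresh list per instance

# statistics: list = []  would raise ValueError: mutable default not allowed
a, b = Simulation(), Simulation()
a.statistics.append(1)
assert b.statistics == []  # instances do not share state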
@@ -131,12 +126,13 @@ def simulate(
 work_item: WorkItem,
 ):
 simulation_dir = work_item.simulation.directory
+assert simulation_dir
 json_config = None
 with open(work_item.base_config, encoding="utf-8") as config_file:
 config_string = config_file.read()
 config_string = replace_placeholders(
-config_string, work_item.simulation.config.tokens
+config_string, work_item.simulation.config_tokens.tokens
 )
 json_config = json.loads(config_string)
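
The config.tokens → config_tokens.tokens rename threads through replace_placeholders, whose body is not shown in this diff. A hypothetical sketch of its shape, assuming ${token}-style placeholders in the base config (the placeholder syntax is an assumption, not confirmed by this commit):

# Hypothetical: the actual placeholder syntax is not visible in this diff.
def replace_placeholders(text: str, tokens: dict[str, str | int]) -> str:
    for key, value in tokens.items():
        text = text.replace("${" + key + "}", str(value))
    return text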
@@ -151,13 +147,13 @@ def simulate(
 calculate_simulation_metrics(work_item.simulation)
-def generate_dataframe(simulations: list[Simulation], out_dir: str) -> pd.DataFrame:
-# Pack results in a panda dataframe
+def generate_dataframe(simulations: list[Simulation], out_dir: Path) -> pl.DataFrame:
+# Pack results in dataframe
 labels = ["name", "channel"]
-statistic_labels = list(map(lambda field: field.name, fields(Statistics)))
+statistic_labels = [field.name for field in fields(Statistics)]
 # Get one simulation...
-config_keys, _ = zip(*simulations[0].config.tokens.items())
+config_keys = simulations[0].config_tokens.tokens.keys()
 labels.extend(config_keys)
 labels.extend(statistic_labels)
@@ -165,15 +161,15 @@ def generate_dataframe(simulations: list[Simulation], out_dir: str) -> pd.DataFr
 entries = []
 for simulation in simulations:
-_, config_values = zip(*simulation.config.tokens.items())
+config_values = simulation.config_tokens.tokens.values()
 for stat in simulation.statistics:
-channel_pattern = re.compile("(?<=ch)[0-9]+")
-channel = int(channel_pattern.search(stat.filename)[0])
+m = re.search("(?<=ch)[0-9]+", stat.filename)
+channel = m.group(0) if m else -1
 entries.append(
 [
-simulation.config.name,
+simulation.config_tokens.name,
 channel,
 *config_values,
 stat.filename,
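
Review note: the new lookbehind search degrades gracefully when a filename lacks a chN component, though the fallback mixes types — m.group(0) is a string while the sentinel is the int -1:

import re

m = re.search("(?<=ch)[0-9]+", "statistics_ch2.csv")
channel = m.group(0) if m else -1  # "2" (str) on a match, -1 (int) otherwise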
@@ -185,17 +181,17 @@ def generate_dataframe(simulations: list[Simulation], out_dir: str) -> pd.DataFr
 ]
 )
-dataframe = pd.DataFrame(data=entries, columns=labels)
-dataframe.to_csv(f"{out_dir}/statistics.csv", sep=";")
+df = pl.DataFrame(data=entries, schema=labels)
+df.write_csv(out_dir / "statistics.csv")
-return dataframe
+return df
 def populate_simulation_directories(
-simulations: list[Simulation], out_dir: str, override: bool
+simulations: list[Simulation], out_dir: Path, override: bool
 ):
 for simulation in simulations:
-simulation_dir = Path(f"{out_dir}/simulations/{simulation.config.name}")
+simulation_dir = out_dir / "simulations" / simulation.config_tokens.name
 try:
 simulation_dir.mkdir(parents=True, exist_ok=override)
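
Review note: pl.DataFrame(data=entries, schema=labels) builds the frame from row lists, so polars has to infer the orientation; passing orient="row" would make that explicit. A sketch with illustrative values:

import polars as pl

labels = ["name", "channel", "bandwidth"]
entries = [["AllBank_FIFO_8_sequential", 0, 93.1]]

# orient="row" states explicitly that each inner list is one row.
df = pl.DataFrame(data=entries, schema=labels, orient="row")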
@@ -209,8 +205,8 @@ def populate_simulation_directories(
 def calculate_metrics(
-simulations: list[Simulation], out_dir: str, jobs: int | None
-) -> pd.DataFrame:
+simulations: list[Simulation], out_dir: Path, jobs: int | None
+) -> pl.DataFrame:
 populate_simulation_directories(simulations, out_dir, override=True)
 with ThreadPool(jobs) as thread_pool:
@@ -223,13 +219,17 @@ def calculate_metrics(
 return generate_dataframe(simulations, out_dir)
-def run_simulations(simulations: list[Simulation], options: Options) -> pd.DataFrame:
+def run_simulations(simulations: list[Simulation], options: Options) -> pl.DataFrame:
 if len(simulations) == 0:
 print("Must specify at least one simulation configuration!")
 sys.exit(-1)
+if options.dramsys is None:
+print("Must specify DRAMSys executable!")
+sys.exit(-1)
 if options.base_config is None:
-print("Must specify a base config")
+print("Must specify a base config!")
 sys.exit(-1)
 print("Create simulation directories...")
@@ -251,9 +251,11 @@ def run_simulations(simulations: list[Simulation], options: Options) -> pd.DataF
 return calculate_metrics(simulations, options.out_dir, options.jobs)
-def get_options_from_args() -> Options:
+def get_argparser() -> argparse.ArgumentParser:
 parser = argparse.ArgumentParser(description="DRAMSys simulation utility")
-parser.add_argument("dramsys", type=Path, help="path to the DRAMSys executable")
+parser.add_argument(
+"dramsys", type=Path, nargs="?", help="path to the DRAMSys executable"
+)
 parser.add_argument(
 "--simulate",
 default=False,
@@ -269,6 +271,7 @@ def get_options_from_args() -> Options:
 parser.add_argument(
 "-f",
 "--force",
+dest="override",
 default=False,
 action="store_true",
 help="force override existing simulation artifacts",
@@ -298,23 +301,16 @@ def get_options_from_args() -> Options:
help="run N jobs in parallel",
)
arguments = parser.parse_args()
return Options(
arguments.dramsys,
arguments.force,
arguments.out_dir,
arguments.simulate,
arguments.metrics,
arguments.base_config,
arguments.resource_dir,
arguments.jobs,
)
return parser
def get_options(args: argparse.Namespace) -> Options:
return Options(**vars(args))
def simulation_results(
options: Options,
simulations: list[Simulation],
) -> pd.DataFrame:
) -> pl.DataFrame:
if options.simulate:
return run_simulations(simulations, options)
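
Review note: with parser construction and Options mapping split apart, Options(**vars(args)) only works if every argparse dest matches a dataclass field by name — which is what the new dest="override" on --force and the dropped field defaults guarantee. A condensed sketch:

import argparse
from dataclasses import dataclass
from pathlib import Path

@dataclass
class Options:
    dramsys: Path | None
    override: bool

def get_argparser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser()
    parser.add_argument("dramsys", type=Path, nargs="?")
    # dest must equal the dataclass field name for Options(**vars(args)).
    parser.add_argument("-f", "--force", dest="override", default=False, action="store_true")
    return parser

def get_options(args: argparse.Namespace) -> Options:
    return Options(**vars(args))

opts = get_options(get_argparser().parse_args(["-f"]))
assert opts.override and opts.dramsys is None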
@@ -323,9 +319,10 @@ def simulation_results(
print("Summarizing simulation results in statistics.csv...")
statistics_file = f"{options.out_dir}/statistics.csv"
statistics_file = options.out_dir / "statistics.csv"
if not os.path.isfile(statistics_file):
print("Run the simulations first to generate simulation artifacts")
sys.exit(-1)
return pd.read_csv(f"{options.out_dir}/statistics.csv", sep=";")
return pl.read_csv(statistics_file)

View File

@@ -136,7 +136,7 @@
"WTR_L": 16,
"WTR_S": 4,
"XP": 12,
"clkMhz": 1600
"tCK": 625
}
},
"simconfig": {

View File

@@ -71,7 +71,7 @@
"ACTPDEN": 2,
"PRPDEN": 2,
"REFPDEN": 2,
"clkMhz": 1600
"tCK": 625
}
}
}

View File

@@ -164,7 +164,7 @@ DRAMSys::Config::MemSpec ConfigurationTest::createMemSpec()
{"REFI1", 6240}, {"REFI2", 3120}, {"REFISB", 1560}, {"REFSBRD_slr", 48},
{"REFSBRD_dlr", 0}, {"RTRS", 2}, {"CPDED", 8}, {"PD", 12},
{"XP", 12}, {"ACTPDEN", 2}, {"PRPDEN", 2}, {"REFPDEN", 2},
{"clkMhz", 1600},
{"tCK", 625},
}}};
return {memArchitectureSpec,
@@ -446,7 +446,7 @@ TEST_F(ConfigurationTest, MemSpec)
"WTR_L": 16,
"WTR_S": 4,
"XP": 12,
"clkMhz": 1600
"tCK": 625
}
}
}