Introduce powerful simulation script and provide an example
This commit is contained in:
142
scripts/simulation/example_ddr3_simulations.json
Normal file
142
scripts/simulation/example_ddr3_simulations.json
Normal file
@@ -0,0 +1,142 @@
|
|||||||
|
{
|
||||||
|
"simulation": {
|
||||||
|
"addressmapping": {
|
||||||
|
"BANK_BIT": [
|
||||||
|
6,
|
||||||
|
7,
|
||||||
|
8
|
||||||
|
],
|
||||||
|
"BYTE_BIT": [
|
||||||
|
0,
|
||||||
|
1,
|
||||||
|
2
|
||||||
|
],
|
||||||
|
"COLUMN_BIT": [
|
||||||
|
3,
|
||||||
|
4,
|
||||||
|
5,
|
||||||
|
9,
|
||||||
|
10,
|
||||||
|
11,
|
||||||
|
12,
|
||||||
|
13,
|
||||||
|
14,
|
||||||
|
15
|
||||||
|
],
|
||||||
|
"ROW_BIT": [
|
||||||
|
16,
|
||||||
|
17,
|
||||||
|
18,
|
||||||
|
19,
|
||||||
|
20,
|
||||||
|
21,
|
||||||
|
22,
|
||||||
|
23,
|
||||||
|
24,
|
||||||
|
25,
|
||||||
|
26,
|
||||||
|
27,
|
||||||
|
28,
|
||||||
|
29
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"mcconfig": {
|
||||||
|
"PagePolicy": "<page_policy>",
|
||||||
|
"SchedulerBuffer": "<scheduler_buffer>",
|
||||||
|
"RequestBufferSize": <scheduler_buffer_size>,
|
||||||
|
"RefreshPolicy": "<refresh_policy>",
|
||||||
|
"RefreshMaxPostponed": 0,
|
||||||
|
"RefreshMaxPulledin": 0,
|
||||||
|
"RefreshManagement": false,
|
||||||
|
"Arbiter": "Simple",
|
||||||
|
"CmdMux": "Oldest",
|
||||||
|
"MaxActiveTransactions": 128,
|
||||||
|
"PowerDownPolicy": "NoPowerDown",
|
||||||
|
"RespQueue": "Fifo",
|
||||||
|
"Scheduler": "FrFcfs"
|
||||||
|
},
|
||||||
|
"memspec": {
|
||||||
|
"memarchitecturespec": {
|
||||||
|
"burstLength": 8,
|
||||||
|
"dataRate": 2,
|
||||||
|
"nbrOfBanks": 8,
|
||||||
|
"nbrOfChannels": 1,
|
||||||
|
"nbrOfColumns": 1024,
|
||||||
|
"nbrOfDevices": 8,
|
||||||
|
"nbrOfRanks": 1,
|
||||||
|
"nbrOfRows": 16384,
|
||||||
|
"width": 8
|
||||||
|
},
|
||||||
|
"memoryId": "MICRON_1Gb_DDR3-1600_8bit_G",
|
||||||
|
"memoryType": "DDR3",
|
||||||
|
"mempowerspec": {
|
||||||
|
"idd0": 70.0,
|
||||||
|
"idd2n": 45.0,
|
||||||
|
"idd2p0": 12.0,
|
||||||
|
"idd2p1": 30.0,
|
||||||
|
"idd3n": 45.0,
|
||||||
|
"idd3p0": 35.0,
|
||||||
|
"idd3p1": 35.0,
|
||||||
|
"idd4r": 140.0,
|
||||||
|
"idd4w": 145.0,
|
||||||
|
"idd5": 170.0,
|
||||||
|
"idd6": 8.0,
|
||||||
|
"vdd": 1.5
|
||||||
|
},
|
||||||
|
"memtimingspec": {
|
||||||
|
"ACTPDEN": 1,
|
||||||
|
"AL": 0,
|
||||||
|
"CCD": 4,
|
||||||
|
"CKE": 3,
|
||||||
|
"CKESR": 4,
|
||||||
|
"CL": 10,
|
||||||
|
"DQSCK": 0,
|
||||||
|
"FAW": 24,
|
||||||
|
"PRPDEN": 1,
|
||||||
|
"RAS": 28,
|
||||||
|
"RC": 38,
|
||||||
|
"RCD": 10,
|
||||||
|
"REFI": 6240,
|
||||||
|
"REFPDEN": 1,
|
||||||
|
"RFC": 88,
|
||||||
|
"RL": 10,
|
||||||
|
"RP": 10,
|
||||||
|
"RRD": 5,
|
||||||
|
"RTP": 6,
|
||||||
|
"RTRS": 1,
|
||||||
|
"WL": 8,
|
||||||
|
"WR": 12,
|
||||||
|
"WTR": 6,
|
||||||
|
"XP": 6,
|
||||||
|
"XPDLL": 20,
|
||||||
|
"XS": 96,
|
||||||
|
"XSDLL": 512,
|
||||||
|
"clkMhz": 800
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"simconfig": {
|
||||||
|
"AddressOffset": 0,
|
||||||
|
"CheckTLM2Protocol": false,
|
||||||
|
"DatabaseRecording": true,
|
||||||
|
"Debug": false,
|
||||||
|
"EnableWindowing": false,
|
||||||
|
"PowerAnalysis": false,
|
||||||
|
"SimulationName": "example",
|
||||||
|
"SimulationProgressBar": false,
|
||||||
|
"StoreMode": "NoStorage",
|
||||||
|
"UseMalloc": false,
|
||||||
|
"WindowSize": 1000
|
||||||
|
},
|
||||||
|
"simulationid": "ddr3-example",
|
||||||
|
"tracesetup": [
|
||||||
|
{
|
||||||
|
"clkMhz": 2000,
|
||||||
|
"type": "generator",
|
||||||
|
"name": "gen1",
|
||||||
|
"numRequests": 20000,
|
||||||
|
"rwRatio": 0.85,
|
||||||
|
"addressDistribution": "<pattern>"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
114
scripts/simulation/example_ddr3_simulations.py
Executable file
114
scripts/simulation/example_ddr3_simulations.py
Executable file
@@ -0,0 +1,114 @@
|
|||||||
|
#!/usr/bin/env python3
"""Example DDR3 design-space exploration.

Sweeps refresh policy, scheduler buffer configuration and access pattern over
a DDR3 base configuration, runs the simulations via the ``simulation`` helper
module and emits one bandwidth bar plot per configuration.
"""

import os

import matplotlib.pyplot as plt

from dataclasses import dataclass
from simulation import (
    Simulation,
    Configuration,
    get_options_from_args,
    simulation_results,
)

script_directory = os.path.dirname(os.path.abspath(__file__))
base_config = os.path.join(script_directory, "example_ddr3_simulations.json")

simulations: list[Simulation] = []


@dataclass(frozen=True)
class SchedulerBuffer:
    """A scheduler buffer type together with its request buffer size."""

    buffer: str
    size: int


refresh_policies = ["NoRefresh", "AllBank"]
patterns = ["sequential", "random"]
scheduler_buffers = [
    SchedulerBuffer("Bankwise", 8),
    SchedulerBuffer("Bankwise", 16),
]

# Build one Simulation per point of the (refresh policy, buffer, pattern) grid.
for refresh_policy in refresh_policies:
    for scheduler_buffer in scheduler_buffers:
        for pattern in patterns:
            # Sequential accesses profit from row hits, so keep pages open;
            # random accesses are better served by a closed-page policy.
            page_policy = "Open" if pattern == "sequential" else "Closed"

            config = Configuration(
                f"{refresh_policy}_{scheduler_buffer.buffer}_{scheduler_buffer.size}_{pattern}",
                {
                    "refresh_policy": refresh_policy,
                    "pattern": pattern,
                    "page_policy": page_policy,
                    "scheduler_buffer": scheduler_buffer.buffer,
                    "scheduler_buffer_size": scheduler_buffer.size,
                },
            )

            simulations.append(Simulation(config))


options = get_options_from_args()

if options.base_config is None:
    options.base_config = base_config

dataframe = simulation_results(options, simulations)

# Generate one bandwidth plot per (refresh policy, scheduler buffer, pattern).
for refresh_policy in refresh_policies:
    refresh_policy_filter = dataframe["refresh_policy"] == refresh_policy

    for scheduler_buffer in scheduler_buffers:
        scheduler_buffer_filter = (
            dataframe["scheduler_buffer"] == scheduler_buffer.buffer
        )
        scheduler_buffer_size_filter = (
            dataframe["scheduler_buffer_size"] == scheduler_buffer.size
        )

        for pattern in patterns:
            # BUG FIX: the pattern filter used to be computed but never
            # applied, and the title (and thus the output filename) did not
            # include the pattern, so the per-pattern figures silently
            # overwrote each other.
            pattern_filter = dataframe["pattern"] == pattern

            plt.figure(figsize=(8.0, 12.0))

            filters = (
                refresh_policy_filter
                & scheduler_buffer_filter
                & scheduler_buffer_size_filter
                & pattern_filter
            )
            selection = dataframe[filters]

            plt.bar(
                "pattern",
                "bandwidth",
                data=selection,
            )

            # Plot MAX line: the theoretical peak bandwidth as a reference bar.
            plt.bar(
                "MAX",
                "max_bandwidth",
                data=selection,
                color="darkgray",
            )

            title = (
                f"{refresh_policy} {scheduler_buffer.buffer} "
                f"{scheduler_buffer.size} {pattern}"
            )

            plt.title(title)
            plt.axis(ymin=0, ymax=120)
            plt.xlabel("Access Pattern")
            plt.ylabel("Bandwidth [Gb/s]")

            plt.savefig(f"{options.out_dir}/{title}.png")
            plt.close()
|
||||||
3
scripts/simulation/requirements.txt
Normal file
3
scripts/simulation/requirements.txt
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
tqdm
|
||||||
|
pandas
|
||||||
|
matplotlib
|
||||||
331
scripts/simulation/simulation.py
Executable file
331
scripts/simulation/simulation.py
Executable file
@@ -0,0 +1,331 @@
|
|||||||
|
import argparse
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import sqlite3
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
from dataclasses import dataclass, fields
|
||||||
|
from typing import Optional
|
||||||
|
from multiprocessing.pool import ThreadPool
|
||||||
|
from tqdm import tqdm
|
||||||
|
|
||||||
|
import pandas as pd
|
||||||
|
|
||||||
|
sys.path.append("extensions/apps/traceAnalyzer/scripts")
|
||||||
|
|
||||||
|
from metrics import (
|
||||||
|
average_response_latency_in_ns,
|
||||||
|
max_response_latency_in_ns,
|
||||||
|
memory_active_in_percent,
|
||||||
|
maximum_data_rate,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class Options:
|
||||||
|
dramsys: Path
|
||||||
|
override: bool
|
||||||
|
out_dir: Path
|
||||||
|
simulate: bool
|
||||||
|
metrics: bool
|
||||||
|
base_config: Path | None
|
||||||
|
resource_dir: Path | None = None
|
||||||
|
jobs: int | None = None
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(frozen=True)
class SubConfig:
    """A named set of configuration parameters.

    NOTE(review): not referenced anywhere else in this module — presumably
    kept for external callers; confirm before removing.
    """

    name: str
    parameters: dict[str, str]
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(frozen=True)
class Statistics:
    """Metrics extracted from a single trace database (.tdb) file."""

    # Name of the .tdb file the metrics were computed from.
    filename: str
    # Fraction of time the data bus was busy (0.0 to 1.0).
    databus_utilization: float
    # Achieved bandwidth, i.e. databus_utilization * max_bandwidth.
    bandwidth: float
    # Theoretical peak bandwidth (maximum data rate scaled by 1/1000).
    max_bandwidth: float
    # Average response latency in ns.
    avg_latency: float
    # Maximum response latency in ns.
    max_latency: float
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(frozen=True)
|
||||||
|
class Configuration:
|
||||||
|
name: str
|
||||||
|
tokens: dict[str, str | int]
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class Simulation:
    """One simulation: its configuration plus results filled in later."""

    config: Configuration
    # Set by populate_simulation_directories().
    directory: Optional[str] = None
    # Set by calculate_simulation_metrics().
    statistics: Optional[list[Statistics]] = None
|
||||||
|
|
||||||
|
|
||||||
|
def run_dramsys(dramsys: Path, simulation_dir: Path, resource_dir: Path | None):
    """Run the DRAMSys executable inside *simulation_dir*.

    The process reads the previously rendered ``config.json`` from its
    working directory; stdout is captured in ``<simulation_dir>/out.txt``.
    Raises ``subprocess.CalledProcessError`` on a non-zero exit status.
    """
    command = [dramsys.absolute(), "config.json"]
    if resource_dir is not None:
        command.append(resource_dir)

    with open(f"{simulation_dir}/out.txt", "w", encoding="utf-8") as output_file:
        subprocess.run(command, cwd=simulation_dir, stdout=output_file, check=True)
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_simulation_metrics(simulation: Simulation):
    """Compute Statistics for every trace database of *simulation*.

    Scans ``simulation.directory`` for ``.tdb`` SQLite files, derives the
    bandwidth/latency metrics from each and stores the resulting list on
    ``simulation.statistics``.  Databases that cannot be analyzed are skipped
    with a warning instead of aborting the whole run (deliberate best-effort).
    """
    simulation_dir = simulation.directory
    stats: list[Statistics] = []

    for file in os.listdir(simulation_dir):
        if not file.endswith(".tdb"):
            continue

        # BUG FIX: the connection was never closed, leaking one file handle
        # per database when many simulations are analyzed in one process.
        connection = sqlite3.connect(f"{simulation_dir}/{file}")
        try:
            max_bandwidth = maximum_data_rate(connection) / 1000
            avg_latency = average_response_latency_in_ns(connection)
            max_latency = max_response_latency_in_ns(connection)
            databus_utilization = memory_active_in_percent(connection) / 100
            bandwidth = databus_utilization * max_bandwidth

            stats.append(
                Statistics(
                    file,
                    databus_utilization,
                    bandwidth,
                    max_bandwidth,
                    avg_latency,
                    max_latency,
                )
            )
        except Exception as error:  # keep going: one bad database must not abort the run
            print(
                f"Warning: Could not calculate metrics for {simulation_dir}/{file}: {error}"
            )
        finally:
            connection.close()

    simulation.statistics = stats
|
||||||
|
|
||||||
|
|
||||||
|
# Replace placeholders with actual values
def replace_placeholders(config_json: str, tokens: dict) -> str:
    """Return *config_json* with every ``<key>`` placeholder substituted.

    For each entry ``key -> value`` in *tokens*, all occurrences of the
    literal string ``<key>`` are replaced by ``str(value)``.
    """
    for key, value in tokens.items():
        config_json = config_json.replace(f"<{key}>", str(value))
    return config_json
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class WorkItem:
    """Everything one worker thread needs to run a single simulation."""

    dramsys: Path
    simulation: Simulation
    base_config: Path
    resource_dir: Path | None
|
||||||
|
|
||||||
|
|
||||||
|
def simulate(
    work_item: WorkItem,
):
    """Run one simulation end to end.

    Renders the base configuration template with the simulation's tokens,
    writes the result as ``config.json`` into the simulation directory, runs
    DRAMSys there and finally computes the metrics.
    """
    simulation_dir = work_item.simulation.directory

    # Substitute the <placeholder> tokens and parse the result, so an invalid
    # template fails here rather than inside DRAMSys.
    with open(work_item.base_config, encoding="utf-8") as config_file:
        rendered = replace_placeholders(
            config_file.read(), work_item.simulation.config.tokens
        )
    json_config = json.loads(rendered)

    # Save config besides simulation directory
    simulation_json = simulation_dir / "config.json"
    with open(simulation_json, "w", encoding="utf-8") as config_file:
        json.dump(json_config, config_file, indent=4)

    run_dramsys(work_item.dramsys, simulation_dir, work_item.resource_dir)
    calculate_simulation_metrics(work_item.simulation)
|
||||||
|
|
||||||
|
|
||||||
|
def generate_dataframe(simulations: list[Simulation], out_dir: str) -> pd.DataFrame:
    """Collect the statistics of all *simulations* into one pandas DataFrame.

    One row is emitted per (simulation, trace database).  The frame is also
    written to ``<out_dir>/statistics.csv`` (';'-separated).  Assumes every
    simulation uses the same token keys as ``simulations[0]``.
    """
    # Pack results in a pandas dataframe.
    labels = ["name", "channel"]
    statistic_labels = [field.name for field in fields(Statistics)]

    # All simulations share the same token keys; take them from the first one.
    config_keys = list(simulations[0].config.tokens.keys())

    labels.extend(config_keys)
    labels.extend(statistic_labels)

    # PERF: compile the channel regex once instead of per statistics entry.
    channel_pattern = re.compile("(?<=ch)[0-9]+")

    entries = []
    for simulation in simulations:
        config_values = list(simulation.config.tokens.values())

        for stat in simulation.statistics:
            # Trace databases are named "...chN...": extract channel N.
            # NOTE(review): raises TypeError if a filename lacks "chN" —
            # presumably DRAMSys always emits it; confirm.
            channel = int(channel_pattern.search(stat.filename)[0])

            entries.append(
                [
                    simulation.config.name,
                    channel,
                    *config_values,
                    stat.filename,
                    stat.databus_utilization,
                    stat.bandwidth,
                    stat.max_bandwidth,
                    stat.avg_latency,
                    stat.max_latency,
                ]
            )

    dataframe = pd.DataFrame(data=entries, columns=labels)
    dataframe.to_csv(f"{out_dir}/statistics.csv", sep=";")

    return dataframe
|
||||||
|
|
||||||
|
|
||||||
|
def populate_simulation_directories(
|
||||||
|
simulations: list[Simulation], out_dir: str, override: bool
|
||||||
|
):
|
||||||
|
for simulation in simulations:
|
||||||
|
simulation_dir = Path(f"{out_dir}/simulations/{simulation.config.name}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
simulation_dir.mkdir(parents=True, exist_ok=override)
|
||||||
|
except FileExistsError:
|
||||||
|
print(
|
||||||
|
"Previous simulations artifacts found. To continue, enable the force override flag."
|
||||||
|
)
|
||||||
|
sys.exit(-1)
|
||||||
|
|
||||||
|
simulation.directory = simulation_dir
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_metrics(
    simulations: list[Simulation], out_dir: str, jobs: int | None
) -> pd.DataFrame:
    """Recompute metrics from existing simulation artifacts.

    Re-derives each simulation's directory (override=True accepts existing
    directories), analyzes the trace databases with *jobs* worker threads and
    returns the combined DataFrame.
    """
    populate_simulation_directories(simulations, out_dir, override=True)

    with ThreadPool(jobs) as thread_pool:
        results = thread_pool.imap_unordered(calculate_simulation_metrics, simulations)
        # Drain the iterator through tqdm purely for the progress bar.
        for _ in tqdm(results, total=len(simulations)):
            pass

    return generate_dataframe(simulations, out_dir)
|
||||||
|
|
||||||
|
|
||||||
|
def run_simulations(simulations: list[Simulation], options: Options) -> pd.DataFrame:
    """Run every simulation and return the collected metrics as a DataFrame.

    Exits with an error message when no simulations or no base configuration
    are given.
    """
    if not simulations:
        print("Must specify at least one simulation configuration!")
        sys.exit(-1)

    if options.base_config is None:
        print("Must specify a base config")
        sys.exit(-1)

    print("Create simulation directories...")
    populate_simulation_directories(simulations, options.out_dir, options.override)

    print("Run simulations...")
    work_items = [
        WorkItem(
            options.dramsys, simulation, options.base_config, options.resource_dir
        )
        for simulation in simulations
    ]
    with ThreadPool(options.jobs) as thread_pool:
        # Drain the iterator through tqdm purely for the progress bar.
        for _ in tqdm(
            thread_pool.imap_unordered(simulate, work_items), total=len(work_items)
        ):
            pass

    print("Calculate metrics...")
    return calculate_metrics(simulations, options.out_dir, options.jobs)
|
||||||
|
|
||||||
|
|
||||||
|
def get_options_from_args() -> Options:
    """Parse the command line into an :class:`Options` instance."""
    parser = argparse.ArgumentParser(description="DRAMSys simulation utility")
    parser.add_argument("dramsys", type=Path, help="path to the DRAMSys executable")
    parser.add_argument(
        "--simulate",
        default=False,
        action="store_true",
        help="run the simulations generating simulation artifacts",
    )
    parser.add_argument(
        "--metrics",
        default=False,
        action="store_true",
        help="calculate the metrics from existing simulation artifacts",
    )
    parser.add_argument(
        "-f",
        "--force",
        default=False,
        action="store_true",
        help="force override existing simulation artifacts",
    )
    parser.add_argument(
        "--out-dir",
        type=Path,
        default="out",
        help="path to the output directory",
    )
    parser.add_argument(
        "--base-config",
        type=Path,
        help="path to the base configuration file",
    )
    parser.add_argument(
        "--resource-dir",
        type=Path,
        help="path to the resource directory",
    )
    parser.add_argument(
        "-j",
        "--jobs",
        metavar="N",
        type=int,
        default=None,
        help="run N jobs in parallel",
    )

    arguments = parser.parse_args()
    # Map the parsed namespace onto Options explicitly by field name.
    return Options(
        dramsys=arguments.dramsys,
        override=arguments.force,
        out_dir=arguments.out_dir,
        simulate=arguments.simulate,
        metrics=arguments.metrics,
        base_config=arguments.base_config,
        resource_dir=arguments.resource_dir,
        jobs=arguments.jobs,
    )
|
||||||
|
|
||||||
|
|
||||||
|
def simulation_results(
|
||||||
|
options: Options,
|
||||||
|
simulations: list[Simulation],
|
||||||
|
) -> pd.DataFrame:
|
||||||
|
if options.simulate:
|
||||||
|
return run_simulations(simulations, options)
|
||||||
|
|
||||||
|
if options.metrics:
|
||||||
|
return calculate_metrics(simulations, options.out_dir, options.jobs)
|
||||||
|
|
||||||
|
print("Summarizing simulation results in statistics.csv...")
|
||||||
|
|
||||||
|
statistics_file = f"{options.out_dir}/statistics.csv"
|
||||||
|
if not os.path.isfile(statistics_file):
|
||||||
|
print("Run the simulations first to generate simulation artifacts")
|
||||||
|
sys.exit(-1)
|
||||||
|
|
||||||
|
return pd.read_csv(f"{options.out_dir}/statistics.csv", sep=";")
|
||||||
Reference in New Issue
Block a user