tests, configs, util, mem, python, systemc: Change base 10 units to base 2 (#1605)

This commit changes metric units (e.g. kB, MB, and GB) to binary units
(KiB, MiB, GiB) in various files. This PR covers files that were missed
by a previous PR that also made these changes.
This commit is contained in:
Erin (Jianghua) Le
2024-10-01 11:18:05 -07:00
committed by GitHub
parent d57208c615
commit c10feed524
45 changed files with 135 additions and 129 deletions

View File

@@ -84,7 +84,7 @@ class IOCache(Cache):
data_latency = 50
response_latency = 50
mshrs = 20
-size = "1kB"
+size = "1KiB"
tgts_per_mshr = 12
@@ -94,6 +94,6 @@ class PageTableWalkerCache(Cache):
data_latency = 2
response_latency = 2
mshrs = 10
-size = "1kB"
+size = "1KiB"
tgts_per_mshr = 12
is_read_only = False

View File

@@ -155,7 +155,7 @@ def addNoISAOptions(parser):
"--mem-size",
action="store",
type=str,
-default="512MB",
+default="512MiB",
help="Specify the physical memory size (single memory)",
)
parser.add_argument(
@@ -188,10 +188,10 @@ def addNoISAOptions(parser):
parser.add_argument("--num-dirs", type=int, default=1)
parser.add_argument("--num-l2caches", type=int, default=1)
parser.add_argument("--num-l3caches", type=int, default=1)
-parser.add_argument("--l1d_size", type=str, default="64kB")
-parser.add_argument("--l1i_size", type=str, default="32kB")
-parser.add_argument("--l2_size", type=str, default="2MB")
-parser.add_argument("--l3_size", type=str, default="16MB")
+parser.add_argument("--l1d_size", type=str, default="64KiB")
+parser.add_argument("--l1i_size", type=str, default="32KiB")
+parser.add_argument("--l2_size", type=str, default="2MiB")
+parser.add_argument("--l3_size", type=str, default="16MiB")
parser.add_argument("--l1d_assoc", type=int, default=2)
parser.add_argument("--l1i_assoc", type=int, default=2)
parser.add_argument("--l2_assoc", type=int, default=8)

View File

@@ -1704,7 +1704,7 @@ class HPI_ICache(Cache):
response_latency = 1
mshrs = 2
tgts_per_mshr = 8
-size = "32kB"
+size = "32KiB"
assoc = 2
# No prefetcher, this is handled by the core
@@ -1715,7 +1715,7 @@ class HPI_DCache(Cache):
response_latency = 1
mshrs = 4
tgts_per_mshr = 8
-size = "32kB"
+size = "32KiB"
assoc = 4
write_buffers = 4
prefetcher = StridePrefetcher(queue_size=4, degree=4)
@@ -1727,7 +1727,7 @@ class HPI_L2(Cache):
response_latency = 5
mshrs = 4
tgts_per_mshr = 8
-size = "1024kB"
+size = "1024KiB"
assoc = 16
write_buffers = 16
# prefetcher FIXME

View File

@@ -176,7 +176,7 @@ class O3_ARM_v7a_ICache(Cache):
response_latency = 1
mshrs = 2
tgts_per_mshr = 8
-size = "32kB"
+size = "32KiB"
assoc = 2
is_read_only = True
# Writeback clean lines as well
@@ -190,7 +190,7 @@ class O3_ARM_v7a_DCache(Cache):
response_latency = 2
mshrs = 6
tgts_per_mshr = 8
-size = "32kB"
+size = "32KiB"
assoc = 2
write_buffers = 16
# Consider the L2 a victim cache also for clean lines
@@ -204,7 +204,7 @@ class O3_ARM_v7aL2(Cache):
response_latency = 12
mshrs = 16
tgts_per_mshr = 8
-size = "1MB"
+size = "1MiB"
assoc = 16
write_buffers = 8
clusivity = "mostly_excl"

View File

@@ -124,7 +124,7 @@ class L1Cache(Cache):
class L1I(L1Cache):
mshrs = 2
-size = "32kB"
+size = "32KiB"
assoc = 2
is_read_only = True
tgts_per_mshr = 20
@@ -132,7 +132,7 @@ class L1I(L1Cache):
class L1D(L1Cache):
mshrs = 4
-size = "32kB"
+size = "32KiB"
assoc = 4
write_buffers = 4
@@ -144,7 +144,7 @@ class L2(Cache):
response_latency = 9
mshrs = 8
tgts_per_mshr = 12
-size = "512kB"
+size = "512KiB"
assoc = 8
write_buffers = 16
clusivity = "mostly_excl"

View File

@@ -177,7 +177,7 @@ class L1Cache(Cache):
# Instruction Cache
class L1I(L1Cache):
mshrs = 2
-size = "32kB"
+size = "32KiB"
assoc = 2
is_read_only = True
@@ -185,7 +185,7 @@ class L1I(L1Cache):
# Data Cache
class L1D(L1Cache):
mshrs = 6
-size = "32kB"
+size = "32KiB"
assoc = 2
write_buffers = 16
@@ -197,7 +197,7 @@ class L2(Cache):
response_latency = 15
mshrs = 16
tgts_per_mshr = 8
-size = "2MB"
+size = "2MiB"
assoc = 16
write_buffers = 8
clusivity = "mostly_excl"

View File

@@ -276,7 +276,7 @@ def main():
"--mem-size",
action="store",
type=str,
-default="2GB",
+default="2GiB",
help="Specify the physical memory size",
)
parser.add_argument(

View File

@@ -128,8 +128,8 @@ if __name__ == "__m5_main__":
args.num_cpus = 1
args.mem_size = "3GiB"
args.dgpu = True
-args.dgpu_mem_size = "16GB"
-args.dgpu_start = "0GB"
+args.dgpu_mem_size = "16GiB"
+args.dgpu_start = "0GiB"
args.checkpoint_restore = 0
args.disjoint = True
args.timing_gpu = True

View File

@@ -136,8 +136,8 @@ if __name__ == "__m5_main__":
args.num_cpus = 1
args.mem_size = "3GiB"
args.dgpu = True
-args.dgpu_mem_size = "16GB"
-args.dgpu_start = "0GB"
+args.dgpu_mem_size = "16GiB"
+args.dgpu_start = "0GiB"
args.checkpoint_restore = 0
args.disjoint = True
args.timing_gpu = True

View File

@@ -126,8 +126,8 @@ if __name__ == "__m5_main__":
args.num_cpus = 1
args.mem_size = "3GiB"
args.dgpu = True
-args.dgpu_mem_size = "16GB"
-args.dgpu_start = "0GB"
+args.dgpu_mem_size = "16GiB"
+args.dgpu_start = "0GiB"
args.checkpoint_restore = 0
args.disjoint = True
args.timing_gpu = True

View File

@@ -142,8 +142,8 @@ def runMI200GPUFS(cpu_type):
args.cpu_type = "X86KvmCPU"
args.mem_size = "8GiB" # CPU host memory
args.dgpu = True
-args.dgpu_mem_size = "16GB" # GPU device memory
-args.dgpu_start = "0GB"
+args.dgpu_mem_size = "16GiB" # GPU device memory
+args.dgpu_start = "0GiB"
args.checkpoint_restore = 0
args.disjoint = True
args.timing_gpu = True

View File

@@ -156,7 +156,7 @@ def runMI300GPUFS(
# Defaults for MI300X
args.gpu_device = "MI300X"
-args.dgpu_mem_size = "16GB" # GPU memory size, must be 16GB currently.
+args.dgpu_mem_size = "16GiB" # GPU memory size, must be 16GiB currently.
# See: https://rocm.docs.amd.com/en/latest/conceptual/gpu-arch/mi300.html
# Topology for one XCD. Number of CUs is approximately 304 / 8, rounded

View File

@@ -112,7 +112,7 @@ def addRunFSOptions(parser):
"--dgpu-mem-size",
action="store",
type=str,
-default="16GB",
+default="16GiB",
help="Specify the dGPU physical memory size",
)
parser.add_argument(

View File

@@ -143,8 +143,8 @@ def runVegaGPUFS(cpu_type):
args.num_cpus = 1
args.mem_size = "3GiB"
args.dgpu = True
-args.dgpu_mem_size = "16GB"
-args.dgpu_start = "0GB"
+args.dgpu_mem_size = "16GiB"
+args.dgpu_start = "0GiB"
args.checkpoint_restore = 0
args.disjoint = True
args.timing_gpu = True

View File

@@ -60,12 +60,12 @@ def add_options(parser):
)
-# considering 4GB HMC device with following parameters
-# hmc_device_size = '4GB'
-# hmc_vault_size = '256MB'
+# considering 4GiB HMC device with following parameters
+# hmc_device_size = '4GiB'
+# hmc_vault_size = '256MiB'
# hmc_stack_size = 8
# hmc_bank_in_stack = 2
-# hmc_bank_size = '16MB'
+# hmc_bank_size = '16MiB'
# hmc_bank_in_vault = 16
def build_system(options):
# create the system we are going to simulate

View File

@@ -122,12 +122,12 @@ args = parser.parse_args()
if args.cache_size == "small":
args.tcp_size = "256B"
args.tcp_assoc = 2
-args.tcc_size = "1kB"
+args.tcc_size = "1KiB"
args.tcc_assoc = 2
elif args.cache_size == "large":
-args.tcp_size = "256kB"
+args.tcp_size = "256KiB"
args.tcp_assoc = 16
-args.tcc_size = "1024kB"
+args.tcc_size = "1024KiB"
args.tcc_assoc = 16
#

View File

@@ -91,7 +91,7 @@ args = parser.parse_args()
args.l1d_size = "256B"
args.l1i_size = "256B"
args.l2_size = "512B"
-args.l3_size = "1kB"
+args.l3_size = "1KiB"
args.l1d_assoc = 2
args.l1i_assoc = 2
args.l2_assoc = 2

View File

@@ -80,7 +80,7 @@ args = parser.parse_args()
args.l1d_size = "256B"
args.l1i_size = "256B"
args.l2_size = "512B"
-args.l3_size = "1kB"
+args.l3_size = "1KiB"
args.l1d_assoc = 2
args.l1i_assoc = 2
args.l2_assoc = 2