configs: Fix Python 3 iterator and exec compatibility issues

Python 2.7 used to return lists for operations such as map and range;
this has changed in Python 3. To make the configs Python 3 compliant,
add explicit conversions from iterators to lists where needed, replace
xrange with range, and adapt to the changed exec syntax.

This change doesn't fix import paths since that might require us to
restructure the configs slightly.

Change-Id: Idcea8482b286779fc98b4e144ca8f54069c08024
Signed-off-by: Andreas Sandberg <andreas.sandberg@arm.com>
Reviewed-on: https://gem5-review.googlesource.com/c/16002
Reviewed-by: Gabe Black <gabeblack@google.com>
This commit is contained in:
Andreas Sandberg
2019-01-26 10:57:44 +00:00
parent c38a6523ab
commit 32bbddf236
48 changed files with 186 additions and 172 deletions

View File

@@ -79,7 +79,7 @@ def print_bp_list():
def bp_names(): def bp_names():
"""Return a list of valid Branch Predictor names.""" """Return a list of valid Branch Predictor names."""
return _bp_classes.keys() return list(_bp_classes.keys())
# Add all BPs in the object hierarchy. # Add all BPs in the object hierarchy.
for name, cls in inspect.getmembers(m5.objects, is_bp_class): for name, cls in inspect.getmembers(m5.objects, is_bp_class):

View File

@@ -141,6 +141,6 @@ Benchmarks = {
None, 'android-ics')] None, 'android-ics')]
} }
benchs = Benchmarks.keys() benchs = list(Benchmarks.keys())
benchs.sort() benchs.sort()
DefinedBenchmarks = ", ".join(benchs) DefinedBenchmarks = ", ".join(benchs)

View File

@@ -97,7 +97,7 @@ def config_cache(options, system):
if options.memchecker: if options.memchecker:
system.memchecker = MemChecker() system.memchecker = MemChecker()
for i in xrange(options.num_cpus): for i in range(options.num_cpus):
if options.caches: if options.caches:
icache = icache_class(size=options.l1i_size, icache = icache_class(size=options.l1i_size,
assoc=options.l1i_assoc) assoc=options.l1i_assoc)

View File

@@ -99,7 +99,7 @@ def print_cpu_list():
def cpu_names(): def cpu_names():
"""Return a list of valid CPU names.""" """Return a list of valid CPU names."""
return _cpu_classes.keys() return list(_cpu_classes.keys())
def config_etrace(cpu_cls, cpu_list, options): def config_etrace(cpu_cls, cpu_list, options):
if issubclass(cpu_cls, m5.objects.DerivO3CPU): if issubclass(cpu_cls, m5.objects.DerivO3CPU):

View File

@@ -548,7 +548,7 @@ def makeX86System(mem_mode, numCPUs=1, mdesc=None, self=None, Ruby=False):
# Set up the Intel MP table # Set up the Intel MP table
base_entries = [] base_entries = []
ext_entries = [] ext_entries = []
for i in xrange(numCPUs): for i in range(numCPUs):
bp = X86IntelMPProcessor( bp = X86IntelMPProcessor(
local_apic_id = i, local_apic_id = i,
local_apic_version = 0x14, local_apic_version = 0x14,

View File

@@ -69,7 +69,7 @@ def Coalescer_constructor(level):
def create_TLB_Coalescer(options, my_level, my_index, TLB_name, Coalescer_name): def create_TLB_Coalescer(options, my_level, my_index, TLB_name, Coalescer_name):
# arguments: options, TLB level, number of private structures for this Level, # arguments: options, TLB level, number of private structures for this Level,
# TLB name and Coalescer name # TLB name and Coalescer name
for i in xrange(my_index): for i in range(my_index):
TLB_name.append(eval(TLB_constructor(my_level))) TLB_name.append(eval(TLB_constructor(my_level)))
Coalescer_name.append(eval(Coalescer_constructor(my_level))) Coalescer_name.append(eval(Coalescer_constructor(my_level)))
@@ -109,7 +109,7 @@ def config_tlb_hierarchy(options, system, shader_idx):
# Create the hiearchy # Create the hiearchy
# Call the appropriate constructors and add objects to the system # Call the appropriate constructors and add objects to the system
for i in xrange(len(TLB_hierarchy)): for i in range(len(TLB_hierarchy)):
hierarchy_level = TLB_hierarchy[i] hierarchy_level = TLB_hierarchy[i]
level = i+1 level = i+1
for TLB_type in hierarchy_level: for TLB_type in hierarchy_level:
@@ -143,7 +143,7 @@ def config_tlb_hierarchy(options, system, shader_idx):
# Each TLB is connected with its Coalescer through a single port. # Each TLB is connected with its Coalescer through a single port.
# There is a one-to-one mapping of TLBs to Coalescers at a given level # There is a one-to-one mapping of TLBs to Coalescers at a given level
# This won't be modified no matter what the hierarchy looks like. # This won't be modified no matter what the hierarchy looks like.
for i in xrange(len(TLB_hierarchy)): for i in range(len(TLB_hierarchy)):
hierarchy_level = TLB_hierarchy[i] hierarchy_level = TLB_hierarchy[i]
level = i+1 level = i+1
for TLB_type in hierarchy_level: for TLB_type in hierarchy_level:
@@ -159,7 +159,7 @@ def config_tlb_hierarchy(options, system, shader_idx):
name = TLB_type['name'] name = TLB_type['name']
num_TLBs = TLB_type['width'] num_TLBs = TLB_type['width']
if name == 'l1': # L1 D-TLBs if name == 'l1': # L1 D-TLBs
tlb_per_cu = num_TLBs / n_cu tlb_per_cu = num_TLBs // n_cu
for cu_idx in range(n_cu): for cu_idx in range(n_cu):
if tlb_per_cu: if tlb_per_cu:
for tlb in range(tlb_per_cu): for tlb in range(tlb_per_cu):

View File

@@ -337,16 +337,16 @@ def config_hmc_host_ctrl(opt, system):
num_lanes=opt.num_lanes_per_link, num_lanes=opt.num_lanes_per_link,
link_speed=opt.serial_link_speed, link_speed=opt.serial_link_speed,
delay=opt.total_ctrl_latency) for i in delay=opt.total_ctrl_latency) for i in
xrange(opt.num_serial_links)] range(opt.num_serial_links)]
system.hmc_host.seriallink = sl system.hmc_host.seriallink = sl
# enable global monitor # enable global monitor
if opt.enable_global_monitor: if opt.enable_global_monitor:
system.hmc_host.lmonitor = [CommMonitor() for i in system.hmc_host.lmonitor = [CommMonitor() for i in
xrange(opt.num_serial_links)] range(opt.num_serial_links)]
# set the clock frequency for serial link # set the clock frequency for serial link
for i in xrange(opt.num_serial_links): for i in range(opt.num_serial_links):
clk = opt.link_controller_frequency clk = opt.link_controller_frequency
vd = VoltageDomain(voltage='1V') vd = VoltageDomain(voltage='1V')
scd = SrcClockDomain(clock=clk, voltage_domain=vd) scd = SrcClockDomain(clock=clk, voltage_domain=vd)
@@ -357,7 +357,7 @@ def config_hmc_host_ctrl(opt, system):
hh = system.hmc_host hh = system.hmc_host
if opt.arch == "distributed": if opt.arch == "distributed":
mb = system.membus mb = system.membus
for i in xrange(opt.num_links_controllers): for i in range(opt.num_links_controllers):
if opt.enable_global_monitor: if opt.enable_global_monitor:
mb.master = hh.lmonitor[i].slave mb.master = hh.lmonitor[i].slave
hh.lmonitor[i].master = hh.seriallink[i].slave hh.lmonitor[i].master = hh.seriallink[i].slave
@@ -375,7 +375,7 @@ def config_hmc_host_ctrl(opt, system):
mb.master = hh.seriallink[1].slave mb.master = hh.seriallink[1].slave
if opt.arch == "same": if opt.arch == "same":
for i in xrange(opt.num_links_controllers): for i in range(opt.num_links_controllers):
if opt.enable_global_monitor: if opt.enable_global_monitor:
hh.lmonitor[i].master = hh.seriallink[i].slave hh.lmonitor[i].master = hh.seriallink[i].slave
@@ -395,7 +395,7 @@ def config_hmc_dev(opt, system, hmc_host):
system.mem_ranges = addr_ranges_vaults system.mem_ranges = addr_ranges_vaults
if opt.enable_link_monitor: if opt.enable_link_monitor:
lm = [CommMonitor() for i in xrange(opt.num_links_controllers)] lm = [CommMonitor() for i in range(opt.num_links_controllers)]
system.hmc_dev.lmonitor = lm system.hmc_dev.lmonitor = lm
# 4 HMC Crossbars located in its logic-base (LoB) # 4 HMC Crossbars located in its logic-base (LoB)
@@ -403,17 +403,17 @@ def config_hmc_dev(opt, system, hmc_host):
frontend_latency=opt.xbar_frontend_latency, frontend_latency=opt.xbar_frontend_latency,
forward_latency=opt.xbar_forward_latency, forward_latency=opt.xbar_forward_latency,
response_latency=opt.xbar_response_latency) for i in response_latency=opt.xbar_response_latency) for i in
xrange(opt.number_mem_crossbar)] range(opt.number_mem_crossbar)]
system.hmc_dev.xbar = xb system.hmc_dev.xbar = xb
for i in xrange(opt.number_mem_crossbar): for i in range(opt.number_mem_crossbar):
clk = opt.xbar_frequency clk = opt.xbar_frequency
vd = VoltageDomain(voltage='1V') vd = VoltageDomain(voltage='1V')
scd = SrcClockDomain(clock=clk, voltage_domain=vd) scd = SrcClockDomain(clock=clk, voltage_domain=vd)
system.hmc_dev.xbar[i].clk_domain = scd system.hmc_dev.xbar[i].clk_domain = scd
# Attach 4 serial link to 4 crossbar/s # Attach 4 serial link to 4 crossbar/s
for i in xrange(opt.num_serial_links): for i in range(opt.num_serial_links):
if opt.enable_link_monitor: if opt.enable_link_monitor:
system.hmc_host.seriallink[i].master = \ system.hmc_host.seriallink[i].master = \
system.hmc_dev.lmonitor[i].slave system.hmc_dev.lmonitor[i].slave
@@ -429,7 +429,7 @@ def config_hmc_dev(opt, system, hmc_host):
# create a list of buffers # create a list of buffers
system.hmc_dev.buffers = [Bridge(req_size=opt.xbar_buffer_size_req, system.hmc_dev.buffers = [Bridge(req_size=opt.xbar_buffer_size_req,
resp_size=opt.xbar_buffer_size_resp) resp_size=opt.xbar_buffer_size_resp)
for i in xrange(numx*(opt.mem_chunk-1))] for i in range(numx*(opt.mem_chunk-1))]
# Buffer iterator # Buffer iterator
it = iter(range(len(system.hmc_dev.buffers))) it = iter(range(len(system.hmc_dev.buffers)))

View File

@@ -86,7 +86,7 @@ def print_mem_list():
def mem_names(): def mem_names():
"""Return a list of valid memory names.""" """Return a list of valid memory names."""
return _mem_classes.keys() return list(_mem_classes.keys())
# Add all memory controllers in the object hierarchy. # Add all memory controllers in the object hierarchy.
for name, cls in inspect.getmembers(m5.objects, is_mem_class): for name, cls in inspect.getmembers(m5.objects, is_mem_class):
@@ -215,7 +215,7 @@ def config_mem(options, system):
# array of controllers and set their parameters to match their # array of controllers and set their parameters to match their
# address mapping in the case of a DRAM # address mapping in the case of a DRAM
for r in system.mem_ranges: for r in system.mem_ranges:
for i in xrange(nbr_mem_ctrls): for i in range(nbr_mem_ctrls):
mem_ctrl = create_mem_ctrl(cls, r, i, nbr_mem_ctrls, intlv_bits, mem_ctrl = create_mem_ctrl(cls, r, i, nbr_mem_ctrls, intlv_bits,
intlv_size) intlv_size)
# Set the number of ranks based on the command-line # Set the number of ranks based on the command-line
@@ -233,7 +233,7 @@ def config_mem(options, system):
subsystem.mem_ctrls = mem_ctrls subsystem.mem_ctrls = mem_ctrls
# Connect the controllers to the membus # Connect the controllers to the membus
for i in xrange(len(subsystem.mem_ctrls)): for i in range(len(subsystem.mem_ctrls)):
if opt_mem_type == "HMC_2500_1x32": if opt_mem_type == "HMC_2500_1x32":
subsystem.mem_ctrls[i].port = xbar[i/4].master subsystem.mem_ctrls[i].port = xbar[i/4].master
# Set memory device size. There is an independent controller for # Set memory device size. There is an independent controller for

View File

@@ -339,8 +339,9 @@ def addFSOptions(parser):
# System options # System options
parser.add_option("--kernel", action="store", type="string") parser.add_option("--kernel", action="store", type="string")
parser.add_option("--os-type", action="store", type="choice", parser.add_option("--os-type", action="store", type="choice",
choices=os_types[buildEnv['TARGET_ISA']], default="linux", choices=os_types[str(buildEnv['TARGET_ISA'])],
help="Specifies type of OS to boot") default="linux",
help="Specifies type of OS to boot")
parser.add_option("--script", action="store", type="string") parser.add_option("--script", action="store", type="string")
parser.add_option("--frame-capture", action="store_true", parser.add_option("--frame-capture", action="store_true",
help="Stores changed frame buffers from the VNC server to compressed "\ help="Stores changed frame buffers from the VNC server to compressed "\

View File

@@ -103,7 +103,7 @@ def print_platform_list():
def platform_names(): def platform_names():
"""Return a list of valid Platform names.""" """Return a list of valid Platform names."""
return _platform_classes.keys() + _platform_aliases.keys() return list(_platform_classes.keys()) + list(_platform_aliases.keys())
# Add all Platforms in the object hierarchy. # Add all Platforms in the object hierarchy.
for name, cls in inspect.getmembers(m5.objects, is_platform_class): for name, cls in inspect.getmembers(m5.objects, is_platform_class):

View File

@@ -453,18 +453,18 @@ def run(options, root, testsys, cpu_class):
switch_cpus = None switch_cpus = None
if options.prog_interval: if options.prog_interval:
for i in xrange(np): for i in range(np):
testsys.cpu[i].progress_interval = options.prog_interval testsys.cpu[i].progress_interval = options.prog_interval
if options.maxinsts: if options.maxinsts:
for i in xrange(np): for i in range(np):
testsys.cpu[i].max_insts_any_thread = options.maxinsts testsys.cpu[i].max_insts_any_thread = options.maxinsts
if cpu_class: if cpu_class:
switch_cpus = [cpu_class(switched_out=True, cpu_id=(i)) switch_cpus = [cpu_class(switched_out=True, cpu_id=(i))
for i in xrange(np)] for i in range(np)]
for i in xrange(np): for i in range(np):
if options.fast_forward: if options.fast_forward:
testsys.cpu[i].max_insts_any_thread = int(options.fast_forward) testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
switch_cpus[i].system = testsys switch_cpus[i].system = testsys
@@ -489,7 +489,7 @@ def run(options, root, testsys, cpu_class):
CpuConfig.config_etrace(cpu_class, switch_cpus, options) CpuConfig.config_etrace(cpu_class, switch_cpus, options)
testsys.switch_cpus = switch_cpus testsys.switch_cpus = switch_cpus
switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)] switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in range(np)]
if options.repeat_switch: if options.repeat_switch:
switch_class = getCPUClass(options.cpu_type)[0] switch_class = getCPUClass(options.cpu_type)[0]
@@ -502,9 +502,9 @@ def run(options, root, testsys, cpu_class):
sys.exit(1) sys.exit(1)
repeat_switch_cpus = [switch_class(switched_out=True, \ repeat_switch_cpus = [switch_class(switched_out=True, \
cpu_id=(i)) for i in xrange(np)] cpu_id=(i)) for i in range(np)]
for i in xrange(np): for i in range(np):
repeat_switch_cpus[i].system = testsys repeat_switch_cpus[i].system = testsys
repeat_switch_cpus[i].workload = testsys.cpu[i].workload repeat_switch_cpus[i].workload = testsys.cpu[i].workload
repeat_switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain repeat_switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain
@@ -520,18 +520,18 @@ def run(options, root, testsys, cpu_class):
if cpu_class: if cpu_class:
repeat_switch_cpu_list = [(switch_cpus[i], repeat_switch_cpus[i]) repeat_switch_cpu_list = [(switch_cpus[i], repeat_switch_cpus[i])
for i in xrange(np)] for i in range(np)]
else: else:
repeat_switch_cpu_list = [(testsys.cpu[i], repeat_switch_cpus[i]) repeat_switch_cpu_list = [(testsys.cpu[i], repeat_switch_cpus[i])
for i in xrange(np)] for i in range(np)]
if options.standard_switch: if options.standard_switch:
switch_cpus = [TimingSimpleCPU(switched_out=True, cpu_id=(i)) switch_cpus = [TimingSimpleCPU(switched_out=True, cpu_id=(i))
for i in xrange(np)] for i in range(np)]
switch_cpus_1 = [DerivO3CPU(switched_out=True, cpu_id=(i)) switch_cpus_1 = [DerivO3CPU(switched_out=True, cpu_id=(i))
for i in xrange(np)] for i in range(np)]
for i in xrange(np): for i in range(np):
switch_cpus[i].system = testsys switch_cpus[i].system = testsys
switch_cpus_1[i].system = testsys switch_cpus_1[i].system = testsys
switch_cpus[i].workload = testsys.cpu[i].workload switch_cpus[i].workload = testsys.cpu[i].workload
@@ -572,8 +572,12 @@ def run(options, root, testsys, cpu_class):
testsys.switch_cpus = switch_cpus testsys.switch_cpus = switch_cpus
testsys.switch_cpus_1 = switch_cpus_1 testsys.switch_cpus_1 = switch_cpus_1
switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)] switch_cpu_list = [
switch_cpu_list1 = [(switch_cpus[i], switch_cpus_1[i]) for i in xrange(np)] (testsys.cpu[i], switch_cpus[i]) for i in range(np)
]
switch_cpu_list1 = [
(switch_cpus[i], switch_cpus_1[i]) for i in range(np)
]
# set the checkpoint in the cpu before m5.instantiate is called # set the checkpoint in the cpu before m5.instantiate is called
if options.take_checkpoints != None and \ if options.take_checkpoints != None and \
@@ -581,7 +585,7 @@ def run(options, root, testsys, cpu_class):
offset = int(options.take_checkpoints) offset = int(options.take_checkpoints)
# Set an instruction break point # Set an instruction break point
if options.simpoint: if options.simpoint:
for i in xrange(np): for i in range(np):
if testsys.cpu[i].workload[0].simpoint == 0: if testsys.cpu[i].workload[0].simpoint == 0:
fatal('no simpoint for testsys.cpu[%d].workload[0]', i) fatal('no simpoint for testsys.cpu[%d].workload[0]', i)
checkpoint_inst = int(testsys.cpu[i].workload[0].simpoint) + offset checkpoint_inst = int(testsys.cpu[i].workload[0].simpoint) + offset
@@ -592,7 +596,7 @@ def run(options, root, testsys, cpu_class):
options.take_checkpoints = offset options.take_checkpoints = offset
# Set all test cpus with the right number of instructions # Set all test cpus with the right number of instructions
# for the upcoming simulation # for the upcoming simulation
for i in xrange(np): for i in range(np):
testsys.cpu[i].max_insts_any_thread = offset testsys.cpu[i].max_insts_any_thread = offset
if options.take_simpoint_checkpoints != None: if options.take_simpoint_checkpoints != None:

View File

@@ -26,6 +26,8 @@
# #
# Authors: Ali Saidi # Authors: Ali Saidi
from six import string_types
import os, sys import os, sys
config_path = os.path.dirname(os.path.abspath(__file__)) config_path = os.path.dirname(os.path.abspath(__file__))
@@ -35,7 +37,7 @@ class PathSearchFunc(object):
_sys_paths = None _sys_paths = None
def __init__(self, subdirs, sys_paths=None): def __init__(self, subdirs, sys_paths=None):
if isinstance(subdirs, basestring): if isinstance(subdirs, string_types):
subdirs = [subdirs] subdirs = [subdirs]
self._subdir = os.path.join(*subdirs) self._subdir = os.path.join(*subdirs)
if sys_paths: if sys_paths:
@@ -55,16 +57,16 @@ class PathSearchFunc(object):
paths = filter(os.path.isdir, paths) paths = filter(os.path.isdir, paths)
if not paths: if not paths:
raise IOError, "Can't find a path to system files." raise IOError("Can't find a path to system files.")
self._sys_paths = paths self._sys_paths = list(paths)
filepath = os.path.join(self._subdir, filename) filepath = os.path.join(self._subdir, filename)
paths = (os.path.join(p, filepath) for p in self._sys_paths) paths = (os.path.join(p, filepath) for p in self._sys_paths)
try: try:
return next(p for p in paths if os.path.exists(p)) return next(p for p in paths if os.path.exists(p))
except StopIteration: except StopIteration:
raise IOError, "Can't find file '%s' on path." % filename raise IOError("Can't find file '%s' on path." % filename)
disk = PathSearchFunc('disks') disk = PathSearchFunc('disks')
binary = PathSearchFunc('binaries') binary = PathSearchFunc('binaries')

View File

@@ -177,7 +177,7 @@ def let(bindings, expr):
defns = [] defns = []
# Then apply them to the produced new env # Then apply them to the produced new env
for i in xrange(0, len(bindings)): for i in range(0, len(bindings)):
name, binding_expr = bindings[i] name, binding_expr = bindings[i]
defns.append(binding_expr(new_env)) defns.append(binding_expr(new_env))

View File

@@ -93,13 +93,13 @@ class Benchmark(object):
try: try:
func = getattr(self.__class__, input_set) func = getattr(self.__class__, input_set)
except AttributeError: except AttributeError:
raise AttributeError, \ raise AttributeError(
'The benchmark %s does not have the %s input set' % \ 'The benchmark %s does not have the %s input set' % \
(self.name, input_set) (self.name, input_set))
executable = joinpath(spec_dist, 'binaries', isa, os, self.binary) executable = joinpath(spec_dist, 'binaries', isa, os, self.binary)
if not isfile(executable): if not isfile(executable):
raise AttributeError, '%s not found' % executable raise AttributeError('%s not found' % executable)
self.executable = executable self.executable = executable
# root of tree for input & output data files # root of tree for input & output data files
@@ -113,7 +113,7 @@ class Benchmark(object):
self.input_set = input_set self.input_set = input_set
if not isdir(inputs_dir): if not isdir(inputs_dir):
raise AttributeError, '%s not found' % inputs_dir raise AttributeError('%s not found' % inputs_dir)
self.inputs_dir = [ inputs_dir ] self.inputs_dir = [ inputs_dir ]
if isdir(all_dir): if isdir(all_dir):
@@ -670,7 +670,7 @@ class vortex(Benchmark):
elif (isa == 'sparc' or isa == 'sparc32'): elif (isa == 'sparc' or isa == 'sparc32'):
self.endian = 'bendian' self.endian = 'bendian'
else: else:
raise AttributeError, "unknown ISA %s" % isa raise AttributeError("unknown ISA %s" % isa)
super(vortex, self).__init__(isa, os, input_set) super(vortex, self).__init__(isa, os, input_set)

2
configs/dist/sw.py vendored
View File

@@ -57,7 +57,7 @@ def build_switch(options):
sync_repeat = options.dist_sync_repeat, sync_repeat = options.dist_sync_repeat,
is_switch = True, is_switch = True,
num_nodes = options.dist_size) num_nodes = options.dist_size)
for i in xrange(options.dist_size)] for i in range(options.dist_size)]
for (i, link) in enumerate(switch.portlink): for (i, link) in enumerate(switch.portlink):
link.int0 = switch.interface[i] link.int0 = switch.interface[i]

View File

@@ -188,7 +188,7 @@ def create_trace(filename, max_addr, burst_size, itt):
protolib.encodeMessage(proto_out, header) protolib.encodeMessage(proto_out, header)
# create a list of every single address to touch # create a list of every single address to touch
addrs = range(0, max_addr, burst_size) addrs = list(range(0, max_addr, burst_size))
import random import random
random.shuffle(addrs) random.shuffle(addrs)

View File

@@ -166,11 +166,11 @@ pd_entry_time = (system.mem_ctrls[0].tRAS.value +
# We sweep itt max using the multipliers specified by the user. # We sweep itt max using the multipliers specified by the user.
itt_max_str = args.itt_list.strip().split() itt_max_str = args.itt_list.strip().split()
itt_max_multiples = map(lambda x : int(x), itt_max_str) itt_max_multiples = [ int(x) for x in itt_max_str ]
if len(itt_max_multiples) == 0: if len(itt_max_multiples) == 0:
fatal("String for itt-max-list detected empty\n") fatal("String for itt-max-list detected empty\n")
itt_max_values = map(lambda m : pd_entry_time * m, itt_max_multiples) itt_max_values = [ pd_entry_time * m for m in itt_max_multiples ]
# Generate request addresses in the entire range, assume we start at 0 # Generate request addresses in the entire range, assume we start at 0
max_addr = mem_range.end max_addr = mem_range.end

View File

@@ -225,7 +225,7 @@ if options.TLB_config == "perLane":
# List of compute units; one GPU can have multiple compute units # List of compute units; one GPU can have multiple compute units
compute_units = [] compute_units = []
for i in xrange(n_cu): for i in range(n_cu):
compute_units.append(ComputeUnit(cu_id = i, perLaneTLB = per_lane, compute_units.append(ComputeUnit(cu_id = i, perLaneTLB = per_lane,
num_SIMDs = options.simds_per_cu, num_SIMDs = options.simds_per_cu,
wfSize = options.wf_size, wfSize = options.wf_size,
@@ -255,8 +255,8 @@ for i in xrange(n_cu):
options.outOfOrderDataDelivery)) options.outOfOrderDataDelivery))
wavefronts = [] wavefronts = []
vrfs = [] vrfs = []
for j in xrange(options.simds_per_cu): for j in range(options.simds_per_cu):
for k in xrange(shader.n_wf): for k in range(shader.n_wf):
wavefronts.append(Wavefront(simdId = j, wf_slot_id = k, wavefronts.append(Wavefront(simdId = j, wf_slot_id = k,
wfSize = options.wf_size)) wfSize = options.wf_size))
vrfs.append(VectorRegisterFile(simd_id=j, vrfs.append(VectorRegisterFile(simd_id=j,
@@ -311,7 +311,7 @@ if fast_forward:
future_cpu_list = [] future_cpu_list = []
# Initial CPUs to be used during fast-forwarding. # Initial CPUs to be used during fast-forwarding.
for i in xrange(options.num_cpus): for i in range(options.num_cpus):
cpu = CpuClass(cpu_id = i, cpu = CpuClass(cpu_id = i,
clk_domain = SrcClockDomain( clk_domain = SrcClockDomain(
clock = options.CPUClock, clock = options.CPUClock,
@@ -328,7 +328,7 @@ else:
MainCpuClass = CpuClass MainCpuClass = CpuClass
# CPs to be used throughout the simulation. # CPs to be used throughout the simulation.
for i in xrange(options.num_cp): for i in range(options.num_cp):
cp = MainCpuClass(cpu_id = options.num_cpus + i, cp = MainCpuClass(cpu_id = options.num_cpus + i,
clk_domain = SrcClockDomain( clk_domain = SrcClockDomain(
clock = options.CPUClock, clock = options.CPUClock,
@@ -337,7 +337,7 @@ for i in xrange(options.num_cp):
cp_list.append(cp) cp_list.append(cp)
# Main CPUs (to be used after fast-forwarding if fast-forwarding is specified). # Main CPUs (to be used after fast-forwarding if fast-forwarding is specified).
for i in xrange(options.num_cpus): for i in range(options.num_cpus):
cpu = MainCpuClass(cpu_id = i, cpu = MainCpuClass(cpu_id = i,
clk_domain = SrcClockDomain( clk_domain = SrcClockDomain(
clock = options.CPUClock, clock = options.CPUClock,
@@ -400,7 +400,7 @@ for cp in cp_list:
cp.workload = host_cpu.workload cp.workload = host_cpu.workload
if fast_forward: if fast_forward:
for i in xrange(len(future_cpu_list)): for i in range(len(future_cpu_list)):
future_cpu_list[i].workload = cpu_list[i].workload future_cpu_list[i].workload = cpu_list[i].workload
future_cpu_list[i].createThreads() future_cpu_list[i].createThreads()
@@ -408,7 +408,7 @@ if fast_forward:
# List of CPUs that must be switched when moving between KVM and simulation # List of CPUs that must be switched when moving between KVM and simulation
if fast_forward: if fast_forward:
switch_cpu_list = \ switch_cpu_list = \
[(cpu_list[i], future_cpu_list[i]) for i in xrange(options.num_cpus)] [(cpu_list[i], future_cpu_list[i]) for i in range(options.num_cpus)]
# Full list of processing cores in the system. Note that # Full list of processing cores in the system. Note that
# dispatcher is also added to cpu_list although it is # dispatcher is also added to cpu_list although it is
@@ -431,7 +431,7 @@ if fast_forward:
have_kvm_support = 'BaseKvmCPU' in globals() have_kvm_support = 'BaseKvmCPU' in globals()
if have_kvm_support and buildEnv['TARGET_ISA'] == "x86": if have_kvm_support and buildEnv['TARGET_ISA'] == "x86":
system.vm = KvmVM() system.vm = KvmVM()
for i in xrange(len(host_cpu.workload)): for i in range(len(host_cpu.workload)):
host_cpu.workload[i].useArchPT = True host_cpu.workload[i].useArchPT = True
host_cpu.workload[i].kvmInSE = True host_cpu.workload[i].kvmInSE = True
else: else:
@@ -479,15 +479,15 @@ gpu_port_idx = len(system.ruby._cpu_ports) \
gpu_port_idx = gpu_port_idx - options.num_cp * 2 gpu_port_idx = gpu_port_idx - options.num_cp * 2
wavefront_size = options.wf_size wavefront_size = options.wf_size
for i in xrange(n_cu): for i in range(n_cu):
# The pipeline issues wavefront_size number of uncoalesced requests # The pipeline issues wavefront_size number of uncoalesced requests
# in one GPU issue cycle. Hence wavefront_size mem ports. # in one GPU issue cycle. Hence wavefront_size mem ports.
for j in xrange(wavefront_size): for j in range(wavefront_size):
system.cpu[shader_idx].CUs[i].memory_port[j] = \ system.cpu[shader_idx].CUs[i].memory_port[j] = \
system.ruby._cpu_ports[gpu_port_idx].slave[j] system.ruby._cpu_ports[gpu_port_idx].slave[j]
gpu_port_idx += 1 gpu_port_idx += 1
for i in xrange(n_cu): for i in range(n_cu):
if i > 0 and not i % options.cu_per_sqc: if i > 0 and not i % options.cu_per_sqc:
print("incrementing idx on ", i) print("incrementing idx on ", i)
gpu_port_idx += 1 gpu_port_idx += 1
@@ -496,7 +496,7 @@ for i in xrange(n_cu):
gpu_port_idx = gpu_port_idx + 1 gpu_port_idx = gpu_port_idx + 1
# attach CP ports to Ruby # attach CP ports to Ruby
for i in xrange(options.num_cp): for i in range(options.num_cp):
system.cpu[cp_idx].createInterruptController() system.cpu[cp_idx].createInterruptController()
system.cpu[cp_idx].dcache_port = \ system.cpu[cp_idx].dcache_port = \
system.ruby._cpu_ports[gpu_port_idx + i * 2].slave system.ruby._cpu_ports[gpu_port_idx + i * 2].slave

View File

@@ -138,7 +138,7 @@ def build_test_system(np):
# For now, assign all the CPUs to the same clock domain # For now, assign all the CPUs to the same clock domain
test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i) test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
for i in xrange(np)] for i in range(np)]
if CpuConfig.is_kvm_cpu(TestCPUClass) or CpuConfig.is_kvm_cpu(FutureClass): if CpuConfig.is_kvm_cpu(TestCPUClass) or CpuConfig.is_kvm_cpu(FutureClass):
test_sys.kvm_vm = KvmVM() test_sys.kvm_vm = KvmVM()
@@ -194,7 +194,7 @@ def build_test_system(np):
if np > 1: if np > 1:
fatal("SimPoint generation not supported with more than one CPUs") fatal("SimPoint generation not supported with more than one CPUs")
for i in xrange(np): for i in range(np):
if options.simpoint_profile: if options.simpoint_profile:
test_sys.cpu[i].addSimPointProbe(options.simpoint_interval) test_sys.cpu[i].addSimPointProbe(options.simpoint_interval)
if options.checker: if options.checker:
@@ -277,7 +277,7 @@ def build_drive_system(np):
# memory bus # memory bus
drive_sys.mem_ctrls = [DriveMemClass(range = r) drive_sys.mem_ctrls = [DriveMemClass(range = r)
for r in drive_sys.mem_ranges] for r in drive_sys.mem_ranges]
for i in xrange(len(drive_sys.mem_ctrls)): for i in range(len(drive_sys.mem_ctrls)):
drive_sys.mem_ctrls[i].port = drive_sys.membus.master drive_sys.mem_ctrls[i].port = drive_sys.membus.master
drive_sys.init_param = options.init_param drive_sys.init_param = options.init_param

View File

@@ -87,7 +87,8 @@ parser.add_option("--inj-vnet", type="int", default=-1,
# #
Ruby.define_options(parser) Ruby.define_options(parser)
execfile(os.path.join(config_root, "common", "Options.py")) exec(compile(open(os.path.join(config_root, "common", "Options.py")).read(),
os.path.join(config_root, "common", "Options.py"), 'exec'))
(options, args) = parser.parse_args() (options, args) = parser.parse_args()
@@ -112,7 +113,7 @@ cpus = [ GarnetSyntheticTraffic(
inj_vnet=options.inj_vnet, inj_vnet=options.inj_vnet,
precision=options.precision, precision=options.precision,
num_dest=options.num_dirs) \ num_dest=options.num_dirs) \
for i in xrange(options.num_cpus) ] for i in range(options.num_cpus) ]
# create the desired simulated system # create the desired simulated system
system = System(cpu = cpus, mem_ranges = [AddrRange(options.mem_size)]) system = System(cpu = cpus, mem_ranges = [AddrRange(options.mem_size)])

View File

@@ -57,17 +57,17 @@ def build_system(options):
system.clk_domain = SrcClockDomain(clock=clk, voltage_domain=vd) system.clk_domain = SrcClockDomain(clock=clk, voltage_domain=vd)
# add traffic generators to the system # add traffic generators to the system
system.tgen = [TrafficGen(config_file=options.tgen_cfg_file) for i in system.tgen = [TrafficGen(config_file=options.tgen_cfg_file) for i in
xrange(options.num_tgen)] range(options.num_tgen)]
# Config memory system with given HMC arch # Config memory system with given HMC arch
MemConfig.config_mem(options, system) MemConfig.config_mem(options, system)
# Connect the traffic generatiors # Connect the traffic generatiors
if options.arch == "distributed": if options.arch == "distributed":
for i in xrange(options.num_tgen): for i in range(options.num_tgen):
system.tgen[i].port = system.membus.slave system.tgen[i].port = system.membus.slave
# connect the system port even if it is not used in this example # connect the system port even if it is not used in this example
system.system_port = system.membus.slave system.system_port = system.membus.slave
if options.arch == "mixed": if options.arch == "mixed":
for i in xrange(int(options.num_tgen/2)): for i in range(int(options.num_tgen/2)):
system.tgen[i].port = system.membus.slave system.tgen[i].port = system.membus.slave
hh = system.hmc_host hh = system.hmc_host
if options.enable_global_monitor: if options.enable_global_monitor:
@@ -82,7 +82,7 @@ def build_system(options):
system.system_port = system.membus.slave system.system_port = system.membus.slave
if options.arch == "same": if options.arch == "same":
hh = system.hmc_host hh = system.hmc_host
for i in xrange(options.num_links_controllers): for i in range(options.num_links_controllers):
if options.enable_global_monitor: if options.enable_global_monitor:
system.tgen[i].port = hh.lmonitor[i].slave system.tgen[i].port = hh.lmonitor[i].slave
else: else:

View File

@@ -246,9 +246,9 @@ def make_cache_level(ncaches, prototypes, level, next_cache):
# The levels are indexing backwards through the list # The levels are indexing backwards through the list
ntesters = testerspec[len(cachespec) - level] ntesters = testerspec[len(cachespec) - level]
testers = [proto_tester() for i in xrange(ntesters)] testers = [proto_tester() for i in range(ntesters)]
checkers = [MemCheckerMonitor(memchecker = system.memchecker) \ checkers = [MemCheckerMonitor(memchecker = system.memchecker) \
for i in xrange(ntesters)] for i in range(ntesters)]
if ntesters: if ntesters:
subsys.tester = testers subsys.tester = testers
subsys.checkers = checkers subsys.checkers = checkers
@@ -264,8 +264,8 @@ def make_cache_level(ncaches, prototypes, level, next_cache):
# Create and connect the caches, both the ones fanning out # Create and connect the caches, both the ones fanning out
# to create the tree, and the ones used to connect testers # to create the tree, and the ones used to connect testers
# on this level # on this level
tree_caches = [prototypes[0]() for i in xrange(ncaches[0])] tree_caches = [prototypes[0]() for i in range(ncaches[0])]
tester_caches = [proto_l1() for i in xrange(ntesters)] tester_caches = [proto_l1() for i in range(ntesters)]
subsys.cache = tester_caches + tree_caches subsys.cache = tester_caches + tree_caches
for cache in tree_caches: for cache in tree_caches:

View File

@@ -257,7 +257,7 @@ def make_cache_level(ncaches, prototypes, level, next_cache):
limit = (len(cachespec) - level + 1) * 100000000 limit = (len(cachespec) - level + 1) * 100000000
testers = [proto_tester(interval = 10 * (level * level + 1), testers = [proto_tester(interval = 10 * (level * level + 1),
progress_check = limit) \ progress_check = limit) \
for i in xrange(ntesters)] for i in range(ntesters)]
if ntesters: if ntesters:
subsys.tester = testers subsys.tester = testers
@@ -272,8 +272,8 @@ def make_cache_level(ncaches, prototypes, level, next_cache):
# Create and connect the caches, both the ones fanning out # Create and connect the caches, both the ones fanning out
# to create the tree, and the ones used to connect testers # to create the tree, and the ones used to connect testers
# on this level # on this level
tree_caches = [prototypes[0]() for i in xrange(ncaches[0])] tree_caches = [prototypes[0]() for i in range(ncaches[0])]
tester_caches = [proto_l1() for i in xrange(ntesters)] tester_caches = [proto_l1() for i in range(ntesters)]
subsys.cache = tester_caches + tree_caches subsys.cache = tester_caches + tree_caches
for cache in tree_caches: for cache in tree_caches:

View File

@@ -280,7 +280,7 @@ class ConfigManager(object):
# Assume that unnamed ports are unconnected # Assume that unnamed ports are unconnected
peers = self.config.get_port_peers(object_name, port_name) peers = self.config.get_port_peers(object_name, port_name)
for index, peer in zip(xrange(0, len(peers)), peers): for index, peer in zip(range(0, len(peers)), peers):
parsed_ports.append(( parsed_ports.append((
PortConnection(object_name, port.name, index), PortConnection(object_name, port.name, index),
PortConnection.from_string(peer))) PortConnection.from_string(peer)))

View File

@@ -76,7 +76,9 @@ parser.add_option("--wfs-per-simd", type="int", default=10, help="Number of " \
# #
Ruby.define_options(parser) Ruby.define_options(parser)
execfile(os.path.join(config_root, "common", "Options.py")) exec(compile( \
open(os.path.join(config_root, "common", "Options.py")).read(), \
os.path.join(config_root, "common", "Options.py"), 'exec'))
(options, args) = parser.parse_args() (options, args) = parser.parse_args()
@@ -97,7 +99,7 @@ options.l3_assoc=2
assert(options.num_compute_units >= 1) assert(options.num_compute_units >= 1)
n_cu = options.num_compute_units n_cu = options.num_compute_units
options.num_sqc = int((n_cu + options.cu_per_sqc - 1) / options.cu_per_sqc) options.num_sqc = int((n_cu + options.cu_per_sqc - 1) // options.cu_per_sqc)
if args: if args:
print("Error: script doesn't take any positional arguments") print("Error: script doesn't take any positional arguments")

View File

@@ -65,7 +65,9 @@ parser.add_option("--suppress-func-warnings", action="store_true",
# #
Ruby.define_options(parser) Ruby.define_options(parser)
execfile(os.path.join(config_root, "common", "Options.py")) exec(compile( \
open(os.path.join(config_root, "common", "Options.py")).read(), \
os.path.join(config_root, "common", "Options.py"), 'exec'))
(options, args) = parser.parse_args() (options, args) = parser.parse_args()
@@ -101,7 +103,7 @@ cpus = [ MemTest(max_loads = options.maxloads,
percent_uncacheable = 0, percent_uncacheable = 0,
progress_interval = options.progress, progress_interval = options.progress,
suppress_func_warnings = options.suppress_func_warnings) \ suppress_func_warnings = options.suppress_func_warnings) \
for i in xrange(options.num_cpus) ] for i in range(options.num_cpus) ]
system = System(cpu = cpus, system = System(cpu = cpus,
clk_domain = SrcClockDomain(clock = options.sys_clock), clk_domain = SrcClockDomain(clock = options.sys_clock),
@@ -114,7 +116,7 @@ if options.num_dmas > 0:
progress_interval = options.progress, progress_interval = options.progress,
suppress_func_warnings = suppress_func_warnings =
not options.suppress_func_warnings) \ not options.suppress_func_warnings) \
for i in xrange(options.num_dmas) ] for i in range(options.num_dmas) ]
system.dma_devices = dmas system.dma_devices = dmas
else: else:
dmas = [] dmas = []

View File

@@ -59,7 +59,9 @@ parser.add_option("-f", "--wakeup_freq", metavar="N", default=10,
# #
Ruby.define_options(parser) Ruby.define_options(parser)
execfile(os.path.join(config_root, "common", "Options.py")) exec(compile( \
open(os.path.join(config_root, "common", "Options.py")).read(), \
os.path.join(config_root, "common", "Options.py"), 'exec'))
(options, args) = parser.parse_args() (options, args) = parser.parse_args()

View File

@@ -171,7 +171,7 @@ if options.smt and options.num_cpus > 1:
fatal("You cannot use SMT with multiple CPUs!") fatal("You cannot use SMT with multiple CPUs!")
np = options.num_cpus np = options.num_cpus
system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)], system = System(cpu = [CPUClass(cpu_id=i) for i in range(np)],
mem_mode = test_mem_mode, mem_mode = test_mem_mode,
mem_ranges = [AddrRange(options.mem_size)], mem_ranges = [AddrRange(options.mem_size)],
cache_line_size = options.cacheline_size) cache_line_size = options.cacheline_size)
@@ -220,7 +220,7 @@ if options.simpoint_profile:
if np > 1: if np > 1:
fatal("SimPoint generation not supported with more than one CPUs") fatal("SimPoint generation not supported with more than one CPUs")
for i in xrange(np): for i in range(np):
if options.smt: if options.smt:
system.cpu[i].workload = multiprocesses system.cpu[i].workload = multiprocesses
elif len(multiprocesses) == 1: elif len(multiprocesses) == 1:
@@ -246,7 +246,7 @@ if options.ruby:
system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock, system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = system.voltage_domain) voltage_domain = system.voltage_domain)
for i in xrange(np): for i in range(np):
ruby_port = system.ruby._cpu_ports[i] ruby_port = system.ruby._cpu_ports[i]
# Create the interrupt controller and connect its ports to Ruby # Create the interrupt controller and connect its ports to Ruby

View File

@@ -115,7 +115,7 @@ def construct(options, system, ruby_system):
cpu_sequencers = [] cpu_sequencers = []
cpuCluster = None cpuCluster = None
cpuCluster = Cluster(name="CPU Cluster", extBW = 8, intBW=8) # 16 GB/s cpuCluster = Cluster(name="CPU Cluster", extBW = 8, intBW=8) # 16 GB/s
for i in xrange((options.num_cpus + 1) / 2): for i in range((options.num_cpus + 1) // 2):
cp_cntrl = CPCntrl() cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system) cp_cntrl.create(options, ruby_system, system)

View File

@@ -470,7 +470,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
block_size_bits = int(math.log(options.cacheline_size, 2)) block_size_bits = int(math.log(options.cacheline_size, 2))
numa_bit = block_size_bits + dir_bits - 1 numa_bit = block_size_bits + dir_bits - 1
for i in xrange(options.num_dirs): for i in range(options.num_dirs):
dir_ranges = [] dir_ranges = []
for r in system.mem_ranges: for r in system.mem_ranges:
addr_range = m5.objects.AddrRange(r.start, size = r.size(), addr_range = m5.objects.AddrRange(r.start, size = r.size(),
@@ -511,7 +511,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
# For an odd number of CPUs, still create the right number of controllers # For an odd number of CPUs, still create the right number of controllers
cpuCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s cpuCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
for i in xrange((options.num_cpus + 1) / 2): for i in range((options.num_cpus + 1) // 2):
cp_cntrl = CPCntrl() cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system) cp_cntrl.create(options, ruby_system, system)
@@ -545,7 +545,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
gpuCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s gpuCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
for i in xrange(options.num_compute_units): for i in range(options.num_compute_units):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits, tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
number_of_TBEs = 2560) # max outstanding requests number_of_TBEs = 2560) # max outstanding requests
@@ -578,7 +578,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
gpuCluster.add(tcp_cntrl) gpuCluster.add(tcp_cntrl)
for i in xrange(options.num_sqc): for i in range(options.num_sqc):
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits) sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system) sqc_cntrl.create(options, ruby_system, system)
@@ -610,7 +610,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
# SQC also in GPU cluster # SQC also in GPU cluster
gpuCluster.add(sqc_cntrl) gpuCluster.add(sqc_cntrl)
for i in xrange(options.num_cp): for i in range(options.num_cp):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits, tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
number_of_TBEs = 2560) # max outstanding requests number_of_TBEs = 2560) # max outstanding requests
@@ -673,7 +673,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
# SQC also in GPU cluster # SQC also in GPU cluster
gpuCluster.add(sqc_cntrl) gpuCluster.add(sqc_cntrl)
for i in xrange(options.num_tccs): for i in range(options.num_tccs):
tcc_cntrl = TCCCntrl(TCC_select_num_bits = TCC_bits, tcc_cntrl = TCCCntrl(TCC_select_num_bits = TCC_bits,
number_of_TBEs = options.num_compute_units * 2560) number_of_TBEs = options.num_compute_units * 2560)

View File

@@ -429,7 +429,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
mainCluster = Cluster(intBW=crossbar_bw) mainCluster = Cluster(intBW=crossbar_bw)
else: else:
mainCluster = Cluster(intBW=8) # 16 GB/s mainCluster = Cluster(intBW=8) # 16 GB/s
for i in xrange(options.num_dirs): for i in range(options.num_dirs):
dir_cntrl = DirCntrl(noTCCdir = True, TCC_select_num_bits = TCC_bits) dir_cntrl = DirCntrl(noTCCdir = True, TCC_select_num_bits = TCC_bits)
dir_cntrl.create(options, ruby_system, system) dir_cntrl.create(options, ruby_system, system)
@@ -467,7 +467,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
cpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw) cpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
else: else:
cpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s cpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
for i in xrange((options.num_cpus + 1) / 2): for i in range((options.num_cpus + 1) // 2):
cp_cntrl = CPCntrl() cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system) cp_cntrl.create(options, ruby_system, system)
@@ -504,7 +504,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw) gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
else: else:
gpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s gpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
for i in xrange(options.num_compute_units): for i in range(options.num_compute_units):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits, tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1, issue_latency = 1,
@@ -543,7 +543,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
gpuCluster.add(tcp_cntrl) gpuCluster.add(tcp_cntrl)
for i in xrange(options.num_sqc): for i in range(options.num_sqc):
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits) sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system) sqc_cntrl.create(options, ruby_system, system)
@@ -569,7 +569,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
# SQC also in GPU cluster # SQC also in GPU cluster
gpuCluster.add(sqc_cntrl) gpuCluster.add(sqc_cntrl)
for i in xrange(options.num_cp): for i in range(options.num_cp):
tcp_ID = options.num_compute_units + i tcp_ID = options.num_compute_units + i
sqc_ID = options.num_sqc + i sqc_ID = options.num_sqc + i
@@ -623,7 +623,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
# SQC also in GPU cluster # SQC also in GPU cluster
gpuCluster.add(sqc_cntrl) gpuCluster.add(sqc_cntrl)
for i in xrange(options.num_tccs): for i in range(options.num_tccs):
tcc_cntrl = TCCCntrl(l2_response_latency = options.TCC_latency) tcc_cntrl = TCCCntrl(l2_response_latency = options.TCC_latency)
tcc_cntrl.create(options, ruby_system, system) tcc_cntrl.create(options, ruby_system, system)

View File

@@ -407,7 +407,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
# Clusters # Clusters
crossbar_bw = 16 * options.num_compute_units #Assuming a 2GHz clock crossbar_bw = 16 * options.num_compute_units #Assuming a 2GHz clock
mainCluster = Cluster(intBW = crossbar_bw) mainCluster = Cluster(intBW = crossbar_bw)
for i in xrange(options.num_dirs): for i in range(options.num_dirs):
dir_cntrl = DirCntrl(noTCCdir=True,TCC_select_num_bits = TCC_bits) dir_cntrl = DirCntrl(noTCCdir=True,TCC_select_num_bits = TCC_bits)
dir_cntrl.create(options, ruby_system, system) dir_cntrl.create(options, ruby_system, system)
@@ -440,7 +440,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
mainCluster.add(dir_cntrl) mainCluster.add(dir_cntrl)
cpuCluster = Cluster(extBW = crossbar_bw, intBW=crossbar_bw) cpuCluster = Cluster(extBW = crossbar_bw, intBW=crossbar_bw)
for i in xrange((options.num_cpus + 1) / 2): for i in range((options.num_cpus + 1) // 2):
cp_cntrl = CPCntrl() cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system) cp_cntrl.create(options, ruby_system, system)
@@ -473,7 +473,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
cpuCluster.add(cp_cntrl) cpuCluster.add(cp_cntrl)
gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw) gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
for i in xrange(options.num_compute_units): for i in range(options.num_compute_units):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits, tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1, issue_latency = 1,
@@ -510,7 +510,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
gpuCluster.add(tcp_cntrl) gpuCluster.add(tcp_cntrl)
for i in xrange(options.num_sqc): for i in range(options.num_sqc):
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits) sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system) sqc_cntrl.create(options, ruby_system, system)
@@ -539,7 +539,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
# Because of wire buffers, num_tccs must equal num_tccdirs # Because of wire buffers, num_tccs must equal num_tccdirs
numa_bit = 6 numa_bit = 6
for i in xrange(options.num_tccs): for i in range(options.num_tccs):
tcc_cntrl = TCCCntrl() tcc_cntrl = TCCCntrl()
tcc_cntrl.create(options, ruby_system, system) tcc_cntrl.create(options, ruby_system, system)

View File

@@ -469,7 +469,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
# For an odd number of CPUs, still create the right number of controllers # For an odd number of CPUs, still create the right number of controllers
crossbar_bw = 16 * options.num_compute_units #Assuming a 2GHz clock crossbar_bw = 16 * options.num_compute_units #Assuming a 2GHz clock
cpuCluster = Cluster(extBW = (crossbar_bw), intBW=crossbar_bw) cpuCluster = Cluster(extBW = (crossbar_bw), intBW=crossbar_bw)
for i in xrange((options.num_cpus + 1) / 2): for i in range((options.num_cpus + 1) // 2):
cp_cntrl = CPCntrl() cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system) cp_cntrl.create(options, ruby_system, system)
@@ -535,7 +535,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
cpuCluster.add(rb_cntrl) cpuCluster.add(rb_cntrl)
gpuCluster = Cluster(extBW = (crossbar_bw), intBW = crossbar_bw) gpuCluster = Cluster(extBW = (crossbar_bw), intBW = crossbar_bw)
for i in xrange(options.num_compute_units): for i in range(options.num_compute_units):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits, tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1, issue_latency = 1,
@@ -571,7 +571,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
gpuCluster.add(tcp_cntrl) gpuCluster.add(tcp_cntrl)
for i in xrange(options.num_sqc): for i in range(options.num_sqc):
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits) sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system) sqc_cntrl.create(options, ruby_system, system)
@@ -599,7 +599,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
numa_bit = 6 numa_bit = 6
for i in xrange(options.num_tccs): for i in range(options.num_tccs):
tcc_cntrl = TCCCntrl() tcc_cntrl = TCCCntrl()
tcc_cntrl.create(options, ruby_system, system) tcc_cntrl.create(options, ruby_system, system)

View File

@@ -66,7 +66,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
# controller constructors are called before the network constructor # controller constructors are called before the network constructor
# #
for i in xrange(options.num_cpus): for i in range(options.num_cpus):
# #
# First create the Ruby objects associated with this cpu # First create the Ruby objects associated with this cpu
# Only one cache exists for this protocol, so by default use the L1D # Only one cache exists for this protocol, so by default use the L1D

View File

@@ -83,8 +83,8 @@ def create_system(options, full_system, system, dma_ports, bootmem,
# Must create the individual controllers before the network to ensure the # Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor # controller constructors are called before the network constructor
# #
for i in xrange(options.num_clusters): for i in range(options.num_clusters):
for j in xrange(num_cpus_per_cluster): for j in range(num_cpus_per_cluster):
# #
# First create the Ruby objects associated with this cpu # First create the Ruby objects associated with this cpu
# #
@@ -164,7 +164,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
l1_cntrl.responseFromL2.slave = ruby_system.network.master l1_cntrl.responseFromL2.slave = ruby_system.network.master
for j in xrange(num_l2caches_per_cluster): for j in range(num_l2caches_per_cluster):
l2_cache = L2Cache(size = options.l2_size, l2_cache = L2Cache(size = options.l2_size,
assoc = options.l2_assoc, assoc = options.l2_assoc,
start_index_bit = l2_index_start) start_index_bit = l2_index_start)

View File

@@ -67,7 +67,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
l2_bits = int(math.log(options.num_l2caches, 2)) l2_bits = int(math.log(options.num_l2caches, 2))
block_size_bits = int(math.log(options.cacheline_size, 2)) block_size_bits = int(math.log(options.cacheline_size, 2))
for i in xrange(options.num_cpus): for i in range(options.num_cpus):
# #
# First create the Ruby objects associated with this cpu # First create the Ruby objects associated with this cpu
# #
@@ -135,7 +135,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
l2_index_start = block_size_bits + l2_bits l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches): for i in range(options.num_l2caches):
# #
# First create the Ruby objects associated with this cpu # First create the Ruby objects associated with this cpu
# #

View File

@@ -64,7 +64,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
# #
block_size_bits = int(math.log(options.cacheline_size, 2)) block_size_bits = int(math.log(options.cacheline_size, 2))
for i in xrange(options.num_cpus): for i in range(options.num_cpus):
# #
# First create the Ruby objects associated with this cpu # First create the Ruby objects associated with this cpu
# Only one cache exists for this protocol, so by default use the L1D # Only one cache exists for this protocol, so by default use the L1D

View File

@@ -248,7 +248,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
block_size_bits = int(math.log(options.cacheline_size, 2)) block_size_bits = int(math.log(options.cacheline_size, 2))
numa_bit = block_size_bits + dir_bits - 1 numa_bit = block_size_bits + dir_bits - 1
for i in xrange(options.num_dirs): for i in range(options.num_dirs):
dir_ranges = [] dir_ranges = []
for r in system.mem_ranges: for r in system.mem_ranges:
addr_range = m5.objects.AddrRange(r.start, size = r.size(), addr_range = m5.objects.AddrRange(r.start, size = r.size(),
@@ -294,7 +294,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
# For an odd number of CPUs, still create the right number of controllers # For an odd number of CPUs, still create the right number of controllers
cpuCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s cpuCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
for i in xrange((options.num_cpus + 1) / 2): for i in range((options.num_cpus + 1) // 2):
cp_cntrl = CPCntrl() cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system) cp_cntrl.create(options, ruby_system, system)

View File

@@ -67,7 +67,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
l2_bits = int(math.log(options.num_l2caches, 2)) l2_bits = int(math.log(options.num_l2caches, 2))
block_size_bits = int(math.log(options.cacheline_size, 2)) block_size_bits = int(math.log(options.cacheline_size, 2))
for i in xrange(options.num_cpus): for i in range(options.num_cpus):
# #
# First create the Ruby objects associated with this cpu # First create the Ruby objects associated with this cpu
# #
@@ -126,7 +126,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
l2_index_start = block_size_bits + l2_bits l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches): for i in range(options.num_l2caches):
# #
# First create the Ruby objects associated with this cpu # First create the Ruby objects associated with this cpu
# #

View File

@@ -80,7 +80,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
l2_bits = int(math.log(options.num_l2caches, 2)) l2_bits = int(math.log(options.num_l2caches, 2))
block_size_bits = int(math.log(options.cacheline_size, 2)) block_size_bits = int(math.log(options.cacheline_size, 2))
for i in xrange(options.num_cpus): for i in range(options.num_cpus):
# #
# First create the Ruby objects associated with this cpu # First create the Ruby objects associated with this cpu
# #
@@ -149,7 +149,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
l2_index_start = block_size_bits + l2_bits l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches): for i in range(options.num_l2caches):
# #
# First create the Ruby objects associated with this cpu # First create the Ruby objects associated with this cpu
# #

View File

@@ -74,7 +74,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
# #
block_size_bits = int(math.log(options.cacheline_size, 2)) block_size_bits = int(math.log(options.cacheline_size, 2))
for i in xrange(options.num_cpus): for i in range(options.num_cpus):
# #
# First create the Ruby objects associated with this cpu # First create the Ruby objects associated with this cpu
# #

View File

@@ -214,7 +214,7 @@ def create_system(options, full_system, system, piobus = None, dma_ports = [],
def create_directories(options, bootmem, ruby_system, system): def create_directories(options, bootmem, ruby_system, system):
dir_cntrl_nodes = [] dir_cntrl_nodes = []
for i in xrange(options.num_dirs): for i in range(options.num_dirs):
dir_cntrl = Directory_Controller() dir_cntrl = Directory_Controller()
dir_cntrl.version = i dir_cntrl.version = i
dir_cntrl.directory = RubyDirectoryMemory() dir_cntrl.directory = RubyDirectoryMemory()

View File

@@ -167,41 +167,41 @@ all_cpus = []
all_l1s = [] all_l1s = []
all_l1buses = [] all_l1buses = []
if options.timing: if options.timing:
clusters = [ Cluster() for i in xrange(options.numclusters)] clusters = [ Cluster() for i in range(options.numclusters)]
for j in xrange(options.numclusters): for j in range(options.numclusters):
clusters[j].id = j clusters[j].id = j
for cluster in clusters: for cluster in clusters:
cluster.clusterbus = L2XBar(clock=busFrequency) cluster.clusterbus = L2XBar(clock=busFrequency)
all_l1buses += [cluster.clusterbus] all_l1buses += [cluster.clusterbus]
cluster.cpus = [TimingSimpleCPU(cpu_id = i + cluster.id, cluster.cpus = [TimingSimpleCPU(cpu_id = i + cluster.id,
clock=options.frequency) clock=options.frequency)
for i in xrange(cpusPerCluster)] for i in range(cpusPerCluster)]
all_cpus += cluster.cpus all_cpus += cluster.cpus
cluster.l1 = L1(size=options.l1size, assoc = 4) cluster.l1 = L1(size=options.l1size, assoc = 4)
all_l1s += [cluster.l1] all_l1s += [cluster.l1]
elif options.detailed: elif options.detailed:
clusters = [ Cluster() for i in xrange(options.numclusters)] clusters = [ Cluster() for i in range(options.numclusters)]
for j in xrange(options.numclusters): for j in range(options.numclusters):
clusters[j].id = j clusters[j].id = j
for cluster in clusters: for cluster in clusters:
cluster.clusterbus = L2XBar(clock=busFrequency) cluster.clusterbus = L2XBar(clock=busFrequency)
all_l1buses += [cluster.clusterbus] all_l1buses += [cluster.clusterbus]
cluster.cpus = [DerivO3CPU(cpu_id = i + cluster.id, cluster.cpus = [DerivO3CPU(cpu_id = i + cluster.id,
clock=options.frequency) clock=options.frequency)
for i in xrange(cpusPerCluster)] for i in range(cpusPerCluster)]
all_cpus += cluster.cpus all_cpus += cluster.cpus
cluster.l1 = L1(size=options.l1size, assoc = 4) cluster.l1 = L1(size=options.l1size, assoc = 4)
all_l1s += [cluster.l1] all_l1s += [cluster.l1]
else: else:
clusters = [ Cluster() for i in xrange(options.numclusters)] clusters = [ Cluster() for i in range(options.numclusters)]
for j in xrange(options.numclusters): for j in range(options.numclusters):
clusters[j].id = j clusters[j].id = j
for cluster in clusters: for cluster in clusters:
cluster.clusterbus = L2XBar(clock=busFrequency) cluster.clusterbus = L2XBar(clock=busFrequency)
all_l1buses += [cluster.clusterbus] all_l1buses += [cluster.clusterbus]
cluster.cpus = [AtomicSimpleCPU(cpu_id = i + cluster.id, cluster.cpus = [AtomicSimpleCPU(cpu_id = i + cluster.id,
clock=options.frequency) clock=options.frequency)
for i in xrange(cpusPerCluster)] for i in range(cpusPerCluster)]
all_cpus += cluster.cpus all_cpus += cluster.cpus
cluster.l1 = L1(size=options.l1size, assoc = 4) cluster.l1 = L1(size=options.l1size, assoc = 4)
all_l1s += [cluster.l1] all_l1s += [cluster.l1]

View File

@@ -182,15 +182,15 @@ busFrequency = Frequency(options.frequency)
if options.timing: if options.timing:
cpus = [TimingSimpleCPU(cpu_id = i, cpus = [TimingSimpleCPU(cpu_id = i,
clock=options.frequency) clock=options.frequency)
for i in xrange(options.numcpus)] for i in range(options.numcpus)]
elif options.detailed: elif options.detailed:
cpus = [DerivO3CPU(cpu_id = i, cpus = [DerivO3CPU(cpu_id = i,
clock=options.frequency) clock=options.frequency)
for i in xrange(options.numcpus)] for i in range(options.numcpus)]
else: else:
cpus = [AtomicSimpleCPU(cpu_id = i, cpus = [AtomicSimpleCPU(cpu_id = i,
clock=options.frequency) clock=options.frequency)
for i in xrange(options.numcpus)] for i in range(options.numcpus)]
# ---------------------- # ----------------------
# Create a system, and add system wide objects # Create a system, and add system wide objects

View File

@@ -126,8 +126,8 @@ class MeshDirCorners_XY(SimpleTopology):
int_links = [] int_links = []
# East output to West input links (weight = 1) # East output to West input links (weight = 1)
for row in xrange(num_rows): for row in range(num_rows):
for col in xrange(num_columns): for col in range(num_columns):
if (col + 1 < num_columns): if (col + 1 < num_columns):
east_out = col + (row * num_columns) east_out = col + (row * num_columns)
west_in = (col + 1) + (row * num_columns) west_in = (col + 1) + (row * num_columns)
@@ -141,8 +141,8 @@ class MeshDirCorners_XY(SimpleTopology):
link_count += 1 link_count += 1
# West output to East input links (weight = 1) # West output to East input links (weight = 1)
for row in xrange(num_rows): for row in range(num_rows):
for col in xrange(num_columns): for col in range(num_columns):
if (col + 1 < num_columns): if (col + 1 < num_columns):
east_in = col + (row * num_columns) east_in = col + (row * num_columns)
west_out = (col + 1) + (row * num_columns) west_out = (col + 1) + (row * num_columns)
@@ -156,8 +156,8 @@ class MeshDirCorners_XY(SimpleTopology):
link_count += 1 link_count += 1
# North output to South input links (weight = 2) # North output to South input links (weight = 2)
for col in xrange(num_columns): for col in range(num_columns):
for row in xrange(num_rows): for row in range(num_rows):
if (row + 1 < num_rows): if (row + 1 < num_rows):
north_out = col + (row * num_columns) north_out = col + (row * num_columns)
south_in = col + ((row + 1) * num_columns) south_in = col + ((row + 1) * num_columns)
@@ -171,8 +171,8 @@ class MeshDirCorners_XY(SimpleTopology):
link_count += 1 link_count += 1
# South output to North input links (weight = 2) # South output to North input links (weight = 2)
for col in xrange(num_columns): for col in range(num_columns):
for row in xrange(num_rows): for row in range(num_rows):
if (row + 1 < num_rows): if (row + 1 < num_rows):
north_in = col + (row * num_columns) north_in = col + (row * num_columns)
south_out = col + ((row + 1) * num_columns) south_out = col + ((row + 1) * num_columns)

View File

@@ -78,7 +78,7 @@ class Mesh_XY(SimpleTopology):
# distributed across the network. # distributed across the network.
network_nodes = [] network_nodes = []
remainder_nodes = [] remainder_nodes = []
for node_index in xrange(len(nodes)): for node_index in range(len(nodes)):
if node_index < (len(nodes) - remainder): if node_index < (len(nodes) - remainder):
network_nodes.append(nodes[node_index]) network_nodes.append(nodes[node_index])
else: else:
@@ -110,8 +110,8 @@ class Mesh_XY(SimpleTopology):
int_links = [] int_links = []
# East output to West input links (weight = 1) # East output to West input links (weight = 1)
for row in xrange(num_rows): for row in range(num_rows):
for col in xrange(num_columns): for col in range(num_columns):
if (col + 1 < num_columns): if (col + 1 < num_columns):
east_out = col + (row * num_columns) east_out = col + (row * num_columns)
west_in = (col + 1) + (row * num_columns) west_in = (col + 1) + (row * num_columns)
@@ -125,8 +125,8 @@ class Mesh_XY(SimpleTopology):
link_count += 1 link_count += 1
# West output to East input links (weight = 1) # West output to East input links (weight = 1)
for row in xrange(num_rows): for row in range(num_rows):
for col in xrange(num_columns): for col in range(num_columns):
if (col + 1 < num_columns): if (col + 1 < num_columns):
east_in = col + (row * num_columns) east_in = col + (row * num_columns)
west_out = (col + 1) + (row * num_columns) west_out = (col + 1) + (row * num_columns)
@@ -140,8 +140,8 @@ class Mesh_XY(SimpleTopology):
link_count += 1 link_count += 1
# North output to South input links (weight = 2) # North output to South input links (weight = 2)
for col in xrange(num_columns): for col in range(num_columns):
for row in xrange(num_rows): for row in range(num_rows):
if (row + 1 < num_rows): if (row + 1 < num_rows):
north_out = col + (row * num_columns) north_out = col + (row * num_columns)
south_in = col + ((row + 1) * num_columns) south_in = col + ((row + 1) * num_columns)
@@ -155,8 +155,8 @@ class Mesh_XY(SimpleTopology):
link_count += 1 link_count += 1
# South output to North input links (weight = 2) # South output to North input links (weight = 2)
for col in xrange(num_columns): for col in range(num_columns):
for row in xrange(num_rows): for row in range(num_rows):
if (row + 1 < num_rows): if (row + 1 < num_rows):
north_in = col + (row * num_columns) north_in = col + (row * num_columns)
south_out = col + ((row + 1) * num_columns) south_out = col + ((row + 1) * num_columns)

View File

@@ -82,7 +82,7 @@ class Mesh_westfirst(SimpleTopology):
# distributed across the network. # distributed across the network.
network_nodes = [] network_nodes = []
remainder_nodes = [] remainder_nodes = []
for node_index in xrange(len(nodes)): for node_index in range(len(nodes)):
if node_index < (len(nodes) - remainder): if node_index < (len(nodes) - remainder):
network_nodes.append(nodes[node_index]) network_nodes.append(nodes[node_index])
else: else:
@@ -114,8 +114,8 @@ class Mesh_westfirst(SimpleTopology):
int_links = [] int_links = []
# East output to West input links (weight = 2) # East output to West input links (weight = 2)
for row in xrange(num_rows): for row in range(num_rows):
for col in xrange(num_columns): for col in range(num_columns):
if (col + 1 < num_columns): if (col + 1 < num_columns):
east_out = col + (row * num_columns) east_out = col + (row * num_columns)
west_in = (col + 1) + (row * num_columns) west_in = (col + 1) + (row * num_columns)
@@ -127,8 +127,8 @@ class Mesh_westfirst(SimpleTopology):
link_count += 1 link_count += 1
# West output to East input links (weight = 1) # West output to East input links (weight = 1)
for row in xrange(num_rows): for row in range(num_rows):
for col in xrange(num_columns): for col in range(num_columns):
if (col + 1 < num_columns): if (col + 1 < num_columns):
east_in = col + (row * num_columns) east_in = col + (row * num_columns)
west_out = (col + 1) + (row * num_columns) west_out = (col + 1) + (row * num_columns)
@@ -141,8 +141,8 @@ class Mesh_westfirst(SimpleTopology):
# North output to South input links (weight = 2) # North output to South input links (weight = 2)
for col in xrange(num_columns): for col in range(num_columns):
for row in xrange(num_rows): for row in range(num_rows):
if (row + 1 < num_rows): if (row + 1 < num_rows):
north_out = col + (row * num_columns) north_out = col + (row * num_columns)
south_in = col + ((row + 1) * num_columns) south_in = col + ((row + 1) * num_columns)
@@ -154,8 +154,8 @@ class Mesh_westfirst(SimpleTopology):
link_count += 1 link_count += 1
# South output to North input links (weight = 2) # South output to North input links (weight = 2)
for col in xrange(num_columns): for col in range(num_columns):
for row in xrange(num_rows): for row in range(num_rows):
if (row + 1 < num_rows): if (row + 1 < num_rows):
north_in = col + (row * num_columns) north_in = col + (row * num_columns)
south_out = col + ((row + 1) * num_columns) south_out = col + ((row + 1) * num_columns)

View File

@@ -63,8 +63,8 @@ class Pt2Pt(SimpleTopology):
link_count = len(nodes) link_count = len(nodes)
int_links = [] int_links = []
for i in xrange(len(nodes)): for i in range(len(nodes)):
for j in xrange(len(nodes)): for j in range(len(nodes)):
if (i != j): if (i != j):
link_count += 1 link_count += 1
int_links.append(IntLink(link_id=link_count, int_links.append(IntLink(link_id=link_count,