mem-cache: Implement AssociativeSet from AssociativeCache

AssociativeSet can reuse most of the generic cache library code with the
addition of a secure bit. This reduces duplicated code.

Change-Id: I008ef79b0dd5f95418a3fb79396aeb0a6c601784
This commit is contained in:
Pranith Kumar
2024-01-12 21:54:51 -05:00
parent f3bc10c168
commit 769f750eb9
16 changed files with 116 additions and 219 deletions

View File

@@ -51,9 +51,11 @@ AccessMapPatternMatching::AccessMapPatternMatching(
lowCacheHitThreshold(p.low_cache_hit_threshold),
epochCycles(p.epoch_cycles),
offChipMemoryLatency(p.offchip_memory_latency),
accessMapTable(p.access_map_table_assoc, p.access_map_table_entries,
p.access_map_table_indexing_policy,
accessMapTable("AccessMapTable",
p.access_map_table_entries,
p.access_map_table_assoc,
p.access_map_table_replacement_policy,
p.access_map_table_indexing_policy,
AccessMapEntry(hotZoneSize / blkSize)),
numGoodPrefetches(0), numTotalPrefetches(0), numRawCacheMisses(0),
numRawCacheHits(0), degree(startDegree), usefulDegree(startDegree),

View File

@@ -29,6 +29,9 @@
#ifndef __CACHE_PREFETCH_ASSOCIATIVE_SET_HH__
#define __CACHE_PREFETCH_ASSOCIATIVE_SET_HH__
#include <type_traits>
#include "base/cache/associative_cache.hh"
#include "mem/cache/replacement_policies/base.hh"
#include "mem/cache/tags/indexing_policies/base.hh"
#include "mem/cache/tags/tagged_entry.hh"
@@ -42,38 +45,27 @@ namespace gem5
* bool value is used as an additional tag data of the entry.
*/
template<class Entry>
class AssociativeSet
class AssociativeSet : public AssociativeCache<Entry>
{
static_assert(std::is_base_of_v<TaggedEntry, Entry>,
"Entry must derive from TaggedEntry");
/** Associativity of the container */
const int associativity;
/**
* Total number of entries, entries are organized in sets of the provided
* associativity. The number of associative sets is obtained by dividing
* numEntries by associativity.
*/
const int numEntries;
/** Pointer to the indexing policy */
BaseIndexingPolicy* const indexingPolicy;
/** Pointer to the replacement policy */
replacement_policy::Base* const replacementPolicy;
/** Vector containing the entries of the container */
std::vector<Entry> entries;
public:
/**
* Public constructor
* @param assoc number of elements in each associative set
* @param name Name of the cache
* @param num_entries total number of entries of the container, the number
* of sets can be calculated dividing this value by the 'assoc' value
* @param idx_policy indexing policy
* of sets can be calculated dividing this value by the 'assoc' value
* @param assoc number of elements in each associative set
* @param rpl_policy replacement policy
* @param idx_policy indexing policy
* @param init_val initial value of the elements of the set
*/
AssociativeSet(int assoc, int num_entries, BaseIndexingPolicy *idx_policy,
replacement_policy::Base *rpl_policy, Entry const &init_val = Entry());
AssociativeSet(const char *name, const size_t num_entries,
const size_t associativity_,
replacement_policy::Base *repl_policy,
BaseIndexingPolicy *indexing_policy,
Entry const &init_val = Entry());
/**
* Find an entry within the set
@@ -84,28 +76,6 @@ class AssociativeSet
*/
Entry* findEntry(Addr addr, bool is_secure) const;
/**
* Do an access to the entry, this is required to
* update the replacement information data.
* @param entry the accessed entry
*/
void accessEntry(Entry *entry);
/**
* Find a victim to be replaced
* @param addr key to select the possible victim
* @result entry to be victimized
*/
Entry* findVictim(Addr addr);
/**
* Find the set of entries that could be replaced given
* that we want to add a new entry with the provided key
* @param addr key to select the set of entries
* @result vector of candidates matching with the provided key
*/
std::vector<Entry *> getPossibleEntries(const Addr addr) const;
/**
* Indicate that an entry has just been inserted
* @param addr key of the container
@@ -114,54 +84,16 @@ class AssociativeSet
*/
void insertEntry(Addr addr, bool is_secure, Entry* entry);
/**
* Invalidate an entry and its respective replacement data.
*
* @param entry Entry to be invalidated.
*/
void invalidate(Entry* entry);
private:
// The following APIs are excluded since they lack the secure bit
using AssociativeCache<Entry>::getTag;
using AssociativeCache<Entry>::accessEntryByAddr;
using AssociativeCache<Entry>::findEntry;
using AssociativeCache<Entry>::insertEntry;
using AssociativeCache<Entry>::getPossibleEntries;
/** Iterator types */
using const_iterator = typename std::vector<Entry>::const_iterator;
using iterator = typename std::vector<Entry>::iterator;
/**
* Returns an iterator to the first entry of the dictionary
* @result iterator to the first element
*/
iterator begin()
{
return entries.begin();
}
/**
* Returns an iterator pointing to the end of the dictionary
* (placeholder element, should not be accessed)
* @result iterator to the end element
*/
iterator end()
{
return entries.end();
}
/**
* Returns an iterator to the first entry of the dictionary
* @result iterator to the first element
*/
const_iterator begin() const
{
return entries.begin();
}
/**
* Returns an iterator pointing to the end of the dictionary
* (placeholder element, should not be accessed)
* @result iterator to the end element
*/
const_iterator end() const
{
return entries.end();
}
using AssociativeCache<Entry>::replPolicy;
using AssociativeCache<Entry>::indexingPolicy;
};
} // namespace gem5

View File

@@ -35,93 +35,41 @@
namespace gem5
{
template<class Entry>
AssociativeSet<Entry>::AssociativeSet(int assoc, int num_entries,
BaseIndexingPolicy *idx_policy, replacement_policy::Base *rpl_policy,
Entry const &init_value)
: associativity(assoc), numEntries(num_entries), indexingPolicy(idx_policy),
replacementPolicy(rpl_policy), entries(numEntries, init_value)
template <class Entry>
AssociativeSet<Entry>::AssociativeSet(const char *name,
const size_t num_entries,
const size_t associativity_,
replacement_policy::Base *repl_policy,
BaseIndexingPolicy *indexing_policy,
Entry const &init_val)
: AssociativeCache<Entry>(name, num_entries, associativity_,
repl_policy, indexing_policy, init_val)
{
fatal_if(!isPowerOf2(num_entries), "The number of entries of an "
"AssociativeSet<> must be a power of 2");
fatal_if(!isPowerOf2(assoc), "The associativity of an AssociativeSet<> "
"must be a power of 2");
for (unsigned int entry_idx = 0; entry_idx < numEntries; entry_idx += 1) {
Entry* entry = &entries[entry_idx];
indexingPolicy->setEntry(entry, entry_idx);
entry->replacementData = replacementPolicy->instantiateEntry();
}
}
template<class Entry>
template <class Entry>
Entry*
AssociativeSet<Entry>::findEntry(Addr addr, bool is_secure) const
{
Addr tag = indexingPolicy->extractTag(addr);
const std::vector<ReplaceableEntry*> selected_entries =
indexingPolicy->getPossibleEntries(addr);
auto candidates = indexingPolicy->getPossibleEntries(addr);
for (const auto& location : selected_entries) {
Entry* entry = static_cast<Entry *>(location);
if ((entry->getTag() == tag) && entry->isValid() &&
entry->isSecure() == is_secure) {
for (auto candidate : candidates) {
Entry* entry = static_cast<Entry*>(candidate);
if (entry->matchTag(tag, is_secure)) {
return entry;
}
}
return nullptr;
}
template<class Entry>
void
AssociativeSet<Entry>::accessEntry(Entry *entry)
{
replacementPolicy->touch(entry->replacementData);
}
template<class Entry>
Entry*
AssociativeSet<Entry>::findVictim(Addr addr)
{
// Get possible entries to be victimized
const std::vector<ReplaceableEntry*> selected_entries =
indexingPolicy->getPossibleEntries(addr);
Entry* victim = static_cast<Entry*>(replacementPolicy->getVictim(
selected_entries));
// There is only one eviction for this replacement
invalidate(victim);
return victim;
}
template<class Entry>
std::vector<Entry *>
AssociativeSet<Entry>::getPossibleEntries(const Addr addr) const
{
std::vector<ReplaceableEntry *> selected_entries =
indexingPolicy->getPossibleEntries(addr);
std::vector<Entry *> entries(selected_entries.size(), nullptr);
unsigned int idx = 0;
for (auto &entry : selected_entries) {
entries[idx++] = static_cast<Entry *>(entry);
}
return entries;
}
template<class Entry>
void
AssociativeSet<Entry>::insertEntry(Addr addr, bool is_secure, Entry* entry)
{
entry->insert(indexingPolicy->extractTag(addr), is_secure);
replacementPolicy->reset(entry->replacementData);
}
template<class Entry>
void
AssociativeSet<Entry>::invalidate(Entry* entry)
{
entry->invalidate();
replacementPolicy->invalidate(entry->replacementData);
replPolicy->reset(entry->replacementData);
}
} // namespace gem5

View File

@@ -44,11 +44,15 @@ IndirectMemory::IndirectMemory(const IndirectMemoryPrefetcherParams &p)
shiftValues(p.shift_values), prefetchThreshold(p.prefetch_threshold),
streamCounterThreshold(p.stream_counter_threshold),
streamingDistance(p.streaming_distance),
prefetchTable(p.pt_table_assoc, p.pt_table_entries,
p.pt_table_indexing_policy, p.pt_table_replacement_policy,
prefetchTable((name() + ".PrefetchTable").c_str(),
p.pt_table_entries,
p.pt_table_assoc,
p.pt_table_replacement_policy,
p.pt_table_indexing_policy,
PrefetchTableEntry(p.num_indirect_counter_bits)),
ipd(p.ipd_table_assoc, p.ipd_table_entries, p.ipd_table_indexing_policy,
ipd((name() + ".IPD").c_str(), p.ipd_table_entries, p.ipd_table_assoc,
p.ipd_table_replacement_policy,
p.ipd_table_indexing_policy,
IndirectPatternDetectorEntry(p.addr_array_len, shiftValues.size())),
ipdEntryTrackingMisses(nullptr), byteOrder(p.sys->getGuestByteOrder())
{
@@ -81,8 +85,7 @@ IndirectMemory::calculatePrefetch(const PrefetchInfo &pfi,
}
} else {
// if misses are not being tracked, attempt to detect stream accesses
PrefetchTableEntry *pt_entry =
prefetchTable.findEntry(pc, false /* unused */);
PrefetchTableEntry *pt_entry = prefetchTable.findEntry(pc);
if (pt_entry != nullptr) {
prefetchTable.accessEntry(pt_entry);
@@ -156,7 +159,7 @@ IndirectMemory::calculatePrefetch(const PrefetchInfo &pfi,
} else {
pt_entry = prefetchTable.findVictim(pc);
assert(pt_entry != nullptr);
prefetchTable.insertEntry(pc, false /* unused */, pt_entry);
prefetchTable.insertEntry(pc, pt_entry);
pt_entry->address = addr;
pt_entry->secure = is_secure;
}
@@ -169,8 +172,7 @@ IndirectMemory::allocateOrUpdateIPDEntry(
{
// The address of the pt_entry is used to index the IPD
Addr ipd_entry_addr = (Addr) pt_entry;
IndirectPatternDetectorEntry *ipd_entry = ipd.findEntry(ipd_entry_addr,
false/* unused */);
IndirectPatternDetectorEntry *ipd_entry = ipd.findEntry(ipd_entry_addr);
if (ipd_entry != nullptr) {
ipd.accessEntry(ipd_entry);
if (!ipd_entry->secondIndexSet) {
@@ -187,7 +189,7 @@ IndirectMemory::allocateOrUpdateIPDEntry(
} else {
ipd_entry = ipd.findVictim(ipd_entry_addr);
assert(ipd_entry != nullptr);
ipd.insertEntry(ipd_entry_addr, false /* unused */, ipd_entry);
ipd.insertEntry(ipd_entry_addr, ipd_entry);
ipd_entry->idx1 = index;
ipdEntryTrackingMisses = ipd_entry;
}

View File

@@ -121,7 +121,7 @@ class IndirectMemory : public Queued
}
};
/** Prefetch table */
AssociativeSet<PrefetchTableEntry> prefetchTable;
AssociativeCache<PrefetchTableEntry> prefetchTable;
/** Indirect Pattern Detector entry */
struct IndirectPatternDetectorEntry : public TaggedEntry
@@ -160,7 +160,7 @@ class IndirectMemory : public Queued
}
};
/** Indirect Pattern Detector (IPD) table */
AssociativeSet<IndirectPatternDetectorEntry> ipd;
AssociativeCache<IndirectPatternDetectorEntry> ipd;
/** Entry currently tracking misses */
IndirectPatternDetectorEntry *ipdEntryTrackingMisses;

View File

@@ -44,19 +44,23 @@ IrregularStreamBuffer::IrregularStreamBuffer(
chunkSize(p.chunk_size),
prefetchCandidatesPerEntry(p.prefetch_candidates_per_entry),
degree(p.degree),
trainingUnit(p.training_unit_assoc, p.training_unit_entries,
p.training_unit_indexing_policy,
p.training_unit_replacement_policy),
psAddressMappingCache(p.address_map_cache_assoc,
trainingUnit((name() + ".TrainingUnit").c_str(),
p.training_unit_entries,
p.training_unit_assoc,
p.training_unit_replacement_policy,
p.training_unit_indexing_policy),
psAddressMappingCache((name() + ".PSAddressMappingCache").c_str(),
p.address_map_cache_entries,
p.ps_address_map_cache_indexing_policy,
p.address_map_cache_assoc,
p.ps_address_map_cache_replacement_policy,
p.ps_address_map_cache_indexing_policy,
AddressMappingEntry(prefetchCandidatesPerEntry,
p.num_counter_bits)),
spAddressMappingCache(p.address_map_cache_assoc,
spAddressMappingCache((name() + ".SPAddressMappingCache").c_str(),
p.address_map_cache_entries,
p.sp_address_map_cache_indexing_policy,
p.address_map_cache_assoc,
p.sp_address_map_cache_replacement_policy,
p.sp_address_map_cache_indexing_policy,
AddressMappingEntry(prefetchCandidatesPerEntry,
p.num_counter_bits)),
structuralAddressCounter(0)

View File

@@ -46,8 +46,9 @@ PIF::PIF(const PIFPrefetcherParams &p)
succSize(p.succ_spatial_region_bits),
maxCompactorEntries(p.compactor_entries),
historyBuffer(p.history_buffer_size),
index(p.index_assoc, p.index_entries, p.index_indexing_policy,
p.index_replacement_policy),
index((name() + ".PIFIndex").c_str(), p.index_entries, p.index_assoc,
p.index_replacement_policy,
p.index_indexing_policy),
streamAddressBuffer(p.stream_address_buffer_entries),
listenersPC()
{
@@ -176,15 +177,13 @@ PIF::notifyRetiredInst(const Addr pc)
// the 'iterator' table to point to the new entry
historyBuffer.push_back(spatialCompactor);
IndexEntry *idx_entry =
index.findEntry(spatialCompactor.trigger, false);
auto idx_entry = index.findEntry(spatialCompactor.trigger);
if (idx_entry != nullptr) {
index.accessEntry(idx_entry);
} else {
idx_entry = index.findVictim(spatialCompactor.trigger);
assert(idx_entry != nullptr);
index.insertEntry(spatialCompactor.trigger, false,
idx_entry);
index.insertEntry(spatialCompactor.trigger, idx_entry);
}
idx_entry->historyIt =
historyBuffer.getIterator(historyBuffer.tail());
@@ -220,7 +219,7 @@ PIF::calculatePrefetch(const PrefetchInfo &pfi,
// Check if a valid entry in the 'index' table is found and allocate a new
// active prediction stream
IndexEntry *idx_entry = index.findEntry(pc, /* unused */ false);
IndexEntry *idx_entry = index.findEntry(pc);
if (idx_entry != nullptr) {
index.accessEntry(idx_entry);

View File

@@ -138,11 +138,12 @@ class PIF : public Queued
{
HistoryBuffer::iterator historyIt;
};
/**
* The index table is a small cache-like structure that facilitates
* fast search of the history buffer.
*/
AssociativeSet<IndexEntry> index;
AssociativeCache<IndexEntry> index;
/**
* A Stream Address Buffer (SAB) tracks a window of consecutive

View File

@@ -48,12 +48,16 @@ SignaturePath::SignaturePath(const SignaturePathPrefetcherParams &p)
signatureBits(p.signature_bits),
prefetchConfidenceThreshold(p.prefetch_confidence_threshold),
lookaheadConfidenceThreshold(p.lookahead_confidence_threshold),
signatureTable(p.signature_table_assoc, p.signature_table_entries,
p.signature_table_indexing_policy,
p.signature_table_replacement_policy),
patternTable(p.pattern_table_assoc, p.pattern_table_entries,
p.pattern_table_indexing_policy,
signatureTable((name() + ".SignatureTable").c_str(),
p.signature_table_entries,
p.signature_table_assoc,
p.signature_table_replacement_policy,
p.signature_table_indexing_policy),
patternTable((name() + ".PatternTable").c_str(),
p.pattern_table_entries,
p.pattern_table_assoc,
p.pattern_table_replacement_policy,
p.pattern_table_indexing_policy,
PatternEntry(stridesPerPatternEntry, p.num_counter_bits))
{
fatal_if(prefetchConfidenceThreshold < 0,
@@ -188,7 +192,7 @@ SignaturePath::getSignatureEntry(Addr ppn, bool is_secure,
SignaturePath::PatternEntry &
SignaturePath::getPatternEntry(Addr signature)
{
PatternEntry* pattern_entry = patternTable.findEntry(signature, false);
PatternEntry* pattern_entry = patternTable.findEntry(signature);
if (pattern_entry != nullptr) {
// Signature found
patternTable.accessEntry(pattern_entry);
@@ -197,7 +201,7 @@ SignaturePath::getPatternEntry(Addr signature)
pattern_entry = patternTable.findVictim(signature);
assert(pattern_entry != nullptr);
patternTable.insertEntry(signature, false, pattern_entry);
patternTable.insertEntry(signature, pattern_entry);
}
return *pattern_entry;
}
@@ -273,7 +277,7 @@ SignaturePath::calculatePrefetch(const PrefetchInfo &pfi,
// confidence, these are prefetch candidates
// - select the entry with the highest counter as the "lookahead"
PatternEntry *current_pattern_entry =
patternTable.findEntry(current_signature, false);
patternTable.findEntry(current_signature);
PatternStrideEntry const *lookahead = nullptr;
if (current_pattern_entry != nullptr) {
unsigned long max_counter = 0;

View File

@@ -146,8 +146,9 @@ class SignaturePath : public Queued
*/
PatternStrideEntry &getStrideEntry(stride_t stride);
};
/** Pattern table */
AssociativeSet<PatternEntry> patternTable;
AssociativeCache<PatternEntry> patternTable;
/**
* Generates a new signature from an existing one and a new stride

View File

@@ -42,10 +42,11 @@ namespace prefetch
SignaturePathV2::SignaturePathV2(const SignaturePathPrefetcherV2Params &p)
: SignaturePath(p),
globalHistoryRegister(p.global_history_register_entries,
globalHistoryRegister((name() + ".GlobalHistoryRegister").c_str(),
p.global_history_register_entries,
p.global_history_register_indexing_policy,
p.global_history_register_entries,
p.global_history_register_replacement_policy,
p.global_history_register_indexing_policy,
GlobalHistoryEntry())
{
}
@@ -124,7 +125,7 @@ SignaturePathV2::handlePageCrossingLookahead(signature_t signature,
GlobalHistoryEntry *gh_entry = globalHistoryRegister.findVictim(0);
assert(gh_entry != nullptr);
// Any address value works, as it is never used
globalHistoryRegister.insertEntry(0, false, gh_entry);
globalHistoryRegister.insertEntry(0, gh_entry);
gh_entry->signature = signature;
gh_entry->lastBlock = last_offset;

View File

@@ -66,7 +66,7 @@ class SignaturePathV2 : public SignaturePath
delta(0) {}
};
/** Global History Register */
AssociativeSet<GlobalHistoryEntry> globalHistoryRegister;
AssociativeCache<GlobalHistoryEntry> globalHistoryRegister;
double calculateLookaheadConfidence(PatternEntry const &sig,
PatternStrideEntry const &lookahead) const override;

View File

@@ -42,16 +42,18 @@ STeMS::STeMS(const STeMSPrefetcherParams &p)
: Queued(p), spatialRegionSize(p.spatial_region_size),
spatialRegionSizeBits(floorLog2(p.spatial_region_size)),
reconstructionEntries(p.reconstruction_entries),
activeGenerationTable(p.active_generation_table_assoc,
activeGenerationTable((name() + ".ActiveGenerationTable").c_str(),
p.active_generation_table_entries,
p.active_generation_table_indexing_policy,
p.active_generation_table_assoc,
p.active_generation_table_replacement_policy,
p.active_generation_table_indexing_policy,
ActiveGenerationTableEntry(
spatialRegionSize / blkSize)),
patternSequenceTable(p.pattern_sequence_table_assoc,
patternSequenceTable((name() + ".PatternSequenceTable").c_str(),
p.pattern_sequence_table_entries,
p.pattern_sequence_table_indexing_policy,
p.pattern_sequence_table_assoc,
p.pattern_sequence_table_replacement_policy,
p.pattern_sequence_table_indexing_policy,
ActiveGenerationTableEntry(
spatialRegionSize / blkSize)),
rmob(p.region_miss_order_buffer_entries),
@@ -90,15 +92,12 @@ STeMS::checkForActiveGenerationsEnd(const CacheAccessor &cache)
}
if (generation_ended) {
// PST is indexed using the PC (secure bit is unused)
ActiveGenerationTableEntry *pst_entry =
patternSequenceTable.findEntry(pst_addr,
false /*unused*/);
auto pst_entry = patternSequenceTable.findEntry(pst_addr);
if (pst_entry == nullptr) {
// Typically an entry will not exist
pst_entry = patternSequenceTable.findVictim(pst_addr);
assert(pst_entry != nullptr);
patternSequenceTable.insertEntry(pst_addr,
false /*unused*/, pst_entry);
patternSequenceTable.insertEntry(pst_addr, pst_entry);
} else {
patternSequenceTable.accessEntry(pst_entry);
}
@@ -222,8 +221,7 @@ STeMS::reconstructSequence(
idx = 0;
for (auto it = rmob_it; it != rmob.end() && (idx < reconstructionEntries);
it++) {
ActiveGenerationTableEntry *pst_entry =
patternSequenceTable.findEntry(it->pstAddress, false /* unused */);
auto pst_entry = patternSequenceTable.findEntry(it->pstAddress);
if (pst_entry != nullptr) {
patternSequenceTable.accessEntry(pst_entry);
for (auto &seq_entry : pst_entry->sequence) {

View File

@@ -155,7 +155,7 @@ class STeMS : public Queued
/** Active Generation Table (AGT) */
AssociativeSet<ActiveGenerationTableEntry> activeGenerationTable;
/** Pattern Sequence Table (PST) */
AssociativeSet<ActiveGenerationTableEntry> patternSequenceTable;
AssociativeCache<ActiveGenerationTableEntry> patternSequenceTable;
/** Data type of the Region Miss Order Buffer entry */
struct RegionMissOrderBufferEntry

View File

@@ -86,7 +86,7 @@ Stride::Stride(const StridePrefetcherParams &p)
degree(p.degree),
distance(p.distance),
pcTableInfo(p.table_assoc, p.table_entries, p.table_indexing_policy,
p.table_replacement_policy)
p.table_replacement_policy)
{
}
@@ -105,16 +105,21 @@ Stride::findTable(int context)
Stride::PCTable*
Stride::allocateNewContext(int context)
{
std::string table_name = name() + ".PCTable" + std::to_string(context);
// Create new table
auto insertion_result = pcTables.insert(std::make_pair(context,
PCTable(pcTableInfo.assoc, pcTableInfo.numEntries,
pcTableInfo.indexingPolicy, pcTableInfo.replacementPolicy,
StrideEntry(initConfidence))));
auto ins_result = pcTables.emplace(std::piecewise_construct,
std::forward_as_tuple(context),
std::forward_as_tuple(table_name.c_str(),
pcTableInfo.numEntries,
pcTableInfo.assoc,
pcTableInfo.replacementPolicy,
pcTableInfo.indexingPolicy,
StrideEntry(initConfidence)));
DPRINTF(HWPrefetch, "Adding context %i with stride entries\n", context);
// Get iterator to new pc table, and then return a pointer to the new table
return &(insertion_result.first->second);
return &(ins_result.first->second);
}
void

View File

@@ -125,8 +125,8 @@ class Stride : public Queued
replacement_policy::Base* const replacementPolicy;
PCTableInfo(int assoc, int num_entries,
BaseIndexingPolicy* indexing_policy,
replacement_policy::Base* repl_policy)
BaseIndexingPolicy* indexing_policy,
replacement_policy::Base* repl_policy)
: assoc(assoc), numEntries(num_entries),
indexingPolicy(indexing_policy), replacementPolicy(repl_policy)
{