base, mem-cache: Make the AssociativeCache more generic (#1446)

This PR implements #1429. It mainly achieves this with the following
changes:

1) The IndexingPolicy is now a templated SimObject to make its APIs work
with different data types.
As an example, look at getPossibleEntries, which currently requires an
Addr object, whereas we want to be able to call the method with
different keys depending on the Tag.
2) The AssociativeCache extracts type information from the Entry
template parameter.
This means any AssociativeCache entry will have to define the following
types:

KeyType = This is the data type used for lookups (in its simplest case,
it is Addr)
IndexingPolicy = This is the base indexing policy SimObject

As an example, the PR also reworks the TaggedEntry to be
AssociativeCache compliant. This ultimately allows us to remove the
weird overloading of cache querying methods with the secure flag, and
to remove the AssociativeSet, which was providing such a weird
interface.
As mentioned in the [base, mem-cache: Rewrite TaggedEntry
code](7ee9790464)
commit, further cleanup is needed. TaggedEntry
is really a misleading name, as its sole difference from the CacheEntry
(which is also tagged) is the presence of
the secure bit. A better name should be chosen.
This commit is contained in:
Giacomo Travaglini
2024-08-23 21:07:43 +01:00
committed by GitHub
53 changed files with 718 additions and 691 deletions

View File

@@ -1,4 +1,16 @@
/*
* Copyright (c) 2024 Arm Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2024 Pranith Kumar
* Copyright (c) 2018 Metempsy Technology Consulting
* All rights reserved
@@ -48,12 +60,14 @@ namespace gem5
template <typename Entry>
class AssociativeCache : public Named
{
static_assert(std::is_base_of_v<CacheEntry, Entry>,
"Entry should be derived from CacheEntry");
protected:
static_assert(std::is_base_of_v<ReplaceableEntry, Entry>,
"Entry should be derived from ReplaceableEntry");
typedef replacement_policy::Base BaseReplacementPolicy;
protected:
typedef typename Entry::IndexingPolicy IndexingPolicy;
typedef typename Entry::KeyType KeyType;
/** Associativity of the cache. */
size_t associativity;
@@ -62,7 +76,7 @@ class AssociativeCache : public Named
BaseReplacementPolicy *replPolicy;
/** Indexing policy of the cache */
BaseIndexingPolicy *indexingPolicy;
IndexingPolicy *indexingPolicy;
/** The entries */
std::vector<Entry> entries;
@@ -100,7 +114,7 @@ class AssociativeCache : public Named
AssociativeCache(const char *name, const size_t num_entries,
const size_t associativity_,
BaseReplacementPolicy *repl_policy,
BaseIndexingPolicy *indexing_policy,
IndexingPolicy *indexing_policy,
Entry const &init_val = Entry())
: Named(std::string(name)),
associativity(associativity_),
@@ -137,7 +151,7 @@ class AssociativeCache : public Named
init(const size_t num_entries,
const size_t associativity_,
BaseReplacementPolicy *_repl_policy,
BaseIndexingPolicy *_indexing_policy,
IndexingPolicy *_indexing_policy,
Entry const &init_val = Entry())
{
associativity = associativity_;
@@ -148,27 +162,16 @@ class AssociativeCache : public Named
initParams(num_entries, associativity);
}
/**
* Get the tag for the addr
* @param addr Addr to get the tag for
* @return Tag for the address
*/
virtual Addr
getTag(const Addr addr) const
{
return indexingPolicy->extractTag(addr);
}
/**
* Do an access to the entry if it exists.
* This is required to update the replacement information data.
* @param addr key to the entry
* @param key key to the entry
* @return The entry if it exists
*/
virtual Entry*
accessEntryByAddr(const Addr addr)
accessEntry(const KeyType &key)
{
auto entry = findEntry(addr);
auto entry = findEntry(key);
if (entry) {
accessEntry(entry);
@@ -189,20 +192,18 @@ class AssociativeCache : public Named
/**
* Find an entry within the set
* @param addr key element
* @param key key element
* @return returns a pointer to the wanted entry or nullptr if it does not
* exist.
*/
virtual Entry*
findEntry(const Addr addr) const
findEntry(const KeyType &key) const
{
auto tag = getTag(addr);
auto candidates = indexingPolicy->getPossibleEntries(addr);
auto candidates = indexingPolicy->getPossibleEntries(key);
for (auto candidate : candidates) {
Entry *entry = static_cast<Entry*>(candidate);
if (entry->matchTag(tag)) {
if (entry->match(key)) {
return entry;
}
}
@@ -212,13 +213,13 @@ class AssociativeCache : public Named
/**
* Find a victim to be replaced
* @param addr key to select the possible victim
* @param key key to select the possible victim
* @result entry to be victimized
*/
virtual Entry*
findVictim(const Addr addr)
findVictim(const KeyType &key)
{
auto candidates = indexingPolicy->getPossibleEntries(addr);
auto candidates = indexingPolicy->getPossibleEntries(key);
auto victim = static_cast<Entry*>(replPolicy->getVictim(candidates));
@@ -241,13 +242,13 @@ class AssociativeCache : public Named
/**
* Indicate that an entry has just been inserted
* @param addr key of the container
* @param key key of the container
* @param entry pointer to the container entry to be inserted
*/
virtual void
insertEntry(const Addr addr, Entry *entry)
insertEntry(const KeyType &key, Entry *entry)
{
entry->insert(indexingPolicy->extractTag(addr));
entry->insert(key);
replPolicy->reset(entry->replacementData);
}
@@ -258,10 +259,10 @@ class AssociativeCache : public Named
* @result vector of candidates matching with the provided key
*/
std::vector<Entry *>
getPossibleEntries(const Addr addr) const
getPossibleEntries(const KeyType &key) const
{
std::vector<ReplaceableEntry *> selected_entries =
indexingPolicy->getPossibleEntries(addr);
indexingPolicy->getPossibleEntries(key);
std::vector<Entry *> entries;

View File

@@ -1,4 +1,16 @@
/**
* Copyright (c) 2024 Arm Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2024 - Pranith Kumar
* Copyright (c) 2020 Inria
* All rights reserved.
@@ -35,6 +47,7 @@
#include "base/cprintf.hh"
#include "base/types.hh"
#include "mem/cache/replacement_policies/replaceable_entry.hh"
#include "mem/cache/tags/indexing_policies/base.hh"
namespace gem5
{
@@ -46,7 +59,13 @@ namespace gem5
class CacheEntry : public ReplaceableEntry
{
public:
CacheEntry() = default;
using IndexingPolicy = BaseIndexingPolicy;
using KeyType = Addr;
using TagExtractor = std::function<Addr(Addr)>;
CacheEntry(TagExtractor ext)
: extractTag(ext), valid(false), tag(MaxAddr)
{}
~CacheEntry() = default;
/**
@@ -66,26 +85,26 @@ class CacheEntry : public ReplaceableEntry
/**
* Checks if the given tag information corresponds to this entry's.
*
* @param tag The tag value to compare to.
* @param addr The address value to be compared before tag is extracted
* @return True if the tag information match this entry's.
*/
virtual bool
matchTag(const Addr tag) const
match(const Addr addr) const
{
return isValid() && (getTag() == tag);
return isValid() && (getTag() == extractTag(addr));
}
/**
* Insert the block by assigning it a tag and marking it valid. Touches
* block if it hadn't been touched previously.
*
* @param tag The tag value.
* @param addr The address value.
*/
virtual void
insert(const Addr tag)
insert(const Addr addr)
{
setValid();
setTag(tag);
setTag(extractTag(addr));
}
/** Invalidate the block. Its contents are no longer valid. */
@@ -120,15 +139,18 @@ class CacheEntry : public ReplaceableEntry
}
private:
/** Callback used to extract the tag from the entry */
TagExtractor extractTag;
/**
* Valid bit. The contents of this entry are only valid if this bit is set.
* @sa invalidate()
* @sa insert()
*/
bool valid{false};
bool valid;
/** The entry's tag. */
Addr tag{MaxAddr};
Addr tag;
};
} // namespace gem5

22
src/mem/cache/base.cc vendored
View File

@@ -125,7 +125,8 @@ BaseCache::BaseCache(const BaseCacheParams &p, unsigned blk_size)
// forward snoops is overridden in init() once we can query
// whether the connected requestor is actually snooping or not
tempBlock = new TempCacheBlk(blkSize);
tempBlock = new TempCacheBlk(blkSize,
genTagExtractor(tags->params().indexing_policy));
tags->tagsInit();
if (prefetcher)
@@ -416,7 +417,7 @@ BaseCache::recvTimingReq(PacketPtr pkt)
// Now that the write is here, mark it accessible again, so the
// write will succeed. LockedRMWReadReq brings the block in in
// exclusive mode, so we know it was previously writable.
CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
CacheBlk *blk = tags->findBlock({pkt->getAddr(), pkt->isSecure()});
assert(blk && blk->isValid());
assert(!blk->isSet(CacheBlk::WritableBit) &&
!blk->isSet(CacheBlk::ReadableBit));
@@ -550,7 +551,7 @@ BaseCache::recvTimingResp(PacketPtr pkt)
// the response is an invalidation
assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());
CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
CacheBlk *blk = tags->findBlock({pkt->getAddr(), pkt->isSecure()});
if (is_fill && !is_error) {
DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
@@ -719,7 +720,7 @@ BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
Addr blk_addr = pkt->getBlockAddr(blkSize);
bool is_secure = pkt->isSecure();
CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
CacheBlk *blk = tags->findBlock({pkt->getAddr(), is_secure});
MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
pkt->pushLabel(name());
@@ -910,7 +911,7 @@ BaseCache::getNextQueueEntry()
PacketPtr pkt = prefetcher->getPacket();
if (pkt) {
Addr pf_addr = pkt->getBlockAddr(blkSize);
if (tags->findBlock(pf_addr, pkt->isSecure())) {
if (tags->findBlock({pf_addr, pkt->isSecure()})) {
DPRINTF(HWPrefetch, "Prefetch %#x has hit in cache, "
"dropped.\n", pf_addr);
prefetcher->pfHitInCache();
@@ -1031,8 +1032,9 @@ BaseCache::updateCompressionData(CacheBlk *&blk, const uint64_t* data,
bool victim_itself = false;
CacheBlk *victim = nullptr;
if (replaceExpansions || is_data_contraction) {
victim = tags->findVictim(regenerateBlkAddr(blk),
blk->isSecure(), compression_size, evict_blks,
victim = tags->findVictim(
{regenerateBlkAddr(blk), blk->isSecure()},
compression_size, evict_blks,
blk->getPartitionId());
// It is valid to return nullptr if there is no victim
@@ -1546,7 +1548,7 @@ BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
// cache... just use temporary storage to complete the
// current request and then get rid of it
blk = tempBlock;
tempBlock->insert(addr, is_secure);
tempBlock->insert({addr, is_secure});
DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
is_secure ? "s" : "ns");
}
@@ -1647,7 +1649,7 @@ BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
partitionManager->readPacketPartitionID(pkt) : 0;
// Find replacement victim
std::vector<CacheBlk*> evict_blks;
CacheBlk *victim = tags->findVictim(addr, is_secure, blk_size_bits,
CacheBlk *victim = tags->findVictim({addr, is_secure}, blk_size_bits,
evict_blks, partition_id);
// It is valid to return nullptr if there is no victim
@@ -1911,7 +1913,7 @@ BaseCache::sendMSHRQueuePacket(MSHR* mshr)
}
}
CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
CacheBlk *blk = tags->findBlock({mshr->blkAddr, mshr->isSecure});
// either a prefetch that is not present upstream, or a normal
// MSHR request, proceed to get the packet to send downstream

View File

@@ -1283,17 +1283,17 @@ class BaseCache : public ClockedObject
}
bool inCache(Addr addr, bool is_secure) const {
return tags->findBlock(addr, is_secure);
return tags->findBlock({addr, is_secure});
}
bool hasBeenPrefetched(Addr addr, bool is_secure) const {
CacheBlk *block = tags->findBlock(addr, is_secure);
CacheBlk *block = tags->findBlock({addr, is_secure});
return block && block->wasPrefetched();
}
bool hasBeenPrefetched(Addr addr, bool is_secure,
RequestorID requestor) const {
CacheBlk *block = tags->findBlock(addr, is_secure);
CacheBlk *block = tags->findBlock({addr, is_secure});
return block && block->wasPrefetched() &&
(block->getSrcRequestorId() == requestor);
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2010-2019 ARM Limited
* Copyright (c) 2010-2019, 2024 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
@@ -172,7 +172,7 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());
// flush and invalidate any existing block
CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
CacheBlk *old_blk(tags->findBlock({pkt->getAddr(), pkt->isSecure()}));
if (old_blk && old_blk->isValid()) {
BaseCache::evictBlock(old_blk, writebacks);
}
@@ -1268,7 +1268,7 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
}
bool is_secure = pkt->isSecure();
CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
CacheBlk *blk = tags->findBlock({pkt->getAddr(), is_secure});
Addr blk_addr = pkt->getBlockAddr(blkSize);
MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
@@ -1383,7 +1383,7 @@ Cache::recvAtomicSnoop(PacketPtr pkt)
return 0;
}
CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
CacheBlk *blk = tags->findBlock({pkt->getAddr(), pkt->isSecure()});
uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
return snoop_delay + lookupLatency * clockPeriod();
}
@@ -1429,7 +1429,7 @@ Cache::sendMSHRQueuePacket(MSHR* mshr)
// we should never have hardware prefetches to allocated
// blocks
assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));
assert(!tags->findBlock({mshr->blkAddr, mshr->isSecure}));
// We need to check the caches above us to verify that
// they don't have a copy of this block in the dirty state

View File

@@ -47,14 +47,14 @@ namespace gem5
{
void
CacheBlk::insert(const Addr tag, const bool is_secure,
CacheBlk::insert(const KeyType &tag,
const int src_requestor_ID, const uint32_t task_ID,
const uint64_t partition_id)
{
// Make sure that the block has been properly invalidated
assert(!isValid());
insert(tag, is_secure);
insert(tag);
// Set source requestor ID
setSrcRequestorId(src_requestor_ID);

View File

@@ -153,7 +153,7 @@ class CacheBlk : public TaggedEntry
std::list<Lock> lockList;
public:
CacheBlk()
CacheBlk() : TaggedEntry()
{
invalidate();
}
@@ -177,7 +177,7 @@ class CacheBlk : public TaggedEntry
assert(!isValid());
assert(other.isValid());
insert(other.getTag(), other.isSecure());
insert({other.getTag(), other.isSecure()});
if (other.wasPrefetched()) {
setPrefetched();
@@ -323,7 +323,7 @@ class CacheBlk : public TaggedEntry
* @param task_ID The new task ID.
* @param partition_id The source partition ID.
*/
void insert(const Addr tag, const bool is_secure,
void insert(const KeyType &tag,
const int src_requestor_ID, const uint32_t task_ID,
const uint64_t partition_id);
using TaggedEntry::insert;
@@ -526,9 +526,10 @@ class TempCacheBlk final : public CacheBlk
* Creates a temporary cache block, with its own storage.
* @param size The size (in bytes) of this cache block.
*/
TempCacheBlk(unsigned size) : CacheBlk()
TempCacheBlk(unsigned size, TagExtractor ext) : CacheBlk()
{
data = new uint8_t[size];
registerTagExtractor(ext);
}
TempCacheBlk(const TempCacheBlk&) = delete;
using CacheBlk::operator=;
@@ -545,10 +546,10 @@ class TempCacheBlk final : public CacheBlk
}
void
insert(const Addr addr, const bool is_secure) override
insert(const KeyType &tag) override
{
CacheBlk::insert(addr, is_secure);
_addr = addr;
CacheBlk::insert(tag);
_addr = tag.address;
}
/**

View File

@@ -36,7 +36,6 @@
#include "base/intmath.hh"
#include "base/logging.hh"
#include "debug/CacheComp.hh"
#include "mem/cache/prefetch/associative_set_impl.hh"
#include "params/FrequentValuesCompressor.hh"
namespace gem5
@@ -53,7 +52,8 @@ FrequentValues::FrequentValues(const Params &p)
numSamples(p.num_samples), takenSamples(0), phase(SAMPLING),
VFT((name() + ".VFT").c_str(),
p.vft_entries, p.vft_assoc, p.vft_replacement_policy,
p.vft_indexing_policy, VFTEntry(counterBits)),
p.vft_indexing_policy,
VFTEntry(counterBits, genTagExtractor(p.vft_indexing_policy))),
codeGenerationEvent([this]{ phase = COMPRESSING; }, name())
{
fatal_if((numVFTEntries - 1) > mask(chunkSizeBits),

View File

@@ -129,8 +129,8 @@ class FrequentValues : public Base
*/
SatCounter32 counter;
VFTEntry(std::size_t num_bits)
: CacheEntry(), value(0), counter(num_bits)
VFTEntry(std::size_t num_bits, TagExtractor ext)
: CacheEntry(ext), value(0), counter(num_bits)
{
}

View File

@@ -39,6 +39,7 @@
from m5.objects.ClockedObject import ClockedObject
from m5.objects.IndexingPolicies import *
from m5.objects.ReplacementPolicies import *
from m5.objects.Tags import *
from m5.params import *
from m5.proxy import *
from m5.SimObject import *
@@ -164,7 +165,7 @@ class QueuedPrefetcher(BasePrefetcher):
)
class StridePrefetcherHashedSetAssociative(SetAssociative):
class StridePrefetcherHashedSetAssociative(TaggedSetAssociative):
type = "StridePrefetcherHashedSetAssociative"
cxx_class = "gem5::prefetch::StridePrefetcherHashedSetAssociative"
cxx_header = "mem/cache/prefetch/stride.hh"
@@ -208,7 +209,7 @@ class StridePrefetcher(QueuedPrefetcher):
table_assoc = Param.Int(4, "Associativity of the PC table")
table_entries = Param.MemorySize("64", "Number of entries of the PC table")
table_indexing_policy = Param.BaseIndexingPolicy(
table_indexing_policy = Param.TaggedIndexingPolicy(
StridePrefetcherHashedSetAssociative(
entry_size=1, assoc=Parent.table_assoc, size=Parent.table_entries
),
@@ -235,8 +236,8 @@ class IndirectMemoryPrefetcher(QueuedPrefetcher):
"16", "Number of entries of the Prefetch Table"
)
pt_table_assoc = Param.Unsigned(16, "Associativity of the Prefetch Table")
pt_table_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
pt_table_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.pt_table_assoc,
size=Parent.pt_table_entries,
@@ -256,8 +257,8 @@ class IndirectMemoryPrefetcher(QueuedPrefetcher):
ipd_table_assoc = Param.Unsigned(
4, "Associativity of the Indirect Pattern Detector"
)
ipd_table_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
ipd_table_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.ipd_table_assoc,
size=Parent.ipd_table_entries,
@@ -295,8 +296,8 @@ class SignaturePathPrefetcher(QueuedPrefetcher):
signature_table_assoc = Param.Unsigned(
2, "Associativity of the signature table"
)
signature_table_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
signature_table_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.signature_table_assoc,
size=Parent.signature_table_entries,
@@ -319,8 +320,8 @@ class SignaturePathPrefetcher(QueuedPrefetcher):
strides_per_pattern_entry = Param.Unsigned(
4, "Number of strides stored in each pattern entry"
)
pattern_table_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
pattern_table_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.pattern_table_assoc,
size=Parent.pattern_table_entries,
@@ -355,8 +356,8 @@ class SignaturePathPrefetcherV2(SignaturePathPrefetcher):
global_history_register_entries = Param.MemorySize(
"8", "Number of entries of global history register"
)
global_history_register_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
global_history_register_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.global_history_register_entries,
size=Parent.global_history_register_entries,
@@ -391,8 +392,8 @@ class AccessMapPatternMatching(ClockedObject):
access_map_table_assoc = Param.Unsigned(
8, "Associativity of the access map table"
)
access_map_table_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
access_map_table_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.access_map_table_assoc,
size=Parent.access_map_table_entries,
@@ -487,8 +488,8 @@ class IrregularStreamBufferPrefetcher(QueuedPrefetcher):
training_unit_entries = Param.MemorySize(
"128", "Number of entries of the training unit"
)
training_unit_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
training_unit_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.training_unit_assoc,
size=Parent.training_unit_entries,
@@ -508,8 +509,8 @@ class IrregularStreamBufferPrefetcher(QueuedPrefetcher):
address_map_cache_entries = Param.MemorySize(
"128", "Number of entries of the PS/SP AMCs"
)
ps_address_map_cache_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
ps_address_map_cache_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.address_map_cache_assoc,
size=Parent.address_map_cache_entries,
@@ -520,8 +521,8 @@ class IrregularStreamBufferPrefetcher(QueuedPrefetcher):
LRURP(),
"Replacement policy of the Physical-to-Structural Address Map Cache",
)
sp_address_map_cache_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
sp_address_map_cache_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.address_map_cache_assoc,
size=Parent.address_map_cache_entries,
@@ -626,8 +627,8 @@ class STeMSPrefetcher(QueuedPrefetcher):
active_generation_table_assoc = Param.Unsigned(
64, "Associativity of the active generation table"
)
active_generation_table_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
active_generation_table_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.active_generation_table_assoc,
size=Parent.active_generation_table_entries,
@@ -644,8 +645,8 @@ class STeMSPrefetcher(QueuedPrefetcher):
pattern_sequence_table_assoc = Param.Unsigned(
16384, "Associativity of the pattern sequence table"
)
pattern_sequence_table_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
pattern_sequence_table_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.pattern_sequence_table_assoc,
size=Parent.pattern_sequence_table_entries,
@@ -694,8 +695,8 @@ class PIFPrefetcher(QueuedPrefetcher):
index_entries = Param.MemorySize("64", "Number of entries in the index")
index_assoc = Param.Unsigned(64, "Associativity of the index")
index_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
index_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1, assoc=Parent.index_assoc, size=Parent.index_entries
),
"Indexing policy of the index",

View File

@@ -29,7 +29,6 @@
#include "mem/cache/prefetch/access_map_pattern_matching.hh"
#include "debug/HWPrefetch.hh"
#include "mem/cache/prefetch/associative_set_impl.hh"
#include "params/AMPMPrefetcher.hh"
#include "params/AccessMapPatternMatching.hh"
@@ -56,7 +55,8 @@ AccessMapPatternMatching::AccessMapPatternMatching(
p.access_map_table_assoc,
p.access_map_table_replacement_policy,
p.access_map_table_indexing_policy,
AccessMapEntry(hotZoneSize / blkSize)),
AccessMapEntry(hotZoneSize / blkSize,
genTagExtractor(p.access_map_table_indexing_policy))),
numGoodPrefetches(0), numTotalPrefetches(0), numRawCacheMisses(0),
numRawCacheHits(0), degree(startDegree), usefulDegree(startDegree),
epochEvent([this]{ processEpochEvent(); }, name())
@@ -109,14 +109,15 @@ AccessMapPatternMatching::AccessMapEntry *
AccessMapPatternMatching::getAccessMapEntry(Addr am_addr,
bool is_secure)
{
AccessMapEntry *am_entry = accessMapTable.findEntry(am_addr, is_secure);
const TaggedEntry::KeyType key{am_addr, is_secure};
AccessMapEntry *am_entry = accessMapTable.findEntry(key);
if (am_entry != nullptr) {
accessMapTable.accessEntry(am_entry);
} else {
am_entry = accessMapTable.findVictim(am_addr);
am_entry = accessMapTable.findVictim(key);
assert(am_entry != nullptr);
accessMapTable.insertEntry(am_addr, is_secure, am_entry);
accessMapTable.insertEntry(key, am_entry);
}
return am_entry;
}

View File

@@ -38,8 +38,9 @@
#ifndef __MEM_CACHE_PREFETCH_ACCESS_MAP_PATTERN_MATCHING_HH__
#define __MEM_CACHE_PREFETCH_ACCESS_MAP_PATTERN_MATCHING_HH__
#include "mem/cache/prefetch/associative_set.hh"
#include "base/cache/associative_cache.hh"
#include "mem/cache/prefetch/queued.hh"
#include "mem/cache/tags/tagged_entry.hh"
#include "mem/packet.hh"
#include "sim/clocked_object.hh"
@@ -94,9 +95,10 @@ class AccessMapPatternMatching : public ClockedObject
/** vector containing the state of the cachelines in this zone */
std::vector<AccessMapState> states;
AccessMapEntry(size_t num_entries)
AccessMapEntry(size_t num_entries, TagExtractor ext)
: TaggedEntry(), states(num_entries, AM_INIT)
{
registerTagExtractor(ext);
}
void
@@ -109,7 +111,7 @@ class AccessMapPatternMatching : public ClockedObject
}
};
/** Access map table */
AssociativeSet<AccessMapEntry> accessMapTable;
AssociativeCache<AccessMapEntry> accessMapTable;
/**
* Number of good prefetches

View File

@@ -1,99 +0,0 @@
/**
* Copyright (c) 2018 Metempsy Technology Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __CACHE_PREFETCH_ASSOCIATIVE_SET_HH__
#define __CACHE_PREFETCH_ASSOCIATIVE_SET_HH__
#include <type_traits>
#include "base/cache/associative_cache.hh"
#include "mem/cache/replacement_policies/base.hh"
#include "mem/cache/tags/indexing_policies/base.hh"
#include "mem/cache/tags/tagged_entry.hh"
namespace gem5
{
/**
* Associative container based on the previosuly defined Entry type
* Each element is indexed by a key of type Addr, an additional
* bool value is used as an additional tag data of the entry.
*/
template<class Entry>
class AssociativeSet : public AssociativeCache<Entry>
{
static_assert(std::is_base_of_v<TaggedEntry, Entry>,
"Entry must derive from TaggedEntry");
public:
/**
* Public constructor
* @param name Name of the cache
* @param num_entries total number of entries of the container, the number
* of sets can be calculated dividing this balue by the 'assoc' value
* @param assoc number of elements in each associative set
* @param rpl_policy replacement policy
* @param idx_policy indexing policy
* @param init_val initial value of the elements of the set
*/
AssociativeSet(const char *name, const size_t num_entries,
const size_t associativity_,
replacement_policy::Base *repl_policy,
BaseIndexingPolicy *indexing_policy,
Entry const &init_val = Entry());
/**
* Find an entry within the set
* @param addr key element
* @param is_secure tag element
* @return returns a pointer to the wanted entry or nullptr if it does not
* exist.
*/
Entry* findEntry(Addr addr, bool is_secure) const;
/**
* Indicate that an entry has just been inserted
* @param addr key of the container
* @param is_secure tag component of the container
* @param entry pointer to the container entry to be inserted
*/
void insertEntry(Addr addr, bool is_secure, Entry* entry);
private:
// The following APIs are excluded since they lack the secure bit
using AssociativeCache<Entry>::getTag;
using AssociativeCache<Entry>::accessEntryByAddr;
using AssociativeCache<Entry>::findEntry;
using AssociativeCache<Entry>::insertEntry;
using AssociativeCache<Entry>::replPolicy;
using AssociativeCache<Entry>::indexingPolicy;
};
} // namespace gem5
#endif//__CACHE_PREFETCH_ASSOCIATIVE_SET_HH__

View File

@@ -1,77 +0,0 @@
/**
* Copyright (c) 2018 Metempsy Technology Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __CACHE_PREFETCH_ASSOCIATIVE_SET_IMPL_HH__
#define __CACHE_PREFETCH_ASSOCIATIVE_SET_IMPL_HH__
#include "base/intmath.hh"
#include "mem/cache/prefetch/associative_set.hh"
namespace gem5
{
template <class Entry>
AssociativeSet<Entry>::AssociativeSet(const char *name,
const size_t num_entries,
const size_t associativity_,
replacement_policy::Base *repl_policy,
BaseIndexingPolicy *indexing_policy,
Entry const &init_val)
: AssociativeCache<Entry>(name, num_entries, associativity_,
repl_policy, indexing_policy, init_val)
{
}
template <class Entry>
Entry*
AssociativeSet<Entry>::findEntry(Addr addr, bool is_secure) const
{
Addr tag = indexingPolicy->extractTag(addr);
auto candidates = indexingPolicy->getPossibleEntries(addr);
for (auto candidate : candidates) {
Entry* entry = static_cast<Entry*>(candidate);
if (entry->matchTag(tag, is_secure)) {
return entry;
}
}
return nullptr;
}
/**
 * Install an entry for the given address / secure state.
 *
 * The entry records the tag derived from the address together with the
 * secure bit, and the replacement policy state is reset so the entry is
 * treated as freshly inserted.
 *
 * @param addr      Address the entry will represent.
 * @param is_secure Secure state recorded in the entry.
 * @param entry     Slot (typically a victim from findVictim) to fill in.
 */
template<class Entry>
void
AssociativeSet<Entry>::insertEntry(Addr addr, bool is_secure, Entry* entry)
{
    // Record the tag/secure state in the entry itself...
    const Addr tag = indexingPolicy->extractTag(addr);
    entry->insert(tag, is_secure);

    // ...then tell the replacement policy this slot was just (re)filled.
    replPolicy->reset(entry->replacementData);
}
} // namespace gem5
#endif//__CACHE_PREFETCH_ASSOCIATIVE_SET_IMPL_HH__

View File

@@ -29,7 +29,6 @@
#include "mem/cache/prefetch/delta_correlating_prediction_tables.hh"
#include "debug/HWPrefetch.hh"
#include "mem/cache/prefetch/associative_set_impl.hh"
#include "params/DCPTPrefetcher.hh"
#include "params/DeltaCorrelatingPredictionTables.hh"
@@ -44,7 +43,9 @@ DeltaCorrelatingPredictionTables::DeltaCorrelatingPredictionTables(
deltaBits(p.delta_bits), deltaMaskBits(p.delta_mask_bits),
table((name() + "DCPT").c_str(), p.table_entries,
p.table_assoc, p.table_replacement_policy,
p.table_indexing_policy, DCPTEntry(p.deltas_per_entry))
p.table_indexing_policy,
DCPTEntry(p.deltas_per_entry,
genTagExtractor(p.table_indexing_policy)))
{
}

View File

@@ -76,8 +76,8 @@ class DeltaCorrelatingPredictionTables : public SimObject
* Constructor
* @param num_deltas number of deltas stored in the entry
*/
DCPTEntry(unsigned int num_deltas)
: CacheEntry(), lastAddress(0), deltas(num_deltas)
DCPTEntry(unsigned int num_deltas, TagExtractor ext)
: CacheEntry(ext), lastAddress(0), deltas(num_deltas)
{
}

View File

@@ -29,7 +29,6 @@
#include "mem/cache/prefetch/indirect_memory.hh"
#include "mem/cache/base.hh"
#include "mem/cache/prefetch/associative_set_impl.hh"
#include "params/IndirectMemoryPrefetcher.hh"
namespace gem5
@@ -49,11 +48,13 @@ IndirectMemory::IndirectMemory(const IndirectMemoryPrefetcherParams &p)
p.pt_table_assoc,
p.pt_table_replacement_policy,
p.pt_table_indexing_policy,
PrefetchTableEntry(p.num_indirect_counter_bits)),
PrefetchTableEntry(p.num_indirect_counter_bits,
genTagExtractor(p.pt_table_indexing_policy))),
ipd((name() + ".IPD").c_str(), p.ipd_table_entries, p.ipd_table_assoc,
p.ipd_table_replacement_policy,
p.ipd_table_indexing_policy,
IndirectPatternDetectorEntry(p.addr_array_len, shiftValues.size())),
IndirectPatternDetectorEntry(p.addr_array_len, shiftValues.size(),
genTagExtractor(p.ipd_table_indexing_policy))),
ipdEntryTrackingMisses(nullptr), byteOrder(p.sys->getGuestByteOrder())
{
}
@@ -85,7 +86,8 @@ IndirectMemory::calculatePrefetch(const PrefetchInfo &pfi,
}
} else {
// if misses are not being tracked, attempt to detect stream accesses
PrefetchTableEntry *pt_entry = prefetchTable.findEntry(pc, is_secure);
const PrefetchTableEntry::KeyType key{pc, is_secure};
PrefetchTableEntry *pt_entry = prefetchTable.findEntry(key);
if (pt_entry != nullptr) {
prefetchTable.accessEntry(pt_entry);
@@ -157,9 +159,9 @@ IndirectMemory::calculatePrefetch(const PrefetchInfo &pfi,
}
}
} else {
pt_entry = prefetchTable.findVictim(pc);
pt_entry = prefetchTable.findVictim(key);
assert(pt_entry != nullptr);
prefetchTable.insertEntry(pc, pt_entry-> secure, pt_entry);
prefetchTable.insertEntry(key, pt_entry);
pt_entry->address = addr;
pt_entry->secure = is_secure;
}
@@ -172,7 +174,8 @@ IndirectMemory::allocateOrUpdateIPDEntry(
{
// The address of the pt_entry is used to index the IPD
Addr ipd_entry_addr = (Addr) pt_entry;
IndirectPatternDetectorEntry *ipd_entry = ipd.findEntry(ipd_entry_addr);
const IndirectPatternDetectorEntry::KeyType key{ipd_entry_addr, false};
IndirectPatternDetectorEntry *ipd_entry = ipd.findEntry(key);
if (ipd_entry != nullptr) {
ipd.accessEntry(ipd_entry);
if (!ipd_entry->secondIndexSet) {
@@ -187,9 +190,9 @@ IndirectMemory::allocateOrUpdateIPDEntry(
ipdEntryTrackingMisses = nullptr;
}
} else {
ipd_entry = ipd.findVictim(ipd_entry_addr);
ipd_entry = ipd.findVictim(key);
assert(ipd_entry != nullptr);
ipd.insertEntry(ipd_entry_addr, ipd_entry);
ipd.insertEntry(key, ipd_entry);
ipd_entry->idx1 = index;
ipdEntryTrackingMisses = ipd_entry;
}

View File

@@ -41,9 +41,10 @@
#include <vector>
#include "base/cache/associative_cache.hh"
#include "base/sat_counter.hh"
#include "mem/cache/prefetch/associative_set.hh"
#include "mem/cache/prefetch/queued.hh"
#include "mem/cache/tags/tagged_entry.hh"
namespace gem5
{
@@ -98,12 +99,14 @@ class IndirectMemory : public Queued
*/
bool increasedIndirectCounter;
PrefetchTableEntry(unsigned indirect_counter_bits)
PrefetchTableEntry(unsigned indirect_counter_bits, TagExtractor ext)
: TaggedEntry(), address(0), secure(false), streamCounter(0),
enabled(false), index(0), baseAddr(0), shift(0),
indirectCounter(indirect_counter_bits),
increasedIndirectCounter(false)
{}
{
registerTagExtractor(ext);
}
void
invalidate() override
@@ -121,7 +124,7 @@ class IndirectMemory : public Queued
}
};
/** Prefetch table */
AssociativeSet<PrefetchTableEntry> prefetchTable;
AssociativeCache<PrefetchTableEntry> prefetchTable;
/** Indirect Pattern Detector entry */
struct IndirectPatternDetectorEntry : public TaggedEntry
@@ -142,11 +145,13 @@ class IndirectMemory : public Queued
std::vector<std::vector<Addr>> baseAddr;
IndirectPatternDetectorEntry(unsigned int num_addresses,
unsigned int num_shifts)
unsigned int num_shifts,
TagExtractor ext)
: TaggedEntry(), idx1(0), idx2(0), secondIndexSet(false),
numMisses(0),
baseAddr(num_addresses, std::vector<Addr>(num_shifts))
{
registerTagExtractor(ext);
}
void

View File

@@ -29,7 +29,6 @@
#include "mem/cache/prefetch/irregular_stream_buffer.hh"
#include "debug/HWPrefetch.hh"
#include "mem/cache/prefetch/associative_set_impl.hh"
#include "params/IrregularStreamBufferPrefetcher.hh"
namespace gem5
@@ -45,24 +44,27 @@ IrregularStreamBuffer::IrregularStreamBuffer(
prefetchCandidatesPerEntry(p.prefetch_candidates_per_entry),
degree(p.degree),
trainingUnit((name() + ".TrainingUnit").c_str(),
p.training_unit_entries,
p.training_unit_assoc,
p.training_unit_replacement_policy,
p.training_unit_indexing_policy),
p.training_unit_entries,
p.training_unit_assoc,
p.training_unit_replacement_policy,
p.training_unit_indexing_policy,
TrainingUnitEntry(genTagExtractor(p.training_unit_indexing_policy))),
psAddressMappingCache((name() + ".PSAddressMappingCache").c_str(),
p.address_map_cache_entries,
p.address_map_cache_assoc,
p.ps_address_map_cache_replacement_policy,
p.ps_address_map_cache_indexing_policy,
AddressMappingEntry(prefetchCandidatesPerEntry,
p.num_counter_bits)),
p.address_map_cache_entries,
p.address_map_cache_assoc,
p.ps_address_map_cache_replacement_policy,
p.ps_address_map_cache_indexing_policy,
AddressMappingEntry(prefetchCandidatesPerEntry,
p.num_counter_bits,
genTagExtractor(p.ps_address_map_cache_indexing_policy))),
spAddressMappingCache((name() + ".SPAddressMappingCache").c_str(),
p.address_map_cache_entries,
p.address_map_cache_assoc,
p.sp_address_map_cache_replacement_policy,
p.sp_address_map_cache_indexing_policy,
AddressMappingEntry(prefetchCandidatesPerEntry,
p.num_counter_bits)),
p.address_map_cache_entries,
p.address_map_cache_assoc,
p.sp_address_map_cache_replacement_policy,
p.sp_address_map_cache_indexing_policy,
AddressMappingEntry(prefetchCandidatesPerEntry,
p.num_counter_bits,
genTagExtractor(p.sp_address_map_cache_indexing_policy))),
structuralAddressCounter(0)
{
assert(isPowerOf2(prefetchCandidatesPerEntry));
@@ -84,7 +86,8 @@ IrregularStreamBuffer::calculatePrefetch(const PrefetchInfo &pfi,
// Training, if the entry exists, then we found a correlation between
// the entry lastAddress (named as correlated_addr_A) and the address of
// the current access (named as correlated_addr_B)
TrainingUnitEntry *entry = trainingUnit.findEntry(pc, is_secure);
const TrainingUnitEntry::KeyType key{pc, is_secure};
TrainingUnitEntry *entry = trainingUnit.findEntry(key);
bool correlated_addr_found = false;
Addr correlated_addr_A = 0;
Addr correlated_addr_B = 0;
@@ -94,10 +97,10 @@ IrregularStreamBuffer::calculatePrefetch(const PrefetchInfo &pfi,
correlated_addr_A = entry->lastAddress;
correlated_addr_B = addr;
} else {
entry = trainingUnit.findVictim(pc);
entry = trainingUnit.findVictim(key);
assert(entry != nullptr);
trainingUnit.insertEntry(pc, is_secure, entry);
trainingUnit.insertEntry(key, entry);
}
// Update the entry
entry->lastAddress = addr;
@@ -148,15 +151,15 @@ IrregularStreamBuffer::calculatePrefetch(const PrefetchInfo &pfi,
// (given the structured address S, prefetch S+1, S+2, .. up to S+degree)
Addr amc_address = addr / prefetchCandidatesPerEntry;
Addr map_index = addr % prefetchCandidatesPerEntry;
AddressMappingEntry *ps_am = psAddressMappingCache.findEntry(amc_address,
is_secure);
AddressMappingEntry *ps_am = psAddressMappingCache.findEntry(
{amc_address, is_secure});
if (ps_am != nullptr) {
AddressMapping &mapping = ps_am->mappings[map_index];
if (mapping.counter > 0) {
Addr sp_address = mapping.address / prefetchCandidatesPerEntry;
Addr sp_index = mapping.address % prefetchCandidatesPerEntry;
AddressMappingEntry *sp_am =
spAddressMappingCache.findEntry(sp_address, is_secure);
spAddressMappingCache.findEntry({sp_address, is_secure});
if (sp_am == nullptr) {
// The entry has been evicted, can not generate prefetches
return;
@@ -182,15 +185,15 @@ IrregularStreamBuffer::getPSMapping(Addr paddr, bool is_secure)
Addr amc_address = paddr / prefetchCandidatesPerEntry;
Addr map_index = paddr % prefetchCandidatesPerEntry;
AddressMappingEntry *ps_entry =
psAddressMappingCache.findEntry(amc_address, is_secure);
psAddressMappingCache.findEntry({amc_address, is_secure});
if (ps_entry != nullptr) {
// A PS-AMC line already exists
psAddressMappingCache.accessEntry(ps_entry);
} else {
ps_entry = psAddressMappingCache.findVictim(amc_address);
ps_entry = psAddressMappingCache.findVictim({amc_address, is_secure});
assert(ps_entry != nullptr);
psAddressMappingCache.insertEntry(amc_address, is_secure, ps_entry);
psAddressMappingCache.insertEntry({amc_address, is_secure}, ps_entry);
}
return ps_entry->mappings[map_index];
}
@@ -202,14 +205,14 @@ IrregularStreamBuffer::addStructuralToPhysicalEntry(
Addr amc_address = structural_address / prefetchCandidatesPerEntry;
Addr map_index = structural_address % prefetchCandidatesPerEntry;
AddressMappingEntry *sp_entry =
spAddressMappingCache.findEntry(amc_address, is_secure);
spAddressMappingCache.findEntry({amc_address, is_secure});
if (sp_entry != nullptr) {
spAddressMappingCache.accessEntry(sp_entry);
} else {
sp_entry = spAddressMappingCache.findVictim(amc_address);
sp_entry = spAddressMappingCache.findVictim({amc_address, is_secure});
assert(sp_entry != nullptr);
spAddressMappingCache.insertEntry(amc_address, is_secure, sp_entry);
spAddressMappingCache.insertEntry({amc_address, is_secure}, sp_entry);
}
AddressMapping &mapping = sp_entry->mappings[map_index];
mapping.address = physical_address;

View File

@@ -38,10 +38,11 @@
#ifndef __MEM_CACHE_PREFETCH_IRREGULAR_STREAM_BUFFER_HH__
#define __MEM_CACHE_PREFETCH_IRREGULAR_STREAM_BUFFER_HH__
#include "base/cache/associative_cache.hh"
#include "base/callback.hh"
#include "base/sat_counter.hh"
#include "mem/cache/prefetch/associative_set.hh"
#include "mem/cache/prefetch/queued.hh"
#include "mem/cache/tags/tagged_entry.hh"
namespace gem5
{
@@ -66,11 +67,16 @@ class IrregularStreamBuffer : public Queued
*/
struct TrainingUnitEntry : public TaggedEntry
{
TrainingUnitEntry(TagExtractor ext)
: TaggedEntry(), lastAddress(0), lastAddressSecure(false)
{
registerTagExtractor(ext);
}
Addr lastAddress;
bool lastAddressSecure;
};
/** Map of PCs to Training unit entries */
AssociativeSet<TrainingUnitEntry> trainingUnit;
AssociativeCache<TrainingUnitEntry> trainingUnit;
/** Address Mapping entry, holds an address and a confidence counter */
struct AddressMapping
@@ -88,9 +94,11 @@ class IrregularStreamBuffer : public Queued
struct AddressMappingEntry : public TaggedEntry
{
std::vector<AddressMapping> mappings;
AddressMappingEntry(size_t num_mappings, unsigned counter_bits)
AddressMappingEntry(size_t num_mappings, unsigned counter_bits,
TagExtractor ext)
: TaggedEntry(), mappings(num_mappings, counter_bits)
{
registerTagExtractor(ext);
}
void
@@ -105,9 +113,9 @@ class IrregularStreamBuffer : public Queued
};
/** Physical-to-Structured mappings table */
AssociativeSet<AddressMappingEntry> psAddressMappingCache;
AssociativeCache<AddressMappingEntry> psAddressMappingCache;
/** Structured-to-Physical mappings table */
AssociativeSet<AddressMappingEntry> spAddressMappingCache;
AssociativeCache<AddressMappingEntry> spAddressMappingCache;
/**
* Counter of allocated structural addresses, increased by "chunkSize",
* each time a new structured address is allocated

View File

@@ -31,7 +31,6 @@
#include <utility>
#include "debug/HWPrefetch.hh"
#include "mem/cache/prefetch/associative_set_impl.hh"
#include "params/PIFPrefetcher.hh"
namespace gem5
@@ -48,7 +47,8 @@ PIF::PIF(const PIFPrefetcherParams &p)
historyBuffer(p.history_buffer_size),
index((name() + ".PIFIndex").c_str(), p.index_entries, p.index_assoc,
p.index_replacement_policy,
p.index_indexing_policy),
p.index_indexing_policy,
IndexEntry(genTagExtractor(p.index_indexing_policy))),
streamAddressBuffer(p.stream_address_buffer_entries),
listenersPC()
{
@@ -176,16 +176,15 @@ PIF::notifyRetiredInst(const Addr pc)
// Insert the spatial entry into the history buffer and update
// the 'iterator' table to point to the new entry
historyBuffer.push_back(spatialCompactor);
constexpr bool is_secure = false;
auto idx_entry = index.findEntry(spatialCompactor.trigger,
is_secure);
const IndexEntry::KeyType key{spatialCompactor.trigger, false};
auto idx_entry = index.findEntry(key);
if (idx_entry != nullptr) {
index.accessEntry(idx_entry);
} else {
idx_entry = index.findVictim(spatialCompactor.trigger);
idx_entry = index.findVictim(key);
assert(idx_entry != nullptr);
index.insertEntry(spatialCompactor.trigger, is_secure,
idx_entry);
index.insertEntry(key, idx_entry);
}
idx_entry->historyIt =
historyBuffer.getIterator(historyBuffer.tail());
@@ -222,7 +221,7 @@ PIF::calculatePrefetch(const PrefetchInfo &pfi,
// Check if a valid entry in the 'index' table is found and allocate a new
// active prediction stream
IndexEntry *idx_entry = index.findEntry(pc, is_secure);
IndexEntry *idx_entry = index.findEntry({pc, is_secure});
if (idx_entry != nullptr) {
index.accessEntry(idx_entry);

View File

@@ -40,9 +40,10 @@
#include <deque>
#include <vector>
#include "base/cache/associative_cache.hh"
#include "base/circular_queue.hh"
#include "mem/cache/prefetch/associative_set.hh"
#include "mem/cache/prefetch/queued.hh"
#include "mem/cache/tags/tagged_entry.hh"
namespace gem5
{
@@ -136,6 +137,12 @@ class PIF : public Queued
struct IndexEntry : public TaggedEntry
{
IndexEntry(TagExtractor ext)
: TaggedEntry()
{
registerTagExtractor(ext);
}
HistoryBuffer::iterator historyIt;
};
@@ -143,7 +150,7 @@ class PIF : public Queued
* The index table is a small cache-like structure that facilitates
* fast search of the history buffer.
*/
AssociativeSet<IndexEntry> index;
AssociativeCache<IndexEntry> index;
/**
* A Stream Address Buffer (SAB) tracks a window of consecutive

View File

@@ -32,7 +32,6 @@
#include <climits>
#include "debug/HWPrefetch.hh"
#include "mem/cache/prefetch/associative_set_impl.hh"
#include "params/SignaturePathPrefetcher.hh"
namespace gem5
@@ -49,16 +48,18 @@ SignaturePath::SignaturePath(const SignaturePathPrefetcherParams &p)
prefetchConfidenceThreshold(p.prefetch_confidence_threshold),
lookaheadConfidenceThreshold(p.lookahead_confidence_threshold),
signatureTable((name() + ".SignatureTable").c_str(),
p.signature_table_entries,
p.signature_table_assoc,
p.signature_table_replacement_policy,
p.signature_table_indexing_policy),
p.signature_table_entries,
p.signature_table_assoc,
p.signature_table_replacement_policy,
p.signature_table_indexing_policy,
SignatureEntry(genTagExtractor(p.signature_table_indexing_policy))),
patternTable((name() + ".PatternTable").c_str(),
p.pattern_table_entries,
p.pattern_table_assoc,
p.pattern_table_replacement_policy,
p.pattern_table_indexing_policy,
PatternEntry(stridesPerPatternEntry, p.num_counter_bits))
p.pattern_table_entries,
p.pattern_table_assoc,
p.pattern_table_replacement_policy,
p.pattern_table_indexing_policy,
PatternEntry(stridesPerPatternEntry, p.num_counter_bits,
genTagExtractor(p.pattern_table_indexing_policy)))
{
fatal_if(prefetchConfidenceThreshold < 0,
"The prefetch confidence threshold must be greater than 0\n");
@@ -169,20 +170,21 @@ SignaturePath::getSignatureEntry(Addr ppn, bool is_secure,
stride_t block, bool &miss, stride_t &stride,
double &initial_confidence)
{
SignatureEntry* signature_entry = signatureTable.findEntry(ppn, is_secure);
const SignatureEntry::KeyType key{ppn, is_secure};
SignatureEntry* signature_entry = signatureTable.findEntry(key);
if (signature_entry != nullptr) {
signatureTable.accessEntry(signature_entry);
miss = false;
stride = block - signature_entry->lastBlock;
} else {
signature_entry = signatureTable.findVictim(ppn);
signature_entry = signatureTable.findVictim(key);
assert(signature_entry != nullptr);
// Sets signature_entry->signature, initial_confidence, and stride
handleSignatureTableMiss(block, signature_entry->signature,
initial_confidence, stride);
signatureTable.insertEntry(ppn, is_secure, signature_entry);
signatureTable.insertEntry(key, signature_entry);
miss = true;
}
signature_entry->lastBlock = block;
@@ -192,17 +194,17 @@ SignaturePath::getSignatureEntry(Addr ppn, bool is_secure,
SignaturePath::PatternEntry &
SignaturePath::getPatternEntry(Addr signature)
{
constexpr bool is_secure = false;
PatternEntry* pattern_entry = patternTable.findEntry(signature, is_secure);
const PatternEntry::KeyType key{signature, false};
PatternEntry* pattern_entry = patternTable.findEntry(key);
if (pattern_entry != nullptr) {
// Signature found
patternTable.accessEntry(pattern_entry);
} else {
// Signature not found
pattern_entry = patternTable.findVictim(signature);
pattern_entry = patternTable.findVictim(key);
assert(pattern_entry != nullptr);
patternTable.insertEntry(signature, is_secure, pattern_entry);
patternTable.insertEntry(key, pattern_entry);
}
return *pattern_entry;
}
@@ -278,7 +280,7 @@ SignaturePath::calculatePrefetch(const PrefetchInfo &pfi,
// confidence, these are prefetch candidates
// - select the entry with the highest counter as the "lookahead"
PatternEntry *current_pattern_entry =
patternTable.findEntry(current_signature, is_secure);
patternTable.findEntry({current_signature, is_secure});
PatternStrideEntry const *lookahead = nullptr;
if (current_pattern_entry != nullptr) {
unsigned long max_counter = 0;

View File

@@ -40,9 +40,10 @@
#ifndef __MEM_CACHE_PREFETCH_SIGNATURE_PATH_HH__
#define __MEM_CACHE_PREFETCH_SIGNATURE_PATH_HH__
#include "base/cache/associative_cache.hh"
#include "base/sat_counter.hh"
#include "mem/cache/prefetch/associative_set.hh"
#include "mem/cache/prefetch/queued.hh"
#include "mem/cache/tags/tagged_entry.hh"
#include "mem/packet.hh"
namespace gem5
@@ -79,11 +80,14 @@ class SignaturePath : public Queued
signature_t signature;
/** Last accessed block within a page */
stride_t lastBlock;
SignatureEntry() : signature(0), lastBlock(0)
{}
SignatureEntry(TagExtractor ext)
: TaggedEntry(), signature(0), lastBlock(0)
{
registerTagExtractor(ext);
}
};
/** Signature table */
AssociativeSet<SignatureEntry> signatureTable;
AssociativeCache<SignatureEntry> signatureTable;
/** A stride entry with its counter */
struct PatternStrideEntry
@@ -102,10 +106,12 @@ class SignaturePath : public Queued
std::vector<PatternStrideEntry> strideEntries;
/** use counter, used by SPPv2 */
SatCounter8 counter;
PatternEntry(size_t num_strides, unsigned counter_bits)
PatternEntry(size_t num_strides, unsigned counter_bits,
TagExtractor ext)
: TaggedEntry(), strideEntries(num_strides, counter_bits),
counter(counter_bits)
{
registerTagExtractor(ext);
}
/** Reset the entries to their initial values */
@@ -148,7 +154,7 @@ class SignaturePath : public Queued
};
/** Pattern table */
AssociativeSet<PatternEntry> patternTable;
AssociativeCache<PatternEntry> patternTable;
/**
* Generates a new signature from an existing one and a new stride

View File

@@ -1,4 +1,16 @@
/**
* Copyright (c) 2024 Arm Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2018 Metempsy Technology Consulting
* All rights reserved.
*
@@ -31,7 +43,6 @@
#include <cassert>
#include "debug/HWPrefetch.hh"
#include "mem/cache/prefetch/associative_set_impl.hh"
#include "params/SignaturePathPrefetcherV2.hh"
namespace gem5
@@ -43,11 +54,12 @@ namespace prefetch
SignaturePathV2::SignaturePathV2(const SignaturePathPrefetcherV2Params &p)
: SignaturePath(p),
globalHistoryRegister((name() + ".GlobalHistoryRegister").c_str(),
p.global_history_register_entries,
p.global_history_register_entries,
p.global_history_register_replacement_policy,
p.global_history_register_indexing_policy,
GlobalHistoryEntry())
p.global_history_register_entries,
p.global_history_register_entries,
p.global_history_register_replacement_policy,
p.global_history_register_indexing_policy,
GlobalHistoryEntry(
genTagExtractor(p.global_history_register_indexing_policy)))
{
}
@@ -59,16 +71,13 @@ SignaturePathV2::handleSignatureTableMiss(stride_t current_block,
// This should return all entries of the GHR, since it is a fully
// associative table
std::vector<GlobalHistoryEntry *> all_ghr_entries =
globalHistoryRegister.getPossibleEntries(0 /* any value works */);
for (auto gh_entry : all_ghr_entries) {
if (gh_entry->lastBlock + gh_entry->delta == current_block) {
new_signature = gh_entry->signature;
new_conf = gh_entry->confidence;
new_stride = gh_entry->delta;
for (auto &gh_entry : globalHistoryRegister) {
if (gh_entry.lastBlock + gh_entry.delta == current_block) {
new_signature = gh_entry.signature;
new_conf = gh_entry.confidence;
new_stride = gh_entry.delta;
found = true;
globalHistoryRegister.accessEntry(gh_entry);
globalHistoryRegister.accessEntry(&gh_entry);
break;
}
}
@@ -122,11 +131,11 @@ SignaturePathV2::handlePageCrossingLookahead(signature_t signature,
{
// Always use the replacement policy to assign new entries, as all
// of them are unique, there are never "hits" in the GHR
GlobalHistoryEntry *gh_entry = globalHistoryRegister.findVictim(0);
const GlobalHistoryEntry::KeyType key{0, false};
GlobalHistoryEntry *gh_entry = globalHistoryRegister.findVictim(key);
assert(gh_entry != nullptr);
// Any address value works, as it is never used
constexpr bool is_secure = false;
globalHistoryRegister.insertEntry(0, is_secure, gh_entry);
globalHistoryRegister.insertEntry(key, gh_entry);
gh_entry->signature = signature;
gh_entry->lastBlock = last_offset;

View File

@@ -41,7 +41,7 @@
#ifndef __MEM_CACHE_PREFETCH_SIGNATURE_PATH_V2_HH__
#define __MEM_CACHE_PREFETCH_SIGNATURE_PATH_V2_HH__
#include "mem/cache/prefetch/associative_set.hh"
#include "base/cache/associative_cache.hh"
#include "mem/cache/prefetch/signature_path.hh"
#include "mem/packet.hh"
@@ -62,11 +62,15 @@ class SignaturePathV2 : public SignaturePath
double confidence;
stride_t lastBlock;
stride_t delta;
GlobalHistoryEntry() : signature(0), confidence(0.0), lastBlock(0),
delta(0) {}
GlobalHistoryEntry(TagExtractor ext)
: TaggedEntry(), signature(0), confidence(0.0), lastBlock(0),
delta(0)
{
registerTagExtractor(ext);
}
};
/** Global History Register */
AssociativeSet<GlobalHistoryEntry> globalHistoryRegister;
AssociativeCache<GlobalHistoryEntry> globalHistoryRegister;
double calculateLookaheadConfidence(PatternEntry const &sig,
PatternStrideEntry const &lookahead) const override;

View File

@@ -29,7 +29,6 @@
#include "mem/cache/prefetch/spatio_temporal_memory_streaming.hh"
#include "debug/HWPrefetch.hh"
#include "mem/cache/prefetch/associative_set_impl.hh"
#include "params/STeMSPrefetcher.hh"
namespace gem5
@@ -43,19 +42,21 @@ STeMS::STeMS(const STeMSPrefetcherParams &p)
spatialRegionSizeBits(floorLog2(p.spatial_region_size)),
reconstructionEntries(p.reconstruction_entries),
activeGenerationTable((name() + ".ActiveGenerationTable").c_str(),
p.active_generation_table_entries,
p.active_generation_table_assoc,
p.active_generation_table_replacement_policy,
p.active_generation_table_indexing_policy,
ActiveGenerationTableEntry(
spatialRegionSize / blkSize)),
p.active_generation_table_entries,
p.active_generation_table_assoc,
p.active_generation_table_replacement_policy,
p.active_generation_table_indexing_policy,
ActiveGenerationTableEntry(
spatialRegionSize / blkSize,
genTagExtractor(p.active_generation_table_indexing_policy))),
patternSequenceTable((name() + ".PatternSequenceTable").c_str(),
p.pattern_sequence_table_entries,
p.pattern_sequence_table_assoc,
p.pattern_sequence_table_replacement_policy,
p.pattern_sequence_table_indexing_policy,
ActiveGenerationTableEntry(
spatialRegionSize / blkSize)),
p.pattern_sequence_table_entries,
p.pattern_sequence_table_assoc,
p.pattern_sequence_table_replacement_policy,
p.pattern_sequence_table_indexing_policy,
ActiveGenerationTableEntry(
spatialRegionSize / blkSize,
genTagExtractor(p.pattern_sequence_table_indexing_policy))),
rmob(p.region_miss_order_buffer_entries),
addDuplicateEntriesToRMOB(p.add_duplicate_entries_to_rmob),
lastTriggerCounter(0)
@@ -91,16 +92,14 @@ STeMS::checkForActiveGenerationsEnd(const CacheAccessor &cache)
}
}
if (generation_ended) {
const ActiveGenerationTableEntry::KeyType key{pst_addr, false};
// PST is indexed using the PC (secure bit is unused)
constexpr bool is_secure = false;
auto pst_entry = patternSequenceTable.findEntry(pst_addr,
is_secure);
auto pst_entry = patternSequenceTable.findEntry(key);
if (pst_entry == nullptr) {
// Typically an entry will not exist
pst_entry = patternSequenceTable.findVictim(pst_addr);
pst_entry = patternSequenceTable.findVictim(key);
assert(pst_entry != nullptr);
patternSequenceTable.insertEntry(pst_addr, is_secure,
pst_entry);
patternSequenceTable.insertEntry(key, pst_entry);
} else {
patternSequenceTable.accessEntry(pst_entry);
}
@@ -157,8 +156,9 @@ STeMS::calculatePrefetch(const PrefetchInfo &pfi,
// Check if any active generation has ended
checkForActiveGenerationsEnd(cache);
const ActiveGenerationTableEntry::KeyType key{sr_addr, is_secure};
ActiveGenerationTableEntry *agt_entry =
activeGenerationTable.findEntry(sr_addr, is_secure);
activeGenerationTable.findEntry(key);
if (agt_entry != nullptr) {
// found an entry in the AGT, entry is currently being recorded,
// add the offset
@@ -175,9 +175,9 @@ STeMS::calculatePrefetch(const PrefetchInfo &pfi,
lastTriggerCounter = 0;
// allocate a new AGT entry
agt_entry = activeGenerationTable.findVictim(sr_addr);
agt_entry = activeGenerationTable.findVictim(key);
assert(agt_entry != nullptr);
activeGenerationTable.insertEntry(sr_addr, is_secure, agt_entry);
activeGenerationTable.insertEntry(key, agt_entry);
agt_entry->pc = pc;
agt_entry->paddress = paddr;
agt_entry->addOffset(sr_offset);
@@ -222,11 +222,10 @@ STeMS::reconstructSequence(
// Now query the PST with the PC of each RMOB entry
idx = 0;
constexpr bool is_secure = false;
for (auto it = rmob_it; it != rmob.end() && (idx < reconstructionEntries);
it++) {
auto pst_entry = patternSequenceTable.findEntry(it->pstAddress,
is_secure);
auto pst_entry = patternSequenceTable.findEntry(
{it->pstAddress, false});
if (pst_entry != nullptr) {
patternSequenceTable.accessEntry(pst_entry);
for (auto &seq_entry : pst_entry->sequence) {

View File

@@ -43,10 +43,11 @@
#include <vector>
#include "base/cache/associative_cache.hh"
#include "base/circular_queue.hh"
#include "base/sat_counter.hh"
#include "mem/cache/prefetch/associative_set.hh"
#include "mem/cache/prefetch/queued.hh"
#include "mem/cache/tags/tagged_entry.hh"
namespace gem5
{
@@ -93,10 +94,11 @@ class STeMS : public Queued
/** Sequence of accesses */
std::vector<SequenceEntry> sequence;
ActiveGenerationTableEntry(int num_positions)
ActiveGenerationTableEntry(int num_positions, TagExtractor ext)
: TaggedEntry(), paddress(0), pc(0),
seqCounter(0), sequence(num_positions)
{
registerTagExtractor(ext);
}
void
@@ -153,9 +155,9 @@ class STeMS : public Queued
};
/** Active Generation Table (AGT) */
AssociativeSet<ActiveGenerationTableEntry> activeGenerationTable;
AssociativeCache<ActiveGenerationTableEntry> activeGenerationTable;
/** Pattern Sequence Table (PST) */
AssociativeSet<ActiveGenerationTableEntry> patternSequenceTable;
AssociativeCache<ActiveGenerationTableEntry> patternSequenceTable;
/** Data type of the Region Miss Order Buffer entry */
struct RegionMissOrderBufferEntry

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2018 Inria
* Copyright (c) 2012-2013, 2015, 2022-2023 Arm Limited
* Copyright (c) 2012-2013, 2015, 2022-2024 Arm Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
@@ -53,7 +53,6 @@
#include "base/random.hh"
#include "base/trace.hh"
#include "debug/HWPrefetch.hh"
#include "mem/cache/prefetch/associative_set_impl.hh"
#include "mem/cache/replacement_policies/base.hh"
#include "params/StridePrefetcher.hh"
@@ -63,9 +62,11 @@ namespace gem5
namespace prefetch
{
Stride::StrideEntry::StrideEntry(const SatCounter8& init_confidence)
Stride::StrideEntry::StrideEntry(const SatCounter8& init_confidence,
TagExtractor ext)
: TaggedEntry(), confidence(init_confidence)
{
registerTagExtractor(ext);
invalidate();
}
@@ -91,36 +92,36 @@ Stride::Stride(const StridePrefetcherParams &p)
{
}
Stride::PCTable*
Stride::PCTable&
Stride::findTable(int context)
{
// Check if table for given context exists
auto it = pcTables.find(context);
if (it != pcTables.end())
return &it->second;
return *(it->second);
// If table does not exist yet, create one
return allocateNewContext(context);
}
Stride::PCTable*
Stride::PCTable&
Stride::allocateNewContext(int context)
{
std::string table_name = name() + ".PCTable" + std::to_string(context);
// Create new table
auto ins_result = pcTables.emplace(std::piecewise_construct,
std::forward_as_tuple(context),
std::forward_as_tuple(table_name.c_str(),
pcTableInfo.numEntries,
pcTableInfo.assoc,
pcTableInfo.replacementPolicy,
pcTableInfo.indexingPolicy,
StrideEntry(initConfidence)));
pcTables[context].reset(new PCTable(
table_name.c_str(),
pcTableInfo.numEntries,
pcTableInfo.assoc,
pcTableInfo.replacementPolicy,
pcTableInfo.indexingPolicy,
StrideEntry(initConfidence,
genTagExtractor(pcTableInfo.indexingPolicy))));
DPRINTF(HWPrefetch, "Adding context %i with stride entries\n", context);
// Get iterator to new pc table, and then return a pointer to the new table
return &(ins_result.first->second);
// return a reference to the new table
return *(pcTables[context]);
}
void
@@ -141,13 +142,14 @@ Stride::calculatePrefetch(const PrefetchInfo &pfi,
RequestorID requestor_id = useRequestorId ? pfi.getRequestorId() : 0;
// Get corresponding pc table
PCTable* pcTable = findTable(requestor_id);
PCTable& pc_table = findTable(requestor_id);
// Search for entry in the pc table
StrideEntry *entry = pcTable->findEntry(pc, is_secure);
const StrideEntry::KeyType key{pc, is_secure};
StrideEntry *entry = pc_table.findEntry(key);
if (entry != nullptr) {
pcTable->accessEntry(entry);
pc_table.accessEntry(entry);
// Hit in table
int new_stride = pf_addr - entry->lastAddr;
@@ -198,17 +200,18 @@ Stride::calculatePrefetch(const PrefetchInfo &pfi,
DPRINTF(HWPrefetch, "Miss: PC %x pkt_addr %x (%s)\n", pc, pf_addr,
is_secure ? "s" : "ns");
StrideEntry* entry = pcTable->findVictim(pc);
StrideEntry* entry = pc_table.findVictim(key);
// Insert new entry's data
entry->lastAddr = pf_addr;
pcTable->insertEntry(pc, is_secure, entry);
pc_table.insertEntry(key, entry);
}
}
uint32_t
StridePrefetcherHashedSetAssociative::extractSet(const Addr pc) const
StridePrefetcherHashedSetAssociative::extractSet(const KeyType &key) const
{
const Addr pc = key.address;
const Addr hash1 = pc >> 1;
const Addr hash2 = hash1 >> tagShift;
return (hash1 ^ hash2) & setMask;

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2018 Inria
* Copyright (c) 2012-2013, 2015, 2022 Arm Limited
* Copyright (c) 2012-2013, 2015, 2022, 2024 Arm Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
@@ -51,19 +51,19 @@
#include <unordered_map>
#include <vector>
#include "base/cache/associative_cache.hh"
#include "base/sat_counter.hh"
#include "base/types.hh"
#include "mem/cache/prefetch/associative_set.hh"
#include "mem/cache/prefetch/queued.hh"
#include "mem/cache/replacement_policies/replaceable_entry.hh"
#include "mem/cache/tags/indexing_policies/set_associative.hh"
#include "mem/cache/tags/tagged_entry.hh"
#include "mem/packet.hh"
#include "params/StridePrefetcherHashedSetAssociative.hh"
namespace gem5
{
class BaseIndexingPolicy;
namespace replacement_policy
{
class Base;
@@ -77,16 +77,16 @@ namespace prefetch
* Override the default set associative to apply a specific hash function
* when extracting a set.
*/
class StridePrefetcherHashedSetAssociative : public SetAssociative
class StridePrefetcherHashedSetAssociative : public TaggedSetAssociative
{
protected:
uint32_t extractSet(const Addr addr) const override;
uint32_t extractSet(const KeyType &key) const override;
Addr extractTag(const Addr addr) const override;
public:
StridePrefetcherHashedSetAssociative(
const StridePrefetcherHashedSetAssociativeParams &p)
: SetAssociative(p)
: TaggedSetAssociative(p)
{
}
~StridePrefetcherHashedSetAssociative() = default;
@@ -121,11 +121,11 @@ class Stride : public Queued
const int assoc;
const int numEntries;
BaseIndexingPolicy* const indexingPolicy;
TaggedIndexingPolicy* const indexingPolicy;
replacement_policy::Base* const replacementPolicy;
PCTableInfo(int assoc, int num_entries,
BaseIndexingPolicy* indexing_policy,
TaggedIndexingPolicy* indexing_policy,
replacement_policy::Base* repl_policy)
: assoc(assoc), numEntries(num_entries),
indexingPolicy(indexing_policy), replacementPolicy(repl_policy)
@@ -136,7 +136,7 @@ class Stride : public Queued
/** Tagged by hashed PCs. */
struct StrideEntry : public TaggedEntry
{
StrideEntry(const SatCounter8& init_confidence);
StrideEntry(const SatCounter8& init_confidence, TagExtractor ext);
void invalidate() override;
@@ -144,8 +144,8 @@ class Stride : public Queued
int stride;
SatCounter8 confidence;
};
typedef AssociativeSet<StrideEntry> PCTable;
std::unordered_map<int, PCTable> pcTables;
using PCTable = AssociativeCache<StrideEntry>;
std::unordered_map<int, std::unique_ptr<PCTable>> pcTables;
/**
* If this parameter is set to true, then the prefetcher will operate at
@@ -161,7 +161,7 @@ class Stride : public Queued
* @param context The context to be searched for.
* @return The table corresponding to the given context.
*/
PCTable* findTable(int context);
PCTable& findTable(int context);
/**
* Create a PC table for the given context.
@@ -169,7 +169,7 @@ class Stride : public Queued
* @param context The context of the new PC table.
* @return The new PC table
*/
PCTable* allocateNewContext(int context);
PCTable& allocateNewContext(int context);
public:
Stride(const StridePrefetcherParams &p);

View File

@@ -26,18 +26,28 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Import('*')
Import("*")
SimObject('Tags.py', sim_objects=[
'BaseTags', 'BaseSetAssoc', 'SectorTags', 'CompressedTags', 'FALRU'])
SimObject(
"Tags.py",
sim_objects=[
"BaseTags",
"BaseSetAssoc",
"SectorTags",
"CompressedTags",
"FALRU",
"TaggedIndexingPolicy",
"TaggedSetAssociative",
],
)
Source('base.cc')
Source('base_set_assoc.cc')
Source('compressed_tags.cc')
Source('dueling.cc')
Source('fa_lru.cc')
Source('sector_blk.cc')
Source('sector_tags.cc')
Source('super_blk.cc')
Source("base.cc")
Source("base_set_assoc.cc")
Source("compressed_tags.cc")
Source("dueling.cc")
Source("fa_lru.cc")
Source("sector_blk.cc")
Source("sector_tags.cc")
Source("super_blk.cc")
GTest('dueling.test', 'dueling.test.cc', 'dueling.cc')
GTest("dueling.test", "dueling.test.cc", "dueling.cc")

View File

@@ -39,6 +39,29 @@ from m5.params import *
from m5.proxy import *
class TaggedIndexingPolicy(SimObject):
type = "TaggedIndexingPolicy"
abstract = True
cxx_class = "gem5::IndexingPolicyTemplate<gem5::TaggedTypes>"
cxx_header = "mem/cache/tags/tagged_entry.hh"
cxx_template_params = ["class Types"]
# Get the size from the parent (cache)
size = Param.MemorySize(Parent.size, "capacity in bytes")
# Get the entry size from the parent (tags)
entry_size = Param.Int(Parent.entry_size, "entry size in bytes")
# Get the associativity
assoc = Param.Int(Parent.assoc, "associativity")
class TaggedSetAssociative(TaggedIndexingPolicy):
type = "TaggedSetAssociative"
cxx_class = "gem5::TaggedSetAssociative"
cxx_header = "mem/cache/tags/tagged_entry.hh"
class BaseTags(ClockedObject):
type = "BaseTags"
abstract = True
@@ -71,8 +94,8 @@ class BaseTags(ClockedObject):
)
# Get indexing policy
indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(), "Indexing policy"
indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(), "Indexing policy"
)
partitioning_manager = Param.PartitionManager(

View File

@@ -79,19 +79,16 @@ BaseTags::findBlockBySetAndWay(int set, int way) const
}
CacheBlk*
BaseTags::findBlock(Addr addr, bool is_secure) const
BaseTags::findBlock(const CacheBlk::KeyType &key) const
{
// Extract block tag
Addr tag = extractTag(addr);
// Find possible entries that may contain the given address
const std::vector<ReplaceableEntry*> entries =
indexingPolicy->getPossibleEntries(addr);
indexingPolicy->getPossibleEntries(key);
// Search for block
for (const auto& location : entries) {
CacheBlk* blk = static_cast<CacheBlk*>(location);
if (blk->matchTag(tag, is_secure)) {
if (blk->match(key)) {
return blk;
}
}
@@ -116,7 +113,7 @@ BaseTags::insertBlock(const PacketPtr pkt, CacheBlk *blk)
// Insert block with tag, src requestor id, task id and PartitionId
const auto partition_id = partitionManager ?
partitionManager->readPacketPartitionID(pkt) : 0;
blk->insert(extractTag(pkt->getAddr()), pkt->isSecure(), requestor_id,
blk->insert({pkt->getAddr(), pkt->isSecure()}, requestor_id,
pkt->req->taskId(), partition_id);
// Check if cache warm up is done

View File

@@ -56,6 +56,7 @@
#include "base/statistics.hh"
#include "base/types.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/tags/indexing_policies/base.hh"
#include "mem/packet.hh"
#include "params/BaseTags.hh"
#include "sim/clocked_object.hh"
@@ -64,7 +65,6 @@ namespace gem5
{
class System;
class IndexingPolicy;
class ReplaceableEntry;
/**
@@ -86,7 +86,7 @@ class BaseTags : public ClockedObject
System *system;
/** Indexing policy */
BaseIndexingPolicy *indexingPolicy;
TaggedIndexingPolicy *indexingPolicy;
/** Partitioning manager */
partitioning_policy::PartitionManager *partitionManager;
@@ -161,7 +161,7 @@ class BaseTags : public ClockedObject
} stats;
public:
typedef BaseTagsParams Params;
PARAMS(BaseTags);
BaseTags(const Params &p);
/**
@@ -199,7 +199,7 @@ class BaseTags : public ClockedObject
* @param is_secure True if the target memory space is secure.
* @return Pointer to the cache block.
*/
virtual CacheBlk *findBlock(Addr addr, bool is_secure) const;
virtual CacheBlk *findBlock(const CacheBlk::KeyType &key) const;
/**
* Find a block given set and way.
@@ -282,7 +282,7 @@ class BaseTags : public ClockedObject
* @param partition_id Partition ID for resource management.
* @return Cache block to be replaced.
*/
virtual CacheBlk* findVictim(Addr addr, const bool is_secure,
virtual CacheBlk* findVictim(const CacheBlk::KeyType &key,
const std::size_t size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id=0) = 0;

View File

@@ -82,6 +82,9 @@ BaseSetAssoc::tagsInit()
// Associate a replacement data entry to the block
blk->replacementData = replacementPolicy->instantiateEntry();
// This is not used as of now but we set it for security
blk->registerTagExtractor(genTagExtractor(indexingPolicy));
}
}

View File

@@ -127,7 +127,7 @@ class BaseSetAssoc : public BaseTags
*/
CacheBlk* accessBlock(const PacketPtr pkt, Cycles &lat) override
{
CacheBlk *blk = findBlock(pkt->getAddr(), pkt->isSecure());
CacheBlk *blk = findBlock({pkt->getAddr(), pkt->isSecure()});
// Access all tags in parallel, hence one in each way. The data side
// either accesses all blocks in parallel, or one block sequentially on
@@ -167,14 +167,14 @@ class BaseSetAssoc : public BaseTags
* @param partition_id Partition ID for resource management.
* @return Cache block to be replaced.
*/
CacheBlk* findVictim(Addr addr, const bool is_secure,
CacheBlk* findVictim(const CacheBlk::KeyType& key,
const std::size_t size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id=0) override
{
// Get possible entries to be victimized
std::vector<ReplaceableEntry*> entries =
indexingPolicy->getPossibleEntries(addr);
indexingPolicy->getPossibleEntries(key);
// Filter entries based on PartitionID
if (partitionManager) {
@@ -243,7 +243,7 @@ class BaseSetAssoc : public BaseTags
*/
Addr regenerateBlkAddr(const CacheBlk* blk) const override
{
return indexingPolicy->regenerateAddr(blk->getTag(), blk);
return indexingPolicy->regenerateAddr({blk->getTag(), false}, blk);
}
bool anyBlk(std::function<bool(CacheBlk &)> visitor) override {

View File

@@ -115,14 +115,14 @@ CompressedTags::tagsInit()
}
CacheBlk*
CompressedTags::findVictim(Addr addr, const bool is_secure,
CompressedTags::findVictim(const CacheBlk::KeyType& key,
const std::size_t compressed_size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id=0)
{
// Get all possible locations of this superblock
std::vector<ReplaceableEntry*> superblock_entries =
indexingPolicy->getPossibleEntries(addr);
indexingPolicy->getPossibleEntries(key);
// Filter entries based on PartitionID
if (partitionManager){
@@ -132,13 +132,12 @@ CompressedTags::findVictim(Addr addr, const bool is_secure,
// Check if the superblock this address belongs to has been allocated. If
// so, try co-allocating
Addr tag = extractTag(addr);
SuperBlk* victim_superblock = nullptr;
bool is_co_allocation = false;
const uint64_t offset = extractSectorOffset(addr);
const uint64_t offset = extractSectorOffset(key.address);
for (const auto& entry : superblock_entries){
SuperBlk* superblock = static_cast<SuperBlk*>(entry);
if (superblock->matchTag(tag, is_secure) &&
if (superblock->match(key) &&
!superblock->blks[offset]->isValid() &&
superblock->isCompressed() &&
superblock->canCoAllocate(compressed_size))

View File

@@ -117,7 +117,7 @@ class CompressedTags : public SectorTags
* @param partition_id Partition ID for resource management.
* @return Cache block to be replaced.
*/
CacheBlk* findVictim(Addr addr, const bool is_secure,
CacheBlk* findVictim(const CacheBlk::KeyType& key,
const std::size_t compressed_size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id) override;

View File

@@ -145,7 +145,7 @@ FALRU::accessBlock(const PacketPtr pkt, Cycles &lat,
{
CachesMask mask = 0;
FALRUBlk* blk =
static_cast<FALRUBlk*>(findBlock(pkt->getAddr(), pkt->isSecure()));
static_cast<FALRUBlk*>(findBlock({pkt->getAddr(), pkt->isSecure()}));
// If a cache hit
if (blk && blk->isValid()) {
@@ -167,19 +167,20 @@ FALRU::accessBlock(const PacketPtr pkt, Cycles &lat,
}
CacheBlk*
FALRU::findBlock(Addr addr, bool is_secure) const
FALRU::findBlock(const CacheBlk::KeyType &lookup) const
{
FALRUBlk* blk = nullptr;
Addr tag = extractTag(addr);
auto iter = tagHash.find(std::make_pair(tag, is_secure));
Addr tag = extractTag(lookup.address);
auto key = std::make_pair(tag, lookup.secure);
auto iter = tagHash.find(key);
if (iter != tagHash.end()) {
blk = (*iter).second;
}
if (blk && blk->isValid()) {
assert(blk->getTag() == tag);
assert(blk->isSecure() == is_secure);
assert(blk->isSecure() == lookup.secure);
}
return blk;
@@ -193,7 +194,7 @@ FALRU::findBlockBySetAndWay(int set, int way) const
}
CacheBlk*
FALRU::findVictim(Addr addr, const bool is_secure, const std::size_t size,
FALRU::findVictim(const CacheBlk::KeyType& key, const std::size_t size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id)
{

View File

@@ -198,7 +198,7 @@ class FALRU : public BaseTags
* @param asid The address space ID.
* @return Pointer to the cache block.
*/
CacheBlk* findBlock(Addr addr, bool is_secure) const override;
CacheBlk* findBlock(const CacheBlk::KeyType &lookup) const override;
/**
* Find a block given set and way.
@@ -220,7 +220,7 @@ class FALRU : public BaseTags
* @param partition_id Partition ID for resource management.
* @return Cache block to be replaced.
*/
CacheBlk* findVictim(Addr addr, const bool is_secure,
CacheBlk* findVictim(const CacheBlk::KeyType& key,
const std::size_t size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id=0) override;

View File

@@ -1,3 +1,16 @@
# -*- mode:python -*-
# Copyright (c) 2024 Arm Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2018 Inria
# All rights reserved.
#
@@ -32,8 +45,9 @@ from m5.SimObject import SimObject
class BaseIndexingPolicy(SimObject):
type = "BaseIndexingPolicy"
abstract = True
cxx_class = "gem5::BaseIndexingPolicy"
cxx_class = "gem5::IndexingPolicyTemplate<gem5::AddrTypes>"
cxx_header = "mem/cache/tags/indexing_policies/base.hh"
cxx_template_params = ["class Types"]
# Get the size from the parent (cache)
size = Param.MemorySize(Parent.size, "capacity in bytes")

View File

@@ -30,6 +30,5 @@ Import('*')
SimObject('IndexingPolicies.py', sim_objects=[
'BaseIndexingPolicy', 'SetAssociative', 'SkewedAssociative'])
Source('base.cc')
Source('set_associative.cc')
Source('skewed_associative.cc')

View File

@@ -1,104 +0,0 @@
/*
* Copyright (c) 2018 Inria
* Copyright (c) 2012-2014,2017 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2003-2005,2014 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* Definitions of a common framework for indexing policies.
*/
#include "mem/cache/tags/indexing_policies/base.hh"
#include <cstdlib>
#include "base/intmath.hh"
#include "base/logging.hh"
#include "mem/cache/replacement_policies/replaceable_entry.hh"
namespace gem5
{
BaseIndexingPolicy::BaseIndexingPolicy(const Params &p)
: SimObject(p), assoc(p.assoc),
numSets(p.size / (p.entry_size * assoc)),
setShift(floorLog2(p.entry_size)), setMask(numSets - 1), sets(numSets),
tagShift(setShift + floorLog2(numSets))
{
fatal_if(!isPowerOf2(numSets), "# of sets must be non-zero and a power " \
"of 2");
fatal_if(assoc <= 0, "associativity must be greater than zero");
// Make space for the entries
for (uint32_t i = 0; i < numSets; ++i) {
sets[i].resize(assoc);
}
}
ReplaceableEntry*
BaseIndexingPolicy::getEntry(const uint32_t set, const uint32_t way) const
{
return sets[set][way];
}
void
BaseIndexingPolicy::setEntry(ReplaceableEntry* entry, const uint64_t index)
{
// Calculate set and way from entry index
const std::lldiv_t div_result = std::div((long long)index, assoc);
const uint32_t set = div_result.quot;
const uint32_t way = div_result.rem;
// Sanity check
assert(set < numSets);
// Assign a free pointer
sets[set][way] = entry;
// Inform the entry its position
entry->setPosition(set, way);
}
Addr
BaseIndexingPolicy::extractTag(const Addr addr) const
{
return (addr >> tagShift);
}
} // namespace gem5

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2018 Inria
* Copyright (c) 2012-2014,2017 ARM Limited
* Copyright (c) 2012-2014,2017,2024 Arm Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
@@ -49,23 +49,32 @@
#include <vector>
#include "base/intmath.hh"
#include "base/logging.hh"
#include "mem/cache/replacement_policies/replaceable_entry.hh"
#include "params/BaseIndexingPolicy.hh"
#include "sim/sim_object.hh"
namespace gem5
{
class ReplaceableEntry;
/**
* A common base class for indexing table locations. Classes that inherit
* from it determine hash functions that should be applied based on the set
* and way. These functions are then applied to re-map the original values.
* @sa \ref gem5MemorySystem "gem5 Memory System"
* @tparam Types the Types template parameter shall contain the following type
* traits:
* - KeyType = The key/lookup data type
* - Params = The indexing policy Param type
*/
class BaseIndexingPolicy : public SimObject
template <class Types>
class IndexingPolicyTemplate : public SimObject
{
protected:
using KeyType = typename Types::KeyType;
using Params = typename Types::Params;
/**
* The associativity.
*/
@@ -97,20 +106,29 @@ class BaseIndexingPolicy : public SimObject
const int tagShift;
public:
/**
* Convenience typedef.
*/
typedef BaseIndexingPolicyParams Params;
/**
* Construct and initialize this policy.
*/
BaseIndexingPolicy(const Params &p);
IndexingPolicyTemplate(const Params &p)
: SimObject(p), assoc(p.assoc),
numSets(p.size / (p.entry_size * assoc)),
setShift(floorLog2(p.entry_size)), setMask(numSets - 1), sets(numSets),
tagShift(setShift + floorLog2(numSets))
{
fatal_if(!isPowerOf2(numSets), "# of sets must be non-zero and a power " \
"of 2");
fatal_if(assoc <= 0, "associativity must be greater than zero");
// Make space for the entries
for (uint32_t i = 0; i < numSets; ++i) {
sets[i].resize(assoc);
}
}
/**
* Destructor.
*/
~BaseIndexingPolicy() {};
~IndexingPolicyTemplate() {};
/**
* Associate a pointer to an entry to its physical counterpart.
@@ -118,7 +136,23 @@ class BaseIndexingPolicy : public SimObject
* @param entry The entry pointer.
* @param index An unique index for the entry.
*/
void setEntry(ReplaceableEntry* entry, const uint64_t index);
void
setEntry(ReplaceableEntry* entry, const uint64_t index)
{
// Calculate set and way from entry index
const std::lldiv_t div_result = std::div((long long)index, assoc);
const uint32_t set = div_result.quot;
const uint32_t way = div_result.rem;
// Sanity check
assert(set < numSets);
// Assign a free pointer
sets[set][way] = entry;
// Inform the entry its position
entry->setPosition(set, way);
}
/**
* Get an entry based on its set and way. All entries must have been set
@@ -128,7 +162,11 @@ class BaseIndexingPolicy : public SimObject
* @param way The way of the desired entry.
* @return entry The entry pointer.
*/
ReplaceableEntry* getEntry(const uint32_t set, const uint32_t way) const;
ReplaceableEntry*
getEntry(const uint32_t set, const uint32_t way) const
{
return sets[set][way];
}
/**
* Generate the tag from the given address.
@@ -136,7 +174,12 @@ class BaseIndexingPolicy : public SimObject
* @param addr The address to get the tag from.
* @return The tag of the address.
*/
virtual Addr extractTag(const Addr addr) const;
virtual Addr
extractTag(const Addr addr) const
{
return (addr >> tagShift);
}
/**
* Find all possible entries for insertion and replacement of an address.
@@ -146,7 +189,7 @@ class BaseIndexingPolicy : public SimObject
* @param addr The addr to a find possible entries for.
* @return The possible entries.
*/
virtual std::vector<ReplaceableEntry*> getPossibleEntries(const Addr addr)
virtual std::vector<ReplaceableEntry*> getPossibleEntries(const KeyType &key)
const = 0;
/**
@@ -156,10 +199,34 @@ class BaseIndexingPolicy : public SimObject
* @param entry The entry.
* @return the entry's original address.
*/
virtual Addr regenerateAddr(const Addr tag, const ReplaceableEntry* entry)
const = 0;
virtual Addr regenerateAddr(const KeyType &key,
const ReplaceableEntry* entry) const = 0;
};
class AddrTypes
{
public:
using KeyType = Addr;
using Params = BaseIndexingPolicyParams;
};
using BaseIndexingPolicy = IndexingPolicyTemplate<AddrTypes>;
template class IndexingPolicyTemplate<AddrTypes>;
/**
* This helper generates an a tag extractor function object
* which will be typically used by Replaceable entries indexed
* with the BaseIndexingPolicy.
* It allows to "decouple" indexing from tagging. Those entries
* would call the functor without directly holding a pointer
* to the indexing policy which should reside in the cache.
*/
static constexpr auto
genTagExtractor(BaseIndexingPolicy *ip)
{
return [ip] (Addr addr) { return ip->extractTag(addr); };
}
} // namespace gem5
#endif //__MEM_CACHE_INDEXING_POLICIES_BASE_HH__

View File

@@ -63,14 +63,14 @@ SetAssociative::extractSet(const Addr addr) const
}
Addr
SetAssociative::regenerateAddr(const Addr tag, const ReplaceableEntry* entry)
const
SetAssociative::regenerateAddr(const Addr &tag,
const ReplaceableEntry* entry) const
{
return (tag << tagShift) | (entry->getSet() << setShift);
}
std::vector<ReplaceableEntry*>
SetAssociative::getPossibleEntries(const Addr addr) const
SetAssociative::getPossibleEntries(const Addr &addr) const
{
return sets[extractSet(addr)];
}

View File

@@ -115,7 +115,7 @@ class SetAssociative : public BaseIndexingPolicy
* @param addr The addr to a find possible entries for.
* @return The possible entries.
*/
std::vector<ReplaceableEntry*> getPossibleEntries(const Addr addr) const
std::vector<ReplaceableEntry*> getPossibleEntries(const Addr &addr) const
override;
/**
@@ -125,8 +125,8 @@ class SetAssociative : public BaseIndexingPolicy
* @param entry The entry.
* @return the entry's original addr value.
*/
Addr regenerateAddr(const Addr tag, const ReplaceableEntry* entry) const
override;
Addr regenerateAddr(const Addr &tag,
const ReplaceableEntry* entry) const override;
};
} // namespace gem5

View File

@@ -198,7 +198,7 @@ SkewedAssociative::extractSet(const Addr addr, const uint32_t way) const
}
Addr
SkewedAssociative::regenerateAddr(const Addr tag,
SkewedAssociative::regenerateAddr(const Addr &tag,
const ReplaceableEntry* entry) const
{
const Addr addr_set = (tag << (msbShift + 1)) | entry->getSet();
@@ -207,7 +207,7 @@ SkewedAssociative::regenerateAddr(const Addr tag,
}
std::vector<ReplaceableEntry*>
SkewedAssociative::getPossibleEntries(const Addr addr) const
SkewedAssociative::getPossibleEntries(const Addr &addr) const
{
std::vector<ReplaceableEntry*> entries;

View File

@@ -160,7 +160,7 @@ class SkewedAssociative : public BaseIndexingPolicy
* @param addr The addr to a find possible entries for.
* @return The possible entries.
*/
std::vector<ReplaceableEntry*> getPossibleEntries(const Addr addr) const
std::vector<ReplaceableEntry*> getPossibleEntries(const Addr &addr) const
override;
/**
@@ -171,8 +171,8 @@ class SkewedAssociative : public BaseIndexingPolicy
* @param entry The entry.
* @return the entry's address.
*/
Addr regenerateAddr(const Addr tag, const ReplaceableEntry* entry) const
override;
Addr regenerateAddr(const Addr &tag,
const ReplaceableEntry* entry) const override;
};
} // namespace gem5

View File

@@ -83,18 +83,18 @@ SectorSubBlk::setValid()
}
void
SectorSubBlk::insert(const Addr tag, const bool is_secure)
SectorSubBlk::insert(const KeyType &tag)
{
// Make sure it is not overwriting another sector
panic_if(_sectorBlk && _sectorBlk->isValid() &&
!_sectorBlk->matchTag(tag, is_secure), "Overwriting valid sector!");
!_sectorBlk->match(tag), "Overwriting valid sector!");
// If the sector is not valid, insert the new tag. The sector block
// handles its own tag's invalidation, so do not attempt to insert MaxAddr.
if ((_sectorBlk && !_sectorBlk->isValid()) && (tag != MaxAddr)) {
_sectorBlk->insert(tag, is_secure);
if ((_sectorBlk && !_sectorBlk->isValid()) && (tag.address != MaxAddr)) {
_sectorBlk->insert(tag);
}
CacheBlk::insert(tag, is_secure);
CacheBlk::insert(tag);
}
void

View File

@@ -113,7 +113,7 @@ class SectorSubBlk : public CacheBlk
*/
void setValid() override;
void insert(const Addr tag, const bool is_secure) override;
void insert(const KeyType &tag) override;
/**
* Invalidate the block and inform sector block.

View File

@@ -156,7 +156,7 @@ SectorTags::invalidate(CacheBlk *blk)
CacheBlk*
SectorTags::accessBlock(const PacketPtr pkt, Cycles &lat)
{
CacheBlk *blk = findBlock(pkt->getAddr(), pkt->isSecure());
CacheBlk *blk = findBlock({pkt->getAddr(), pkt->isSecure()});
// Access all tags in parallel, hence one in each way. The data side
// either accesses all blocks in parallel, or one block sequentially on
@@ -262,23 +262,20 @@ SectorTags::moveBlock(CacheBlk *src_blk, CacheBlk *dest_blk)
}
CacheBlk*
SectorTags::findBlock(Addr addr, bool is_secure) const
SectorTags::findBlock(const CacheBlk::KeyType &key) const
{
// Extract sector tag
const Addr tag = extractTag(addr);
// The address can only be mapped to a specific location of a sector
// due to sectors being composed of contiguous-address entries
const Addr offset = extractSectorOffset(addr);
const Addr offset = extractSectorOffset(key.address);
// Find all possible sector entries that may contain the given address
const std::vector<ReplaceableEntry*> entries =
indexingPolicy->getPossibleEntries(addr);
indexingPolicy->getPossibleEntries(key);
// Search for block
for (const auto& sector : entries) {
auto blk = static_cast<SectorBlk*>(sector)->blks[offset];
if (blk->matchTag(tag, is_secure)) {
if (blk->match(key)) {
return blk;
}
}
@@ -288,24 +285,24 @@ SectorTags::findBlock(Addr addr, bool is_secure) const
}
CacheBlk*
SectorTags::findVictim(Addr addr, const bool is_secure, const std::size_t size,
SectorTags::findVictim(const CacheBlk::KeyType &key,
const std::size_t size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id)
{
// Get possible entries to be victimized
std::vector<ReplaceableEntry*> sector_entries =
indexingPolicy->getPossibleEntries(addr);
indexingPolicy->getPossibleEntries(key);
// Filter entries based on PartitionID
if (partitionManager)
partitionManager->filterByPartition(sector_entries, partition_id);
// Check if the sector this address belongs to has been allocated
Addr tag = extractTag(addr);
SectorBlk* victim_sector = nullptr;
for (const auto& sector : sector_entries) {
SectorBlk* sector_blk = static_cast<SectorBlk*>(sector);
if (sector_blk->matchTag(tag, is_secure)) {
if (sector_blk->match(key)) {
victim_sector = sector_blk;
break;
}
@@ -325,11 +322,12 @@ SectorTags::findVictim(Addr addr, const bool is_secure, const std::size_t size,
}
// Get the entry of the victim block within the sector
SectorSubBlk* victim = victim_sector->blks[extractSectorOffset(addr)];
SectorSubBlk* victim = victim_sector->blks[
extractSectorOffset(key.address)];
// Get evicted blocks. Blocks are only evicted if the sectors mismatch and
// the currently existing sector is valid.
if (victim_sector->matchTag(tag, is_secure)) {
if (victim_sector->match(key)) {
// It would be a hit if victim was valid, and upgrades do not call
// findVictim, so it cannot happen
assert(!victim->isValid());
@@ -360,7 +358,8 @@ SectorTags::regenerateBlkAddr(const CacheBlk* blk) const
const SectorSubBlk* blk_cast = static_cast<const SectorSubBlk*>(blk);
const SectorBlk* sec_blk = blk_cast->getSectorBlock();
const Addr sec_addr =
indexingPolicy->regenerateAddr(blk->getTag(), sec_blk);
indexingPolicy->regenerateAddr(
{blk->getTag(), blk->isSecure()}, sec_blk);
return sec_addr | ((Addr)blk_cast->getSectorOffset() << sectorShift);
}

View File

@@ -173,7 +173,7 @@ class SectorTags : public BaseTags
* @param is_secure True if the target memory space is secure.
* @return Pointer to the cache block if found.
*/
CacheBlk* findBlock(Addr addr, bool is_secure) const override;
CacheBlk* findBlock(const CacheBlk::KeyType &key) const override;
/**
* Find replacement victim based on address.
@@ -185,7 +185,7 @@ class SectorTags : public BaseTags
* @param partition_id Partition ID for resource management.
* @return Cache block to be replaced.
*/
CacheBlk* findVictim(Addr addr, const bool is_secure,
CacheBlk* findVictim(const CacheBlk::KeyType &key,
const std::size_t size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id) override;

View File

@@ -1,4 +1,16 @@
/**
* Copyright (c) 2024 Arm Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2020 Inria
* All rights reserved.
*
@@ -31,26 +43,95 @@
#include <cassert>
#include "base/cache/cache_entry.hh"
#include "base/cprintf.hh"
#include "base/logging.hh"
#include "base/types.hh"
#include "mem/cache/replacement_policies/replaceable_entry.hh"
#include "mem/cache/tags/indexing_policies/base.hh"
#include "params/TaggedIndexingPolicy.hh"
#include "params/TaggedSetAssociative.hh"
namespace gem5
{
class TaggedTypes
{
public:
struct KeyType
{
Addr address;
bool secure;
};
using Params = TaggedIndexingPolicyParams;
};
using TaggedIndexingPolicy = IndexingPolicyTemplate<TaggedTypes>;
template class IndexingPolicyTemplate<TaggedTypes>;
/**
* This version of set associative indexing deals with
* a Lookup structure made of address and secure bit.
* It extracts the address but discards the secure bit which
* is used for tagging only
*/
class TaggedSetAssociative : public TaggedIndexingPolicy
{
  public:
    PARAMS(TaggedSetAssociative);

    TaggedSetAssociative(const Params &p)
      : TaggedIndexingPolicy(p)
    {}

    /**
     * Return every replacement candidate in the set selected by the
     * address component of the key.
     */
    std::vector<ReplaceableEntry*>
    getPossibleEntries(const KeyType &key) const override
    {
        const auto set_idx = extractSet(key);
        return sets[set_idx];
    }

    /**
     * Rebuild the block address by recombining the tag bits carried in
     * the key with the set index of the given entry.
     */
    Addr
    regenerateAddr(const KeyType &key,
                   const ReplaceableEntry *entry) const override
    {
        const Addr tag_bits = key.address << tagShift;
        const Addr set_bits = static_cast<Addr>(entry->getSet()) << setShift;
        return tag_bits | set_bits;
    }

  protected:
    /**
     * Select the set index from the key. Only the address takes part in
     * indexing; the secure bit is ignored here (it is used for tag
     * matching only).
     */
    virtual uint32_t
    extractSet(const KeyType &key) const
    {
        return (key.address >> setShift) & setMask;
    }
};
/**
* A tagged entry is an entry containing a tag. Each tag is accompanied by a
* secure bit, which informs whether it belongs to a secure address space.
* A tagged entry's contents are only relevant if it is marked as valid.
*/
// NOTE(review): this whole class is a rendered diff — pre-change and
// post-change lines are interleaved and '@@' hunk headers remain embedded.
// The post-change class derives from ReplaceableEntry, keys lookups on
// TaggedTypes::KeyType, and owns its own valid/secure/tag state. The old
// CacheEntry-based lines (duplicate class head, old ctor, matchTag/insert
// with explicit is_secure, panic stubs) are the removed side of the hunk.
class TaggedEntry : public CacheEntry
class TaggedEntry : public ReplaceableEntry
{
public:
// NOTE(review): removed constructor (old CacheEntry base) — superseded
// by the member-initializing constructor just below.
TaggedEntry() : CacheEntry(), _secure(false) {}
// Types an AssociativeCache extracts from its Entry template parameter:
// the lookup key and the indexing policy to pair this entry with.
using KeyType = TaggedTypes::KeyType;
using IndexingPolicy = TaggedIndexingPolicy;
// Functor mapping a full address to its tag bits; installed by the cache
// via registerTagExtractor so the entry need not hold the policy itself.
using TagExtractor = std::function<Addr(Addr)>;
TaggedEntry()
: _valid(false), _secure(false), _tag(MaxAddr)
{}
~TaggedEntry() = default;
// Install the tag-extraction callback (typically from genTagExtractor).
void
registerTagExtractor(TagExtractor ext)
{
extractTag = ext;
}
/**
* Checks if the entry is valid.
*
* @return True if the entry is valid.
*/
virtual bool isValid() const { return _valid; }
/**
* Check if this block holds data from the secure memory space.
*
@@ -58,6 +139,13 @@ class TaggedEntry : public CacheEntry
*/
bool isSecure() const { return _secure; }
/**
* Get tag associated to this block.
*
* @return The tag value.
*/
virtual Addr getTag() const { return _tag; }
/**
* Checks if the given tag information corresponds to this entry's.
*
@@ -65,10 +153,11 @@ class TaggedEntry : public CacheEntry
* @param is_secure Whether secure bit is set.
* @return True if the tag information match this entry's.
*/
// NOTE(review): old matchTag(Addr, bool) signature (removed) followed by
// the new key-based match(); the new form derives the tag from the key's
// address via the registered extractor and compares the secure bit.
virtual bool
matchTag(Addr tag, bool is_secure) const
bool
match(const KeyType &key) const
{
return isValid() && (getTag() == tag) && (isSecure() == is_secure);
return isValid() && (getTag() == extractTag(key.address)) &&
(isSecure() == key.secure);
}
/**
@@ -78,20 +167,20 @@ class TaggedEntry : public CacheEntry
* @param tag The tag value.
*/
// NOTE(review): old insert(Addr, bool) header (removed) followed by the
// new key-based insert(); marks the entry valid, stores the extracted
// tag, and records the secure bit.
virtual void
insert(const Addr tag, const bool is_secure)
insert(const KeyType &key)
{
setValid();
setTag(tag);
if (is_secure) {
setTag(extractTag(key.address));
if (key.secure) {
setSecure();
}
}
/** Invalidate the block. Its contents are no longer valid. */
// NOTE(review): old 'override' form delegated to CacheEntry::invalidate();
// the new virtual resets the locally-owned valid/tag/secure state.
void
invalidate() override
virtual void invalidate()
{
CacheEntry::invalidate();
_valid = false;
setTag(MaxAddr);
clearSecure();
}
@@ -102,38 +191,63 @@
isSecure(), isValid(), ReplaceableEntry::print());
}
// NOTE(review): the two panic() stubs below are the *removed* overloads
// that previously blocked the is_secure-less CacheEntry API; they do not
// exist after the commit.
bool
matchTag(const Addr tag) const override
{
panic("Need is_secure arg");
return false;
}
void
insert(const Addr tag) override
{
panic("Need is_secure arg");
return;
}
protected:
/**
* Set tag associated to this block.
*
* @param tag The tag value.
*/
virtual void setTag(Addr tag) { _tag = tag; }
/** Set secure bit. */
virtual void setSecure() { _secure = true; }
/** Clear secure bit. Should be only used by the invalidation function. */
void clearSecure() { _secure = false; }
/** Set valid bit. The block must be invalid beforehand. */
virtual void
setValid()
{
assert(!isValid());
_valid = true;
}
/** Callback used to extract the tag from the entry */
TagExtractor extractTag;
private:
/**
* Valid bit. The contents of this entry are only valid if this bit is set.
* @sa invalidate()
* @sa insert()
*/
bool _valid;
/**
* Secure bit. Marks whether this entry refers to an address in the secure
* memory space. Must always be modified along with the tag.
*/
bool _secure;
// NOTE(review): the next four lines are the removed private-section
// remnants of the old CacheEntry-based class (clearSecure moved to the
// protected section; the using-declarations hid the base API).
/** Clear secure bit. Should be only used by the invalidation function. */
void clearSecure() { _secure = false; }
/** Do not use API without is_secure flag. */
using CacheEntry::matchTag;
using CacheEntry::insert;
/** The entry's tag. */
Addr _tag;
};
/**
 * Build a tag-extraction functor bound to the given indexing policy.
 * Entries indexed through a TaggedIndexingPolicy typically store this
 * functor (see TaggedEntry::registerTagExtractor), letting them compute
 * tags without holding a direct pointer to the policy, which lives in
 * the cache. This decouples tagging from indexing.
 */
static constexpr auto
genTagExtractor(TaggedIndexingPolicy *ip)
{
    return [policy = ip] (Addr addr) { return policy->extractTag(addr); };
}
} // namespace gem5
#endif//__CACHE_TAGGED_ENTRY_HH__