base, mem-cache: Rewrite TaggedEntry code

The only difference between the TaggedEntry and the newly defined
CacheEntry is the presence of the secure flag in the first case.  The
need to tag a cache entry according to the security bit required the
overloading of the matching methods in the TaggedEntry class to take
security into account (See matchTag [1]), and the persistence after
PR #745 of the AssociativeSet class which is basically identical
to its AssociativeCache superclass, only it overrides its virtual
method to match the tag according to the secure bit as well.

The introduction of the KeyType parameter in the previous commit
will smooth out the differences and help unify the interface.

Rather than overloading and overriding to account for a different
signature, we embody the difference in the KeyType class. A
CacheEntry will match with KeyType = Addr,
whereas a TaggedEntry will use the following lookup type proposed in this
patch:

struct KeyType {
    Addr address;
    bool secure;
};

This patch is partly reverting the changes in #745 which were
reimplementing TaggedEntry on top of the CacheEntry. Instead
we keep them separate as the plan is to allow different
entry types with templatization rather than polymorphism.

As a final note, I believe a separate commit will have to
change the naming of our entries; the CacheEntry should
probably be renamed into TaggedEntry and the current TaggedEntry
into something that reflects the presence of the security bit
alongside the traditional address tag

[1]: https://github.com/gem5/gem5/blob/stable/\
    src/mem/cache/tags/tagged_entry.hh#L81

Change-Id: Ifc104c8d0c1d64509f612d87b80d442e0764f7ca
Signed-off-by: Giacomo Travaglini <giacomo.travaglini@arm.com>
This commit is contained in:
Giacomo Travaglini
2024-08-08 14:19:31 +01:00
parent 1c57195d7f
commit ee9814499d
38 changed files with 385 additions and 270 deletions

View File

@@ -122,12 +122,6 @@ class CacheEntry : public ReplaceableEntry
isValid(), ReplaceableEntry::print());
}
void
setIndexingPolicy(BaseIndexingPolicy *ip)
{
indexingPolicy = ip;
}
protected:
/**
* Set tag associated to this block.

19
src/mem/cache/base.cc vendored
View File

@@ -416,7 +416,7 @@ BaseCache::recvTimingReq(PacketPtr pkt)
// Now that the write is here, mark it accessible again, so the
// write will succeed. LockedRMWReadReq brings the block in in
// exclusive mode, so we know it was previously writable.
CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
CacheBlk *blk = tags->findBlock({pkt->getAddr(), pkt->isSecure()});
assert(blk && blk->isValid());
assert(!blk->isSet(CacheBlk::WritableBit) &&
!blk->isSet(CacheBlk::ReadableBit));
@@ -550,7 +550,7 @@ BaseCache::recvTimingResp(PacketPtr pkt)
// the response is an invalidation
assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());
CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
CacheBlk *blk = tags->findBlock({pkt->getAddr(), pkt->isSecure()});
if (is_fill && !is_error) {
DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
@@ -719,7 +719,7 @@ BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
Addr blk_addr = pkt->getBlockAddr(blkSize);
bool is_secure = pkt->isSecure();
CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
CacheBlk *blk = tags->findBlock({pkt->getAddr(), is_secure});
MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
pkt->pushLabel(name());
@@ -910,7 +910,7 @@ BaseCache::getNextQueueEntry()
PacketPtr pkt = prefetcher->getPacket();
if (pkt) {
Addr pf_addr = pkt->getBlockAddr(blkSize);
if (tags->findBlock(pf_addr, pkt->isSecure())) {
if (tags->findBlock({pf_addr, pkt->isSecure()})) {
DPRINTF(HWPrefetch, "Prefetch %#x has hit in cache, "
"dropped.\n", pf_addr);
prefetcher->pfHitInCache();
@@ -1031,8 +1031,9 @@ BaseCache::updateCompressionData(CacheBlk *&blk, const uint64_t* data,
bool victim_itself = false;
CacheBlk *victim = nullptr;
if (replaceExpansions || is_data_contraction) {
victim = tags->findVictim(regenerateBlkAddr(blk),
blk->isSecure(), compression_size, evict_blks,
victim = tags->findVictim(
{regenerateBlkAddr(blk), blk->isSecure()},
compression_size, evict_blks,
blk->getPartitionId());
// It is valid to return nullptr if there is no victim
@@ -1546,7 +1547,7 @@ BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
// cache... just use temporary storage to complete the
// current request and then get rid of it
blk = tempBlock;
tempBlock->insert(addr, is_secure);
tempBlock->insert({addr, is_secure});
DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
is_secure ? "s" : "ns");
}
@@ -1647,7 +1648,7 @@ BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
partitionManager->readPacketPartitionID(pkt) : 0;
// Find replacement victim
std::vector<CacheBlk*> evict_blks;
CacheBlk *victim = tags->findVictim(addr, is_secure, blk_size_bits,
CacheBlk *victim = tags->findVictim({addr, is_secure}, blk_size_bits,
evict_blks, partition_id);
// It is valid to return nullptr if there is no victim
@@ -1911,7 +1912,7 @@ BaseCache::sendMSHRQueuePacket(MSHR* mshr)
}
}
CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
CacheBlk *blk = tags->findBlock({mshr->blkAddr, mshr->isSecure});
// either a prefetch that is not present upstream, or a normal
// MSHR request, proceed to get the packet to send downstream

View File

@@ -1283,17 +1283,17 @@ class BaseCache : public ClockedObject
}
bool inCache(Addr addr, bool is_secure) const {
return tags->findBlock(addr, is_secure);
return tags->findBlock({addr, is_secure});
}
bool hasBeenPrefetched(Addr addr, bool is_secure) const {
CacheBlk *block = tags->findBlock(addr, is_secure);
CacheBlk *block = tags->findBlock({addr, is_secure});
return block && block->wasPrefetched();
}
bool hasBeenPrefetched(Addr addr, bool is_secure,
RequestorID requestor) const {
CacheBlk *block = tags->findBlock(addr, is_secure);
CacheBlk *block = tags->findBlock({addr, is_secure});
return block && block->wasPrefetched() &&
(block->getSrcRequestorId() == requestor);
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2010-2019 ARM Limited
* Copyright (c) 2010-2019, 2024 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
@@ -172,7 +172,7 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());
// flush and invalidate any existing block
CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
CacheBlk *old_blk(tags->findBlock({pkt->getAddr(), pkt->isSecure()}));
if (old_blk && old_blk->isValid()) {
BaseCache::evictBlock(old_blk, writebacks);
}
@@ -1268,7 +1268,7 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
}
bool is_secure = pkt->isSecure();
CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
CacheBlk *blk = tags->findBlock({pkt->getAddr(), is_secure});
Addr blk_addr = pkt->getBlockAddr(blkSize);
MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
@@ -1383,7 +1383,7 @@ Cache::recvAtomicSnoop(PacketPtr pkt)
return 0;
}
CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
CacheBlk *blk = tags->findBlock({pkt->getAddr(), pkt->isSecure()});
uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
return snoop_delay + lookupLatency * clockPeriod();
}
@@ -1429,7 +1429,7 @@ Cache::sendMSHRQueuePacket(MSHR* mshr)
// we should never have hardware prefetches to allocated
// blocks
assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));
assert(!tags->findBlock({mshr->blkAddr, mshr->isSecure}));
// We need to check the caches above us to verify that
// they don't have a copy of this block in the dirty state

View File

@@ -47,14 +47,14 @@ namespace gem5
{
void
CacheBlk::insert(const Addr tag, const bool is_secure,
CacheBlk::insert(const KeyType &tag,
const int src_requestor_ID, const uint32_t task_ID,
const uint64_t partition_id)
{
// Make sure that the block has been properly invalidated
assert(!isValid());
insert(tag, is_secure);
insert(tag);
// Set source requestor ID
setSrcRequestorId(src_requestor_ID);

View File

@@ -153,7 +153,7 @@ class CacheBlk : public TaggedEntry
std::list<Lock> lockList;
public:
CacheBlk()
CacheBlk() : TaggedEntry(nullptr)
{
invalidate();
}
@@ -177,7 +177,7 @@ class CacheBlk : public TaggedEntry
assert(!isValid());
assert(other.isValid());
insert(other.getTag(), other.isSecure());
insert({other.getTag(), other.isSecure()});
if (other.wasPrefetched()) {
setPrefetched();
@@ -323,7 +323,7 @@ class CacheBlk : public TaggedEntry
* @param task_ID The new task ID.
* @param partition_id The source partition ID.
*/
void insert(const Addr tag, const bool is_secure,
void insert(const KeyType &tag,
const int src_requestor_ID, const uint32_t task_ID,
const uint64_t partition_id);
using TaggedEntry::insert;
@@ -526,7 +526,7 @@ class TempCacheBlk final : public CacheBlk
* Creates a temporary cache block, with its own storage.
* @param size The size (in bytes) of this cache block.
*/
TempCacheBlk(unsigned size, BaseIndexingPolicy *ip) : CacheBlk()
TempCacheBlk(unsigned size, TaggedIndexingPolicy *ip) : CacheBlk()
{
data = new uint8_t[size];
@@ -547,10 +547,10 @@ class TempCacheBlk final : public CacheBlk
}
void
insert(const Addr addr, const bool is_secure) override
insert(const KeyType &tag) override
{
CacheBlk::insert(addr, is_secure);
_addr = addr;
CacheBlk::insert(tag);
_addr = tag.address;
}
/**

View File

@@ -39,6 +39,7 @@
from m5.objects.ClockedObject import ClockedObject
from m5.objects.IndexingPolicies import *
from m5.objects.ReplacementPolicies import *
from m5.objects.Tags import *
from m5.params import *
from m5.proxy import *
from m5.SimObject import *
@@ -164,7 +165,7 @@ class QueuedPrefetcher(BasePrefetcher):
)
class StridePrefetcherHashedSetAssociative(SetAssociative):
class StridePrefetcherHashedSetAssociative(TaggedSetAssociative):
type = "StridePrefetcherHashedSetAssociative"
cxx_class = "gem5::prefetch::StridePrefetcherHashedSetAssociative"
cxx_header = "mem/cache/prefetch/stride.hh"
@@ -208,7 +209,7 @@ class StridePrefetcher(QueuedPrefetcher):
table_assoc = Param.Int(4, "Associativity of the PC table")
table_entries = Param.MemorySize("64", "Number of entries of the PC table")
table_indexing_policy = Param.BaseIndexingPolicy(
table_indexing_policy = Param.TaggedIndexingPolicy(
StridePrefetcherHashedSetAssociative(
entry_size=1, assoc=Parent.table_assoc, size=Parent.table_entries
),
@@ -235,8 +236,8 @@ class IndirectMemoryPrefetcher(QueuedPrefetcher):
"16", "Number of entries of the Prefetch Table"
)
pt_table_assoc = Param.Unsigned(16, "Associativity of the Prefetch Table")
pt_table_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
pt_table_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.pt_table_assoc,
size=Parent.pt_table_entries,
@@ -256,8 +257,8 @@ class IndirectMemoryPrefetcher(QueuedPrefetcher):
ipd_table_assoc = Param.Unsigned(
4, "Associativity of the Indirect Pattern Detector"
)
ipd_table_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
ipd_table_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.ipd_table_assoc,
size=Parent.ipd_table_entries,
@@ -295,8 +296,8 @@ class SignaturePathPrefetcher(QueuedPrefetcher):
signature_table_assoc = Param.Unsigned(
2, "Associativity of the signature table"
)
signature_table_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
signature_table_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.signature_table_assoc,
size=Parent.signature_table_entries,
@@ -319,8 +320,8 @@ class SignaturePathPrefetcher(QueuedPrefetcher):
strides_per_pattern_entry = Param.Unsigned(
4, "Number of strides stored in each pattern entry"
)
pattern_table_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
pattern_table_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.pattern_table_assoc,
size=Parent.pattern_table_entries,
@@ -355,8 +356,8 @@ class SignaturePathPrefetcherV2(SignaturePathPrefetcher):
global_history_register_entries = Param.MemorySize(
"8", "Number of entries of global history register"
)
global_history_register_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
global_history_register_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.global_history_register_entries,
size=Parent.global_history_register_entries,
@@ -391,8 +392,8 @@ class AccessMapPatternMatching(ClockedObject):
access_map_table_assoc = Param.Unsigned(
8, "Associativity of the access map table"
)
access_map_table_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
access_map_table_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.access_map_table_assoc,
size=Parent.access_map_table_entries,
@@ -487,8 +488,8 @@ class IrregularStreamBufferPrefetcher(QueuedPrefetcher):
training_unit_entries = Param.MemorySize(
"128", "Number of entries of the training unit"
)
training_unit_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
training_unit_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.training_unit_assoc,
size=Parent.training_unit_entries,
@@ -508,8 +509,8 @@ class IrregularStreamBufferPrefetcher(QueuedPrefetcher):
address_map_cache_entries = Param.MemorySize(
"128", "Number of entries of the PS/SP AMCs"
)
ps_address_map_cache_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
ps_address_map_cache_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.address_map_cache_assoc,
size=Parent.address_map_cache_entries,
@@ -520,8 +521,8 @@ class IrregularStreamBufferPrefetcher(QueuedPrefetcher):
LRURP(),
"Replacement policy of the Physical-to-Structural Address Map Cache",
)
sp_address_map_cache_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
sp_address_map_cache_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.address_map_cache_assoc,
size=Parent.address_map_cache_entries,
@@ -626,8 +627,8 @@ class STeMSPrefetcher(QueuedPrefetcher):
active_generation_table_assoc = Param.Unsigned(
64, "Associativity of the active generation table"
)
active_generation_table_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
active_generation_table_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.active_generation_table_assoc,
size=Parent.active_generation_table_entries,
@@ -644,8 +645,8 @@ class STeMSPrefetcher(QueuedPrefetcher):
pattern_sequence_table_assoc = Param.Unsigned(
16384, "Associativity of the pattern sequence table"
)
pattern_sequence_table_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
pattern_sequence_table_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1,
assoc=Parent.pattern_sequence_table_assoc,
size=Parent.pattern_sequence_table_entries,
@@ -694,8 +695,8 @@ class PIFPrefetcher(QueuedPrefetcher):
index_entries = Param.MemorySize("64", "Number of entries in the index")
index_assoc = Param.Unsigned(64, "Associativity of the index")
index_indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(
index_indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(
entry_size=1, assoc=Parent.index_assoc, size=Parent.index_entries
),
"Indexing policy of the index",

View File

@@ -110,14 +110,15 @@ AccessMapPatternMatching::AccessMapEntry *
AccessMapPatternMatching::getAccessMapEntry(Addr am_addr,
bool is_secure)
{
AccessMapEntry *am_entry = accessMapTable.findEntry(am_addr, is_secure);
const TaggedEntry::KeyType key{am_addr, is_secure};
AccessMapEntry *am_entry = accessMapTable.findEntry(key);
if (am_entry != nullptr) {
accessMapTable.accessEntry(am_entry);
} else {
am_entry = accessMapTable.findVictim(am_addr);
am_entry = accessMapTable.findVictim(key);
assert(am_entry != nullptr);
accessMapTable.insertEntry(am_addr, is_secure, am_entry);
accessMapTable.insertEntry(key, am_entry);
}
return am_entry;
}

View File

@@ -94,7 +94,7 @@ class AccessMapPatternMatching : public ClockedObject
/** vector containing the state of the cachelines in this zone */
std::vector<AccessMapState> states;
AccessMapEntry(size_t num_entries, BaseIndexingPolicy *ip)
AccessMapEntry(size_t num_entries, TaggedIndexingPolicy *ip)
: TaggedEntry(ip), states(num_entries, AM_INIT)
{
}

View File

@@ -1,4 +1,16 @@
/**
* Copyright (c) 2024 Arm Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2018 Metempsy Technology Consulting
* All rights reserved.
*
@@ -64,30 +76,14 @@ class AssociativeSet : public AssociativeCache<Entry>
AssociativeSet(const char *name, const size_t num_entries,
const size_t associativity_,
replacement_policy::Base *repl_policy,
BaseIndexingPolicy *indexing_policy,
typename Entry::IndexingPolicy *indexing_policy,
Entry const &init_val = Entry());
/**
* Find an entry within the set
* @param addr key element
* @param is_secure tag element
* @return returns a pointer to the wanted entry or nullptr if it does not
* exist.
*/
Entry* findEntry(Addr addr, bool is_secure) const;
/**
* Indicate that an entry has just been inserted
* @param addr key of the container
* @param is_secure tag component of the container
* @param entry pointer to the container entry to be inserted
*/
void insertEntry(Addr addr, bool is_secure, Entry* entry);
private:
// The following APIs are excluded since they lack the secure bit
using AssociativeCache<Entry>::findEntry;
using AssociativeCache<Entry>::insertEntry;
using AssociativeCache<Entry>::getPossibleEntries;
using AssociativeCache<Entry>::replPolicy;
using AssociativeCache<Entry>::indexingPolicy;
};

View File

@@ -1,4 +1,16 @@
/**
* Copyright (c) 2024 Arm Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2018 Metempsy Technology Consulting
* All rights reserved.
*
@@ -40,38 +52,13 @@ AssociativeSet<Entry>::AssociativeSet(const char *name,
const size_t num_entries,
const size_t associativity_,
replacement_policy::Base *repl_policy,
BaseIndexingPolicy *indexing_policy,
typename Entry::IndexingPolicy *indexing_policy,
Entry const &init_val)
: AssociativeCache<Entry>(name, num_entries, associativity_,
repl_policy, indexing_policy, init_val)
{
}
template <class Entry>
Entry*
AssociativeSet<Entry>::findEntry(Addr addr, bool is_secure) const
{
Addr tag = indexingPolicy->extractTag(addr);
auto candidates = indexingPolicy->getPossibleEntries(addr);
for (auto candidate : candidates) {
Entry* entry = static_cast<Entry*>(candidate);
if (entry->matchTag(tag, is_secure)) {
return entry;
}
}
return nullptr;
}
template<class Entry>
void
AssociativeSet<Entry>::insertEntry(Addr addr, bool is_secure, Entry* entry)
{
entry->insert(indexingPolicy->extractTag(addr), is_secure);
replPolicy->reset(entry->replacementData);
}
} // namespace gem5
#endif//__CACHE_PREFETCH_ASSOCIATIVE_SET_IMPL_HH__

View File

@@ -87,7 +87,8 @@ IndirectMemory::calculatePrefetch(const PrefetchInfo &pfi,
}
} else {
// if misses are not being tracked, attempt to detect stream accesses
PrefetchTableEntry *pt_entry = prefetchTable.findEntry(pc, is_secure);
const PrefetchTableEntry::KeyType key{pc, is_secure};
PrefetchTableEntry *pt_entry = prefetchTable.findEntry(key);
if (pt_entry != nullptr) {
prefetchTable.accessEntry(pt_entry);
@@ -159,9 +160,9 @@ IndirectMemory::calculatePrefetch(const PrefetchInfo &pfi,
}
}
} else {
pt_entry = prefetchTable.findVictim(pc);
pt_entry = prefetchTable.findVictim(key);
assert(pt_entry != nullptr);
prefetchTable.insertEntry(pc, pt_entry-> secure, pt_entry);
prefetchTable.insertEntry(key, pt_entry);
pt_entry->address = addr;
pt_entry->secure = is_secure;
}
@@ -174,7 +175,8 @@ IndirectMemory::allocateOrUpdateIPDEntry(
{
// The address of the pt_entry is used to index the IPD
Addr ipd_entry_addr = (Addr) pt_entry;
IndirectPatternDetectorEntry *ipd_entry = ipd.findEntry(ipd_entry_addr);
const IndirectPatternDetectorEntry::KeyType key{ipd_entry_addr, false};
IndirectPatternDetectorEntry *ipd_entry = ipd.findEntry(key);
if (ipd_entry != nullptr) {
ipd.accessEntry(ipd_entry);
if (!ipd_entry->secondIndexSet) {
@@ -189,9 +191,9 @@ IndirectMemory::allocateOrUpdateIPDEntry(
ipdEntryTrackingMisses = nullptr;
}
} else {
ipd_entry = ipd.findVictim(ipd_entry_addr);
ipd_entry = ipd.findVictim(key);
assert(ipd_entry != nullptr);
ipd.insertEntry(ipd_entry_addr, ipd_entry);
ipd.insertEntry(key, ipd_entry);
ipd_entry->idx1 = index;
ipdEntryTrackingMisses = ipd_entry;
}

View File

@@ -99,7 +99,7 @@ class IndirectMemory : public Queued
bool increasedIndirectCounter;
PrefetchTableEntry(unsigned indirect_counter_bits,
BaseIndexingPolicy *ip)
TaggedIndexingPolicy *ip)
: TaggedEntry(ip), address(0), secure(false), streamCounter(0),
enabled(false), index(0), baseAddr(0), shift(0),
indirectCounter(indirect_counter_bits),
@@ -144,7 +144,7 @@ class IndirectMemory : public Queued
IndirectPatternDetectorEntry(unsigned int num_addresses,
unsigned int num_shifts,
BaseIndexingPolicy *ip)
TaggedIndexingPolicy *ip)
: TaggedEntry(ip), idx1(0), idx2(0), secondIndexSet(false),
numMisses(0),
baseAddr(num_addresses, std::vector<Addr>(num_shifts))

View File

@@ -85,7 +85,8 @@ IrregularStreamBuffer::calculatePrefetch(const PrefetchInfo &pfi,
// Training, if the entry exists, then we found a correlation between
// the entry lastAddress (named as correlated_addr_A) and the address of
// the current access (named as correlated_addr_B)
TrainingUnitEntry *entry = trainingUnit.findEntry(pc, is_secure);
const TrainingUnitEntry::KeyType key{pc, is_secure};
TrainingUnitEntry *entry = trainingUnit.findEntry(key);
bool correlated_addr_found = false;
Addr correlated_addr_A = 0;
Addr correlated_addr_B = 0;
@@ -95,10 +96,10 @@ IrregularStreamBuffer::calculatePrefetch(const PrefetchInfo &pfi,
correlated_addr_A = entry->lastAddress;
correlated_addr_B = addr;
} else {
entry = trainingUnit.findVictim(pc);
entry = trainingUnit.findVictim(key);
assert(entry != nullptr);
trainingUnit.insertEntry(pc, is_secure, entry);
trainingUnit.insertEntry(key, entry);
}
// Update the entry
entry->lastAddress = addr;
@@ -149,15 +150,15 @@ IrregularStreamBuffer::calculatePrefetch(const PrefetchInfo &pfi,
// (given the structured address S, prefetch S+1, S+2, .. up to S+degree)
Addr amc_address = addr / prefetchCandidatesPerEntry;
Addr map_index = addr % prefetchCandidatesPerEntry;
AddressMappingEntry *ps_am = psAddressMappingCache.findEntry(amc_address,
is_secure);
AddressMappingEntry *ps_am = psAddressMappingCache.findEntry(
{amc_address, is_secure});
if (ps_am != nullptr) {
AddressMapping &mapping = ps_am->mappings[map_index];
if (mapping.counter > 0) {
Addr sp_address = mapping.address / prefetchCandidatesPerEntry;
Addr sp_index = mapping.address % prefetchCandidatesPerEntry;
AddressMappingEntry *sp_am =
spAddressMappingCache.findEntry(sp_address, is_secure);
spAddressMappingCache.findEntry({sp_address, is_secure});
if (sp_am == nullptr) {
// The entry has been evicted, can not generate prefetches
return;
@@ -183,15 +184,15 @@ IrregularStreamBuffer::getPSMapping(Addr paddr, bool is_secure)
Addr amc_address = paddr / prefetchCandidatesPerEntry;
Addr map_index = paddr % prefetchCandidatesPerEntry;
AddressMappingEntry *ps_entry =
psAddressMappingCache.findEntry(amc_address, is_secure);
psAddressMappingCache.findEntry({amc_address, is_secure});
if (ps_entry != nullptr) {
// A PS-AMC line already exists
psAddressMappingCache.accessEntry(ps_entry);
} else {
ps_entry = psAddressMappingCache.findVictim(amc_address);
ps_entry = psAddressMappingCache.findVictim({amc_address, is_secure});
assert(ps_entry != nullptr);
psAddressMappingCache.insertEntry(amc_address, is_secure, ps_entry);
psAddressMappingCache.insertEntry({amc_address, is_secure}, ps_entry);
}
return ps_entry->mappings[map_index];
}
@@ -203,14 +204,14 @@ IrregularStreamBuffer::addStructuralToPhysicalEntry(
Addr amc_address = structural_address / prefetchCandidatesPerEntry;
Addr map_index = structural_address % prefetchCandidatesPerEntry;
AddressMappingEntry *sp_entry =
spAddressMappingCache.findEntry(amc_address, is_secure);
spAddressMappingCache.findEntry({amc_address, is_secure});
if (sp_entry != nullptr) {
spAddressMappingCache.accessEntry(sp_entry);
} else {
sp_entry = spAddressMappingCache.findVictim(amc_address);
sp_entry = spAddressMappingCache.findVictim({amc_address, is_secure});
assert(sp_entry != nullptr);
spAddressMappingCache.insertEntry(amc_address, is_secure, sp_entry);
spAddressMappingCache.insertEntry({amc_address, is_secure}, sp_entry);
}
AddressMapping &mapping = sp_entry->mappings[map_index];
mapping.address = physical_address;

View File

@@ -66,7 +66,7 @@ class IrregularStreamBuffer : public Queued
*/
struct TrainingUnitEntry : public TaggedEntry
{
TrainingUnitEntry(BaseIndexingPolicy *ip)
TrainingUnitEntry(TaggedIndexingPolicy *ip)
: TaggedEntry(ip), lastAddress(0), lastAddressSecure(false)
{}
Addr lastAddress;
@@ -92,7 +92,7 @@ class IrregularStreamBuffer : public Queued
{
std::vector<AddressMapping> mappings;
AddressMappingEntry(size_t num_mappings, unsigned counter_bits,
BaseIndexingPolicy *ip)
TaggedIndexingPolicy *ip)
: TaggedEntry(ip), mappings(num_mappings, counter_bits)
{
}

View File

@@ -177,16 +177,15 @@ PIF::notifyRetiredInst(const Addr pc)
// Insert the spatial entry into the history buffer and update
// the 'iterator' table to point to the new entry
historyBuffer.push_back(spatialCompactor);
constexpr bool is_secure = false;
auto idx_entry = index.findEntry(spatialCompactor.trigger,
is_secure);
const IndexEntry::KeyType key{spatialCompactor.trigger, false};
auto idx_entry = index.findEntry(key);
if (idx_entry != nullptr) {
index.accessEntry(idx_entry);
} else {
idx_entry = index.findVictim(spatialCompactor.trigger);
idx_entry = index.findVictim(key);
assert(idx_entry != nullptr);
index.insertEntry(spatialCompactor.trigger, is_secure,
idx_entry);
index.insertEntry(key, idx_entry);
}
idx_entry->historyIt =
historyBuffer.getIterator(historyBuffer.tail());
@@ -223,7 +222,7 @@ PIF::calculatePrefetch(const PrefetchInfo &pfi,
// Check if a valid entry in the 'index' table is found and allocate a new
// active prediction stream
IndexEntry *idx_entry = index.findEntry(pc, is_secure);
IndexEntry *idx_entry = index.findEntry({pc, is_secure});
if (idx_entry != nullptr) {
index.accessEntry(idx_entry);

View File

@@ -171,20 +171,21 @@ SignaturePath::getSignatureEntry(Addr ppn, bool is_secure,
stride_t block, bool &miss, stride_t &stride,
double &initial_confidence)
{
SignatureEntry* signature_entry = signatureTable.findEntry(ppn, is_secure);
const SignatureEntry::KeyType key{ppn, is_secure};
SignatureEntry* signature_entry = signatureTable.findEntry(key);
if (signature_entry != nullptr) {
signatureTable.accessEntry(signature_entry);
miss = false;
stride = block - signature_entry->lastBlock;
} else {
signature_entry = signatureTable.findVictim(ppn);
signature_entry = signatureTable.findVictim(key);
assert(signature_entry != nullptr);
// Sets signature_entry->signature, initial_confidence, and stride
handleSignatureTableMiss(block, signature_entry->signature,
initial_confidence, stride);
signatureTable.insertEntry(ppn, is_secure, signature_entry);
signatureTable.insertEntry(key, signature_entry);
miss = true;
}
signature_entry->lastBlock = block;
@@ -194,17 +195,17 @@ SignaturePath::getSignatureEntry(Addr ppn, bool is_secure,
SignaturePath::PatternEntry &
SignaturePath::getPatternEntry(Addr signature)
{
constexpr bool is_secure = false;
PatternEntry* pattern_entry = patternTable.findEntry(signature, is_secure);
const PatternEntry::KeyType key{signature, false};
PatternEntry* pattern_entry = patternTable.findEntry(key);
if (pattern_entry != nullptr) {
// Signature found
patternTable.accessEntry(pattern_entry);
} else {
// Signature not found
pattern_entry = patternTable.findVictim(signature);
pattern_entry = patternTable.findVictim(key);
assert(pattern_entry != nullptr);
patternTable.insertEntry(signature, is_secure, pattern_entry);
patternTable.insertEntry(key, pattern_entry);
}
return *pattern_entry;
}
@@ -280,7 +281,7 @@ SignaturePath::calculatePrefetch(const PrefetchInfo &pfi,
// confidence, these are prefetch candidates
// - select the entry with the highest counter as the "lookahead"
PatternEntry *current_pattern_entry =
patternTable.findEntry(current_signature, is_secure);
patternTable.findEntry({current_signature, is_secure});
PatternStrideEntry const *lookahead = nullptr;
if (current_pattern_entry != nullptr) {
unsigned long max_counter = 0;

View File

@@ -79,7 +79,7 @@ class SignaturePath : public Queued
signature_t signature;
/** Last accessed block within a page */
stride_t lastBlock;
SignatureEntry(BaseIndexingPolicy *ip)
SignatureEntry(TaggedIndexingPolicy *ip)
: TaggedEntry(ip), signature(0), lastBlock(0)
{}
};
@@ -104,7 +104,7 @@ class SignaturePath : public Queued
/** use counter, used by SPPv2 */
SatCounter8 counter;
PatternEntry(size_t num_strides, unsigned counter_bits,
BaseIndexingPolicy *ip)
TaggedIndexingPolicy *ip)
: TaggedEntry(ip), strideEntries(num_strides, counter_bits),
counter(counter_bits)
{

View File

@@ -132,11 +132,11 @@ SignaturePathV2::handlePageCrossingLookahead(signature_t signature,
{
// Always use the replacement policy to assign new entries, as all
// of them are unique, there are never "hits" in the GHR
GlobalHistoryEntry *gh_entry = globalHistoryRegister.findVictim(0);
const GlobalHistoryEntry::KeyType key{0, false};
GlobalHistoryEntry *gh_entry = globalHistoryRegister.findVictim(key);
assert(gh_entry != nullptr);
// Any address value works, as it is never used
constexpr bool is_secure = false;
globalHistoryRegister.insertEntry(0, is_secure, gh_entry);
globalHistoryRegister.insertEntry(key, gh_entry);
gh_entry->signature = signature;
gh_entry->lastBlock = last_offset;

View File

@@ -62,7 +62,7 @@ class SignaturePathV2 : public SignaturePath
double confidence;
stride_t lastBlock;
stride_t delta;
GlobalHistoryEntry(BaseIndexingPolicy *ip)
GlobalHistoryEntry(TaggedIndexingPolicy *ip)
: TaggedEntry(ip), signature(0), confidence(0.0), lastBlock(0),
delta(0)
{}

View File

@@ -93,16 +93,14 @@ STeMS::checkForActiveGenerationsEnd(const CacheAccessor &cache)
}
}
if (generation_ended) {
const ActiveGenerationTableEntry::KeyType key{pst_addr, false};
// PST is indexed using the PC (secure bit is unused)
constexpr bool is_secure = false;
auto pst_entry = patternSequenceTable.findEntry(pst_addr,
is_secure);
auto pst_entry = patternSequenceTable.findEntry(key);
if (pst_entry == nullptr) {
// Tipically an entry will not exist
pst_entry = patternSequenceTable.findVictim(pst_addr);
pst_entry = patternSequenceTable.findVictim(key);
assert(pst_entry != nullptr);
patternSequenceTable.insertEntry(pst_addr, is_secure,
pst_entry);
patternSequenceTable.insertEntry(key, pst_entry);
} else {
patternSequenceTable.accessEntry(pst_entry);
}
@@ -159,8 +157,9 @@ STeMS::calculatePrefetch(const PrefetchInfo &pfi,
// Check if any active generation has ended
checkForActiveGenerationsEnd(cache);
const ActiveGenerationTableEntry::KeyType key{sr_addr, is_secure};
ActiveGenerationTableEntry *agt_entry =
activeGenerationTable.findEntry(sr_addr, is_secure);
activeGenerationTable.findEntry(key);
if (agt_entry != nullptr) {
// found an entry in the AGT, entry is currently being recorded,
// add the offset
@@ -177,9 +176,9 @@ STeMS::calculatePrefetch(const PrefetchInfo &pfi,
lastTriggerCounter = 0;
// allocate a new AGT entry
agt_entry = activeGenerationTable.findVictim(sr_addr);
agt_entry = activeGenerationTable.findVictim(key);
assert(agt_entry != nullptr);
activeGenerationTable.insertEntry(sr_addr, is_secure, agt_entry);
activeGenerationTable.insertEntry(key, agt_entry);
agt_entry->pc = pc;
agt_entry->paddress = paddr;
agt_entry->addOffset(sr_offset);
@@ -224,11 +223,10 @@ STeMS::reconstructSequence(
// Now query the PST with the PC of each RMOB entry
idx = 0;
constexpr bool is_secure = false;
for (auto it = rmob_it; it != rmob.end() && (idx < reconstructionEntries);
it++) {
auto pst_entry = patternSequenceTable.findEntry(it->pstAddress,
is_secure);
auto pst_entry = patternSequenceTable.findEntry(
{it->pstAddress, false});
if (pst_entry != nullptr) {
patternSequenceTable.accessEntry(pst_entry);
for (auto &seq_entry : pst_entry->sequence) {

View File

@@ -94,7 +94,7 @@ class STeMS : public Queued
std::vector<SequenceEntry> sequence;
ActiveGenerationTableEntry(int num_positions,
BaseIndexingPolicy *ip)
TaggedIndexingPolicy *ip)
: TaggedEntry(ip), paddress(0), pc(0),
seqCounter(0), sequence(num_positions)
{

View File

@@ -64,7 +64,7 @@ namespace prefetch
{
Stride::StrideEntry::StrideEntry(const SatCounter8& init_confidence,
BaseIndexingPolicy *ip)
TaggedIndexingPolicy *ip)
: TaggedEntry(ip), confidence(init_confidence)
{
invalidate();
@@ -144,7 +144,8 @@ Stride::calculatePrefetch(const PrefetchInfo &pfi,
PCTable& pc_table = findTable(requestor_id);
// Search for entry in the pc table
StrideEntry *entry = pc_table.findEntry(pc, is_secure);
const StrideEntry::KeyType key{pc, is_secure};
StrideEntry *entry = pc_table.findEntry(key);
if (entry != nullptr) {
pc_table.accessEntry(entry);
@@ -198,17 +199,18 @@ Stride::calculatePrefetch(const PrefetchInfo &pfi,
DPRINTF(HWPrefetch, "Miss: PC %x pkt_addr %x (%s)\n", pc, pf_addr,
is_secure ? "s" : "ns");
StrideEntry* entry = pc_table.findVictim(pc);
StrideEntry* entry = pc_table.findVictim(key);
// Insert new entry's data
entry->lastAddr = pf_addr;
pc_table.insertEntry(pc, is_secure, entry);
pc_table.insertEntry(key, entry);
}
}
uint32_t
StridePrefetcherHashedSetAssociative::extractSet(const Addr pc) const
StridePrefetcherHashedSetAssociative::extractSet(const KeyType &key) const
{
const Addr pc = key.address;
const Addr hash1 = pc >> 1;
const Addr hash2 = hash1 >> tagShift;
return (hash1 ^ hash2) & setMask;

View File

@@ -76,16 +76,16 @@ namespace prefetch
* Override the default set associative to apply a specific hash function
* when extracting a set.
*/
class StridePrefetcherHashedSetAssociative : public SetAssociative
class StridePrefetcherHashedSetAssociative : public TaggedSetAssociative
{
protected:
uint32_t extractSet(const Addr addr) const override;
uint32_t extractSet(const KeyType &key) const override;
Addr extractTag(const Addr addr) const override;
public:
StridePrefetcherHashedSetAssociative(
const StridePrefetcherHashedSetAssociativeParams &p)
: SetAssociative(p)
: TaggedSetAssociative(p)
{
}
~StridePrefetcherHashedSetAssociative() = default;
@@ -120,11 +120,11 @@ class Stride : public Queued
const int assoc;
const int numEntries;
BaseIndexingPolicy* const indexingPolicy;
TaggedIndexingPolicy* const indexingPolicy;
replacement_policy::Base* const replacementPolicy;
PCTableInfo(int assoc, int num_entries,
BaseIndexingPolicy* indexing_policy,
TaggedIndexingPolicy* indexing_policy,
replacement_policy::Base* repl_policy)
: assoc(assoc), numEntries(num_entries),
indexingPolicy(indexing_policy), replacementPolicy(repl_policy)
@@ -136,7 +136,7 @@ class Stride : public Queued
struct StrideEntry : public TaggedEntry
{
StrideEntry(const SatCounter8& init_confidence,
BaseIndexingPolicy *ip);
TaggedIndexingPolicy *ip);
void invalidate() override;

View File

@@ -26,18 +26,28 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Import('*')
Import("*")
SimObject('Tags.py', sim_objects=[
'BaseTags', 'BaseSetAssoc', 'SectorTags', 'CompressedTags', 'FALRU'])
SimObject(
"Tags.py",
sim_objects=[
"BaseTags",
"BaseSetAssoc",
"SectorTags",
"CompressedTags",
"FALRU",
"TaggedIndexingPolicy",
"TaggedSetAssociative",
],
)
Source('base.cc')
Source('base_set_assoc.cc')
Source('compressed_tags.cc')
Source('dueling.cc')
Source('fa_lru.cc')
Source('sector_blk.cc')
Source('sector_tags.cc')
Source('super_blk.cc')
Source("base.cc")
Source("base_set_assoc.cc")
Source("compressed_tags.cc")
Source("dueling.cc")
Source("fa_lru.cc")
Source("sector_blk.cc")
Source("sector_tags.cc")
Source("super_blk.cc")
GTest('dueling.test', 'dueling.test.cc', 'dueling.cc')
GTest("dueling.test", "dueling.test.cc", "dueling.cc")

View File

@@ -39,6 +39,29 @@ from m5.params import *
from m5.proxy import *
class TaggedIndexingPolicy(SimObject):
    # Abstract base for indexing policies whose lookup key pairs an
    # address with a secure bit (wraps the C++ template instantiated
    # with gem5::TaggedTypes, whose KeyType is {address, secure}).
    type = "TaggedIndexingPolicy"
    abstract = True
    cxx_class = "gem5::IndexingPolicyTemplate<gem5::TaggedTypes>"
    cxx_header = "mem/cache/tags/tagged_entry.hh"
    cxx_template_params = ["class Types"]

    # Get the size from the parent (cache)
    size = Param.MemorySize(Parent.size, "capacity in bytes")

    # Get the entry size from the parent (tags)
    entry_size = Param.Int(Parent.entry_size, "entry size in bytes")

    # Get the associativity
    assoc = Param.Int(Parent.assoc, "associativity")
class TaggedSetAssociative(TaggedIndexingPolicy):
    # Set-associative specialization: indexes by the key's address and
    # ignores the secure bit for set selection (see the C++ class of
    # the same name in tagged_entry.hh).
    type = "TaggedSetAssociative"
    cxx_class = "gem5::TaggedSetAssociative"
    cxx_header = "mem/cache/tags/tagged_entry.hh"
class BaseTags(ClockedObject):
type = "BaseTags"
abstract = True
@@ -71,8 +94,8 @@ class BaseTags(ClockedObject):
)
# Get indexing policy
indexing_policy = Param.BaseIndexingPolicy(
SetAssociative(), "Indexing policy"
indexing_policy = Param.TaggedIndexingPolicy(
TaggedSetAssociative(), "Indexing policy"
)
partitioning_manager = Param.PartitionManager(

View File

@@ -79,19 +79,16 @@ BaseTags::findBlockBySetAndWay(int set, int way) const
}
CacheBlk*
BaseTags::findBlock(Addr addr, bool is_secure) const
BaseTags::findBlock(const CacheBlk::KeyType &key) const
{
// Extract block tag
Addr tag = extractTag(addr);
// Find possible entries that may contain the given address
const std::vector<ReplaceableEntry*> entries =
indexingPolicy->getPossibleEntries(addr);
indexingPolicy->getPossibleEntries(key);
// Search for block
for (const auto& location : entries) {
CacheBlk* blk = static_cast<CacheBlk*>(location);
if (blk->matchTag(tag, is_secure)) {
if (blk->match(key)) {
return blk;
}
}
@@ -116,7 +113,7 @@ BaseTags::insertBlock(const PacketPtr pkt, CacheBlk *blk)
// Insert block with tag, src requestor id, task id and PartitionId
const auto partition_id = partitionManager ?
partitionManager->readPacketPartitionID(pkt) : 0;
blk->insert(extractTag(pkt->getAddr()), pkt->isSecure(), requestor_id,
blk->insert({pkt->getAddr(), pkt->isSecure()}, requestor_id,
pkt->req->taskId(), partition_id);
// Check if cache warm up is done

View File

@@ -86,7 +86,7 @@ class BaseTags : public ClockedObject
System *system;
/** Indexing policy */
BaseIndexingPolicy *indexingPolicy;
TaggedIndexingPolicy *indexingPolicy;
/** Partitioning manager */
partitioning_policy::PartitionManager *partitionManager;
@@ -199,7 +199,7 @@ class BaseTags : public ClockedObject
* @param is_secure True if the target memory space is secure.
* @return Pointer to the cache block.
*/
virtual CacheBlk *findBlock(Addr addr, bool is_secure) const;
virtual CacheBlk *findBlock(const CacheBlk::KeyType &key) const;
/**
* Find a block given set and way.
@@ -282,7 +282,7 @@ class BaseTags : public ClockedObject
* @param partition_id Partition ID for resource management.
* @return Cache block to be replaced.
*/
virtual CacheBlk* findVictim(Addr addr, const bool is_secure,
virtual CacheBlk* findVictim(const CacheBlk::KeyType &key,
const std::size_t size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id=0) = 0;

View File

@@ -127,7 +127,7 @@ class BaseSetAssoc : public BaseTags
*/
CacheBlk* accessBlock(const PacketPtr pkt, Cycles &lat) override
{
CacheBlk *blk = findBlock(pkt->getAddr(), pkt->isSecure());
CacheBlk *blk = findBlock({pkt->getAddr(), pkt->isSecure()});
// Access all tags in parallel, hence one in each way. The data side
// either accesses all blocks in parallel, or one block sequentially on
@@ -167,14 +167,14 @@ class BaseSetAssoc : public BaseTags
* @param partition_id Partition ID for resource management.
* @return Cache block to be replaced.
*/
CacheBlk* findVictim(Addr addr, const bool is_secure,
CacheBlk* findVictim(const CacheBlk::KeyType& key,
const std::size_t size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id=0) override
{
// Get possible entries to be victimized
std::vector<ReplaceableEntry*> entries =
indexingPolicy->getPossibleEntries(addr);
indexingPolicy->getPossibleEntries(key);
// Filter entries based on PartitionID
if (partitionManager) {
@@ -243,7 +243,7 @@ class BaseSetAssoc : public BaseTags
*/
Addr regenerateBlkAddr(const CacheBlk* blk) const override
{
return indexingPolicy->regenerateAddr(blk->getTag(), blk);
return indexingPolicy->regenerateAddr({blk->getTag(), false}, blk);
}
bool anyBlk(std::function<bool(CacheBlk &)> visitor) override {

View File

@@ -115,14 +115,14 @@ CompressedTags::tagsInit()
}
CacheBlk*
CompressedTags::findVictim(Addr addr, const bool is_secure,
CompressedTags::findVictim(const CacheBlk::KeyType& key,
const std::size_t compressed_size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id=0)
{
// Get all possible locations of this superblock
std::vector<ReplaceableEntry*> superblock_entries =
indexingPolicy->getPossibleEntries(addr);
indexingPolicy->getPossibleEntries(key);
// Filter entries based on PartitionID
if (partitionManager){
@@ -132,13 +132,12 @@ CompressedTags::findVictim(Addr addr, const bool is_secure,
// Check if the superblock this address belongs to has been allocated. If
// so, try co-allocating
Addr tag = extractTag(addr);
SuperBlk* victim_superblock = nullptr;
bool is_co_allocation = false;
const uint64_t offset = extractSectorOffset(addr);
const uint64_t offset = extractSectorOffset(key.address);
for (const auto& entry : superblock_entries){
SuperBlk* superblock = static_cast<SuperBlk*>(entry);
if (superblock->matchTag(tag, is_secure) &&
if (superblock->match(key) &&
!superblock->blks[offset]->isValid() &&
superblock->isCompressed() &&
superblock->canCoAllocate(compressed_size))

View File

@@ -117,7 +117,7 @@ class CompressedTags : public SectorTags
* @param partition_id Partition ID for resource management.
* @return Cache block to be replaced.
*/
CacheBlk* findVictim(Addr addr, const bool is_secure,
CacheBlk* findVictim(const CacheBlk::KeyType& key,
const std::size_t compressed_size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id) override;

View File

@@ -145,7 +145,7 @@ FALRU::accessBlock(const PacketPtr pkt, Cycles &lat,
{
CachesMask mask = 0;
FALRUBlk* blk =
static_cast<FALRUBlk*>(findBlock(pkt->getAddr(), pkt->isSecure()));
static_cast<FALRUBlk*>(findBlock({pkt->getAddr(), pkt->isSecure()}));
// If a cache hit
if (blk && blk->isValid()) {
@@ -167,19 +167,20 @@ FALRU::accessBlock(const PacketPtr pkt, Cycles &lat,
}
CacheBlk*
FALRU::findBlock(Addr addr, bool is_secure) const
FALRU::findBlock(const CacheBlk::KeyType &lookup) const
{
FALRUBlk* blk = nullptr;
Addr tag = extractTag(addr);
auto iter = tagHash.find(std::make_pair(tag, is_secure));
Addr tag = extractTag(lookup.address);
auto key = std::make_pair(tag, lookup.secure);
auto iter = tagHash.find(key);
if (iter != tagHash.end()) {
blk = (*iter).second;
}
if (blk && blk->isValid()) {
assert(blk->getTag() == tag);
assert(blk->isSecure() == is_secure);
assert(blk->isSecure() == lookup.secure);
}
return blk;
@@ -193,7 +194,7 @@ FALRU::findBlockBySetAndWay(int set, int way) const
}
CacheBlk*
FALRU::findVictim(Addr addr, const bool is_secure, const std::size_t size,
FALRU::findVictim(const CacheBlk::KeyType& key, const std::size_t size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id)
{

View File

@@ -198,7 +198,7 @@ class FALRU : public BaseTags
* @param asid The address space ID.
* @return Pointer to the cache block.
*/
CacheBlk* findBlock(Addr addr, bool is_secure) const override;
CacheBlk* findBlock(const CacheBlk::KeyType &lookup) const override;
/**
* Find a block given set and way.
@@ -220,7 +220,7 @@ class FALRU : public BaseTags
* @param partition_id Partition ID for resource management.
* @return Cache block to be replaced.
*/
CacheBlk* findVictim(Addr addr, const bool is_secure,
CacheBlk* findVictim(const CacheBlk::KeyType& key,
const std::size_t size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id=0) override;

View File

@@ -83,18 +83,18 @@ SectorSubBlk::setValid()
}
void
SectorSubBlk::insert(const Addr tag, const bool is_secure)
SectorSubBlk::insert(const KeyType &tag)
{
// Make sure it is not overwriting another sector
panic_if(_sectorBlk && _sectorBlk->isValid() &&
!_sectorBlk->matchTag(tag, is_secure), "Overwriting valid sector!");
!_sectorBlk->match(tag), "Overwriting valid sector!");
// If the sector is not valid, insert the new tag. The sector block
// handles its own tag's invalidation, so do not attempt to insert MaxAddr.
if ((_sectorBlk && !_sectorBlk->isValid()) && (tag != MaxAddr)) {
_sectorBlk->insert(tag, is_secure);
if ((_sectorBlk && !_sectorBlk->isValid()) && (tag.address != MaxAddr)) {
_sectorBlk->insert(tag);
}
CacheBlk::insert(tag, is_secure);
CacheBlk::insert(tag);
}
void
@@ -112,7 +112,7 @@ SectorSubBlk::print() const
}
SectorBlk::SectorBlk()
: TaggedEntry(), _validCounter(0)
: TaggedEntry(nullptr), _validCounter(0)
{
}

View File

@@ -113,7 +113,7 @@ class SectorSubBlk : public CacheBlk
*/
void setValid() override;
void insert(const Addr tag, const bool is_secure) override;
void insert(const KeyType &tag) override;
/**
* Invalidate the block and inform sector block.

View File

@@ -156,7 +156,7 @@ SectorTags::invalidate(CacheBlk *blk)
CacheBlk*
SectorTags::accessBlock(const PacketPtr pkt, Cycles &lat)
{
CacheBlk *blk = findBlock(pkt->getAddr(), pkt->isSecure());
CacheBlk *blk = findBlock({pkt->getAddr(), pkt->isSecure()});
// Access all tags in parallel, hence one in each way. The data side
// either accesses all blocks in parallel, or one block sequentially on
@@ -262,23 +262,20 @@ SectorTags::moveBlock(CacheBlk *src_blk, CacheBlk *dest_blk)
}
CacheBlk*
SectorTags::findBlock(Addr addr, bool is_secure) const
SectorTags::findBlock(const CacheBlk::KeyType &key) const
{
// Extract sector tag
const Addr tag = extractTag(addr);
// The address can only be mapped to a specific location of a sector
// due to sectors being composed of contiguous-address entries
const Addr offset = extractSectorOffset(addr);
const Addr offset = extractSectorOffset(key.address);
// Find all possible sector entries that may contain the given address
const std::vector<ReplaceableEntry*> entries =
indexingPolicy->getPossibleEntries(addr);
indexingPolicy->getPossibleEntries(key);
// Search for block
for (const auto& sector : entries) {
auto blk = static_cast<SectorBlk*>(sector)->blks[offset];
if (blk->matchTag(tag, is_secure)) {
if (blk->match(key)) {
return blk;
}
}
@@ -288,24 +285,24 @@ SectorTags::findBlock(Addr addr, bool is_secure) const
}
CacheBlk*
SectorTags::findVictim(Addr addr, const bool is_secure, const std::size_t size,
SectorTags::findVictim(const CacheBlk::KeyType &key,
const std::size_t size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id)
{
// Get possible entries to be victimized
std::vector<ReplaceableEntry*> sector_entries =
indexingPolicy->getPossibleEntries(addr);
indexingPolicy->getPossibleEntries(key);
// Filter entries based on PartitionID
if (partitionManager)
partitionManager->filterByPartition(sector_entries, partition_id);
// Check if the sector this address belongs to has been allocated
Addr tag = extractTag(addr);
SectorBlk* victim_sector = nullptr;
for (const auto& sector : sector_entries) {
SectorBlk* sector_blk = static_cast<SectorBlk*>(sector);
if (sector_blk->matchTag(tag, is_secure)) {
if (sector_blk->match(key)) {
victim_sector = sector_blk;
break;
}
@@ -325,11 +322,12 @@ SectorTags::findVictim(Addr addr, const bool is_secure, const std::size_t size,
}
// Get the entry of the victim block within the sector
SectorSubBlk* victim = victim_sector->blks[extractSectorOffset(addr)];
SectorSubBlk* victim = victim_sector->blks[
extractSectorOffset(key.address)];
// Get evicted blocks. Blocks are only evicted if the sectors mismatch and
// the currently existing sector is valid.
if (victim_sector->matchTag(tag, is_secure)) {
if (victim_sector->match(key)) {
// It would be a hit if victim was valid, and upgrades do not call
// findVictim, so it cannot happen
assert(!victim->isValid());
@@ -360,7 +358,8 @@ SectorTags::regenerateBlkAddr(const CacheBlk* blk) const
const SectorSubBlk* blk_cast = static_cast<const SectorSubBlk*>(blk);
const SectorBlk* sec_blk = blk_cast->getSectorBlock();
const Addr sec_addr =
indexingPolicy->regenerateAddr(blk->getTag(), sec_blk);
indexingPolicy->regenerateAddr(
{blk->getTag(), blk->isSecure()}, sec_blk);
return sec_addr | ((Addr)blk_cast->getSectorOffset() << sectorShift);
}

View File

@@ -173,7 +173,7 @@ class SectorTags : public BaseTags
* @param is_secure True if the target memory space is secure.
* @return Pointer to the cache block if found.
*/
CacheBlk* findBlock(Addr addr, bool is_secure) const override;
CacheBlk* findBlock(const CacheBlk::KeyType &key) const override;
/**
* Find replacement victim based on address.
@@ -185,7 +185,7 @@ class SectorTags : public BaseTags
* @param partition_id Partition ID for resource management.
* @return Cache block to be replaced.
*/
CacheBlk* findVictim(Addr addr, const bool is_secure,
CacheBlk* findVictim(const CacheBlk::KeyType &key,
const std::size_t size,
std::vector<CacheBlk*>& evict_blks,
const uint64_t partition_id) override;

View File

@@ -1,4 +1,16 @@
/**
* Copyright (c) 2024 Arm Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2020 Inria
* All rights reserved.
*
@@ -31,28 +43,88 @@
#include <cassert>
#include "base/cache/cache_entry.hh"
#include "base/cprintf.hh"
#include "base/logging.hh"
#include "base/types.hh"
#include "mem/cache/replacement_policies/replaceable_entry.hh"
#include "mem/cache/tags/indexing_policies/base.hh"
#include "params/TaggedIndexingPolicy.hh"
#include "params/TaggedSetAssociative.hh"
namespace gem5
{
/**
 * Type bundle parameterizing IndexingPolicyTemplate for tagged caches.
 * Its KeyType carries the secure bit alongside the address so that
 * lookups can match on both (unlike a plain Addr key).
 */
class TaggedTypes
{
  public:
    /** Lookup key: a memory address qualified by its secure bit. */
    struct KeyType
    {
        Addr address;
        bool secure;
    };

    using Params = TaggedIndexingPolicyParams;
};
using TaggedIndexingPolicy = IndexingPolicyTemplate<TaggedTypes>;
template class IndexingPolicyTemplate<TaggedTypes>;
/**
 * This version of set associative indexing deals with
 * a Lookup structure made of address and secure bit.
 * It extracts the address but discards the secure bit, which
 * is used for tagging only.
 */
class TaggedSetAssociative : public TaggedIndexingPolicy
{
  protected:
    /**
     * Compute the set index from the key's address.
     * The secure bit does not participate in set selection.
     */
    virtual uint32_t
    extractSet(const KeyType &key) const
    {
        return (key.address >> setShift) & setMask;
    }

  public:
    PARAMS(TaggedSetAssociative);
    TaggedSetAssociative(const Params &p)
      : TaggedIndexingPolicy(p)
    {}

    /** Return every entry (one per way) in the set the key maps to. */
    std::vector<ReplaceableEntry*>
    getPossibleEntries(const KeyType &key) const override
    {
        return sets[extractSet(key)];
    }

    /**
     * Rebuild a block address from a tag (passed in key.address) and
     * the entry's set; the secure bit is not part of the address.
     */
    Addr
    regenerateAddr(const KeyType &key,
                   const ReplaceableEntry *entry) const override
    {
        return (key.address << tagShift) | (entry->getSet() << setShift);
    }
};
/**
* A tagged entry is an entry containing a tag. Each tag is accompanied by a
* secure bit, which informs whether it belongs to a secure address space.
* A tagged entry's contents are only relevant if it is marked as valid.
*/
class TaggedEntry : public CacheEntry
class TaggedEntry : public ReplaceableEntry
{
public:
TaggedEntry(BaseIndexingPolicy *ip=nullptr)
: CacheEntry(ip), _secure(false)
using KeyType = TaggedTypes::KeyType;
using IndexingPolicy = TaggedIndexingPolicy;
TaggedEntry(IndexingPolicy *ip)
: _valid(false), _secure(false), _tag(MaxAddr), indexingPolicy(ip)
{}
~TaggedEntry() = default;
/**
* Checks if the entry is valid.
*
* @return True if the entry is valid.
*/
virtual bool isValid() const { return _valid; }
/**
* Check if this block holds data from the secure memory space.
*
@@ -60,6 +132,13 @@ class TaggedEntry : public CacheEntry
*/
bool isSecure() const { return _secure; }
/**
* Get tag associated to this block.
*
* @return The tag value.
*/
virtual Addr getTag() const { return _tag; }
/**
* Checks if the given tag information corresponds to this entry's.
*
@@ -67,10 +146,11 @@ class TaggedEntry : public CacheEntry
* @param is_secure Whether secure bit is set.
* @return True if the tag information matches this entry's.
*/
virtual bool
matchTag(Addr tag, bool is_secure) const
bool
match(const KeyType &key) const
{
return isValid() && (getTag() == tag) && (isSecure() == is_secure);
return isValid() && (getTag() == extractTag(key.address)) &&
(isSecure() == key.secure);
}
/**
@@ -80,23 +160,29 @@ class TaggedEntry : public CacheEntry
* @param tag The tag value.
*/
virtual void
insert(const Addr tag, const bool is_secure)
insert(const KeyType &key)
{
setValid();
setTag(tag);
if (is_secure) {
setTag(extractTag(key.address));
if (key.secure) {
setSecure();
}
}
/** Invalidate the block. Its contents are no longer valid. */
void
invalidate() override
virtual void invalidate()
{
CacheEntry::invalidate();
_valid = false;
setTag(MaxAddr);
clearSecure();
}
void
setIndexingPolicy(IndexingPolicy *ip)
{
indexingPolicy = ip;
}
std::string
print() const override
{
@@ -104,36 +190,53 @@ class TaggedEntry : public CacheEntry
isSecure(), isValid(), ReplaceableEntry::print());
}
bool
match(const Addr tag) const override
{
panic("Need is_secure arg");
return false;
}
void
insert(const Addr tag) override
{
panic("Need is_secure arg");
return;
}
protected:
/**
* Set tag associated to this block.
*
* @param tag The tag value.
*/
virtual void setTag(Addr tag) { _tag = tag; }
/** Set secure bit. */
virtual void setSecure() { _secure = true; }
Addr
extractTag(Addr addr) const
{
return indexingPolicy->extractTag(addr);
}
/** Set valid bit. The block must be invalid beforehand. */
virtual void
setValid()
{
assert(!isValid());
_valid = true;
}
private:
/**
* Valid bit. The contents of this entry are only valid if this bit is set.
* @sa invalidate()
* @sa insert()
*/
bool _valid;
/**
* Secure bit. Marks whether this entry refers to an address in the secure
* memory space. Must always be modified along with the tag.
*/
bool _secure;
/** The entry's tag. */
Addr _tag;
/** Reference to the indexing policy */
IndexingPolicy *indexingPolicy;
/** Clear secure bit. Should be only used by the invalidation function. */
void clearSecure() { _secure = false; }
/** Do not use API without is_secure flag. */
using CacheEntry::match;
using CacheEntry::insert;
};
} // namespace gem5