Add a generic cache template library (#745)

Add a generic cache template to construct internal storage structures.
Also add some example use cases by converting the prefetcher tables to
use this new library.
This commit is contained in:
Giacomo Travaglini
2024-04-11 08:00:34 +01:00
committed by GitHub
23 changed files with 625 additions and 286 deletions

325
src/base/cache/associative_cache.hh vendored Normal file
View File

@@ -0,0 +1,325 @@
/*
* Copyright (c) 2024 Pranith Kumar
* Copyright (c) 2018 Metempsy Technology Consulting
* All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __BASE_CACHE_ASSOCIATIVE_CACHE_HH__
#define __BASE_CACHE_ASSOCIATIVE_CACHE_HH__
#include <algorithm>
#include <iterator>
#include <type_traits>
#include <vector>

#include "base/cache/cache_entry.hh"
#include "base/intmath.hh"
#include "base/logging.hh"
#include "base/named.hh"
#include "base/types.hh"
#include "mem/cache/replacement_policies/base.hh"
#include "mem/cache/replacement_policies/replaceable_entry.hh"
#include "mem/cache/tags/indexing_policies/base.hh"
namespace gem5
{
/**
 * A generic, set-associative cache container. Entries are stored in a
 * flat vector and partitioned into sets by the indexing policy; victim
 * selection is delegated to the replacement policy.
 *
 * @tparam Entry the stored entry type; must derive from CacheEntry.
 */
template <typename Entry>
class AssociativeCache : public Named
{
    static_assert(std::is_base_of_v<CacheEntry, Entry>,
                  "Entry should be derived from CacheEntry");

    typedef replacement_policy::Base BaseReplacementPolicy;

  protected:
    /** Associativity of the cache. */
    size_t associativity;

    /** The replacement policy of the cache. */
    BaseReplacementPolicy *replPolicy;

    /** Indexing policy of the cache */
    BaseIndexingPolicy *indexingPolicy;

    /** The entries */
    std::vector<Entry> entries;

  private:
    /**
     * Sanity-check the cache geometry and wire every entry to the
     * indexing and replacement policies. Must be called after the
     * entries vector has been sized.
     *
     * @param _num_entries total number of entries of the container
     * @param _assoc number of entries in each associative set
     */
    void
    initParams(size_t _num_entries, size_t _assoc)
    {
        // Guard first: _num_entries % 0 would be undefined behavior.
        fatal_if(_assoc == 0, "The associativity of an AssociativeCache<> "
                 "must be greater than zero");
        fatal_if((_num_entries % _assoc) != 0, "The number of entries of an "
                 "AssociativeCache<> must be a multiple of its associativity");
        // size_t index: a deduced int would be compared against the
        // unsigned _num_entries (signed/unsigned mismatch).
        for (size_t entry_idx = 0; entry_idx < _num_entries; entry_idx++) {
            Entry *entry = &entries[entry_idx];
            indexingPolicy->setEntry(entry, entry_idx);
            entry->replacementData = replPolicy->instantiateEntry();
        }
    }

  public:
    /**
     * Empty constructor - need to call init() later with all args
     *
     * @param name Name of the cache
     */
    AssociativeCache(const char *name) : Named(std::string(name)) {}

    /**
     * Public constructor
     * @param name Name of the cache
     * @param num_entries total number of entries of the container, the number
     * of sets can be calculated dividing this value by the 'assoc' value
     * @param associativity_ number of elements in each associative set
     * @param repl_policy replacement policy
     * @param indexing_policy indexing policy
     * @param init_val initial value of the elements of the cache
     */
    AssociativeCache(const char *name, const size_t num_entries,
                     const size_t associativity_,
                     BaseReplacementPolicy *repl_policy,
                     BaseIndexingPolicy *indexing_policy,
                     Entry const &init_val = Entry())
        : Named(std::string(name)),
          associativity(associativity_),
          replPolicy(repl_policy),
          indexingPolicy(indexing_policy),
          entries(num_entries, init_val)
    {
        initParams(num_entries, associativity);
    }

    /**
     * Virtual destructor: this class exposes virtual member functions
     * and is meant to be derived from (e.g. by AssociativeSet), so a
     * delete through a base pointer must run the derived destructor.
     */
    virtual ~AssociativeCache() = default;

    /**
     * Disable copy and assignment: the policies hold raw back-pointers
     * into the entries vector, so a memberwise copy would alias them.
     */
    AssociativeCache(const AssociativeCache&) = delete;
    AssociativeCache& operator=(const AssociativeCache&) = delete;

    /**
     * Clear the entries in the cache.
     */
    void
    clear()
    {
        for (auto &entry : entries) {
            invalidate(&entry);
        }
    }

    /**
     * Deferred initialization, for use with the name-only constructor.
     *
     * @param num_entries total number of entries of the container
     * @param associativity_ number of elements in each associative set
     * @param _repl_policy replacement policy
     * @param _indexing_policy indexing policy
     * @param init_val initial value of the elements of the cache
     */
    void
    init(const size_t num_entries,
         const size_t associativity_,
         BaseReplacementPolicy *_repl_policy,
         BaseIndexingPolicy *_indexing_policy,
         Entry const &init_val = Entry())
    {
        associativity = associativity_;
        replPolicy = _repl_policy;
        indexingPolicy = _indexing_policy;
        entries.resize(num_entries, init_val);

        initParams(num_entries, associativity);
    }

    /**
     * Get the tag for the addr
     * @param addr Addr to get the tag for
     * @return Tag for the address
     */
    virtual Addr
    getTag(const Addr addr) const
    {
        return indexingPolicy->extractTag(addr);
    }

    /**
     * Do an access to the entry if it exists.
     * This is required to update the replacement information data.
     * @param addr key to the entry
     * @return The entry if it exists
     */
    virtual Entry*
    accessEntryByAddr(const Addr addr)
    {
        auto entry = findEntry(addr);

        if (entry) {
            accessEntry(entry);
        }

        return entry;
    }

    /**
     * Update the replacement information for an entry
     * @param entry Entry to access and update
     */
    virtual void
    accessEntry(Entry *entry)
    {
        replPolicy->touch(entry->replacementData);
    }

    /**
     * Find an entry within the set
     * @param addr key element
     * @return returns a pointer to the wanted entry or nullptr if it does not
     *  exist.
     */
    virtual Entry*
    findEntry(const Addr addr) const
    {
        auto tag = getTag(addr);

        auto candidates = indexingPolicy->getPossibleEntries(addr);

        for (auto candidate : candidates) {
            Entry *entry = static_cast<Entry*>(candidate);
            if (entry->matchTag(tag)) {
                return entry;
            }
        }

        return nullptr;
    }

    /**
     * Find a victim to be replaced
     * @param addr key to select the possible victim
     * @result entry to be victimized
     */
    virtual Entry*
    findVictim(const Addr addr)
    {
        auto candidates = indexingPolicy->getPossibleEntries(addr);

        auto victim = static_cast<Entry*>(replPolicy->getVictim(candidates));

        // There is only one eviction for this replacement
        invalidate(victim);

        return victim;
    }

    /**
     * Invalidate an entry and its respective replacement data.
     *
     * @param entry Entry to be invalidated.
     */
    virtual void
    invalidate(Entry *entry)
    {
        entry->invalidate();
        replPolicy->invalidate(entry->replacementData);
    }

    /**
     * Indicate that an entry has just been inserted
     * @param addr key of the container
     * @param entry pointer to the container entry to be inserted
     */
    virtual void
    insertEntry(const Addr addr, Entry *entry)
    {
        entry->insert(indexingPolicy->extractTag(addr));
        replPolicy->reset(entry->replacementData);
    }

    /**
     * Find the set of entries that could be replaced given
     * that we want to add a new entry with the provided key
     * @param addr key to select the set of entries
     * @result vector of candidates matching with the provided key
     */
    std::vector<Entry *>
    getPossibleEntries(const Addr addr) const
    {
        std::vector<ReplaceableEntry *> selected_entries =
            indexingPolicy->getPossibleEntries(addr);
        std::vector<Entry *> entries;

        // Down-cast the generic candidates to the concrete entry type.
        std::transform(selected_entries.begin(), selected_entries.end(),
                       std::back_inserter(entries), [](auto &entry) {
                           return static_cast<Entry *>(entry);
                       });

        return entries;
    }

    /** Iterator types */
    using const_iterator = typename std::vector<Entry>::const_iterator;
    using iterator = typename std::vector<Entry>::iterator;

    /**
     * Returns an iterator to the first entry of the dictionary
     * @result iterator to the first element
     */
    iterator
    begin()
    {
        return entries.begin();
    }

    /**
     * Returns an iterator pointing to the end of the the dictionary
     * (placeholder element, should not be accessed)
     * @result iterator to the end element
     */
    iterator
    end()
    {
        return entries.end();
    }

    /**
     * Returns an iterator to the first entry of the dictionary
     * @result iterator to the first element
     */
    const_iterator
    begin() const
    {
        return entries.begin();
    }

    /**
     * Returns an iterator pointing to the end of the the dictionary
     * (placeholder element, should not be accessed)
     * @result iterator to the end element
     */
    const_iterator
    end() const
    {
        return entries.end();
    }
};
}
#endif

136
src/base/cache/cache_entry.hh vendored Normal file
View File

@@ -0,0 +1,136 @@
/**
* Copyright (c) 2024 - Pranith Kumar
* Copyright (c) 2020 Inria
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __BASE_CACHE_CACHE_ENTRY_HH__
#define __BASE_CACHE_CACHE_ENTRY_HH__
#include <cassert>
#include "base/cprintf.hh"
#include "base/types.hh"
#include "mem/cache/replacement_policies/replaceable_entry.hh"
namespace gem5
{
/**
 * A CacheEntry is an entry containing a tag. A tagged entry's contents
 * are only relevant if it is marked as valid.
 */
class CacheEntry : public ReplaceableEntry
{
  public:
    CacheEntry() = default;

    /**
     * Virtual destructor: the class exposes virtual member functions and
     * is intended to be specialized (entries override invalidate(), etc.),
     * so destruction through a CacheEntry pointer must be safe.
     */
    virtual ~CacheEntry() = default;

    /**
     * Checks if the entry is valid.
     *
     * @return True if the entry is valid.
     */
    virtual bool isValid() const { return valid; }

    /**
     * Get tag associated to this block.
     *
     * @return The tag value.
     */
    virtual Addr getTag() const { return tag; }

    /**
     * Checks if the given tag information corresponds to this entry's.
     *
     * @param tag The tag value to compare to.
     * @return True if the tag information match this entry's.
     */
    virtual bool
    matchTag(const Addr tag) const
    {
        return isValid() && (getTag() == tag);
    }

    /**
     * Insert the block by assigning it a tag and marking it valid. Touches
     * block if it hadn't been touched previously.
     *
     * @param tag The tag value.
     */
    virtual void
    insert(const Addr tag)
    {
        setValid();
        setTag(tag);
    }

    /** Invalidate the block. Its contents are no longer valid. */
    virtual void
    invalidate()
    {
        valid = false;
        // MaxAddr marks the tag as meaningless; matchTag() still relies
        // on the valid bit, not on this sentinel.
        setTag(MaxAddr);
    }

    std::string
    print() const override
    {
        return csprintf("tag: %#x valid: %d | %s", getTag(),
                        isValid(), ReplaceableEntry::print());
    }

  protected:
    /**
     * Set tag associated to this block.
     *
     * @param _tag The tag value.
     */
    virtual void setTag(Addr _tag) { tag = _tag; }

    /** Set valid bit. The block must be invalid beforehand. */
    virtual void
    setValid()
    {
        assert(!isValid());
        valid = true;
    }

  private:
    /**
     * Valid bit. The contents of this entry are only valid if this bit is set.
     * @sa invalidate()
     * @sa insert()
     */
    bool valid{false};

    /** The entry's tag. */
    Addr tag{MaxAddr};
};
} // namespace gem5
#endif // __BASE_CACHE_CACHE_ENTRY_HH__

View File

@@ -51,8 +51,9 @@ FrequentValues::FrequentValues(const Params &p)
codeGenerationTicks(p.code_generation_ticks),
checkSaturation(p.check_saturation), numVFTEntries(p.vft_entries),
numSamples(p.num_samples), takenSamples(0), phase(SAMPLING),
VFT(p.vft_assoc, p.vft_entries, p.vft_indexing_policy,
p.vft_replacement_policy, VFTEntry(counterBits)),
VFT((name() + ".VFT").c_str(),
p.vft_entries, p.vft_assoc, p.vft_replacement_policy,
p.vft_indexing_policy, VFTEntry(counterBits)),
codeGenerationEvent([this]{ phase = COMPRESSING; }, name())
{
fatal_if((numVFTEntries - 1) > mask(chunkSizeBits),
@@ -75,7 +76,7 @@ FrequentValues::compress(const std::vector<Chunk>& chunks, Cycles& comp_lat,
encoder::Code code;
int length = 0;
if (phase == COMPRESSING) {
VFTEntry* entry = VFT.findEntry(chunk, false);
VFTEntry* entry = VFT.findEntry(chunk);
// Theoretically, the code would be the index of the entry;
// however, there is no practical need to do so, and we simply
@@ -159,7 +160,7 @@ FrequentValues::decompress(const CompressionData* comp_data, uint64_t* data)
// The value at the given VFT entry must match the one stored,
// if it is not the uncompressed value
assert((comp_chunk.code.code == uncompressedValue) ||
VFT.findEntry(comp_chunk.value, false));
VFT.findEntry(comp_chunk.value));
}
}
@@ -178,7 +179,7 @@ FrequentValues::sampleValues(const std::vector<uint64_t> &data,
{
const std::vector<Chunk> chunks = toChunks(data.data());
for (const Chunk& chunk : chunks) {
VFTEntry* entry = VFT.findEntry(chunk, false);
VFTEntry* entry = VFT.findEntry(chunk);
bool saturated = false;
if (!is_invalidation) {
// If a VFT hit, increase new value's counter; otherwise, insert
@@ -187,7 +188,7 @@ FrequentValues::sampleValues(const std::vector<uint64_t> &data,
entry = VFT.findVictim(chunk);
assert(entry != nullptr);
entry->value = chunk;
VFT.insertEntry(chunk, false, entry);
VFT.insertEntry(chunk, entry);
} else {
VFT.accessEntry(entry);
}
@@ -234,7 +235,7 @@ FrequentValues::generateCodes()
// representing uncompressed values
assert(uncompressed_values.size() >= 1);
uncompressedValue = *uncompressed_values.begin();
assert(VFT.findEntry(uncompressedValue, false) == nullptr);
assert(VFT.findEntry(uncompressedValue) == nullptr);
if (useHuffmanEncoding) {
// Populate the queue, adding each entry as a tree with one node.

View File

@@ -34,13 +34,13 @@
#include <memory>
#include <vector>
#include "base/cache/associative_cache.hh"
#include "base/sat_counter.hh"
#include "base/types.hh"
#include "mem/cache/base.hh"
#include "mem/cache/cache_probe_arg.hh"
#include "mem/cache/compressors/base.hh"
#include "mem/cache/compressors/encoders/huffman.hh"
#include "mem/cache/prefetch/associative_set.hh"
#include "sim/eventq.hh"
#include "sim/probe/probe.hh"
@@ -112,7 +112,7 @@ class FrequentValues : public Base
enum Phase {SAMPLING, CODE_GENERATION, COMPRESSING};
Phase phase;
class VFTEntry : public TaggedEntry
class VFTEntry : public CacheEntry
{
public:
/**
@@ -130,14 +130,14 @@ class FrequentValues : public Base
SatCounter32 counter;
VFTEntry(std::size_t num_bits)
: TaggedEntry(), value(0), counter(num_bits)
: CacheEntry(), value(0), counter(num_bits)
{
}
void
invalidate() override
{
TaggedEntry::invalidate();
CacheEntry::invalidate();
value = 0;
counter.reset();
}
@@ -147,7 +147,7 @@ class FrequentValues : public Base
* The Value Frequency Table, a small cache that keeps track and estimates
* the frequency distribution of values in the cache.
*/
AssociativeSet<VFTEntry> VFT;
AssociativeCache<VFTEntry> VFT;
/**
* A pseudo value is used as the representation of uncompressed values.

View File

@@ -51,9 +51,11 @@ AccessMapPatternMatching::AccessMapPatternMatching(
lowCacheHitThreshold(p.low_cache_hit_threshold),
epochCycles(p.epoch_cycles),
offChipMemoryLatency(p.offchip_memory_latency),
accessMapTable(p.access_map_table_assoc, p.access_map_table_entries,
p.access_map_table_indexing_policy,
accessMapTable("AccessMapTable",
p.access_map_table_entries,
p.access_map_table_assoc,
p.access_map_table_replacement_policy,
p.access_map_table_indexing_policy,
AccessMapEntry(hotZoneSize / blkSize)),
numGoodPrefetches(0), numTotalPrefetches(0), numRawCacheMisses(0),
numRawCacheHits(0), degree(startDegree), usefulDegree(startDegree),

View File

@@ -29,6 +29,9 @@
#ifndef __CACHE_PREFETCH_ASSOCIATIVE_SET_HH__
#define __CACHE_PREFETCH_ASSOCIATIVE_SET_HH__
#include <type_traits>
#include "base/cache/associative_cache.hh"
#include "mem/cache/replacement_policies/base.hh"
#include "mem/cache/tags/indexing_policies/base.hh"
#include "mem/cache/tags/tagged_entry.hh"
@@ -42,38 +45,27 @@ namespace gem5
* bool value is used as an additional tag data of the entry.
*/
template<class Entry>
class AssociativeSet
class AssociativeSet : public AssociativeCache<Entry>
{
static_assert(std::is_base_of_v<TaggedEntry, Entry>,
"Entry must derive from TaggedEntry");
/** Associativity of the container */
const int associativity;
/**
* Total number of entries, entries are organized in sets of the provided
* associativity. The number of associative sets is obtained by dividing
* numEntries by associativity.
*/
const int numEntries;
/** Pointer to the indexing policy */
BaseIndexingPolicy* const indexingPolicy;
/** Pointer to the replacement policy */
replacement_policy::Base* const replacementPolicy;
/** Vector containing the entries of the container */
std::vector<Entry> entries;
public:
/**
* Public constructor
* @param assoc number of elements in each associative set
* @param name Name of the cache
* @param num_entries total number of entries of the container, the number
* of sets can be calculated dividing this balue by the 'assoc' value
* @param idx_policy indexing policy
* of sets can be calculated dividing this balue by the 'assoc' value
* @param assoc number of elements in each associative set
* @param rpl_policy replacement policy
* @param idx_policy indexing policy
* @param init_val initial value of the elements of the set
*/
AssociativeSet(int assoc, int num_entries, BaseIndexingPolicy *idx_policy,
replacement_policy::Base *rpl_policy, Entry const &init_val = Entry());
AssociativeSet(const char *name, const size_t num_entries,
const size_t associativity_,
replacement_policy::Base *repl_policy,
BaseIndexingPolicy *indexing_policy,
Entry const &init_val = Entry());
/**
* Find an entry within the set
@@ -84,28 +76,6 @@ class AssociativeSet
*/
Entry* findEntry(Addr addr, bool is_secure) const;
/**
* Do an access to the entry, this is required to
* update the replacement information data.
* @param entry the accessed entry
*/
void accessEntry(Entry *entry);
/**
* Find a victim to be replaced
* @param addr key to select the possible victim
* @result entry to be victimized
*/
Entry* findVictim(Addr addr);
/**
* Find the set of entries that could be replaced given
* that we want to add a new entry with the provided key
* @param addr key to select the set of entries
* @result vector of candidates matching with the provided key
*/
std::vector<Entry *> getPossibleEntries(const Addr addr) const;
/**
* Indicate that an entry has just been inserted
* @param addr key of the container
@@ -114,54 +84,16 @@ class AssociativeSet
*/
void insertEntry(Addr addr, bool is_secure, Entry* entry);
/**
* Invalidate an entry and its respective replacement data.
*
* @param entry Entry to be invalidated.
*/
void invalidate(Entry* entry);
private:
// The following APIs are excluded since they lack the secure bit
using AssociativeCache<Entry>::getTag;
using AssociativeCache<Entry>::accessEntryByAddr;
using AssociativeCache<Entry>::findEntry;
using AssociativeCache<Entry>::insertEntry;
using AssociativeCache<Entry>::getPossibleEntries;
/** Iterator types */
using const_iterator = typename std::vector<Entry>::const_iterator;
using iterator = typename std::vector<Entry>::iterator;
/**
* Returns an iterator to the first entry of the dictionary
* @result iterator to the first element
*/
iterator begin()
{
return entries.begin();
}
/**
* Returns an iterator pointing to the end of the the dictionary
* (placeholder element, should not be accessed)
* @result iterator to the end element
*/
iterator end()
{
return entries.end();
}
/**
* Returns an iterator to the first entry of the dictionary
* @result iterator to the first element
*/
const_iterator begin() const
{
return entries.begin();
}
/**
* Returns an iterator pointing to the end of the the dictionary
* (placeholder element, should not be accessed)
* @result iterator to the end element
*/
const_iterator end() const
{
return entries.end();
}
using AssociativeCache<Entry>::replPolicy;
using AssociativeCache<Entry>::indexingPolicy;
};
} // namespace gem5

View File

@@ -35,93 +35,41 @@
namespace gem5
{
template<class Entry>
AssociativeSet<Entry>::AssociativeSet(int assoc, int num_entries,
BaseIndexingPolicy *idx_policy, replacement_policy::Base *rpl_policy,
Entry const &init_value)
: associativity(assoc), numEntries(num_entries), indexingPolicy(idx_policy),
replacementPolicy(rpl_policy), entries(numEntries, init_value)
template <class Entry>
AssociativeSet<Entry>::AssociativeSet(const char *name,
const size_t num_entries,
const size_t associativity_,
replacement_policy::Base *repl_policy,
BaseIndexingPolicy *indexing_policy,
Entry const &init_val)
: AssociativeCache<Entry>(name, num_entries, associativity_,
repl_policy, indexing_policy, init_val)
{
fatal_if(!isPowerOf2(num_entries), "The number of entries of an "
"AssociativeSet<> must be a power of 2");
fatal_if(!isPowerOf2(assoc), "The associativity of an AssociativeSet<> "
"must be a power of 2");
for (unsigned int entry_idx = 0; entry_idx < numEntries; entry_idx += 1) {
Entry* entry = &entries[entry_idx];
indexingPolicy->setEntry(entry, entry_idx);
entry->replacementData = replacementPolicy->instantiateEntry();
}
}
template<class Entry>
template <class Entry>
Entry*
AssociativeSet<Entry>::findEntry(Addr addr, bool is_secure) const
{
Addr tag = indexingPolicy->extractTag(addr);
const std::vector<ReplaceableEntry*> selected_entries =
indexingPolicy->getPossibleEntries(addr);
auto candidates = indexingPolicy->getPossibleEntries(addr);
for (const auto& location : selected_entries) {
Entry* entry = static_cast<Entry *>(location);
if ((entry->getTag() == tag) && entry->isValid() &&
entry->isSecure() == is_secure) {
for (auto candidate : candidates) {
Entry* entry = static_cast<Entry*>(candidate);
if (entry->matchTag(tag, is_secure)) {
return entry;
}
}
return nullptr;
}
template<class Entry>
void
AssociativeSet<Entry>::accessEntry(Entry *entry)
{
replacementPolicy->touch(entry->replacementData);
}
template<class Entry>
Entry*
AssociativeSet<Entry>::findVictim(Addr addr)
{
// Get possible entries to be victimized
const std::vector<ReplaceableEntry*> selected_entries =
indexingPolicy->getPossibleEntries(addr);
Entry* victim = static_cast<Entry*>(replacementPolicy->getVictim(
selected_entries));
// There is only one eviction for this replacement
invalidate(victim);
return victim;
}
template<class Entry>
std::vector<Entry *>
AssociativeSet<Entry>::getPossibleEntries(const Addr addr) const
{
std::vector<ReplaceableEntry *> selected_entries =
indexingPolicy->getPossibleEntries(addr);
std::vector<Entry *> entries(selected_entries.size(), nullptr);
unsigned int idx = 0;
for (auto &entry : selected_entries) {
entries[idx++] = static_cast<Entry *>(entry);
}
return entries;
}
template<class Entry>
void
AssociativeSet<Entry>::insertEntry(Addr addr, bool is_secure, Entry* entry)
{
entry->insert(indexingPolicy->extractTag(addr), is_secure);
replacementPolicy->reset(entry->replacementData);
}
template<class Entry>
void
AssociativeSet<Entry>::invalidate(Entry* entry)
{
entry->invalidate();
replacementPolicy->invalidate(entry->replacementData);
replPolicy->reset(entry->replacementData);
}
} // namespace gem5

View File

@@ -42,15 +42,16 @@ namespace prefetch
DeltaCorrelatingPredictionTables::DeltaCorrelatingPredictionTables(
const DeltaCorrelatingPredictionTablesParams &p) : SimObject(p),
deltaBits(p.delta_bits), deltaMaskBits(p.delta_mask_bits),
table(p.table_assoc, p.table_entries, p.table_indexing_policy,
p.table_replacement_policy, DCPTEntry(p.deltas_per_entry))
table((name() + "DCPT").c_str(), p.table_entries,
p.table_assoc, p.table_replacement_policy,
p.table_indexing_policy, DCPTEntry(p.deltas_per_entry))
{
}
void
DeltaCorrelatingPredictionTables::DCPTEntry::invalidate()
{
TaggedEntry::invalidate();
CacheEntry::invalidate();
deltas.flush();
while (!deltas.full()) {
@@ -134,9 +135,8 @@ DeltaCorrelatingPredictionTables::calculatePrefetch(
}
Addr address = pfi.getAddr();
Addr pc = pfi.getPC();
// Look up table entry, is_secure is unused in findEntry because we
// index using the pc
DCPTEntry *entry = table.findEntry(pc, false /* unused */);
// Look up table entry
DCPTEntry *entry = table.findEntry(pc);
if (entry != nullptr) {
entry->addAddress(address, deltaBits);
//Delta correlating
@@ -144,7 +144,7 @@ DeltaCorrelatingPredictionTables::calculatePrefetch(
} else {
entry = table.findVictim(pc);
table.insertEntry(pc, false /* unused */, entry);
table.insertEntry(pc, entry);
entry->lastAddress = address;
}

View File

@@ -29,8 +29,8 @@
#ifndef __MEM_CACHE_PREFETCH_DELTA_CORRELATING_PREDICTION_TABLES_HH_
#define __MEM_CACHE_PREFETCH_DELTA_CORRELATING_PREDICTION_TABLES_HH_
#include "base/cache/associative_cache.hh"
#include "base/circular_queue.hh"
#include "mem/cache/prefetch/associative_set.hh"
#include "mem/cache/prefetch/queued.hh"
namespace gem5
@@ -65,7 +65,7 @@ class DeltaCorrelatingPredictionTables : public SimObject
const unsigned int deltaMaskBits;
/** DCPT Table entry datatype */
struct DCPTEntry : public TaggedEntry
struct DCPTEntry : public CacheEntry
{
/** Last accessed address */
Addr lastAddress;
@@ -77,7 +77,7 @@ class DeltaCorrelatingPredictionTables : public SimObject
* @param num_deltas number of deltas stored in the entry
*/
DCPTEntry(unsigned int num_deltas)
: TaggedEntry(), lastAddress(0), deltas(num_deltas)
: CacheEntry(), lastAddress(0), deltas(num_deltas)
{
}
@@ -103,7 +103,7 @@ class DeltaCorrelatingPredictionTables : public SimObject
};
/** The main table */
AssociativeSet<DCPTEntry> table;
AssociativeCache<DCPTEntry> table;
public:
DeltaCorrelatingPredictionTables(

View File

@@ -44,11 +44,15 @@ IndirectMemory::IndirectMemory(const IndirectMemoryPrefetcherParams &p)
shiftValues(p.shift_values), prefetchThreshold(p.prefetch_threshold),
streamCounterThreshold(p.stream_counter_threshold),
streamingDistance(p.streaming_distance),
prefetchTable(p.pt_table_assoc, p.pt_table_entries,
p.pt_table_indexing_policy, p.pt_table_replacement_policy,
prefetchTable((name() + ".PrefetchTable").c_str(),
p.pt_table_entries,
p.pt_table_assoc,
p.pt_table_replacement_policy,
p.pt_table_indexing_policy,
PrefetchTableEntry(p.num_indirect_counter_bits)),
ipd(p.ipd_table_assoc, p.ipd_table_entries, p.ipd_table_indexing_policy,
ipd((name() + ".IPD").c_str(), p.ipd_table_entries, p.ipd_table_assoc,
p.ipd_table_replacement_policy,
p.ipd_table_indexing_policy,
IndirectPatternDetectorEntry(p.addr_array_len, shiftValues.size())),
ipdEntryTrackingMisses(nullptr), byteOrder(p.sys->getGuestByteOrder())
{
@@ -81,8 +85,7 @@ IndirectMemory::calculatePrefetch(const PrefetchInfo &pfi,
}
} else {
// if misses are not being tracked, attempt to detect stream accesses
PrefetchTableEntry *pt_entry =
prefetchTable.findEntry(pc, false /* unused */);
PrefetchTableEntry *pt_entry = prefetchTable.findEntry(pc);
if (pt_entry != nullptr) {
prefetchTable.accessEntry(pt_entry);
@@ -156,7 +159,7 @@ IndirectMemory::calculatePrefetch(const PrefetchInfo &pfi,
} else {
pt_entry = prefetchTable.findVictim(pc);
assert(pt_entry != nullptr);
prefetchTable.insertEntry(pc, false /* unused */, pt_entry);
prefetchTable.insertEntry(pc, pt_entry);
pt_entry->address = addr;
pt_entry->secure = is_secure;
}
@@ -169,8 +172,7 @@ IndirectMemory::allocateOrUpdateIPDEntry(
{
// The address of the pt_entry is used to index the IPD
Addr ipd_entry_addr = (Addr) pt_entry;
IndirectPatternDetectorEntry *ipd_entry = ipd.findEntry(ipd_entry_addr,
false/* unused */);
IndirectPatternDetectorEntry *ipd_entry = ipd.findEntry(ipd_entry_addr);
if (ipd_entry != nullptr) {
ipd.accessEntry(ipd_entry);
if (!ipd_entry->secondIndexSet) {
@@ -187,7 +189,7 @@ IndirectMemory::allocateOrUpdateIPDEntry(
} else {
ipd_entry = ipd.findVictim(ipd_entry_addr);
assert(ipd_entry != nullptr);
ipd.insertEntry(ipd_entry_addr, false /* unused */, ipd_entry);
ipd.insertEntry(ipd_entry_addr, ipd_entry);
ipd_entry->idx1 = index;
ipdEntryTrackingMisses = ipd_entry;
}

View File

@@ -121,7 +121,7 @@ class IndirectMemory : public Queued
}
};
/** Prefetch table */
AssociativeSet<PrefetchTableEntry> prefetchTable;
AssociativeCache<PrefetchTableEntry> prefetchTable;
/** Indirect Pattern Detector entrt */
struct IndirectPatternDetectorEntry : public TaggedEntry
@@ -160,7 +160,7 @@ class IndirectMemory : public Queued
}
};
/** Indirect Pattern Detector (IPD) table */
AssociativeSet<IndirectPatternDetectorEntry> ipd;
AssociativeCache<IndirectPatternDetectorEntry> ipd;
/** Entry currently tracking misses */
IndirectPatternDetectorEntry *ipdEntryTrackingMisses;

View File

@@ -44,19 +44,23 @@ IrregularStreamBuffer::IrregularStreamBuffer(
chunkSize(p.chunk_size),
prefetchCandidatesPerEntry(p.prefetch_candidates_per_entry),
degree(p.degree),
trainingUnit(p.training_unit_assoc, p.training_unit_entries,
p.training_unit_indexing_policy,
p.training_unit_replacement_policy),
psAddressMappingCache(p.address_map_cache_assoc,
trainingUnit((name() + ".TrainingUnit").c_str(),
p.training_unit_entries,
p.training_unit_assoc,
p.training_unit_replacement_policy,
p.training_unit_indexing_policy),
psAddressMappingCache((name() + ".PSAddressMappingCache").c_str(),
p.address_map_cache_entries,
p.ps_address_map_cache_indexing_policy,
p.address_map_cache_assoc,
p.ps_address_map_cache_replacement_policy,
p.ps_address_map_cache_indexing_policy,
AddressMappingEntry(prefetchCandidatesPerEntry,
p.num_counter_bits)),
spAddressMappingCache(p.address_map_cache_assoc,
spAddressMappingCache((name() + ".SPAddressMappingCache").c_str(),
p.address_map_cache_entries,
p.sp_address_map_cache_indexing_policy,
p.address_map_cache_assoc,
p.sp_address_map_cache_replacement_policy,
p.sp_address_map_cache_indexing_policy,
AddressMappingEntry(prefetchCandidatesPerEntry,
p.num_counter_bits)),
structuralAddressCounter(0)

View File

@@ -46,8 +46,9 @@ PIF::PIF(const PIFPrefetcherParams &p)
succSize(p.succ_spatial_region_bits),
maxCompactorEntries(p.compactor_entries),
historyBuffer(p.history_buffer_size),
index(p.index_assoc, p.index_entries, p.index_indexing_policy,
p.index_replacement_policy),
index((name() + ".PIFIndex").c_str(), p.index_entries, p.index_assoc,
p.index_replacement_policy,
p.index_indexing_policy),
streamAddressBuffer(p.stream_address_buffer_entries),
listenersPC()
{
@@ -176,15 +177,13 @@ PIF::notifyRetiredInst(const Addr pc)
// the 'iterator' table to point to the new entry
historyBuffer.push_back(spatialCompactor);
IndexEntry *idx_entry =
index.findEntry(spatialCompactor.trigger, false);
auto idx_entry = index.findEntry(spatialCompactor.trigger);
if (idx_entry != nullptr) {
index.accessEntry(idx_entry);
} else {
idx_entry = index.findVictim(spatialCompactor.trigger);
assert(idx_entry != nullptr);
index.insertEntry(spatialCompactor.trigger, false,
idx_entry);
index.insertEntry(spatialCompactor.trigger, idx_entry);
}
idx_entry->historyIt =
historyBuffer.getIterator(historyBuffer.tail());
@@ -220,7 +219,7 @@ PIF::calculatePrefetch(const PrefetchInfo &pfi,
// Check if a valid entry in the 'index' table is found and allocate a new
// active prediction stream
IndexEntry *idx_entry = index.findEntry(pc, /* unused */ false);
IndexEntry *idx_entry = index.findEntry(pc);
if (idx_entry != nullptr) {
index.accessEntry(idx_entry);

View File

@@ -138,11 +138,12 @@ class PIF : public Queued
{
HistoryBuffer::iterator historyIt;
};
/**
* The index table is a small cache-like structure that facilitates
* fast search of the history buffer.
*/
AssociativeSet<IndexEntry> index;
AssociativeCache<IndexEntry> index;
/**
* A Stream Address Buffer (SAB) tracks a window of consecutive

View File

@@ -48,12 +48,16 @@ SignaturePath::SignaturePath(const SignaturePathPrefetcherParams &p)
signatureBits(p.signature_bits),
prefetchConfidenceThreshold(p.prefetch_confidence_threshold),
lookaheadConfidenceThreshold(p.lookahead_confidence_threshold),
signatureTable(p.signature_table_assoc, p.signature_table_entries,
p.signature_table_indexing_policy,
p.signature_table_replacement_policy),
patternTable(p.pattern_table_assoc, p.pattern_table_entries,
p.pattern_table_indexing_policy,
signatureTable((name() + ".SignatureTable").c_str(),
p.signature_table_entries,
p.signature_table_assoc,
p.signature_table_replacement_policy,
p.signature_table_indexing_policy),
patternTable((name() + ".PatternTable").c_str(),
p.pattern_table_entries,
p.pattern_table_assoc,
p.pattern_table_replacement_policy,
p.pattern_table_indexing_policy,
PatternEntry(stridesPerPatternEntry, p.num_counter_bits))
{
fatal_if(prefetchConfidenceThreshold < 0,
@@ -188,7 +192,7 @@ SignaturePath::getSignatureEntry(Addr ppn, bool is_secure,
SignaturePath::PatternEntry &
SignaturePath::getPatternEntry(Addr signature)
{
PatternEntry* pattern_entry = patternTable.findEntry(signature, false);
PatternEntry* pattern_entry = patternTable.findEntry(signature);
if (pattern_entry != nullptr) {
// Signature found
patternTable.accessEntry(pattern_entry);
@@ -197,7 +201,7 @@ SignaturePath::getPatternEntry(Addr signature)
pattern_entry = patternTable.findVictim(signature);
assert(pattern_entry != nullptr);
patternTable.insertEntry(signature, false, pattern_entry);
patternTable.insertEntry(signature, pattern_entry);
}
return *pattern_entry;
}
@@ -273,7 +277,7 @@ SignaturePath::calculatePrefetch(const PrefetchInfo &pfi,
// confidence, these are prefetch candidates
// - select the entry with the highest counter as the "lookahead"
PatternEntry *current_pattern_entry =
patternTable.findEntry(current_signature, false);
patternTable.findEntry(current_signature);
PatternStrideEntry const *lookahead = nullptr;
if (current_pattern_entry != nullptr) {
unsigned long max_counter = 0;

View File

@@ -146,8 +146,9 @@ class SignaturePath : public Queued
*/
PatternStrideEntry &getStrideEntry(stride_t stride);
};
/** Pattern table */
AssociativeSet<PatternEntry> patternTable;
AssociativeCache<PatternEntry> patternTable;
/**
* Generates a new signature from an existing one and a new stride

View File

@@ -42,10 +42,11 @@ namespace prefetch
SignaturePathV2::SignaturePathV2(const SignaturePathPrefetcherV2Params &p)
: SignaturePath(p),
globalHistoryRegister(p.global_history_register_entries,
globalHistoryRegister((name() + ".GlobalHistoryRegister").c_str(),
p.global_history_register_entries,
p.global_history_register_indexing_policy,
p.global_history_register_entries,
p.global_history_register_replacement_policy,
p.global_history_register_indexing_policy,
GlobalHistoryEntry())
{
}
@@ -124,7 +125,7 @@ SignaturePathV2::handlePageCrossingLookahead(signature_t signature,
GlobalHistoryEntry *gh_entry = globalHistoryRegister.findVictim(0);
assert(gh_entry != nullptr);
// Any address value works, as it is never used
globalHistoryRegister.insertEntry(0, false, gh_entry);
globalHistoryRegister.insertEntry(0, gh_entry);
gh_entry->signature = signature;
gh_entry->lastBlock = last_offset;

View File

@@ -66,7 +66,7 @@ class SignaturePathV2 : public SignaturePath
delta(0) {}
};
/** Global History Register */
AssociativeSet<GlobalHistoryEntry> globalHistoryRegister;
AssociativeCache<GlobalHistoryEntry> globalHistoryRegister;
double calculateLookaheadConfidence(PatternEntry const &sig,
PatternStrideEntry const &lookahead) const override;

View File

@@ -42,16 +42,18 @@ STeMS::STeMS(const STeMSPrefetcherParams &p)
: Queued(p), spatialRegionSize(p.spatial_region_size),
spatialRegionSizeBits(floorLog2(p.spatial_region_size)),
reconstructionEntries(p.reconstruction_entries),
activeGenerationTable(p.active_generation_table_assoc,
activeGenerationTable((name() + ".ActiveGenerationTable").c_str(),
p.active_generation_table_entries,
p.active_generation_table_indexing_policy,
p.active_generation_table_assoc,
p.active_generation_table_replacement_policy,
p.active_generation_table_indexing_policy,
ActiveGenerationTableEntry(
spatialRegionSize / blkSize)),
patternSequenceTable(p.pattern_sequence_table_assoc,
patternSequenceTable((name() + ".PatternSequenceTable").c_str(),
p.pattern_sequence_table_entries,
p.pattern_sequence_table_indexing_policy,
p.pattern_sequence_table_assoc,
p.pattern_sequence_table_replacement_policy,
p.pattern_sequence_table_indexing_policy,
ActiveGenerationTableEntry(
spatialRegionSize / blkSize)),
rmob(p.region_miss_order_buffer_entries),
@@ -90,15 +92,12 @@ STeMS::checkForActiveGenerationsEnd(const CacheAccessor &cache)
}
if (generation_ended) {
// PST is indexed using the PC (secure bit is unused)
ActiveGenerationTableEntry *pst_entry =
patternSequenceTable.findEntry(pst_addr,
false /*unused*/);
auto pst_entry = patternSequenceTable.findEntry(pst_addr);
if (pst_entry == nullptr) {
// Typically an entry will not exist
pst_entry = patternSequenceTable.findVictim(pst_addr);
assert(pst_entry != nullptr);
patternSequenceTable.insertEntry(pst_addr,
false /*unused*/, pst_entry);
patternSequenceTable.insertEntry(pst_addr, pst_entry);
} else {
patternSequenceTable.accessEntry(pst_entry);
}
@@ -222,8 +221,7 @@ STeMS::reconstructSequence(
idx = 0;
for (auto it = rmob_it; it != rmob.end() && (idx < reconstructionEntries);
it++) {
ActiveGenerationTableEntry *pst_entry =
patternSequenceTable.findEntry(it->pstAddress, false /* unused */);
auto pst_entry = patternSequenceTable.findEntry(it->pstAddress);
if (pst_entry != nullptr) {
patternSequenceTable.accessEntry(pst_entry);
for (auto &seq_entry : pst_entry->sequence) {

View File

@@ -155,7 +155,7 @@ class STeMS : public Queued
/** Active Generation Table (AGT) */
AssociativeSet<ActiveGenerationTableEntry> activeGenerationTable;
/** Pattern Sequence Table (PST) */
AssociativeSet<ActiveGenerationTableEntry> patternSequenceTable;
AssociativeCache<ActiveGenerationTableEntry> patternSequenceTable;
/** Data type of the Region Miss Order Buffer entry */
struct RegionMissOrderBufferEntry

View File

@@ -86,7 +86,7 @@ Stride::Stride(const StridePrefetcherParams &p)
degree(p.degree),
distance(p.distance),
pcTableInfo(p.table_assoc, p.table_entries, p.table_indexing_policy,
p.table_replacement_policy)
p.table_replacement_policy)
{
}
@@ -105,16 +105,21 @@ Stride::findTable(int context)
Stride::PCTable*
Stride::allocateNewContext(int context)
{
std::string table_name = name() + ".PCTable" + std::to_string(context);
// Create new table
auto insertion_result = pcTables.insert(std::make_pair(context,
PCTable(pcTableInfo.assoc, pcTableInfo.numEntries,
pcTableInfo.indexingPolicy, pcTableInfo.replacementPolicy,
StrideEntry(initConfidence))));
auto ins_result = pcTables.emplace(std::piecewise_construct,
std::forward_as_tuple(context),
std::forward_as_tuple(table_name.c_str(),
pcTableInfo.numEntries,
pcTableInfo.assoc,
pcTableInfo.replacementPolicy,
pcTableInfo.indexingPolicy,
StrideEntry(initConfidence)));
DPRINTF(HWPrefetch, "Adding context %i with stride entries\n", context);
// Get iterator to new pc table, and then return a pointer to the new table
return &(insertion_result.first->second);
return &(ins_result.first->second);
}
void

View File

@@ -125,8 +125,8 @@ class Stride : public Queued
replacement_policy::Base* const replacementPolicy;
PCTableInfo(int assoc, int num_entries,
BaseIndexingPolicy* indexing_policy,
replacement_policy::Base* repl_policy)
BaseIndexingPolicy* indexing_policy,
replacement_policy::Base* repl_policy)
: assoc(assoc), numEntries(num_entries),
indexingPolicy(indexing_policy), replacementPolicy(repl_policy)
{

View File

@@ -31,7 +31,9 @@
#include <cassert>
#include "base/cache/cache_entry.hh"
#include "base/cprintf.hh"
#include "base/logging.hh"
#include "base/types.hh"
#include "mem/cache/replacement_policies/replaceable_entry.hh"
@@ -43,19 +45,12 @@ namespace gem5
* secure bit, which informs whether it belongs to a secure address space.
* A tagged entry's contents are only relevant if it is marked as valid.
*/
class TaggedEntry : public ReplaceableEntry
class TaggedEntry : public CacheEntry
{
public:
TaggedEntry() : _valid(false), _secure(false), _tag(MaxAddr) {}
TaggedEntry() : CacheEntry(), _secure(false) {}
~TaggedEntry() = default;
/**
* Checks if the entry is valid.
*
* @return True if the entry is valid.
*/
virtual bool isValid() const { return _valid; }
/**
* Check if this block holds data from the secure memory space.
*
@@ -63,13 +58,6 @@ class TaggedEntry : public ReplaceableEntry
*/
bool isSecure() const { return _secure; }
/**
* Get tag associated to this block.
*
* @return The tag value.
*/
virtual Addr getTag() const { return _tag; }
/**
* Checks if the given tag information corresponds to this entry's.
*
@@ -100,10 +88,10 @@ class TaggedEntry : public ReplaceableEntry
}
/** Invalidate the block. Its contents are no longer valid. */
virtual void invalidate()
void
invalidate() override
{
_valid = false;
setTag(MaxAddr);
CacheEntry::invalidate();
clearSecure();
}
@@ -114,44 +102,36 @@ class TaggedEntry : public ReplaceableEntry
isSecure(), isValid(), ReplaceableEntry::print());
}
protected:
/**
* Set tag associated to this block.
*
* @param tag The tag value.
*/
virtual void setTag(Addr tag) { _tag = tag; }
bool
matchTag(const Addr tag) const override
{
panic("Need is_secure arg");
return false;
}
void
insert(const Addr tag) override
{
panic("Need is_secure arg");
return;
}
protected:
/** Set secure bit. */
virtual void setSecure() { _secure = true; }
/** Set valid bit. The block must be invalid beforehand. */
virtual void
setValid()
{
assert(!isValid());
_valid = true;
}
private:
/**
* Valid bit. The contents of this entry are only valid if this bit is set.
* @sa invalidate()
* @sa insert()
*/
bool _valid;
/**
* Secure bit. Marks whether this entry refers to an address in the secure
* memory space. Must always be modified along with the tag.
*/
bool _secure;
/** The entry's tag. */
Addr _tag;
/** Clear secure bit. Should be only used by the invalidation function. */
void clearSecure() { _secure = false; }
/** Do not use API without is_secure flag. */
using CacheEntry::matchTag;
using CacheEntry::insert;
};
} // namespace gem5