mem-cache: Split Tags for indexing policies

Split indexing functionality from tags, so that code duplication
is reduced when adding new classes that use different indexing
policies, such as set associative, skewed associative or other
hash-based policies.

An indexing policy defines the mapping between an address's set
and its physical location. For example, a conventional set assoc
cache maps an address to all ways of a single set using an immutable
function, that is, an address is always mapped to the same set x,
regardless of the way. However, skewed assoc caches map an address
to a different set for each way, using a skewing function.

FALRU has been left unmodified as it is a specialization with its
own complexity.

Change-Id: I0838b41663f21eba0aeab7aeb7839e3703ca3324
Reviewed-on: https://gem5-review.googlesource.com/c/8885
Reviewed-by: Jason Lowe-Power <jason@lowepower.com>
Maintainer: Jason Lowe-Power <jason@lowepower.com>
This commit is contained in:
Daniel R. Carvalho
2018-03-09 15:16:41 +01:00
committed by Daniel Carvalho
parent 8f58d9fb87
commit f32882d4fc
16 changed files with 769 additions and 390 deletions

View File

@@ -36,4 +36,3 @@ Source('base.cc')
Source('base_set_assoc.cc')
Source('fa_lru.cc')
Source('sector_tags.cc')
Source('skewed_assoc.cc')

View File

@@ -38,6 +38,7 @@
from m5.params import *
from m5.proxy import *
from ClockedObject import ClockedObject
from IndexingPolicies import *
class BaseTags(ClockedObject):
type = 'BaseTags'
@@ -64,6 +65,14 @@ class BaseTags(ClockedObject):
sequential_access = Param.Bool(Parent.sequential_access,
"Whether to access tags and data sequentially")
# Get indexing policy
indexing_policy = Param.BaseIndexingPolicy(SetAssociative(),
"Indexing policy")
# Set the indexing entry size as the block size
entry_size = Param.Int(Parent.cache_line_size,
"Indexing entry size in bytes")
class BaseSetAssoc(BaseTags):
type = 'BaseSetAssoc'
cxx_header = "mem/cache/tags/base_set_assoc.hh"
@@ -85,6 +94,9 @@ class SectorTags(BaseTags):
# Number of sub-sectors (data blocks) per sector
num_blocks_per_sector = Param.Int(1, "Number of sub-sectors per sector");
# The indexing entry now is a sector block
entry_size = Parent.cache_line_size * Self.num_blocks_per_sector
# Get replacement policy from the parent (cache)
replacement_policy = Param.BaseReplacementPolicy(
Parent.replacement_policy, "Replacement policy")
@@ -96,6 +108,6 @@ class FALRU(BaseTags):
min_tracked_cache_size = Param.MemorySize("128kB", "Minimum cache size for"
" which we track statistics")
class SkewedAssoc(BaseSetAssoc):
type = 'SkewedAssoc'
cxx_header = "mem/cache/tags/skewed_assoc.hh"
# This tag uses its own embedded indexing
indexing_policy = NULL

View File

@@ -52,6 +52,7 @@
#include "base/types.hh"
#include "mem/cache/base.hh"
#include "mem/cache/tags/indexing_policies/base.hh"
#include "mem/request.hh"
#include "sim/core.hh"
#include "sim/sim_exit.hh"
@@ -64,7 +65,7 @@ BaseTags::BaseTags(const Params *p)
accessLatency(p->sequential_access ?
p->tag_latency + p->data_latency :
std::max(p->tag_latency, p->data_latency)),
cache(nullptr),
cache(nullptr), indexingPolicy(p->indexing_policy),
warmupBound((p->warmup_percentage/100.0) * (p->size / p->block_size)),
warmedUp(false), numBlocks(p->size / p->block_size),
dataBlks(new uint8_t[p->size]) // Allocate data storage in one big chunk
@@ -78,10 +79,10 @@ BaseTags::setCache(BaseCache *_cache)
cache = _cache;
}
std::vector<ReplaceableEntry*>
BaseTags::getPossibleLocations(const Addr addr) const
ReplaceableEntry*
BaseTags::findBlockBySetAndWay(int set, int way) const
{
panic("Unimplemented getPossibleLocations for tags subclass");
return indexingPolicy->getEntry(set, way);
}
CacheBlk*
@@ -90,12 +91,12 @@ BaseTags::findBlock(Addr addr, bool is_secure) const
// Extract block tag
Addr tag = extractTag(addr);
// Find possible locations for the given address
const std::vector<ReplaceableEntry*> locations =
getPossibleLocations(addr);
// Find possible entries that may contain the given address
const std::vector<ReplaceableEntry*> entries =
indexingPolicy->getPossibleEntries(addr);
// Search for block
for (const auto& location : locations) {
for (const auto& location : entries) {
CacheBlk* blk = static_cast<CacheBlk*>(location);
if ((blk->tag == tag) && blk->isValid() &&
(blk->isSecure() == is_secure)) {
@@ -134,6 +135,12 @@ BaseTags::insertBlock(const Addr addr, const bool is_secure,
dataAccesses += 1;
}
Addr
BaseTags::extractTag(const Addr addr) const
{
return indexingPolicy->extractTag(addr);
}
void
BaseTags::cleanupRefsVisitor(CacheBlk &blk)
{

View File

@@ -62,6 +62,7 @@
#include "sim/clocked_object.hh"
class BaseCache;
class IndexingPolicy;
class ReplaceableEntry;
/**
@@ -87,6 +88,9 @@ class BaseTags : public ClockedObject
/** Pointer to the parent cache. */
BaseCache *cache;
/** Indexing policy */
BaseIndexingPolicy *indexingPolicy;
/**
* The number of tags that need to be touched to meet the warmup
* percentage.
@@ -161,18 +165,6 @@ class BaseTags : public ClockedObject
*/
void setCache(BaseCache *_cache);
/**
* Find all possible block locations for insertion and replacement of
* an address. Should be called immediately before ReplacementPolicy's
* findVictim() not to break cache resizing.
* Returns blocks in all ways belonging to the set of the address.
*
* @param addr The addr to a find possible locations for.
* @return The possible locations.
*/
virtual std::vector<ReplaceableEntry*> getPossibleLocations(
const Addr addr) const;
public:
typedef BaseTagsParams Params;
BaseTags(const Params *p);
@@ -226,7 +218,7 @@ class BaseTags : public ClockedObject
* @param way The way of the block.
* @return The block.
*/
virtual ReplaceableEntry* findBlockBySetAndWay(int set, int way) const = 0;
virtual ReplaceableEntry* findBlockBySetAndWay(int set, int way) const;
/**
* Align an address to the block size.
@@ -303,7 +295,13 @@ class BaseTags : public ClockedObject
virtual CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) = 0;
virtual Addr extractTag(Addr addr) const = 0;
/**
* Generate the tag from the given address.
*
* @param addr The address to get the tag from.
* @return The tag of the address.
*/
virtual Addr extractTag(const Addr addr) const;
/**
* Insert the new block into the cache and update stats.

View File

@@ -42,7 +42,7 @@
/**
* @file
* Definitions of a base set associative tag store.
* Definitions of a conventional tag store.
*/
#include "mem/cache/tags/base_set_assoc.hh"
@@ -52,27 +52,14 @@
#include "base/intmath.hh"
BaseSetAssoc::BaseSetAssoc(const Params *p)
:BaseTags(p), assoc(p->assoc), allocAssoc(p->assoc),
blks(p->size / p->block_size),
numSets(p->size / (p->block_size * p->assoc)),
:BaseTags(p), allocAssoc(p->assoc), blks(p->size / p->block_size),
sequentialAccess(p->sequential_access),
sets(p->size / (p->block_size * p->assoc)),
replacementPolicy(p->replacement_policy)
{
// Check parameters
if (blkSize < 4 || !isPowerOf2(blkSize)) {
fatal("Block size must be at least 4 and a power of 2");
}
if (!isPowerOf2(numSets)) {
fatal("# of sets must be non-zero and a power of 2");
}
if (assoc <= 0) {
fatal("associativity must be greater than zero");
}
setShift = floorLog2(blkSize);
setMask = numSets - 1;
tagShift = setShift + floorLog2(numSets);
}
void
@@ -81,35 +68,19 @@ BaseSetAssoc::init(BaseCache* cache)
// Set parent cache
setCache(cache);
// Initialize blocks
unsigned blkIndex = 0; // index into blks array
for (unsigned i = 0; i < numSets; ++i) {
sets[i].resize(assoc);
// Initialize all blocks
for (unsigned blk_index = 0; blk_index < numBlocks; blk_index++) {
// Locate next cache block
BlkType* blk = &blks[blk_index];
// link in the data blocks
for (unsigned j = 0; j < assoc; ++j) {
// Select block within the set to be linked
BlkType*& blk = sets[i][j];
// Link block to indexing policy
indexingPolicy->setEntry(blk, blk_index);
// Locate next cache block
blk = &blks[blkIndex];
// Associate a data chunk to the block
blk->data = &dataBlks[blkSize*blk_index];
// Associate a data chunk to the block
blk->data = &dataBlks[blkSize*blkIndex];
// Associate a replacement data entry to the block
blk->replacementData = replacementPolicy->instantiateEntry();
// Setting the tag to j is just to prevent long chains in the
// hash table; won't matter because the block is invalid
blk->tag = j;
// Set its index
blk->setPosition(i, j);
// Update block index
++blkIndex;
}
// Associate a replacement data entry to the block
blk->replacementData = replacementPolicy->instantiateEntry();
}
}
@@ -125,14 +96,11 @@ BaseSetAssoc::invalidate(CacheBlk *blk)
replacementPolicy->invalidate(blk->replacementData);
}
ReplaceableEntry*
BaseSetAssoc::findBlockBySetAndWay(int set, int way) const
{
return sets[set][way];
}
BaseSetAssoc *
BaseSetAssocParams::create()
{
// There must be a indexing policy
fatal_if(!indexing_policy, "An indexing policy is required");
return new BaseSetAssoc(this);
}

View File

@@ -59,71 +59,35 @@
#include "mem/cache/blk.hh"
#include "mem/cache/replacement_policies/base.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/tags/indexing_policies/base.hh"
#include "params/BaseSetAssoc.hh"
/**
* A BaseSetAssoc cache tag store.
* A basic cache tag store.
* @sa \ref gem5MemorySystem "gem5 Memory System"
*
* The BaseSetAssoc placement policy divides the cache into s sets of w
* cache lines (ways). A cache line is mapped onto a set, and can be placed
* into any of the ways of this set.
* cache lines (ways).
*/
class BaseSetAssoc : public BaseTags
{
public:
/** Typedef the block type used in this tag store. */
typedef CacheBlk BlkType;
/** Typedef the set type used in this tag store. */
typedef std::vector<CacheBlk*> SetType;
protected:
/** The associativity of the cache. */
const unsigned assoc;
/** The allocatable associativity of the cache (alloc mask). */
unsigned allocAssoc;
/** The cache blocks. */
std::vector<BlkType> blks;
/** The number of sets in the cache. */
const unsigned numSets;
/** Whether tags and data are accessed sequentially. */
const bool sequentialAccess;
/** The cache sets. */
std::vector<SetType> sets;
/** The amount to shift the address to get the set. */
int setShift;
/** The amount to shift the address to get the tag. */
int tagShift;
/** Mask out all bits that aren't part of the set index. */
unsigned setMask;
/** Replacement policy */
BaseReplacementPolicy *replacementPolicy;
/**
* Find all possible block locations for insertion and replacement of
* an address. Should be called immediately before ReplacementPolicy's
* findVictim() not to break cache resizing.
* Returns blocks in all ways belonging to the set of the address.
*
* @param addr The addr to a find possible locations for.
* @return The possible locations.
*/
std::vector<ReplaceableEntry*> getPossibleLocations(const Addr addr) const
override
{
std::vector<ReplaceableEntry*> locations;
for (const auto& blk : sets[extractSet(addr)]) {
locations.push_back(static_cast<ReplaceableEntry*>(blk));
}
return locations;
}
public:
/** Convenience typedef. */
typedef BaseSetAssocParams Params;
@@ -204,15 +168,6 @@ class BaseSetAssoc : public BaseTags
return blk;
}
/**
* Find a block given set and way.
*
* @param set The set of the block.
* @param way The way of the block.
* @return The block.
*/
ReplaceableEntry* findBlockBySetAndWay(int set, int way) const override;
/**
* Find replacement victim based on address. The list of evicted blocks
* only contains the victim.
@@ -225,13 +180,13 @@ class BaseSetAssoc : public BaseTags
CacheBlk* findVictim(Addr addr, const bool is_secure,
std::vector<CacheBlk*>& evict_blks) const override
{
// Get possible locations for the victim block
std::vector<ReplaceableEntry*> locations = getPossibleLocations(addr);
// Get possible entries to be victimized
const std::vector<ReplaceableEntry*> entries =
indexingPolicy->getPossibleEntries(addr);
// Choose replacement victim from replacement candidates
CacheBlk* victim = static_cast<CacheBlk*>(replacementPolicy->getVictim(
std::vector<ReplaceableEntry*>(
locations.begin(), locations.end())));
entries));
// There is only one eviction for this replacement
evict_blks.push_back(victim);
@@ -285,25 +240,14 @@ class BaseSetAssoc : public BaseTags
}
/**
* Generate the tag from the given address.
* @param addr The address to get the tag from.
* @return The tag of the address.
*/
Addr extractTag(Addr addr) const override
{
return (addr >> tagShift);
}
/**
* Regenerate the block address from the tag and set.
* Regenerate the block address from the tag and indexing location.
*
* @param block The block.
* @return the block address.
*/
Addr regenerateBlkAddr(const CacheBlk* blk) const override
{
const Addr set = blk->getSet() << setShift;
return ((blk->tag << tagShift) | set);
return indexingPolicy->regenerateAddr(blk->tag, blk);
}
void forEachBlk(std::function<void(CacheBlk &)> visitor) override {
@@ -320,18 +264,6 @@ class BaseSetAssoc : public BaseTags
}
return false;
}
private:
/**
* Calculate the set index from the address.
*
* @param addr The address to get the set from.
* @return The set index of the address.
*/
int extractSet(Addr addr) const
{
return ((addr >> setShift) & setMask);
}
};
#endif //__MEM_CACHE_TAGS_BASE_SET_ASSOC_HH__

View File

@@ -0,0 +1,55 @@
# Copyright (c) 2018 Inria
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Daniel Carvalho
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
class BaseIndexingPolicy(SimObject):
    """Abstract base SimObject for cache indexing policies, which define the
    mapping between an address and its possible entry locations."""
    type = 'BaseIndexingPolicy'
    abstract = True
    cxx_header = "mem/cache/tags/indexing_policies/base.hh"

    # Get the size from the parent (cache)
    size = Param.MemorySize(Parent.size, "capacity in bytes")

    # Get the entry size from the parent (tags)
    entry_size = Param.Int(Parent.entry_size, "entry size in bytes")

    # Get the associativity
    assoc = Param.Int(Parent.assoc, "associativity")
class SetAssociative(BaseIndexingPolicy):
    """Conventional set associative indexing: an address maps to the same
    set in every way (identity mapping)."""
    type = 'SetAssociative'
    cxx_class = 'SetAssociative'
    cxx_header = "mem/cache/tags/indexing_policies/set_associative.hh"
class SkewedAssociative(BaseIndexingPolicy):
    """Skewed associative indexing: an address maps to a different set for
    each way, using a skewing function."""
    type = 'SkewedAssociative'
    cxx_class = 'SkewedAssociative'
    cxx_header = "mem/cache/tags/indexing_policies/skewed_associative.hh"

View File

@@ -0,0 +1,36 @@
# -*- mode:python -*-
# Copyright (c) 2018 Inria.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Daniel Carvalho
Import('*')
SimObject('IndexingPolicies.py')
Source('base.cc')
Source('set_associative.cc')
Source('skewed_associative.cc')

View File

@@ -0,0 +1,102 @@
/*
* Copyright (c) 2018 Inria
* Copyright (c) 2012-2014,2017 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2003-2005,2014 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Daniel Carvalho
* Erik Hallnor
*/
/**
* @file
* Definitions of a common framework for indexing policies.
*/
#include "mem/cache/tags/indexing_policies/base.hh"
#include <cstdlib>
#include "base/intmath.hh"
#include "base/logging.hh"
#include "mem/cache/replacement_policies/base.hh"
// Construct the policy from its params: derive the set count and the
// shift/mask values used to slice an address into offset, set and tag.
// NOTE: members are initialized in their declaration order (assoc, numSets,
// setShift, setMask, sets, tagShift), which matches the dependency order
// relied upon in this initializer list (e.g. setMask uses numSets).
BaseIndexingPolicy::BaseIndexingPolicy(const Params *p)
    : SimObject(p), assoc(p->assoc),
      numSets(p->size / (p->entry_size * assoc)),
      setShift(floorLog2(p->entry_size)), setMask(numSets - 1), sets(numSets),
      tagShift(setShift + floorLog2(numSets))
{
    // setMask only works if numSets is a power of two
    fatal_if(!isPowerOf2(numSets), "# of sets must be non-zero and a power " \
             "of 2");
    fatal_if(assoc <= 0, "associativity must be greater than zero");

    // Make space for the entries; pointers are filled in later via setEntry()
    for (uint32_t i = 0; i < numSets; ++i) {
        sets[i].resize(assoc);
    }
}
ReplaceableEntry*
BaseIndexingPolicy::getEntry(const uint32_t set, const uint32_t way) const
{
    // Fetch the requested set, then the requested way within that set.
    // All entries must have been assigned via setEntry() beforehand.
    const std::vector<ReplaceableEntry*>& entries_in_set = sets[set];
    return entries_in_set[way];
}
/**
 * Associate an entry pointer to its physical (set, way) position.
 *
 * @param entry The entry pointer.
 * @param index A unique flat index for the entry.
 */
void
BaseIndexingPolicy::setEntry(ReplaceableEntry* entry, const uint64_t index)
{
    // Decompose the flat index into set and way. Plain unsigned division
    // replaces the previous std::div() call, which cast the uint64_t index
    // to long long and could therefore misbehave for indices above
    // LLONG_MAX; it is also simpler and avoids the <cstdlib> detour.
    const uint32_t set = index / assoc;
    const uint32_t way = index % assoc;

    // Sanity check: the flat index must fall within the table
    assert(set < numSets);

    // Assign a free pointer
    sets[set][way] = entry;

    // Inform the entry of its position
    entry->setPosition(set, way);
}
Addr
BaseIndexingPolicy::extractTag(const Addr addr) const
{
    // Everything above the set and offset bits is the tag.
    const Addr tag = addr >> tagShift;
    return tag;
}

View File

@@ -0,0 +1,163 @@
/*
* Copyright (c) 2018 Inria
* Copyright (c) 2012-2014,2017 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2003-2005,2014 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Daniel Carvalho
* Erik Hallnor
*/
/**
* @file
* Declaration of a common framework for indexing policies.
*/
#ifndef __MEM_CACHE_INDEXING_POLICIES_BASE_HH__
#define __MEM_CACHE_INDEXING_POLICIES_BASE_HH__
#include <vector>
#include "params/BaseIndexingPolicy.hh"
#include "sim/sim_object.hh"
class ReplaceableEntry;
/**
* A common base class for indexing table locations. Classes that inherit
* from it determine hash functions that should be applied based on the set
* and way. These functions are then applied to re-map the original values.
* @sa \ref gem5MemorySystem "gem5 Memory System"
*/
class BaseIndexingPolicy : public SimObject
{
  protected:
    /**
     * The associativity (number of ways per set).
     */
    const unsigned assoc;

    /**
     * The number of sets in the cache.
     */
    const uint32_t numSets;

    /**
     * The amount to shift the address to get the set.
     */
    const int setShift;

    /**
     * Mask out all bits that aren't part of the set index.
     */
    const unsigned setMask;

    /**
     * The cache sets. Outer index is the set, inner index is the way.
     * Entries are non-owning pointers assigned via setEntry().
     */
    std::vector<std::vector<ReplaceableEntry*>> sets;

    /**
     * The amount to shift the address to get the tag.
     */
    const int tagShift;

  public:
    /**
     * Convenience typedef.
     */
    typedef BaseIndexingPolicyParams Params;

    /**
     * Construct and initialize this policy.
     */
    BaseIndexingPolicy(const Params *p);

    /**
     * Destructor.
     * NOTE(review): not declared virtual here, despite this class having
     * pure virtual members; this is safe only if the SimObject base class
     * already declares a virtual destructor — confirm.
     */
    ~BaseIndexingPolicy() {};

    /**
     * Associate a pointer to an entry to its physical counterpart.
     *
     * @param entry The entry pointer.
     * @param index An unique index for the entry.
     */
    void setEntry(ReplaceableEntry* entry, const uint64_t index);

    /**
     * Get an entry based on its set and way. All entries must have been set
     * already before calling this function.
     *
     * @param set The set of the desired entry.
     * @param way The way of the desired entry.
     * @return entry The entry pointer.
     */
    ReplaceableEntry* getEntry(const uint32_t set, const uint32_t way) const;

    /**
     * Generate the tag from the given address.
     *
     * @param addr The address to get the tag from.
     * @return The tag of the address.
     */
    Addr extractTag(const Addr addr) const;

    /**
     * Find all possible entries for insertion and replacement of an address.
     * Should be called immediately before ReplacementPolicy's findVictim()
     * not to break cache resizing.
     *
     * @param addr The addr to a find possible entries for.
     * @return The possible entries.
     */
    virtual std::vector<ReplaceableEntry*> getPossibleEntries(const Addr addr)
                                           const = 0;

    /**
     * Regenerate an entry's address from its tag and assigned indexing bits.
     *
     * @param tag The tag bits.
     * @param entry The entry.
     * @return the entry's original address.
     */
    virtual Addr regenerateAddr(const Addr tag, const ReplaceableEntry* entry)
                                const = 0;
};
#endif //__MEM_CACHE_INDEXING_POLICIES_BASE_HH__

View File

@@ -0,0 +1,82 @@
/*
* Copyright (c) 2018 Inria
* Copyright (c) 2012-2014,2017 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2003-2005,2014 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Daniel Carvalho
* Erik Hallnor
*/
/**
* @file
* Definitions of a set associative indexing policy.
*/
#include "mem/cache/tags/indexing_policies/set_associative.hh"
#include "mem/cache/replacement_policies/base.hh"
// A set associative policy adds no state of its own; all shift/mask
// computation is handled by the base indexing policy constructor.
SetAssociative::SetAssociative(const Params *p)
    : BaseIndexingPolicy(p)
{
}
uint32_t
SetAssociative::extractSet(const Addr addr) const
{
return (addr >> setShift) & setMask;
}
Addr
SetAssociative::regenerateAddr(const Addr tag, const ReplaceableEntry* entry)
    const
{
    // Re-assemble the address from its tag field and the entry's set field.
    const Addr tag_bits = tag << tagShift;
    const Addr set_bits = entry->getSet() << setShift;
    return tag_bits | set_bits;
}
std::vector<ReplaceableEntry*>
SetAssociative::getPossibleEntries(const Addr addr) const
{
return sets[extractSet(addr)];
}
// SimObject factory hook: instantiate the policy from its generated params.
SetAssociative*
SetAssociativeParams::create()
{
    return new SetAssociative(this);
}

View File

@@ -0,0 +1,132 @@
/*
* Copyright (c) 2018 Inria
* Copyright (c) 2012-2014,2017 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2003-2005,2014 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Daniel Carvalho
* Erik Hallnor
*/
/**
* @file
* Declaration of a set associative indexing policy.
*/
#ifndef __MEM_CACHE_INDEXING_POLICIES_SET_ASSOCIATIVE_HH__
#define __MEM_CACHE_INDEXING_POLICIES_SET_ASSOCIATIVE_HH__
#include <vector>
#include "mem/cache/tags/indexing_policies/base.hh"
#include "params/SetAssociative.hh"
class ReplaceableEntry;
/**
* A set associative indexing policy.
* @sa \ref gem5MemorySystem "gem5 Memory System"
*
* The set associative indexing policy has an immutable/identity mapping, so a
* value x is always mapped to set x, independent of the way, that is,
* Hash(A, 0) = Hash(A, 1) = ... = Hash(A, N-1), where N is the number of ways.
*
* For example, let's assume address A maps to set 3 on way 0. This policy
* makes so that A is also mappable to set 3 on every other way. Visually, the
* possible locations of A are, for a table with 4 ways and 8 sets:
* Way 0 1 2 3
* Set _ _ _ _
* 0 |_| |_| |_| |_|
* 1 |_| |_| |_| |_|
* 2 |_| |_| |_| |_|
* 3 |X| |X| |X| |X|
* 4 |_| |_| |_| |_|
* 5 |_| |_| |_| |_|
* 6 |_| |_| |_| |_|
* 7 |_| |_| |_| |_|
*/
class SetAssociative : public BaseIndexingPolicy
{
private:
/**
* Apply a hash function to calculate address set.
*
* @param addr The address to calculate the set for.
* @return The set index for given combination of address and way.
*/
uint32_t extractSet(const Addr addr) const;
public:
/**
* Convenience typedef.
*/
typedef SetAssociativeParams Params;
/**
* Construct and initialize this policy.
*/
SetAssociative(const Params *p);
/**
* Destructor.
*/
~SetAssociative() {};
/**
* Find all possible entries for insertion and replacement of an address.
* Should be called immediately before ReplacementPolicy's findVictim()
* not to break cache resizing.
* Returns entries in all ways belonging to the set of the address.
*
* @param addr The addr to a find possible entries for.
* @return The possible entries.
*/
std::vector<ReplaceableEntry*> getPossibleEntries(const Addr addr) const
override;
/**
* Regenerate an entry's address from its tag and assigned set and way.
*
* @param tag The tag bits.
* @param entry The entry.
* @return the entry's original addr value.
*/
Addr regenerateAddr(const Addr tag, const ReplaceableEntry* entry) const
override;
};
#endif //__MEM_CACHE_INDEXING_POLICIES_SET_ASSOCIATIVE_HH__

View File

@@ -30,18 +30,18 @@
/**
* @file
* Definitions of a skewed associative placement policy.
* Definitions of a skewed associative indexing policy.
*/
#include "mem/cache/tags/skewed_assoc.hh"
#include <vector>
#include "mem/cache/tags/indexing_policies/skewed_associative.hh"
#include "base/bitfield.hh"
#include "base/intmath.hh"
#include "base/logging.hh"
#include "mem/cache/replacement_policies/base.hh"
SkewedAssoc::SkewedAssoc(const Params *p)
: BaseSetAssoc(p), msbShift(floorLog2(numSets) - 1)
SkewedAssociative::SkewedAssociative(const Params *p)
: BaseIndexingPolicy(p), msbShift(floorLog2(numSets) - 1)
{
if (assoc > NUM_SKEWING_FUNCTIONS) {
warn_once("Associativity higher than number of skewing functions. " \
@@ -59,7 +59,7 @@ SkewedAssoc::SkewedAssoc(const Params *p)
}
Addr
SkewedAssoc::hash(const Addr addr) const
SkewedAssociative::hash(const Addr addr) const
{
// Get relevant bits
const uint8_t lsb = bits<Addr>(addr, 0);
@@ -71,7 +71,7 @@ SkewedAssoc::hash(const Addr addr) const
}
Addr
SkewedAssoc::dehash(const Addr addr) const
SkewedAssociative::dehash(const Addr addr) const
{
// Get relevant bits. The original MSB is one bit away on the current MSB
// (which is the XOR bit). The original LSB can be retrieved from inverting
@@ -86,7 +86,7 @@ SkewedAssoc::dehash(const Addr addr) const
}
Addr
SkewedAssoc::skew(const Addr addr, const unsigned way) const
SkewedAssociative::skew(const Addr addr, const uint32_t way) const
{
// Assume an address of size A bits can be decomposed into
// {addr3, addr2, addr1, addr0}, where:
@@ -130,7 +130,7 @@ SkewedAssoc::skew(const Addr addr, const unsigned way) const
// If we have more than 8 ways, just pile them up on hashes. This is not
// the optimal solution, and can be improved by adding more skewing
// functions to the previous selector
for (int i = 0; i < way/NUM_SKEWING_FUNCTIONS; i++) {
for (uint32_t i = 0; i < way/NUM_SKEWING_FUNCTIONS; i++) {
addr1 = hash(addr1);
}
@@ -138,7 +138,7 @@ SkewedAssoc::skew(const Addr addr, const unsigned way) const
}
Addr
SkewedAssoc::deskew(const Addr addr, const unsigned way) const
SkewedAssociative::deskew(const Addr addr, const uint32_t way) const
{
// Get relevant bits of the addr
Addr addr1 = bits<Addr>(addr, msbShift, 0);
@@ -146,7 +146,7 @@ SkewedAssoc::deskew(const Addr addr, const unsigned way) const
// If we have more than NUM_SKEWING_FUNCTIONS ways, unpile the hashes
if (way >= NUM_SKEWING_FUNCTIONS) {
for (int i = 0; i < way/NUM_SKEWING_FUNCTIONS; i++) {
for (uint32_t i = 0; i < way/NUM_SKEWING_FUNCTIONS; i++) {
addr1 = dehash(addr1);
}
}
@@ -190,58 +190,37 @@ SkewedAssoc::deskew(const Addr addr, const unsigned way) const
}
}
unsigned
SkewedAssoc::extractSet(Addr addr, unsigned way) const
uint32_t
SkewedAssociative::extractSet(const Addr addr, const uint32_t way) const
{
return skew(addr >> setShift, way) & setMask;
}
Addr
SkewedAssoc::regenerateBlkAddr(const CacheBlk* blk) const
SkewedAssociative::regenerateAddr(const Addr tag,
const ReplaceableEntry* entry) const
{
const Addr addr = (blk->tag << (msbShift + 1)) | blk->getSet();
const Addr set = deskew(addr, blk->getWay()) & setMask;
return (blk->tag << tagShift) | (set << setShift);
const Addr addr_set = (tag << (msbShift + 1)) | entry->getSet();
return (tag << tagShift) |
((deskew(addr_set, entry->getWay()) & setMask) << setShift);
}
std::vector<ReplaceableEntry*>
SkewedAssoc::getPossibleLocations(const Addr addr) const
SkewedAssociative::getPossibleEntries(const Addr addr) const
{
std::vector<ReplaceableEntry*> locations;
std::vector<ReplaceableEntry*> entries;
// Parse all ways
for (int way = 0; way < assoc; ++way) {
for (uint32_t way = 0; way < assoc; ++way) {
// Apply hash to get set, and get way entry in it
locations.push_back(sets[extractSet(addr, way)][way]);
entries.push_back(sets[extractSet(addr, way)][way]);
}
return locations;
return entries;
}
CacheBlk*
SkewedAssoc::findBlock(Addr addr, bool is_secure) const
SkewedAssociative *
SkewedAssociativeParams::create()
{
// Extract block tag
Addr tag = extractTag(addr);
// Find possible locations for the given address
std::vector<ReplaceableEntry*> locations = getPossibleLocations(addr);
// Search for block
for (const auto& location : locations) {
CacheBlk* blk = static_cast<CacheBlk*>(location);
if ((blk->tag == tag) && blk->isValid() &&
(blk->isSecure() == is_secure)) {
return blk;
}
}
// Did not find block
return nullptr;
}
SkewedAssoc *
SkewedAssocParams::create()
{
return new SkewedAssoc(this);
return new SkewedAssociative(this);
}

View File

@@ -30,28 +30,45 @@
/**
* @file
* Declaration of a skewed associative tag store.
* Declaration of a skewed associative indexing policy.
*/
#ifndef __MEM_CACHE_TAGS_SKEWED_ASSOC_HH__
#define __MEM_CACHE_TAGS_SKEWED_ASSOC_HH__
#ifndef __MEM_CACHE_INDEXING_POLICIES_SKEWED_ASSOCIATIVE_HH__
#define __MEM_CACHE_INDEXING_POLICIES_SKEWED_ASSOCIATIVE_HH__
#include "mem/cache/blk.hh"
#include "mem/cache/tags/base_set_assoc.hh"
#include "params/SkewedAssoc.hh"
#include <vector>
#include "mem/cache/tags/indexing_policies/base.hh"
#include "params/SkewedAssociative.hh"
class ReplaceableEntry;
/**
* A SkewedAssoc cache tag store.
* @sa \ref gem5MemorySystem "gem5 Memory System"
* A skewed associative indexing policy.
* @sa \ref gem5MemorySystem "gem5 Memory System"
*
* The SkewedAssoc placement policy divides the cache into s sets of w
* cache lines (ways). A cache line has a different set mapping for each way,
* according to a skewing function.
* The skewed indexing policy has a variable mapping based on a hash function,
* so a value x can be mapped to different sets, based on the way being used.
*
* For example, let's assume address A maps to set 3 on way 0. It will likely
* have a different set for every other way. Visually, the possible locations
* of A are, for a table with 4 ways and 8 sets (arbitrarily chosen sets; these
* locations depend on A and the hashing function used):
* Way 0 1 2 3
* Set _ _ _ _
* 0 |_| |_| |X| |_|
* 1 |_| |_| |_| |X|
* 2 |_| |_| |_| |_|
* 3 |X| |_| |_| |_|
* 4 |_| |_| |_| |_|
* 5 |_| |X| |_| |_|
* 6 |_| |_| |_| |_|
* 7 |_| |_| |_| |_|
*
* If provided with an associativity higher than the number of skewing
* functions, the skewing functions of the extra ways might be sub-optimal.
*/
class SkewedAssoc : public BaseSetAssoc
class SkewedAssociative : public BaseIndexingPolicy
{
private:
/**
@@ -98,7 +115,7 @@ class SkewedAssoc : public BaseSetAssoc
* @param way The cache way, used to select a hash function.
* @return The skewed address.
*/
Addr skew(const Addr addr, const unsigned way) const;
Addr skew(const Addr addr, const uint32_t way) const;
/**
* Address deskewing function (inverse of the skew function) of the given
@@ -109,7 +126,7 @@ class SkewedAssoc : public BaseSetAssoc
* @param way The cache way, used to select a hash function.
* @return The deskewed address.
*/
Addr deskew(const Addr addr, const unsigned way) const;
Addr deskew(const Addr addr, const uint32_t way) const;
/**
* Apply a skewing function to calculate address' set given a way.
@@ -118,53 +135,43 @@ class SkewedAssoc : public BaseSetAssoc
* @param way The way to get the set from.
* @return The set index for given combination of address and way.
*/
unsigned extractSet(Addr addr, unsigned way) const;
protected:
/**
* Find all possible block locations for insertion and replacement of
* an address. Should be called immediately before ReplacementPolicy's
* findVictim() not to break cache resizing.
* Returns blocks in all ways belonging to the set of the address.
*
* @param addr The addr to a find possible locations for.
* @return The possible locations.
*/
std::vector<ReplaceableEntry*> getPossibleLocations(const Addr addr) const
override;
uint32_t extractSet(const Addr addr, const uint32_t way) const;
public:
/** Convenience typedef. */
typedef SkewedAssocParams Params;
typedef SkewedAssociativeParams Params;
/**
* Construct and initialize this tag store.
* Construct and initialize this policy.
*/
SkewedAssoc(const Params *p);
SkewedAssociative(const Params *p);
/**
* Destructor
* Destructor.
*/
~SkewedAssoc() {};
~SkewedAssociative() {};
/**
* Finds the given address in the cache, do not update replacement data.
* i.e. This is a no-side-effect find of a block.
* Find all possible entries for insertion and replacement of an address.
* Should be called immediately before ReplacementPolicy's findVictim()
* not to break cache resizing.
*
* @param addr The address to find.
* @param is_secure True if the target memory space is secure.
* @return Pointer to the cache block if found.
* @param addr The addr to a find possible entries for.
* @return The possible entries.
*/
CacheBlk* findBlock(Addr addr, bool is_secure) const override;
std::vector<ReplaceableEntry*> getPossibleEntries(const Addr addr) const
override;
/**
* Regenerate the block address from the tag, set and way. Uses the
* inverse of the skewing function.
* Regenerate an entry's address from its tag and assigned set and way.
* Uses the inverse of the skewing function.
*
* @param block The block.
* @return the block address.
* @param tag The tag bits.
* @param entry The entry.
* @return the entry's address.
*/
Addr regenerateBlkAddr(const CacheBlk* blk) const override;
Addr regenerateAddr(const Addr tag, const ReplaceableEntry* entry) const
override;
};
#endif //__MEM_CACHE_TAGS_SKEWED_ASSOC_HH__
#endif //__MEM_CACHE_INDEXING_POLICIES_SKEWED_ASSOCIATIVE_HH__

View File

@@ -30,7 +30,7 @@
/**
* @file
* Definitions of a base set associative sector tag store.
* Definitions of a sector tag store.
*/
#include "mem/cache/tags/sector_tags.hh"
@@ -45,28 +45,22 @@
#include "debug/CacheRepl.hh"
#include "mem/cache/base.hh"
#include "mem/cache/replacement_policies/base.hh"
#include "mem/cache/tags/indexing_policies/base.hh"
SectorTags::SectorTags(const SectorTagsParams *p)
: BaseTags(p), assoc(p->assoc), allocAssoc(p->assoc),
: BaseTags(p), allocAssoc(p->assoc),
sequentialAccess(p->sequential_access),
replacementPolicy(p->replacement_policy),
numBlocksPerSector(p->num_blocks_per_sector),
numSectors(numBlocks / p->num_blocks_per_sector),
numSets(numSectors / p->assoc),
blks(numBlocks), secBlks(numSectors), sets(numSets),
sectorShift(floorLog2(blkSize)),
setShift(sectorShift + floorLog2(numBlocksPerSector)),
tagShift(setShift + floorLog2(numSets)),
sectorMask(numBlocksPerSector - 1), setMask(numSets - 1)
numSectors(numBlocks / p->num_blocks_per_sector), blks(numBlocks),
secBlks(numSectors), sectorShift(floorLog2(blkSize)),
sectorMask(numBlocksPerSector - 1)
{
// Check parameters
fatal_if(blkSize < 4 || !isPowerOf2(blkSize),
"Block size must be at least 4 and a power of 2");
fatal_if(!isPowerOf2(numSets),
"# of sets must be non-zero and a power of 2");
fatal_if(!isPowerOf2(numBlocksPerSector),
"# of blocks per sector must be non-zero and a power of 2");
fatal_if(assoc <= 0, "associativity must be greater than zero");
}
void
@@ -75,54 +69,43 @@ SectorTags::init(BaseCache* cache)
// Set parent cache
setCache(cache);
// Initialize all sets
unsigned sec_blk_index = 0; // index into sector blks array
// Initialize all blocks
unsigned blk_index = 0; // index into blks array
for (unsigned i = 0; i < numSets; ++i) {
sets[i].resize(assoc);
for (unsigned sec_blk_index = 0; sec_blk_index < numSectors;
sec_blk_index++)
{
// Locate next cache sector
SectorBlk* sec_blk = &secBlks[sec_blk_index];
// Initialize all sectors in this set
for (unsigned j = 0; j < assoc; ++j) {
// Link block to indexing policy
indexingPolicy->setEntry(sec_blk, sec_blk_index);
// Associate a replacement data entry to the sector
sec_blk->replacementData = replacementPolicy->instantiateEntry();
// Initialize all blocks in this sector
sec_blk->blks.resize(numBlocksPerSector);
for (unsigned k = 0; k < numBlocksPerSector; ++k){
// Select block within the set to be linked
SectorBlk*& sec_blk = sets[i][j];
SectorSubBlk*& blk = sec_blk->blks[k];
// Locate next cache sector
sec_blk = &secBlks[sec_blk_index];
// Locate next cache block
blk = &blks[blk_index];
// Associate a replacement data entry to the sector
sec_blk->replacementData = replacementPolicy->instantiateEntry();
// Associate a data chunk to the block
blk->data = &dataBlks[blkSize*blk_index];
// Set its index
sec_blk->setPosition(i, j);
// Associate sector block to this block
blk->setSectorBlock(sec_blk);
// Initialize all blocks in this sector
sec_blk->blks.resize(numBlocksPerSector);
for (unsigned k = 0; k < numBlocksPerSector; ++k){
// Select block within the set to be linked
SectorSubBlk*& blk = sec_blk->blks[k];
// Associate the sector replacement data to this block
blk->replacementData = sec_blk->replacementData;
// Locate next cache block
blk = &blks[blk_index];
// Set its index and sector offset
blk->setSectorOffset(k);
// Associate a data chunk to the block
blk->data = &dataBlks[blkSize*blk_index];
// Associate sector block to this block
blk->setSectorBlock(sec_blk);
// Associate the sector replacement data to this block
blk->replacementData = sec_blk->replacementData;
// Set its index and sector offset
blk->setPosition(i, j);
blk->setSectorOffset(k);
// Update block index
++blk_index;
}
// Update sector block index
++sec_blk_index;
// Update block index
++blk_index;
}
}
}
@@ -196,16 +179,6 @@ SectorTags::accessBlock(Addr addr, bool is_secure, Cycles &lat)
return blk;
}
std::vector<ReplaceableEntry*>
SectorTags::getPossibleLocations(const Addr addr) const
{
std::vector<ReplaceableEntry*> locations;
for (const auto& blk : sets[extractSet(addr)]) {
locations.push_back(static_cast<ReplaceableEntry*>(blk));
}
return locations;
}
void
SectorTags::insertBlock(const Addr addr, const bool is_secure,
const int src_master_ID, const uint32_t task_ID,
@@ -243,12 +216,12 @@ SectorTags::findBlock(Addr addr, bool is_secure) const
// due to sectors being composed of contiguous-address entries
const Addr offset = extractSectorOffset(addr);
// Find all possible sector locations for the given address
const std::vector<ReplaceableEntry*> locations =
getPossibleLocations(addr);
// Find all possible sector entries that may contain the given address
const std::vector<ReplaceableEntry*> entries =
indexingPolicy->getPossibleEntries(addr);
// Search for block
for (const auto& sector : locations) {
for (const auto& sector : entries) {
auto blk = static_cast<SectorBlk*>(sector)->blks[offset];
if (blk->getTag() == tag && blk->isValid() &&
blk->isSecure() == is_secure) {
@@ -260,24 +233,18 @@ SectorTags::findBlock(Addr addr, bool is_secure) const
return nullptr;
}
ReplaceableEntry*
SectorTags::findBlockBySetAndWay(int set, int way) const
{
return sets[set][way];
}
CacheBlk*
SectorTags::findVictim(Addr addr, const bool is_secure,
std::vector<CacheBlk*>& evict_blks) const
{
// Get all possible locations of this sector
const std::vector<ReplaceableEntry*> sector_locations =
getPossibleLocations(addr);
// Get possible entries to be victimized
const std::vector<ReplaceableEntry*> sector_entries =
indexingPolicy->getPossibleEntries(addr);
// Check if the sector this address belongs to has been allocated
Addr tag = extractTag(addr);
SectorBlk* victim_sector = nullptr;
for (const auto& sector : sector_locations) {
for (const auto& sector : sector_entries) {
SectorBlk* sector_blk = static_cast<SectorBlk*>(sector);
if ((tag == sector_blk->getTag()) && sector_blk->isValid() &&
(is_secure == sector_blk->isSecure())){
@@ -290,10 +257,10 @@ SectorTags::findVictim(Addr addr, const bool is_secure,
if (victim_sector == nullptr){
// Choose replacement victim from replacement candidates
victim_sector = static_cast<SectorBlk*>(replacementPolicy->getVictim(
sector_locations));
sector_entries));
}
// Get the location of the victim block within the sector
// Get the entry of the victim block within the sector
SectorSubBlk* victim = victim_sector->blks[extractSectorOffset(addr)];
// Get evicted blocks. Blocks are only evicted if the sectors mismatch and
@@ -317,18 +284,6 @@ SectorTags::findVictim(Addr addr, const bool is_secure,
return victim;
}
Addr
SectorTags::extractTag(Addr addr) const
{
return addr >> tagShift;
}
int
SectorTags::extractSet(Addr addr) const
{
return (addr >> setShift) & setMask;
}
int
SectorTags::extractSectorOffset(Addr addr) const
{
@@ -339,9 +294,9 @@ Addr
SectorTags::regenerateBlkAddr(const CacheBlk* blk) const
{
const SectorSubBlk* blk_cast = static_cast<const SectorSubBlk*>(blk);
const Addr set = blk_cast->getSectorBlock()->getSet() << setShift;
return ((blk_cast->getTag() << tagShift) | set |
((Addr)blk_cast->getSectorOffset() << sectorShift));
const SectorBlk* sec_blk = blk_cast->getSectorBlock();
const Addr sec_addr = indexingPolicy->regenerateAddr(blk->tag, sec_blk);
return sec_addr | ((Addr)blk_cast->getSectorOffset() << sectorShift);
}
void
@@ -366,5 +321,8 @@ SectorTags::anyBlk(std::function<bool(CacheBlk &)> visitor)
SectorTags *
SectorTagsParams::create()
{
// There must be a indexing policy
fatal_if(!indexing_policy, "An indexing policy is required");
return new SectorTags(this);
}

View File

@@ -30,7 +30,7 @@
/**
* @file
* Declaration of a sector set associative tag store.
* Declaration of a sector tag store.
*/
#ifndef __MEM_CACHE_TAGS_SECTOR_TAGS_HH__
@@ -58,11 +58,6 @@ class ReplaceableEntry;
class SectorTags : public BaseTags
{
protected:
/** Typedef the set type used in this tag store. */
typedef std::vector<SectorBlk*> SetType;
/** The associativity of the cache. */
const unsigned assoc;
/** The allocatable associativity of the cache (alloc mask). */
unsigned allocAssoc;
@@ -77,40 +72,19 @@ class SectorTags : public BaseTags
/** The number of sectors in the cache. */
const unsigned numSectors;
/** The number of sets in the cache. */
const unsigned numSets;
/** The cache blocks. */
std::vector<SectorSubBlk> blks;
/** The cache sector blocks. */
std::vector<SectorBlk> secBlks;
/** The cache sets. */
std::vector<SetType> sets;
// Organization of an address: Tag | Set # | Sector Offset # | Offset #
// Organization of an address:
// Tag | Placement Location | Sector Offset # | Offset #
/** The amount to shift the address to get the sector tag. */
const int sectorShift;
/** The amount to shift the address to get the set. */
const int setShift;
/** The amount to shift the address to get the tag. */
const int tagShift;
/** Mask out all bits that aren't part of the sector tag. */
const unsigned sectorMask;
/** Mask out all bits that aren't part of the set index. */
const unsigned setMask;
/**
* Find all possible block locations for insertion and replacement of
* an address. Should be called immediately before ReplacementPolicy's
* findVictim() not to break cache resizing.
* Returns sector blocks in all ways belonging to the set of the address.
*
* @param addr The addr to a find possible locations for.
* @return The possible locations.
*/
std::vector<ReplaceableEntry*> getPossibleLocations(const Addr addr) const
override;
public:
/** Convenience typedef. */
@@ -177,15 +151,6 @@ class SectorTags : public BaseTags
*/
CacheBlk* findBlock(Addr addr, bool is_secure) const override;
/**
* Find a sector block given set and way.
*
* @param set The set of the block.
* @param way The way of the block.
* @return The block.
*/
ReplaceableEntry* findBlockBySetAndWay(int set, int way) const override;
/**
* Find replacement victim based on address.
*
@@ -197,22 +162,6 @@ class SectorTags : public BaseTags
CacheBlk* findVictim(Addr addr, const bool is_secure,
std::vector<CacheBlk*>& evict_blks) const override;
/**
* Generate the sector tag from the given address.
*
* @param addr The address to get the sector tag from.
* @return The sector tag of the address.
*/
Addr extractTag(Addr addr) const override;
/**
* Calculate the set index from the address.
*
* @param addr The address to get the set from.
* @return The set index of the address.
*/
int extractSet(Addr addr) const;
/**
* Calculate a block's offset in a sector from the address.
*
@@ -222,7 +171,7 @@ class SectorTags : public BaseTags
int extractSectorOffset(Addr addr) const;
/**
* Regenerate the block address from the tag and set.
* Regenerate the block address from the tag and location.
*
* @param block The block.
* @return the block address.