mem-cache: Use RP for data expansion victimization

When searching for victims of a data expansion a simple approach to
make room for the expanded block is to evict every co-allocatable
block. This, however, ignores replacement policies and tends to be
inefficient. Besides, some cache compaction policies do not allow
blocks that changed their compression ratio to be allocated in the
same location (e.g., Skewed Compressed Caches), so they must be
moved elsewhere.

The replacement-policy approach asks the replacement policy which
block(s) would be best to evict in order to make room for the
expanded block. The alternative approach simply evicts all
co-allocated entries. If the replacement policy selects the
superblock of the block being expanded, we must make sure the
latter is not evicted or moved by mistake.

This patch also allows the user to select which approach they would
like to use.

Change-Id: Iae57cf26dac7218c51ff0169a5cfcf3d6f8ea28a
Signed-off-by: Daniel R. Carvalho <odanrc@yahoo.com.br>
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/36577
Reviewed-by: Nikos Nikoleris <nikos.nikoleris@arm.com>
Maintainer: Nikos Nikoleris <nikos.nikoleris@arm.com>
Tested-by: kokoro <noreply+kokoro@google.com>
This commit is contained in:
Daniel R. Carvalho
2019-06-07 15:36:11 +02:00
committed by Daniel Carvalho
parent f415f27414
commit a2b7f9544c
5 changed files with 59 additions and 17 deletions

View File

@@ -103,6 +103,8 @@ class BaseCache(ClockedObject):
"Replacement policy")
compressor = Param.BaseCacheCompressor(NULL, "Cache compressor.")
replace_expansions = Param.Bool(True, "Apply replacement policy to " \
"decide which blocks should be evicted on a data expansion")
sequential_access = Param.Bool(False,
"Whether to access tags and data sequentially")

60
src/mem/cache/base.cc vendored
View File

@@ -99,6 +99,7 @@ BaseCache::BaseCache(const BaseCacheParams &p, unsigned blk_size)
forwardSnoops(true),
clusivity(p.clusivity),
isReadOnly(p.is_read_only),
replaceExpansions(p.replace_expansions),
blocked(0),
order(0),
noTargetMSHR(nullptr),
@@ -831,7 +832,7 @@ BaseCache::handleEvictions(std::vector<CacheBlk*> &evict_blks,
}
bool
BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
BaseCache::updateCompressionData(CacheBlk *&blk, const uint64_t* data,
PacketList &writebacks)
{
// tempBlock does not exist in the tags, so don't do anything for it.
@@ -839,11 +840,6 @@ BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
return true;
}
// Get superblock of the given block
CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
const SuperBlk* superblock = static_cast<const SuperBlk*>(
compression_blk->getSectorBlock());
// The compressor is called to compress the updated data, so that its
// metadata can be updated.
Cycles compression_lat = Cycles(0);
@@ -857,24 +853,53 @@ BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
// the bigger block
// Get previous compressed size
CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
M5_VAR_USED const std::size_t prev_size = compression_blk->getSizeBits();
// Check if new data is co-allocatable
const SuperBlk* superblock =
static_cast<const SuperBlk*>(compression_blk->getSectorBlock());
const bool is_co_allocatable = superblock->isCompressed(compression_blk) &&
superblock->canCoAllocate(compression_size);
// If block was compressed, possibly co-allocated with other blocks, and
// cannot be co-allocated anymore, one or more blocks must be evicted to
// make room for the expanded block. As of now we decide to evict the co-
// allocated blocks to make room for the expansion, but other approaches
// that take the replacement data of the superblock into account may
// generate better results
// make room for the expanded block
const bool was_compressed = compression_blk->isCompressed();
if (was_compressed && !is_co_allocatable) {
std::vector<CacheBlk*> evict_blks;
for (const auto& sub_blk : superblock->blks) {
if (sub_blk->isValid() && (compression_blk != sub_blk)) {
evict_blks.push_back(sub_blk);
bool victim_itself = false;
CacheBlk *victim = nullptr;
if (replaceExpansions) {
victim = tags->findVictim(regenerateBlkAddr(blk),
blk->isSecure(), compression_size, evict_blks);
// It is valid to return nullptr if there is no victim
if (!victim) {
return false;
}
// If the victim block is itself the block won't need to be moved,
// and the victim should not be evicted
if (blk == victim) {
victim_itself = true;
auto it = std::find_if(evict_blks.begin(), evict_blks.end(),
[&blk](CacheBlk* evict_blk){ return evict_blk == blk; });
evict_blks.erase(it);
}
// Print victim block's information
DPRINTF(CacheRepl, "Data expansion replacement victim: %s\n",
victim->print());
} else {
// If we do not move the expanded block, we must make room for
// the expansion to happen, so evict every co-allocated block
superblock = static_cast<const SuperBlk*>(
compression_blk->getSectorBlock());
for (auto& sub_blk : superblock->blks) {
if (sub_blk->isValid() && (blk != sub_blk)) {
evict_blks.push_back(sub_blk);
}
}
}
@@ -885,9 +910,16 @@ BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
// Update the number of data expansions
stats.dataExpansions++;
DPRINTF(CacheComp, "Data expansion: expanding [%s] from %d to %d bits"
"\n", blk->print(), prev_size, compression_size);
if (!victim_itself && replaceExpansions) {
// Move the block's contents to the invalid block so that it now
// co-allocates with the other existing superblock entry
tags->moveBlock(blk, victim);
blk = victim;
compression_blk = static_cast<CompressionBlk*>(blk);
}
}
// We always store compressed blocks when possible

10
src/mem/cache/base.hh vendored
View File

@@ -677,7 +677,7 @@ class BaseCache : public ClockedObject
* @param writebacks List for any writebacks that need to be performed.
* @return Whether operation is successful or not.
*/
bool updateCompressionData(CacheBlk *blk, const uint64_t* data,
bool updateCompressionData(CacheBlk *&blk, const uint64_t* data,
PacketList &writebacks);
/**
@@ -893,6 +893,14 @@ class BaseCache : public ClockedObject
*/
const bool isReadOnly;
/**
* When a data expansion of a compressed block happens it will no
* longer be able to co-allocate where it currently is. If true, the
* replacement policy is called to choose a new location for the block.
* Otherwise, all co-allocated blocks are evicted.
*/
const bool replaceExpansions;
/**
* Bit vector of the blocking reasons for the access path.
* @sa #BlockedCause

View File

@@ -45,7 +45,7 @@ SectorSubBlk::setSectorBlock(SectorBlk* sector_blk)
_sectorBlk = sector_blk;
}
const SectorBlk*
SectorBlk*
SectorSubBlk::getSectorBlock() const
{
return _sectorBlk;

View File

@@ -86,7 +86,7 @@ class SectorSubBlk : public CacheBlk
*
* @return The sector block pointer.
*/
const SectorBlk* getSectorBlock() const;
SectorBlk* getSectorBlock() const;
/**
* Set offset of this sub-block within the sector.