mem-cache: Handle zero sizes on compression
The size can be zero on special occasions, which would generate divisions by zero. This patch expands the stats to support them. It also fixes the compression factor calculation in the Multi compressor. As a side effect, now that zero sizes are handled, allow the Zero compressor to generate it. Change-Id: I9f7dee76576b09fdc9bef3e1f3f89be3726dcbd9 Signed-off-by: Daniel R. Carvalho <odanrc@yahoo.com.br> Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/33383 Reviewed-by: Nikos Nikoleris <nikos.nikoleris@arm.com> Maintainer: Nikos Nikoleris <nikos.nikoleris@arm.com> Tested-by: kokoro <noreply+kokoro@google.com>
This commit is contained in:
committed by
Daniel Carvalho
parent
58c7fc72d3
commit
53ef7c1e6c
16
src/mem/cache/compressors/base.cc
vendored
16
src/mem/cache/compressors/base.cc
vendored
@@ -153,7 +153,11 @@ Base::compress(const uint64_t* data, Cycles& comp_lat, Cycles& decomp_lat)
|
||||
// Update stats
|
||||
stats.compressions++;
|
||||
stats.compressionSizeBits += comp_size_bits;
|
||||
stats.compressionSize[std::ceil(std::log2(comp_size_bits))]++;
|
||||
if (comp_size_bits != 0) {
|
||||
stats.compressionSize[1 + std::ceil(std::log2(comp_size_bits))]++;
|
||||
} else {
|
||||
stats.compressionSize[0]++;
|
||||
}
|
||||
|
||||
// Print debug information
|
||||
DPRINTF(CacheComp, "Compressed cache line from %d to %d bits. " \
|
||||
@@ -221,11 +225,15 @@ Base::BaseStats::regStats()
|
||||
{
|
||||
Stats::Group::regStats();
|
||||
|
||||
compressionSize.init(std::log2(compressor.blkSize*8) + 1);
|
||||
// Values comprised are {0, 1, 2, 4, ..., blkSize}
|
||||
compressionSize.init(std::log2(compressor.blkSize*8) + 2);
|
||||
compressionSize.subname(0, "0");
|
||||
compressionSize.subdesc(0,
|
||||
"Number of blocks that compressed to fit in 0 bits");
|
||||
for (unsigned i = 0; i <= std::log2(compressor.blkSize*8); ++i) {
|
||||
std::string str_i = std::to_string(1 << i);
|
||||
compressionSize.subname(i, str_i);
|
||||
compressionSize.subdesc(i,
|
||||
compressionSize.subname(1+i, str_i);
|
||||
compressionSize.subdesc(1+i,
|
||||
"Number of blocks that compressed to fit in " + str_i + " bits");
|
||||
}
|
||||
|
||||
|
||||
11
src/mem/cache/compressors/multi.cc
vendored
11
src/mem/cache/compressors/multi.cc
vendored
@@ -94,9 +94,16 @@ Multi::compress(const std::vector<Chunk>& chunks, Cycles& comp_lat,
|
||||
const std::size_t size = compData->getSize();
|
||||
// If the compressed size is worse than the uncompressed size,
|
||||
// we assume the size is the uncompressed size, and thus the
|
||||
// compression factor is 1
|
||||
// compression factor is 1.
|
||||
//
|
||||
// Some compressors (notably the zero compressor) may rely on
|
||||
// extra information being stored in the tags, or added in
|
||||
// another compression layer. Their size can be 0, so it is
|
||||
// assigned the highest possible compression factor (the original
|
||||
// block's size).
|
||||
compressionFactor = (size > blk_size) ? 1 :
|
||||
alignToPowerOfTwo(std::floor(blk_size / (double) size));
|
||||
((size == 0) ? blk_size :
|
||||
alignToPowerOfTwo(std::floor(blk_size / (double) size)));
|
||||
}
|
||||
};
|
||||
struct ResultsComparator
|
||||
|
||||
4
src/mem/cache/compressors/zero.hh
vendored
4
src/mem/cache/compressors/zero.hh
vendored
@@ -106,7 +106,7 @@ class Zero::PatternX
|
||||
{
|
||||
public:
|
||||
PatternX(const DictionaryEntry bytes, const int match_location)
|
||||
: DictionaryCompressor::UncompressedPattern(X, 0, 1, match_location,
|
||||
: DictionaryCompressor::UncompressedPattern(X, 0, 0, match_location,
|
||||
bytes)
|
||||
{
|
||||
}
|
||||
@@ -118,7 +118,7 @@ class Zero::PatternZ
|
||||
public:
|
||||
PatternZ(const DictionaryEntry bytes, const int match_location)
|
||||
: DictionaryCompressor::MaskedValuePattern<0, 0xFFFFFFFFFFFFFFFF>(
|
||||
Z, 1, 1, match_location, bytes)
|
||||
Z, 1, 0, match_location, bytes)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
Reference in New Issue
Block a user