misc: Rename Enums namespace as enums

As part of recent decisions regarding namespace
naming conventions, all namespaces will be changed
to snake case.

::Enums became ::enums.

Change-Id: I39b5fb48817ad16abbac92f6254284b37fc90c40
Signed-off-by: Daniel R. Carvalho <odanrc@yahoo.com.br>
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/45420
Reviewed-by: Jason Lowe-Power <power.jg@gmail.com>
Maintainer: Jason Lowe-Power <power.jg@gmail.com>
Tested-by: kokoro <noreply+kokoro@google.com>
Author: Daniel R. Carvalho
Date: 2021-05-06 16:18:58 -03:00
Committed by: Daniel Carvalho
Parent: 06fb0753fe
Commit: 4dd099ba3d
77 changed files with 384 additions and 381 deletions
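
To illustrate the mechanical nature of the change, here is a minimal before/after sketch of code consuming one of the generated enums touched below (the include path follows gem5's generated-header layout; the isFcfs helper is invented for illustration):

#include "enums/MemSched.hh"

// Before this commit: policy == Enums::fcfs
// After this commit: the generated namespace is snake_case.
bool
isFcfs(enums::MemSched policy)
{
    return policy == enums::fcfs;
}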

View File

@@ -1401,7 +1401,7 @@ void
BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
if (from_cache && blk && blk->isValid() &&
-!blk->isSet(CacheBlk::DirtyBit) && clusivity == Enums::mostly_excl) {
+!blk->isSet(CacheBlk::DirtyBit) && clusivity == enums::mostly_excl) {
// if we have responded to a cache, and our block is still
// valid, but not dirty, and this cache is mostly exclusive
// with respect to the cache above, drop the block
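
For context, Clusivity is one of the enums generated by the build from a Python Enum parameter; after this change the generated header would look roughly like the sketch below (exact generated contents may differ):

// enums/Clusivity.hh (generated; sketch only)
namespace enums {

enum Clusivity
{
    mostly_incl,
    mostly_excl,
    Num_Clusivity
};

} // namespace enums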

View File

@@ -437,7 +437,7 @@ class BaseCache : public ClockedObject
*/
inline bool allocOnFill(MemCmd cmd) const
{
-return clusivity == Enums::mostly_incl ||
+return clusivity == enums::mostly_incl ||
cmd == MemCmd::WriteLineReq ||
cmd == MemCmd::ReadReq ||
cmd == MemCmd::WriteReq ||
@@ -929,7 +929,7 @@ class BaseCache : public ClockedObject
* fill into both this cache and the cache above on a miss. Note
* that we currently do not support strict clusivity policies.
*/
-const Enums::Clusivity clusivity;
+const enums::Clusivity clusivity;
/**
* Is this cache read only, for example the instruction cache, or

View File

@@ -527,7 +527,7 @@ Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
// * this cache is mostly exclusive and will not fill (since
// it does not fill it will have to writeback the dirty data
// immediately which generates unnecessary writebacks).
-bool force_clean_rsp = isReadOnly || clusivity == Enums::mostly_excl;
+bool force_clean_rsp = isReadOnly || clusivity == enums::mostly_excl;
cmd = needsWritable ? MemCmd::ReadExReq :
(force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
}

View File

@@ -568,7 +568,7 @@ MemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay)
} else {
DPRINTF(MemCtrl, "Single request, going to a busy rank\n");
}
-} else if (memSchedPolicy == Enums::fcfs) {
+} else if (memSchedPolicy == enums::fcfs) {
// check if there is a packet going to a free rank
for (auto i = queue.begin(); i != queue.end(); ++i) {
MemPacket* mem_pkt = *i;
@@ -577,7 +577,7 @@ MemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay)
break;
}
}
-} else if (memSchedPolicy == Enums::frfcfs) {
+} else if (memSchedPolicy == enums::frfcfs) {
ret = chooseNextFRFCFS(queue, extra_col_delay);
} else {
panic("No scheduling policy chosen\n");
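
The fcfs branch above takes the oldest packet whose rank can accept a command. A self-contained restatement, with isAvailable() standing in for gem5's actual rank-readiness check:

#include <list>

struct MemPacket { int rank; };

// Placeholder for the real readiness test on the packet's rank.
static bool isAvailable(int rank) { return rank >= 0; }

std::list<MemPacket*>::iterator
chooseFcfs(std::list<MemPacket*> &queue)
{
    // Scan in arrival order; the first packet headed to a free rank
    // wins. Returning end() signals that nothing is ready yet.
    for (auto i = queue.begin(); i != queue.end(); ++i) {
        if (isAvailable((*i)->rank))
            return i;
    }
    return queue.end();
}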

View File

@@ -488,7 +488,7 @@ class MemCtrl : public qos::MemCtrl
* Memory controller configuration initialized based on parameter
* values.
*/
-Enums::MemSched memSchedPolicy;
+enums::MemSched memSchedPolicy;
/**
* Pipeline latency of the controller frontend. The frontend

View File

@@ -101,7 +101,7 @@ MemInterface::decodePacket(const PacketPtr pkt, Addr pkt_addr,
// we have removed the lowest order address bits that denote the
// position within the column
-if (addrMapping == Enums::RoRaBaChCo || addrMapping == Enums::RoRaBaCoCh) {
+if (addrMapping == enums::RoRaBaChCo || addrMapping == enums::RoRaBaCoCh) {
// the lowest order bits denote the column to ensure that
// sequential cache lines occupy the same row
addr = addr / burstsPerRowBuffer;
@@ -118,7 +118,7 @@ MemInterface::decodePacket(const PacketPtr pkt, Addr pkt_addr,
// lastly, get the row bits, no need to remove them from addr
row = addr % rowsPerBank;
-} else if (addrMapping == Enums::RoCoRaBaCh) {
+} else if (addrMapping == enums::RoCoRaBaCh) {
// with emerging technologies, could have small page size with
// interleaving granularity greater than row buffer
if (burstsPerStripe > burstsPerRowBuffer) {
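
The decoding in this hunk is plain divide/modulo arithmetic on the burst-aligned address; the order in which fields are peeled off is what distinguishes the mappings. A simplified sketch of the RoRaBaChCo case, using the member names visible in the hunks where available (banksPerRank and ranksPerChannel are assumed; the free-standing function is for illustration only):

#include <cstdint>

struct DecodedAddr { uint64_t row, rank, bank; };

DecodedAddr
decodeRoRaBaChCo(uint64_t addr, uint64_t burstsPerRowBuffer,
                 uint64_t banksPerRank, uint64_t ranksPerChannel,
                 uint64_t rowsPerBank)
{
    DecodedAddr d;
    // Column bits sit at the bottom so sequential cache lines land
    // in the same row; strip them first.
    addr /= burstsPerRowBuffer;
    // Then peel off the bank and rank fields.
    d.bank = addr % banksPerRank;
    addr /= banksPerRank;
    d.rank = addr % ranksPerChannel;
    addr /= ranksPerChannel;
    // Whatever remains selects the row.
    d.row = addr % rowsPerBank;
    return d;
}
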
@@ -592,14 +592,14 @@ DRAMInterface::doBurstAccess(MemPacket* mem_pkt, Tick next_burst_at,
++bank_ref.rowAccesses;
// if we reached the max, then issue with an auto-precharge
-bool auto_precharge = pageMgmt == Enums::close ||
+bool auto_precharge = pageMgmt == enums::close ||
bank_ref.rowAccesses == maxAccessesPerRow;
// if we did not hit the limit, we might still want to
// auto-precharge
if (!auto_precharge &&
-(pageMgmt == Enums::open_adaptive ||
-pageMgmt == Enums::close_adaptive)) {
+(pageMgmt == enums::open_adaptive ||
+pageMgmt == enums::close_adaptive)) {
// a twist on the open and close page policies:
// 1) open_adaptive page policy does not blindly keep the
// page open, but close it if there are no row hits, and there
@@ -642,7 +642,7 @@ DRAMInterface::doBurstAccess(MemPacket* mem_pkt, Tick next_burst_at,
// have a bank conflict
// 2) close_adaptive policy and we have not got any more hits
auto_precharge = !got_more_hits &&
-(got_bank_conflict || pageMgmt == Enums::close_adaptive);
+(got_bank_conflict || pageMgmt == enums::close_adaptive);
}
// DRAMPower trace command to be written
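
A condensed restatement of the auto-precharge decision spread across the two hunks above, with the inputs passed explicitly (a sketch, not gem5's actual interface):

#include "enums/PageManage.hh"

bool
shouldAutoPrecharge(enums::PageManage pageMgmt, unsigned rowAccesses,
                    unsigned maxAccessesPerRow, bool gotMoreHits,
                    bool gotBankConflict)
{
    // The close policy, or an open row that reached its access cap,
    // always precharges.
    if (pageMgmt == enums::close || rowAccesses == maxAccessesPerRow)
        return true;
    // The adaptive policies close the row once no further row hits
    // are queued; open_adaptive additionally requires a waiting bank
    // conflict before giving up the open row.
    if (pageMgmt == enums::open_adaptive ||
        pageMgmt == enums::close_adaptive) {
        return !gotMoreHits &&
            (gotBankConflict || pageMgmt == enums::close_adaptive);
    }
    return false;
}
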
@@ -841,13 +841,13 @@ DRAMInterface::init()
// a bit of sanity checks on the interleaving, save it for here to
// ensure that the system pointer is initialised
if (range.interleaved()) {
-if (addrMapping == Enums::RoRaBaChCo) {
+if (addrMapping == enums::RoRaBaChCo) {
if (rowBufferSize != range.granularity()) {
fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
"address map\n", name());
}
-} else if (addrMapping == Enums::RoRaBaCoCh ||
-addrMapping == Enums::RoCoRaBaCh) {
+} else if (addrMapping == enums::RoRaBaCoCh ||
+addrMapping == enums::RoCoRaBaCh) {
// for the interleavings with channel bits in the bottom,
// if the system uses a channel striping granularity that
// is larger than the DRAM burst size, then map the

View File

@@ -120,7 +120,7 @@ class MemInterface : public AbstractMemory
* Memory controller configuration initialized based on parameter
* values.
*/
-Enums::AddrMap addrMapping;
+enums::AddrMap addrMapping;
/**
* General device and channel characteristics
@@ -752,7 +752,7 @@ class DRAMInterface : public MemInterface
const Tick rdToWrDlySameBG;
-Enums::PageManage pageMgmt;
+enums::PageManage pageMgmt;
/**
* Max column accesses (read and write) per row, before forcefully
* closing it.

View File

@@ -54,11 +54,11 @@ QueuePolicy*
QueuePolicy::create(const QoSMemCtrlParams &p)
{
switch (p.qos_q_policy) {
-case Enums::QoSQPolicy::fifo:
+case enums::QoSQPolicy::fifo:
return new FifoQueuePolicy(p);
-case Enums::QoSQPolicy::lrg:
+case enums::QoSQPolicy::lrg:
return new LrgQueuePolicy(p);
-case Enums::QoSQPolicy::lifo:
+case enums::QoSQPolicy::lifo:
default:
return new LifoQueuePolicy(p);
}

View File

@@ -51,10 +51,10 @@ NetworkBridge::NetworkBridge(const Params &p)
lastScheduledAt = 0;
nLink = p.link;
-if (mType == Enums::LINK_OBJECT) {
+if (mType == enums::LINK_OBJECT) {
nLink->setLinkConsumer(this);
setSourceQueue(nLink->getBuffer(), nLink);
-} else if (mType == Enums::OBJECT_LINK) {
+} else if (mType == enums::OBJECT_LINK) {
nLink->setSourceQueue(&linkBuffer, this);
setLinkConsumer(nLink);
} else {
@@ -122,7 +122,7 @@ NetworkBridge::flitisizeAndSend(flit *t_flit)
// Calculate the target-width
int target_width = bitWidth;
int cur_width = nLink->bitWidth;
-if (mType == Enums::OBJECT_LINK) {
+if (mType == enums::OBJECT_LINK) {
target_width = nLink->bitWidth;
cur_width = bitWidth;
}
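
One plausible reading of the width bookkeeping above: when a flit crosses from a wider link to a narrower one, it must be carved into enough sub-flits to carry the same payload. A hedged sketch of that ratio (the helper name is invented):

// Number of narrow sub-flits needed per wide flit, rounding up so a
// partially filled sub-flit is still sent.
int
subFlitsNeeded(int cur_width, int target_width)
{
    return (cur_width + target_width - 1) / target_width;
}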