arch,cpu,mem: Replace the mmapped IPR mechanism with local accesses.

The new local access mechanism installs a callback in the request which
implements what the mmapped IPR was doing. That avoids having to have
stubs in ISAs that don't have mmapped IPRs, avoids having to encode
what to do to communicate from the TLB and the mmapped IPR functions,
and gets rid of another global ISA interface function and header files.

Jira Issue: https://gem5.atlassian.net/browse/GEM5-187

Change-Id: I772c2ae2ca3830a4486919ce9804560c0f2d596a
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/23188
Reviewed-by: Matthew Poremba <matthew.poremba@amd.com>
Maintainer: Gabe Black <gabeblack@google.com>
Tested-by: kokoro <noreply+kokoro@google.com>
This commit is contained in:
Gabe Black
2019-11-25 19:41:51 -08:00
parent 082ec1a9c7
commit ebd62eff3c
24 changed files with 186 additions and 819 deletions

View File

@@ -129,8 +129,7 @@ MSHR::TargetList::updateWriteFlags(PacketPtr pkt)
// strictly ordered)
const Request::FlagsType no_merge_flags =
Request::UNCACHEABLE | Request::STRICT_ORDER |
Request::MMAPPED_IPR | Request::PRIVILEGED |
Request::LLSC | Request::MEM_SWAP |
Request::PRIVILEGED | Request::LLSC | Request::MEM_SWAP |
Request::MEM_SWAP_COND | Request::SECURE;
const auto &req_flags = pkt->req->getFlags();
bool compat_write = !req_flags.isSet(no_merge_flags);

View File

@@ -76,7 +76,9 @@ namespace ContextSwitchTaskId {
};
}
class Packet;
class Request;
class ThreadContext;
typedef std::shared_ptr<Request> RequestPtr;
typedef uint16_t MasterID;
@@ -119,8 +121,6 @@ class Request
* the UNCACHEABLE flag is set as well.
*/
STRICT_ORDER = 0x00000800,
/** This request is to a memory mapped register. */
MMAPPED_IPR = 0x00002000,
/** This request is made in privileged mode. */
PRIVILEGED = 0x00008000,
@@ -247,6 +247,9 @@ class Request
ARG_SEGMENT = 0x00000800,
};
using LocalAccessor =
std::function<Cycles(ThreadContext *tc, Packet *pkt)>;
private:
typedef uint16_t PrivateFlagsType;
typedef ::Flags<PrivateFlagsType> PrivateFlags;
@@ -382,6 +385,8 @@ class Request
/** A pointer to an atomic operation */
AtomicOpFunctorPtr atomicOpFunctor;
LocalAccessor _localAccessor;
public:
/**
@@ -478,10 +483,10 @@ class Request
_taskId(other._taskId), _asid(other._asid), _vaddr(other._vaddr),
_extraData(other._extraData), _contextId(other._contextId),
_pc(other._pc), _reqInstSeqNum(other._reqInstSeqNum),
_localAccessor(other._localAccessor),
translateDelta(other.translateDelta),
accessDelta(other.accessDelta), depth(other.depth)
{
atomicOpFunctor.reset(other.atomicOpFunctor ?
other.atomicOpFunctor->clone() : nullptr);
}
@@ -536,6 +541,7 @@ class Request
accessDelta = 0;
translateDelta = 0;
atomicOpFunctor = std::move(amo_op);
_localAccessor = nullptr;
}
/**
@@ -663,6 +669,17 @@ class Request
return _time;
}
/** Is this request for a local memory mapped resource/register? */
bool isLocalAccess() { return (bool)_localAccessor; }
/** Set the function which will enact that access. */
void setLocalAccessor(LocalAccessor acc) { _localAccessor = acc; }
/** Perform the installed local access. */
Cycles
localAccessor(ThreadContext *tc, Packet *pkt)
{
return _localAccessor(tc, pkt);
}
/**
* Accessor for atomic-op functor.
*/
@@ -895,7 +912,6 @@ class Request
bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
bool isSecure() const { return _flags.isSet(SECURE); }
bool isPTWalk() const { return _flags.isSet(PT_WALK); }
bool isAcquire() const { return _flags.isSet(ACQUIRE); }