base,arch-hsail: Fix GPU build

The GPU build is currently broken due to recent changes. This fixes
the build after changes to local access, removal of getSyscallArg,
and the creation of the AMO header in base.

Change-Id: I43506f6fb0a92a61a50ecb9efa7ee279ecb21d98
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/27136
Reviewed-by: Anthony Gutierrez <anthony.gutierrez@amd.com>
Reviewed-by: Bradford Beckmann <brad.beckmann@amd.com>
Maintainer: Anthony Gutierrez <anthony.gutierrez@amd.com>
Tested-by: Gem5 Cloud Project GCB service account <345032938727@cloudbuild.gserviceaccount.com>
Author: Matthew Poremba
Date: 2020-03-26 18:22:27 -05:00
Parent: 7e303da76f
Commit: 64134b6e66
5 changed files with 17 additions and 17 deletions

View File

@@ -35,6 +35,7 @@
 #include <array>
 #include <cstdint>
 #include <functional>
+#include <memory>
 
 struct AtomicOpFunctor
 {

View File

@@ -105,9 +105,6 @@ ClDriver::open(ThreadContext *tc, int mode, int flags)
 int
 ClDriver::ioctl(ThreadContext *tc, unsigned req, Addr buf_addr)
 {
-    int index = 2;
     auto process = tc->getProcessPtr();
 
     switch (req) {
       case HSA_GET_SIZES:
         {

View File

@@ -251,31 +251,31 @@ class GPUDynInst : public GPUExecContext
     // when true, call execContinuation when response arrives
     bool useContinuation;
 
-    template<typename c0> AtomicOpFunctor*
+    template<typename c0> AtomicOpFunctorPtr
     makeAtomicOpFunctor(c0 *reg0, c0 *reg1)
     {
         if (isAtomicAnd()) {
-            return new AtomicOpAnd<c0>(*reg0);
+            return m5::make_unique<AtomicOpAnd<c0>>(*reg0);
         } else if (isAtomicOr()) {
-            return new AtomicOpOr<c0>(*reg0);
+            return m5::make_unique<AtomicOpOr<c0>>(*reg0);
         } else if (isAtomicXor()) {
-            return new AtomicOpXor<c0>(*reg0);
+            return m5::make_unique<AtomicOpXor<c0>>(*reg0);
         } else if (isAtomicCAS()) {
-            return new AtomicOpCAS<c0>(*reg0, *reg1, cu);
+            return m5::make_unique<AtomicOpCAS<c0>>(*reg0, *reg1, cu);
         } else if (isAtomicExch()) {
-            return new AtomicOpExch<c0>(*reg0);
+            return m5::make_unique<AtomicOpExch<c0>>(*reg0);
         } else if (isAtomicAdd()) {
-            return new AtomicOpAdd<c0>(*reg0);
+            return m5::make_unique<AtomicOpAdd<c0>>(*reg0);
         } else if (isAtomicSub()) {
-            return new AtomicOpSub<c0>(*reg0);
+            return m5::make_unique<AtomicOpSub<c0>>(*reg0);
         } else if (isAtomicInc()) {
-            return new AtomicOpInc<c0>();
+            return m5::make_unique<AtomicOpInc<c0>>();
         } else if (isAtomicDec()) {
-            return new AtomicOpDec<c0>();
+            return m5::make_unique<AtomicOpDec<c0>>();
         } else if (isAtomicMax()) {
-            return new AtomicOpMax<c0>(*reg0);
+            return m5::make_unique<AtomicOpMax<c0>>(*reg0);
         } else if (isAtomicMin()) {
-            return new AtomicOpMin<c0>(*reg0);
+            return m5::make_unique<AtomicOpMin<c0>>(*reg0);
         } else {
             fatal("Unrecognized atomic operation");
         }

View File

@@ -43,6 +43,7 @@
 #include "arch/x86/pagetable.hh"
 #include "arch/x86/pagetable_walker.hh"
 #include "arch/x86/regs/misc.hh"
+#include "arch/x86/regs/msr.hh"
 #include "arch/x86/x86_traits.hh"
 #include "base/bitfield.hh"
 #include "base/logging.hh"
@@ -426,7 +427,7 @@ namespace X86ISA
         // If this is true, we're dealing with a request
         // to a non-memory address space.
         if (seg == SEGMENT_REG_MS) {
-            return translateInt(mode == Read, req, tc);
+            return translateInt(mode == Mode::Read, req, tc);
         }
 
         delayedResponse = false;

View File

@@ -175,7 +175,8 @@ namespace X86ISA
      */
     std::vector<EntryList> entryList;
 
-    Fault translateInt(const RequestPtr &req, ThreadContext *tc);
+    Fault translateInt(bool read, const RequestPtr &req,
+                       ThreadContext *tc);
 
     Fault translate(const RequestPtr &req, ThreadContext *tc,
                     Translation *translation, Mode mode, bool &delayedResponse,