diff --git a/src/base/amo.hh b/src/base/amo.hh
index 718b346bc4..44dec8b6a3 100644
--- a/src/base/amo.hh
+++ b/src/base/amo.hh
@@ -35,6 +35,7 @@
 #include <array>
 #include <cstdint>
 #include <functional>
+#include <memory>
 
 struct AtomicOpFunctor
 {
diff --git a/src/gpu-compute/cl_driver.cc b/src/gpu-compute/cl_driver.cc
index d8a461874f..ee86017ec4 100644
--- a/src/gpu-compute/cl_driver.cc
+++ b/src/gpu-compute/cl_driver.cc
@@ -105,9 +105,6 @@ ClDriver::open(ThreadContext *tc, int mode, int flags)
 int
 ClDriver::ioctl(ThreadContext *tc, unsigned req, Addr buf_addr)
 {
-    int index = 2;
-    auto process = tc->getProcessPtr();
-
     switch (req) {
       case HSA_GET_SIZES:
         {
diff --git a/src/gpu-compute/gpu_dyn_inst.hh b/src/gpu-compute/gpu_dyn_inst.hh
index a4a6ffb931..bee08e3dfb 100644
--- a/src/gpu-compute/gpu_dyn_inst.hh
+++ b/src/gpu-compute/gpu_dyn_inst.hh
@@ -251,31 +251,31 @@ class GPUDynInst : public GPUExecContext
     // when true, call execContinuation when response arrives
     bool useContinuation;
 
-    template<typename c0> AtomicOpFunctor*
+    template<typename c0> AtomicOpFunctorPtr
     makeAtomicOpFunctor(c0 *reg0, c0 *reg1)
     {
         if (isAtomicAnd()) {
-            return new AtomicOpAnd<c0>(*reg0);
+            return m5::make_unique<AtomicOpAnd<c0>>(*reg0);
         } else if (isAtomicOr()) {
-            return new AtomicOpOr<c0>(*reg0);
+            return m5::make_unique<AtomicOpOr<c0>>(*reg0);
         } else if (isAtomicXor()) {
-            return new AtomicOpXor<c0>(*reg0);
+            return m5::make_unique<AtomicOpXor<c0>>(*reg0);
         } else if (isAtomicCAS()) {
-            return new AtomicOpCAS<c0>(*reg0, *reg1, cu);
+            return m5::make_unique<AtomicOpCAS<c0>>(*reg0, *reg1, cu);
         } else if (isAtomicExch()) {
-            return new AtomicOpExch<c0>(*reg0);
+            return m5::make_unique<AtomicOpExch<c0>>(*reg0);
         } else if (isAtomicAdd()) {
-            return new AtomicOpAdd<c0>(*reg0);
+            return m5::make_unique<AtomicOpAdd<c0>>(*reg0);
         } else if (isAtomicSub()) {
-            return new AtomicOpSub<c0>(*reg0);
+            return m5::make_unique<AtomicOpSub<c0>>(*reg0);
         } else if (isAtomicInc()) {
-            return new AtomicOpInc<c0>();
+            return m5::make_unique<AtomicOpInc<c0>>();
         } else if (isAtomicDec()) {
-            return new AtomicOpDec<c0>();
+            return m5::make_unique<AtomicOpDec<c0>>();
         } else if (isAtomicMax()) {
-            return new AtomicOpMax<c0>(*reg0);
+            return m5::make_unique<AtomicOpMax<c0>>(*reg0);
         } else if (isAtomicMin()) {
-            return new AtomicOpMin<c0>(*reg0);
+            return m5::make_unique<AtomicOpMin<c0>>(*reg0);
         } else {
             fatal("Unrecognized atomic operation");
         }
diff --git a/src/gpu-compute/gpu_tlb.cc b/src/gpu-compute/gpu_tlb.cc
index 12fb9aa7e7..2ae40dadf2 100644
--- a/src/gpu-compute/gpu_tlb.cc
+++ b/src/gpu-compute/gpu_tlb.cc
@@ -43,6 +43,7 @@
 #include "arch/x86/pagetable.hh"
 #include "arch/x86/pagetable_walker.hh"
 #include "arch/x86/regs/misc.hh"
+#include "arch/x86/regs/msr.hh"
 #include "arch/x86/x86_traits.hh"
 #include "base/bitfield.hh"
 #include "base/logging.hh"
@@ -426,7 +427,7 @@ namespace X86ISA
         // If this is true, we're dealing with a request
         // to a non-memory address space.
         if (seg == SEGMENT_REG_MS) {
-            return translateInt(req, tc);
+            return translateInt(mode == Mode::Read, req, tc);
         }
 
         delayedResponse = false;
diff --git a/src/gpu-compute/gpu_tlb.hh b/src/gpu-compute/gpu_tlb.hh
index 6ed4ba1196..dbd3a16f3c 100644
--- a/src/gpu-compute/gpu_tlb.hh
+++ b/src/gpu-compute/gpu_tlb.hh
@@ -175,7 +175,8 @@ namespace X86ISA
          */
         std::vector<EntryList> entryList;
 
-        Fault translateInt(const RequestPtr &req, ThreadContext *tc);
+        Fault translateInt(bool read, const RequestPtr &req,
+                           ThreadContext *tc);
 
         Fault translate(const RequestPtr &req, ThreadContext *tc,
                 Translation *translation, Mode mode, bool &delayedResponse,
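
Note: the gpu_dyn_inst.hh hunk changes makeAtomicOpFunctor() to return an
AtomicOpFunctorPtr instead of an owning raw AtomicOpFunctor*. Judging from
the m5::make_unique calls and the new <memory> include in amo.hh,
AtomicOpFunctorPtr is a std::unique_ptr alias, so callers now release the
functor automatically. Below is a minimal, self-contained sketch of that
ownership pattern; it is illustrative rather than gem5 code (makeAddFunctor
is a hypothetical name, and it uses C++14 std::make_unique where the patch
goes through gem5's m5::make_unique backport):

    #include <cstdint>
    #include <cstdio>
    #include <memory>

    struct AtomicOpFunctor
    {
        virtual void operator()(uint8_t *p) = 0;
        virtual ~AtomicOpFunctor() {}
    };

    // The alias the patch's return type presumably expands to.
    typedef std::unique_ptr<AtomicOpFunctor> AtomicOpFunctorPtr;

    template <typename T>
    struct AtomicOpAdd : public AtomicOpFunctor
    {
        T a;
        AtomicOpAdd(T _a) : a(_a) {}
        void operator()(uint8_t *p) override { *reinterpret_cast<T *>(p) += a; }
    };

    // Factory in the style of makeAtomicOpFunctor(): the unique_ptr return
    // type documents that the caller owns the functor.
    template <typename T>
    AtomicOpFunctorPtr
    makeAddFunctor(T operand)
    {
        return std::make_unique<AtomicOpAdd<T>>(operand);
    }

    int
    main()
    {
        uint32_t mem = 40;
        AtomicOpFunctorPtr op = makeAddFunctor<uint32_t>(2);
        (*op)(reinterpret_cast<uint8_t *>(&mem));
        std::printf("%u\n", mem); // prints 42
        return 0; // 'op' destroys the functor here; no manual delete needed
    }

The practical effect is that a call site can transfer the functor onward
with std::move() rather than track which component is responsible for
deleting it.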
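
Note: the gpu_tlb hunks track the reworked x86 translateInt(), which now
takes the access direction as a leading bool; the GPU TLB derives it from
the translation mode at the call site, and the new msr.hh include
presumably supplies the MSR-address lookup that path uses. A hypothetical,
self-contained sketch of that calling convention (stand-in types and
return values, not the gem5 API):

    #include <cassert>
    #include <string>

    enum class Mode { Read, Write };

    struct Request { unsigned vaddr; };  // stand-in for gem5's RequestPtr
    struct ThreadContext {};             // stand-in for gem5's ThreadContext

    // Stand-in for translateInt(bool read, ...): an MSR read and an MSR
    // write map to different internal operations, selected by 'read'.
    std::string
    translateInt(bool read, const Request &req, ThreadContext *tc)
    {
        return read ? "msr-read" : "msr-write";
    }

    std::string
    translate(const Request &req, ThreadContext *tc, Mode mode, bool msrSpace)
    {
        if (msrSpace) {
            // The call-site pattern the patch adopts: derive the flag
            // from the access mode.
            return translateInt(mode == Mode::Read, req, tc);
        }
        return "normal-translation";
    }

    int
    main()
    {
        Request req{0x10};
        ThreadContext tc;
        assert(translate(req, &tc, Mode::Read, true) == "msr-read");
        assert(translate(req, &tc, Mode::Write, true) == "msr-write");
        return 0;
    }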