if env['BUILD_GPU']:
gpu_isa_switch_hdrs = Split('''
gpu_decoder.hh
+ gpu_isa.hh
gpu_types.hh
''')
--- /dev/null
+/*
+ * Copyright (c) 2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Anthony Gutierrez
+ */
+
+#ifndef __ARCH_HSAIL_GPU_ISA_HH__
+#define __ARCH_HSAIL_GPU_ISA_HH__
+
+#include <cstdint>
+
+#include "base/misc.hh"
+#include "gpu-compute/misc.hh"
+
+class Wavefront;
+
+namespace HsailISA
+{
+ typedef uint64_t MiscReg;
+
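+ // ISA-specific state and behaviour for a single wavefront (misc
+ // registers, PC advancement). For HSAIL this is mostly a placeholder.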
+ class GPUISA
+ {
+ public:
+ GPUISA(Wavefront &wf) : wavefront(wf)
+ {
+ }
+
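+ // Misc (special) register accessors; HSAIL does not implement misc
+ // registers, so calling either of these is fatal.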
+ void
+ writeMiscReg(int opIdx, MiscReg operandVal)
+ {
+ fatal("HSAIL does not implement misc registers yet\n");
+ }
+
+ MiscReg
+ readMiscReg(int opIdx) const
+ {
+ fatal("HSAIL does not implement misc registers yet\n");
+ }
+
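+ // HSAIL does not model a scalar unit.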
+ bool hasScalarUnit() const { return false; }
+
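+ // Compute the PC of the next instruction; HSAIL simply advances
+ // by one, matching the behaviour previously hard-coded in Wavefront.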
+ uint32_t
+ advancePC(uint32_t old_pc, GPUDynInstPtr gpuDynInst)
+ {
+ return old_pc + 1;
+ }
+
+ private:
+ Wavefront &wavefront;
+ };
+}
+
+#endif // __ARCH_HSAIL_GPU_ISA_HH__
FetchUnit::initiateFetch(Wavefront *wavefront)
{
// calculate the virtual address to fetch from the SQC
- Addr vaddr = wavefront->pc() + wavefront->instructionBuffer.size();
+ Addr vaddr = wavefront->pc();
+
+ /**
+ * The instruction buffer holds one instruction per entry, regardless
+ * of the underlying instruction's size. The PC, however, addresses
+ * instructions on a 32-bit granularity, so we must account for that here.
+ */
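+ // e.g., a buffered 8-byte instruction advances vaddr by two 32-bit slots.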
+ for (int i = 0; i < wavefront->instructionBuffer.size(); ++i) {
+ int current_inst_size =
+ wavefront->instructionBuffer.at(i)->staticInstruction()->instSize();
+ vaddr += current_inst_size / sizeof(uint32_t);
+ }
vaddr = wavefront->basePtr + vaddr * sizeof(GPUStaticInst*);
DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Initiating fetch translation: %#x\n",
GPUStaticInst *inst_ptr = decoder.decode(inst_index_ptr[i]);
assert(inst_ptr);
+
+ if (inst_ptr->instSize() == 8) {
+ /**
+ * An 8-byte instruction occupies two consecutive
+ * entries in the instruction array, the second of
+ * which contains a nullptr, so we advance two
+ * entries instead of one.
+ */
+ ++i;
+ }
+
DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: added %s\n",
computeUnit->cu_id, wavefront->simdId,
wavefront->wfSlotId, inst_ptr->disassemble());
*/
#include "gpu-compute/gpu_exec_context.hh"
+#include "gpu-compute/wavefront.hh"
GPUExecContext::GPUExecContext(ComputeUnit *_cu, Wavefront *_wf)
- : cu(_cu), wf(_wf)
+ : cu(_cu), wf(_wf), gpuISA(_wf->gpuISA())
{
}
{
return wf;
}
+
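+// Misc register accesses are forwarded to the wavefront's GPUISA object.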
+TheGpuISA::MiscReg
+GPUExecContext::readMiscReg(int opIdx) const
+{
+ return gpuISA.readMiscReg(opIdx);
+}
+
+void
+GPUExecContext::writeMiscReg(int opIdx, TheGpuISA::MiscReg operandVal)
+{
+ gpuISA.writeMiscReg(opIdx, operandVal);
+}
#ifndef __GPU_EXEC_CONTEXT_HH__
#define __GPU_EXEC_CONTEXT_HH__
+#include "arch/gpu_isa.hh"
+#include "config/the_gpu_isa.hh"
+
class ComputeUnit;
class Wavefront;
Wavefront* wavefront();
ComputeUnit* computeUnit();
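+ // ISA-agnostic misc register interface, forwarded to the wavefront's GPUISA.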
+ TheGpuISA::MiscReg readMiscReg(int opIdx) const;
+ void writeMiscReg(int opIdx, TheGpuISA::MiscReg operandVal);
+
protected:
ComputeUnit *cu;
Wavefront *wf;
+ TheGpuISA::GPUISA &gpuISA;
};
#endif // __GPU_EXEC_CONTEXT_HH__
}
Wavefront::Wavefront(const Params *p)
- : SimObject(p), callArgMem(nullptr)
+ : SimObject(p), callArgMem(nullptr), _gpuISA(*this)
{
lastTrace = 0;
simdId = p->simdId;
computeUnit->lastExecCycle[simdId]);
computeUnit->lastExecCycle[simdId] = computeUnit->totalCycles.value();
if (pc() == old_pc) {
- uint32_t new_pc = old_pc + 1;
+ uint32_t new_pc = _gpuISA.advancePC(old_pc, ii);
// PC not modified by instruction, proceed to next or pop frame
pc(new_pc);
if (new_pc == rpc()) {
#include <stack>
#include <vector>
+#include "arch/gpu_isa.hh"
#include "base/misc.hh"
#include "base/types.hh"
+#include "config/the_gpu_isa.hh"
#include "gpu-compute/condition_register_state.hh"
#include "gpu-compute/lds_state.hh"
#include "gpu-compute/misc.hh"
*/
void setContext(const void *in);
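+ // Accessor for this wavefront's ISA state object.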
+ TheGpuISA::GPUISA&
+ gpuISA()
+ {
+ return _gpuISA;
+ }
+
private:
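+ // Per-wavefront ISA state, constructed with a reference back to this wavefront.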
+ TheGpuISA::GPUISA _gpuISA;
/**
* Stack containing Control Flow Graph nodes (i.e., kernel instructions)
* to be visited by the wavefront, and the associated execution masks. The