#include <cstdint>
+#include "arch/hsail/gpu_types.hh"
#include "base/misc.hh"
#include "gpu-compute/misc.hh"
uint32_t
advancePC(uint32_t old_pc, GPUDynInstPtr gpuDynInst)
{
- return old_pc + 1;
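+ // the PC is now a byte address, so step it by the size of one raw machine instruction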
+ return old_pc + sizeof(RawMachInst);
}
private:
// A raw machine instruction represents the raw bits that
// our model uses to represent an actual instruction. In
// the case of HSAIL this is just an index into a list of
// instruction objects.
- typedef uint64_t RawMachInst;
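+ // a RawMachInst is now a 32-bit index, so each instruction occupies 4 bytes of code space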
+ typedef uint32_t RawMachInst;
// The MachInst is a representation of an instruction
// that has more information than just the machine code.
{
Wavefront *w = gpuDynInst->wavefront();
- const uint32_t curr_pc = w->pc();
+ const uint32_t curr_pc M5_VAR_USED = w->pc();
const uint32_t curr_rpc = w->rpc();
const VectorMask curr_mask = w->execMask();
}
// not taken branch
- const uint32_t false_pc = curr_pc + 1;
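+ // the not-taken (fall-through) path resumes at the next instruction's byte address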
+ const uint32_t false_pc = nextInstAddr();
assert(true_pc != false_pc);
if (false_pc != rpc && true_mask.count() < curr_mask.count()) {
VectorMask false_mask = curr_mask & ~true_mask;
* Defines the base class representing HSAIL GPU static instructions.
*/
+#include "arch/hsail/gpu_types.hh"
#include "gpu-compute/gpu_static_inst.hh"
class BrigObject;
public:
HsailGPUStaticInst(const BrigObject *obj, const std::string &opcode);
void generateDisassembly();
- uint32_t instSize() { return 4; }
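+ // HSAIL "machine code" is a list of RawMachInst entries, so every inst is sizeof(RawMachInst) bytes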
+ int instSize() const override { return sizeof(RawMachInst); }
bool isValid() const override { return true; }
protected:
kernelInfo[i].code_offs = code_offs;
name_offs += k->name().size() + 1;
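+ // the guest-visible code segment holds RawMachInst entries, not host GPUStaticInst pointers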
- code_offs += k->numInsts() * sizeof(GPUStaticInst*);
+ code_offs += k->numInsts() * sizeof(TheGpuISA::RawMachInst);
}
}
HsaCode *k = kernels[i];
// add one for terminating '\0'
sizes->string_table_size += k->name().size() + 1;
- sizes->code_size += k->numInsts() * sizeof(GPUStaticInst*);
+ sizes->code_size +=
+ k->numInsts() * sizeof(TheGpuISA::RawMachInst);
}
sizes.copyOut(tc->getMemProxy());
* instructions on a 32b granularity so we must account for that here.
*/
for (int i = 0; i < wavefront->instructionBuffer.size(); ++i) {
- int current_inst_size =
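+ // accumulate the buffered instructions' sizes in bytes; no pointer-size scaling is needed anymore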
+ vaddr +=
wavefront->instructionBuffer.at(i)->staticInstruction()->instSize();
- vaddr += current_inst_size / sizeof(uint32_t);
}
- vaddr = wavefront->basePtr + vaddr * sizeof(GPUStaticInst*);
+ vaddr = wavefront->basePtr + vaddr;
DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Initiating fetch translation: %#x\n",
computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId, vaddr);
GPUStaticInst::GPUStaticInst(const std::string &opcode)
: executed_as(Enums::SC_NONE), opcode(opcode),
- _instNum(0)
+ _instNum(0), _instAddr(0)
{
setFlag(NoOrder);
}
{
public:
GPUStaticInst(const std::string &opcode);
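+ // byte address of this instruction within the kernel's code, and that of the instruction after it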
+ void instAddr(int inst_addr) { _instAddr = inst_addr; }
+ int instAddr() const { return _instAddr; }
+ int nextInstAddr() const { return _instAddr + instSize(); }
void instNum(int num) { _instNum = num; }
bool isGloballyCoherent() const { return _flags[GloballyCoherent]; }
bool isSystemCoherent() const { return _flags[SystemCoherent]; }
- virtual uint32_t instSize() = 0;
+ virtual int instSize() const = 0;
// only used for memory instructions
virtual void
const std::string opcode;
std::string disassembly;
int _instNum;
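+ // byte address of this instruction, relative to the start of the code section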
+ int _instAddr;
/**
* Identifier of the immediate post-dominator instruction.
*/
int numDstRegOperands() { return 0; }
int numSrcRegOperands() { return 0; }
bool isValid() const { return true; }
- uint32_t instSize() { return 0; }
+ int instSize() const override { return 0; }
};
#endif // __GPU_STATIC_INST_HH__
const BrigBase *endPtr =
obj->getCodeSectionEntry(code_dir->nextModuleEntry);
+ // the instruction's byte address (relative to the base addr
+ // of the code section)
+ int inst_addr = 0;
+ // the index that points to the instruction in the instruction
+ // array
int inst_idx = 0;
std::vector<GPUStaticInst*> instructions;
int funcarg_size_scope = 0;
"kind_label, label is: %s \n",
obj->getString(lbl->name));
- labelMap.addLabel(lbl, inst_idx, obj);
+ labelMap.addLabel(lbl, inst_addr, obj);
}
break;
if (iptr) {
DPRINTF(HSAILObject, "Initializing code, processing inst "
- "#%d idx %d: OPCODE=%d\n",
- inst_idx, _insts.size(), instPtr->opcode);
+ "byte addr #%d idx %d: OPCODE=%d\n", inst_addr,
+ inst_idx, instPtr->opcode);
- TheGpuISA::RawMachInst inst_num = decoder.saveInst(iptr);
+ TheGpuISA::RawMachInst raw_inst = decoder.saveInst(iptr);
iptr->instNum(inst_idx);
- _insts.push_back(inst_num);
+ iptr->instAddr(inst_addr);
+ _insts.push_back(raw_inst);
instructions.push_back(iptr);
}
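+ // advance the byte address by one machine-inst slot for every code entry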
+ inst_addr += sizeof(TheGpuISA::RawMachInst);
++inst_idx;
} else if (entryPtr->kind >= BRIG_KIND_OPERAND_BEGIN &&
entryPtr->kind < BRIG_KIND_OPERAND_END) {
}
BasicBlock*
-ControlFlowInfo::basicBlock(int inst_num) const {
+ControlFlowInfo::basicBlock(int inst_addr) const {
for (auto& block: basicBlocks) {
- int first_block_id = block->firstInstruction->instNum();
- if (inst_num >= first_block_id &&
- inst_num < first_block_id + block->size) {
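+ // block->size counts instructions, so scale by the machine-inst size to get the block's byte extent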
+ int first_block_addr = block->firstInstruction->instAddr();
+ if (inst_addr >= first_block_addr && inst_addr <
+ first_block_addr + block->size * sizeof(TheGpuISA::RawMachInst)) {
return block.get();
}
}
std::set<int> leaders;
// first instruction is a leader
leaders.insert(0);
- for (int i = 1; i < instructions.size(); i++) {
- GPUStaticInst* instruction = instructions[i];
+ for (const auto &instruction : instructions) {
if (instruction->isBranch()) {
const int target_pc = instruction->getTargetPc();
leaders.insert(target_pc);
- leaders.insert(i + 1);
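+ // the fall-through instruction after a branch starts a new block; record its byte address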
+ leaders.insert(instruction->nextInstAddr());
}
}
size_t block_size = 0;
- for (int i = 0; i < instructions.size(); i++) {
- if (leaders.find(i) != leaders.end()) {
+ for (const auto &instruction : instructions) {
+ if (leaders.find(instruction->instAddr()) != leaders.end()) {
uint32_t id = basicBlocks.size();
if (id > 0) {
basicBlocks.back()->size = block_size;
}
block_size = 0;
- basicBlocks.emplace_back(new BasicBlock(id, instructions[i]));
+ basicBlocks.emplace_back(new BasicBlock(id, instruction));
}
block_size++;
}
// Unconditional jump instructions have a unique successor
if (!last->isUnconditionalJump()) {
- BasicBlock* next_bb = basicBlock(last->instNum() + 1);
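+ // anything other than an unconditional jump falls through to the block at the next byte address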
+ BasicBlock* next_bb = basicBlock(last->nextInstAddr());
bb->successorIds.insert(next_bb->id);
}
}
BasicBlock* ipd_block = basicBlocks[*(candidates.begin())].get();
if (!ipd_block->isExit()) {
GPUStaticInst* ipd_first_inst = ipd_block->firstInstruction;
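+ // record the post-dominator by the byte address of its first instruction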
- last_instruction->ipdInstNum(ipd_first_inst->instNum());
+ last_instruction->ipdInstNum(ipd_first_inst->instAddr());
} else {
- last_instruction->ipdInstNum(last_instruction->instNum() + 1);
+ last_instruction->ipdInstNum(last_instruction->nextInstAddr());
}
}
}
ControlFlowInfo::printBasicBlocks() const
{
for (GPUStaticInst* inst : instructions) {
- int inst_num = inst->instNum();
- std::cout << inst_num << " [" << basicBlock(inst_num)->id
+ int inst_addr = inst->instAddr();
+ std::cout << inst_addr << " [" << basicBlock(inst_addr)->id
<< "]: " << inst->disassemble();
if (inst->isBranch()) {
std::cout << ", PC = " << inst->getTargetPc();
GPUStaticInst* lastInstruction(const BasicBlock* block) const;
- BasicBlock* basicBlock(int inst_num) const;
+ BasicBlock* basicBlock(int inst_addr) const;
BasicBlock* postDominator(const BasicBlock* block) const;