+//===-- AMDGPUAsmPrinter.cpp - AMDGPU Assembly printer -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The AMDGPUAsmPrinter is used to print both assembly text and binary code.
+// When passed an MCAsmStreamer it prints assembly, and when passed an
+// MCObjectStreamer it outputs binary code.
+//
+//===----------------------------------------------------------------------===//
+//
+
#include "AMDGPUAsmPrinter.h"
#include "AMDGPU.h"
+//===-- AMDGPUAsmPrinter.h - Print AMDGPU assembly code -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// AMDGPU Assembly printer class.
+//
+//===----------------------------------------------------------------------===//
#ifndef AMDGPU_ASMPRINTER_H
#define AMDGPU_ASMPRINTER_H
+//===- AMDGPUMCInstLower.cpp - Lower AMDGPU MachineInstr to an MCInst -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to lower AMDGPU MachineInstrs to their corresponding
+// MCInst.
+//
+//===----------------------------------------------------------------------===//
+//
#include "AMDGPUMCInstLower.h"
#include "AMDGPUAsmPrinter.h"
AMDGPUMCInstLower::AMDGPUMCInstLower() { }
-void AMDGPUMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
+void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
OutMI.setOpcode(MI->getOpcode());
for (unsigned i = 0, e = MI->getNumExplicitOperands(); i != e; ++i) {
while (I != MBB->end() && I->isInsideBundle()) {
MCInst MCBundleInst;
const MachineInstr *BundledInst = I;
- MCInstLowering.Lower(BundledInst, MCBundleInst);
+ MCInstLowering.lower(BundledInst, MCBundleInst);
OutStreamer.EmitInstruction(MCBundleInst);
++I;
}
} else {
MCInst TmpInst;
- MCInstLowering.Lower(MI, TmpInst);
+ MCInstLowering.lower(MI, TmpInst);
OutStreamer.EmitInstruction(TmpInst);
}
}
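For readers of this patch, here is a minimal sketch (not part of the change) of the flow the AMDGPUAsmPrinter header describes: a MachineInstr is lowered to an MCInst and handed to whatever MCStreamer the printer was constructed with. The helper name emitOne is hypothetical; the calls it makes (lower, EmitInstruction) are the ones used in the hunks above.

static void emitOne(const AMDGPUMCInstLower &MCInstLowering,
                    MCStreamer &OutStreamer, const MachineInstr *MI) {
  MCInst TmpInst;
  MCInstLowering.lower(MI, TmpInst);   // MachineInstr -> MCInst
  // The same call emits text through an MCAsmStreamer and encoded bytes
  // through an MCObjectStreamer.
  OutStreamer.EmitInstruction(TmpInst);
}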
+//===- AMDGPUMCInstLower.h - MachineInstr Lowering Interface ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
#ifndef AMDGPU_MCINSTLOWER_H
#define AMDGPU_MCINSTLOWER_H
public:
AMDGPUMCInstLower();
- /// Lower - Lower a MachineInstr to an MCInst
- void Lower(const MachineInstr *MI, MCInst &OutMI) const;
+ /// lower - Lower a MachineInstr to an MCInst
+ void lower(const MachineInstr *MI, MCInst &OutMI) const;
};
+//===-- AMDGPUSubtarget.cpp - AMDGPU Subtarget Information ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AMDGPU specific subclass of TargetSubtarget.
+//
+//===----------------------------------------------------------------------===//
#include "AMDGPUSubtarget.h"
-//===-- MCTargetDesc/AMDGPUMCAsmInfo.cpp - TODO: Add brief description -------===//
+//===-- MCTargetDesc/AMDGPUMCAsmInfo.cpp - Assembly Info ------------------===//
//
// The LLVM Compiler Infrastructure
//
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// TODO: Add full description
-//
-//===----------------------------------------------------------------------===//
#include "AMDGPUMCAsmInfo.h"
-#ifndef NULL
-#define NULL 0
-#endif
using namespace llvm;
AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo()
{
HasSingleParameterDotFile = false;
- WeakDefDirective = NULL;
+ WeakDefDirective = 0;
//===------------------------------------------------------------------===//
HasSubsectionsViaSymbols = true;
HasMachoZeroFillDirective = false;
Data16bitsDirective = ".short\t";
Data32bitsDirective = ".long\t";
Data64bitsDirective = ".quad\t";
- GPRel32Directive = NULL;
+ GPRel32Directive = 0;
SunStyleELFSectionSwitchSyntax = true;
UsesELFSectionDirectiveForBSS = true;
HasMicrosoftFastStdCallMangling = false;
HasNoDeadStrip = true;
HasSymbolResolver = false;
WeakRefDirective = ".weakref\t";
- LinkOnceDirective = NULL;
+ LinkOnceDirective = 0;
//===--- Dwarf Emission Directives -----------------------------------===//
HasLEB128 = true;
SupportsDebugInformation = true;
DwarfUsesLabelOffsetForRanges = true;
//===--- CBE Asm Translation Table -----------------------------------===//
- AsmTransCBE = NULL;
+ AsmTransCBE = 0;
}
const char*
AMDGPUMCAsmInfo::getDataASDirective(unsigned int Size, unsigned int AS) const
{
switch (AS) {
default:
- return NULL;
+ return 0;
case 0:
- return NULL;
+ return 0;
};
- return NULL;
+ return 0;
}
const MCSection*
AMDGPUMCAsmInfo::getNonexecutableStackSection(MCContext &CTX) const
{
- return NULL;
+ return 0;
}
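As background for the NULL-to-0 replacements in this hunk: the MCAsmInfo directive members are plain C strings, and a value of 0 simply means the target has no such directive, so the integer literal is sufficient. A small sketch of how a consumer checks one of these fields, assuming the standard getGPRel32Directive() accessor; MAI and OS are hypothetical locals, not names from this patch.

  const char *Dir = MAI.getGPRel32Directive(); // 0 for AMDGPU, per the ctor above
  if (Dir)                                     // consumers null-check before
    OS << Dir;                                 // emitting the directive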
+//===-- AMDGPUMCTargetDesc.cpp - AMDGPU Target Descriptions ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides AMDGPU specific target descriptions.
+//
+//===----------------------------------------------------------------------===//
+
#include "AMDGPUMCTargetDesc.h"
#include "AMDGPUMCAsmInfo.h"
#include "InstPrinter/AMDGPUInstPrinter.h"
void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const {
-/*
- if (MI.getNumOperands() > 1 && MI.getOperand(0).isReg() &&
- MI.getOperand(0).isDead()) {
- return;
- }
-*/
if (isTexOp(MI.getOpcode())) {
EmitTexInstr(MI, Fixups, OS);
} else if (isFCOp(MI.getOpcode())){
if (GET_FLAG_OPERAND_IDX(MCDesc.TSFlags) != 0)
NumOperands--;
- // Some instructions are just place holder instructions that represent
- // operations that the GPU does automatically. They should be ignored.
-// if (TII->isPlaceHolderOpcode(MI.getOpcode())) {
-// return;
-// }
-
if(MI.getOpcode() == AMDGPU::PRED_X)
NumOperands = 2;
~SIMCCodeEmitter() { }
- /// getBinaryCodeForInstr - Function generated by tablegen for encoding
- /// instructions based on the *.td files.
-// virtual uint64_t getBinaryCodeForInstr(const MCInst &MI,
-// SmallVectorImpl<MCFixup> &Fixups) const;
-
/// EncodeInstruction - Encode the instruction and write it to the OS.
virtual void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const;
.addImm(0); // Flag
NewMI->setIsInsideBundle(Chan != 0);
- TII->AddFlag(NewMI, 0, Flags);
+ TII->addFlag(NewMI, 0, Flags);
}
MI.eraseFromParent();
}
.addOperand(MI->getOperand(1))
.addImm(0) // Flags
.addReg(AMDGPU::PRED_SEL_OFF);
- TII->AddFlag(NewMI, 0, MO_FLAG_CLAMP);
+ TII->addFlag(NewMI, 0, MO_FLAG_CLAMP);
break;
}
case AMDGPU::FABS_R600:
.addOperand(MI->getOperand(1))
.addImm(0) // Flags
.addReg(AMDGPU::PRED_SEL_OFF);
- TII->AddFlag(NewMI, 1, MO_FLAG_ABS);
+ TII->addFlag(NewMI, 1, MO_FLAG_ABS);
break;
}
.addOperand(MI->getOperand(1))
.addImm(0) // Flags
.addReg(AMDGPU::PRED_SEL_OFF);
- TII->AddFlag(NewMI, 1, MO_FLAG_NEG);
+ TII->addFlag(NewMI, 1, MO_FLAG_NEG);
break;
}
unsigned maskedRegister = MI->getOperand(0).getReg();
assert(TargetRegisterInfo::isVirtualRegister(maskedRegister));
MachineInstr * defInstr = MRI.getVRegDef(maskedRegister);
- TII->AddFlag(defInstr, 0, MO_FLAG_MASK);
+ TII->addFlag(defInstr, 0, MO_FLAG_MASK);
// Return early so the instruction is not erased
return BB;
}
.addOperand(MI->getOperand(1))
.addImm(OPCODE_IS_ZERO)
.addImm(0); // Flags
- TII->AddFlag(NewMI, 1, MO_FLAG_PUSH);
+ TII->addFlag(NewMI, 1, MO_FLAG_PUSH);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
.addOperand(MI->getOperand(0))
.addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
.addOperand(MI->getOperand(1))
.addImm(OPCODE_IS_ZERO_INT)
.addImm(0); // Flags
- TII->AddFlag(NewMI, 1, MO_FLAG_PUSH);
+ TII->addFlag(NewMI, 1, MO_FLAG_PUSH);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
.addOperand(MI->getOperand(0))
.addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
{
if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
&& AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
- for (unsigned i = 0; i < 4; i++) {
- unsigned SubRegIndex = RI.getSubRegFromChannel(i);
+ for (unsigned I = 0; I < 4; I++) {
+ unsigned SubRegIndex = RI.getSubRegFromChannel(I);
BuildMI(MBB, MI, DL, get(AMDGPU::MOV))
.addReg(RI.getSubReg(DestReg, SubRegIndex), RegState::Define)
.addReg(RI.getSubReg(SrcReg, SubRegIndex))
// Some instructions act as place holders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
-bool R600InstrInfo::isPlaceHolderOpcode(unsigned opcode) const
+bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const
{
- switch (opcode) {
+ switch (Opcode) {
default: return false;
case AMDGPU::RETURN:
case AMDGPU::LAST:
}
}
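A brief usage sketch (echoing the commented-out check removed from the R600 code emitter earlier in this patch): callers are expected to skip placeholder opcodes rather than encode them. TII here is assumed to be a pointer to this R600InstrInfo.

  if (TII->isPlaceHolderOpcode(MI.getOpcode()))
    return; // the GPU performs this operation implicitly; emit nothing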
-bool R600InstrInfo::isReductionOp(unsigned opcode) const
+bool R600InstrInfo::isReductionOp(unsigned Opcode) const
{
- switch(opcode) {
+ switch (Opcode) {
default: return false;
case AMDGPU::DOT4_r600:
case AMDGPU::DOT4_eg:
}
}
-bool R600InstrInfo::isCubeOp(unsigned opcode) const
+bool R600InstrInfo::isCubeOp(unsigned Opcode) const
{
- switch(opcode) {
+ switch (Opcode) {
default: return false;
case AMDGPU::CUBE_r600_pseudo:
case AMDGPU::CUBE_r600_real:
}
static bool
-isPredicateSetter(unsigned opcode)
+isPredicateSetter(unsigned Opcode)
{
- switch (opcode) {
+ switch (Opcode) {
case AMDGPU::PRED_X:
return true;
default:
} else {
MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
assert(PredSet && "No previous predicate !");
- AddFlag(PredSet, 1, MO_FLAG_PUSH);
+ addFlag(PredSet, 1, MO_FLAG_PUSH);
PredSet->getOperand(2).setImm(Cond[1].getImm());
BuildMI(&MBB, DL, get(AMDGPU::JUMP))
} else {
MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
assert(PredSet && "No previous predicate !");
- AddFlag(PredSet, 1, MO_FLAG_PUSH);
+ addFlag(PredSet, 1, MO_FLAG_PUSH);
PredSet->getOperand(2).setImm(Cond[1].getImm());
BuildMI(&MBB, DL, get(AMDGPU::JUMP))
.addMBB(TBB)
case AMDGPU::JUMP:
if (isPredicated(I)) {
MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
- ClearFlag(predSet, 1, MO_FLAG_PUSH);
+ clearFlag(predSet, 1, MO_FLAG_PUSH);
}
I->eraseFromParent();
break;
case AMDGPU::JUMP:
if (isPredicated(I)) {
MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
- ClearFlag(predSet, 1, MO_FLAG_PUSH);
+ clearFlag(predSet, 1, MO_FLAG_PUSH);
}
I->eraseFromParent();
break;
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//
-bool R600InstrInfo::HasFlagOperand(const MachineInstr &MI) const
+bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const
{
return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}
-MachineOperand &R600InstrInfo::GetFlagOp(MachineInstr *MI) const
+MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI) const
{
unsigned FlagIndex = GET_FLAG_OPERAND_IDX(get(MI->getOpcode()).TSFlags);
assert(FlagIndex != 0 &&
return FlagOp;
}
-void R600InstrInfo::AddFlag(MachineInstr *MI, unsigned Operand,
+void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
unsigned Flag) const
{
- MachineOperand &FlagOp = GetFlagOp(MI);
+ MachineOperand &FlagOp = getFlagOp(MI);
FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
}
-void R600InstrInfo::ClearFlag(MachineInstr *MI, unsigned Operand,
+void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
unsigned Flag) const
{
- MachineOperand &FlagOp = GetFlagOp(MI);
+ MachineOperand &FlagOp = getFlagOp(MI);
unsigned InstFlags = FlagOp.getImm();
InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
FlagOp.setImm(InstFlags);
virtual int getInstrLatency(const InstrItineraryData *ItinData,
SDNode *Node) const { return 1;}
- bool HasFlagOperand(const MachineInstr &MI) const;
+ /// hasFlagOperand - Returns true if this instruction has an operand for
+ /// storing target flags.
+ bool hasFlagOperand(const MachineInstr &MI) const;
- ///AddFlag - Add one of the MO_FLAG* flags to the specified Operand.
- void AddFlag(MachineInstr *MI, unsigned Operand, unsigned Flag) const;
+ /// addFlag - Add one of the MO_FLAG* flags to the specified Operand.
+ void addFlag(MachineInstr *MI, unsigned Operand, unsigned Flag) const;
- ///IsFlagSet - Determine if the specified flag is set on this Operand.
- bool IsFlagSet(const MachineInstr &MI, unsigned Operand, unsigned Flag) const;
+ /// isFlagSet - Determine if the specified flag is set on this Operand.
+ bool isFlagSet(const MachineInstr &MI, unsigned Operand, unsigned Flag) const;
- ///GetFlagOp - Return the operand containing the flags for this instruction.
- MachineOperand &GetFlagOp(MachineInstr *MI) const;
+ /// getFlagOp - Return the operand containing the flags for this instruction.
+ MachineOperand &getFlagOp(MachineInstr *MI) const;
- ///ClearFlag - Clear the specified flag on the instruction.
- void ClearFlag(MachineInstr *MI, unsigned Operand, unsigned Flag) const;
+ /// clearFlag - Clear the specified flag on the instruction.
+ void clearFlag(MachineInstr *MI, unsigned Operand, unsigned Flag) const;
};
} // End llvm namespace
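To make the new flag helpers concrete: each operand owns a NUM_MO_FLAGS-bit field inside the single flag-operand immediate, so addFlag ORs a flag into that operand's field and clearFlag masks it back out. An illustrative expansion (not part of the patch), using the MO_FLAG_ABS value seen in the R600 lowering hunks above:

  uint64_t Packed = 0;
  // addFlag(MI, 1, MO_FLAG_ABS) amounts to:
  Packed |= uint64_t(MO_FLAG_ABS) << (NUM_MO_FLAGS * 1);
  // clearFlag(MI, 1, MO_FLAG_ABS) removes only that bit again:
  Packed &= ~(uint64_t(MO_FLAG_ABS) << (NUM_MO_FLAGS * 1));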
Module *Mod = const_cast<Function*>(Fun)->getParent();
NamedMDNode * MD = Mod->getOrInsertNamedMetadata("opencl.kernels");
- if (!MD or !MD->getNumOperands()) {
+ if (!MD || !MD->getNumOperands()) {
return false;
}
for (int i = 0; i < int(MD->getNumOperands()); i++) {
- if (!MD->getOperand(i) or !MD->getOperand(i)->getOperand(0)) {
+ if (!MD->getOperand(i) || !MD->getOperand(i)->getOperand(0)) {
continue;
}
}
if (isa<IntegerType>(Val->getType())) {
- assert(0 and "Internal error");
+ assert(0 && "Internal error");
return false;
}
P.OffsetInDW = getListSize();
P.SizeInDW = CalculateArgumentSize(Arg);
- if (isa<PointerType>(Arg->getType()) and Arg->hasByValAttr()) {
+ if (isa<PointerType>(Arg->getType()) && Arg->hasByValAttr()) {
std::set<Value*> Visited;
P.IsIndirect = IsIndirect(P.Val, Visited);
}
int R600KernelParameters::CalculateArgumentSize(Argument *Arg) {
Type* T = Arg->getType();
- if (Arg->hasByValAttr() and dyn_cast<PointerType>(T)) {
+ if (Arg->hasByValAttr() && dyn_cast<PointerType>(T)) {
T = dyn_cast<PointerType>(T)->getElementType();
}
Addrspace = AMDGPUAS::PARAM_D_ADDRESS;
}
- if (GEP and GEP->getType()->getAddressSpace() != Addrspace) {
+ if (GEP && GEP->getType()->getAddressSpace() != Addrspace) {
Value *Op = GEP->getPointerOperand();
if (dyn_cast<PointerType>(Op->getType())->getAddressSpace() != Addrspace) {
Type * ArgType = P.Val->getType();
PointerType * ArgPtrType = dyn_cast<PointerType>(P.Val->getType());
- if (ArgPtrType and Arg->hasByValAttr()) {
+ if (ArgPtrType && Arg->hasByValAttr()) {
Value* ParamAddrSpacePtr = ConstantPointerNull::get(
PointerType::get(Type::getInt32Ty(*Context),
Addrspace));
assert(!Name.empty());
- if (Name == "image2d_t" or Name == "image3d_t") {
+ if (Name == "image2d_t" || Name == "image3d_t") {
int LastID = std::max(getLastSpecialID("image2d_t"),
getLastSpecialID("image3d_t"));