X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fradeon%2FR600Instructions.td;h=73c2002a382d927153798cd687b32b2c9a8082e6;hb=67a47a445b544ac638d10303dc697d70f25d12fb;hp=99e4b4fd0b89de461a13f664a4223b1e91a820d0;hpb=94e797d0faed18dfa80bcce7a6d03ef369b6a820;p=mesa.git diff --git a/src/gallium/drivers/radeon/R600Instructions.td b/src/gallium/drivers/radeon/R600Instructions.td index 99e4b4fd0b8..73c2002a382 100644 --- a/src/gallium/drivers/radeon/R600Instructions.td +++ b/src/gallium/drivers/radeon/R600Instructions.td @@ -1,4 +1,4 @@ -//===-- R600Instructions.td - TODO: Add brief description -------===// +//===-- R600Instructions.td - R600 Instruction defs -------*- tablegen -*-===// // // The LLVM Compiler Infrastructure // @@ -7,7 +7,7 @@ // //===----------------------------------------------------------------------===// // -// TODO: Add full description +// R600 Tablegen instruction definitions // //===----------------------------------------------------------------------===// @@ -18,11 +18,13 @@ class InstR600 inst, dag outs, dag ins, string asm, list pattern, : AMDGPUInst { field bits<32> Inst; - bit Trig = 0; + bit Trig = 0; bit Op3 = 0; + bit isVector = 0; + bits<2> FlagOperandIdx = 0; let Inst = inst; - let Namespace = "AMDIL"; + let Namespace = "AMDGPU"; let OutOperandList = outs; let InOperandList = ins; let AsmString = asm; @@ -31,6 +33,11 @@ class InstR600 inst, dag outs, dag ins, string asm, list pattern, let TSFlags{4} = Trig; let TSFlags{5} = Op3; + + // Vector instructions are instructions that must fill all slots in an + // instruction group + let TSFlags{6} = isVector; + let TSFlags{8-7} = FlagOperandIdx; } class InstR600ISA pattern> : @@ -38,14 +45,20 @@ class InstR600ISA pattern> : { field bits<64> Inst; - let Namespace = "AMDIL"; + let Namespace = "AMDGPU"; +} + +def MEMxi : Operand { + let MIOperandInfo = (ops R600_TReg32_X:$ptr, i32imm:$index); } -def MEMri : Operand { +def MEMrr : Operand { let MIOperandInfo = (ops R600_Reg32:$ptr, R600_Reg32:$index); } def ADDRParam : ComplexPattern; +def ADDRDWord : ComplexPattern; +def ADDRVTX_READ : ComplexPattern; class R600_ALU { @@ -58,13 +71,16 @@ class R600_ALU { } +def R600_Pred : PredicateOperand; + class R600_1OP inst, string opName, list pattern, InstrItinClass itin = AnyALU> : InstR600 ; @@ -73,7 +89,7 @@ class R600_2OP inst, string opName, list pattern, InstrItinClass itin = AnyALU> : InstR600 inst, string opName, list pattern, InstrItinClass itin = AnyALU> : InstR600 { @@ -91,6 +107,30 @@ class R600_3OP inst, string opName, list pattern, let Op3 = 1; } + + +def PRED_X : InstR600 <0, (outs R600_Predicate_Bit:$dst), + (ins R600_Reg32:$src0, i32imm:$src1, i32imm:$flags), + "PRED $dst, $src0, $src1", + [], NullALU> +{ + let DisableEncoding = "$src0"; + field bits<32> Inst; + bits<32> src1; + + let Inst = src1; + let FlagOperandIdx = 3; +} + +let isTerminator = 1, isBranch = 1 in { +def JUMP : InstR600 <0x10, + (outs), + (ins brtarget:$target, R600_Pred:$p), + "JUMP $target ($p)", + [], AnyALU + >; +} + class R600_REDUCTION inst, dag ins, string asm, list pattern, InstrItinClass itin = VecALU> : InstR600 ; -class EG_CF_RAT cf_inst, bits <6> rat_inst, dag outs, dag ins, - string asm> : - InstR600ISA +class EG_CF_RAT cf_inst, bits <6> rat_inst, bits<4> rat_id, dag outs, + dag ins, string asm, list pattern> : + InstR600ISA { bits<7> RW_GPR; bits<7> INDEX_GPR; - bits<4> RAT_ID; bits<2> RIM; bits<2> TYPE; @@ -141,7 +180,7 @@ class EG_CF_RAT cf_inst, bits <6> rat_inst, dag outs, dag ins, bits<1> 
BARRIER; /* CF_ALLOC_EXPORT_WORD0_RAT */ - let Inst{3-0} = RAT_ID; + let Inst{3-0} = rat_id; let Inst{9-4} = rat_inst; let Inst{10} = 0; /* Reserved */ let Inst{12-11} = RIM; @@ -152,8 +191,6 @@ class EG_CF_RAT cf_inst, bits <6> rat_inst, dag outs, dag ins, let Inst{31-30} = ELEM_SIZE; /* CF_ALLOC_EXPORT_WORD1_BUF */ -/* XXX: We can't have auto encoding of 64-bit instructions until LLVM 3.1 :( */ -/* let Inst{43-32} = ARRAY_SIZE; let Inst{47-44} = COMP_MASK; let Inst{51-48} = BURST_COUNT; @@ -162,7 +199,6 @@ class EG_CF_RAT cf_inst, bits <6> rat_inst, dag outs, dag ins, let Inst{61-54} = cf_inst; let Inst{62} = MARK; let Inst{63} = BARRIER; -*/ } /* @@ -183,11 +219,10 @@ def store_global : PatFrag<(ops node:$value, node:$ptr), def load_param : PatFrag<(ops node:$ptr), (load node:$ptr), [{ - return true; const Value *Src = cast(N)->getSrcValue(); if (Src) { PointerType * PT = dyn_cast(Src->getType()); - return PT && PT->getAddressSpace() == AMDILAS::PARAM_I_ADDRESS; + return PT && PT->getAddressSpace() == AMDGPUAS::PARAM_I_ADDRESS; } return false; }]>; @@ -223,17 +258,23 @@ def load_param : PatFrag<(ops node:$ptr), //} */ def isR600 : Predicate<"Subtarget.device()" - "->getGeneration() == AMDILDeviceInfo::HD4XXX">; + "->getGeneration() == AMDGPUDeviceInfo::HD4XXX">; +def isR700 : Predicate<"Subtarget.device()" + "->getGeneration() == AMDGPUDeviceInfo::HD4XXX &&" + "Subtarget.device()->getDeviceFlag()" + ">= OCL_DEVICE_RV710">; def isEG : Predicate<"Subtarget.device()" - "->getGeneration() >= AMDILDeviceInfo::HD5XXX && " + "->getGeneration() >= AMDGPUDeviceInfo::HD5XXX && " "Subtarget.device()->getDeviceFlag() != OCL_DEVICE_CAYMAN">; def isCayman : Predicate<"Subtarget.device()" "->getDeviceFlag() == OCL_DEVICE_CAYMAN">; def isEGorCayman : Predicate<"Subtarget.device()" - "->getGeneration() >= AMDILDeviceInfo::HD5XXX">; + "->getGeneration() == AMDGPUDeviceInfo::HD5XXX" + "|| Subtarget.device()->getGeneration() ==" + "AMDGPUDeviceInfo::HD6XXX">; def isR600toCayman : Predicate< - "Subtarget.device()->getGeneration() <= AMDILDeviceInfo::HD6XXX">; + "Subtarget.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX">; let Predicates = [isR600toCayman] in { @@ -241,13 +282,11 @@ let Predicates = [isR600toCayman] in { /* ------------------------------------------- */ /* Common Instructions R600, R700, Evergreen, Cayman */ /* ------------------------------------------- */ -let Gen = AMDGPUGen.R600_CAYMAN in { - def ADD : R600_2OP < 0x0, "ADD", - [(set R600_Reg32:$dst, (fadd R600_Reg32:$src0, R600_Reg32:$src1))] > { - let AMDILOp = AMDILInst.ADD_f32; -} + [(set R600_Reg32:$dst, (fadd R600_Reg32:$src0, R600_Reg32:$src1))] +>; + // Non-IEEE MUL: 0 * anything = 0 def MUL : R600_2OP < 0x1, "MUL NON-IEEE", @@ -256,21 +295,18 @@ def MUL : R600_2OP < def MUL_IEEE : R600_2OP < 0x2, "MUL_IEEE", - [(set R600_Reg32:$dst, (fmul R600_Reg32:$src0, R600_Reg32:$src1))]> { - let AMDILOp = AMDILInst.MUL_IEEE_f32; -} + [(set R600_Reg32:$dst, (fmul R600_Reg32:$src0, R600_Reg32:$src1))] +>; def MAX : R600_2OP < 0x3, "MAX", - [(set R600_Reg32:$dst, (int_AMDIL_max R600_Reg32:$src0, R600_Reg32:$src1))]> { - let AMDILOp = AMDILInst.MAX_f32; -} + [(set R600_Reg32:$dst, (AMDGPUfmax R600_Reg32:$src0, R600_Reg32:$src1))] +>; def MIN : R600_2OP < 0x4, "MIN", - [(set R600_Reg32:$dst, (int_AMDIL_min R600_Reg32:$src0, R600_Reg32:$src1))]> { - let AMDILOp = AMDILInst.MIN_f32; -} + [(set R600_Reg32:$dst, (AMDGPUfmin R600_Reg32:$src0, R600_Reg32:$src1))] +>; /* For the SET* instructions there is a naming conflict in 
TargetSelectionDAG.td, * so some of the instruction names don't match the asm string. @@ -279,32 +315,36 @@ def MIN : R600_2OP < def SETE : R600_2OP < 0x08, "SETE", - [(set R600_Reg32:$dst, (int_AMDGPU_seq R600_Reg32:$src0, R600_Reg32:$src1))]> { - let AMDILOp = AMDILInst.FEQ; -} + [(set R600_Reg32:$dst, + (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, + COND_EQ))] +>; def SGT : R600_2OP < 0x09, "SETGT", - [(set R600_Reg32:$dst, (int_AMDGPU_sgt R600_Reg32:$src0, R600_Reg32:$src1))] + [(set R600_Reg32:$dst, + (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, + COND_GT))] >; def SGE : R600_2OP < 0xA, "SETGE", - [(set R600_Reg32:$dst, (int_AMDGPU_sge R600_Reg32:$src0, R600_Reg32:$src1))]> { - let AMDILOp = AMDILInst.FGE; -} + [(set R600_Reg32:$dst, + (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, + COND_GE))] +>; def SNE : R600_2OP < 0xB, "SETNE", - [(set R600_Reg32:$dst, (int_AMDGPU_sne R600_Reg32:$src0, R600_Reg32:$src1))]> { - let AMDILOp = AMDILInst.FNE; -} + [(set R600_Reg32:$dst, + (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, + COND_NE))] +>; def FRACT : R600_1OP < 0x10, "FRACT", - []> { - let AMDILOp = AMDILInst.FRAC_f32; -} + [(set R600_Reg32:$dst, (AMDGPUfract R600_Reg32:$src))] +>; def TRUNC : R600_1OP < 0x11, "TRUNC", @@ -313,117 +353,143 @@ def TRUNC : R600_1OP < def CEIL : R600_1OP < 0x12, "CEIL", - [(set R600_Reg32:$dst, (int_AMDIL_round_neginf R600_Reg32:$src))]> { - let AMDILOp = AMDILInst.ROUND_NEGINF_f32; -} + [(set R600_Reg32:$dst, (fceil R600_Reg32:$src))] +>; def RNDNE : R600_1OP < 0x13, "RNDNE", - [(set R600_Reg32:$dst, (int_AMDIL_round_nearest R600_Reg32:$src))]> { - let AMDILOp = AMDILInst.ROUND_NEAREST_f32; -} + [(set R600_Reg32:$dst, (frint R600_Reg32:$src))] +>; def FLOOR : R600_1OP < 0x14, "FLOOR", [(set R600_Reg32:$dst, (int_AMDGPU_floor R600_Reg32:$src))] >; -def MOV : R600_1OP <0x19, "MOV", []>; +def MOV : InstR600 <0x19, (outs R600_Reg32:$dst), + (ins R600_Reg32:$src0, i32imm:$flags, + R600_Pred:$p), + "MOV $dst, $src0", [], AnyALU> { + let FlagOperandIdx = 2; +} -def KILLGT : R600_2OP < - 0x2D, "KILLGT", - [] +class MOV_IMM : InstR600 <0x19, + (outs R600_Reg32:$dst), + (ins R600_Reg32:$alu_literal, R600_Pred:$p, immType:$imm), + "MOV_IMM $dst, $imm", + [], AnyALU +>; + +def MOV_IMM_I32 : MOV_IMM; +def : Pat < + (imm:$val), + (MOV_IMM_I32 (i32 ALU_LITERAL_X), imm:$val) +>; + +def MOV_IMM_F32 : MOV_IMM; +def : Pat < + (fpimm:$val), + (MOV_IMM_F32 (i32 ALU_LITERAL_X), fpimm:$val) >; +def KILLGT : InstR600 <0x2D, + (outs R600_Reg32:$dst), + (ins R600_Reg32:$src0, R600_Reg32:$src1, i32imm:$flags, R600_Pred:$p, + variable_ops), + "KILLGT $dst, $src0, $src1, $flags ($p)", + [], + NullALU>{ + let FlagOperandIdx = 3; +} + def AND_INT : R600_2OP < 0x30, "AND_INT", - []> { - let AMDILOp = AMDILInst.AND_i32; -} + [(set R600_Reg32:$dst, (and R600_Reg32:$src0, R600_Reg32:$src1))] +>; def OR_INT : R600_2OP < 0x31, "OR_INT", - []>{ - let AMDILOp = AMDILInst.BINARY_OR_i32; -} + [(set R600_Reg32:$dst, (or R600_Reg32:$src0, R600_Reg32:$src1))] +>; def XOR_INT : R600_2OP < 0x32, "XOR_INT", - [] + [(set R600_Reg32:$dst, (xor R600_Reg32:$src0, R600_Reg32:$src1))] >; def NOT_INT : R600_1OP < 0x33, "NOT_INT", - []>{ - let AMDILOp = AMDILInst.BINARY_NOT_i32; -} + [(set R600_Reg32:$dst, (not R600_Reg32:$src))] +>; def ADD_INT : R600_2OP < 0x34, "ADD_INT", - []>{ - let AMDILOp = AMDILInst.ADD_i32; -} + [(set R600_Reg32:$dst, (add R600_Reg32:$src0, R600_Reg32:$src1))] +>; def SUB_INT : R600_2OP < 0x35, 
"SUB_INT", - [] + [(set R600_Reg32:$dst, (sub R600_Reg32:$src0, R600_Reg32:$src1))] >; def MAX_INT : R600_2OP < 0x36, "MAX_INT", - [(set R600_Reg32:$dst, (int_AMDGPU_imax R600_Reg32:$src0, R600_Reg32:$src1))]>; + [(set R600_Reg32:$dst, (AMDGPUsmax R600_Reg32:$src0, R600_Reg32:$src1))]>; def MIN_INT : R600_2OP < 0x37, "MIN_INT", - [(set R600_Reg32:$dst, (int_AMDGPU_imin R600_Reg32:$src0, R600_Reg32:$src1))]>; + [(set R600_Reg32:$dst, (AMDGPUsmin R600_Reg32:$src0, R600_Reg32:$src1))]>; def MAX_UINT : R600_2OP < 0x38, "MAX_UINT", - [(set R600_Reg32:$dst, (int_AMDGPU_umax R600_Reg32:$src0, R600_Reg32:$src1))]>; + [(set R600_Reg32:$dst, (AMDGPUsmax R600_Reg32:$src0, R600_Reg32:$src1))] +>; def MIN_UINT : R600_2OP < 0x39, "MIN_UINT", - [(set R600_Reg32:$dst, (int_AMDGPU_umin R600_Reg32:$src0, R600_Reg32:$src1))]>; - + [(set R600_Reg32:$dst, (AMDGPUumin R600_Reg32:$src0, R600_Reg32:$src1))] +>; def SETE_INT : R600_2OP < 0x3A, "SETE_INT", - []>{ - let AMDILOp = AMDILInst.IEQ; -} + [(set (i32 R600_Reg32:$dst), + (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETEQ))] +>; def SETGT_INT : R600_2OP < 0x3B, "SGT_INT", - [] + [(set (i32 R600_Reg32:$dst), + (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETGT))] >; def SETGE_INT : R600_2OP < 0x3C, "SETGE_INT", - []>{ - let AMDILOp = AMDILInst.IGE; -} + [(set (i32 R600_Reg32:$dst), + (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETGE))] +>; def SETNE_INT : R600_2OP < 0x3D, "SETNE_INT", - []>{ - let AMDILOp = AMDILInst.INE; -} + [(set (i32 R600_Reg32:$dst), + (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETNE))] +>; def SETGT_UINT : R600_2OP < 0x3E, "SETGT_UINT", - []>{ - let AMDILOp = AMDILInst.UGT; -} + [(set (i32 R600_Reg32:$dst), + (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETUGT))] +>; def SETGE_UINT : R600_2OP < 0x3F, "SETGE_UINT", - []>{ - let AMDILOp = AMDILInst.UGE; -} + [(set (i32 R600_Reg32:$dst), + (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETUGE))] +>; def CNDE_INT : R600_3OP < 0x1C, "CNDE_INT", - [] + [(set (i32 R600_Reg32:$dst), + (select R600_Reg32:$src0, R600_Reg32:$src2, R600_Reg32:$src1))] >; /* Texture instructions */ @@ -431,8 +497,11 @@ def CNDE_INT : R600_3OP < def TEX_LD : R600_TEX < 0x03, "TEX_LD", - [(set R600_Reg128:$dst, (int_AMDGPU_txf R600_Reg128:$src0, imm:$src1, imm:$src2))] ->; + [(set R600_Reg128:$dst, (int_AMDGPU_txf R600_Reg128:$src0, imm:$src1, imm:$src2, imm:$src3, imm:$src4, imm:$src5))] +> { +let AsmString = "TEX_LD $dst, $src0, $src1, $src2, $src3, $src4, $src5"; +let InOperandList = (ins R600_Reg128:$src0, i32imm:$src1, i32imm:$src2, i32imm:$src3, i32imm:$src4, i32imm:$src5); +} def TEX_GET_TEXTURE_RESINFO : R600_TEX < 0x04, "TEX_GET_TEXTURE_RESINFO", @@ -449,6 +518,16 @@ def TEX_GET_GRADIENTS_V : R600_TEX < [(set R600_Reg128:$dst, (int_AMDGPU_ddy R600_Reg128:$src0, imm:$src1, imm:$src2))] >; +def TEX_SET_GRADIENTS_H : R600_TEX < + 0x0B, "TEX_SET_GRADIENTS_H", + [] +>; + +def TEX_SET_GRADIENTS_V : R600_TEX < + 0x0C, "TEX_SET_GRADIENTS_V", + [] +>; + def TEX_SAMPLE : R600_TEX < 0x10, "TEX_SAMPLE", [(set R600_Reg128:$dst, (int_AMDGPU_tex R600_Reg128:$src0, imm:$src1, imm:$src2))] @@ -481,24 +560,12 @@ def TEX_SAMPLE_C_LB : R600_TEX < def TEX_SAMPLE_G : R600_TEX < 0x14, "TEX_SAMPLE_G", - [(set R600_Reg128:$dst, (int_AMDGPU_txd R600_Reg128:$src0, imm:$src1, imm:$src2))] + [] >; def TEX_SAMPLE_C_G : R600_TEX < 0x1C, "TEX_SAMPLE_C_G", - [(set R600_Reg128:$dst, (int_AMDGPU_txd R600_Reg128:$src0, imm:$src1, TEX_SHADOW:$src2))] ->; - -} // 
End Gen R600_CAYMAN - -def KILP : Pat < - (int_AMDGPU_kilp), - (MASK_WRITE (KILLGT (f32 ONE), (f32 ZERO))) ->; - -def KIL : Pat < - (int_AMDGPU_kill R600_Reg32:$src0), - (MASK_WRITE (KILLGT (f32 ZERO), (f32 R600_Reg32:$src0))) + [] >; /* Helper classes for common instructions */ @@ -510,15 +577,15 @@ class MUL_LIT_Common inst> : R600_3OP < class MULADD_Common inst> : R600_3OP < inst, "MULADD", - []> { - let AMDILOp = AMDILInst.MAD_f32; -} + [(set (f32 R600_Reg32:$dst), + (IL_mad R600_Reg32:$src0, R600_Reg32:$src1, R600_Reg32:$src2))] +>; class CNDE_Common inst> : R600_3OP < inst, "CNDE", - []> { - let AMDILOp = AMDILInst.CMOVLOG_f32; -} + [(set (f32 R600_Reg32:$dst), + (select (i32 (fp_to_sint (fneg R600_Reg32:$src0))), (f32 R600_Reg32:$src2), (f32 R600_Reg32:$src1)))] +>; class CNDGT_Common inst> : R600_3OP < inst, "CNDGT", @@ -532,35 +599,64 @@ class CNDGE_Common inst> : R600_3OP < class DOT4_Common inst> : R600_REDUCTION < inst, - (ins R600_Reg128:$src0, R600_Reg128:$src1), + (ins R600_Reg128:$src0, R600_Reg128:$src1, i32imm:$flags), "DOT4 $dst $src0, $src1", - [(set R600_Reg32:$dst, (int_AMDGPU_dp4 R600_Reg128:$src0, R600_Reg128:$src1))] ->; + [] + > { + let FlagOperandIdx = 3; +} -class CUBE_Common inst> : InstR600 < - inst, - (outs R600_Reg128:$dst), - (ins R600_Reg128:$src), - "CUBE $dst $src", - [(set R600_Reg128:$dst, (int_AMDGPU_cube R600_Reg128:$src))], - VecALU +class DOT4_Pat : Pat < + (int_AMDGPU_dp4 R600_Reg128:$src0, R600_Reg128:$src1), + (dot4 R600_Reg128:$src0, R600_Reg128:$src1, 0) >; +multiclass CUBE_Common inst> { + + def _pseudo : InstR600 < + inst, + (outs R600_Reg128:$dst), + (ins R600_Reg128:$src), + "CUBE $dst $src", + [(set R600_Reg128:$dst, (int_AMDGPU_cube R600_Reg128:$src))], + VecALU + >; + + def _real : InstR600 < + inst, + (outs R600_Reg32:$dst), + (ins R600_Reg32:$src0, R600_Reg32:$src1, i32imm:$flags), + "CUBE $dst, $src0, $src1", + [], VecALU + >{ + let FlagOperandIdx = 3; + } +} + class EXP_IEEE_Common inst> : R600_1OP < inst, "EXP_IEEE", - []> { - let AMDILOp = AMDILInst.EXP_f32; -} + [(set R600_Reg32:$dst, (fexp2 R600_Reg32:$src))] +>; class FLT_TO_INT_Common inst> : R600_1OP < - inst, "FLT_TO_INT", []> { - let AMDILOp = AMDILInst.FTOI; -} + inst, "FLT_TO_INT", + [(set R600_Reg32:$dst, (fp_to_sint R600_Reg32:$src))] +>; class INT_TO_FLT_Common inst> : R600_1OP < - inst, "INT_TO_FLT", []> { - let AMDILOp = AMDILInst.ITOF; -} + inst, "INT_TO_FLT", + [(set R600_Reg32:$dst, (sint_to_fp R600_Reg32:$src))] +>; + +class FLT_TO_UINT_Common inst> : R600_1OP < + inst, "FLT_TO_UINT", + [(set R600_Reg32:$dst, (fp_to_uint R600_Reg32:$src))] +>; + +class UINT_TO_FLT_Common inst> : R600_1OP < + inst, "UINT_TO_FLT", + [(set R600_Reg32:$dst, (uint_to_fp R600_Reg32:$src))] +>; class LOG_CLAMPED_Common inst> : R600_1OP < inst, "LOG_CLAMPED", @@ -569,44 +665,38 @@ class LOG_CLAMPED_Common inst> : R600_1OP < class LOG_IEEE_Common inst> : R600_1OP < inst, "LOG_IEEE", - []> { - let AMDILOp = AMDILInst.LOG_f32; -} + [(set R600_Reg32:$dst, (int_AMDIL_log R600_Reg32:$src))] +>; class LSHL_Common inst> : R600_2OP < inst, "LSHL $dst, $src0, $src1", - [] >{ - let AMDILOp = AMDILInst.SHL_i32; -} + [(set R600_Reg32:$dst, (shl R600_Reg32:$src0, R600_Reg32:$src1))] +>; class LSHR_Common inst> : R600_2OP < inst, "LSHR $dst, $src0, $src1", - [] >{ - let AMDILOp = AMDILInst.USHR_i32; -} + [(set R600_Reg32:$dst, (srl R600_Reg32:$src0, R600_Reg32:$src1))] +>; class ASHR_Common inst> : R600_2OP < inst, "ASHR $dst, $src0, $src1", - [] >{ - let AMDILOp = AMDILInst.SHR_i32; -} + [(set 
R600_Reg32:$dst, (sra R600_Reg32:$src0, R600_Reg32:$src1))] +>; class MULHI_INT_Common inst> : R600_2OP < inst, "MULHI_INT $dst, $src0, $src1", - [] >{ - let AMDILOp = AMDILInst.SMULHI_i32; -} + [(set R600_Reg32:$dst, (mulhs R600_Reg32:$src0, R600_Reg32:$src1))] +>; class MULHI_UINT_Common inst> : R600_2OP < - inst, "MULHI $dst, $src0, $src1", - [] + inst, "MULHI $dst, $src0, $src1", + [(set R600_Reg32:$dst, (mulhu R600_Reg32:$src0, R600_Reg32:$src1))] >; class MULLO_INT_Common inst> : R600_2OP < inst, "MULLO_INT $dst, $src0, $src1", - [] >{ - let AMDILOp = AMDILInst.SMUL_i32; -} + [(set R600_Reg32:$dst, (mul R600_Reg32:$src0, R600_Reg32:$src1))] +>; class MULLO_UINT_Common inst> : R600_2OP < inst, "MULLO_UINT $dst, $src0, $src1", @@ -620,13 +710,12 @@ class RECIP_CLAMPED_Common inst> : R600_1OP < class RECIP_IEEE_Common inst> : R600_1OP < inst, "RECIP_IEEE", - [(set R600_Reg32:$dst, (int_AMDGPU_rcp R600_Reg32:$src))]> { - let AMDILOp = AMDILInst.RSQ_f32; -} + [(set R600_Reg32:$dst, (int_AMDGPU_rcp R600_Reg32:$src))] +>; class RECIP_UINT_Common inst> : R600_1OP < inst, "RECIP_INT $dst, $src", - [] + [(set R600_Reg32:$dst, (AMDGPUurecip R600_Reg32:$src))] >; class RECIPSQRT_CLAMPED_Common inst> : R600_1OP < @@ -640,16 +729,12 @@ class RECIPSQRT_IEEE_Common inst> : R600_1OP < >; class SIN_Common inst> : R600_1OP < - inst, "SIN", - []>{ - let AMDILOp = AMDILInst.SIN_f32; + inst, "SIN", []>{ let Trig = 1; } class COS_Common inst> : R600_1OP < - inst, "COS", - []> { - let AMDILOp = AMDILInst.COS_f32; + inst, "COS", []> { let Trig = 1; } @@ -661,11 +746,6 @@ class DIV_Common : Pat< (MUL R600_Reg32:$src0, (recip_ieee R600_Reg32:$src1)) >; -class LRP_Common : Pat < - (int_AMDGPU_lrp R600_Reg32:$src0, R600_Reg32:$src1, R600_Reg32:$src2), - (muladd R600_Reg32:$src0, R600_Reg32:$src1, (MUL (SUB_f32 ONE, R600_Reg32:$src0), R600_Reg32:$src2)) ->; - class SSG_Common : Pat < (int_AMDGPU_ssg R600_Reg32:$src), (cndgt R600_Reg32:$src, (f32 ONE), (cndge R600_Reg32:$src, (f32 ZERO), (f32 NEG_ONE))) @@ -682,15 +762,14 @@ class TGSI_LIT_Z_Common ; def MULADD_r600 : MULADD_Common<0x10>; def CNDE_r600 : CNDE_Common<0x18>; def CNDGT_r600 : CNDGT_Common<0x19>; def CNDGE_r600 : CNDGE_Common<0x1A>; def DOT4_r600 : DOT4_Common<0x50>; - def CUBE_r600 : CUBE_Common<0x52>; + def : DOT4_Pat ; + defm CUBE_r600 : CUBE_Common<0x52>; def EXP_IEEE_r600 : EXP_IEEE_Common<0x61>; def LOG_CLAMPED_r600 : LOG_CLAMPED_Common<0x62>; def LOG_IEEE_r600 : LOG_IEEE_Common<0x63>; @@ -700,6 +779,8 @@ let Gen = AMDGPUGen.R600 in { def RECIPSQRT_IEEE_r600 : RECIPSQRT_IEEE_Common<0x69>; def FLT_TO_INT_r600 : FLT_TO_INT_Common<0x6b>; def INT_TO_FLT_r600 : INT_TO_FLT_Common<0x6c>; + def FLT_TO_UINT_r600 : FLT_TO_UINT_Common<0x79>; + def UINT_TO_FLT_r600 : UINT_TO_FLT_Common<0x6d>; def SIN_r600 : SIN_Common<0x6E>; def COS_r600 : COS_Common<0x6F>; def ASHR_r600 : ASHR_Common<0x70>; @@ -709,56 +790,147 @@ let Gen = AMDGPUGen.R600 in { def MULHI_INT_r600 : MULHI_INT_Common<0x74>; def MULLO_UINT_r600 : MULLO_UINT_Common<0x75>; def MULHI_UINT_r600 : MULHI_UINT_Common<0x76>; - def RECIP_UINT_r600 : RECIP_UINT_Common <0x77>; - -} // End AMDGPUGen.R600 + def RECIP_UINT_r600 : RECIP_UINT_Common <0x78>; def DIV_r600 : DIV_Common; - def LRP_r600 : LRP_Common; def POW_r600 : POW_Common; def SSG_r600 : SSG_Common; def TGSI_LIT_Z_r600 : TGSI_LIT_Z_Common; } -/* ----------------- */ -/* R700+ Trig helper */ -/* ----------------- */ - -/* -class TRIG_HELPER_r700 : Pat < - (trig_inst R600_Reg32:$src), - (trig_inst (fmul R600_Reg32:$src, (PI)))) +// Helper pattern 
for normalizing inputs to triginomic instructions for R700+ +// cards. +class TRIG_eg : Pat< + (intr R600_Reg32:$src), + (trig (MUL (MOV_IMM_I32 (i32 ALU_LITERAL_X), CONST.TWO_PI_INV), R600_Reg32:$src)) >; -*/ -/* ---------------------- */ -/* Evergreen Instructions */ -/* ---------------------- */ +//===----------------------------------------------------------------------===// +// R700 Only instructions +//===----------------------------------------------------------------------===// +let Predicates = [isR700] in { + def SIN_r700 : SIN_Common<0x6E>; + def COS_r700 : COS_Common<0x6F>; + + // R700 normalizes inputs to SIN/COS the same as EG + def : TRIG_eg ; + def : TRIG_eg ; +} + +//===----------------------------------------------------------------------===// +// Evergreen Only instructions +//===----------------------------------------------------------------------===// let Predicates = [isEG] in { + +def RECIP_IEEE_eg : RECIP_IEEE_Common<0x86>; + +def MULLO_INT_eg : MULLO_INT_Common<0x8F>; +def MULHI_INT_eg : MULHI_INT_Common<0x90>; +def MULLO_UINT_eg : MULLO_UINT_Common<0x91>; +def MULHI_UINT_eg : MULHI_UINT_Common<0x92>; +def RECIP_UINT_eg : RECIP_UINT_Common<0x94>; + +} // End Predicates = [isEG] + +/* ------------------------------- */ +/* Evergreen / Cayman Instructions */ +/* ------------------------------- */ -let Gen = AMDGPUGen.EG in { +let Predicates = [isEGorCayman] in { + + // BFE_UINT - bit_extract, an optimization for mask and shift + // Src0 = Input + // Src1 = Offset + // Src2 = Width + // + // bit_extract = (Input << (32 - Offset - Width)) >> (32 - Width) + // + // Example Usage: + // (Offset, Width) + // + // (0, 8) = (Input << 24) >> 24 = (Input & 0xff) >> 0 + // (8, 8) = (Input << 16) >> 24 = (Input & 0xffff) >> 8 + // (16,8) = (Input << 8) >> 24 = (Input & 0xffffff) >> 16 + // (24,8) = (Input << 0) >> 24 = (Input & 0xffffffff) >> 24 + def BFE_UINT_eg : R600_3OP <0x4, "BFE_UINT", + [(set R600_Reg32:$dst, (int_AMDIL_bit_extract_u32 R600_Reg32:$src0, + R600_Reg32:$src1, + R600_Reg32:$src2))], + VecALU + >; + + def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT", + [(set R600_Reg32:$dst, (AMDGPUbitalign R600_Reg32:$src0, R600_Reg32:$src1, + R600_Reg32:$src2))], + VecALU + >; + + def MULADD_eg : MULADD_Common<0x14>; + def ASHR_eg : ASHR_Common<0x15>; + def LSHR_eg : LSHR_Common<0x16>; + def LSHL_eg : LSHL_Common<0x17>; + def CNDE_eg : CNDE_Common<0x19>; + def CNDGT_eg : CNDGT_Common<0x1A>; + def CNDGE_eg : CNDGE_Common<0x1B>; + def MUL_LIT_eg : MUL_LIT_Common<0x1F>; + def EXP_IEEE_eg : EXP_IEEE_Common<0x81>; + def LOG_CLAMPED_eg : LOG_CLAMPED_Common<0x82>; + def LOG_IEEE_eg : LOG_IEEE_Common<0x83>; + def RECIP_CLAMPED_eg : RECIP_CLAMPED_Common<0x84>; + def RECIPSQRT_CLAMPED_eg : RECIPSQRT_CLAMPED_Common<0x87>; + def RECIPSQRT_IEEE_eg : RECIPSQRT_IEEE_Common<0x89>; + def SIN_eg : SIN_Common<0x8D>; + def COS_eg : COS_Common<0x8E>; + def DOT4_eg : DOT4_Common<0xBE>; + def : DOT4_Pat ; + defm CUBE_eg : CUBE_Common<0xC0>; + + def DIV_eg : DIV_Common; + def POW_eg : POW_Common; + def SSG_eg : SSG_Common; + def TGSI_LIT_Z_eg : TGSI_LIT_Z_Common; + + def : TRIG_eg ; + def : TRIG_eg ; + + def FLT_TO_INT_eg : FLT_TO_INT_Common<0x50> { + let Pattern = []; + } + + def INT_TO_FLT_eg : INT_TO_FLT_Common<0x9B>; + + def FLT_TO_UINT_eg : FLT_TO_UINT_Common<0x9A> { + let Pattern = []; + } + + def UINT_TO_FLT_eg : UINT_TO_FLT_Common<0x9C>; + + def : Pat<(fp_to_sint R600_Reg32:$src), + (FLT_TO_INT_eg (TRUNC R600_Reg32:$src))>; + + def : Pat<(fp_to_uint R600_Reg32:$src), + (FLT_TO_UINT_eg 
(TRUNC R600_Reg32:$src))>; -def RAT_WRITE_CACHELESS_eg : - EG_CF_RAT <0x57, 0x2, (outs), (ins R600_TReg32_X:$rw_gpr, - R600_TReg32_X:$index_gpr, i32imm:$rat_id), ""> +//===----------------------------------------------------------------------===// +// Memory read/write instructions +//===----------------------------------------------------------------------===// + +let usesCustomInserter = 1 in { + +def RAT_WRITE_CACHELESS_eg : EG_CF_RAT <0x57, 0x2, 0, (outs), + (ins R600_TReg32_X:$rw_gpr, R600_TReg32_X:$index_gpr), + "RAT_WRITE_CACHELESS_eg $rw_gpr, $index_gpr", + [(global_store (i32 R600_TReg32_X:$rw_gpr), R600_TReg32_X:$index_gpr)]> { -/* - let Inst{3-0} = RAT_ID; - let Inst{21-15} = RW_GPR; - let Inst{29-23} = INDEX_GPR; - /* Propery of the UAV */ - let Inst{31-30} = ELEM_SIZE; -*/ let RIM = 0; /* XXX: Have a separate instruction for non-indexed writes. */ let TYPE = 1; let RW_REL = 0; let ELEM_SIZE = 0; -/* let ARRAY_SIZE = 0; let COMP_MASK = 1; let BURST_COUNT = 0; @@ -766,46 +938,59 @@ def RAT_WRITE_CACHELESS_eg : let EOP = 0; let MARK = 0; let BARRIER = 1; -*/ } -def VTX_READ_eg : InstR600ISA < (outs R600_TReg32_X:$dst), - (ins R600_TReg32_X:$src, i32imm:$buffer_id), - "VTX_READ_eg $dst, $src", []> -{ -/* - bits<7> DST_GPR; - bits<7> SRC_GPR; - bits<8> BUFFER_ID; -*/ - /* If any of these field below need to be calculated at compile time, and - * a ins operand for them and move them to the list of operands above. */ +} // End usesCustomInserter = 1 - /* XXX: This instruction is manual encoded, so none of these values are used. - */ -/* - bits<5> VC_INST = 0; //VC_INST_FETCH - bits<2> FETCH_TYPE = 2; - bits<1> FETCH_WHOLE_QUAD = 1; - bits<1> SRC_REL = 0; - bits<2> SRC_SEL_X = 0; - bits<6> MEGA_FETCH_COUNT = 4; -*/ -/* +// Floating point global_store +def : Pat < + (global_store (f32 R600_TReg32_X:$val), R600_TReg32_X:$ptr), + (RAT_WRITE_CACHELESS_eg R600_TReg32_X:$val, R600_TReg32_X:$ptr) +>; - bits<1> DST_REL = 0; - bits<3> DST_SEL_X = 0; - bits<3> DST_SEL_Y = 7; //Masked - bits<3> DST_SEL_Z = 7; //Masked - bits<3> DST_SEL_W = 7; //Masked - bits<1> USE_CONST_FIELDS = 1; //Masked - bits<6> DATA_FORMAT = 0; - bits<2> NUM_FORMAT_ALL = 0; - bits<1> FORMAT_COMP_ALL = 0; - bits<1> SRF_MODE_ALL = 0; -*/ +class VTX_READ_eg buffer_id, dag outs, list pattern> + : InstR600ISA { -/* + // Operands + bits<7> DST_GPR; + bits<7> SRC_GPR; + + // Static fields + bits<5> VC_INST = 0; + bits<2> FETCH_TYPE = 2; + bits<1> FETCH_WHOLE_QUAD = 0; + bits<8> BUFFER_ID = buffer_id; + bits<1> SRC_REL = 0; + // XXX: We can infer this field based on the SRC_GPR. This would allow us + // to store vertex addresses in any channel, not just X. + bits<2> SRC_SEL_X = 0; + bits<6> MEGA_FETCH_COUNT; + bits<1> DST_REL = 0; + bits<3> DST_SEL_X; + bits<3> DST_SEL_Y; + bits<3> DST_SEL_Z; + bits<3> DST_SEL_W; + // The docs say that if this bit is set, then DATA_FORMAT, NUM_FORMAT_ALL, + // FORMAT_COMP_ALL, SRF_MODE_ALL, and ENDIAN_SWAP fields will be ignored, + // however, based on my testing if USE_CONST_FIELDS is set, then all + // these fields need to be set to 0. 
+ bits<1> USE_CONST_FIELDS = 0; + bits<6> DATA_FORMAT; + bits<2> NUM_FORMAT_ALL = 1; + bits<1> FORMAT_COMP_ALL = 0; + bits<1> SRF_MODE_ALL = 0; + + // LLVM can only encode 64-bit instructions, so these fields are manually + // encoded in R600CodeEmitter + // + // bits<16> OFFSET; + // bits<2> ENDIAN_SWAP = 0; + // bits<1> CONST_BUF_NO_STRIDE = 0; + // bits<1> MEGA_FETCH = 0; + // bits<1> ALT_CONST = 0; + // bits<2> BUFFER_INDEX_MODE = 0; + + // VTX_WORD0 let Inst{4-0} = VC_INST; let Inst{6-5} = FETCH_TYPE; let Inst{7} = FETCH_WHOLE_QUAD; @@ -814,18 +999,11 @@ def VTX_READ_eg : InstR600ISA < (outs R600_TReg32_X:$dst), let Inst{23} = SRC_REL; let Inst{25-24} = SRC_SEL_X; let Inst{31-26} = MEGA_FETCH_COUNT; -*/ - /* DST_GPR is OK to leave uncommented, because LLVM 3.0 only prevents you - * from statically setting bits > 31. This field will be set by - * getMachineValueOp which can set bits > 31. - */ -// let Inst{32-38} = DST_GPR; - - /* XXX: Uncomment for LLVM 3.1 which supports 64-bit instructions */ -/* + // VTX_WORD1_GPR + let Inst{38-32} = DST_GPR; let Inst{39} = DST_REL; - let Inst{40} = 0; //Reserved + let Inst{40} = 0; // Reserved let Inst{43-41} = DST_SEL_X; let Inst{46-44} = DST_SEL_Y; let Inst{49-47} = DST_SEL_Z; @@ -835,91 +1013,114 @@ def VTX_READ_eg : InstR600ISA < (outs R600_TReg32_X:$dst), let Inst{61-60} = NUM_FORMAT_ALL; let Inst{62} = FORMAT_COMP_ALL; let Inst{63} = SRF_MODE_ALL; -*/ + + // VTX_WORD2 (LLVM can only encode 64-bit instructions, so WORD2 encoding + // is done in R600CodeEmitter + // + // Inst{79-64} = OFFSET; + // Inst{81-80} = ENDIAN_SWAP; + // Inst{82} = CONST_BUF_NO_STRIDE; + // Inst{83} = MEGA_FETCH; + // Inst{84} = ALT_CONST; + // Inst{86-85} = BUFFER_INDEX_MODE; + // Inst{95-86} = 0; Reserved + + // VTX_WORD3 (Padding) + // + // Inst{127-96} = 0; } +class VTX_READ_32_eg buffer_id, list pattern> + : VTX_READ_eg { + + let MEGA_FETCH_COUNT = 4; + let DST_SEL_X = 0; + let DST_SEL_Y = 7; // Masked + let DST_SEL_Z = 7; // Masked + let DST_SEL_W = 7; // Masked + let DATA_FORMAT = 0xD; // COLOR_32 + + // This is not really necessary, but there were some GPU hangs that appeared + // to be caused by ALU instructions in the next instruction group that wrote + // to the $ptr registers of the VTX_READ. + // e.g. + // %T3_X = VTX_READ_PARAM_i32_eg %T2_X, 24 + // %T2_X = MOV %ZERO + //Adding this constraint prevents this from happening. + let Constraints = "$ptr.ptr = $dst"; +} +class VTX_READ_128_eg buffer_id, list pattern> + : VTX_READ_eg { -} // End AMDGPUGen.EG -/* XXX: Need to convert PTR to rat_id */ -/* -def : Pat <(store_global (f32 R600_Reg32:$value), node:$ptr), - (RAT_WRITE_CACHELESS_eg (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), - (f32 R600_Reg32:$value), - sel_x), - (f32 ZERO), 0, R600_Reg32:$ptr)>; -*/ + let MEGA_FETCH_COUNT = 16; + let DST_SEL_X = 0; + let DST_SEL_Y = 1; + let DST_SEL_Z = 2; + let DST_SEL_W = 3; + let DATA_FORMAT = 0x22; // COLOR_32_32_32_32 -class VTX_Param_Read_Pattern : Pat < - (vt (load_param ADDRParam:$mem)), - (VTX_READ_eg (i32 R600_Reg32:$mem), 0)>; + // XXX: Need to force VTX_READ_128 instructions to write to the same register + // that holds its buffer address to avoid potential hangs. We can't use + // the same constraint as VTX_READ_32_eg, because the $ptr.ptr and $dst + // registers are different sizes. 
+} -def : VTX_Param_Read_Pattern ; -def : VTX_Param_Read_Pattern ; +//===----------------------------------------------------------------------===// +// VTX Read from parameter memory space +//===----------------------------------------------------------------------===// -} // End isEG Predicate +class VTX_READ_PARAM_32_eg : VTX_READ_32_eg <0, + [(set (vt R600_TReg32_X:$dst), (load_param ADDRVTX_READ:$ptr))] +>; -/* ------------------------------- */ -/* Evergreen / Cayman Instructions */ -/* ------------------------------- */ +def VTX_READ_PARAM_i32_eg : VTX_READ_PARAM_32_eg; +def VTX_READ_PARAM_f32_eg : VTX_READ_PARAM_32_eg; -let Predicates = [isEGorCayman] in { - -class TRIG_eg : Pat< - (intr R600_Reg32:$src), - (trig (MUL (MOV (LOADCONST_i32 CONST.TWO_PI_INV)), R600_Reg32:$src)) ->; -let Gen = AMDGPUGen.EG_CAYMAN in { +//===----------------------------------------------------------------------===// +// VTX Read from global memory space +//===----------------------------------------------------------------------===// - def MULADD_eg : MULADD_Common<0x14>; - def ASHR_eg : ASHR_Common<0x15>; - def LSHR_eg : LSHR_Common<0x16>; - def LSHL_eg : LSHL_Common<0x17>; - def CNDE_eg : CNDE_Common<0x19>; - def CNDGT_eg : CNDGT_Common<0x1A>; - def CNDGE_eg : CNDGE_Common<0x1B>; - def MUL_LIT_eg : MUL_LIT_Common<0x1F>; - def FLT_TO_INT_eg : FLT_TO_INT_Common<0x50>; - def EXP_IEEE_eg : EXP_IEEE_Common<0x81>; - def LOG_CLAMPED_eg : LOG_CLAMPED_Common<0x82>; - def LOG_IEEE_eg : LOG_IEEE_Common<0x83>; - def RECIP_CLAMPED_eg : RECIP_CLAMPED_Common<0x84>; - def RECIP_IEEE_eg : RECIP_IEEE_Common<0x86>; - def RECIPSQRT_CLAMPED_eg : RECIPSQRT_CLAMPED_Common<0x87>; - def RECIPSQRT_IEEE_eg : RECIPSQRT_IEEE_Common<0x89>; - def SIN_eg : SIN_Common<0x8D>; - def COS_eg : COS_Common<0x8E>; - def MULLO_INT_eg : MULLO_INT_Common<0x8F>; - def MULHI_INT_eg : MULHI_INT_Common<0x90>; - def MULLO_UINT_eg : MULLO_UINT_Common<0x91>; - def MULHI_UINT_eg : MULHI_UINT_Common<0x92>; - def RECIP_UINT_eg : RECIP_UINT_Common<0x94>; - def INT_TO_FLT_eg : INT_TO_FLT_Common<0x9B>; - def DOT4_eg : DOT4_Common<0xBE>; - def CUBE_eg : CUBE_Common<0xC0>; +// 32-bit reads -} // End AMDGPUGen.EG_CAYMAN +class VTX_READ_GLOBAL_eg : VTX_READ_32_eg <1, + [(set (vt R600_TReg32_X:$dst), (global_load ADDRVTX_READ:$ptr))] +>; - def DIV_eg : DIV_Common; - def LRP_eg : LRP_Common; - def POW_eg : POW_Common; - def SSG_eg : SSG_Common; - def TGSI_LIT_Z_eg : TGSI_LIT_Z_Common; +def VTX_READ_GLOBAL_i32_eg : VTX_READ_GLOBAL_eg; +def VTX_READ_GLOBAL_f32_eg : VTX_READ_GLOBAL_eg; - def : TRIG_eg ; - def : TRIG_eg ; +// 128-bit reads + +class VTX_READ_GLOBAL_128_eg : VTX_READ_128_eg <1, + [(set (vt R600_Reg128:$dst), (global_load ADDRVTX_READ:$ptr))] +>; + +def VTX_READ_GLOBAL_v4i32_eg : VTX_READ_GLOBAL_128_eg; +def VTX_READ_GLOBAL_v4f32_eg : VTX_READ_GLOBAL_128_eg; } let Predicates = [isCayman] in { -let Gen = AMDGPUGen.CAYMAN in { +let isVector = 1 in { + +def RECIP_IEEE_cm : RECIP_IEEE_Common<0x86>; - /* XXX: I'm not sure if this opcode is correct. 
*/ - def RECIP_UINT_cm : RECIP_UINT_Common<0x77>; +def MULLO_INT_cm : MULLO_INT_Common<0x8F>; +def MULHI_INT_cm : MULHI_INT_Common<0x90>; +def MULLO_UINT_cm : MULLO_UINT_Common<0x91>; +def MULHI_UINT_cm : MULHI_UINT_Common<0x92>; -} // End AMDGPUGen.CAYMAN +} // End isVector = 1 + +// RECIP_UINT emulation for Cayman +def : Pat < + (AMDGPUurecip R600_Reg32:$src0), + (FLT_TO_UINT_eg (MUL_IEEE (RECIP_IEEE_cm (UINT_TO_FLT_eg R600_Reg32:$src0)), + (MOV_IMM_I32 (i32 ALU_LITERAL_X), 0x4f800000))) +>; } // End isCayman @@ -965,63 +1166,123 @@ class R600PreloadInst : AMDGPUInst < [(set R600_TReg32:$dst, (intr))] >; -def TGID_X : R600PreloadInst <"TGID_X", int_r600_read_tgid_x>; -def TGID_Y : R600PreloadInst <"TGID_Y", int_r600_read_tgid_y>; -def TGID_Z : R600PreloadInst <"TGID_Z", int_r600_read_tgid_z>; +def R600_LOAD_CONST : AMDGPUShaderInst < + (outs R600_Reg32:$dst), + (ins i32imm:$src0), + "R600_LOAD_CONST $dst, $src0", + [(set R600_Reg32:$dst, (int_AMDGPU_load_const imm:$src0))] +>; -def TIDIG_X : R600PreloadInst <"TIDIG_X", int_r600_read_tidig_x>; -def TIDIG_Y : R600PreloadInst <"TIDIG_Y", int_r600_read_tidig_y>; -def TIDIG_Z : R600PreloadInst <"TIDIG_Z", int_r600_read_tidig_z>; +def RESERVE_REG : AMDGPUShaderInst < + (outs), + (ins i32imm:$src), + "RESERVE_REG $src", + [(int_AMDGPU_reserve_reg imm:$src)] +>; -def NGROUPS_X : R600PreloadInst <"NGROUPS_X", int_r600_read_ngroups_x>; -def NGROUPS_Y : R600PreloadInst <"NGROUPS_Y", int_r600_read_ngroups_y>; -def NGROUPS_Z : R600PreloadInst <"NGROUPS_Z", int_r600_read_ngroups_z>; +def TXD: AMDGPUShaderInst < + (outs R600_Reg128:$dst), + (ins R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, i32imm:$src3, i32imm:$src4), + "TXD $dst, $src0, $src1, $src2, $src3, $src4", + [(set R600_Reg128:$dst, (int_AMDGPU_txd R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, imm:$src3, imm:$src4))] +>; -def GLOBAL_SIZE_X : R600PreloadInst <"GLOBAL_SIZE_X", - int_r600_read_global_size_x>; -def GLOBAL_SIZE_Y : R600PreloadInst <"GLOBAL_SIZE_Y", - int_r600_read_global_size_y>; -def GLOBAL_SIZE_Z : R600PreloadInst <"GLOBAL_SIZE_Z", - int_r600_read_global_size_z>; +def TXD_SHADOW: AMDGPUShaderInst < + (outs R600_Reg128:$dst), + (ins R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, i32imm:$src3, i32imm:$src4), + "TXD_SHADOW $dst, $src0, $src1, $src2, $src3, $src4", + [(set R600_Reg128:$dst, (int_AMDGPU_txd R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, imm:$src3, TEX_SHADOW:$src4))] +>; -def LOCAL_SIZE_X : R600PreloadInst <"LOCAL_SIZE_X", - int_r600_read_local_size_x>; -def LOCAL_SIZE_Y : R600PreloadInst <"LOCAL_SIZE_Y", - int_r600_read_local_size_y>; -def LOCAL_SIZE_Z : R600PreloadInst <"LOCAL_SIZE_Z", - int_r600_read_local_size_z>; +} // End usesCustomInserter = 1, isPseudo = 1 -def LOAD_INPUT : AMDGPUShaderInst < - (outs R600_Reg32:$dst), - (ins i32imm:$src), - "LOAD_INPUT $dst, $src", - [(set R600_Reg32:$dst, (int_R600_load_input imm:$src))] +} // End isCodeGenOnly = 1 + +def CLAMP_R600 : CLAMP ; +def FABS_R600 : FABS; +def FNEG_R600 : FNEG; + +let usesCustomInserter = 1 in { + +def MASK_WRITE : AMDGPUShaderInst < + (outs), + (ins R600_Reg32:$src), + "MASK_WRITE $src", + [] >; -def STORE_OUTPUT: AMDGPUShaderInst < - (outs R600_Reg32:$dst), - (ins R600_Reg32:$src0, i32imm:$src1), - "STORE_OUTPUT $dst, $src0, $src1", - [(set R600_Reg32:$dst, (int_AMDGPU_store_output R600_Reg32:$src0, imm:$src1))] +} // End usesCustomInserter = 1 + +//===----------------------------------------------------------------------===// +// ISel Patterns 
+//===----------------------------------------------------------------------===// + +// KIL Patterns +def KILP : Pat < + (int_AMDGPU_kilp), + (MASK_WRITE (KILLGT (f32 ONE), (f32 ZERO), 0)) >; -} // End usesCustomInserter = 1, isPseudo = 1 +def KIL : Pat < + (int_AMDGPU_kill R600_Reg32:$src0), + (MASK_WRITE (KILLGT (f32 ZERO), (f32 R600_Reg32:$src0), 0)) +>; -} // End isCodeGenOnly = 1 +// SGT Reverse args +def : Pat < + (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, COND_LT), + (SGT R600_Reg32:$src1, R600_Reg32:$src0) +>; +// SGE Reverse args +def : Pat < + (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, COND_LE), + (SGE R600_Reg32:$src1, R600_Reg32:$src0) +>; +// SETGT_INT reverse args +def : Pat < + (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETLT), + (SETGT_INT R600_Reg32:$src1, R600_Reg32:$src0) +>; -let isPseudo = 1 in { +// SETGE_INT reverse args +def : Pat < + (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETLE), + (SETGE_INT R600_Reg32:$src1, R600_Reg32:$src0) +>; -def LOAD_VTX : AMDGPUShaderInst < - (outs R600_Reg32:$dst), - (ins MEMri:$mem), - "LOAD_VTX", - [(set (i32 R600_Reg32:$dst), (load_param ADDRParam:$mem))] +// SETGT_UINT reverse args +def : Pat < + (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETULT), + (SETGT_UINT R600_Reg32:$src1, R600_Reg32:$src0) >; +// SETGE_UINT reverse args +def : Pat < + (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETULE), + (SETGE_UINT R600_Reg32:$src0, R600_Reg32:$src1) +>; -} //End isPseudo +// The next two patterns are special cases for handling 'true if ordered' and +// 'true if unordered' conditionals. The assumption here is that the behavior of +// SETE and SNE conforms to the Direct3D 10 rules for floating point values +// described here: +// http://msdn.microsoft.com/en-us/library/windows/desktop/cc308050.aspx#alpha_32_bit +// We assume that SETE returns false when one of the operands is NAN and +// SNE returns true when on of the operands is NAN + +//SETE - 'true if ordered' +def : Pat < + (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, SETO), + (SETE R600_Reg32:$src0, R600_Reg32:$src1) +>; + +//SNE - 'true if unordered' +def : Pat < + (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, SETUO), + (SNE R600_Reg32:$src0, R600_Reg32:$src1) +>; def : Extract_Element ; def : Extract_Element ; @@ -1043,7 +1304,13 @@ def : Insert_Element ; def : Insert_Element ; def : Insert_Element ; +def : Vector_Build ; +def : Vector_Build ; + +// bitconvert patterns -include "R600ShaderPatterns.td" +def : BitConvert ; +def : BitConvert ; +def : BitConvert ; } // End isR600toCayman Predicate
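
The TRIG_eg helper class above folds a multiply by CONST.TWO_PI_INV in front of the hardware SIN/COS, because on R700/Evergreen those ALU ops consume an angle that has already been scaled by 1/(2*pi). A minimal C sketch of that lowering, under that assumption; hw_sin_unit and lowered_sin are illustrative stand-ins (modelled with sinf), not backend functions.

#include <math.h>
#include <stdio.h>

#define PI_F 3.14159265358979f

/* Stand-in for the R700/EG SIN ALU op: it expects the angle already divided
 * by 2*pi ("revolutions"), which is what the MUL by CONST.TWO_PI_INV in the
 * TRIG_eg pattern supplies. */
static float hw_sin_unit(float revolutions)
{
    return sinf(revolutions * 2.0f * PI_F);
}

/* What the pattern arranges for a sine node: MUL by TWO_PI_INV, then SIN. */
static float lowered_sin(float radians)
{
    const float two_pi_inv = 1.0f / (2.0f * PI_F);  /* CONST.TWO_PI_INV */
    return hw_sin_unit(radians * two_pi_inv);
}

int main(void)
{
    printf("%f vs %f\n", lowered_sin(1.0f), sinf(1.0f)); /* should agree closely */
    return 0;
}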
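
The BFE_UINT_eg comment block gives the shift identity used for bit_extract, together with a small (Offset, Width) table. The C check below just replays that identity on the comment's own examples; bfe_uint is an illustrative helper and assumes 0 < Width and Offset + Width <= 32, since a 32-bit shift would be undefined in C.

#include <assert.h>
#include <stdint.h>

/* bit_extract = (Input << (32 - Offset - Width)) >> (32 - Width) */
static uint32_t bfe_uint(uint32_t input, uint32_t offset, uint32_t width)
{
    return (input << (32u - offset - width)) >> (32u - width);
}

int main(void)
{
    uint32_t x = 0xAABBCCDDu;
    assert(bfe_uint(x,  0, 8) == 0xDDu);  /* (x << 24) >> 24 */
    assert(bfe_uint(x,  8, 8) == 0xCCu);  /* (x << 16) >> 24 */
    assert(bfe_uint(x, 16, 8) == 0xBBu);  /* (x <<  8) >> 24 */
    assert(bfe_uint(x, 24, 8) == 0xAAu);  /* (x <<  0) >> 24 */
    return 0;
}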
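
The Cayman-only AMDGPUurecip pattern computes an unsigned reciprocal through the float pipeline; the 0x4f800000 literal is the IEEE-754 bit pattern of 4294967296.0f (2^32). A C sketch of the arithmetic the expansion performs; urecip here is an illustrative helper, and the hardware FLT_TO_UINT clamps where the plain C cast below would overflow.

#include <stdint.h>
#include <stdio.h>

/* UINT_TO_FLT, RECIP_IEEE, MUL_IEEE by 2^32, FLT_TO_UINT: an approximate
 * 0.32 fixed-point reciprocal.  For x <= 1 the cast below would overflow;
 * the GPU conversion saturates instead. */
static uint32_t urecip(uint32_t x)
{
    float r = 1.0f / (float)x;             /* UINT_TO_FLT_eg + RECIP_IEEE_cm   */
    return (uint32_t)(r * 4294967296.0f);  /* MUL_IEEE by 0x4f800000, FLT_TO_UINT_eg */
}

int main(void)
{
    printf("%u\n", urecip(3));  /* roughly 2^32 / 3 */
    return 0;
}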
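
SSG_Common nests CNDGT and CNDGE to build a sign function for int_AMDGPU_ssg. Reading CNDGT as "src0 > 0 ? src1 : src2" and CNDGE as "src0 >= 0 ? src1 : src2" (assumed semantics, consistent with how the pattern is written), the expansion behaves like this C sketch; cndgt, cndge and ssg are illustrative helpers only.

#include <assert.h>

static float cndgt(float c, float a, float b) { return (c > 0.0f)  ? a : b; }
static float cndge(float c, float a, float b) { return (c >= 0.0f) ? a : b; }

/* SSG_Common: cndgt(src, 1.0, cndge(src, 0.0, -1.0)) */
static float ssg(float x) { return cndgt(x, 1.0f, cndge(x, 0.0f, -1.0f)); }

int main(void)
{
    assert(ssg( 2.5f) ==  1.0f);
    assert(ssg( 0.0f) ==  0.0f);
    assert(ssg(-3.0f) == -1.0f);
    return 0;
}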
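
The SETO/SETUO patterns at the end rely on the documented assumption that SETE is false and SNE is true whenever one operand is NaN, which matches IEEE-754 == and != behaviour. The C illustration below shows that assumption, plus why the "reverse args" patterns may simply swap operands for the LT/LE conditions; sete and sne are stand-ins for the ALU ops, not backend code.

#include <assert.h>
#include <math.h>

static float sete(float a, float b) { return (a == b) ? 1.0f : 0.0f; }
static float sne (float a, float b) { return (a != b) ? 1.0f : 0.0f; }

int main(void)
{
    float x = 1.0f, n = nanf("");
    assert(sete(x, x) == 1.0f);  /* ordered, equal                 */
    assert(sete(n, x) == 0.0f);  /* NaN operand -> SETE is false   */
    assert(sne(n, x)  == 1.0f);  /* NaN operand -> SNE is true     */
    /* reverse args: a < b selects the same value as b > a, so
     * selectcc ... COND_LT can be emitted as SGT with swapped operands. */
    assert((x < 2.0f) == (2.0f > x));
    return 0;
}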