1 //===-- R600Instructions.td - R600 Instruction defs -------*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // R600 Tablegen instruction definitions
12 //===----------------------------------------------------------------------===//
14 include "R600Intrinsics.td"
16 class InstR600 <bits<32> inst, dag outs, dag ins, string asm, list<dag> pattern,
18 : AMDGPUInst <outs, ins, asm, pattern> {
26 let Namespace = "AMDGPU";
27 let OutOperandList = outs;
28 let InOperandList = ins;
30 let Pattern = pattern;
33 let TSFlags{4} = Trig;
36 // Vector instructions are instructions that must fill all slots in an
38 let TSFlags{6} = isVector;
41 class InstR600ISA <dag outs, dag ins, string asm, list<dag> pattern> :
42 AMDGPUInst <outs, ins, asm, pattern>
46 let Namespace = "AMDGPU";
49 def MEMxi : Operand<iPTR> {
50 let MIOperandInfo = (ops R600_TReg32_X:$ptr, i32imm:$index);
53 def MEMrr : Operand<iPTR> {
54 let MIOperandInfo = (ops R600_Reg32:$ptr, R600_Reg32:$index);
57 def ADDRParam : ComplexPattern<i32, 2, "SelectADDRParam", [], []>;
58 def ADDRDWord : ComplexPattern<i32, 1, "SelectADDRDWord", [], []>;
59 def ADDRVTX_READ : ComplexPattern<i32, 2, "SelectADDRVTX_READ", [], []>;
72 def R600_Pred : PredicateOperand<i32, (ops R600_Predicate),
// R600_1OP - Base class for single-source-operand ALU instructions:
// writes a 32-bit result to $dst from $src; $p is a predicate operand,
// printed in parentheses after the source.
// NOTE(review): the InstR600 base-class line is outside this excerpt.
76 class R600_1OP <bits<32> inst, string opName, list<dag> pattern,
77 InstrItinClass itin = AnyALU> :
79 (outs R600_Reg32:$dst),
80 (ins R600_Reg32:$src, R600_Pred:$p, variable_ops),
81 !strconcat(opName, " $dst, $src ($p)"),
// R600_2OP - Base class for two-source-operand ALU instructions
// (dst = op src0, src1). The predicate operand $p is accepted but not
// printed in the asm string.
86 class R600_2OP <bits<32> inst, string opName, list<dag> pattern,
87 InstrItinClass itin = AnyALU> :
89 (outs R600_Reg32:$dst),
90 (ins R600_Reg32:$src0, R600_Reg32:$src1,R600_Pred:$p, variable_ops),
91 !strconcat(opName, " $dst, $src0, $src1"),
// R600_3OP - Base class for three-source-operand ALU instructions
// (dst = op src0, src1, src2), e.g. MULADD / CNDE-style ops. The
// predicate operand $p is accepted but not printed.
96 class R600_3OP <bits<32> inst, string opName, list<dag> pattern,
97 InstrItinClass itin = AnyALU> :
99 (outs R600_Reg32:$dst),
100 (ins R600_Reg32:$src0, R600_Reg32:$src1, R600_Reg32:$src2,R600_Pred:$p, variable_ops),
101 !strconcat(opName, " $dst, $src0, $src1, $src2"),
110 def PRED_X : AMDGPUInst <(outs R600_Predicate_Bit:$dst),
111 (ins R600_Reg32:$src0, i32imm:$src1),
112 "PRED $dst, $src0, $src1",
115 let DisableEncoding = "$src0";
124 let isTerminator = 1, isBranch = 1 in {
125 def JUMP : InstR600 <0x10,
127 (ins brtarget:$target, R600_Pred:$p),
133 class R600_REDUCTION <bits<32> inst, dag ins, string asm, list<dag> pattern,
134 InstrItinClass itin = VecALU> :
136 (outs R600_Reg32:$dst),
144 class R600_TEX <bits<32> inst, string opName, list<dag> pattern,
145 InstrItinClass itin = AnyALU> :
147 (outs R600_Reg128:$dst),
148 (ins R600_Reg128:$src0, i32imm:$src1, i32imm:$src2),
149 !strconcat(opName, "$dst, $src0, $src1, $src2"),
// TEX_SHADOW - Immediate predicate leaf: matches texture-type immediates
// whose value denotes a shadow sampler (texture types 6-8, 11 and 12).
// Used by the TEX_SAMPLE_C* patterns below to select the compare variants.
154 def TEX_SHADOW : PatLeaf<
156 [{uint32_t TType = (uint32_t)N->getZExtValue();
157 return (TType >= 6 && TType <= 8) || TType == 11 || TType == 12;
// EG_CF_RAT - Evergreen control-flow RAT (Random Access Target) instruction
// encoding: CF_ALLOC_EXPORT_WORD0_RAT occupies bits 31-0 and
// CF_ALLOC_EXPORT_WORD1_BUF occupies bits 63-32.
// NOTE(review): the field declarations (RIM, TYPE, RW_GPR, ...) assigned
// below are declared on lines outside this excerpt.
161 class EG_CF_RAT <bits <8> cf_inst, bits <6> rat_inst, bits<4> rat_id, dag outs,
162 dag ins, string asm, list<dag> pattern> :
163 InstR600ISA <outs, ins, asm, pattern>
181 /* CF_ALLOC_EXPORT_WORD0_RAT */
182 let Inst{3-0} = rat_id;
183 let Inst{9-4} = rat_inst;
184 let Inst{10} = 0; /* Reserved */
185 let Inst{12-11} = RIM;
186 let Inst{14-13} = TYPE;
187 let Inst{21-15} = RW_GPR;
188 let Inst{22} = RW_REL;
189 let Inst{29-23} = INDEX_GPR;
190 let Inst{31-30} = ELEM_SIZE;
192 /* CF_ALLOC_EXPORT_WORD1_BUF */
193 let Inst{43-32} = ARRAY_SIZE;
194 let Inst{47-44} = COMP_MASK;
195 let Inst{51-48} = BURST_COUNT;
198 let Inst{61-54} = cf_inst;
200 let Inst{63} = BARRIER;
// store_global - PatFrag matching stores to the global address space
// (address space 1).
// Fixes in the C++ predicate: the original declared `Type` but used `PT`,
// assigned lowercase `src` but read `Src`, and was missing parentheses
// around the assignments, so `PT = dyn_cast<...>` parsed as the RHS of
// `&&` (which does not compile).
def store_global : PatFrag<(ops node:$value, node:$ptr),
                           (store node:$value, node:$ptr),
                           [{
                            const Value *Src;
                            const PointerType *PT;
                            // Address space 1 is the global address space.
                            if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
                                (PT = dyn_cast<PointerType>(Src->getType()))) {
                              return PT->getAddressSpace() == 1;
                            }
                            return false;
                           }]>;
// load_param - PatFrag matching loads from the parameter address space
// (AMDGPUAS::PARAM_I_ADDRESS).
// Fix: getSrcValue() may return null (loads synthesized without IR value
// info); the original dereferenced Src->getType() unconditionally.
def load_param : PatFrag<(ops node:$ptr),
                         (load node:$ptr),
                         [{
                          const Value *Src = cast<LoadSDNode>(N)->getSrcValue();
                          if (!Src)
                            return false;
                          PointerType * PT = dyn_cast<PointerType>(Src->getType());
                          return PT && PT->getAddressSpace() == AMDGPUAS::PARAM_I_ADDRESS;
                         }]>;
229 //class EG_CF <bits<32> inst, string asm> :
230 // InstR600 <inst, (outs), (ins), asm, []>;
232 /* XXX: We will use this when we emit the real ISA.
246 let Inst{23-0} = ADDR;
247 let Inst{26-24} = JTS;
248 let Inst{34-32} = PC;
249 let Inst{39-35} = CF_CONST;
250 let Inst{41-40} = COND;
251 let Inst{47-42} = COUNT;
254 let Inst{61-54} = CF_INST;
259 def isR600 : Predicate<"Subtarget.device()"
260 "->getGeneration() == AMDGPUDeviceInfo::HD4XXX">;
261 def isR700 : Predicate<"Subtarget.device()"
262 "->getGeneration() == AMDGPUDeviceInfo::HD4XXX &&"
263 "Subtarget.device()->getDeviceFlag()"
264 ">= OCL_DEVICE_RV710">;
265 def isEG : Predicate<"Subtarget.device()"
266 "->getGeneration() >= AMDGPUDeviceInfo::HD5XXX && "
267 "Subtarget.device()->getDeviceFlag() != OCL_DEVICE_CAYMAN">;
268 def isCayman : Predicate<"Subtarget.device()"
269 "->getDeviceFlag() == OCL_DEVICE_CAYMAN">;
270 def isEGorCayman : Predicate<"Subtarget.device()"
271 "->getGeneration() == AMDGPUDeviceInfo::HD5XXX"
272 "|| Subtarget.device()->getGeneration() =="
273 "AMDGPUDeviceInfo::HD6XXX">;
275 def isR600toCayman : Predicate<
276 "Subtarget.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX">;
279 let Predicates = [isR600toCayman] in {
281 /* ------------------------------------------- */
282 /* Common Instructions R600, R700, Evergreen, Cayman */
283 /* ------------------------------------------- */
286 [(set R600_Reg32:$dst, (fadd R600_Reg32:$src0, R600_Reg32:$src1))]
289 // Non-IEEE MUL: 0 * anything = 0
292 [(set R600_Reg32:$dst, (int_AMDGPU_mul R600_Reg32:$src0, R600_Reg32:$src1))]
295 def MUL_IEEE : R600_2OP <
297 [(set R600_Reg32:$dst, (fmul R600_Reg32:$src0, R600_Reg32:$src1))]
302 [(set R600_Reg32:$dst, (AMDGPUfmax R600_Reg32:$src0, R600_Reg32:$src1))]
307 [(set R600_Reg32:$dst, (AMDGPUfmin R600_Reg32:$src0, R600_Reg32:$src1))]
310 /* For the SET* instructions there is a naming conflict in TargetSelectionDAG.td,
311 * so some of the instruction names don't match the asm string.
312 * XXX: Use the defs in TargetSelectionDAG.td instead of intrinsics.
315 def SETE : R600_2OP <
317 [(set R600_Reg32:$dst,
318 (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO,
324 [(set R600_Reg32:$dst,
325 (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO,
331 [(set R600_Reg32:$dst,
332 (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO,
338 [(set R600_Reg32:$dst,
339 (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO,
343 def FRACT : R600_1OP <
345 [(set R600_Reg32:$dst, (AMDGPUfract R600_Reg32:$src))]
348 def TRUNC : R600_1OP <
350 [(set R600_Reg32:$dst, (int_AMDGPU_trunc R600_Reg32:$src))]
353 def CEIL : R600_1OP <
355 [(set R600_Reg32:$dst, (fceil R600_Reg32:$src))]
358 def RNDNE : R600_1OP <
360 [(set R600_Reg32:$dst, (frint R600_Reg32:$src))]
363 def FLOOR : R600_1OP <
365 [(set R600_Reg32:$dst, (int_AMDGPU_floor R600_Reg32:$src))]
368 def MOV : R600_1OP <0x19, "MOV", []>;
370 class MOV_IMM <ValueType vt, Operand immType> : InstR600 <0x19,
371 (outs R600_Reg32:$dst),
372 (ins R600_Reg32:$alu_literal, R600_Pred:$p, immType:$imm),
373 "MOV_IMM $dst, $imm",
377 def MOV_IMM_I32 : MOV_IMM<i32, i32imm>;
380 (MOV_IMM_I32 (i32 ALU_LITERAL_X), imm:$val)
383 def MOV_IMM_F32 : MOV_IMM<f32, f32imm>;
386 (MOV_IMM_F32 (i32 ALU_LITERAL_X), fpimm:$val)
389 def KILLGT : R600_2OP <
394 def AND_INT : R600_2OP <
396 [(set R600_Reg32:$dst, (and R600_Reg32:$src0, R600_Reg32:$src1))]
399 def OR_INT : R600_2OP <
401 [(set R600_Reg32:$dst, (or R600_Reg32:$src0, R600_Reg32:$src1))]
404 def XOR_INT : R600_2OP <
406 [(set R600_Reg32:$dst, (xor R600_Reg32:$src0, R600_Reg32:$src1))]
409 def NOT_INT : R600_1OP <
411 [(set R600_Reg32:$dst, (not R600_Reg32:$src))]
414 def ADD_INT : R600_2OP <
416 [(set R600_Reg32:$dst, (add R600_Reg32:$src0, R600_Reg32:$src1))]
419 def SUB_INT : R600_2OP <
421 [(set R600_Reg32:$dst, (sub R600_Reg32:$src0, R600_Reg32:$src1))]
424 def MAX_INT : R600_2OP <
426 [(set R600_Reg32:$dst, (AMDGPUsmax R600_Reg32:$src0, R600_Reg32:$src1))]>;
428 def MIN_INT : R600_2OP <
430 [(set R600_Reg32:$dst, (AMDGPUsmin R600_Reg32:$src0, R600_Reg32:$src1))]>;
// MAX_UINT - unsigned 32-bit integer maximum.
// Fix: the pattern used the signed-max node (AMDGPUsmax), which selects the
// wrong operand whenever exactly one input has its sign bit set; an unsigned
// max must use AMDGPUumax (cf. MIN_UINT below, which correctly uses
// AMDGPUumin).
432 def MAX_UINT : R600_2OP <
434 [(set R600_Reg32:$dst, (AMDGPUumax R600_Reg32:$src0, R600_Reg32:$src1))]
437 def MIN_UINT : R600_2OP <
439 [(set R600_Reg32:$dst, (AMDGPUumin R600_Reg32:$src0, R600_Reg32:$src1))]
442 def SETE_INT : R600_2OP <
444 [(set (i32 R600_Reg32:$dst),
445 (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETEQ))]
448 def SETGT_INT : R600_2OP <
450 [(set (i32 R600_Reg32:$dst),
451 (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETGT))]
454 def SETGE_INT : R600_2OP <
456 [(set (i32 R600_Reg32:$dst),
457 (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETGE))]
460 def SETNE_INT : R600_2OP <
462 [(set (i32 R600_Reg32:$dst),
463 (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETNE))]
466 def SETGT_UINT : R600_2OP <
468 [(set (i32 R600_Reg32:$dst),
469 (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETUGT))]
472 def SETGE_UINT : R600_2OP <
474 [(set (i32 R600_Reg32:$dst),
475 (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETUGE))]
478 def CNDE_INT : R600_3OP <
480 [(set (i32 R600_Reg32:$dst),
481 (select R600_Reg32:$src0, R600_Reg32:$src2, R600_Reg32:$src1))]
484 /* Texture instructions */
487 def TEX_LD : R600_TEX <
489 [(set R600_Reg128:$dst, (int_AMDGPU_txf R600_Reg128:$src0, imm:$src1, imm:$src2, imm:$src3, imm:$src4, imm:$src5))]
491 let AsmString = "TEX_LD $dst, $src0, $src1, $src2, $src3, $src4, $src5";
492 let InOperandList = (ins R600_Reg128:$src0, i32imm:$src1, i32imm:$src2, i32imm:$src3, i32imm:$src4, i32imm:$src5);
495 def TEX_GET_TEXTURE_RESINFO : R600_TEX <
496 0x04, "TEX_GET_TEXTURE_RESINFO",
497 [(set R600_Reg128:$dst, (int_AMDGPU_txq R600_Reg128:$src0, imm:$src1, imm:$src2))]
500 def TEX_GET_GRADIENTS_H : R600_TEX <
501 0x07, "TEX_GET_GRADIENTS_H",
502 [(set R600_Reg128:$dst, (int_AMDGPU_ddx R600_Reg128:$src0, imm:$src1, imm:$src2))]
505 def TEX_GET_GRADIENTS_V : R600_TEX <
506 0x08, "TEX_GET_GRADIENTS_V",
507 [(set R600_Reg128:$dst, (int_AMDGPU_ddy R600_Reg128:$src0, imm:$src1, imm:$src2))]
510 def TEX_SET_GRADIENTS_H : R600_TEX <
511 0x0B, "TEX_SET_GRADIENTS_H",
515 def TEX_SET_GRADIENTS_V : R600_TEX <
516 0x0C, "TEX_SET_GRADIENTS_V",
520 def TEX_SAMPLE : R600_TEX <
522 [(set R600_Reg128:$dst, (int_AMDGPU_tex R600_Reg128:$src0, imm:$src1, imm:$src2))]
525 def TEX_SAMPLE_C : R600_TEX <
526 0x18, "TEX_SAMPLE_C",
527 [(set R600_Reg128:$dst, (int_AMDGPU_tex R600_Reg128:$src0, imm:$src1, TEX_SHADOW:$src2))]
530 def TEX_SAMPLE_L : R600_TEX <
531 0x11, "TEX_SAMPLE_L",
532 [(set R600_Reg128:$dst, (int_AMDGPU_txl R600_Reg128:$src0, imm:$src1, imm:$src2))]
535 def TEX_SAMPLE_C_L : R600_TEX <
536 0x19, "TEX_SAMPLE_C_L",
537 [(set R600_Reg128:$dst, (int_AMDGPU_txl R600_Reg128:$src0, imm:$src1, TEX_SHADOW:$src2))]
540 def TEX_SAMPLE_LB : R600_TEX <
541 0x12, "TEX_SAMPLE_LB",
542 [(set R600_Reg128:$dst, (int_AMDGPU_txb R600_Reg128:$src0, imm:$src1, imm:$src2))]
545 def TEX_SAMPLE_C_LB : R600_TEX <
546 0x1A, "TEX_SAMPLE_C_LB",
547 [(set R600_Reg128:$dst, (int_AMDGPU_txb R600_Reg128:$src0, imm:$src1, TEX_SHADOW:$src2))]
550 def TEX_SAMPLE_G : R600_TEX <
551 0x14, "TEX_SAMPLE_G",
555 def TEX_SAMPLE_C_G : R600_TEX <
556 0x1C, "TEX_SAMPLE_C_G",
560 /* Helper classes for common instructions */
562 class MUL_LIT_Common <bits<32> inst> : R600_3OP <
567 class MULADD_Common <bits<32> inst> : R600_3OP <
569 [(set (f32 R600_Reg32:$dst),
570 (IL_mad R600_Reg32:$src0, R600_Reg32:$src1, R600_Reg32:$src2))]
573 class CNDE_Common <bits<32> inst> : R600_3OP <
575 [(set (f32 R600_Reg32:$dst),
576 (select (i32 (fp_to_sint (fneg R600_Reg32:$src0))), (f32 R600_Reg32:$src2), (f32 R600_Reg32:$src1)))]
579 class CNDGT_Common <bits<32> inst> : R600_3OP <
584 class CNDGE_Common <bits<32> inst> : R600_3OP <
586 [(set R600_Reg32:$dst, (int_AMDGPU_cndlt R600_Reg32:$src0, R600_Reg32:$src2, R600_Reg32:$src1))]
589 class DOT4_Common <bits<32> inst> : R600_REDUCTION <
591 (ins R600_Reg128:$src0, R600_Reg128:$src1),
592 "DOT4 $dst $src0, $src1",
593 [(set R600_Reg32:$dst, (int_AMDGPU_dp4 R600_Reg128:$src0, R600_Reg128:$src1))]
596 class CUBE_Common <bits<32> inst> : InstR600 <
598 (outs R600_Reg128:$dst),
599 (ins R600_Reg128:$src),
601 [(set R600_Reg128:$dst, (int_AMDGPU_cube R600_Reg128:$src))],
605 class EXP_IEEE_Common <bits<32> inst> : R600_1OP <
607 [(set R600_Reg32:$dst, (fexp2 R600_Reg32:$src))]
610 class FLT_TO_INT_Common <bits<32> inst> : R600_1OP <
612 [(set R600_Reg32:$dst, (fp_to_sint R600_Reg32:$src))]
615 class INT_TO_FLT_Common <bits<32> inst> : R600_1OP <
617 [(set R600_Reg32:$dst, (sint_to_fp R600_Reg32:$src))]
620 class FLT_TO_UINT_Common <bits<32> inst> : R600_1OP <
622 [(set R600_Reg32:$dst, (fp_to_uint R600_Reg32:$src))]
625 class UINT_TO_FLT_Common <bits<32> inst> : R600_1OP <
627 [(set R600_Reg32:$dst, (uint_to_fp R600_Reg32:$src))]
630 class LOG_CLAMPED_Common <bits<32> inst> : R600_1OP <
635 class LOG_IEEE_Common <bits<32> inst> : R600_1OP <
637 [(set R600_Reg32:$dst, (int_AMDIL_log R600_Reg32:$src))]
640 class LSHL_Common <bits<32> inst> : R600_2OP <
641 inst, "LSHL $dst, $src0, $src1",
642 [(set R600_Reg32:$dst, (shl R600_Reg32:$src0, R600_Reg32:$src1))]
645 class LSHR_Common <bits<32> inst> : R600_2OP <
646 inst, "LSHR $dst, $src0, $src1",
647 [(set R600_Reg32:$dst, (srl R600_Reg32:$src0, R600_Reg32:$src1))]
650 class ASHR_Common <bits<32> inst> : R600_2OP <
651 inst, "ASHR $dst, $src0, $src1",
652 [(set R600_Reg32:$dst, (sra R600_Reg32:$src0, R600_Reg32:$src1))]
655 class MULHI_INT_Common <bits<32> inst> : R600_2OP <
656 inst, "MULHI_INT $dst, $src0, $src1",
657 [(set R600_Reg32:$dst, (mulhs R600_Reg32:$src0, R600_Reg32:$src1))]
660 class MULHI_UINT_Common <bits<32> inst> : R600_2OP <
661 inst, "MULHI $dst, $src0, $src1",
662 [(set R600_Reg32:$dst, (mulhu R600_Reg32:$src0, R600_Reg32:$src1))]
665 class MULLO_INT_Common <bits<32> inst> : R600_2OP <
666 inst, "MULLO_INT $dst, $src0, $src1",
667 [(set R600_Reg32:$dst, (mul R600_Reg32:$src0, R600_Reg32:$src1))]
670 class MULLO_UINT_Common <bits<32> inst> : R600_2OP <
671 inst, "MULLO_UINT $dst, $src0, $src1",
675 class RECIP_CLAMPED_Common <bits<32> inst> : R600_1OP <
676 inst, "RECIP_CLAMPED",
680 class RECIP_IEEE_Common <bits<32> inst> : R600_1OP <
682 [(set R600_Reg32:$dst, (int_AMDGPU_rcp R600_Reg32:$src))]
685 class RECIP_UINT_Common <bits<32> inst> : R600_1OP <
686 inst, "RECIP_INT $dst, $src",
687 [(set R600_Reg32:$dst, (AMDGPUurecip R600_Reg32:$src))]
690 class RECIPSQRT_CLAMPED_Common <bits<32> inst> : R600_1OP <
691 inst, "RECIPSQRT_CLAMPED",
692 [(set R600_Reg32:$dst, (int_AMDGPU_rsq R600_Reg32:$src))]
695 class RECIPSQRT_IEEE_Common <bits<32> inst> : R600_1OP <
696 inst, "RECIPSQRT_IEEE",
700 class SIN_Common <bits<32> inst> : R600_1OP <
705 class COS_Common <bits<32> inst> : R600_1OP <
710 /* Helper patterns for complex intrinsics */
711 /* -------------------------------------- */
713 class DIV_Common <InstR600 recip_ieee> : Pat<
714 (int_AMDGPU_div R600_Reg32:$src0, R600_Reg32:$src1),
715 (MUL R600_Reg32:$src0, (recip_ieee R600_Reg32:$src1))
718 class SSG_Common <InstR600 cndgt, InstR600 cndge> : Pat <
719 (int_AMDGPU_ssg R600_Reg32:$src),
720 (cndgt R600_Reg32:$src, (f32 ONE), (cndge R600_Reg32:$src, (f32 ZERO), (f32 NEG_ONE)))
723 class TGSI_LIT_Z_Common <InstR600 mul_lit, InstR600 log_clamped, InstR600 exp_ieee> : Pat <
724 (int_TGSI_lit_z R600_Reg32:$src_x, R600_Reg32:$src_y, R600_Reg32:$src_w),
725 (exp_ieee (mul_lit (log_clamped (MAX R600_Reg32:$src_y, (f32 ZERO))), R600_Reg32:$src_w, R600_Reg32:$src_x))
728 /* ---------------------- */
729 /* R600 / R700 Only Instructions */
730 /* ---------------------- */
732 let Predicates = [isR600] in {
734 def MUL_LIT_r600 : MUL_LIT_Common<0x0C>;
735 def MULADD_r600 : MULADD_Common<0x10>;
736 def CNDE_r600 : CNDE_Common<0x18>;
737 def CNDGT_r600 : CNDGT_Common<0x19>;
738 def CNDGE_r600 : CNDGE_Common<0x1A>;
739 def DOT4_r600 : DOT4_Common<0x50>;
740 def CUBE_r600 : CUBE_Common<0x52>;
741 def EXP_IEEE_r600 : EXP_IEEE_Common<0x61>;
742 def LOG_CLAMPED_r600 : LOG_CLAMPED_Common<0x62>;
743 def LOG_IEEE_r600 : LOG_IEEE_Common<0x63>;
744 def RECIP_CLAMPED_r600 : RECIP_CLAMPED_Common<0x64>;
745 def RECIP_IEEE_r600 : RECIP_IEEE_Common<0x66>;
746 def RECIPSQRT_CLAMPED_r600 : RECIPSQRT_CLAMPED_Common<0x67>;
747 def RECIPSQRT_IEEE_r600 : RECIPSQRT_IEEE_Common<0x69>;
748 def FLT_TO_INT_r600 : FLT_TO_INT_Common<0x6b>;
749 def INT_TO_FLT_r600 : INT_TO_FLT_Common<0x6c>;
750 def FLT_TO_UINT_r600 : FLT_TO_UINT_Common<0x79>;
751 def UINT_TO_FLT_r600 : UINT_TO_FLT_Common<0x6d>;
752 def SIN_r600 : SIN_Common<0x6E>;
753 def COS_r600 : COS_Common<0x6F>;
754 def ASHR_r600 : ASHR_Common<0x70>;
755 def LSHR_r600 : LSHR_Common<0x71>;
756 def LSHL_r600 : LSHL_Common<0x72>;
757 def MULLO_INT_r600 : MULLO_INT_Common<0x73>;
758 def MULHI_INT_r600 : MULHI_INT_Common<0x74>;
759 def MULLO_UINT_r600 : MULLO_UINT_Common<0x75>;
760 def MULHI_UINT_r600 : MULHI_UINT_Common<0x76>;
761 def RECIP_UINT_r600 : RECIP_UINT_Common <0x78>;
763 def DIV_r600 : DIV_Common<RECIP_IEEE_r600>;
764 def POW_r600 : POW_Common<LOG_IEEE_r600, EXP_IEEE_r600, MUL, GPRF32>;
765 def SSG_r600 : SSG_Common<CNDGT_r600, CNDGE_r600>;
766 def TGSI_LIT_Z_r600 : TGSI_LIT_Z_Common<MUL_LIT_r600, LOG_CLAMPED_r600, EXP_IEEE_r600>;
770 // Helper pattern for normalizing inputs to trigonometric instructions for R700+
772 class TRIG_eg <InstR600 trig, Intrinsic intr> : Pat<
773 (intr R600_Reg32:$src),
774 (trig (MUL (MOV_IMM_I32 (i32 ALU_LITERAL_X), CONST.TWO_PI_INV), R600_Reg32:$src))
777 //===----------------------------------------------------------------------===//
778 // R700 Only instructions
779 //===----------------------------------------------------------------------===//
781 let Predicates = [isR700] in {
782 def SIN_r700 : SIN_Common<0x6E>;
783 def COS_r700 : COS_Common<0x6F>;
785 // R700 normalizes inputs to SIN/COS the same as EG
786 def : TRIG_eg <SIN_r700, int_AMDGPU_sin>;
787 def : TRIG_eg <COS_r700, int_AMDGPU_cos>;
790 //===----------------------------------------------------------------------===//
791 // Evergreen Only instructions
792 //===----------------------------------------------------------------------===//
794 let Predicates = [isEG] in {
796 def RECIP_IEEE_eg : RECIP_IEEE_Common<0x86>;
798 def MULLO_INT_eg : MULLO_INT_Common<0x8F>;
799 def MULHI_INT_eg : MULHI_INT_Common<0x90>;
800 def MULLO_UINT_eg : MULLO_UINT_Common<0x91>;
801 def MULHI_UINT_eg : MULHI_UINT_Common<0x92>;
802 def RECIP_UINT_eg : RECIP_UINT_Common<0x94>;
804 } // End Predicates = [isEG]
806 /* ------------------------------- */
807 /* Evergreen / Cayman Instructions */
808 /* ------------------------------- */
810 let Predicates = [isEGorCayman] in {
812 // BFE_UINT - bit_extract, an optimization for mask and shift
817 // bit_extract = (Input << (32 - Offset - Width)) >> (32 - Width)
822 // (0, 8) = (Input << 24) >> 24 = (Input & 0xff) >> 0
823 // (8, 8) = (Input << 16) >> 24 = (Input & 0xffff) >> 8
824 // (16,8) = (Input << 8) >> 24 = (Input & 0xffffff) >> 16
825 // (24,8) = (Input << 0) >> 24 = (Input & 0xffffffff) >> 24
826 def BFE_UINT_eg : R600_3OP <0x4, "BFE_UINT",
827 [(set R600_Reg32:$dst, (int_AMDIL_bit_extract_u32 R600_Reg32:$src0,
833 def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT",
834 [(set R600_Reg32:$dst, (AMDGPUbitalign R600_Reg32:$src0, R600_Reg32:$src1,
839 def MULADD_eg : MULADD_Common<0x14>;
840 def ASHR_eg : ASHR_Common<0x15>;
841 def LSHR_eg : LSHR_Common<0x16>;
842 def LSHL_eg : LSHL_Common<0x17>;
843 def CNDE_eg : CNDE_Common<0x19>;
844 def CNDGT_eg : CNDGT_Common<0x1A>;
845 def CNDGE_eg : CNDGE_Common<0x1B>;
846 def MUL_LIT_eg : MUL_LIT_Common<0x1F>;
847 def EXP_IEEE_eg : EXP_IEEE_Common<0x81>;
848 def LOG_CLAMPED_eg : LOG_CLAMPED_Common<0x82>;
849 def LOG_IEEE_eg : LOG_IEEE_Common<0x83>;
850 def RECIP_CLAMPED_eg : RECIP_CLAMPED_Common<0x84>;
851 def RECIPSQRT_CLAMPED_eg : RECIPSQRT_CLAMPED_Common<0x87>;
852 def RECIPSQRT_IEEE_eg : RECIPSQRT_IEEE_Common<0x89>;
853 def SIN_eg : SIN_Common<0x8D>;
854 def COS_eg : COS_Common<0x8E>;
855 def DOT4_eg : DOT4_Common<0xBE>;
856 def CUBE_eg : CUBE_Common<0xC0>;
858 def DIV_eg : DIV_Common<RECIP_IEEE_eg>;
859 def POW_eg : POW_Common<LOG_IEEE_eg, EXP_IEEE_eg, MUL, GPRF32>;
860 def SSG_eg : SSG_Common<CNDGT_eg, CNDGE_eg>;
861 def TGSI_LIT_Z_eg : TGSI_LIT_Z_Common<MUL_LIT_eg, LOG_CLAMPED_eg, EXP_IEEE_eg>;
863 def : TRIG_eg <SIN_eg, int_AMDGPU_sin>;
864 def : TRIG_eg <COS_eg, int_AMDGPU_cos>;
866 def FLT_TO_INT_eg : FLT_TO_INT_Common<0x50> {
870 def INT_TO_FLT_eg : INT_TO_FLT_Common<0x9B>;
872 def FLT_TO_UINT_eg : FLT_TO_UINT_Common<0x9A> {
876 def UINT_TO_FLT_eg : UINT_TO_FLT_Common<0x9C>;
878 def : Pat<(fp_to_sint R600_Reg32:$src),
879 (FLT_TO_INT_eg (TRUNC R600_Reg32:$src))>;
881 def : Pat<(fp_to_uint R600_Reg32:$src),
882 (FLT_TO_UINT_eg (TRUNC R600_Reg32:$src))>;
884 //===----------------------------------------------------------------------===//
885 // Memory read/write instructions
886 //===----------------------------------------------------------------------===//
888 let usesCustomInserter = 1 in {
890 def RAT_WRITE_CACHELESS_eg : EG_CF_RAT <0x57, 0x2, 0, (outs),
891 (ins R600_TReg32_X:$rw_gpr, R600_TReg32_X:$index_gpr),
892 "RAT_WRITE_CACHELESS_eg $rw_gpr, $index_gpr",
893 [(global_store (i32 R600_TReg32_X:$rw_gpr), R600_TReg32_X:$index_gpr)]>
896 /* XXX: Have a separate instruction for non-indexed writes. */
910 } // End usesCustomInserter = 1
912 // Floating point global_store
914 (global_store (f32 R600_TReg32_X:$val), R600_TReg32_X:$ptr),
915 (RAT_WRITE_CACHELESS_eg R600_TReg32_X:$val, R600_TReg32_X:$ptr)
918 class VTX_READ_eg <bits<8> buffer_id, dag outs, list<dag> pattern>
919 : InstR600ISA <outs, (ins MEMxi:$ptr), "VTX_READ_eg $dst, $ptr", pattern> {
927 bits<2> FETCH_TYPE = 2;
928 bits<1> FETCH_WHOLE_QUAD = 0;
929 bits<8> BUFFER_ID = buffer_id;
931 // XXX: We can infer this field based on the SRC_GPR. This would allow us
932 // to store vertex addresses in any channel, not just X.
933 bits<2> SRC_SEL_X = 0;
934 bits<6> MEGA_FETCH_COUNT;
940 // The docs say that if this bit is set, then DATA_FORMAT, NUM_FORMAT_ALL,
941 // FORMAT_COMP_ALL, SRF_MODE_ALL, and ENDIAN_SWAP fields will be ignored,
942 // however, based on my testing if USE_CONST_FIELDS is set, then all
943 // these fields need to be set to 0.
944 bits<1> USE_CONST_FIELDS = 0;
946 bits<2> NUM_FORMAT_ALL = 1;
947 bits<1> FORMAT_COMP_ALL = 0;
948 bits<1> SRF_MODE_ALL = 0;
950 // LLVM can only encode 64-bit instructions, so these fields are manually
951 // encoded in R600CodeEmitter
954 // bits<2> ENDIAN_SWAP = 0;
955 // bits<1> CONST_BUF_NO_STRIDE = 0;
956 // bits<1> MEGA_FETCH = 0;
957 // bits<1> ALT_CONST = 0;
958 // bits<2> BUFFER_INDEX_MODE = 0;
961 let Inst{4-0} = VC_INST;
962 let Inst{6-5} = FETCH_TYPE;
963 let Inst{7} = FETCH_WHOLE_QUAD;
964 let Inst{15-8} = BUFFER_ID;
965 let Inst{22-16} = SRC_GPR;
966 let Inst{23} = SRC_REL;
967 let Inst{25-24} = SRC_SEL_X;
968 let Inst{31-26} = MEGA_FETCH_COUNT;
971 let Inst{38-32} = DST_GPR;
972 let Inst{39} = DST_REL;
973 let Inst{40} = 0; // Reserved
974 let Inst{43-41} = DST_SEL_X;
975 let Inst{46-44} = DST_SEL_Y;
976 let Inst{49-47} = DST_SEL_Z;
977 let Inst{52-50} = DST_SEL_W;
978 let Inst{53} = USE_CONST_FIELDS;
979 let Inst{59-54} = DATA_FORMAT;
980 let Inst{61-60} = NUM_FORMAT_ALL;
981 let Inst{62} = FORMAT_COMP_ALL;
982 let Inst{63} = SRF_MODE_ALL;
984 // VTX_WORD2 (LLVM can only encode 64-bit instructions, so WORD2 encoding
985 // is done in R600CodeEmitter
987 // Inst{79-64} = OFFSET;
988 // Inst{81-80} = ENDIAN_SWAP;
989 // Inst{82} = CONST_BUF_NO_STRIDE;
990 // Inst{83} = MEGA_FETCH;
991 // Inst{84} = ALT_CONST;
992 // Inst{86-85} = BUFFER_INDEX_MODE;
993 // Inst{95-86} = 0; Reserved
995 // VTX_WORD3 (Padding)
1000 class VTX_READ_32_eg <bits<8> buffer_id, list<dag> pattern>
1001 : VTX_READ_eg <buffer_id, (outs R600_TReg32_X:$dst), pattern> {
1003 let MEGA_FETCH_COUNT = 4;
1005 let DST_SEL_Y = 7; // Masked
1006 let DST_SEL_Z = 7; // Masked
1007 let DST_SEL_W = 7; // Masked
1008 let DATA_FORMAT = 0xD; // COLOR_32
1010 // This is not really necessary, but there were some GPU hangs that appeared
1011 // to be caused by ALU instructions in the next instruction group that wrote
1012 // to the $ptr registers of the VTX_READ.
1014 // %T3_X<def> = VTX_READ_PARAM_i32_eg %T2_X<kill>, 24
1015 // %T2_X<def> = MOV %ZERO
1016 // Adding this constraint prevents this from happening.
1017 let Constraints = "$ptr.ptr = $dst";
1020 class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern>
1021 : VTX_READ_eg <buffer_id, (outs R600_Reg128:$dst), pattern> {
1023 let MEGA_FETCH_COUNT = 16;
1028 let DATA_FORMAT = 0x22; // COLOR_32_32_32_32
1030 // XXX: Need to force VTX_READ_128 instructions to write to the same register
1031 // that holds its buffer address to avoid potential hangs. We can't use
1032 // the same constraint as VTX_READ_32_eg, because the $ptr.ptr and $dst
1033 // registers are different sizes.
1036 //===----------------------------------------------------------------------===//
1037 // VTX Read from parameter memory space
1038 //===----------------------------------------------------------------------===//
1040 class VTX_READ_PARAM_32_eg <ValueType vt> : VTX_READ_32_eg <0,
1041 [(set (vt R600_TReg32_X:$dst), (load_param ADDRVTX_READ:$ptr))]
1044 def VTX_READ_PARAM_i32_eg : VTX_READ_PARAM_32_eg<i32>;
1045 def VTX_READ_PARAM_f32_eg : VTX_READ_PARAM_32_eg<f32>;
1048 //===----------------------------------------------------------------------===//
1049 // VTX Read from global memory space
1050 //===----------------------------------------------------------------------===//
1054 class VTX_READ_GLOBAL_eg <ValueType vt> : VTX_READ_32_eg <1,
1055 [(set (vt R600_TReg32_X:$dst), (global_load ADDRVTX_READ:$ptr))]
1058 def VTX_READ_GLOBAL_i32_eg : VTX_READ_GLOBAL_eg<i32>;
1059 def VTX_READ_GLOBAL_f32_eg : VTX_READ_GLOBAL_eg<f32>;
1063 class VTX_READ_GLOBAL_128_eg <ValueType vt> : VTX_READ_128_eg <1,
1064 [(set (vt R600_Reg128:$dst), (global_load ADDRVTX_READ:$ptr))]
1067 def VTX_READ_GLOBAL_v4i32_eg : VTX_READ_GLOBAL_128_eg<v4i32>;
1068 def VTX_READ_GLOBAL_v4f32_eg : VTX_READ_GLOBAL_128_eg<v4f32>;
1072 let Predicates = [isCayman] in {
1074 let isVector = 1 in {
1076 def RECIP_IEEE_cm : RECIP_IEEE_Common<0x86>;
1078 def MULLO_INT_cm : MULLO_INT_Common<0x8F>;
1079 def MULHI_INT_cm : MULHI_INT_Common<0x90>;
1080 def MULLO_UINT_cm : MULLO_UINT_Common<0x91>;
1081 def MULHI_UINT_cm : MULHI_UINT_Common<0x92>;
1083 } // End isVector = 1
1085 // RECIP_UINT emulation for Cayman
1087 (AMDGPUurecip R600_Reg32:$src0),
1088 (FLT_TO_UINT_eg (MUL_IEEE (RECIP_IEEE_cm (UINT_TO_FLT_eg R600_Reg32:$src0)),
1089 (MOV_IMM_I32 (i32 ALU_LITERAL_X), 0x4f800000)))
1094 /* Other Instructions */
1096 let isCodeGenOnly = 1 in {
1098 def SWIZZLE : AMDGPUShaderInst <
1099 (outs GPRV4F32:$dst),
1100 (ins GPRV4F32:$src0, i32imm:$src1),
1101 "SWIZZLE $dst, $src0, $src1",
1102 [(set GPRV4F32:$dst, (int_AMDGPU_swizzle GPRV4F32:$src0, imm:$src1))]
1106 def LAST : AMDGPUShaderInst <
1113 def GET_CHAN : AMDGPUShaderInst <
1114 (outs R600_Reg32:$dst),
1115 (ins R600_Reg128:$src0, i32imm:$src1),
1116 "GET_CHAN $dst, $src0, $src1",
// MULLIT - pseudo shader instruction: produces a 128-bit result from three
// 32-bit sources via int_AMDGPU_mullit (part of the LIT lowering).
// Fix: the asm string omitted $src2 even though the instruction consumes
// three sources (cf. TXD, which prints all of its operands), so debug
// output silently hid an operand.
1120 def MULLIT : AMDGPUShaderInst <
1121 (outs R600_Reg128:$dst),
1122 (ins R600_Reg32:$src0, R600_Reg32:$src1, R600_Reg32:$src2),
1123 "MULLIT $dst, $src0, $src1, $src2",
1124 [(set R600_Reg128:$dst, (int_AMDGPU_mullit R600_Reg32:$src0, R600_Reg32:$src1, R600_Reg32:$src2))]
1127 let usesCustomInserter = 1, isPseudo = 1 in {
1129 class R600PreloadInst <string asm, Intrinsic intr> : AMDGPUInst <
1130 (outs R600_TReg32:$dst),
1133 [(set R600_TReg32:$dst, (intr))]
1136 def R600_LOAD_CONST : AMDGPUShaderInst <
1137 (outs R600_Reg32:$dst),
1139 "R600_LOAD_CONST $dst, $src0",
1140 [(set R600_Reg32:$dst, (int_AMDGPU_load_const imm:$src0))]
1143 def RESERVE_REG : AMDGPUShaderInst <
1147 [(int_AMDGPU_reserve_reg imm:$src)]
1150 def TXD: AMDGPUShaderInst <
1151 (outs R600_Reg128:$dst),
1152 (ins R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, i32imm:$src3, i32imm:$src4),
1153 "TXD $dst, $src0, $src1, $src2, $src3, $src4",
1154 [(set R600_Reg128:$dst, (int_AMDGPU_txd R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, imm:$src3, imm:$src4))]
1157 def TXD_SHADOW: AMDGPUShaderInst <
1158 (outs R600_Reg128:$dst),
1159 (ins R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, i32imm:$src3, i32imm:$src4),
1160 "TXD_SHADOW $dst, $src0, $src1, $src2, $src3, $src4",
1161 [(set R600_Reg128:$dst, (int_AMDGPU_txd R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, imm:$src3, TEX_SHADOW:$src4))]
1164 } // End usesCustomInserter = 1, isPseudo = 1
1166 } // End isCodeGenOnly = 1
1168 def CLAMP_R600 : CLAMP <R600_Reg32>;
1169 def FABS_R600 : FABS<R600_Reg32>;
1170 def FNEG_R600 : FNEG<R600_Reg32>;
1172 let usesCustomInserter = 1 in {
1174 def MASK_WRITE : AMDGPUShaderInst <
1176 (ins R600_Reg32:$src),
1181 } // End usesCustomInserter = 1
1183 //===----------------------------------------------------------------------===//
1185 //===----------------------------------------------------------------------===//
1190 (MASK_WRITE (KILLGT (f32 ONE), (f32 ZERO)))
1194 (int_AMDGPU_kill R600_Reg32:$src0),
1195 (MASK_WRITE (KILLGT (f32 ZERO), (f32 R600_Reg32:$src0)))
1200 (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, COND_LT),
1201 (SGT R600_Reg32:$src1, R600_Reg32:$src0)
1206 (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, COND_LE),
1207 (SGE R600_Reg32:$src1, R600_Reg32:$src0)
1210 // SETGT_INT reverse args
1212 (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETLT),
1213 (SETGT_INT R600_Reg32:$src1, R600_Reg32:$src0)
1216 // SETGE_INT reverse args
1218 (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETLE),
1219 (SETGE_INT R600_Reg32:$src1, R600_Reg32:$src0)
1222 // SETGT_UINT reverse args
1224 (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETULT),
1225 (SETGT_UINT R600_Reg32:$src1, R600_Reg32:$src0)
1228 // SETGE_UINT reverse args
// selectcc(src0, src1, SETULE) means "src0 <=u src1", i.e. "src1 >=u src0",
// so the operands must be swapped when lowering to SETGE_UINT. The other
// reverse-args patterns above (SETLT, SETLE, SETULT) already swap their
// operands; this one did not, emitting an inverted unsigned comparison.
1230 (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETULE),
1231 (SETGE_UINT R600_Reg32:$src1, R600_Reg32:$src0)
1234 // The next two patterns are special cases for handling 'true if ordered' and
1235 // 'true if unordered' conditionals. The assumption here is that the behavior of
1236 // SETE and SNE conforms to the Direct3D 10 rules for floating point values
1238 // http://msdn.microsoft.com/en-us/library/windows/desktop/cc308050.aspx#alpha_32_bit
1239 // We assume that SETE returns false when one of the operands is NAN and
1240 // SNE returns true when one of the operands is NaN
1242 //SETE - 'true if ordered'
1244 (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, SETO),
1245 (SETE R600_Reg32:$src0, R600_Reg32:$src1)
1248 //SNE - 'true if unordered'
1250 (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, SETUO),
1251 (SNE R600_Reg32:$src0, R600_Reg32:$src1)
1254 def : Extract_Element <f32, v4f32, R600_Reg128, 0, sel_x>;
1255 def : Extract_Element <f32, v4f32, R600_Reg128, 1, sel_y>;
1256 def : Extract_Element <f32, v4f32, R600_Reg128, 2, sel_z>;
1257 def : Extract_Element <f32, v4f32, R600_Reg128, 3, sel_w>;
1259 def : Insert_Element <f32, v4f32, R600_Reg32, R600_Reg128, 4, sel_x>;
1260 def : Insert_Element <f32, v4f32, R600_Reg32, R600_Reg128, 5, sel_y>;
1261 def : Insert_Element <f32, v4f32, R600_Reg32, R600_Reg128, 6, sel_z>;
1262 def : Insert_Element <f32, v4f32, R600_Reg32, R600_Reg128, 7, sel_w>;
1264 def : Extract_Element <i32, v4i32, R600_Reg128, 0, sel_x>;
1265 def : Extract_Element <i32, v4i32, R600_Reg128, 1, sel_y>;
1266 def : Extract_Element <i32, v4i32, R600_Reg128, 2, sel_z>;
1267 def : Extract_Element <i32, v4i32, R600_Reg128, 3, sel_w>;
1269 def : Insert_Element <i32, v4i32, R600_Reg32, R600_Reg128, 4, sel_x>;
1270 def : Insert_Element <i32, v4i32, R600_Reg32, R600_Reg128, 5, sel_y>;
1271 def : Insert_Element <i32, v4i32, R600_Reg32, R600_Reg128, 6, sel_z>;
1272 def : Insert_Element <i32, v4i32, R600_Reg32, R600_Reg128, 7, sel_w>;
1274 def : Vector_Build <v4f32, R600_Reg32>;
1275 def : Vector_Build <v4i32, R600_Reg32>;
1277 // bitconvert patterns
1279 def : BitConvert <i32, f32, R600_Reg32>;
1280 def : BitConvert <f32, i32, R600_Reg32>;
1281 def : BitConvert <v4f32, v4i32, R600_Reg128>;
1283 } // End isR600toCayman Predicate