//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the parent TargetLowering class for hardware code gen targets.
//
//===----------------------------------------------------------------------===//
14 #include "AMDGPUISelLowering.h"
15 #include "AMDILIntrinsicInfo.h"
16 #include "AMDGPUUtil.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
21 AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine
&TM
) :
22 AMDILTargetLowering(TM
)
24 // We need to custom lower some of the intrinsics
25 setOperationAction(ISD::INTRINSIC_WO_CHAIN
, MVT::Other
, Custom
);
27 setOperationAction(ISD::SELECT_CC
, MVT::f32
, Custom
);
28 setOperationAction(ISD::SELECT_CC
, MVT::i32
, Custom
);
30 // Library functions. These default to Expand, but we have instructions
32 setOperationAction(ISD::FCEIL
, MVT::f32
, Legal
);
36 SDValue
AMDGPUTargetLowering::LowerOperation(SDValue Op
, SelectionDAG
&DAG
)
39 switch (Op
.getOpcode()) {
40 default: return AMDILTargetLowering::LowerOperation(Op
, DAG
);
41 case ISD::INTRINSIC_WO_CHAIN
: return LowerINTRINSIC_WO_CHAIN(Op
, DAG
);
42 case ISD::SELECT_CC
: return LowerSELECT_CC(Op
, DAG
);
46 SDValue
AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op
,
47 SelectionDAG
&DAG
) const
49 unsigned IntrinsicID
= cast
<ConstantSDNode
>(Op
.getOperand(0))->getZExtValue();
50 DebugLoc DL
= Op
.getDebugLoc();
51 EVT VT
= Op
.getValueType();
53 switch (IntrinsicID
) {
55 case AMDGPUIntrinsic::AMDIL_abs
:
56 return LowerIntrinsicIABS(Op
, DAG
);
57 case AMDGPUIntrinsic::AMDGPU_lrp
:
58 return LowerIntrinsicLRP(Op
, DAG
);
59 case AMDGPUIntrinsic::AMDIL_fraction
:
60 return DAG
.getNode(AMDGPUISD::FRACT
, DL
, VT
, Op
.getOperand(1));
61 case AMDGPUIntrinsic::AMDIL_mad
:
62 return DAG
.getNode(AMDILISD::MAD
, DL
, VT
, Op
.getOperand(1),
63 Op
.getOperand(2), Op
.getOperand(3));
64 case AMDGPUIntrinsic::AMDIL_max
:
65 return DAG
.getNode(AMDGPUISD::FMAX
, DL
, VT
, Op
.getOperand(1),
67 case AMDGPUIntrinsic::AMDGPU_imax
:
68 return DAG
.getNode(AMDGPUISD::SMAX
, DL
, VT
, Op
.getOperand(1),
70 case AMDGPUIntrinsic::AMDGPU_umax
:
71 return DAG
.getNode(AMDGPUISD::UMAX
, DL
, VT
, Op
.getOperand(1),
73 case AMDGPUIntrinsic::AMDIL_min
:
74 return DAG
.getNode(AMDGPUISD::FMIN
, DL
, VT
, Op
.getOperand(1),
76 case AMDGPUIntrinsic::AMDGPU_imin
:
77 return DAG
.getNode(AMDGPUISD::SMIN
, DL
, VT
, Op
.getOperand(1),
79 case AMDGPUIntrinsic::AMDGPU_umin
:
80 return DAG
.getNode(AMDGPUISD::UMIN
, DL
, VT
, Op
.getOperand(1),
82 case AMDGPUIntrinsic::AMDIL_round_posinf
:
83 return DAG
.getNode(ISD::FCEIL
, DL
, VT
, Op
.getOperand(1));
87 ///IABS(a) = SMAX(sub(0, a), a)
88 SDValue
AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op
,
89 SelectionDAG
&DAG
) const
92 DebugLoc DL
= Op
.getDebugLoc();
93 EVT VT
= Op
.getValueType();
94 SDValue Neg
= DAG
.getNode(ISD::SUB
, DL
, VT
, DAG
.getConstant(0, VT
),
97 return DAG
.getNode(AMDGPUISD::SMAX
, DL
, VT
, Neg
, Op
.getOperand(1));
100 /// Linear Interpolation
101 /// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
102 SDValue
AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op
,
103 SelectionDAG
&DAG
) const
105 DebugLoc DL
= Op
.getDebugLoc();
106 EVT VT
= Op
.getValueType();
107 SDValue OneSubA
= DAG
.getNode(ISD::FSUB
, DL
, VT
,
108 DAG
.getConstantFP(1.0f
, MVT::f32
),
110 SDValue OneSubAC
= DAG
.getNode(ISD::FMUL
, DL
, VT
, OneSubA
,
112 return DAG
.getNode(AMDILISD::MAD
, DL
, VT
, Op
.getOperand(1),
117 SDValue
AMDGPUTargetLowering::LowerSELECT_CC(SDValue Op
,
118 SelectionDAG
&DAG
) const
120 DebugLoc DL
= Op
.getDebugLoc();
121 EVT VT
= Op
.getValueType();
123 SDValue LHS
= Op
.getOperand(0);
124 SDValue RHS
= Op
.getOperand(1);
125 SDValue True
= Op
.getOperand(2);
126 SDValue False
= Op
.getOperand(3);
127 SDValue CC
= Op
.getOperand(4);
128 ISD::CondCode CCOpcode
= cast
<CondCodeSDNode
>(CC
)->get();
131 // LHS and RHS are guaranteed to be the same value type
132 EVT CompareVT
= LHS
.getValueType();
134 // We need all the operands of SELECT_CC to have the same value type, so if
135 // necessary we need to convert LHS and RHS to be the same type True and
136 // False. True and False are guaranteed to have the same type as this
139 if (CompareVT
!= VT
) {
140 ISD::NodeType ConversionOp
= ISD::DELETED_NODE
;
141 if (VT
== MVT::f32
&& CompareVT
== MVT::i32
) {
142 if (isUnsignedIntSetCC(CCOpcode
)) {
143 ConversionOp
= ISD::UINT_TO_FP
;
145 ConversionOp
= ISD::SINT_TO_FP
;
147 } else if (VT
== MVT::i32
&& CompareVT
== MVT::f32
) {
148 ConversionOp
= ISD::FP_TO_SINT
;
150 // I don't think there will be any other type pairings.
151 assert(!"Unhandled operand type parings in SELECT_CC");
153 // XXX Check the value of LHS and RHS and avoid creating sequences like
155 LHS
= DAG
.getNode(ConversionOp
, DL
, VT
, LHS
);
156 RHS
= DAG
.getNode(ConversionOp
, DL
, VT
, RHS
);
159 // If True is a hardware TRUE value and False is a hardware FALSE value or
160 // vice-versa we can handle this with a native instruction (SET* instructions).
161 if ((isHWTrueValue(True
) && isHWFalseValue(False
))) {
162 return DAG
.getNode(ISD::SELECT_CC
, DL
, VT
, LHS
, RHS
, True
, False
, CC
);
165 // XXX If True is a hardware TRUE value and False is a hardware FALSE value,
166 // we can handle this with a native instruction, but we need to swap true
167 // and false and change the conditional.
168 if (isHWTrueValue(False
) && isHWFalseValue(True
)) {
171 // XXX Check if we can lower this to a SELECT or if it is supported by a native
172 // operation. (The code below does this but we don't have the Instruction
173 // selection patterns to do this yet.
175 if (isZero(LHS
) || isZero(RHS
)) {
176 SDValue Cond
= (isZero(LHS
) ? RHS
: LHS
);
187 // We can lower to select
194 return DAG
.getNode(ISD::SELECT
, DL
, VT
, Cond
, True
, False
);
196 // Supported by a native operation (CNDGE, CNDGT)
197 return DAG
.getNode(ISD::SELECT_CC
, DL
, VT
, LHS
, RHS
, True
, False
, CC
);
202 // If we make it this for it means we have no native instructions to handle
203 // this SELECT_CC, so we must lower it.
204 SDValue HWTrue
, HWFalse
;
206 if (VT
== MVT::f32
) {
207 HWTrue
= DAG
.getConstantFP(1.0f
, VT
);
208 HWFalse
= DAG
.getConstantFP(0.0f
, VT
);
209 } else if (VT
== MVT::i32
) {
210 HWTrue
= DAG
.getConstant(-1, VT
);
211 HWFalse
= DAG
.getConstant(0, VT
);
214 assert(!"Unhandled value type in LowerSELECT_CC");
217 // Lower this unsupported SELECT_CC into a combination of two supported
218 // SELECT_CC operations.
219 SDValue Cond
= DAG
.getNode(ISD::SELECT_CC
, DL
, VT
, LHS
, RHS
, HWTrue
, HWFalse
, CC
);
221 return DAG
.getNode(ISD::SELECT
, DL
, VT
, Cond
, True
, False
);
224 //===----------------------------------------------------------------------===//
226 //===----------------------------------------------------------------------===//
228 bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op
) const
230 if (ConstantFPSDNode
* CFP
= dyn_cast
<ConstantFPSDNode
>(Op
)) {
231 return CFP
->isExactlyValue(1.0);
233 if (ConstantSDNode
*C
= dyn_cast
<ConstantSDNode
>(Op
)) {
234 return C
->isAllOnesValue();
239 bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op
) const
241 if (ConstantFPSDNode
* CFP
= dyn_cast
<ConstantFPSDNode
>(Op
)) {
242 return CFP
->getValueAPF().isZero();
244 if (ConstantSDNode
*C
= dyn_cast
<ConstantSDNode
>(Op
)) {
245 return C
->isNullValue();
250 void AMDGPUTargetLowering::addLiveIn(MachineInstr
* MI
,
251 MachineFunction
* MF
, MachineRegisterInfo
& MRI
,
252 const TargetInstrInfo
* TII
, unsigned reg
) const
254 AMDGPU::utilAddLiveIn(MF
, MRI
, TII
, reg
, MI
->getOperand(0).getReg());
257 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
259 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode
) const
262 default: return AMDILTargetLowering::getTargetNodeName(Opcode
);
264 NODE_NAME_CASE(FRACT
)