2 * Copyright © 2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
25 * \file lower_instructions.cpp
27 * Many GPUs lack native instructions for certain expression operations, and
28 * must replace them with some other expression tree. This pass lowers some
29 * of the most common cases, allowing the lowering code to be implemented once
30 * rather than in each driver backend.
32 * Currently supported transformations:
35 * - INT_DIV_TO_MUL_RCP
41 * - BITFIELD_INSERT_TO_BFM_BFI
45 * Breaks an ir_binop_sub expression down to add(op0, neg(op1))
47 * This simplifies expression reassociation, and for many backends
48 * there is no subtract operation separate from adding the negation.
49 * For backends with native subtract operations, they will probably
50 * want to recognize add(op0, neg(op1)) or the other way around to
51 * produce a subtract anyway.
53 * DIV_TO_MUL_RCP and INT_DIV_TO_MUL_RCP:
54 * --------------------------------------
55 * Breaks an ir_binop_div expression down to op0 * (rcp(op1)).
57 * Many GPUs don't have a divide instruction (945 and 965 included),
58 * but they do have an RCP instruction to compute an approximate
59 * reciprocal. By breaking the operation down, constant reciprocals
60 * can get constant folded.
62 * DIV_TO_MUL_RCP only lowers floating point division; INT_DIV_TO_MUL_RCP
63 * handles the integer case, converting to and from floating point so that
66 * EXP_TO_EXP2 and LOG_TO_LOG2:
67 * ----------------------------
68 * Many GPUs don't have a base e log or exponent instruction, but they
69 * do have base 2 versions, so this pass converts exp and log to exp2
70 * and log2 operations.
74 * Many older GPUs don't have an x**y instruction. For these GPUs, convert
75 * x**y to 2**(y * log2(x)).
79 * Breaks an ir_binop_mod expression down to (op1 * fract(op0 / op1))
81 * Many GPUs don't have a MOD instruction (945 and 965 included), and
82 * if we have to break it down like this anyway, it gives an
83 * opportunity to do things like constant fold the (1.0 / op1) easily.
87 * Converts ir_binop_ldexp to arithmetic and bit operations.
89 * BITFIELD_INSERT_TO_BFM_BFI:
90 * ---------------------------
91 * Breaks ir_quadop_bitfield_insert into ir_binop_bfm (bitfield mask) and
92 * ir_triop_bfi (bitfield insert).
 * Many GPUs implement the bitfieldInsert() built-in from ARB_gpu_shader5
 * with a pair of instructions.
99 #include "main/core.h" /* for M_LOG2E */
100 #include "glsl_types.h"
102 #include "ir_builder.h"
103 #include "ir_optimization.h"
105 using namespace ir_builder
;
109 class lower_instructions_visitor
: public ir_hierarchical_visitor
{
111 lower_instructions_visitor(unsigned lower
)
112 : progress(false), lower(lower
) { }
114 ir_visitor_status
visit_leave(ir_expression
*);
119 unsigned lower
; /** Bitfield of which operations to lower */
121 void sub_to_add_neg(ir_expression
*);
122 void div_to_mul_rcp(ir_expression
*);
123 void int_div_to_mul_rcp(ir_expression
*);
124 void mod_to_fract(ir_expression
*);
125 void exp_to_exp2(ir_expression
*);
126 void pow_to_exp2(ir_expression
*);
127 void log_to_log2(ir_expression
*);
128 void bitfield_insert_to_bfm_bfi(ir_expression
*);
129 void ldexp_to_arith(ir_expression
*);
132 } /* anonymous namespace */
/**
 * Determine if a particular type of lowering should occur
 */
#define lowering(x) (this->lower & x)
140 lower_instructions(exec_list
*instructions
, unsigned what_to_lower
)
142 lower_instructions_visitor
v(what_to_lower
);
144 visit_list_elements(&v
, instructions
);
149 lower_instructions_visitor::sub_to_add_neg(ir_expression
*ir
)
151 ir
->operation
= ir_binop_add
;
152 ir
->operands
[1] = new(ir
) ir_expression(ir_unop_neg
, ir
->operands
[1]->type
,
153 ir
->operands
[1], NULL
);
154 this->progress
= true;
158 lower_instructions_visitor::div_to_mul_rcp(ir_expression
*ir
)
160 assert(ir
->operands
[1]->type
->is_float());
162 /* New expression for the 1.0 / op1 */
164 expr
= new(ir
) ir_expression(ir_unop_rcp
,
165 ir
->operands
[1]->type
,
168 /* op0 / op1 -> op0 * (1.0 / op1) */
169 ir
->operation
= ir_binop_mul
;
170 ir
->operands
[1] = expr
;
172 this->progress
= true;
176 lower_instructions_visitor::int_div_to_mul_rcp(ir_expression
*ir
)
178 assert(ir
->operands
[1]->type
->is_integer());
180 /* Be careful with integer division -- we need to do it as a
181 * float and re-truncate, since rcp(n > 1) of an integer would
184 ir_rvalue
*op0
, *op1
;
185 const struct glsl_type
*vec_type
;
187 vec_type
= glsl_type::get_instance(GLSL_TYPE_FLOAT
,
188 ir
->operands
[1]->type
->vector_elements
,
189 ir
->operands
[1]->type
->matrix_columns
);
191 if (ir
->operands
[1]->type
->base_type
== GLSL_TYPE_INT
)
192 op1
= new(ir
) ir_expression(ir_unop_i2f
, vec_type
, ir
->operands
[1], NULL
);
194 op1
= new(ir
) ir_expression(ir_unop_u2f
, vec_type
, ir
->operands
[1], NULL
);
196 op1
= new(ir
) ir_expression(ir_unop_rcp
, op1
->type
, op1
, NULL
);
198 vec_type
= glsl_type::get_instance(GLSL_TYPE_FLOAT
,
199 ir
->operands
[0]->type
->vector_elements
,
200 ir
->operands
[0]->type
->matrix_columns
);
202 if (ir
->operands
[0]->type
->base_type
== GLSL_TYPE_INT
)
203 op0
= new(ir
) ir_expression(ir_unop_i2f
, vec_type
, ir
->operands
[0], NULL
);
205 op0
= new(ir
) ir_expression(ir_unop_u2f
, vec_type
, ir
->operands
[0], NULL
);
207 vec_type
= glsl_type::get_instance(GLSL_TYPE_FLOAT
,
208 ir
->type
->vector_elements
,
209 ir
->type
->matrix_columns
);
211 op0
= new(ir
) ir_expression(ir_binop_mul
, vec_type
, op0
, op1
);
213 if (ir
->operands
[1]->type
->base_type
== GLSL_TYPE_INT
) {
214 ir
->operation
= ir_unop_f2i
;
215 ir
->operands
[0] = op0
;
217 ir
->operation
= ir_unop_i2u
;
218 ir
->operands
[0] = new(ir
) ir_expression(ir_unop_f2i
, op0
);
220 ir
->operands
[1] = NULL
;
222 this->progress
= true;
226 lower_instructions_visitor::exp_to_exp2(ir_expression
*ir
)
228 ir_constant
*log2_e
= new(ir
) ir_constant(float(M_LOG2E
));
230 ir
->operation
= ir_unop_exp2
;
231 ir
->operands
[0] = new(ir
) ir_expression(ir_binop_mul
, ir
->operands
[0]->type
,
232 ir
->operands
[0], log2_e
);
233 this->progress
= true;
237 lower_instructions_visitor::pow_to_exp2(ir_expression
*ir
)
239 ir_expression
*const log2_x
=
240 new(ir
) ir_expression(ir_unop_log2
, ir
->operands
[0]->type
,
243 ir
->operation
= ir_unop_exp2
;
244 ir
->operands
[0] = new(ir
) ir_expression(ir_binop_mul
, ir
->operands
[1]->type
,
245 ir
->operands
[1], log2_x
);
246 ir
->operands
[1] = NULL
;
247 this->progress
= true;
251 lower_instructions_visitor::log_to_log2(ir_expression
*ir
)
253 ir
->operation
= ir_binop_mul
;
254 ir
->operands
[0] = new(ir
) ir_expression(ir_unop_log2
, ir
->operands
[0]->type
,
255 ir
->operands
[0], NULL
);
256 ir
->operands
[1] = new(ir
) ir_constant(float(1.0 / M_LOG2E
));
257 this->progress
= true;
261 lower_instructions_visitor::mod_to_fract(ir_expression
*ir
)
263 ir_variable
*temp
= new(ir
) ir_variable(ir
->operands
[1]->type
, "mod_b",
265 this->base_ir
->insert_before(temp
);
267 ir_assignment
*const assign
=
268 new(ir
) ir_assignment(new(ir
) ir_dereference_variable(temp
),
269 ir
->operands
[1], NULL
);
271 this->base_ir
->insert_before(assign
);
273 ir_expression
*const div_expr
=
274 new(ir
) ir_expression(ir_binop_div
, ir
->operands
[0]->type
,
276 new(ir
) ir_dereference_variable(temp
));
278 /* Don't generate new IR that would need to be lowered in an additional
281 if (lowering(DIV_TO_MUL_RCP
))
282 div_to_mul_rcp(div_expr
);
284 ir_rvalue
*expr
= new(ir
) ir_expression(ir_unop_fract
,
285 ir
->operands
[0]->type
,
289 ir
->operation
= ir_binop_mul
;
290 ir
->operands
[0] = new(ir
) ir_dereference_variable(temp
);
291 ir
->operands
[1] = expr
;
292 this->progress
= true;
296 lower_instructions_visitor::bitfield_insert_to_bfm_bfi(ir_expression
*ir
)
299 * ir_quadop_bitfield_insert base insert offset bits
301 * ir_triop_bfi (ir_binop_bfm bits offset) insert base
304 ir_rvalue
*base_expr
= ir
->operands
[0];
306 ir
->operation
= ir_triop_bfi
;
307 ir
->operands
[0] = new(ir
) ir_expression(ir_binop_bfm
,
308 ir
->type
->get_base_type(),
311 /* ir->operands[1] is still the value to insert. */
312 ir
->operands
[2] = base_expr
;
313 ir
->operands
[3] = NULL
;
315 this->progress
= true;
319 lower_instructions_visitor::ldexp_to_arith(ir_expression
*ir
)
322 * ir_binop_ldexp x exp
325 * extracted_biased_exp = rshift(bitcast_f2i(abs(x)), exp_shift);
326 * resulting_biased_exp = extracted_biased_exp + exp;
328 * if (resulting_biased_exp < 1) {
329 * return copysign(0.0, x);
332 * return bitcast_u2f((bitcast_f2u(x) & sign_mantissa_mask) |
333 * lshift(i2u(resulting_biased_exp), exp_shift));
335 * which we can't actually implement as such, since the GLSL IR doesn't
336 * have vectorized if-statements. We actually implement it without branches
337 * using conditional-select:
339 * extracted_biased_exp = rshift(bitcast_f2i(abs(x)), exp_shift);
340 * resulting_biased_exp = extracted_biased_exp + exp;
342 * is_not_zero_or_underflow = gequal(resulting_biased_exp, 1);
343 * x = csel(is_not_zero_or_underflow, x, copysign(0.0f, x));
344 * resulting_biased_exp = csel(is_not_zero_or_underflow,
345 * resulting_biased_exp, 0);
347 * return bitcast_u2f((bitcast_f2u(x) & sign_mantissa_mask) |
348 * lshift(i2u(resulting_biased_exp), exp_shift));
351 const unsigned vec_elem
= ir
->type
->vector_elements
;
354 const glsl_type
*ivec
= glsl_type::get_instance(GLSL_TYPE_INT
, vec_elem
, 1);
355 const glsl_type
*bvec
= glsl_type::get_instance(GLSL_TYPE_BOOL
, vec_elem
, 1);
358 ir_constant
*zeroi
= ir_constant::zero(ir
, ivec
);
360 ir_constant
*sign_mask
= new(ir
) ir_constant(0x80000000u
, vec_elem
);
362 ir_constant
*exp_shift
= new(ir
) ir_constant(23u, vec_elem
);
363 ir_constant
*exp_width
= new(ir
) ir_constant(8u, vec_elem
);
365 /* Temporary variables */
366 ir_variable
*x
= new(ir
) ir_variable(ir
->type
, "x", ir_var_temporary
);
367 ir_variable
*exp
= new(ir
) ir_variable(ivec
, "exp", ir_var_temporary
);
369 ir_variable
*zero_sign_x
= new(ir
) ir_variable(ir
->type
, "zero_sign_x",
372 ir_variable
*extracted_biased_exp
=
373 new(ir
) ir_variable(ivec
, "extracted_biased_exp", ir_var_temporary
);
374 ir_variable
*resulting_biased_exp
=
375 new(ir
) ir_variable(ivec
, "resulting_biased_exp", ir_var_temporary
);
377 ir_variable
*is_not_zero_or_underflow
=
378 new(ir
) ir_variable(bvec
, "is_not_zero_or_underflow", ir_var_temporary
);
380 ir_instruction
&i
= *base_ir
;
382 /* Copy <x> and <exp> arguments. */
384 i
.insert_before(assign(x
, ir
->operands
[0]));
385 i
.insert_before(exp
);
386 i
.insert_before(assign(exp
, ir
->operands
[1]));
388 /* Extract the biased exponent from <x>. */
389 i
.insert_before(extracted_biased_exp
);
390 i
.insert_before(assign(extracted_biased_exp
,
391 rshift(bitcast_f2i(abs(x
)), exp_shift
)));
393 i
.insert_before(resulting_biased_exp
);
394 i
.insert_before(assign(resulting_biased_exp
,
395 add(extracted_biased_exp
, exp
)));
397 /* Test if result is ±0.0, subnormal, or underflow by checking if the
398 * resulting biased exponent would be less than 0x1. If so, the result is
399 * 0.0 with the sign of x. (Actually, invert the conditions so that
400 * immediate values are the second arguments, which is better for i965)
402 i
.insert_before(zero_sign_x
);
403 i
.insert_before(assign(zero_sign_x
,
404 bitcast_u2f(bit_and(bitcast_f2u(x
), sign_mask
))));
406 i
.insert_before(is_not_zero_or_underflow
);
407 i
.insert_before(assign(is_not_zero_or_underflow
,
408 gequal(resulting_biased_exp
,
409 new(ir
) ir_constant(0x1, vec_elem
))));
410 i
.insert_before(assign(x
, csel(is_not_zero_or_underflow
,
412 i
.insert_before(assign(resulting_biased_exp
,
413 csel(is_not_zero_or_underflow
,
414 resulting_biased_exp
, zeroi
)));
416 /* We could test for overflows by checking if the resulting biased exponent
417 * would be greater than 0xFE. Turns out we don't need to because the GLSL
420 * "If this product is too large to be represented in the
421 * floating-point type, the result is undefined."
424 ir_constant
*exp_shift_clone
= exp_shift
->clone(ir
, NULL
);
425 ir
->operation
= ir_unop_bitcast_i2f
;
426 ir
->operands
[0] = bitfield_insert(bitcast_f2i(x
), resulting_biased_exp
,
427 exp_shift_clone
, exp_width
);
428 ir
->operands
[1] = NULL
;
430 /* Don't generate new IR that would need to be lowered in an additional
433 if (lowering(BITFIELD_INSERT_TO_BFM_BFI
))
434 bitfield_insert_to_bfm_bfi(ir
->operands
[0]->as_expression());
436 this->progress
= true;
440 lower_instructions_visitor::visit_leave(ir_expression
*ir
)
442 switch (ir
->operation
) {
444 if (lowering(SUB_TO_ADD_NEG
))
449 if (ir
->operands
[1]->type
->is_integer() && lowering(INT_DIV_TO_MUL_RCP
))
450 int_div_to_mul_rcp(ir
);
451 else if (ir
->operands
[1]->type
->is_float() && lowering(DIV_TO_MUL_RCP
))
456 if (lowering(EXP_TO_EXP2
))
461 if (lowering(LOG_TO_LOG2
))
466 if (lowering(MOD_TO_FRACT
) && ir
->type
->is_float())
471 if (lowering(POW_TO_EXP2
))
475 case ir_quadop_bitfield_insert
:
476 if (lowering(BITFIELD_INSERT_TO_BFM_BFI
))
477 bitfield_insert_to_bfm_bfi(ir
);
481 if (lowering(LDEXP_TO_ARITH
))
486 return visit_continue
;
489 return visit_continue
;