glsl: Fold implementation of ir_dereference_array::constant_referenced into wrapper
[mesa.git] / src / glsl / lower_instructions.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file lower_instructions.cpp
26 *
27 * Many GPUs lack native instructions for certain expression operations, and
28 * must replace them with some other expression tree. This pass lowers some
29 * of the most common cases, allowing the lowering code to be implemented once
30 * rather than in each driver backend.
31 *
32 * Currently supported transformations:
33 * - SUB_TO_ADD_NEG
34 * - DIV_TO_MUL_RCP
35 * - INT_DIV_TO_MUL_RCP
36 * - EXP_TO_EXP2
37 * - POW_TO_EXP2
38 * - LOG_TO_LOG2
39 * - MOD_TO_FRACT
40 * - LDEXP_TO_ARITH
41 * - BITFIELD_INSERT_TO_BFM_BFI
42 *
43 * SUB_TO_ADD_NEG:
44 * ---------------
45 * Breaks an ir_binop_sub expression down to add(op0, neg(op1))
46 *
47 * This simplifies expression reassociation, and for many backends
48 * there is no subtract operation separate from adding the negation.
49 * For backends with native subtract operations, they will probably
50 * want to recognize add(op0, neg(op1)) or the other way around to
51 * produce a subtract anyway.
52 *
53 * DIV_TO_MUL_RCP and INT_DIV_TO_MUL_RCP:
54 * --------------------------------------
55 * Breaks an ir_binop_div expression down to op0 * (rcp(op1)).
56 *
57 * Many GPUs don't have a divide instruction (945 and 965 included),
58 * but they do have an RCP instruction to compute an approximate
59 * reciprocal. By breaking the operation down, constant reciprocals
60 * can get constant folded.
61 *
62 * DIV_TO_MUL_RCP only lowers floating point division; INT_DIV_TO_MUL_RCP
63 * handles the integer case, converting to and from floating point so that
64 * RCP is possible.
65 *
66 * EXP_TO_EXP2 and LOG_TO_LOG2:
67 * ----------------------------
68 * Many GPUs don't have a base e log or exponent instruction, but they
69 * do have base 2 versions, so this pass converts exp and log to exp2
70 * and log2 operations.
71 *
72 * POW_TO_EXP2:
73 * -----------
74 * Many older GPUs don't have an x**y instruction. For these GPUs, convert
75 * x**y to 2**(y * log2(x)).
76 *
77 * MOD_TO_FRACT:
78 * -------------
79 * Breaks an ir_binop_mod expression down to (op1 * fract(op0 / op1))
80 *
81 * Many GPUs don't have a MOD instruction (945 and 965 included), and
82 * if we have to break it down like this anyway, it gives an
83 * opportunity to do things like constant fold the (1.0 / op1) easily.
84 *
85 * LDEXP_TO_ARITH:
86 * -------------
87 * Converts ir_binop_ldexp to arithmetic and bit operations.
88 *
89 * BITFIELD_INSERT_TO_BFM_BFI:
90 * ---------------------------
91 * Breaks ir_quadop_bitfield_insert into ir_binop_bfm (bitfield mask) and
92 * ir_triop_bfi (bitfield insert).
93 *
 * Many GPUs implement the bitfieldInsert() built-in from ARB_gpu_shader5
95 * with a pair of instructions.
96 *
97 */
98
99 #include "main/core.h" /* for M_LOG2E */
100 #include "glsl_types.h"
101 #include "ir.h"
102 #include "ir_builder.h"
103 #include "ir_optimization.h"
104
105 using namespace ir_builder;
106
namespace {

/**
 * Visitor that walks the instruction list and rewrites expressions whose
 * lowering was requested via the \c lower bitfield (LOWER_* flags declared
 * in ir_optimization.h -- TODO confirm exact flag header).
 */
class lower_instructions_visitor : public ir_hierarchical_visitor {
public:
   lower_instructions_visitor(unsigned lower)
      : progress(false), lower(lower) { }

   ir_visitor_status visit_leave(ir_expression *);

   /** Set to true whenever any expression was rewritten. */
   bool progress;

private:
   unsigned lower; /** Bitfield of which operations to lower */

   /* Each helper rewrites the given expression in place; see the file
    * header comment for a description of each transformation.
    */
   void sub_to_add_neg(ir_expression *);
   void div_to_mul_rcp(ir_expression *);
   void int_div_to_mul_rcp(ir_expression *);
   void mod_to_fract(ir_expression *);
   void exp_to_exp2(ir_expression *);
   void pow_to_exp2(ir_expression *);
   void log_to_log2(ir_expression *);
   void bitfield_insert_to_bfm_bfi(ir_expression *);
   void ldexp_to_arith(ir_expression *);
};

} /* anonymous namespace */
133
134 /**
135 * Determine if a particular type of lowering should occur
136 */
137 #define lowering(x) (this->lower & x)
138
139 bool
140 lower_instructions(exec_list *instructions, unsigned what_to_lower)
141 {
142 lower_instructions_visitor v(what_to_lower);
143
144 visit_list_elements(&v, instructions);
145 return v.progress;
146 }
147
148 void
149 lower_instructions_visitor::sub_to_add_neg(ir_expression *ir)
150 {
151 ir->operation = ir_binop_add;
152 ir->operands[1] = new(ir) ir_expression(ir_unop_neg, ir->operands[1]->type,
153 ir->operands[1], NULL);
154 this->progress = true;
155 }
156
157 void
158 lower_instructions_visitor::div_to_mul_rcp(ir_expression *ir)
159 {
160 assert(ir->operands[1]->type->is_float());
161
162 /* New expression for the 1.0 / op1 */
163 ir_rvalue *expr;
164 expr = new(ir) ir_expression(ir_unop_rcp,
165 ir->operands[1]->type,
166 ir->operands[1]);
167
168 /* op0 / op1 -> op0 * (1.0 / op1) */
169 ir->operation = ir_binop_mul;
170 ir->operands[1] = expr;
171
172 this->progress = true;
173 }
174
/* Lower integer division to float multiply-by-reciprocal plus conversions.
 * The expression is rewritten in place.
 */
void
lower_instructions_visitor::int_div_to_mul_rcp(ir_expression *ir)
{
   assert(ir->operands[1]->type->is_integer());

   /* Be careful with integer division -- we need to do it as a
    * float and re-truncate, since rcp(n > 1) of an integer would
    * just be 0.
    */
   ir_rvalue *op0, *op1;
   const struct glsl_type *vec_type;

   /* Float vector type matching op1's shape, for the i2f/u2f conversion. */
   vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                      ir->operands[1]->type->vector_elements,
                                      ir->operands[1]->type->matrix_columns);

   if (ir->operands[1]->type->base_type == GLSL_TYPE_INT)
      op1 = new(ir) ir_expression(ir_unop_i2f, vec_type, ir->operands[1], NULL);
   else
      op1 = new(ir) ir_expression(ir_unop_u2f, vec_type, ir->operands[1], NULL);

   op1 = new(ir) ir_expression(ir_unop_rcp, op1->type, op1, NULL);

   /* Same conversion for op0, which may have a different shape. */
   vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                      ir->operands[0]->type->vector_elements,
                                      ir->operands[0]->type->matrix_columns);

   if (ir->operands[0]->type->base_type == GLSL_TYPE_INT)
      op0 = new(ir) ir_expression(ir_unop_i2f, vec_type, ir->operands[0], NULL);
   else
      op0 = new(ir) ir_expression(ir_unop_u2f, vec_type, ir->operands[0], NULL);

   /* The multiply is done in a float type shaped like the result. */
   vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                      ir->type->vector_elements,
                                      ir->type->matrix_columns);

   op0 = new(ir) ir_expression(ir_binop_mul, vec_type, op0, op1);

   /* NOTE: ir->operands[1] still holds the original divisor here, so this
    * base_type check must happen before operands[1] is cleared below.
    */
   if (ir->operands[1]->type->base_type == GLSL_TYPE_INT) {
      ir->operation = ir_unop_f2i;
      ir->operands[0] = op0;
   } else {
      /* Unsigned result: truncate to signed int first, then reinterpret
       * as unsigned (f2i followed by i2u).
       */
      ir->operation = ir_unop_i2u;
      ir->operands[0] = new(ir) ir_expression(ir_unop_f2i, op0);
   }
   ir->operands[1] = NULL;

   this->progress = true;
}
224
225 void
226 lower_instructions_visitor::exp_to_exp2(ir_expression *ir)
227 {
228 ir_constant *log2_e = new(ir) ir_constant(float(M_LOG2E));
229
230 ir->operation = ir_unop_exp2;
231 ir->operands[0] = new(ir) ir_expression(ir_binop_mul, ir->operands[0]->type,
232 ir->operands[0], log2_e);
233 this->progress = true;
234 }
235
236 void
237 lower_instructions_visitor::pow_to_exp2(ir_expression *ir)
238 {
239 ir_expression *const log2_x =
240 new(ir) ir_expression(ir_unop_log2, ir->operands[0]->type,
241 ir->operands[0]);
242
243 ir->operation = ir_unop_exp2;
244 ir->operands[0] = new(ir) ir_expression(ir_binop_mul, ir->operands[1]->type,
245 ir->operands[1], log2_x);
246 ir->operands[1] = NULL;
247 this->progress = true;
248 }
249
250 void
251 lower_instructions_visitor::log_to_log2(ir_expression *ir)
252 {
253 ir->operation = ir_binop_mul;
254 ir->operands[0] = new(ir) ir_expression(ir_unop_log2, ir->operands[0]->type,
255 ir->operands[0], NULL);
256 ir->operands[1] = new(ir) ir_constant(float(1.0 / M_LOG2E));
257 this->progress = true;
258 }
259
/* Rewrite mod(op0, op1) in place as op1 * fract(op0 / op1).
 *
 * op1 is stashed in a temporary variable (inserted before the current
 * base instruction) because it is referenced twice in the replacement
 * expression and must only be evaluated once.
 */
void
lower_instructions_visitor::mod_to_fract(ir_expression *ir)
{
   ir_variable *temp = new(ir) ir_variable(ir->operands[1]->type, "mod_b",
                                           ir_var_temporary);
   this->base_ir->insert_before(temp);

   /* mod_b = op1; emitted ahead of the instruction being rewritten. */
   ir_assignment *const assign =
      new(ir) ir_assignment(new(ir) ir_dereference_variable(temp),
                            ir->operands[1], NULL);

   this->base_ir->insert_before(assign);

   ir_expression *const div_expr =
      new(ir) ir_expression(ir_binop_div, ir->operands[0]->type,
                            ir->operands[0],
                            new(ir) ir_dereference_variable(temp));

   /* Don't generate new IR that would need to be lowered in an additional
    * pass.
    */
   if (lowering(DIV_TO_MUL_RCP))
      div_to_mul_rcp(div_expr);

   ir_rvalue *expr = new(ir) ir_expression(ir_unop_fract,
                                           ir->operands[0]->type,
                                           div_expr,
                                           NULL);

   /* Final form: mod_b * fract(op0 / mod_b). */
   ir->operation = ir_binop_mul;
   ir->operands[0] = new(ir) ir_dereference_variable(temp);
   ir->operands[1] = expr;
   this->progress = true;
}
294
295 void
296 lower_instructions_visitor::bitfield_insert_to_bfm_bfi(ir_expression *ir)
297 {
298 /* Translates
299 * ir_quadop_bitfield_insert base insert offset bits
300 * into
301 * ir_triop_bfi (ir_binop_bfm bits offset) insert base
302 */
303
304 ir_rvalue *base_expr = ir->operands[0];
305
306 ir->operation = ir_triop_bfi;
307 ir->operands[0] = new(ir) ir_expression(ir_binop_bfm,
308 ir->type->get_base_type(),
309 ir->operands[3],
310 ir->operands[2]);
311 /* ir->operands[1] is still the value to insert. */
312 ir->operands[2] = base_expr;
313 ir->operands[3] = NULL;
314
315 this->progress = true;
316 }
317
void
lower_instructions_visitor::ldexp_to_arith(ir_expression *ir)
{
   /* Translates
    *    ir_binop_ldexp x exp
    * into
    *
    *    extracted_biased_exp = rshift(bitcast_f2i(abs(x)), exp_shift);
    *    resulting_biased_exp = extracted_biased_exp + exp;
    *
    *    if (resulting_biased_exp < 1) {
    *       return copysign(0.0, x);
    *    }
    *
    *    return bitcast_u2f((bitcast_f2u(x) & sign_mantissa_mask) |
    *                       lshift(i2u(resulting_biased_exp), exp_shift));
    *
    * which we can't actually implement as such, since the GLSL IR doesn't
    * have vectorized if-statements. We actually implement it without branches
    * using conditional-select:
    *
    *    extracted_biased_exp = rshift(bitcast_f2i(abs(x)), exp_shift);
    *    resulting_biased_exp = extracted_biased_exp + exp;
    *
    *    is_not_zero_or_underflow = gequal(resulting_biased_exp, 1);
    *    x = csel(is_not_zero_or_underflow, x, copysign(0.0f, x));
    *    resulting_biased_exp = csel(is_not_zero_or_underflow,
    *                                resulting_biased_exp, 0);
    *
    *    return bitcast_u2f((bitcast_f2u(x) & sign_mantissa_mask) |
    *                       lshift(i2u(resulting_biased_exp), exp_shift));
    */

   const unsigned vec_elem = ir->type->vector_elements;

   /* Types */
   const glsl_type *ivec = glsl_type::get_instance(GLSL_TYPE_INT, vec_elem, 1);
   const glsl_type *bvec = glsl_type::get_instance(GLSL_TYPE_BOOL, vec_elem, 1);

   /* Constants -- all tied to the IEEE 754 single-precision layout:
    * 1 sign bit, 8 exponent bits starting at bit 23, 23 mantissa bits.
    */
   ir_constant *zeroi = ir_constant::zero(ir, ivec);

   ir_constant *sign_mask = new(ir) ir_constant(0x80000000u, vec_elem);

   ir_constant *exp_shift = new(ir) ir_constant(23u, vec_elem);
   ir_constant *exp_width = new(ir) ir_constant(8u, vec_elem);

   /* Temporary variables */
   ir_variable *x = new(ir) ir_variable(ir->type, "x", ir_var_temporary);
   ir_variable *exp = new(ir) ir_variable(ivec, "exp", ir_var_temporary);

   ir_variable *zero_sign_x = new(ir) ir_variable(ir->type, "zero_sign_x",
                                                  ir_var_temporary);

   ir_variable *extracted_biased_exp =
      new(ir) ir_variable(ivec, "extracted_biased_exp", ir_var_temporary);
   ir_variable *resulting_biased_exp =
      new(ir) ir_variable(ivec, "resulting_biased_exp", ir_var_temporary);

   ir_variable *is_not_zero_or_underflow =
      new(ir) ir_variable(bvec, "is_not_zero_or_underflow", ir_var_temporary);

   /* All new instructions are inserted before the statement currently
    * being visited; the ldexp expression itself is rewritten in place
    * at the end.
    */
   ir_instruction &i = *base_ir;

   /* Copy <x> and <exp> arguments. */
   i.insert_before(x);
   i.insert_before(assign(x, ir->operands[0]));
   i.insert_before(exp);
   i.insert_before(assign(exp, ir->operands[1]));

   /* Extract the biased exponent from <x>. */
   i.insert_before(extracted_biased_exp);
   i.insert_before(assign(extracted_biased_exp,
                          rshift(bitcast_f2i(abs(x)), exp_shift)));

   i.insert_before(resulting_biased_exp);
   i.insert_before(assign(resulting_biased_exp,
                          add(extracted_biased_exp, exp)));

   /* Test if result is ±0.0, subnormal, or underflow by checking if the
    * resulting biased exponent would be less than 0x1. If so, the result is
    * 0.0 with the sign of x. (Actually, invert the conditions so that
    * immediate values are the second arguments, which is better for i965)
    */
   i.insert_before(zero_sign_x);
   i.insert_before(assign(zero_sign_x,
                          bitcast_u2f(bit_and(bitcast_f2u(x), sign_mask))));

   i.insert_before(is_not_zero_or_underflow);
   i.insert_before(assign(is_not_zero_or_underflow,
                          gequal(resulting_biased_exp,
                                 new(ir) ir_constant(0x1, vec_elem))));
   i.insert_before(assign(x, csel(is_not_zero_or_underflow,
                                  x, zero_sign_x)));
   i.insert_before(assign(resulting_biased_exp,
                          csel(is_not_zero_or_underflow,
                               resulting_biased_exp, zeroi)));

   /* We could test for overflows by checking if the resulting biased exponent
    * would be greater than 0xFE. Turns out we don't need to because the GLSL
    * spec says:
    *
    *    "If this product is too large to be represented in the
    *     floating-point type, the result is undefined."
    */

   /* exp_shift is already owned by an instruction inserted above, so a
    * fresh clone is needed for this second use.
    */
   ir_constant *exp_shift_clone = exp_shift->clone(ir, NULL);
   ir->operation = ir_unop_bitcast_i2f;
   ir->operands[0] = bitfield_insert(bitcast_f2i(x), resulting_biased_exp,
                                     exp_shift_clone, exp_width);
   ir->operands[1] = NULL;

   /* Don't generate new IR that would need to be lowered in an additional
    * pass.
    */
   if (lowering(BITFIELD_INSERT_TO_BFM_BFI))
      bitfield_insert_to_bfm_bfi(ir->operands[0]->as_expression());

   this->progress = true;
}
438
439 ir_visitor_status
440 lower_instructions_visitor::visit_leave(ir_expression *ir)
441 {
442 switch (ir->operation) {
443 case ir_binop_sub:
444 if (lowering(SUB_TO_ADD_NEG))
445 sub_to_add_neg(ir);
446 break;
447
448 case ir_binop_div:
449 if (ir->operands[1]->type->is_integer() && lowering(INT_DIV_TO_MUL_RCP))
450 int_div_to_mul_rcp(ir);
451 else if (ir->operands[1]->type->is_float() && lowering(DIV_TO_MUL_RCP))
452 div_to_mul_rcp(ir);
453 break;
454
455 case ir_unop_exp:
456 if (lowering(EXP_TO_EXP2))
457 exp_to_exp2(ir);
458 break;
459
460 case ir_unop_log:
461 if (lowering(LOG_TO_LOG2))
462 log_to_log2(ir);
463 break;
464
465 case ir_binop_mod:
466 if (lowering(MOD_TO_FRACT) && ir->type->is_float())
467 mod_to_fract(ir);
468 break;
469
470 case ir_binop_pow:
471 if (lowering(POW_TO_EXP2))
472 pow_to_exp2(ir);
473 break;
474
475 case ir_quadop_bitfield_insert:
476 if (lowering(BITFIELD_INSERT_TO_BFM_BFI))
477 bitfield_insert_to_bfm_bfi(ir);
478 break;
479
480 case ir_binop_ldexp:
481 if (lowering(LDEXP_TO_ARITH))
482 ldexp_to_arith(ir);
483 break;
484
485 default:
486 return visit_continue;
487 }
488
489 return visit_continue;
490 }