/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file lower_instructions.cpp
 *
 * Many GPUs lack native instructions for certain expression operations, so
 * drivers must replace them with some other expression tree.  This pass
 * lowers some of the most common cases, allowing the lowering code to be
 * implemented once rather than in each driver backend.
 *
 * Currently supported transformations:
 * - SUB_TO_ADD_NEG
 * - DIV_TO_MUL_RCP
 * - INT_DIV_TO_MUL_RCP
 * - EXP_TO_EXP2
 * - POW_TO_EXP2
 * - LOG_TO_LOG2
 * - MOD_TO_FRACT
 * - LDEXP_TO_ARITH
 * - LRP_TO_ARITH
 * - BITFIELD_INSERT_TO_BFM_BFI
 *
 * SUB_TO_ADD_NEG:
 * ---------------
 * Breaks an ir_binop_sub expression down to add(op0, neg(op1)).
 *
 * This simplifies expression reassociation, and for many backends
 * there is no subtract operation separate from adding the negation.
 * For backends with native subtract operations, they will probably
 * want to recognize add(op0, neg(op1)) or the other way around to
 * produce a subtract anyway.
 *
 * DIV_TO_MUL_RCP and INT_DIV_TO_MUL_RCP:
 * --------------------------------------
 * Breaks an ir_binop_div expression down to op0 * (rcp(op1)).
 *
 * Many GPUs don't have a divide instruction (945 and 965 included),
 * but they do have an RCP instruction to compute an approximate
 * reciprocal.  By breaking the operation down, constant reciprocals
 * can get constant folded.
 *
 * DIV_TO_MUL_RCP only lowers floating-point division; INT_DIV_TO_MUL_RCP
 * handles the integer case, converting to and from floating point so that
 * RCP is possible.
 *
 * EXP_TO_EXP2 and LOG_TO_LOG2:
 * ----------------------------
 * Many GPUs don't have a base e log or exponent instruction, but they
 * do have base 2 versions, so this pass converts exp and log to exp2
 * and log2 operations.
 *
 * POW_TO_EXP2:
 * ------------
 * Many older GPUs don't have an x**y instruction.  For these GPUs, convert
 * x**y to 2**(y * log2(x)).
 *
 * MOD_TO_FRACT:
 * -------------
 * Breaks an ir_binop_mod expression down to (op1 * fract(op0 / op1)).
 *
 * Many GPUs don't have a MOD instruction (945 and 965 included), and
 * if we have to break it down like this anyway, it gives an
 * opportunity to do things like constant fold the (1.0 / op1) easily.
 *
 * LDEXP_TO_ARITH:
 * ---------------
 * Converts ir_binop_ldexp to arithmetic and bit operations.
 *
 * LRP_TO_ARITH:
 * -------------
 * Converts ir_triop_lrp to (op0 * (1.0f - op2)) + (op1 * op2).
 *
 * BITFIELD_INSERT_TO_BFM_BFI:
 * ---------------------------
 * Breaks ir_quadop_bitfield_insert into ir_binop_bfm (bitfield mask) and
 * ir_triop_bfi (bitfield insert).
 *
 * Many GPUs implement the bitfieldInsert() built-in from ARB_gpu_shader5
 * with a pair of instructions.
 *
 */

#include "main/core.h" /* for M_LOG2E */
#include "glsl_types.h"
#include "ir.h"
#include "ir_builder.h"
#include "ir_optimization.h"

using namespace ir_builder;

namespace {

class lower_instructions_visitor : public ir_hierarchical_visitor {
public:
   lower_instructions_visitor(unsigned lower)
      : progress(false), lower(lower) { }

   ir_visitor_status visit_leave(ir_expression *);

   bool progress;

private:
   unsigned lower; /**< Bitfield of which operations to lower */

   void sub_to_add_neg(ir_expression *);
   void div_to_mul_rcp(ir_expression *);
   void int_div_to_mul_rcp(ir_expression *);
   void mod_to_fract(ir_expression *);
   void exp_to_exp2(ir_expression *);
   void pow_to_exp2(ir_expression *);
   void log_to_log2(ir_expression *);
   void lrp_to_arith(ir_expression *);
   void bitfield_insert_to_bfm_bfi(ir_expression *);
   void ldexp_to_arith(ir_expression *);
};

} /* anonymous namespace */

/**
 * Determine if a particular type of lowering should occur
 */
#define lowering(x) (this->lower & x)

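/* Entry point for the pass.  A driver backend selects the lowerings it needs
 * by OR-ing together the flags listed in the header comment, e.g. (an
 * illustrative call, not taken from any particular driver):
 *
 *    bool progress = lower_instructions(shader->ir,
 *                                       SUB_TO_ADD_NEG | LOG_TO_LOG2);
 */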
bool
lower_instructions(exec_list *instructions, unsigned what_to_lower)
{
   lower_instructions_visitor v(what_to_lower);

   visit_list_elements(&v, instructions);
   return v.progress;
}

void
lower_instructions_visitor::sub_to_add_neg(ir_expression *ir)
{
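   /* sub(op0, op1) becomes add(op0, neg(op1)). */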
   ir->operation = ir_binop_add;
   ir->operands[1] = new(ir) ir_expression(ir_unop_neg, ir->operands[1]->type,
                                           ir->operands[1], NULL);
   this->progress = true;
}

void
lower_instructions_visitor::div_to_mul_rcp(ir_expression *ir)
{
   assert(ir->operands[1]->type->is_float());

   /* New expression for the 1.0 / op1 */
   ir_rvalue *expr;
   expr = new(ir) ir_expression(ir_unop_rcp,
                                ir->operands[1]->type,
                                ir->operands[1]);

   /* op0 / op1 -> op0 * (1.0 / op1) */
   ir->operation = ir_binop_mul;
   ir->operands[1] = expr;

   this->progress = true;
}

void
lower_instructions_visitor::int_div_to_mul_rcp(ir_expression *ir)
{
   assert(ir->operands[1]->type->is_integer());

   /* Be careful with integer division -- we need to do it as a
    * float and re-truncate, since rcp(n > 1) of an integer would
    * just be 0.
    */
   ir_rvalue *op0, *op1;
   const struct glsl_type *vec_type;

   vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                      ir->operands[1]->type->vector_elements,
                                      ir->operands[1]->type->matrix_columns);

   if (ir->operands[1]->type->base_type == GLSL_TYPE_INT)
      op1 = new(ir) ir_expression(ir_unop_i2f, vec_type, ir->operands[1], NULL);
   else
      op1 = new(ir) ir_expression(ir_unop_u2f, vec_type, ir->operands[1], NULL);

   op1 = new(ir) ir_expression(ir_unop_rcp, op1->type, op1, NULL);

   vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                      ir->operands[0]->type->vector_elements,
                                      ir->operands[0]->type->matrix_columns);

   if (ir->operands[0]->type->base_type == GLSL_TYPE_INT)
      op0 = new(ir) ir_expression(ir_unop_i2f, vec_type, ir->operands[0], NULL);
   else
      op0 = new(ir) ir_expression(ir_unop_u2f, vec_type, ir->operands[0], NULL);

   vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                      ir->type->vector_elements,
                                      ir->type->matrix_columns);

   op0 = new(ir) ir_expression(ir_binop_mul, vec_type, op0, op1);

   if (ir->operands[1]->type->base_type == GLSL_TYPE_INT) {
      ir->operation = ir_unop_f2i;
      ir->operands[0] = op0;
   } else {
      ir->operation = ir_unop_i2u;
      ir->operands[0] = new(ir) ir_expression(ir_unop_f2i, op0);
   }
   ir->operands[1] = NULL;

   this->progress = true;
}

void
lower_instructions_visitor::exp_to_exp2(ir_expression *ir)
{
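   /* exp(x) becomes exp2(x * log2(e)). */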
   ir_constant *log2_e = new(ir) ir_constant(float(M_LOG2E));

   ir->operation = ir_unop_exp2;
   ir->operands[0] = new(ir) ir_expression(ir_binop_mul, ir->operands[0]->type,
                                           ir->operands[0], log2_e);
   this->progress = true;
}

void
lower_instructions_visitor::pow_to_exp2(ir_expression *ir)
{
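   /* pow(x, y) becomes exp2(y * log2(x)). */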
   ir_expression *const log2_x =
      new(ir) ir_expression(ir_unop_log2, ir->operands[0]->type,
                            ir->operands[0]);

   ir->operation = ir_unop_exp2;
   ir->operands[0] = new(ir) ir_expression(ir_binop_mul, ir->operands[1]->type,
                                           ir->operands[1], log2_x);
   ir->operands[1] = NULL;
   this->progress = true;
}

void
lower_instructions_visitor::log_to_log2(ir_expression *ir)
{
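   /* log(x) becomes log2(x) * (1 / log2(e)), i.e. log2(x) * ln(2). */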
   ir->operation = ir_binop_mul;
   ir->operands[0] = new(ir) ir_expression(ir_unop_log2, ir->operands[0]->type,
                                           ir->operands[0], NULL);
   ir->operands[1] = new(ir) ir_constant(float(1.0 / M_LOG2E));
   this->progress = true;
}

void
lower_instructions_visitor::mod_to_fract(ir_expression *ir)
{
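   /* mod(op0, op1) becomes op1 * fract(op0 / op1).  op1 is referenced twice
    * in the lowered form, so stash it in a temporary first.
    */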
   ir_variable *temp = new(ir) ir_variable(ir->operands[1]->type, "mod_b",
                                           ir_var_temporary);
   this->base_ir->insert_before(temp);

   ir_assignment *const assign =
      new(ir) ir_assignment(new(ir) ir_dereference_variable(temp),
                            ir->operands[1], NULL);

   this->base_ir->insert_before(assign);

   ir_expression *const div_expr =
      new(ir) ir_expression(ir_binop_div, ir->operands[0]->type,
                            ir->operands[0],
                            new(ir) ir_dereference_variable(temp));

   /* Don't generate new IR that would need to be lowered in an additional
    * pass.
    */
   if (lowering(DIV_TO_MUL_RCP))
      div_to_mul_rcp(div_expr);

   ir_rvalue *expr = new(ir) ir_expression(ir_unop_fract,
                                           ir->operands[0]->type,
                                           div_expr,
                                           NULL);

   ir->operation = ir_binop_mul;
   ir->operands[0] = new(ir) ir_dereference_variable(temp);
   ir->operands[1] = expr;
   this->progress = true;
}

void
lower_instructions_visitor::lrp_to_arith(ir_expression *ir)
{
   /* (lrp x y a) -> x*(1-a) + y*a */

   /* Save op2 */
   ir_variable *temp = new(ir) ir_variable(ir->operands[2]->type, "lrp_factor",
                                           ir_var_temporary);
   this->base_ir->insert_before(temp);
   this->base_ir->insert_before(assign(temp, ir->operands[2]));

   ir_constant *one = new(ir) ir_constant(1.0f);

   ir->operation = ir_binop_add;
   ir->operands[0] = mul(ir->operands[0], sub(one, temp));
   ir->operands[1] = mul(ir->operands[1], temp);
   ir->operands[2] = NULL;

   this->progress = true;
}

void
lower_instructions_visitor::bitfield_insert_to_bfm_bfi(ir_expression *ir)
{
   /* Translates
    *    ir_quadop_bitfield_insert base insert offset bits
    * into
    *    ir_triop_bfi (ir_binop_bfm bits offset) insert base
    */

   ir_rvalue *base_expr = ir->operands[0];

   ir->operation = ir_triop_bfi;
   ir->operands[0] = new(ir) ir_expression(ir_binop_bfm,
                                           ir->type->get_base_type(),
                                           ir->operands[3],
                                           ir->operands[2]);
   /* ir->operands[1] is still the value to insert. */
   ir->operands[2] = base_expr;
   ir->operands[3] = NULL;

   this->progress = true;
}

void
lower_instructions_visitor::ldexp_to_arith(ir_expression *ir)
{
   /* Translates
    *    ir_binop_ldexp x exp
    * into
    *
    *    extracted_biased_exp = rshift(bitcast_f2i(abs(x)), exp_shift);
    *    resulting_biased_exp = extracted_biased_exp + exp;
    *
    *    if (resulting_biased_exp < 1) {
    *       return copysign(0.0, x);
    *    }
    *
    *    return bitcast_u2f((bitcast_f2u(x) & sign_mantissa_mask) |
    *                       lshift(i2u(resulting_biased_exp), exp_shift));
    *
    * which we can't actually implement as such, since the GLSL IR doesn't
    * have vectorized if-statements.  We actually implement it without branches
    * using conditional-select:
    *
    *    extracted_biased_exp = rshift(bitcast_f2i(abs(x)), exp_shift);
    *    resulting_biased_exp = extracted_biased_exp + exp;
    *
    *    is_not_zero_or_underflow = gequal(resulting_biased_exp, 1);
    *    x = csel(is_not_zero_or_underflow, x, copysign(0.0f, x));
    *    resulting_biased_exp = csel(is_not_zero_or_underflow,
    *                                resulting_biased_exp, 0);
    *
    *    return bitcast_u2f((bitcast_f2u(x) & sign_mantissa_mask) |
    *                       lshift(i2u(resulting_biased_exp), exp_shift));
    */

   const unsigned vec_elem = ir->type->vector_elements;

   /* Types */
   const glsl_type *ivec = glsl_type::get_instance(GLSL_TYPE_INT, vec_elem, 1);
   const glsl_type *bvec = glsl_type::get_instance(GLSL_TYPE_BOOL, vec_elem, 1);

   /* Constants */
   ir_constant *zeroi = ir_constant::zero(ir, ivec);
   ir_constant *zerof = ir_constant::zero(ir, ir->type);

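   /* IEEE-754 single-precision layout: bit 31 is the sign, bits 30..23 are
    * the biased exponent, and bits 22..0 are the mantissa.  Hence the masks
    * and the shift by 23 below.
    */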
   ir_constant *sign_mantissa_mask = new(ir) ir_constant(0x807fffffu, vec_elem);
   ir_constant *sign_mask = new(ir) ir_constant(0x80000000u, vec_elem);

   ir_constant *exp_shift = new(ir) ir_constant(23u, vec_elem);

   /* Temporary variables */
   ir_variable *x = new(ir) ir_variable(ir->type, "x", ir_var_temporary);
   ir_variable *exp = new(ir) ir_variable(ivec, "exp", ir_var_temporary);

   ir_variable *zero_sign_x = new(ir) ir_variable(ir->type, "zero_sign_x",
                                                  ir_var_temporary);

   ir_variable *extracted_biased_exp =
      new(ir) ir_variable(ivec, "extracted_biased_exp", ir_var_temporary);
   ir_variable *resulting_biased_exp =
      new(ir) ir_variable(ivec, "resulting_biased_exp", ir_var_temporary);

   ir_variable *is_not_zero_or_underflow =
      new(ir) ir_variable(bvec, "is_not_zero_or_underflow", ir_var_temporary);

   ir_instruction &i = *base_ir;

   /* Copy <x> and <exp> arguments. */
   i.insert_before(x);
   i.insert_before(assign(x, ir->operands[0]));
   i.insert_before(exp);
   i.insert_before(assign(exp, ir->operands[1]));

   /* Extract the biased exponent from <x>. */
   i.insert_before(extracted_biased_exp);
   i.insert_before(assign(extracted_biased_exp,
                          rshift(bitcast_f2i(abs(x)), exp_shift)));

   i.insert_before(resulting_biased_exp);
   i.insert_before(assign(resulting_biased_exp,
                          add(extracted_biased_exp, exp)));

   /* Test if result is ±0.0, subnormal, or underflow by checking if the
    * resulting biased exponent would be less than 0x1.  If so, the result is
    * 0.0 with the sign of x.  (Actually, invert the conditions so that
    * immediate values are the second arguments, which is better for i965)
    */
   i.insert_before(zero_sign_x);
   i.insert_before(assign(zero_sign_x,
                          bitcast_u2f(bit_or(bit_and(bitcast_f2u(x), sign_mask),
                                             bitcast_f2u(zerof)))));

   i.insert_before(is_not_zero_or_underflow);
   i.insert_before(assign(is_not_zero_or_underflow,
                          gequal(resulting_biased_exp,
                                 new(ir) ir_constant(0x1, vec_elem))));
   i.insert_before(assign(x, csel(is_not_zero_or_underflow,
                                  x, zero_sign_x)));
   i.insert_before(assign(resulting_biased_exp,
                          csel(is_not_zero_or_underflow,
                               resulting_biased_exp, zeroi)));

   /* We could test for overflows by checking if the resulting biased exponent
    * would be greater than 0xFE.  Turns out we don't need to because the GLSL
    * spec says:
    *
    *    "If this product is too large to be represented in the
    *     floating-point type, the result is undefined."
    */

   ir_constant *exp_shift_clone = exp_shift->clone(ir, NULL);
   ir->operation = ir_unop_bitcast_u2f;
   ir->operands[0] = bit_or(bit_and(bitcast_f2u(x), sign_mantissa_mask),
                            lshift(i2u(resulting_biased_exp), exp_shift_clone));
   ir->operands[1] = NULL;

   this->progress = true;
}

ir_visitor_status
lower_instructions_visitor::visit_leave(ir_expression *ir)
{
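   /* Dispatch on the expression's operation and apply the matching lowering,
    * but only when the corresponding flag was requested.
    */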
   switch (ir->operation) {
   case ir_binop_sub:
      if (lowering(SUB_TO_ADD_NEG))
         sub_to_add_neg(ir);
      break;

   case ir_binop_div:
      if (ir->operands[1]->type->is_integer() && lowering(INT_DIV_TO_MUL_RCP))
         int_div_to_mul_rcp(ir);
      else if (ir->operands[1]->type->is_float() && lowering(DIV_TO_MUL_RCP))
         div_to_mul_rcp(ir);
      break;

   case ir_unop_exp:
      if (lowering(EXP_TO_EXP2))
         exp_to_exp2(ir);
      break;

   case ir_unop_log:
      if (lowering(LOG_TO_LOG2))
         log_to_log2(ir);
      break;

   case ir_binop_mod:
      if (lowering(MOD_TO_FRACT) && ir->type->is_float())
         mod_to_fract(ir);
      break;

   case ir_binop_pow:
      if (lowering(POW_TO_EXP2))
         pow_to_exp2(ir);
      break;

   case ir_triop_lrp:
      if (lowering(LRP_TO_ARITH))
         lrp_to_arith(ir);
      break;

   case ir_quadop_bitfield_insert:
      if (lowering(BITFIELD_INSERT_TO_BFM_BFI))
         bitfield_insert_to_bfm_bfi(ir);
      break;

   case ir_binop_ldexp:
      if (lowering(LDEXP_TO_ARITH))
         ldexp_to_arith(ir);
      break;

   default:
      return visit_continue;
   }

   return visit_continue;
}