radeon: use _mesa_get_current_tex_object() in radeonSetTexBuffer2()
[mesa.git] / src / glsl / opt_algebraic.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file opt_algebraic.cpp
26 *
 27  * Takes advantage of associativity, commutativity, and other algebraic
28 * properties to simplify expressions.
29 */
30
31 #include "ir.h"
32 #include "ir_visitor.h"
33 #include "ir_rvalue_visitor.h"
34 #include "ir_optimization.h"
35 #include "ir_builder.h"
36 #include "glsl_types.h"
37
38 using namespace ir_builder;
39
namespace {

/**
 * Visitor that walks every rvalue in the instruction stream and replaces
 * expressions with algebraically simplified equivalents (see
 * handle_expression() for the individual identities applied).
 */

class ir_algebraic_visitor : public ir_rvalue_visitor {
public:
   ir_algebraic_visitor()
   {
      /* No simplification has been performed yet. */
      this->progress = false;
      /* Lazily initialized from the first visited expression. */
      this->mem_ctx = NULL;
   }

   virtual ~ir_algebraic_visitor()
   {
   }

   /* Returns a simplified replacement for \c ir, or \c ir itself when no
    * identity applies.
    */
   ir_rvalue *handle_expression(ir_expression *ir);
   /* ir_rvalue_visitor hook: simplifies *rvalue in place. */
   void handle_rvalue(ir_rvalue **rvalue);
   /* Pushes the constant operand of \c ir1 down into subtree \c ir2 so two
    * constants end up in the same expression and can later be folded.
    */
   bool reassociate_constant(ir_expression *ir1,
                             int const_index,
                             ir_constant *constant,
                             ir_expression *ir2);
   /* Swaps operand \c op1 of \c ir1 with operand \c op2 of \c ir2. */
   void reassociate_operands(ir_expression *ir1,
                             int op1,
                             ir_expression *ir2,
                             int op2);
   /* Broadcasts a scalar \c operand out to a vector when \c expr was
    * vector-typed.
    */
   ir_rvalue *swizzle_if_required(ir_expression *expr,
                                  ir_rvalue *operand);

   /* ralloc context used for newly built replacement IR nodes. */
   void *mem_ctx;

   /* True if any simplification was made; reported by do_algebraic(). */
   bool progress;
};

} /* unnamed namespace */
77
78 static inline bool
79 is_vec_zero(ir_constant *ir)
80 {
81 return (ir == NULL) ? false : ir->is_zero();
82 }
83
84 static inline bool
85 is_vec_one(ir_constant *ir)
86 {
87 return (ir == NULL) ? false : ir->is_one();
88 }
89
90 static inline bool
91 is_vec_two(ir_constant *ir)
92 {
93 return (ir == NULL) ? false : ir->is_value(2.0, 2);
94 }
95
96 static inline bool
97 is_vec_negative_one(ir_constant *ir)
98 {
99 return (ir == NULL) ? false : ir->is_negative_one();
100 }
101
102 static inline bool
103 is_vec_basis(ir_constant *ir)
104 {
105 return (ir == NULL) ? false : ir->is_basis();
106 }
107
108 static void
109 update_type(ir_expression *ir)
110 {
111 if (ir->operands[0]->type->is_vector())
112 ir->type = ir->operands[0]->type;
113 else
114 ir->type = ir->operands[1]->type;
115 }
116
117 void
118 ir_algebraic_visitor::reassociate_operands(ir_expression *ir1,
119 int op1,
120 ir_expression *ir2,
121 int op2)
122 {
123 ir_rvalue *temp = ir2->operands[op2];
124 ir2->operands[op2] = ir1->operands[op1];
125 ir1->operands[op1] = temp;
126
127 /* Update the type of ir2. The type of ir1 won't have changed --
128 * base types matched, and at least one of the operands of the 2
129 * binops is still a vector if any of them were.
130 */
131 update_type(ir2);
132
133 this->progress = true;
134 }
135
136 /**
137 * Reassociates a constant down a tree of adds or multiplies.
138 *
139 * Consider (2 * (a * (b * 0.5))). We want to send up with a * b.
140 */
141 bool
142 ir_algebraic_visitor::reassociate_constant(ir_expression *ir1, int const_index,
143 ir_constant *constant,
144 ir_expression *ir2)
145 {
146 if (!ir2 || ir1->operation != ir2->operation)
147 return false;
148
149 /* Don't want to even think about matrices. */
150 if (ir1->operands[0]->type->is_matrix() ||
151 ir1->operands[1]->type->is_matrix() ||
152 ir2->operands[0]->type->is_matrix() ||
153 ir2->operands[1]->type->is_matrix())
154 return false;
155
156 ir_constant *ir2_const[2];
157 ir2_const[0] = ir2->operands[0]->constant_expression_value();
158 ir2_const[1] = ir2->operands[1]->constant_expression_value();
159
160 if (ir2_const[0] && ir2_const[1])
161 return false;
162
163 if (ir2_const[0]) {
164 reassociate_operands(ir1, const_index, ir2, 1);
165 return true;
166 } else if (ir2_const[1]) {
167 reassociate_operands(ir1, const_index, ir2, 0);
168 return true;
169 }
170
171 if (reassociate_constant(ir1, const_index, constant,
172 ir2->operands[0]->as_expression())) {
173 update_type(ir2);
174 return true;
175 }
176
177 if (reassociate_constant(ir1, const_index, constant,
178 ir2->operands[1]->as_expression())) {
179 update_type(ir2);
180 return true;
181 }
182
183 return false;
184 }
185
186 /* When eliminating an expression and just returning one of its operands,
187 * we may need to swizzle that operand out to a vector if the expression was
188 * vector type.
189 */
190 ir_rvalue *
191 ir_algebraic_visitor::swizzle_if_required(ir_expression *expr,
192 ir_rvalue *operand)
193 {
194 if (expr->type->is_vector() && operand->type->is_scalar()) {
195 return new(mem_ctx) ir_swizzle(operand, 0, 0, 0, 0,
196 expr->type->vector_elements);
197 } else
198 return operand;
199 }
200
/**
 * Applies algebraic identities to a single expression tree.
 *
 * Returns either \p ir itself (possibly with reassociated operands) or a
 * newly built replacement rvalue allocated out of \c mem_ctx.  Expressions
 * with matrix-typed operands are returned unchanged.
 */
ir_rvalue *
ir_algebraic_visitor::handle_expression(ir_expression *ir)
{
   ir_constant *op_const[4] = {NULL, NULL, NULL, NULL};
   ir_expression *op_expr[4] = {NULL, NULL, NULL, NULL};
   unsigned int i;

   assert(ir->get_num_operands() <= 4);
   for (i = 0; i < ir->get_num_operands(); i++) {
      /* Don't want to even think about matrices. */
      if (ir->operands[i]->type->is_matrix())
         return ir;

      /* Cache each operand's constant value (NULL if non-constant) and its
       * view as a sub-expression (NULL if not an expression).
       */
      op_const[i] = ir->operands[i]->constant_expression_value();
      op_expr[i] = ir->operands[i]->as_expression();
   }

   /* Lazily pick up the ralloc context of the IR being visited. */
   if (this->mem_ctx == NULL)
      this->mem_ctx = ralloc_parent(ir);

   switch (ir->operation) {
   case ir_unop_abs:
      if (op_expr[0] == NULL)
         break;

      /* abs(abs(x)) == abs(x); abs(neg(x)) == abs(x). */
      switch (op_expr[0]->operation) {
      case ir_unop_abs:
      case ir_unop_neg:
         return abs(op_expr[0]->operands[0]);
      default:
         break;
      }
      break;

   case ir_unop_neg:
      if (op_expr[0] == NULL)
         break;

      /* neg(neg(x)) == x. */
      if (op_expr[0]->operation == ir_unop_neg) {
         return op_expr[0]->operands[0];
      }
      break;

   case ir_unop_logic_not: {
      /* not(cmp(a, b)) == inverse_cmp(a, b); new_op stays ir_unop_logic_not
       * as a sentinel meaning "no inverse comparison found".
       */
      enum ir_expression_operation new_op = ir_unop_logic_not;

      if (op_expr[0] == NULL)
         break;

      switch (op_expr[0]->operation) {
      case ir_binop_less:    new_op = ir_binop_gequal;  break;
      case ir_binop_greater: new_op = ir_binop_lequal;  break;
      case ir_binop_lequal:  new_op = ir_binop_greater; break;
      case ir_binop_gequal:  new_op = ir_binop_less;    break;
      case ir_binop_equal:   new_op = ir_binop_nequal;  break;
      case ir_binop_nequal:  new_op = ir_binop_equal;   break;
      case ir_binop_all_equal:   new_op = ir_binop_any_nequal;  break;
      case ir_binop_any_nequal:  new_op = ir_binop_all_equal;   break;

      default:
         /* The default case handler is here to silence a warning from GCC.
          */
         break;
      }

      if (new_op != ir_unop_logic_not) {
         return new(mem_ctx) ir_expression(new_op,
                                           ir->type,
                                           op_expr[0]->operands[0],
                                           op_expr[0]->operands[1]);
      }

      break;
   }

   case ir_binop_add:
      /* 0 + x == x; x + 0 == x. */
      if (is_vec_zero(op_const[0]))
         return ir->operands[1];
      if (is_vec_zero(op_const[1]))
         return ir->operands[0];

      /* Reassociate addition of constants so that we can do constant
       * folding.
       */
      if (op_const[0] && !op_const[1])
         reassociate_constant(ir, 0, op_const[0], op_expr[1]);
      if (op_const[1] && !op_const[0])
         reassociate_constant(ir, 1, op_const[1], op_expr[0]);

      /* Replace (-x + y) * a + x and commutative variations with lrp(x, y, a).
       *
       * (-x + y) * a + x
       * (x * -a) + (y * a) + x
       * x + (x * -a) + (y * a)
       * x * (1 - a) + y * a
       * lrp(x, y, a)
       */
      for (int mul_pos = 0; mul_pos < 2; mul_pos++) {
         ir_expression *mul = op_expr[mul_pos];

         if (!mul || mul->operation != ir_binop_mul)
            continue;

         /* Multiply found on one of the operands. Now check for an
          * inner addition operation.
          */
         for (int inner_add_pos = 0; inner_add_pos < 2; inner_add_pos++) {
            ir_expression *inner_add =
               mul->operands[inner_add_pos]->as_expression();

            if (!inner_add || inner_add->operation != ir_binop_add)
               continue;

            /* Inner addition found on one of the operands. Now check for
             * one of the operands of the inner addition to be the negative
             * of x_operand.
             */
            for (int neg_pos = 0; neg_pos < 2; neg_pos++) {
               ir_expression *neg =
                  inner_add->operands[neg_pos]->as_expression();

               if (!neg || neg->operation != ir_unop_neg)
                  continue;

               ir_rvalue *x_operand = ir->operands[1 - mul_pos];

               if (!neg->operands[0]->equals(x_operand))
                  continue;

               ir_rvalue *y_operand = inner_add->operands[1 - neg_pos];
               ir_rvalue *a_operand = mul->operands[1 - inner_add_pos];

               /* All three lrp arguments must agree in type; mixed
                * scalar/vector combinations are not matched.
                */
               if (x_operand->type != y_operand->type ||
                   x_operand->type != a_operand->type)
                  continue;

               return lrp(x_operand, y_operand, a_operand);
            }
         }
      }
      break;

   case ir_binop_sub:
      /* 0 - x == -x; x - 0 == x. */
      if (is_vec_zero(op_const[0]))
         return neg(ir->operands[1]);
      if (is_vec_zero(op_const[1]))
         return ir->operands[0];
      break;

   case ir_binop_mul:
      /* 1 * x == x; x * 1 == x. */
      if (is_vec_one(op_const[0]))
         return ir->operands[1];
      if (is_vec_one(op_const[1]))
         return ir->operands[0];

      /* 0 * x == 0; x * 0 == 0.
       * NOTE(review): this zero() call uses \c ir as the ralloc context
       * while the other cases in this function use \c mem_ctx — presumably
       * equivalent lifetimes here, but confirm and consider unifying.
       */
      if (is_vec_zero(op_const[0]) || is_vec_zero(op_const[1]))
         return ir_constant::zero(ir, ir->type);

      /* -1 * x == -x; x * -1 == -x. */
      if (is_vec_negative_one(op_const[0]))
         return neg(ir->operands[1]);
      if (is_vec_negative_one(op_const[1]))
         return neg(ir->operands[0]);


      /* Reassociate multiplication of constants so that we can do
       * constant folding.
       */
      if (op_const[0] && !op_const[1])
         reassociate_constant(ir, 0, op_const[0], op_expr[1]);
      if (op_const[1] && !op_const[0])
         reassociate_constant(ir, 1, op_const[1], op_expr[0]);

      break;

   case ir_binop_div:
      /* 1 / x == rcp(x), but only for floats (integer division by a
       * reciprocal would change rounding).
       */
      if (is_vec_one(op_const[0]) && ir->type->base_type == GLSL_TYPE_FLOAT) {
         return new(mem_ctx) ir_expression(ir_unop_rcp,
                                           ir->operands[1]->type,
                                           ir->operands[1],
                                           NULL);
      }
      /* x / 1 == x. */
      if (is_vec_one(op_const[1]))
         return ir->operands[0];
      break;

   case ir_binop_dot:
      /* dot(0, x) == dot(x, 0) == 0. */
      if (is_vec_zero(op_const[0]) || is_vec_zero(op_const[1]))
         return ir_constant::zero(mem_ctx, ir->type);

      /* dot(basis, x) selects a single component of x, so replace the dot
       * with a swizzle of that component.
       */
      if (is_vec_basis(op_const[0])) {
         unsigned component = 0;
         for (unsigned c = 0; c < op_const[0]->type->vector_elements; c++) {
            if (op_const[0]->value.f[c] == 1.0)
               component = c;
         }
         return new(mem_ctx) ir_swizzle(ir->operands[1], component, 0, 0, 0, 1);
      }
      if (is_vec_basis(op_const[1])) {
         unsigned component = 0;
         for (unsigned c = 0; c < op_const[1]->type->vector_elements; c++) {
            if (op_const[1]->value.f[c] == 1.0)
               component = c;
         }
         return new(mem_ctx) ir_swizzle(ir->operands[0], component, 0, 0, 0, 1);
      }
      break;

   case ir_binop_rshift:
   case ir_binop_lshift:
      /* 0 >> x == 0 (and 0 << x == 0) */
      if (is_vec_zero(op_const[0]))
         return ir->operands[0];
      /* x >> 0 == x (and x << 0 == x) */
      if (is_vec_zero(op_const[1]))
         return ir->operands[0];
      break;

   case ir_binop_logic_and:
      /* true && x == x; x && true == x; false && x == x && false == false. */
      if (is_vec_one(op_const[0])) {
         return ir->operands[1];
      } else if (is_vec_one(op_const[1])) {
         return ir->operands[0];
      } else if (is_vec_zero(op_const[0]) || is_vec_zero(op_const[1])) {
         return ir_constant::zero(mem_ctx, ir->type);
      } else if (op_expr[0] && op_expr[0]->operation == ir_unop_logic_not &&
                 op_expr[1] && op_expr[1]->operation == ir_unop_logic_not) {
         /* De Morgan's Law:
          *    (not A) and (not B) === not (A or B)
          */
         return logic_not(logic_or(op_expr[0]->operands[0],
                                   op_expr[1]->operands[0]));
      } else if (ir->operands[0]->equals(ir->operands[1])) {
         /* (a && a) == a */
         return ir->operands[0];
      }
      break;

   case ir_binop_logic_xor:
      /* false ^^ x == x; true ^^ x == !x. */
      if (is_vec_zero(op_const[0])) {
         return ir->operands[1];
      } else if (is_vec_zero(op_const[1])) {
         return ir->operands[0];
      } else if (is_vec_one(op_const[0])) {
         return logic_not(ir->operands[1]);
      } else if (is_vec_one(op_const[1])) {
         return logic_not(ir->operands[0]);
      } else if (ir->operands[0]->equals(ir->operands[1])) {
         /* (a ^^ a) == false */
         return ir_constant::zero(mem_ctx, ir->type);
      }
      break;

   case ir_binop_logic_or:
      /* false || x == x; true || x == x || true == true. */
      if (is_vec_zero(op_const[0])) {
         return ir->operands[1];
      } else if (is_vec_zero(op_const[1])) {
         return ir->operands[0];
      } else if (is_vec_one(op_const[0]) || is_vec_one(op_const[1])) {
         /* Build an all-true constant of the result type; 16 covers the
          * maximum number of components in ir_constant_data.
          */
         ir_constant_data data;

         for (unsigned i = 0; i < 16; i++)
            data.b[i] = true;

         return new(mem_ctx) ir_constant(ir->type, &data);
      } else if (op_expr[0] && op_expr[0]->operation == ir_unop_logic_not &&
                 op_expr[1] && op_expr[1]->operation == ir_unop_logic_not) {
         /* De Morgan's Law:
          *    (not A) or (not B) === not (A and B)
          */
         return logic_not(logic_and(op_expr[0]->operands[0],
                                    op_expr[1]->operands[0]));
      } else if (ir->operands[0]->equals(ir->operands[1])) {
         /* (a || a) == a */
         return ir->operands[0];
      }
      break;

   case ir_binop_pow:
      /* 1^x == 1 */
      if (is_vec_one(op_const[0]))
         return op_const[0];

      /* pow(2,x) == exp2(x) */
      if (is_vec_two(op_const[0]))
         return expr(ir_unop_exp2, ir->operands[1]);

      break;

   case ir_unop_rcp:
      /* rcp(rcp(x)) == x. */
      if (op_expr[0] && op_expr[0]->operation == ir_unop_rcp)
         return op_expr[0]->operands[0];

      /* While ir_to_mesa.cpp will lower sqrt(x) to rcp(rsq(x)), it does so at
       * its IR level, so we can always apply this transformation.
       */
      if (op_expr[0] && op_expr[0]->operation == ir_unop_rsq)
         return sqrt(op_expr[0]->operands[0]);

      /* As far as we know, all backends are OK with rsq. */
      if (op_expr[0] && op_expr[0]->operation == ir_unop_sqrt) {
         return rsq(op_expr[0]->operands[0]);
      }

      break;

   case ir_triop_lrp:
      /* Operands are (x, y, a): lrp(x, y, 0) == x; lrp(x, y, 1) == y. */
      if (is_vec_zero(op_const[2])) {
         return ir->operands[0];
      } else if (is_vec_one(op_const[2])) {
         return ir->operands[1];
      }
      break;

   default:
      break;
   }

   /* No identity matched; keep the original expression. */
   return ir;
}
520
521 void
522 ir_algebraic_visitor::handle_rvalue(ir_rvalue **rvalue)
523 {
524 if (!*rvalue)
525 return;
526
527 ir_expression *expr = (*rvalue)->as_expression();
528 if (!expr || expr->operation == ir_quadop_vector)
529 return;
530
531 ir_rvalue *new_rvalue = handle_expression(expr);
532 if (new_rvalue == *rvalue)
533 return;
534
535 /* If the expr used to be some vec OP scalar returning a vector, and the
536 * optimization gave us back a scalar, we still need to turn it into a
537 * vector.
538 */
539 *rvalue = swizzle_if_required(expr, new_rvalue);
540
541 this->progress = true;
542 }
543
544 bool
545 do_algebraic(exec_list *instructions)
546 {
547 ir_algebraic_visitor v;
548
549 visit_list_elements(&v, instructions);
550
551 return v.progress;
552 }