2 * Copyright © 2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
25 * \file opt_algebraic.cpp
27 * Takes advantage of association, commutativity, and other algebraic
28 * properties to simplify expressions.
32 #include "ir_visitor.h"
33 #include "ir_rvalue_visitor.h"
34 #include "ir_optimization.h"
35 #include "ir_builder.h"
36 #include "glsl_types.h"
38 using namespace ir_builder
;
43 * Visitor class for replacing expressions with ir_constant values.
46 class ir_algebraic_visitor
: public ir_rvalue_visitor
{
48 ir_algebraic_visitor(bool native_integers
)
50 this->progress
= false;
52 this->native_integers
= native_integers
;
55 virtual ~ir_algebraic_visitor()
59 ir_rvalue
*handle_expression(ir_expression
*ir
);
60 void handle_rvalue(ir_rvalue
**rvalue
);
61 bool reassociate_constant(ir_expression
*ir1
,
63 ir_constant
*constant
,
65 void reassociate_operands(ir_expression
*ir1
,
69 ir_rvalue
*swizzle_if_required(ir_expression
*expr
,
78 } /* unnamed namespace */
81 is_vec_zero(ir_constant
*ir
)
83 return (ir
== NULL
) ? false : ir
->is_zero();
87 is_vec_one(ir_constant
*ir
)
89 return (ir
== NULL
) ? false : ir
->is_one();
93 is_vec_two(ir_constant
*ir
)
95 return (ir
== NULL
) ? false : ir
->is_value(2.0, 2);
99 is_vec_negative_one(ir_constant
*ir
)
101 return (ir
== NULL
) ? false : ir
->is_negative_one();
105 is_vec_basis(ir_constant
*ir
)
107 return (ir
== NULL
) ? false : ir
->is_basis();
111 update_type(ir_expression
*ir
)
113 if (ir
->operands
[0]->type
->is_vector())
114 ir
->type
= ir
->operands
[0]->type
;
116 ir
->type
= ir
->operands
[1]->type
;
120 ir_algebraic_visitor::reassociate_operands(ir_expression
*ir1
,
125 ir_rvalue
*temp
= ir2
->operands
[op2
];
126 ir2
->operands
[op2
] = ir1
->operands
[op1
];
127 ir1
->operands
[op1
] = temp
;
129 /* Update the type of ir2. The type of ir1 won't have changed --
130 * base types matched, and at least one of the operands of the 2
131 * binops is still a vector if any of them were.
135 this->progress
= true;
139 * Reassociates a constant down a tree of adds or multiplies.
141 * Consider (2 * (a * (b * 0.5))). We want to send up with a * b.
144 ir_algebraic_visitor::reassociate_constant(ir_expression
*ir1
, int const_index
,
145 ir_constant
*constant
,
148 if (!ir2
|| ir1
->operation
!= ir2
->operation
)
151 /* Don't want to even think about matrices. */
152 if (ir1
->operands
[0]->type
->is_matrix() ||
153 ir1
->operands
[1]->type
->is_matrix() ||
154 ir2
->operands
[0]->type
->is_matrix() ||
155 ir2
->operands
[1]->type
->is_matrix())
158 ir_constant
*ir2_const
[2];
159 ir2_const
[0] = ir2
->operands
[0]->constant_expression_value();
160 ir2_const
[1] = ir2
->operands
[1]->constant_expression_value();
162 if (ir2_const
[0] && ir2_const
[1])
166 reassociate_operands(ir1
, const_index
, ir2
, 1);
168 } else if (ir2_const
[1]) {
169 reassociate_operands(ir1
, const_index
, ir2
, 0);
173 if (reassociate_constant(ir1
, const_index
, constant
,
174 ir2
->operands
[0]->as_expression())) {
179 if (reassociate_constant(ir1
, const_index
, constant
,
180 ir2
->operands
[1]->as_expression())) {
188 /* When eliminating an expression and just returning one of its operands,
189 * we may need to swizzle that operand out to a vector if the expression was
193 ir_algebraic_visitor::swizzle_if_required(ir_expression
*expr
,
196 if (expr
->type
->is_vector() && operand
->type
->is_scalar()) {
197 return new(mem_ctx
) ir_swizzle(operand
, 0, 0, 0, 0,
198 expr
->type
->vector_elements
);
204 ir_algebraic_visitor::handle_expression(ir_expression
*ir
)
206 ir_constant
*op_const
[4] = {NULL
, NULL
, NULL
, NULL
};
207 ir_expression
*op_expr
[4] = {NULL
, NULL
, NULL
, NULL
};
210 assert(ir
->get_num_operands() <= 4);
211 for (i
= 0; i
< ir
->get_num_operands(); i
++) {
212 if (ir
->operands
[i
]->type
->is_matrix())
215 op_const
[i
] = ir
->operands
[i
]->constant_expression_value();
216 op_expr
[i
] = ir
->operands
[i
]->as_expression();
219 if (this->mem_ctx
== NULL
)
220 this->mem_ctx
= ralloc_parent(ir
);
222 switch (ir
->operation
) {
223 case ir_unop_bit_not
:
224 if (op_expr
[0] && op_expr
[0]->operation
== ir_unop_bit_not
)
225 return op_expr
[0]->operands
[0];
229 if (op_expr
[0] == NULL
)
232 switch (op_expr
[0]->operation
) {
235 return abs(op_expr
[0]->operands
[0]);
242 if (op_expr
[0] == NULL
)
245 if (op_expr
[0]->operation
== ir_unop_neg
) {
246 return op_expr
[0]->operands
[0];
251 if (op_expr
[0] == NULL
)
254 if (op_expr
[0]->operation
== ir_unop_log
) {
255 return op_expr
[0]->operands
[0];
260 if (op_expr
[0] == NULL
)
263 if (op_expr
[0]->operation
== ir_unop_exp
) {
264 return op_expr
[0]->operands
[0];
269 if (op_expr
[0] == NULL
)
272 if (op_expr
[0]->operation
== ir_unop_log2
) {
273 return op_expr
[0]->operands
[0];
278 if (op_expr
[0] == NULL
)
281 if (op_expr
[0]->operation
== ir_unop_exp2
) {
282 return op_expr
[0]->operands
[0];
286 case ir_unop_logic_not
: {
287 enum ir_expression_operation new_op
= ir_unop_logic_not
;
289 if (op_expr
[0] == NULL
)
292 switch (op_expr
[0]->operation
) {
293 case ir_binop_less
: new_op
= ir_binop_gequal
; break;
294 case ir_binop_greater
: new_op
= ir_binop_lequal
; break;
295 case ir_binop_lequal
: new_op
= ir_binop_greater
; break;
296 case ir_binop_gequal
: new_op
= ir_binop_less
; break;
297 case ir_binop_equal
: new_op
= ir_binop_nequal
; break;
298 case ir_binop_nequal
: new_op
= ir_binop_equal
; break;
299 case ir_binop_all_equal
: new_op
= ir_binop_any_nequal
; break;
300 case ir_binop_any_nequal
: new_op
= ir_binop_all_equal
; break;
303 /* The default case handler is here to silence a warning from GCC.
308 if (new_op
!= ir_unop_logic_not
) {
309 return new(mem_ctx
) ir_expression(new_op
,
311 op_expr
[0]->operands
[0],
312 op_expr
[0]->operands
[1]);
319 if (is_vec_zero(op_const
[0]))
320 return ir
->operands
[1];
321 if (is_vec_zero(op_const
[1]))
322 return ir
->operands
[0];
324 /* Reassociate addition of constants so that we can do constant
327 if (op_const
[0] && !op_const
[1])
328 reassociate_constant(ir
, 0, op_const
[0], op_expr
[1]);
329 if (op_const
[1] && !op_const
[0])
330 reassociate_constant(ir
, 1, op_const
[1], op_expr
[0]);
332 /* Replace (-x + y) * a + x and commutative variations with lrp(x, y, a).
335 * (x * -a) + (y * a) + x
336 * x + (x * -a) + (y * a)
337 * x * (1 - a) + y * a
340 for (int mul_pos
= 0; mul_pos
< 2; mul_pos
++) {
341 ir_expression
*mul
= op_expr
[mul_pos
];
343 if (!mul
|| mul
->operation
!= ir_binop_mul
)
346 /* Multiply found on one of the operands. Now check for an
347 * inner addition operation.
349 for (int inner_add_pos
= 0; inner_add_pos
< 2; inner_add_pos
++) {
350 ir_expression
*inner_add
=
351 mul
->operands
[inner_add_pos
]->as_expression();
353 if (!inner_add
|| inner_add
->operation
!= ir_binop_add
)
356 /* Inner addition found on one of the operands. Now check for
357 * one of the operands of the inner addition to be the negative
360 for (int neg_pos
= 0; neg_pos
< 2; neg_pos
++) {
362 inner_add
->operands
[neg_pos
]->as_expression();
364 if (!neg
|| neg
->operation
!= ir_unop_neg
)
367 ir_rvalue
*x_operand
= ir
->operands
[1 - mul_pos
];
369 if (!neg
->operands
[0]->equals(x_operand
))
372 ir_rvalue
*y_operand
= inner_add
->operands
[1 - neg_pos
];
373 ir_rvalue
*a_operand
= mul
->operands
[1 - inner_add_pos
];
375 if (x_operand
->type
!= y_operand
->type
||
376 x_operand
->type
!= a_operand
->type
)
379 return lrp(x_operand
, y_operand
, a_operand
);
386 if (is_vec_zero(op_const
[0]))
387 return neg(ir
->operands
[1]);
388 if (is_vec_zero(op_const
[1]))
389 return ir
->operands
[0];
393 if (is_vec_one(op_const
[0]))
394 return ir
->operands
[1];
395 if (is_vec_one(op_const
[1]))
396 return ir
->operands
[0];
398 if (is_vec_zero(op_const
[0]) || is_vec_zero(op_const
[1]))
399 return ir_constant::zero(ir
, ir
->type
);
401 if (is_vec_negative_one(op_const
[0]))
402 return neg(ir
->operands
[1]);
403 if (is_vec_negative_one(op_const
[1]))
404 return neg(ir
->operands
[0]);
407 /* Reassociate multiplication of constants so that we can do
410 if (op_const
[0] && !op_const
[1])
411 reassociate_constant(ir
, 0, op_const
[0], op_expr
[1]);
412 if (op_const
[1] && !op_const
[0])
413 reassociate_constant(ir
, 1, op_const
[1], op_expr
[0]);
418 if (is_vec_one(op_const
[0]) && ir
->type
->base_type
== GLSL_TYPE_FLOAT
) {
419 return new(mem_ctx
) ir_expression(ir_unop_rcp
,
420 ir
->operands
[1]->type
,
424 if (is_vec_one(op_const
[1]))
425 return ir
->operands
[0];
429 if (is_vec_zero(op_const
[0]) || is_vec_zero(op_const
[1]))
430 return ir_constant::zero(mem_ctx
, ir
->type
);
432 if (is_vec_basis(op_const
[0])) {
433 unsigned component
= 0;
434 for (unsigned c
= 0; c
< op_const
[0]->type
->vector_elements
; c
++) {
435 if (op_const
[0]->value
.f
[c
] == 1.0)
438 return new(mem_ctx
) ir_swizzle(ir
->operands
[1], component
, 0, 0, 0, 1);
440 if (is_vec_basis(op_const
[1])) {
441 unsigned component
= 0;
442 for (unsigned c
= 0; c
< op_const
[1]->type
->vector_elements
; c
++) {
443 if (op_const
[1]->value
.f
[c
] == 1.0)
446 return new(mem_ctx
) ir_swizzle(ir
->operands
[0], component
, 0, 0, 0, 1);
451 case ir_binop_lequal
:
452 case ir_binop_greater
:
453 case ir_binop_gequal
:
455 case ir_binop_nequal
:
456 for (int add_pos
= 0; add_pos
< 2; add_pos
++) {
457 ir_expression
*add
= op_expr
[add_pos
];
459 if (!add
|| add
->operation
!= ir_binop_add
)
462 ir_constant
*zero
= op_const
[1 - add_pos
];
463 if (!is_vec_zero(zero
))
466 return new(mem_ctx
) ir_expression(ir
->operation
,
468 neg(add
->operands
[1]));
472 case ir_binop_rshift
:
473 case ir_binop_lshift
:
475 if (is_vec_zero(op_const
[0]))
476 return ir
->operands
[0];
478 if (is_vec_zero(op_const
[1]))
479 return ir
->operands
[0];
482 case ir_binop_logic_and
:
483 if (is_vec_one(op_const
[0])) {
484 return ir
->operands
[1];
485 } else if (is_vec_one(op_const
[1])) {
486 return ir
->operands
[0];
487 } else if (is_vec_zero(op_const
[0]) || is_vec_zero(op_const
[1])) {
488 return ir_constant::zero(mem_ctx
, ir
->type
);
489 } else if (op_expr
[0] && op_expr
[0]->operation
== ir_unop_logic_not
&&
490 op_expr
[1] && op_expr
[1]->operation
== ir_unop_logic_not
) {
492 * (not A) and (not B) === not (A or B)
494 return logic_not(logic_or(op_expr
[0]->operands
[0],
495 op_expr
[1]->operands
[0]));
496 } else if (ir
->operands
[0]->equals(ir
->operands
[1])) {
498 return ir
->operands
[0];
502 case ir_binop_logic_xor
:
503 if (is_vec_zero(op_const
[0])) {
504 return ir
->operands
[1];
505 } else if (is_vec_zero(op_const
[1])) {
506 return ir
->operands
[0];
507 } else if (is_vec_one(op_const
[0])) {
508 return logic_not(ir
->operands
[1]);
509 } else if (is_vec_one(op_const
[1])) {
510 return logic_not(ir
->operands
[0]);
511 } else if (ir
->operands
[0]->equals(ir
->operands
[1])) {
512 /* (a ^^ a) == false */
513 return ir_constant::zero(mem_ctx
, ir
->type
);
517 case ir_binop_logic_or
:
518 if (is_vec_zero(op_const
[0])) {
519 return ir
->operands
[1];
520 } else if (is_vec_zero(op_const
[1])) {
521 return ir
->operands
[0];
522 } else if (is_vec_one(op_const
[0]) || is_vec_one(op_const
[1])) {
523 ir_constant_data data
;
525 for (unsigned i
= 0; i
< 16; i
++)
528 return new(mem_ctx
) ir_constant(ir
->type
, &data
);
529 } else if (op_expr
[0] && op_expr
[0]->operation
== ir_unop_logic_not
&&
530 op_expr
[1] && op_expr
[1]->operation
== ir_unop_logic_not
) {
532 * (not A) or (not B) === not (A and B)
534 return logic_not(logic_and(op_expr
[0]->operands
[0],
535 op_expr
[1]->operands
[0]));
536 } else if (ir
->operands
[0]->equals(ir
->operands
[1])) {
538 return ir
->operands
[0];
544 if (is_vec_one(op_const
[0]))
548 if (is_vec_one(op_const
[1]))
549 return ir
->operands
[0];
551 /* pow(2,x) == exp2(x) */
552 if (is_vec_two(op_const
[0]))
553 return expr(ir_unop_exp2
, ir
->operands
[1]);
555 if (is_vec_two(op_const
[1])) {
556 ir_variable
*x
= new(ir
) ir_variable(ir
->operands
[1]->type
, "x",
558 base_ir
->insert_before(x
);
559 base_ir
->insert_before(assign(x
, ir
->operands
[0]));
566 if (op_expr
[0] && op_expr
[0]->operation
== ir_unop_rcp
)
567 return op_expr
[0]->operands
[0];
569 /* While ir_to_mesa.cpp will lower sqrt(x) to rcp(rsq(x)), it does so at
570 * its IR level, so we can always apply this transformation.
572 if (op_expr
[0] && op_expr
[0]->operation
== ir_unop_rsq
)
573 return sqrt(op_expr
[0]->operands
[0]);
575 /* As far as we know, all backends are OK with rsq. */
576 if (op_expr
[0] && op_expr
[0]->operation
== ir_unop_sqrt
) {
577 return rsq(op_expr
[0]->operands
[0]);
583 /* Operands are op0 * op1 + op2. */
584 if (is_vec_zero(op_const
[0]) || is_vec_zero(op_const
[1])) {
585 return ir
->operands
[2];
586 } else if (is_vec_zero(op_const
[2])) {
587 return mul(ir
->operands
[0], ir
->operands
[1]);
588 } else if (is_vec_one(op_const
[0])) {
589 return add(ir
->operands
[1], ir
->operands
[2]);
590 } else if (is_vec_one(op_const
[1])) {
591 return add(ir
->operands
[0], ir
->operands
[2]);
596 /* Operands are (x, y, a). */
597 if (is_vec_zero(op_const
[2])) {
598 return ir
->operands
[0];
599 } else if (is_vec_one(op_const
[2])) {
600 return ir
->operands
[1];
601 } else if (ir
->operands
[0]->equals(ir
->operands
[1])) {
602 return ir
->operands
[0];
603 } else if (is_vec_zero(op_const
[0])) {
604 return mul(ir
->operands
[1], ir
->operands
[2]);
605 } else if (is_vec_zero(op_const
[1])) {
606 unsigned op2_components
= ir
->operands
[2]->type
->vector_elements
;
607 ir_constant
*one
= new(mem_ctx
) ir_constant(1.0f
, op2_components
);
608 return mul(ir
->operands
[0], add(one
, neg(ir
->operands
[2])));
613 if (is_vec_one(op_const
[0]))
614 return ir
->operands
[1];
615 if (is_vec_zero(op_const
[0]))
616 return ir
->operands
[2];
627 ir_algebraic_visitor::handle_rvalue(ir_rvalue
**rvalue
)
632 ir_expression
*expr
= (*rvalue
)->as_expression();
633 if (!expr
|| expr
->operation
== ir_quadop_vector
)
636 ir_rvalue
*new_rvalue
= handle_expression(expr
);
637 if (new_rvalue
== *rvalue
)
640 /* If the expr used to be some vec OP scalar returning a vector, and the
641 * optimization gave us back a scalar, we still need to turn it into a
644 *rvalue
= swizzle_if_required(expr
, new_rvalue
);
646 this->progress
= true;
650 do_algebraic(exec_list
*instructions
, bool native_integers
)
652 ir_algebraic_visitor
v(native_integers
);
654 visit_list_elements(&v
, instructions
);