2 * Copyright © 2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
25 * \file opt_algebraic.cpp
 * Takes advantage of association, commutativity, and other algebraic
28 * properties to simplify expressions.
32 #include "ir_visitor.h"
33 #include "ir_rvalue_visitor.h"
34 #include "ir_optimization.h"
35 #include "ir_builder.h"
36 #include "glsl_types.h"
38 using namespace ir_builder
;
43 * Visitor class for replacing expressions with ir_constant values.
46 class ir_algebraic_visitor
: public ir_rvalue_visitor
{
48 ir_algebraic_visitor(bool native_integers
,
49 const struct gl_shader_compiler_options
*options
)
52 this->progress
= false;
54 this->native_integers
= native_integers
;
57 virtual ~ir_algebraic_visitor()
61 ir_rvalue
*handle_expression(ir_expression
*ir
);
62 void handle_rvalue(ir_rvalue
**rvalue
);
63 bool reassociate_constant(ir_expression
*ir1
,
65 ir_constant
*constant
,
67 void reassociate_operands(ir_expression
*ir1
,
71 ir_rvalue
*swizzle_if_required(ir_expression
*expr
,
74 const struct gl_shader_compiler_options
*options
;
81 } /* unnamed namespace */
84 is_vec_zero(ir_constant
*ir
)
86 return (ir
== NULL
) ? false : ir
->is_zero();
90 is_vec_one(ir_constant
*ir
)
92 return (ir
== NULL
) ? false : ir
->is_one();
96 is_vec_two(ir_constant
*ir
)
98 return (ir
== NULL
) ? false : ir
->is_value(2.0, 2);
102 is_vec_negative_one(ir_constant
*ir
)
104 return (ir
== NULL
) ? false : ir
->is_negative_one();
108 is_vec_basis(ir_constant
*ir
)
110 return (ir
== NULL
) ? false : ir
->is_basis();
114 is_valid_vec_const(ir_constant
*ir
)
119 if (!ir
->type
->is_scalar() && !ir
->type
->is_vector())
126 is_less_than_one(ir_constant
*ir
)
128 if (!is_valid_vec_const(ir
))
131 unsigned component
= 0;
132 for (int c
= 0; c
< ir
->type
->vector_elements
; c
++) {
133 if (ir
->get_float_component(c
) < 1.0f
)
137 return (component
== ir
->type
->vector_elements
);
141 is_greater_than_zero(ir_constant
*ir
)
143 if (!is_valid_vec_const(ir
))
146 unsigned component
= 0;
147 for (int c
= 0; c
< ir
->type
->vector_elements
; c
++) {
148 if (ir
->get_float_component(c
) > 0.0f
)
152 return (component
== ir
->type
->vector_elements
);
156 update_type(ir_expression
*ir
)
158 if (ir
->operands
[0]->type
->is_vector())
159 ir
->type
= ir
->operands
[0]->type
;
161 ir
->type
= ir
->operands
[1]->type
;
164 /* Recognize (v.x + v.y) + (v.z + v.w) as dot(v, 1.0) */
165 static ir_expression
*
166 try_replace_with_dot(ir_expression
*expr0
, ir_expression
*expr1
, void *mem_ctx
)
168 if (expr0
&& expr0
->operation
== ir_binop_add
&&
169 expr0
->type
->is_float() &&
170 expr1
&& expr1
->operation
== ir_binop_add
&&
171 expr1
->type
->is_float()) {
172 ir_swizzle
*x
= expr0
->operands
[0]->as_swizzle();
173 ir_swizzle
*y
= expr0
->operands
[1]->as_swizzle();
174 ir_swizzle
*z
= expr1
->operands
[0]->as_swizzle();
175 ir_swizzle
*w
= expr1
->operands
[1]->as_swizzle();
177 if (!x
|| x
->mask
.num_components
!= 1 ||
178 !y
|| y
->mask
.num_components
!= 1 ||
179 !z
|| z
->mask
.num_components
!= 1 ||
180 !w
|| w
->mask
.num_components
!= 1) {
184 bool swiz_seen
[4] = {false, false, false, false};
185 swiz_seen
[x
->mask
.x
] = true;
186 swiz_seen
[y
->mask
.x
] = true;
187 swiz_seen
[z
->mask
.x
] = true;
188 swiz_seen
[w
->mask
.x
] = true;
190 if (!swiz_seen
[0] || !swiz_seen
[1] ||
191 !swiz_seen
[2] || !swiz_seen
[3]) {
195 if (x
->val
->equals(y
->val
) &&
196 x
->val
->equals(z
->val
) &&
197 x
->val
->equals(w
->val
)) {
198 return dot(x
->val
, new(mem_ctx
) ir_constant(1.0f
, 4));
205 ir_algebraic_visitor::reassociate_operands(ir_expression
*ir1
,
210 ir_rvalue
*temp
= ir2
->operands
[op2
];
211 ir2
->operands
[op2
] = ir1
->operands
[op1
];
212 ir1
->operands
[op1
] = temp
;
214 /* Update the type of ir2. The type of ir1 won't have changed --
215 * base types matched, and at least one of the operands of the 2
216 * binops is still a vector if any of them were.
220 this->progress
= true;
224 * Reassociates a constant down a tree of adds or multiplies.
226 * Consider (2 * (a * (b * 0.5))). We want to send up with a * b.
229 ir_algebraic_visitor::reassociate_constant(ir_expression
*ir1
, int const_index
,
230 ir_constant
*constant
,
233 if (!ir2
|| ir1
->operation
!= ir2
->operation
)
236 /* Don't want to even think about matrices. */
237 if (ir1
->operands
[0]->type
->is_matrix() ||
238 ir1
->operands
[1]->type
->is_matrix() ||
239 ir2
->operands
[0]->type
->is_matrix() ||
240 ir2
->operands
[1]->type
->is_matrix())
243 ir_constant
*ir2_const
[2];
244 ir2_const
[0] = ir2
->operands
[0]->constant_expression_value();
245 ir2_const
[1] = ir2
->operands
[1]->constant_expression_value();
247 if (ir2_const
[0] && ir2_const
[1])
251 reassociate_operands(ir1
, const_index
, ir2
, 1);
253 } else if (ir2_const
[1]) {
254 reassociate_operands(ir1
, const_index
, ir2
, 0);
258 if (reassociate_constant(ir1
, const_index
, constant
,
259 ir2
->operands
[0]->as_expression())) {
264 if (reassociate_constant(ir1
, const_index
, constant
,
265 ir2
->operands
[1]->as_expression())) {
273 /* When eliminating an expression and just returning one of its operands,
274 * we may need to swizzle that operand out to a vector if the expression was
278 ir_algebraic_visitor::swizzle_if_required(ir_expression
*expr
,
281 if (expr
->type
->is_vector() && operand
->type
->is_scalar()) {
282 return new(mem_ctx
) ir_swizzle(operand
, 0, 0, 0, 0,
283 expr
->type
->vector_elements
);
289 ir_algebraic_visitor::handle_expression(ir_expression
*ir
)
291 ir_constant
*op_const
[4] = {NULL
, NULL
, NULL
, NULL
};
292 ir_expression
*op_expr
[4] = {NULL
, NULL
, NULL
, NULL
};
295 assert(ir
->get_num_operands() <= 4);
296 for (i
= 0; i
< ir
->get_num_operands(); i
++) {
297 if (ir
->operands
[i
]->type
->is_matrix())
300 op_const
[i
] = ir
->operands
[i
]->constant_expression_value();
301 op_expr
[i
] = ir
->operands
[i
]->as_expression();
304 if (this->mem_ctx
== NULL
)
305 this->mem_ctx
= ralloc_parent(ir
);
307 switch (ir
->operation
) {
308 case ir_unop_bit_not
:
309 if (op_expr
[0] && op_expr
[0]->operation
== ir_unop_bit_not
)
310 return op_expr
[0]->operands
[0];
314 if (op_expr
[0] == NULL
)
317 switch (op_expr
[0]->operation
) {
320 return abs(op_expr
[0]->operands
[0]);
327 if (op_expr
[0] == NULL
)
330 if (op_expr
[0]->operation
== ir_unop_neg
) {
331 return op_expr
[0]->operands
[0];
336 if (op_expr
[0] == NULL
)
339 if (op_expr
[0]->operation
== ir_unop_log
) {
340 return op_expr
[0]->operands
[0];
345 if (op_expr
[0] == NULL
)
348 if (op_expr
[0]->operation
== ir_unop_exp
) {
349 return op_expr
[0]->operands
[0];
354 if (op_expr
[0] == NULL
)
357 if (op_expr
[0]->operation
== ir_unop_log2
) {
358 return op_expr
[0]->operands
[0];
363 if (op_expr
[0] == NULL
)
366 if (op_expr
[0]->operation
== ir_unop_exp2
) {
367 return op_expr
[0]->operands
[0];
371 case ir_unop_logic_not
: {
372 enum ir_expression_operation new_op
= ir_unop_logic_not
;
374 if (op_expr
[0] == NULL
)
377 switch (op_expr
[0]->operation
) {
378 case ir_binop_less
: new_op
= ir_binop_gequal
; break;
379 case ir_binop_greater
: new_op
= ir_binop_lequal
; break;
380 case ir_binop_lequal
: new_op
= ir_binop_greater
; break;
381 case ir_binop_gequal
: new_op
= ir_binop_less
; break;
382 case ir_binop_equal
: new_op
= ir_binop_nequal
; break;
383 case ir_binop_nequal
: new_op
= ir_binop_equal
; break;
384 case ir_binop_all_equal
: new_op
= ir_binop_any_nequal
; break;
385 case ir_binop_any_nequal
: new_op
= ir_binop_all_equal
; break;
388 /* The default case handler is here to silence a warning from GCC.
393 if (new_op
!= ir_unop_logic_not
) {
394 return new(mem_ctx
) ir_expression(new_op
,
396 op_expr
[0]->operands
[0],
397 op_expr
[0]->operands
[1]);
404 if (is_vec_zero(op_const
[0]))
405 return ir
->operands
[1];
406 if (is_vec_zero(op_const
[1]))
407 return ir
->operands
[0];
409 /* Reassociate addition of constants so that we can do constant
412 if (op_const
[0] && !op_const
[1])
413 reassociate_constant(ir
, 0, op_const
[0], op_expr
[1]);
414 if (op_const
[1] && !op_const
[0])
415 reassociate_constant(ir
, 1, op_const
[1], op_expr
[0]);
417 /* Recognize (v.x + v.y) + (v.z + v.w) as dot(v, 1.0) */
418 if (options
->OptimizeForAOS
) {
419 ir_expression
*expr
= try_replace_with_dot(op_expr
[0], op_expr
[1],
425 /* Replace (-x + y) * a + x and commutative variations with lrp(x, y, a).
428 * (x * -a) + (y * a) + x
429 * x + (x * -a) + (y * a)
430 * x * (1 - a) + y * a
433 for (int mul_pos
= 0; mul_pos
< 2; mul_pos
++) {
434 ir_expression
*mul
= op_expr
[mul_pos
];
436 if (!mul
|| mul
->operation
!= ir_binop_mul
)
439 /* Multiply found on one of the operands. Now check for an
440 * inner addition operation.
442 for (int inner_add_pos
= 0; inner_add_pos
< 2; inner_add_pos
++) {
443 ir_expression
*inner_add
=
444 mul
->operands
[inner_add_pos
]->as_expression();
446 if (!inner_add
|| inner_add
->operation
!= ir_binop_add
)
449 /* Inner addition found on one of the operands. Now check for
450 * one of the operands of the inner addition to be the negative
453 for (int neg_pos
= 0; neg_pos
< 2; neg_pos
++) {
455 inner_add
->operands
[neg_pos
]->as_expression();
457 if (!neg
|| neg
->operation
!= ir_unop_neg
)
460 ir_rvalue
*x_operand
= ir
->operands
[1 - mul_pos
];
462 if (!neg
->operands
[0]->equals(x_operand
))
465 ir_rvalue
*y_operand
= inner_add
->operands
[1 - neg_pos
];
466 ir_rvalue
*a_operand
= mul
->operands
[1 - inner_add_pos
];
468 if (x_operand
->type
!= y_operand
->type
||
469 x_operand
->type
!= a_operand
->type
)
472 return lrp(x_operand
, y_operand
, a_operand
);
480 if (is_vec_zero(op_const
[0]))
481 return neg(ir
->operands
[1]);
482 if (is_vec_zero(op_const
[1]))
483 return ir
->operands
[0];
487 if (is_vec_one(op_const
[0]))
488 return ir
->operands
[1];
489 if (is_vec_one(op_const
[1]))
490 return ir
->operands
[0];
492 if (is_vec_zero(op_const
[0]) || is_vec_zero(op_const
[1]))
493 return ir_constant::zero(ir
, ir
->type
);
495 if (is_vec_negative_one(op_const
[0]))
496 return neg(ir
->operands
[1]);
497 if (is_vec_negative_one(op_const
[1]))
498 return neg(ir
->operands
[0]);
501 /* Reassociate multiplication of constants so that we can do
504 if (op_const
[0] && !op_const
[1])
505 reassociate_constant(ir
, 0, op_const
[0], op_expr
[1]);
506 if (op_const
[1] && !op_const
[0])
507 reassociate_constant(ir
, 1, op_const
[1], op_expr
[0]);
512 if (is_vec_one(op_const
[0]) && ir
->type
->base_type
== GLSL_TYPE_FLOAT
) {
513 return new(mem_ctx
) ir_expression(ir_unop_rcp
,
514 ir
->operands
[1]->type
,
518 if (is_vec_one(op_const
[1]))
519 return ir
->operands
[0];
523 if (is_vec_zero(op_const
[0]) || is_vec_zero(op_const
[1]))
524 return ir_constant::zero(mem_ctx
, ir
->type
);
526 if (is_vec_basis(op_const
[0])) {
527 unsigned component
= 0;
528 for (unsigned c
= 0; c
< op_const
[0]->type
->vector_elements
; c
++) {
529 if (op_const
[0]->value
.f
[c
] == 1.0)
532 return new(mem_ctx
) ir_swizzle(ir
->operands
[1], component
, 0, 0, 0, 1);
534 if (is_vec_basis(op_const
[1])) {
535 unsigned component
= 0;
536 for (unsigned c
= 0; c
< op_const
[1]->type
->vector_elements
; c
++) {
537 if (op_const
[1]->value
.f
[c
] == 1.0)
540 return new(mem_ctx
) ir_swizzle(ir
->operands
[0], component
, 0, 0, 0, 1);
545 case ir_binop_lequal
:
546 case ir_binop_greater
:
547 case ir_binop_gequal
:
549 case ir_binop_nequal
:
550 for (int add_pos
= 0; add_pos
< 2; add_pos
++) {
551 ir_expression
*add
= op_expr
[add_pos
];
553 if (!add
|| add
->operation
!= ir_binop_add
)
556 ir_constant
*zero
= op_const
[1 - add_pos
];
557 if (!is_vec_zero(zero
))
560 return new(mem_ctx
) ir_expression(ir
->operation
,
562 neg(add
->operands
[1]));
566 case ir_binop_rshift
:
567 case ir_binop_lshift
:
569 if (is_vec_zero(op_const
[0]))
570 return ir
->operands
[0];
572 if (is_vec_zero(op_const
[1]))
573 return ir
->operands
[0];
576 case ir_binop_logic_and
:
577 if (is_vec_one(op_const
[0])) {
578 return ir
->operands
[1];
579 } else if (is_vec_one(op_const
[1])) {
580 return ir
->operands
[0];
581 } else if (is_vec_zero(op_const
[0]) || is_vec_zero(op_const
[1])) {
582 return ir_constant::zero(mem_ctx
, ir
->type
);
583 } else if (op_expr
[0] && op_expr
[0]->operation
== ir_unop_logic_not
&&
584 op_expr
[1] && op_expr
[1]->operation
== ir_unop_logic_not
) {
586 * (not A) and (not B) === not (A or B)
588 return logic_not(logic_or(op_expr
[0]->operands
[0],
589 op_expr
[1]->operands
[0]));
590 } else if (ir
->operands
[0]->equals(ir
->operands
[1])) {
592 return ir
->operands
[0];
596 case ir_binop_logic_xor
:
597 if (is_vec_zero(op_const
[0])) {
598 return ir
->operands
[1];
599 } else if (is_vec_zero(op_const
[1])) {
600 return ir
->operands
[0];
601 } else if (is_vec_one(op_const
[0])) {
602 return logic_not(ir
->operands
[1]);
603 } else if (is_vec_one(op_const
[1])) {
604 return logic_not(ir
->operands
[0]);
605 } else if (ir
->operands
[0]->equals(ir
->operands
[1])) {
606 /* (a ^^ a) == false */
607 return ir_constant::zero(mem_ctx
, ir
->type
);
611 case ir_binop_logic_or
:
612 if (is_vec_zero(op_const
[0])) {
613 return ir
->operands
[1];
614 } else if (is_vec_zero(op_const
[1])) {
615 return ir
->operands
[0];
616 } else if (is_vec_one(op_const
[0]) || is_vec_one(op_const
[1])) {
617 ir_constant_data data
;
619 for (unsigned i
= 0; i
< 16; i
++)
622 return new(mem_ctx
) ir_constant(ir
->type
, &data
);
623 } else if (op_expr
[0] && op_expr
[0]->operation
== ir_unop_logic_not
&&
624 op_expr
[1] && op_expr
[1]->operation
== ir_unop_logic_not
) {
626 * (not A) or (not B) === not (A and B)
628 return logic_not(logic_and(op_expr
[0]->operands
[0],
629 op_expr
[1]->operands
[0]));
630 } else if (ir
->operands
[0]->equals(ir
->operands
[1])) {
632 return ir
->operands
[0];
638 if (is_vec_one(op_const
[0]))
642 if (is_vec_one(op_const
[1]))
643 return ir
->operands
[0];
645 /* pow(2,x) == exp2(x) */
646 if (is_vec_two(op_const
[0]))
647 return expr(ir_unop_exp2
, ir
->operands
[1]);
649 if (is_vec_two(op_const
[1])) {
650 ir_variable
*x
= new(ir
) ir_variable(ir
->operands
[1]->type
, "x",
652 base_ir
->insert_before(x
);
653 base_ir
->insert_before(assign(x
, ir
->operands
[0]));
661 if (ir
->type
->base_type
!= GLSL_TYPE_FLOAT
)
664 /* Replace min(max) operations and its commutative combinations with
665 * a saturate operation
667 for (int op
= 0; op
< 2; op
++) {
668 ir_expression
*minmax
= op_expr
[op
];
669 ir_constant
*outer_const
= op_const
[1 - op
];
670 ir_expression_operation op_cond
= (ir
->operation
== ir_binop_max
) ?
671 ir_binop_min
: ir_binop_max
;
673 if (!minmax
|| !outer_const
|| (minmax
->operation
!= op_cond
))
676 /* Found a min(max) combination. Now try to see if its operands
677 * meet our conditions that we can do just a single saturate operation
679 for (int minmax_op
= 0; minmax_op
< 2; minmax_op
++) {
680 ir_rvalue
*inner_val_a
= minmax
->operands
[minmax_op
];
681 ir_rvalue
*inner_val_b
= minmax
->operands
[1 - minmax_op
];
683 if (!inner_val_a
|| !inner_val_b
)
686 /* Found a {min|max} ({max|min} (x, 0.0), 1.0) operation and its variations */
687 if ((outer_const
->is_one() && inner_val_a
->is_zero()) ||
688 (inner_val_a
->is_one() && outer_const
->is_zero()))
689 return saturate(inner_val_b
);
691 /* Found a {min|max} ({max|min} (x, 0.0), b) where b < 1.0
694 if (is_less_than_one(outer_const
) && inner_val_b
->is_zero())
695 return expr(ir_binop_min
, saturate(inner_val_a
), outer_const
);
697 if (!inner_val_b
->as_constant())
700 if (is_less_than_one(inner_val_b
->as_constant()) && outer_const
->is_zero())
701 return expr(ir_binop_min
, saturate(inner_val_a
), inner_val_b
);
703 /* Found a {min|max} ({max|min} (x, b), 1.0), where b > 0.0
706 if (outer_const
->is_one() && is_greater_than_zero(inner_val_b
->as_constant()))
707 return expr(ir_binop_max
, saturate(inner_val_a
), inner_val_b
);
708 if (inner_val_b
->as_constant()->is_one() && is_greater_than_zero(outer_const
))
709 return expr(ir_binop_max
, saturate(inner_val_a
), outer_const
);
716 if (op_expr
[0] && op_expr
[0]->operation
== ir_unop_rcp
)
717 return op_expr
[0]->operands
[0];
719 /* While ir_to_mesa.cpp will lower sqrt(x) to rcp(rsq(x)), it does so at
720 * its IR level, so we can always apply this transformation.
722 if (op_expr
[0] && op_expr
[0]->operation
== ir_unop_rsq
)
723 return sqrt(op_expr
[0]->operands
[0]);
725 /* As far as we know, all backends are OK with rsq. */
726 if (op_expr
[0] && op_expr
[0]->operation
== ir_unop_sqrt
) {
727 return rsq(op_expr
[0]->operands
[0]);
733 /* Operands are op0 * op1 + op2. */
734 if (is_vec_zero(op_const
[0]) || is_vec_zero(op_const
[1])) {
735 return ir
->operands
[2];
736 } else if (is_vec_zero(op_const
[2])) {
737 return mul(ir
->operands
[0], ir
->operands
[1]);
738 } else if (is_vec_one(op_const
[0])) {
739 return add(ir
->operands
[1], ir
->operands
[2]);
740 } else if (is_vec_one(op_const
[1])) {
741 return add(ir
->operands
[0], ir
->operands
[2]);
746 /* Operands are (x, y, a). */
747 if (is_vec_zero(op_const
[2])) {
748 return ir
->operands
[0];
749 } else if (is_vec_one(op_const
[2])) {
750 return ir
->operands
[1];
751 } else if (ir
->operands
[0]->equals(ir
->operands
[1])) {
752 return ir
->operands
[0];
753 } else if (is_vec_zero(op_const
[0])) {
754 return mul(ir
->operands
[1], ir
->operands
[2]);
755 } else if (is_vec_zero(op_const
[1])) {
756 unsigned op2_components
= ir
->operands
[2]->type
->vector_elements
;
757 ir_constant
*one
= new(mem_ctx
) ir_constant(1.0f
, op2_components
);
758 return mul(ir
->operands
[0], add(one
, neg(ir
->operands
[2])));
763 if (is_vec_one(op_const
[0]))
764 return ir
->operands
[1];
765 if (is_vec_zero(op_const
[0]))
766 return ir
->operands
[2];
777 ir_algebraic_visitor::handle_rvalue(ir_rvalue
**rvalue
)
782 ir_expression
*expr
= (*rvalue
)->as_expression();
783 if (!expr
|| expr
->operation
== ir_quadop_vector
)
786 ir_rvalue
*new_rvalue
= handle_expression(expr
);
787 if (new_rvalue
== *rvalue
)
790 /* If the expr used to be some vec OP scalar returning a vector, and the
791 * optimization gave us back a scalar, we still need to turn it into a
794 *rvalue
= swizzle_if_required(expr
, new_rvalue
);
796 this->progress
= true;
800 do_algebraic(exec_list
*instructions
, bool native_integers
,
801 const struct gl_shader_compiler_options
*options
)
803 ir_algebraic_visitor
v(native_integers
, options
);
805 visit_list_elements(&v
, instructions
);