/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file opt_algebraic.cpp
 *
 * Takes advantage of associativity, commutativity, and other algebraic
 * properties to simplify expressions.
 */
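
/*
 * Examples of the rewrites applied below include:
 *
 *    x + 0 -> x           x * 1 -> x           x * 0 -> 0
 *    0 - x -> -x          x / 1 -> x           -(-x)  -> x
 *    !(a < b) -> a >= b   rcp(rcp(x)) -> x     lrp(x, y, 0) -> x
 *
 * Constants are also reassociated toward each other so that the separate
 * constant folding pass can combine them.
 */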

#include "ir.h"
#include "ir_visitor.h"
#include "ir_rvalue_visitor.h"
#include "ir_optimization.h"
#include "ir_builder.h"
#include "glsl_types.h"

using namespace ir_builder;

namespace {

/**
 * Visitor class for algebraically simplifying rvalue expressions.
 */

class ir_algebraic_visitor : public ir_rvalue_visitor {
public:
   ir_algebraic_visitor()
   {
      this->progress = false;
      this->mem_ctx = NULL;
   }

   virtual ~ir_algebraic_visitor()
   {
   }

   ir_rvalue *handle_expression(ir_expression *ir);
   void handle_rvalue(ir_rvalue **rvalue);
   bool reassociate_constant(ir_expression *ir1,
                             int const_index,
                             ir_constant *constant,
                             ir_expression *ir2);
   void reassociate_operands(ir_expression *ir1,
                             int op1,
                             ir_expression *ir2,
                             int op2);
   ir_rvalue *swizzle_if_required(ir_expression *expr,
                                  ir_rvalue *operand);

   void *mem_ctx;

   bool progress;
};

} /* unnamed namespace */

static inline bool
is_vec_zero(ir_constant *ir)
{
   return (ir == NULL) ? false : ir->is_zero();
}

static inline bool
is_vec_one(ir_constant *ir)
{
   return (ir == NULL) ? false : ir->is_one();
}

static inline bool
is_vec_negative_one(ir_constant *ir)
{
   return (ir == NULL) ? false : ir->is_negative_one();
}

static inline bool
is_vec_basis(ir_constant *ir)
{
   return (ir == NULL) ? false : ir->is_basis();
}

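/**
 * Resets the type of \p ir after reassociation may have swapped a vector
 * operand into it: the expression takes the type of its vector operand,
 * if any, and otherwise keeps the type of its second operand.
 */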
static void
update_type(ir_expression *ir)
{
   if (ir->operands[0]->type->is_vector())
      ir->type = ir->operands[0]->type;
   else
      ir->type = ir->operands[1]->type;
}

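/**
 * Swaps operand \p op1 of \p ir1 with operand \p op2 of \p ir2 and fixes up
 * the type of \p ir2 afterwards.
 */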
void
ir_algebraic_visitor::reassociate_operands(ir_expression *ir1,
                                           int op1,
                                           ir_expression *ir2,
                                           int op2)
{
   ir_rvalue *temp = ir2->operands[op2];
   ir2->operands[op2] = ir1->operands[op1];
   ir1->operands[op1] = temp;

   /* Update the type of ir2.  The type of ir1 won't have changed --
    * base types matched, and at least one of the operands of the 2
    * binops is still a vector if any of them were.
    */
   update_type(ir2);

   this->progress = true;
}

/**
 * Reassociates a constant down a tree of adds or multiplies.
 *
 * Consider (2 * (a * (b * 0.5))).  We want to end up with a * b.
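 *
 * Reassociation only moves the two constants into the same binop; folding
 * 2 * 0.5 down to 1.0 is then left to the constant folding pass.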
 */
bool
ir_algebraic_visitor::reassociate_constant(ir_expression *ir1, int const_index,
                                           ir_constant *constant,
                                           ir_expression *ir2)
{
   if (!ir2 || ir1->operation != ir2->operation)
      return false;

   /* Don't want to even think about matrices. */
   if (ir1->operands[0]->type->is_matrix() ||
       ir1->operands[1]->type->is_matrix() ||
       ir2->operands[0]->type->is_matrix() ||
       ir2->operands[1]->type->is_matrix())
      return false;

   ir_constant *ir2_const[2];
   ir2_const[0] = ir2->operands[0]->constant_expression_value();
   ir2_const[1] = ir2->operands[1]->constant_expression_value();

   if (ir2_const[0] && ir2_const[1])
      return false;

   if (ir2_const[0]) {
      reassociate_operands(ir1, const_index, ir2, 1);
      return true;
   } else if (ir2_const[1]) {
      reassociate_operands(ir1, const_index, ir2, 0);
      return true;
   }

   if (reassociate_constant(ir1, const_index, constant,
                            ir2->operands[0]->as_expression())) {
      update_type(ir2);
      return true;
   }

   if (reassociate_constant(ir1, const_index, constant,
                            ir2->operands[1]->as_expression())) {
      update_type(ir2);
      return true;
   }

   return false;
}

/* When eliminating an expression and just returning one of its operands,
 * we may need to swizzle that operand out to a vector if the expression
 * was of vector type.
 */
ir_rvalue *
ir_algebraic_visitor::swizzle_if_required(ir_expression *expr,
                                          ir_rvalue *operand)
{
   if (expr->type->is_vector() && operand->type->is_scalar()) {
      return new(mem_ctx) ir_swizzle(operand, 0, 0, 0, 0,
                                     expr->type->vector_elements);
   } else
      return operand;
}

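/**
 * Applies the algebraic rewrites below to a single expression, returning
 * either the original \p ir or a simplified replacement rvalue.
 */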
ir_rvalue *
ir_algebraic_visitor::handle_expression(ir_expression *ir)
{
   ir_constant *op_const[4] = {NULL, NULL, NULL, NULL};
   ir_expression *op_expr[4] = {NULL, NULL, NULL, NULL};
   unsigned int i;

   assert(ir->get_num_operands() <= 4);
   for (i = 0; i < ir->get_num_operands(); i++) {
      if (ir->operands[i]->type->is_matrix())
         return ir;

      op_const[i] = ir->operands[i]->constant_expression_value();
      op_expr[i] = ir->operands[i]->as_expression();
   }

   if (this->mem_ctx == NULL)
      this->mem_ctx = ralloc_parent(ir);

   switch (ir->operation) {
   case ir_unop_abs:
      if (op_expr[0] == NULL)
         break;

      switch (op_expr[0]->operation) {
      case ir_unop_abs:
      case ir_unop_neg:
         return abs(op_expr[0]->operands[0]);
      default:
         break;
      }
      break;

   case ir_unop_neg:
      if (op_expr[0] == NULL)
         break;

      if (op_expr[0]->operation == ir_unop_neg) {
         return op_expr[0]->operands[0];
      }
      break;

   case ir_unop_logic_not: {
      enum ir_expression_operation new_op = ir_unop_logic_not;

      if (op_expr[0] == NULL)
         break;

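      /* Fold the negation into the comparison: !(a <op> b) becomes the
       * inverse comparison applied to the same operands.
       */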
      switch (op_expr[0]->operation) {
      case ir_binop_less:       new_op = ir_binop_gequal;     break;
      case ir_binop_greater:    new_op = ir_binop_lequal;     break;
      case ir_binop_lequal:     new_op = ir_binop_greater;    break;
      case ir_binop_gequal:     new_op = ir_binop_less;       break;
      case ir_binop_equal:      new_op = ir_binop_nequal;     break;
      case ir_binop_nequal:     new_op = ir_binop_equal;      break;
      case ir_binop_all_equal:  new_op = ir_binop_any_nequal; break;
      case ir_binop_any_nequal: new_op = ir_binop_all_equal;  break;

      default:
         /* The default case handler is here to silence a warning from GCC.
          */
         break;
      }

      if (new_op != ir_unop_logic_not) {
         return new(mem_ctx) ir_expression(new_op,
                                           ir->type,
                                           op_expr[0]->operands[0],
                                           op_expr[0]->operands[1]);
      }

      break;
   }

   case ir_binop_add:
      if (is_vec_zero(op_const[0]))
         return ir->operands[1];
      if (is_vec_zero(op_const[1]))
         return ir->operands[0];

      /* Reassociate addition of constants so that we can do constant
       * folding.
       */
      if (op_const[0] && !op_const[1])
         reassociate_constant(ir, 0, op_const[0], op_expr[1]);
      if (op_const[1] && !op_const[0])
         reassociate_constant(ir, 1, op_const[1], op_expr[0]);
      break;

   case ir_binop_sub:
      if (is_vec_zero(op_const[0]))
         return neg(ir->operands[1]);
      if (is_vec_zero(op_const[1]))
         return ir->operands[0];
      break;

   case ir_binop_mul:
      if (is_vec_one(op_const[0]))
         return ir->operands[1];
      if (is_vec_one(op_const[1]))
         return ir->operands[0];

      if (is_vec_zero(op_const[0]) || is_vec_zero(op_const[1]))
         return ir_constant::zero(ir, ir->type);

      if (is_vec_negative_one(op_const[0]))
         return neg(ir->operands[1]);
      if (is_vec_negative_one(op_const[1]))
         return neg(ir->operands[0]);

      /* Reassociate multiplication of constants so that we can do
       * constant folding.
       */
      if (op_const[0] && !op_const[1])
         reassociate_constant(ir, 0, op_const[0], op_expr[1]);
      if (op_const[1] && !op_const[0])
         reassociate_constant(ir, 1, op_const[1], op_expr[0]);

      break;

   case ir_binop_div:
      if (is_vec_one(op_const[0]) && ir->type->base_type == GLSL_TYPE_FLOAT) {
         return new(mem_ctx) ir_expression(ir_unop_rcp,
                                           ir->operands[1]->type,
                                           ir->operands[1],
                                           NULL);
      }
      if (is_vec_one(op_const[1]))
         return ir->operands[0];
      break;

   case ir_binop_dot:
      if (is_vec_zero(op_const[0]) || is_vec_zero(op_const[1]))
         return ir_constant::zero(mem_ctx, ir->type);

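      /* A dot product with a basis vector just selects one component:
       * dot(v, e_i) == v[i], so the expression collapses to a swizzle of
       * the other operand.
       */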
      if (is_vec_basis(op_const[0])) {
         unsigned component = 0;
         for (unsigned c = 0; c < op_const[0]->type->vector_elements; c++) {
            if (op_const[0]->value.f[c] == 1.0)
               component = c;
         }
         return new(mem_ctx) ir_swizzle(ir->operands[1], component, 0, 0, 0, 1);
      }
      if (is_vec_basis(op_const[1])) {
         unsigned component = 0;
         for (unsigned c = 0; c < op_const[1]->type->vector_elements; c++) {
            if (op_const[1]->value.f[c] == 1.0)
               component = c;
         }
         return new(mem_ctx) ir_swizzle(ir->operands[0], component, 0, 0, 0, 1);
      }
      break;

   case ir_binop_rshift:
   case ir_binop_lshift:
      /* 0 >> x == 0 */
      if (is_vec_zero(op_const[0]))
         return ir->operands[0];
      /* x >> 0 == x */
      if (is_vec_zero(op_const[1]))
         return ir->operands[0];
      break;

   case ir_binop_logic_and:
      if (is_vec_one(op_const[0])) {
         return ir->operands[1];
      } else if (is_vec_one(op_const[1])) {
         return ir->operands[0];
      } else if (is_vec_zero(op_const[0]) || is_vec_zero(op_const[1])) {
         return ir_constant::zero(mem_ctx, ir->type);
      } else if (op_expr[0] && op_expr[0]->operation == ir_unop_logic_not &&
                 op_expr[1] && op_expr[1]->operation == ir_unop_logic_not) {
         /* De Morgan's Law:
          *    (not A) and (not B) === not (A or B)
          */
         return logic_not(logic_or(op_expr[0]->operands[0],
                                   op_expr[1]->operands[0]));
      } else if (ir->operands[0]->equals(ir->operands[1])) {
         /* (a && a) == a */
         return ir->operands[0];
      }
      break;

   case ir_binop_logic_xor:
      if (is_vec_zero(op_const[0])) {
         return ir->operands[1];
      } else if (is_vec_zero(op_const[1])) {
         return ir->operands[0];
      } else if (is_vec_one(op_const[0])) {
         return logic_not(ir->operands[1]);
      } else if (is_vec_one(op_const[1])) {
         return logic_not(ir->operands[0]);
      } else if (ir->operands[0]->equals(ir->operands[1])) {
         /* (a ^^ a) == false */
         return ir_constant::zero(mem_ctx, ir->type);
      }
      break;

   case ir_binop_logic_or:
      if (is_vec_zero(op_const[0])) {
         return ir->operands[1];
      } else if (is_vec_zero(op_const[1])) {
         return ir->operands[0];
      } else if (is_vec_one(op_const[0]) || is_vec_one(op_const[1])) {
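         /* true || x == true, so build a constant with every component set
          * to true.
          */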
         ir_constant_data data;

         for (unsigned i = 0; i < 16; i++)
            data.b[i] = true;

         return new(mem_ctx) ir_constant(ir->type, &data);
      } else if (op_expr[0] && op_expr[0]->operation == ir_unop_logic_not &&
                 op_expr[1] && op_expr[1]->operation == ir_unop_logic_not) {
         /* De Morgan's Law:
          *    (not A) or (not B) === not (A and B)
          */
         return logic_not(logic_and(op_expr[0]->operands[0],
                                    op_expr[1]->operands[0]));
      } else if (ir->operands[0]->equals(ir->operands[1])) {
         /* (a || a) == a */
         return ir->operands[0];
      }
      break;

   case ir_unop_rcp:
      if (op_expr[0] && op_expr[0]->operation == ir_unop_rcp)
         return op_expr[0]->operands[0];

      /* While ir_to_mesa.cpp will lower sqrt(x) to rcp(rsq(x)), it does so at
       * its IR level, so we can always apply this transformation.
       */
      if (op_expr[0] && op_expr[0]->operation == ir_unop_rsq)
         return sqrt(op_expr[0]->operands[0]);

      /* As far as we know, all backends are OK with rsq. */
      if (op_expr[0] && op_expr[0]->operation == ir_unop_sqrt) {
         return rsq(op_expr[0]->operands[0]);
      }

      break;

   case ir_triop_lrp:
      /* Operands are (x, y, a). */
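      /* lrp(x, y, a) == x * (1 - a) + y * a, so a == 0 selects x and
       * a == 1 selects y.
       */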
      if (is_vec_zero(op_const[2])) {
         return ir->operands[0];
      } else if (is_vec_one(op_const[2])) {
         return ir->operands[1];
      }
      break;

   default:
      break;
   }

   return ir;
}

void
ir_algebraic_visitor::handle_rvalue(ir_rvalue **rvalue)
{
   if (!*rvalue)
      return;

   ir_expression *expr = (*rvalue)->as_expression();
   if (!expr || expr->operation == ir_quadop_vector)
      return;

   ir_rvalue *new_rvalue = handle_expression(expr);
   if (new_rvalue == *rvalue)
      return;

   /* If the expr used to be some vec OP scalar returning a vector, and the
    * optimization gave us back a scalar, we still need to turn it into a
    * vector.
    */
   *rvalue = swizzle_if_required(expr, new_rvalue);

   this->progress = true;
}

bool
do_algebraic(exec_list *instructions)
{
   ir_algebraic_visitor v;

   visit_list_elements(&v, instructions);

   return v.progress;
}
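
/* Usage sketch (illustrative, not from the original source): callers
 * typically run this pass in a loop until it stops reporting progress,
 * along the lines of
 *
 *    bool progress;
 *    do {
 *       progress = do_algebraic(ir);   // `ir`: an assumed exec_list of
 *                                      // the shader's IR instructions
 *    } while (progress);
 */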