glsl/lower_if: conditionally lower if-branches based on their size
[mesa.git] / src / compiler / glsl / lower_if_to_cond_assign.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file lower_if_to_cond_assign.cpp
26 *
27 * This flattens if-statements to conditional assignments if:
28 *
29 * - the GPU has limited or no flow control support
30 * (controlled by max_depth)
31 *
32 * - small conditional branches are more expensive than conditional assignments
33 * (controlled by min_branch_cost, that's the cost for a branch to be
34 * preserved)
35 *
36 * It can't handle other control flow being inside of its block, such
37 * as calls or loops. Hopefully loop unrolling and inlining will take
38 * care of those.
39 *
40 * Drivers for GPUs with no control flow support should simply call
41 *
42 * lower_if_to_cond_assign(instructions)
43 *
44 * to attempt to flatten all if-statements.
45 *
46 * Some GPUs (such as i965 prior to gen6) do support control flow, but have a
47 * maximum nesting depth N. Drivers for such hardware can call
48 *
49 * lower_if_to_cond_assign(instructions, N)
50 *
51 * to attempt to flatten any if-statements appearing at depth > N.
52 */
53
54 #include "compiler/glsl_types.h"
55 #include "ir.h"
56 #include "util/set.h"
57 #include "util/hash_table.h" /* Needed for the hashing functions */
58 #include "main/macros.h" /* for MAX2 */
59
60 namespace {
61
62 class ir_if_to_cond_assign_visitor : public ir_hierarchical_visitor {
63 public:
64 ir_if_to_cond_assign_visitor(gl_shader_stage stage,
65 unsigned max_depth,
66 unsigned min_branch_cost)
67 {
68 this->progress = false;
69 this->stage = stage;
70 this->max_depth = max_depth;
71 this->min_branch_cost = min_branch_cost;
72 this->depth = 0;
73
74 this->condition_variables =
75 _mesa_set_create(NULL, _mesa_hash_pointer,
76 _mesa_key_pointer_equal);
77 }
78
79 ~ir_if_to_cond_assign_visitor()
80 {
81 _mesa_set_destroy(this->condition_variables, NULL);
82 }
83
84 ir_visitor_status visit_enter(ir_if *);
85 ir_visitor_status visit_leave(ir_if *);
86
87 bool found_unsupported_op;
88 bool found_expensive_op;
89 bool is_then;
90 bool progress;
91 gl_shader_stage stage;
92 unsigned then_cost;
93 unsigned else_cost;
94 unsigned min_branch_cost;
95 unsigned max_depth;
96 unsigned depth;
97
98 struct set *condition_variables;
99 };
100
101 } /* anonymous namespace */
102
103 bool
104 lower_if_to_cond_assign(gl_shader_stage stage, exec_list *instructions,
105 unsigned max_depth, unsigned min_branch_cost)
106 {
107 if (max_depth == UINT_MAX)
108 return false;
109
110 ir_if_to_cond_assign_visitor v(stage, max_depth, min_branch_cost);
111
112 visit_list_elements(&v, instructions);
113
114 return v.progress;
115 }
116
117 void
118 check_ir_node(ir_instruction *ir, void *data)
119 {
120 ir_if_to_cond_assign_visitor *v = (ir_if_to_cond_assign_visitor *)data;
121
122 switch (ir->ir_type) {
123 case ir_type_call:
124 case ir_type_discard:
125 case ir_type_loop:
126 case ir_type_loop_jump:
127 case ir_type_return:
128 case ir_type_emit_vertex:
129 case ir_type_end_primitive:
130 case ir_type_barrier:
131 v->found_unsupported_op = true;
132 break;
133
134 case ir_type_dereference_variable: {
135 ir_variable *var = ir->as_dereference_variable()->variable_referenced();
136
137 /* Lowering branches with TCS output accesses breaks many piglit tests,
138 * so don't touch them for now.
139 */
140 if (v->stage == MESA_SHADER_TESS_CTRL &&
141 var->data.mode == ir_var_shader_out)
142 v->found_unsupported_op = true;
143 break;
144 }
145
146 /* SSBO, images, atomic counters are handled by ir_type_call */
147 case ir_type_texture:
148 v->found_expensive_op = true;
149 break;
150
151 case ir_type_expression:
152 case ir_type_dereference_array:
153 case ir_type_dereference_record:
154 if (v->is_then)
155 v->then_cost++;
156 else
157 v->else_cost++;
158 break;
159
160 default:
161 break;
162 }
163 }
164
165 void
166 move_block_to_cond_assign(void *mem_ctx,
167 ir_if *if_ir, ir_rvalue *cond_expr,
168 exec_list *instructions,
169 struct set *set)
170 {
171 foreach_in_list_safe(ir_instruction, ir, instructions) {
172 if (ir->ir_type == ir_type_assignment) {
173 ir_assignment *assign = (ir_assignment *)ir;
174
175 if (_mesa_set_search(set, assign) == NULL) {
176 _mesa_set_add(set, assign);
177
178 /* If the LHS of the assignment is a condition variable that was
179 * previously added, insert an additional assignment of false to
180 * the variable.
181 */
182 const bool assign_to_cv =
183 _mesa_set_search(
184 set, assign->lhs->variable_referenced()) != NULL;
185
186 if (!assign->condition) {
187 if (assign_to_cv) {
188 assign->rhs =
189 new(mem_ctx) ir_expression(ir_binop_logic_and,
190 glsl_type::bool_type,
191 cond_expr->clone(mem_ctx, NULL),
192 assign->rhs);
193 } else {
194 assign->condition = cond_expr->clone(mem_ctx, NULL);
195 }
196 } else {
197 assign->condition =
198 new(mem_ctx) ir_expression(ir_binop_logic_and,
199 glsl_type::bool_type,
200 cond_expr->clone(mem_ctx, NULL),
201 assign->condition);
202 }
203 }
204 }
205
206 /* Now, move from the if block to the block surrounding it. */
207 ir->remove();
208 if_ir->insert_before(ir);
209 }
210 }
211
212 ir_visitor_status
213 ir_if_to_cond_assign_visitor::visit_enter(ir_if *ir)
214 {
215 (void) ir;
216 this->depth++;
217
218 return visit_continue;
219 }
220
/**
 * Decide whether to flatten this if-statement and, if so, rewrite it as
 * predicated assignments hoisted into the surrounding block.
 *
 * Lowering is mandatory when the if sits deeper than max_depth; otherwise it
 * is a cost decision: lower only when both branches are cheap and contain no
 * expensive ops.  Unsupported ops (calls, loops, etc.) always block lowering.
 */
ir_visitor_status
ir_if_to_cond_assign_visitor::visit_leave(ir_if *ir)
{
   /* Pop the depth counter (matching visit_enter) while testing whether this
    * node exceeds the hardware's supported nesting depth.
    */
   bool must_lower = this->depth-- > this->max_depth;

   /* Only flatten when beyond the GPU's maximum supported nesting depth. */
   if (!must_lower && this->min_branch_cost == 0)
      return visit_continue;

   /* Reset per-if scan state; check_ir_node() fills these in. */
   this->found_unsupported_op = false;
   this->found_expensive_op = false;
   this->then_cost = 0;
   this->else_cost = 0;

   ir_assignment *assign;

   /* Check that both blocks don't contain anything we can't support. */
   this->is_then = true;
   foreach_in_list(ir_instruction, then_ir, &ir->then_instructions) {
      visit_tree(then_ir, check_ir_node, this);
   }

   this->is_then = false;
   foreach_in_list(ir_instruction, else_ir, &ir->else_instructions) {
      visit_tree(else_ir, check_ir_node, this);
   }

   if (this->found_unsupported_op)
      return visit_continue; /* can't handle inner unsupported opcodes */

   /* Skip if the branch cost is high enough or if there's an expensive op. */
   if (!must_lower &&
       (this->found_expensive_op ||
        MAX2(this->then_cost, this->else_cost) >= this->min_branch_cost))
      return visit_continue;

   void *mem_ctx = ralloc_parent(ir);

   /* Store the condition to a variable.  Move all of the instructions from
    * the then-clause of the if-statement.  Use the condition variable as a
    * condition for all assignments.
    */
   ir_variable *const then_var =
      new(mem_ctx) ir_variable(glsl_type::bool_type,
                               "if_to_cond_assign_then",
                               ir_var_temporary);
   ir->insert_before(then_var);

   ir_dereference_variable *then_cond =
      new(mem_ctx) ir_dereference_variable(then_var);

   /* The condition must be captured before the then-block is moved, since
    * moving predicates every assignment with this variable.
    */
   assign = new(mem_ctx) ir_assignment(then_cond, ir->condition);
   ir->insert_before(assign);

   move_block_to_cond_assign(mem_ctx, ir, then_cond,
                             &ir->then_instructions,
                             this->condition_variables);

   /* Add the new condition variable to the hash table.  This allows us to
    * find this variable when lowering other (enclosing) if-statements.
    */
   _mesa_set_add(this->condition_variables, then_var);

   /* If there are instructions in the else-clause, store the inverse of the
    * condition to a variable.  Move all of the instructions from the
    * else-clause if the if-statement.  Use the (inverse) condition variable
    * as a condition for all assignments.
    */
   if (!ir->else_instructions.is_empty()) {
      ir_variable *const else_var =
         new(mem_ctx) ir_variable(glsl_type::bool_type,
                                  "if_to_cond_assign_else",
                                  ir_var_temporary);
      ir->insert_before(else_var);

      ir_dereference_variable *else_cond =
         new(mem_ctx) ir_dereference_variable(else_var);

      /* else runs exactly when the then-condition is false. */
      ir_rvalue *inverse =
         new(mem_ctx) ir_expression(ir_unop_logic_not,
                                    then_cond->clone(mem_ctx, NULL));

      assign = new(mem_ctx) ir_assignment(else_cond, inverse);
      ir->insert_before(assign);

      move_block_to_cond_assign(mem_ctx, ir, else_cond,
                                &ir->else_instructions,
                                this->condition_variables);

      /* Add the new condition variable to the hash table.  This allows us to
       * find this variable when lowering other (enclosing) if-statements.
       */
      _mesa_set_add(this->condition_variables, else_var);
   }

   /* Both branches are now empty and hoisted; drop the if node itself. */
   ir->remove();

   this->progress = true;

   return visit_continue;
}