/*
 * Copyright © 2014 Intel Corporation
 * Copyright © 2015 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *    Rob Clark (robclark@freedesktop.org)
 */
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_control_flow.h"

#include "ir3_nir.h"

/* Based on nir_opt_peephole_select, and hacked up to more aggressively
 * flatten anything that can be flattened.
 *
 * This *might* be something that other drivers could use.  On the other
 * hand, I think most other hw has predicated instructions or similar,
 * to select which side of the if/else writes back its result (and
 * therefore does not have to assign unique registers to both sides of
 * the if/else).  (And hopefully those drivers don't also have crazy
 * scheduling reqs and can more easily do this in their backend.)
 *
 * TODO eventually when we have proper flow control in the backend:
 *
 *  + Probably weight normal ALUs differently from SFUs (cos/rcp/exp),
 *    since executing extra SFU instructions on the branch-not-taken
 *    path will generally be much more expensive.
 *
 *    Possibly what constitutes an ALU vs an SFU differs between hw
 *    backends.. but that seems doubtful.
 *
 *  + Account for texture fetches and memory accesses (incl. UBOs),
 *    since these will be more expensive..
 *
 *  + When the if-condition is const (or uniform), or we have some way
 *    to know that all threads in the warp take the same branch, then we
 *    should prefer not to flatten the if/else..
 */
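
/*
 * Illustrative sketch (not from the original source comments): given NIR
 * along the lines of
 *
 *    if ssa_0 {
 *       ssa_1 = fadd ssa_a, ssa_b
 *    } else {
 *       ssa_2 = fmul ssa_a, ssa_b
 *    }
 *    ssa_3 = phi ssa_1, ssa_2
 *
 * this pass moves both ALU instructions into the preceding block, removes
 * the if/else, and rewrites the phi as a select:
 *
 *    ssa_1 = fadd ssa_a, ssa_b
 *    ssa_2 = fmul ssa_a, ssa_b
 *    ssa_3 = bcsel ssa_0, ssa_1, ssa_2
 */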

struct lower_state {
	nir_builder b;
	void *mem_ctx;
	bool progress;
};

static bool
valid_dest(nir_block *block, nir_dest *dest)
{
	/* It must be SSA: */
	if (!dest->is_ssa)
		return false;

	/* We only lower blocks that do not contain other blocks
	 * (so this is run iteratively in a loop).  Therefore if
	 * we get this far, it should not have any if_uses:
	 */
	assert(list_empty(&dest->ssa.if_uses));

	/* The only uses of this definition must be phi's in the
	 * successor or in the current block:
	 */
	nir_foreach_use(&dest->ssa, use) {
		nir_instr *dest_instr = use->parent_instr;

		/* use in the current block is ok: */
		if (dest_instr->block == block)
			continue;

		/* phi in the successor block is ok too: */
		if ((dest_instr->type == nir_instr_type_phi) &&
				(dest_instr->block == block->successors[0]))
			continue;

		return false;
	}

	return true;
}

/* Check that the block contains only instructions we know how to safely
 * move out of the if/else:
 */
static bool
block_check_for_allowed_instrs(nir_block *block)
{
	nir_foreach_instr(block, instr) {
		switch (instr->type) {
		case nir_instr_type_intrinsic: {
			nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
			const nir_intrinsic_info *info =
					&nir_intrinsic_infos[intr->intrinsic];

			switch (intr->intrinsic) {
			case nir_intrinsic_discard_if:
				/* to simplify things, we want discard_if src in ssa: */
				if (!intr->src[0].is_ssa)
					return false;
				/* fallthrough */
			case nir_intrinsic_discard:
				/* discard/discard_if can be reordered, but only
				 * with some special care:
				 */
				break;
			case nir_intrinsic_store_output:
				/* TODO technically, if both if and else store
				 * the same output, we can hoist that out to
				 * the end of the block w/ a phi..
				 * In practice, the tgsi shaders we already get
				 * do this for us, so I think we don't need to
				 * worry about that case:
				 */
				return false;
			default:
				if (!(info->flags & NIR_INTRINSIC_CAN_REORDER))
					return false;
			}

			break;
		}

		case nir_instr_type_tex: {
			nir_tex_instr *tex = nir_instr_as_tex(instr);
			if (!valid_dest(block, &tex->dest))
				return false;
			break;
		}

		case nir_instr_type_phi: {
			nir_phi_instr *phi = nir_instr_as_phi(instr);
			if (!valid_dest(block, &phi->dest))
				return false;
			break;
		}

		case nir_instr_type_alu: {
			nir_alu_instr *alu = nir_instr_as_alu(instr);
			if (!valid_dest(block, &alu->dest.dest))
				return false;
			break;
		}

		case nir_instr_type_load_const:
		case nir_instr_type_ssa_undef:
			break; /* always ssa dest */

		default:
			return false;
		}
	}

	return true;
}

/* flatten a then or else block: */
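/*
 * Added note on the discard handling below: a bare discard would become
 * unconditional once its block is flattened, so it is rewritten as
 * discard_if(condition) in the preceding block, and an existing
 * discard_if(src) becomes discard_if(condition && src).  For the else
 * block, 'invert' negates the condition first.
 */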
static void
flatten_block(nir_builder *bld, nir_block *if_block, nir_block *prev_block,
		nir_ssa_def *condition, bool invert)
{
	nir_foreach_instr_safe(if_block, instr) {
		if (instr->type == nir_instr_type_intrinsic) {
			nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
			if ((intr->intrinsic == nir_intrinsic_discard) ||
					(intr->intrinsic == nir_intrinsic_discard_if)) {
				nir_ssa_def *discard_cond;

				bld->cursor = nir_after_instr(
						nir_block_last_instr(prev_block));

				/* only invert the condition once, even if the block
				 * contains multiple discards:
				 */
				if (invert) {
					condition = nir_inot(bld, condition);
					invert = false;
				}

				if (intr->intrinsic == nir_intrinsic_discard) {
					discard_cond = condition;
				} else {
					assert(intr->src[0].is_ssa);
					/* discard_if gets re-written w/ src and'd: */
					discard_cond = nir_iand(bld, condition, intr->src[0].ssa);
				}

				nir_intrinsic_instr *discard_if =
						nir_intrinsic_instr_create(bld->shader,
								nir_intrinsic_discard_if);
				discard_if->src[0] = nir_src_for_ssa(discard_cond);

				nir_instr_insert_after(nir_block_last_instr(prev_block),
						&discard_if->instr);

				nir_instr_remove(instr);

				continue;
			}
		}

		/* if not handled specially above, just move to prev block: */

		/* NOTE: exec_node_remove() is safe here (vs nir_instr_remove())
		 * since we are re-adding the instruction back in to the prev
		 * block (so no dangling SSA uses):
		 */
		exec_node_remove(&instr->node);
		instr->block = prev_block;
		exec_list_push_tail(&prev_block->instr_list, &instr->node);
	}
}
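
/*
 * Added summary of the pattern this callback matches: a block whose
 * immediate CFG predecessor is an if/else with a single block on each
 * side:
 *
 *    prev_block;
 *    if (condition) {
 *       then_block;
 *    } else {
 *       else_block;
 *    }
 *    block;            <- phis for the then/else values live here
 *
 * If both sides pass block_check_for_allowed_instrs(), their contents
 * are flattened into prev_block, the if statement is removed, and each
 * phi in 'block' is replaced with a bcsel.
 */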
static bool
lower_if_else_block(nir_block *block, void *void_state)
{
	struct lower_state *state = void_state;

	/* If the block is empty, then it certainly doesn't have any phi nodes,
	 * so we can skip it.  This also ensures that we do an early skip on the
	 * end block of the function which isn't actually attached to the CFG.
	 */
	if (exec_list_is_empty(&block->instr_list))
		return true;

	if (nir_cf_node_is_first(&block->cf_node))
		return true;

	nir_cf_node *prev_node = nir_cf_node_prev(&block->cf_node);
	if (prev_node->type != nir_cf_node_if)
		return true;

	nir_if *if_stmt = nir_cf_node_as_if(prev_node);
	nir_cf_node *then_node = nir_if_first_then_node(if_stmt);
	nir_cf_node *else_node = nir_if_first_else_node(if_stmt);

	/* We can only have one block in each side ... */
	if (nir_if_last_then_node(if_stmt) != then_node ||
			nir_if_last_else_node(if_stmt) != else_node)
		return true;

	nir_block *then_block = nir_cf_node_as_block(then_node);
	nir_block *else_block = nir_cf_node_as_block(else_node);

	/* ... and those blocks must only contain "allowed" instructions. */
	if (!block_check_for_allowed_instrs(then_block) ||
			!block_check_for_allowed_instrs(else_block))
		return true;

	/* condition should be ssa too, which simplifies flatten_block: */
	if (!if_stmt->condition.is_ssa)
		return true;

	/* At this point, we know that the previous CFG node is an if-then
	 * statement containing only moves to phi nodes in this block.  We can
	 * just remove that entire CF node and replace all of the phi nodes
	 * with selects.
	 */

	nir_block *prev_block = nir_cf_node_as_block(nir_cf_node_prev(prev_node));
	assert(prev_block->cf_node.type == nir_cf_node_block);

	/* First, we move the remaining instructions from the blocks to the
	 * block before.  There are a few things that need handling specially
	 * like discard/discard_if.
	 */
	flatten_block(&state->b, then_block, prev_block,
			if_stmt->condition.ssa, false);
	flatten_block(&state->b, else_block, prev_block,
			if_stmt->condition.ssa, true);

	nir_foreach_instr_safe(block, instr) {
		if (instr->type != nir_instr_type_phi)
			break;

		nir_phi_instr *phi = nir_instr_as_phi(instr);
		nir_alu_instr *sel = nir_alu_instr_create(state->mem_ctx, nir_op_bcsel);
		nir_src_copy(&sel->src[0].src, &if_stmt->condition, state->mem_ctx);
		/* Splat the condition to all channels */
		memset(sel->src[0].swizzle, 0, sizeof sel->src[0].swizzle);

		assert(exec_list_length(&phi->srcs) == 2);
		nir_foreach_phi_src(phi, src) {
			assert(src->pred == then_block || src->pred == else_block);
			assert(src->src.is_ssa);

			/* bcsel selects src[1] when the condition is true, so the
			 * then-side value goes in src[1] and the else-side value
			 * in src[2]:
			 */
			unsigned idx = src->pred == then_block ? 1 : 2;
			nir_src_copy(&sel->src[idx].src, &src->src, state->mem_ctx);
		}

		nir_ssa_dest_init(&sel->instr, &sel->dest.dest,
				phi->dest.ssa.num_components, 32, phi->dest.ssa.name);
		sel->dest.write_mask = (1 << phi->dest.ssa.num_components) - 1;

		nir_ssa_def_rewrite_uses(&phi->dest.ssa,
				nir_src_for_ssa(&sel->dest.dest.ssa));

		nir_instr_insert_before(&phi->instr, &sel->instr);
		nir_instr_remove(&phi->instr);
	}

	nir_cf_node_remove(&if_stmt->cf_node);
	state->progress = true;

	return true;
}

static bool
lower_if_else_impl(nir_function_impl *impl)
{
	struct lower_state state;

	state.mem_ctx = ralloc_parent(impl);
	state.progress = false;
	nir_builder_init(&state.b, impl);

	nir_foreach_block(impl, lower_if_else_block, &state);

	if (state.progress)
		nir_metadata_preserve(impl, nir_metadata_none);

	return state.progress;
}

bool
ir3_nir_lower_if_else(nir_shader *shader)
{
	bool progress = false;

	nir_foreach_function(shader, function) {
		if (function->impl)
			progress |= lower_if_else_impl(function->impl);
	}

	return progress;
}
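
/*
 * Usage sketch (illustrative, not part of the original file): since
 * valid_dest() assumes that nested blocks have already been flattened,
 * a driver would run this pass in its NIR optimization loop until it
 * stops making progress, e.g.:
 *
 *    bool progress;
 *    do {
 *       progress = ir3_nir_lower_if_else(shader);
 *       // ... other lowering/opt passes ...
 *    } while (progress);
 */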