/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
29 #include "nir_control_flow.h"
30 #include "nir_search_helpers.h"
/*
 * Implements a small peephole optimization that looks for
 *
 * if (cond) {
 *    <empty>
 * } else {
 *    <empty>
 * }
 * phi
 * ...
 * phi
 *
 * and replaces it with:
 *
 * sel
 * ...
 * sel
 *
 * where the SSA defs are ALU operations or other cheap instructions (not
 * texturing, for example).
 *
 * If the number of ALU operations in the branches is greater than the limit
 * parameter, then the optimization is skipped.  In limit=0 mode, the SSA defs
 * must only be MOVs which we expect to get copy-propagated away once they're
 * out of the inner blocks.
 */
62 block_check_for_allowed_instrs(nir_block
*block
, unsigned *count
,
63 bool alu_ok
, bool indirect_load_ok
,
64 bool expensive_alu_ok
)
66 nir_foreach_instr(instr
, block
) {
67 switch (instr
->type
) {
68 case nir_instr_type_intrinsic
: {
69 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
71 switch (intrin
->intrinsic
) {
72 case nir_intrinsic_load_deref
: {
73 nir_deref_instr
*const deref
= nir_src_as_deref(intrin
->src
[0]);
75 switch (deref
->mode
) {
76 case nir_var_shader_in
:
78 /* Don't try to remove flow control around an indirect load
79 * because that flow control may be trying to avoid invalid
82 if (!indirect_load_ok
&& nir_deref_instr_has_indirect(deref
))
93 case nir_intrinsic_load_uniform
:
105 case nir_instr_type_deref
:
106 case nir_instr_type_load_const
:
109 case nir_instr_type_alu
: {
110 nir_alu_instr
*mov
= nir_instr_as_alu(instr
);
111 bool movelike
= false;
138 if (!alu_ok
|| !expensive_alu_ok
)
145 /* It must be a move-like operation. */
152 if (!mov
->dest
.dest
.is_ssa
)
155 const struct nir_block
*const expected_block
= mov
->instr
.block
;
156 const nir_alu_type expected_type
=
157 nir_alu_type_get_base_type(nir_op_infos
[mov
->op
].output_type
);
160 /* If the ALU operation is an fsat or a move-like operation, do
161 * not count it. The expectation is that it will eventually be
162 * merged as a destination modifier or source modifier on some
165 if (mov
->op
!= nir_op_fsat
&& !movelike
)
168 /* Can't handle saturate */
169 if (mov
->dest
.saturate
)
172 /* It cannot have any if-uses */
173 if (!list_is_empty(&mov
->dest
.dest
.ssa
.if_uses
))
176 /* The only uses of this definition must be phis in the successor */
177 nir_foreach_use(use
, &mov
->dest
.dest
.ssa
) {
178 if (use
->parent_instr
->type
!= nir_instr_type_phi
||
179 use
->parent_instr
->block
!= block
->successors
[0])
195 nir_opt_peephole_select_block(nir_block
*block
, nir_shader
*shader
,
196 unsigned limit
, bool indirect_load_ok
,
197 bool expensive_alu_ok
)
199 if (nir_cf_node_is_first(&block
->cf_node
))
202 nir_cf_node
*prev_node
= nir_cf_node_prev(&block
->cf_node
);
203 if (prev_node
->type
!= nir_cf_node_if
)
206 nir_if
*if_stmt
= nir_cf_node_as_if(prev_node
);
208 if (if_stmt
->control
== nir_selection_control_dont_flatten
)
211 nir_block
*then_block
= nir_if_first_then_block(if_stmt
);
212 nir_block
*else_block
= nir_if_first_else_block(if_stmt
);
214 /* We can only have one block in each side ... */
215 if (nir_if_last_then_block(if_stmt
) != then_block
||
216 nir_if_last_else_block(if_stmt
) != else_block
)
219 if (if_stmt
->control
== nir_selection_control_flatten
) {
220 /* Override driver defaults */
221 indirect_load_ok
= true;
222 expensive_alu_ok
= true;
225 /* ... and those blocks must only contain "allowed" instructions. */
227 if (!block_check_for_allowed_instrs(then_block
, &count
, limit
!= 0,
228 indirect_load_ok
, expensive_alu_ok
) ||
229 !block_check_for_allowed_instrs(else_block
, &count
, limit
!= 0,
230 indirect_load_ok
, expensive_alu_ok
))
233 if (count
> limit
&& if_stmt
->control
!= nir_selection_control_flatten
)
236 /* At this point, we know that the previous CFG node is an if-then
237 * statement containing only moves to phi nodes in this block. We can
238 * just remove that entire CF node and replace all of the phi nodes with
242 nir_block
*prev_block
= nir_cf_node_as_block(nir_cf_node_prev(prev_node
));
244 /* First, we move the remaining instructions from the blocks to the
245 * block before. We have already guaranteed that this is safe by
246 * calling block_check_for_allowed_instrs()
248 nir_foreach_instr_safe(instr
, then_block
) {
249 exec_node_remove(&instr
->node
);
250 instr
->block
= prev_block
;
251 exec_list_push_tail(&prev_block
->instr_list
, &instr
->node
);
254 nir_foreach_instr_safe(instr
, else_block
) {
255 exec_node_remove(&instr
->node
);
256 instr
->block
= prev_block
;
257 exec_list_push_tail(&prev_block
->instr_list
, &instr
->node
);
260 nir_foreach_instr_safe(instr
, block
) {
261 if (instr
->type
!= nir_instr_type_phi
)
264 nir_phi_instr
*phi
= nir_instr_as_phi(instr
);
265 nir_alu_instr
*sel
= nir_alu_instr_create(shader
, nir_op_bcsel
);
266 nir_src_copy(&sel
->src
[0].src
, &if_stmt
->condition
, sel
);
267 /* Splat the condition to all channels */
268 memset(sel
->src
[0].swizzle
, 0, sizeof sel
->src
[0].swizzle
);
270 assert(exec_list_length(&phi
->srcs
) == 2);
271 nir_foreach_phi_src(src
, phi
) {
272 assert(src
->pred
== then_block
|| src
->pred
== else_block
);
273 assert(src
->src
.is_ssa
);
275 unsigned idx
= src
->pred
== then_block
? 1 : 2;
276 nir_src_copy(&sel
->src
[idx
].src
, &src
->src
, sel
);
279 nir_ssa_dest_init(&sel
->instr
, &sel
->dest
.dest
,
280 phi
->dest
.ssa
.num_components
,
281 phi
->dest
.ssa
.bit_size
, phi
->dest
.ssa
.name
);
282 sel
->dest
.write_mask
= (1 << phi
->dest
.ssa
.num_components
) - 1;
284 nir_ssa_def_rewrite_uses(&phi
->dest
.ssa
,
285 nir_src_for_ssa(&sel
->dest
.dest
.ssa
));
287 nir_instr_insert_before(&phi
->instr
, &sel
->instr
);
288 nir_instr_remove(&phi
->instr
);
291 nir_cf_node_remove(&if_stmt
->cf_node
);
296 nir_opt_peephole_select_impl(nir_function_impl
*impl
, unsigned limit
,
297 bool indirect_load_ok
, bool expensive_alu_ok
)
299 nir_shader
*shader
= impl
->function
->shader
;
300 bool progress
= false;
302 nir_foreach_block_safe(block
, impl
) {
303 progress
|= nir_opt_peephole_select_block(block
, shader
, limit
,
309 nir_metadata_preserve(impl
, nir_metadata_none
);
312 impl
->valid_metadata
&= ~nir_metadata_not_properly_reset
;
320 nir_opt_peephole_select(nir_shader
*shader
, unsigned limit
,
321 bool indirect_load_ok
, bool expensive_alu_ok
)
323 bool progress
= false;
325 nir_foreach_function(function
, shader
) {
327 progress
|= nir_opt_peephole_select_impl(function
->impl
, limit
,