/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
#include "nir.h"

/*
 * Implements a pass that lowers vector phi nodes to scalar phi nodes when
 * we don't think it will hurt anything.
 */
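/* Illustrative sketch (informal pseudocode, not exact NIR syntax): for a
 * two-component phi such as
 *
 *    vec2 %r = phi block0: %a, block1: %b
 *
 * the pass emits movs in each predecessor to pick off the components, one
 * scalar phi per component, and a vec2 that reassembles the result:
 *
 *    (block0)  %a0 = mov %a.x    %a1 = mov %a.y
 *    (block1)  %b0 = mov %b.x    %b1 = mov %b.y
 *    %r0 = phi block0: %a0, block1: %b0
 *    %r1 = phi block0: %a1, block1: %b1
 *    %r  = vec2 %r0, %r1
 *
 * Copy propagation is expected to clean up most of the movs and the vec2
 * afterwards.
 */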
struct lower_phis_to_scalar_state {
   void *mem_ctx;
   void *dead_ctx;

   /* Hash table marking which phi nodes are scalarizable.  The key is
    * pointers to phi instructions and the entry is either NULL for not
    * scalarizable or non-null for scalarizable.
    */
   struct hash_table *phi_table;
};
static bool
should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state);
static bool
is_phi_src_scalarizable(nir_phi_src *src,
                        struct lower_phis_to_scalar_state *state)
{
   /* Don't know what to do with non-ssa sources */
   if (!src->src.is_ssa)
      return false;

   nir_instr *src_instr = src->src.ssa->parent_instr;
   switch (src_instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *src_alu = nir_instr_as_alu(src_instr);

      /* ALU operations with output_size == 0 should be scalarized.  We
       * will also see a bunch of vecN operations from scalarizing ALU
       * operations and, since they can easily be copy-propagated, they
       * are ok too.
       */
      return nir_op_infos[src_alu->op].output_size == 0 ||
             nir_op_is_vec(src_alu->op);
   }
   case nir_instr_type_phi:
      /* A phi is scalarizable if we're going to lower it */
      return should_lower_phi(nir_instr_as_phi(src_instr), state);

   case nir_instr_type_load_const:
      /* These are trivially scalarizable */
      return true;

   case nir_instr_type_ssa_undef:
      /* The caller of this function is going to OR the results and we don't
       * want undefs to count so we return false.
       */
      return false;
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *src_intrin = nir_instr_as_intrinsic(src_instr);

      switch (src_intrin->intrinsic) {
      case nir_intrinsic_load_deref: {
         nir_deref_instr *deref = nir_src_as_deref(src_intrin->src[0]);
         return deref->mode == nir_var_shader_in ||
                deref->mode == nir_var_uniform ||
                deref->mode == nir_var_mem_ubo ||
                deref->mode == nir_var_mem_ssbo ||
                deref->mode == nir_var_mem_global;
      }

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_interp_deref_at_vertex:
      case nir_intrinsic_load_uniform:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_global:
      case nir_intrinsic_load_global_constant:
      case nir_intrinsic_load_input:
         return true;
      default:
         break;
      }
   }

   default:
      /* We can't scalarize this type of instruction */
      return false;
   }
}
/**
 * Determines if the given phi node should be lowered.  The only phi nodes
 * we will scalarize at the moment are those where all of the sources are
 * scalarizable.
 *
 * The reason for this comes down to coalescing.  Since phi sources can't
 * swizzle, swizzles on phis have to be resolved by inserting a mov right
 * before the phi.  The choice then becomes between movs to pick off
 * components for a scalar phi or potentially movs to recombine components
 * for a vector phi.  The problem is that the movs generated to pick off
 * the components are almost uncoalescable.  We can't coalesce them in NIR
 * because we need them to pick off components and we can't coalesce them
 * in the backend because the source register is a vector and the
 * destination is a scalar that may be used at other places in the program.
 * On the other hand, if we have a bunch of scalars going into a vector
 * phi, the situation is much better.  In this case, if the SSA def is
 * generated in the predecessor block to the corresponding phi source, the
 * backend code will be an ALU op into a temporary and then a mov into the
 * given vector component; this move can almost certainly be coalesced
 * away.
 */
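/* Concrete illustration of the trade-off above (informal pseudocode, not
 * exact NIR syntax): the pick-off mov needed for a scalar phi,
 *
 *    %x = mov %vec.y
 *
 * reads one component of a vector whose other components stay live, so
 * neither NIR nor the backend can coalesce it away.  The write-side mov
 * feeding one component of a vector phi,
 *
 *    %vec.y = mov %x
 *
 * can usually be folded into the ALU op that produced %x in the predecessor
 * block.
 */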
static bool
should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state)
{
   /* Already scalar */
   if (phi->dest.ssa.num_components == 1)
      return false;

   struct hash_entry *entry = _mesa_hash_table_search(state->phi_table, phi);
   if (entry)
      return entry->data != NULL;

   /* Insert an entry and mark it as scalarizable for now.  That way
    * we don't recurse forever and a cycle in the dependence graph
    * won't automatically make us fail to scalarize.
    */
   entry = _mesa_hash_table_insert(state->phi_table, phi, (void *)(intptr_t)1);

   bool scalarizable = false;

   nir_foreach_phi_src(src, phi) {
      /* This loop ignores srcs that are not scalarizable because it's likely
       * still worth copying to temps if another phi source is scalarizable.
       * This reduces register spilling by a huge amount in the i965 driver
       * for Deus Ex: MD.
       */
      scalarizable = is_phi_src_scalarizable(src, state);
      if (scalarizable)
         break;
   }

   /* The hash table entry for 'phi' may have changed while recursing the
    * dependence graph, so we need to reset it.
    */
   entry = _mesa_hash_table_search(state->phi_table, phi);
   assert(entry);

   entry->data = (void *)(intptr_t)scalarizable;

   return scalarizable;
}
static bool
lower_phis_to_scalar_block(nir_block *block,
                           struct lower_phis_to_scalar_state *state)
{
   bool progress = false;

   /* Find the last phi node in the block */
   nir_phi_instr *last_phi = NULL;
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_phi)
         break;

      last_phi = nir_instr_as_phi(instr);
   }

   /* We have to handle the phi nodes in their own pass due to the way
    * we're modifying the linked list of instructions.
    */
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);

      if (!should_lower_phi(phi, state))
         continue;

      unsigned bit_size = phi->dest.ssa.bit_size;
      /* Create a vecN operation to combine the results.  Most of these
       * will be redundant, but copy propagation should clean them up for
       * us.  No need to add the complexity here.
       */
      nir_op vec_op = nir_op_vec(phi->dest.ssa.num_components);

      nir_alu_instr *vec = nir_alu_instr_create(state->mem_ctx, vec_op);
      nir_ssa_dest_init(&vec->instr, &vec->dest.dest,
                        phi->dest.ssa.num_components,
                        bit_size, NULL);
      vec->dest.write_mask = (1 << phi->dest.ssa.num_components) - 1;
      for (unsigned i = 0; i < phi->dest.ssa.num_components; i++) {
         nir_phi_instr *new_phi = nir_phi_instr_create(state->mem_ctx);
         nir_ssa_dest_init(&new_phi->instr, &new_phi->dest, 1,
                           phi->dest.ssa.bit_size, NULL);

         vec->src[i].src = nir_src_for_ssa(&new_phi->dest.ssa);
         nir_foreach_phi_src(src, phi) {
            /* We need to insert a mov to grab the i'th component of src */
            nir_alu_instr *mov = nir_alu_instr_create(state->mem_ctx,
                                                      nir_op_mov);
            nir_ssa_dest_init(&mov->instr, &mov->dest.dest, 1, bit_size, NULL);
            mov->dest.write_mask = 1;
            nir_src_copy(&mov->src[0].src, &src->src, state->mem_ctx);
            mov->src[0].swizzle[0] = i;
            /* Insert at the end of the predecessor but before the jump */
            nir_instr *pred_last_instr = nir_block_last_instr(src->pred);
            if (pred_last_instr && pred_last_instr->type == nir_instr_type_jump)
               nir_instr_insert_before(pred_last_instr, &mov->instr);
            else
               nir_instr_insert_after_block(src->pred, &mov->instr);

            nir_phi_src *new_src = ralloc(new_phi, nir_phi_src);
            new_src->pred = src->pred;
            new_src->src = nir_src_for_ssa(&mov->dest.dest.ssa);

            exec_list_push_tail(&new_phi->srcs, &new_src->node);
         }

         nir_instr_insert_before(&phi->instr, &new_phi->instr);
      }
      nir_instr_insert_after(&last_phi->instr, &vec->instr);

      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
                               nir_src_for_ssa(&vec->dest.dest.ssa));

      ralloc_steal(state->dead_ctx, phi);
      nir_instr_remove(&phi->instr);

      progress = true;

      /* We're using the safe iterator and inserting all the newly
       * scalarized phi nodes before their non-scalarized version so that's
       * ok.  However, we are also inserting vec operations after all of
       * the last phi node so once we get here, we can't trust even the
       * safe iterator to stop properly.  We have to break manually.
       */
      if (instr == &last_phi->instr)
         break;
   }

   return progress;
}
static bool
lower_phis_to_scalar_impl(nir_function_impl *impl)
{
   struct lower_phis_to_scalar_state state;
   bool progress = false;

   state.mem_ctx = ralloc_parent(impl);
   state.dead_ctx = ralloc_context(NULL);
   state.phi_table = _mesa_pointer_hash_table_create(state.dead_ctx);

   nir_foreach_block(block, impl) {
      progress = lower_phis_to_scalar_block(block, &state) || progress;
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   ralloc_free(state.dead_ctx);

   return progress;
}
/** A pass that lowers vector phi nodes to scalar
 *
 * This pass loops through the blocks and looks for vector phi nodes it can
 * lower to scalar phi nodes.  Not all phi nodes are lowered.  For
 * instance, if one of the sources is a non-scalarizable vector, then we
 * don't bother lowering because that would generate hard-to-coalesce movs.
 */
bool
nir_lower_phis_to_scalar(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress = lower_phis_to_scalar_impl(function->impl) || progress;
   }

   return progress;
}