/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */
#include "nir.h"

/*
 * Implements a simple pass that lowers vecN instructions to a series of
 * moves with partial writes.
 */
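/* As an illustration (NIR-like pseudocode with made-up value names), a
 *
 *    ssa_5 = vec4 ssa_1, ssa_2, ssa_3, ssa_4
 *
 * with an SSA destination becomes a series of partial register writes:
 *
 *    r0.x = mov ssa_1
 *    r0.y = mov ssa_2
 *    r0.z = mov ssa_3
 *    r0.w = mov ssa_4
 *
 * and all uses of ssa_5 are rewritten to read r0 instead.
 */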
static bool
src_matches_dest_reg(nir_dest *dest, nir_src *src)
{
   if (dest->is_ssa || src->is_ssa)
      return false;

   return (dest->reg.reg == src->reg.reg &&
           dest->reg.base_offset == src->reg.base_offset &&
           !dest->reg.indirect &&
           !src->reg.indirect);
}
/**
 * For a given starting writemask channel and corresponding source index in
 * the vec instruction, insert a MOV to the vec instruction's dest of all the
 * writemask channels that get read from the same src reg.
 *
 * Returns the writemask of our MOV, so the parent loop calling this knows
 * which ones have been processed.
 */
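/* For example (made-up names), given
 *
 *    r0 = vec4 ssa_0.x, ssa_0.y, ssa_1.x, ssa_0.z
 *
 * insert_mov(vec, 0, shader) emits one partial write covering every channel
 * that reads ssa_0:
 *
 *    r0.xyw = mov ssa_0   (swizzle x, y, -, z)
 *
 * and returns 0xb, leaving only channel 2 for a later call.
 */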
static unsigned
insert_mov(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
{
   assert(start_idx < nir_op_infos[vec->op].num_inputs);

   nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
   nir_alu_src_copy(&mov->src[0], &vec->src[start_idx], mov);
   nir_alu_dest_copy(&mov->dest, &vec->dest, mov);

   mov->dest.write_mask = (1u << start_idx);
   mov->src[0].swizzle[start_idx] = vec->src[start_idx].swizzle[0];
   mov->src[0].negate = vec->src[start_idx].negate;
   mov->src[0].abs = vec->src[start_idx].abs;

   /* Fold in any later channels that read the same source with the same
    * modifiers, so they all become part of this one partial write.
    */
   for (unsigned i = start_idx + 1; i < 4; i++) {
      if (!(vec->dest.write_mask & (1 << i)))
         continue;

      if (nir_srcs_equal(vec->src[i].src, vec->src[start_idx].src) &&
          vec->src[i].negate == vec->src[start_idx].negate &&
          vec->src[i].abs == vec->src[start_idx].abs) {
         mov->dest.write_mask |= (1 << i);
         mov->src[0].swizzle[i] = vec->src[i].swizzle[0];
      }
   }

   unsigned channels_handled = mov->dest.write_mask;

   /* In some situations (if the vecN is involved in a phi-web), we can end
    * up with a mov from a register to itself.  Some of those channels may end
    * up doing nothing and there's no reason to have them as part of the mov.
    */
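   /* For instance, a channel that would lower to "r0.x = mov r0.x" is a
    * no-op and can simply be dropped from the write mask.
    */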
   if (src_matches_dest_reg(&mov->dest.dest, &mov->src[0].src) &&
       !mov->src[0].abs && !mov->src[0].negate) {
      for (unsigned i = 0; i < 4; i++) {
         if (mov->src[0].swizzle[i] == i) {
            mov->dest.write_mask &= ~(1 << i);
         }
      }
   }

   /* Only emit the instruction if it actually does something */
   if (mov->dest.write_mask) {
      nir_instr_insert_before(&vec->instr, &mov->instr);
   } else {
      ralloc_free(mov);
   }

   return channels_handled;
}
static bool
has_replicated_dest(nir_alu_instr *alu)
{
   return alu->op == nir_op_fdot_replicated2 ||
          alu->op == nir_op_fdot_replicated3 ||
          alu->op == nir_op_fdot_replicated4 ||
          alu->op == nir_op_fdph_replicated;
}
/* Attempts to coalesce the "move" from the given source of the vec to the
 * destination of the instruction generating the value.  If, for whatever
 * reason, we cannot coalesce the move, it does nothing and returns 0.  We
 * can then call insert_mov as normal.
 */
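/* A sketch of the effect (made-up names): if the only use of
 *
 *    ssa_2 = fadd ssa_0, ssa_1
 *
 * is one channel of a vec being lowered into r0, the mov can be skipped
 * entirely by making the fadd write the register directly:
 *
 *    r0.x = fadd ssa_0, ssa_1   (sources reswizzled to match channel x)
 */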
static unsigned
try_coalesce(nir_alu_instr *vec, unsigned start_idx)
{
   assert(start_idx < nir_op_infos[vec->op].num_inputs);

   /* We will only even try if the source is SSA */
   if (!vec->src[start_idx].src.is_ssa)
      return 0;

   assert(vec->src[start_idx].src.ssa);

   /* If we are going to do a reswizzle, then the vecN operation must be the
    * only use of the source value.  We also can't have any source modifiers.
    */
   nir_foreach_use(src, vec->src[start_idx].src.ssa) {
      if (src->parent_instr != &vec->instr)
         return 0;

      nir_alu_src *alu_src = exec_node_data(nir_alu_src, src, src);
      if (alu_src->abs || alu_src->negate)
         return 0;
   }

   if (!list_is_empty(&vec->src[start_idx].src.ssa->if_uses))
      return 0;

   if (vec->src[start_idx].src.ssa->parent_instr->type != nir_instr_type_alu)
      return 0;

   nir_alu_instr *src_alu =
      nir_instr_as_alu(vec->src[start_idx].src.ssa->parent_instr);

   if (has_replicated_dest(src_alu)) {
      /* The fdot instruction is special: It replicates its result to all
       * components.  This means that we can always rewrite its destination
       * and we don't need to swizzle anything.
       */
   } else {
      /* We only care about being able to re-swizzle the instruction if it is
       * something that we can reswizzle.  It must be per-component.  The one
       * exception to this is the fdotN instructions which implicitly splat
       * their result out to all channels.
       */
      if (nir_op_infos[src_alu->op].output_size != 0)
         return 0;

      /* If we are going to reswizzle the instruction, we can't have any
       * non-per-component sources either.
       */
      for (unsigned j = 0; j < nir_op_infos[src_alu->op].num_inputs; j++)
         if (nir_op_infos[src_alu->op].input_sizes[j] != 0)
            return 0;
   }

   /* Stash off all of the ALU instruction's swizzles. */
   uint8_t swizzles[4][4];
   for (unsigned j = 0; j < nir_op_infos[src_alu->op].num_inputs; j++)
      for (unsigned i = 0; i < 4; i++)
         swizzles[j][i] = src_alu->src[j].swizzle[i];

   unsigned write_mask = 0;
   for (unsigned i = start_idx; i < 4; i++) {
      if (!(vec->dest.write_mask & (1 << i)))
         continue;

      if (!vec->src[i].src.is_ssa ||
          vec->src[i].src.ssa != &src_alu->dest.dest.ssa)
         continue;

      /* At this point, the given vec source matches up with the ALU
       * instruction so we can re-swizzle that component to match.
       */
      write_mask |= 1 << i;
      if (has_replicated_dest(src_alu)) {
         /* Since the destination is a single replicated value, we don't need
          * to do any reswizzling
          */
      } else {
         for (unsigned j = 0; j < nir_op_infos[src_alu->op].num_inputs; j++)
            src_alu->src[j].swizzle[i] = swizzles[j][vec->src[i].swizzle[0]];
      }

      /* Clear the no longer needed vec source */
      nir_instr_rewrite_src(&vec->instr, &vec->src[i].src, NIR_SRC_INIT);
   }

   nir_instr_rewrite_dest(&src_alu->instr, &src_alu->dest.dest, vec->dest.dest);
   src_alu->dest.write_mask = write_mask;

   return write_mask;
}
static bool
lower_vec_to_movs_block(nir_block *block, nir_function_impl *impl)
{
   bool progress = false;
   nir_shader *shader = impl->function->shader;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *vec = nir_instr_as_alu(instr);

      switch (vec->op) {
      case nir_op_vec2:
      case nir_op_vec3:
      case nir_op_vec4:
         break;
      default:
         continue; /* The loop */
      }

      bool vec_had_ssa_dest = vec->dest.dest.is_ssa;
      if (vec->dest.dest.is_ssa) {
         /* Since we insert multiple MOVs, we have a register destination. */
         nir_register *reg = nir_local_reg_create(impl);
         reg->num_components = vec->dest.dest.ssa.num_components;
         reg->bit_size = vec->dest.dest.ssa.bit_size;

         nir_ssa_def_rewrite_uses(&vec->dest.dest.ssa, nir_src_for_reg(reg));

         nir_instr_rewrite_dest(&vec->instr, &vec->dest.dest,
                                nir_dest_for_reg(reg));
      }

      unsigned finished_write_mask = 0;

      /* First, emit a MOV for all the src channels that are in the
       * destination reg, in case other values we're populating in the dest
       * might overwrite them.
       */
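      /* For example (made-up names), "r0.xy = vec2 r0.y, ssa_0.x" has to
       * read r0.y for channel x before the write to channel y clobbers it.
       */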
      for (unsigned i = 0; i < 4; i++) {
         if (!(vec->dest.write_mask & (1 << i)))
            continue;

         if (src_matches_dest_reg(&vec->dest.dest, &vec->src[i].src)) {
            finished_write_mask |= insert_mov(vec, i, shader);
            break;
         }
      }

      /* Now, emit MOVs for all the other src channels. */
      for (unsigned i = 0; i < 4; i++) {
         if (!(vec->dest.write_mask & (1 << i)))
            continue;

         /* Coalescing moves the register writes from the vec up to the ALU
          * instruction in the source.  We can only do this if the original
          * vecN had an SSA destination.
          */
         if (vec_had_ssa_dest && !(finished_write_mask & (1 << i)))
            finished_write_mask |= try_coalesce(vec, i);

         if (!(finished_write_mask & (1 << i)))
            finished_write_mask |= insert_mov(vec, i, shader);
      }

      nir_instr_remove(&vec->instr);
      progress = true;
   }

   return progress;
}
static bool
nir_lower_vec_to_movs_impl(nir_function_impl *impl)
{
   bool progress = false;

   nir_foreach_block(block, impl) {
      progress |= lower_vec_to_movs_block(block, impl);
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}
bool
nir_lower_vec_to_movs(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress = nir_lower_vec_to_movs_impl(function->impl) || progress;
   }

   return progress;
}
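/* Typical usage (a sketch; where this pass sits in a driver's lowering
 * sequence is an assumption, not specified by this file):
 *
 *    bool progress = nir_lower_vec_to_movs(shader);
 */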