2cb0457f9ba6ee8a8be3caeaa56c00c29309c613
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

/*
 * Implements a simple pass that lowers vecN instructions to a series of
 * moves with partial writes.
 */
36 src_matches_dest_reg(nir_dest
*dest
, nir_src
*src
)
38 if (dest
->is_ssa
|| src
->is_ssa
)
41 return (dest
->reg
.reg
== src
->reg
.reg
&&
42 dest
->reg
.base_offset
== src
->reg
.base_offset
&&
43 !dest
->reg
.indirect
&&
48 * For a given starting writemask channel and corresponding source index in
49 * the vec instruction, insert a MOV to the vec instruction's dest of all the
50 * writemask channels that get read from the same src reg.
52 * Returns the writemask of our MOV, so the parent loop calling this knows
53 * which ones have been processed.
56 insert_mov(nir_alu_instr
*vec
, unsigned start_idx
, nir_shader
*shader
)
58 assert(start_idx
< nir_op_infos
[vec
->op
].num_inputs
);
60 nir_alu_instr
*mov
= nir_alu_instr_create(shader
, nir_op_imov
);
61 nir_alu_src_copy(&mov
->src
[0], &vec
->src
[start_idx
], mov
);
62 nir_alu_dest_copy(&mov
->dest
, &vec
->dest
, mov
);
64 mov
->dest
.write_mask
= (1u << start_idx
);
65 mov
->src
[0].swizzle
[start_idx
] = vec
->src
[start_idx
].swizzle
[0];
67 for (unsigned i
= start_idx
+ 1; i
< 4; i
++) {
68 if (!(vec
->dest
.write_mask
& (1 << i
)))
71 if (nir_srcs_equal(vec
->src
[i
].src
, vec
->src
[start_idx
].src
)) {
72 mov
->dest
.write_mask
|= (1 << i
);
73 mov
->src
[0].swizzle
[i
] = vec
->src
[i
].swizzle
[0];
77 nir_instr_insert_before(&vec
->instr
, &mov
->instr
);
79 return mov
->dest
.write_mask
;
83 has_replicated_dest(nir_alu_instr
*alu
)
85 return alu
->op
== nir_op_fdot_replicated2
||
86 alu
->op
== nir_op_fdot_replicated3
||
87 alu
->op
== nir_op_fdot_replicated4
;
90 /* Attempts to coalesce the "move" from the given source of the vec to the
91 * destination of the instruction generating the value. If, for whatever
92 * reason, we cannot coalesce the mmove, it does nothing and returns 0. We
93 * can then call insert_mov as normal.
96 try_coalesce(nir_alu_instr
*vec
, unsigned start_idx
, nir_shader
*shader
)
98 assert(start_idx
< nir_op_infos
[vec
->op
].num_inputs
);
100 /* We will only even try if the source is SSA */
101 if (!vec
->src
[start_idx
].src
.is_ssa
)
104 assert(vec
->src
[start_idx
].src
.ssa
);
106 /* If we are going to do a reswizzle, then the vecN operation must be the
107 * only use of the source value. We also can't have any source modifiers.
109 nir_foreach_use(vec
->src
[start_idx
].src
.ssa
, src
) {
110 if (src
->parent_instr
!= &vec
->instr
)
113 nir_alu_src
*alu_src
= exec_node_data(nir_alu_src
, src
, src
);
114 if (alu_src
->abs
|| alu_src
->negate
)
118 if (!list_empty(&vec
->src
[start_idx
].src
.ssa
->if_uses
))
121 if (vec
->src
[start_idx
].src
.ssa
->parent_instr
->type
!= nir_instr_type_alu
)
124 nir_alu_instr
*src_alu
=
125 nir_instr_as_alu(vec
->src
[start_idx
].src
.ssa
->parent_instr
);
127 if (has_replicated_dest(src_alu
)) {
128 /* The fdot instruction is special: It replicates its result to all
129 * components. This means that we can always rewrite its destination
130 * and we don't need to swizzle anything.
133 /* We only care about being able to re-swizzle the instruction if it is
134 * something that we can reswizzle. It must be per-component. The one
135 * exception to this is the fdotN instructions which implicitly splat
136 * their result out to all channels.
138 if (nir_op_infos
[src_alu
->op
].output_size
!= 0)
141 /* If we are going to reswizzle the instruction, we can't have any
142 * non-per-component sources either.
144 for (unsigned j
= 0; j
< nir_op_infos
[src_alu
->op
].num_inputs
; j
++)
145 if (nir_op_infos
[src_alu
->op
].input_sizes
[j
] != 0)
149 /* Stash off all of the ALU instruction's swizzles. */
150 uint8_t swizzles
[4][4];
151 for (unsigned j
= 0; j
< nir_op_infos
[src_alu
->op
].num_inputs
; j
++)
152 for (unsigned i
= 0; i
< 4; i
++)
153 swizzles
[j
][i
] = src_alu
->src
[j
].swizzle
[i
];
155 unsigned write_mask
= 0;
156 for (unsigned i
= start_idx
; i
< 4; i
++) {
157 if (!(vec
->dest
.write_mask
& (1 << i
)))
160 if (!vec
->src
[i
].src
.is_ssa
||
161 vec
->src
[i
].src
.ssa
!= &src_alu
->dest
.dest
.ssa
)
164 /* At this point, the give vec source matchese up with the ALU
165 * instruction so we can re-swizzle that component to match.
167 write_mask
|= 1 << i
;
168 if (has_replicated_dest(src_alu
)) {
169 /* Since the destination is a single replicated value, we don't need
170 * to do any reswizzling
173 for (unsigned j
= 0; j
< nir_op_infos
[src_alu
->op
].num_inputs
; j
++)
174 src_alu
->src
[j
].swizzle
[i
] = swizzles
[j
][vec
->src
[i
].swizzle
[0]];
177 /* Clear the no longer needed vec source */
178 nir_instr_rewrite_src(&vec
->instr
, &vec
->src
[i
].src
, NIR_SRC_INIT
);
181 nir_instr_rewrite_dest(&src_alu
->instr
, &src_alu
->dest
.dest
, vec
->dest
.dest
);
182 src_alu
->dest
.write_mask
= write_mask
;
188 lower_vec_to_movs_block(nir_block
*block
, void *void_impl
)
190 nir_function_impl
*impl
= void_impl
;
191 nir_shader
*shader
= impl
->overload
->function
->shader
;
193 nir_foreach_instr_safe(block
, instr
) {
194 if (instr
->type
!= nir_instr_type_alu
)
197 nir_alu_instr
*vec
= nir_instr_as_alu(instr
);
205 continue; /* The loop */
208 if (vec
->dest
.dest
.is_ssa
) {
209 /* Since we insert multiple MOVs, we have a register destination. */
210 nir_register
*reg
= nir_local_reg_create(impl
);
211 reg
->num_components
= vec
->dest
.dest
.ssa
.num_components
;
213 nir_ssa_def_rewrite_uses(&vec
->dest
.dest
.ssa
, nir_src_for_reg(reg
));
215 nir_instr_rewrite_dest(&vec
->instr
, &vec
->dest
.dest
,
216 nir_dest_for_reg(reg
));
219 unsigned finished_write_mask
= 0;
221 /* First, emit a MOV for all the src channels that are in the
222 * destination reg, in case other values we're populating in the dest
223 * might overwrite them.
225 for (unsigned i
= 0; i
< 4; i
++) {
226 if (!(vec
->dest
.write_mask
& (1 << i
)))
229 if (src_matches_dest_reg(&vec
->dest
.dest
, &vec
->src
[i
].src
)) {
230 finished_write_mask
|= insert_mov(vec
, i
, shader
);
235 /* Now, emit MOVs for all the other src channels. */
236 for (unsigned i
= 0; i
< 4; i
++) {
237 if (!(vec
->dest
.write_mask
& (1 << i
)))
240 if (!(finished_write_mask
& (1 << i
)))
241 finished_write_mask
|= try_coalesce(vec
, i
, shader
);
243 if (!(finished_write_mask
& (1 << i
)))
244 finished_write_mask
|= insert_mov(vec
, i
, shader
);
247 nir_instr_remove(&vec
->instr
);
255 nir_lower_vec_to_movs_impl(nir_function_impl
*impl
)
257 nir_foreach_block(impl
, lower_vec_to_movs_block
, impl
);
261 nir_lower_vec_to_movs(nir_shader
*shader
)
263 nir_foreach_overload(shader
, overload
) {
265 nir_lower_vec_to_movs_impl(overload
->impl
);