/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "brw_nir.h"
/*
 * Implements a small peephole optimization that looks for a multiply that
 * is only ever used in an add and replaces both with an fma.
 */
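
/* For example (illustrative SSA, not taken from a real shader):
 *
 *    ssa_2 = fmul ssa_0, ssa_1
 *    ssa_3 = fadd ssa_2, ssa_4
 *
 * becomes
 *
 *    ssa_3 = ffma ssa_0, ssa_1, ssa_4
 *
 * provided ssa_2 has no other kind of use.
 */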
struct peephole_ffma_state {
   void *mem_ctx;
   nir_function_impl *impl;
   bool progress;
};
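
/* Returns true if every use of the given value is an fadd, looking
 * recursively through moves, negations, and absolute values.  Values that
 * feed if-conditions do not qualify.
 */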
static inline bool
are_all_uses_fadd(nir_ssa_def *def)
{
   if (!list_empty(&def->if_uses))
      return false;

   nir_foreach_use(use_src, def) {
      nir_instr *use_instr = use_src->parent_instr;

      if (use_instr->type != nir_instr_type_alu)
         return false;

      nir_alu_instr *use_alu = nir_instr_as_alu(use_instr);
      switch (use_alu->op) {
      case nir_op_fadd:
         break; /* This one's ok */

      case nir_op_imov:
      case nir_op_fmov:
      case nir_op_fneg:
      case nir_op_fabs:
         assert(use_alu->dest.dest.is_ssa);
         if (!are_all_uses_fadd(&use_alu->dest.dest.ssa))
            return false;
         break;

      default:
         return false;
      }
   }

   return true;
}
static nir_alu_instr *
get_mul_for_src(nir_alu_src *src, int num_components,
                uint8_t swizzle[4], bool *negate, bool *abs)
{
   uint8_t swizzle_tmp[4];
   assert(src->src.is_ssa && !src->abs && !src->negate);

   nir_instr *instr = src->src.ssa->parent_instr;
   if (instr->type != nir_instr_type_alu)
      return NULL;

   nir_alu_instr *alu = nir_instr_as_alu(instr);

   /* We want to bail if any of the other ALU operations involved is labeled
    * exact.  One reason for this is that, while the value that is changing is
    * actually the result of the add and not the multiply, the intention of
    * the user when they specify an exact multiply is that they want *that*
    * value and what they don't care about is the add.  Another reason is that
    * SPIR-V explicitly requires this behaviour.
    */
   if (alu->exact)
      return NULL;
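
   /* Walk through moves, negations, and absolute values, folding them into
    * the negate/abs flags as we go.  For example, fneg(fmul(a, b)) is
    * reported as the fmul itself with *negate flipped.
    */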
   switch (alu->op) {
   case nir_op_imov:
   case nir_op_fmov:
      alu = get_mul_for_src(&alu->src[0], num_components,
                            swizzle, negate, abs);
      break;

   case nir_op_fneg:
      alu = get_mul_for_src(&alu->src[0], num_components,
                            swizzle, negate, abs);
      *negate = !*negate;
      break;

   case nir_op_fabs:
      alu = get_mul_for_src(&alu->src[0], num_components,
                            swizzle, negate, abs);
      *negate = false;
      *abs = true;
      break;

   case nir_op_fmul:
      /* Only absorb a fmul into a ffma if the fmul is only used in fadd
       * operations.  This prevents us from being too aggressive with our
       * fusing which can actually lead to more instructions.
       */
      if (!are_all_uses_fadd(&alu->dest.dest.ssa))
         return NULL;
      break;

   default:
      return NULL;
   }

   if (!alu)
      return NULL;

   /* Copy swizzle data before overwriting it to avoid setting a wrong
    * swizzle.
    *
    * Example:
    *   Former swizzle[] = xyzw
    *   src->swizzle[] = zyxx
    *
    *   Expected output swizzle = zyxx
    *   If we reuse swizzle in the loop, then output swizzle would be zyzz.
    */
   memcpy(swizzle_tmp, swizzle, 4*sizeof(uint8_t));
   for (int i = 0; i < num_components; i++)
      swizzle[i] = swizzle_tmp[src->swizzle[i]];

   return alu;
}

/**
 * Given a list of (at least two) nir_alu_src's, tells if any of them is a
 * constant value and is used only once.
 */
static bool
any_alu_src_is_a_constant(nir_alu_src srcs[])
{
   for (unsigned i = 0; i < 2; i++) {
      if (srcs[i].src.ssa->parent_instr->type == nir_instr_type_load_const) {
         nir_load_const_instr *load_const =
            nir_instr_as_load_const(srcs[i].src.ssa->parent_instr);

         if (list_is_singular(&load_const->def.uses) &&
             list_empty(&load_const->def.if_uses)) {
            return true;
         }
      }
   }

   return false;
}

static bool
brw_nir_opt_peephole_ffma_block(nir_block *block, void *mem_ctx)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *add = nir_instr_as_alu(instr);
      if (add->op != nir_op_fadd)
         continue;

      assert(add->dest.dest.is_ssa);
      if (add->exact)
         continue;

      assert(add->src[0].src.is_ssa && add->src[1].src.is_ssa);

      /* This is the case a + a.  We would rather handle this with an
       * algebraic reduction than fuse it.  Also, we want to only fuse
       * things where the multiply is used only once and, in this case,
       * it would be used twice by the same instruction.
       */
      if (add->src[0].src.ssa == add->src[1].src.ssa)
         continue;
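
      /* Try each of the fadd's two sources in turn, looking for a multiply
       * that can be fused into it.
       */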
      nir_alu_instr *mul;
      uint8_t add_mul_src, swizzle[4];
      bool negate, abs;

      for (add_mul_src = 0; add_mul_src < 2; add_mul_src++) {
         for (unsigned i = 0; i < 4; i++)
            swizzle[i] = i;

         negate = false;
         abs = false;

         mul = get_mul_for_src(&add->src[add_mul_src],
                               add->dest.dest.ssa.num_components,
                               swizzle, &negate, &abs);

         if (mul != NULL)
            break;
      }

      if (mul == NULL)
         continue;

      unsigned bit_size = add->dest.dest.ssa.bit_size;

      nir_ssa_def *mul_src[2];
      mul_src[0] = mul->src[0].src.ssa;
      mul_src[1] = mul->src[1].src.ssa;

      /* If any of the operands of the fmul and any of the fadd is a constant,
       * we bypass because it will be more efficient as the constants will be
       * propagated as operands, potentially saving two load_const
       * instructions.
       */
      if (any_alu_src_is_a_constant(mul->src) &&
          any_alu_src_is_a_constant(add->src)) {
         continue;
      }
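
      /* Illustrative case for the check above: with t = fmul(x, 2.0) and
       * fadd(t, 3.0), both constants can stay inline as operands of their
       * instructions, whereas ffma(x, 2.0, 3.0) may force a separate
       * load_const for each of them.
       */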

      if (abs) {
         for (unsigned i = 0; i < 2; i++) {
            nir_alu_instr *abs = nir_alu_instr_create(mem_ctx, nir_op_fabs);
            abs->src[0].src = nir_src_for_ssa(mul_src[i]);
            nir_ssa_dest_init(&abs->instr, &abs->dest.dest,
                              mul_src[i]->num_components, bit_size, NULL);
            abs->dest.write_mask = (1 << mul_src[i]->num_components) - 1;
            nir_instr_insert_before(&add->instr, &abs->instr);
            mul_src[i] = &abs->dest.dest.ssa;
         }
      }

      if (negate) {
         nir_alu_instr *neg = nir_alu_instr_create(mem_ctx, nir_op_fneg);
         neg->src[0].src = nir_src_for_ssa(mul_src[0]);
         nir_ssa_dest_init(&neg->instr, &neg->dest.dest,
                           mul_src[0]->num_components, bit_size, NULL);
         neg->dest.write_mask = (1 << mul_src[0]->num_components) - 1;
         nir_instr_insert_before(&add->instr, &neg->instr);
         mul_src[0] = &neg->dest.dest.ssa;
      }

      nir_alu_instr *ffma = nir_alu_instr_create(mem_ctx, nir_op_ffma);
      ffma->dest.saturate = add->dest.saturate;
      ffma->dest.write_mask = add->dest.write_mask;

      for (unsigned i = 0; i < 2; i++) {
         ffma->src[i].src = nir_src_for_ssa(mul_src[i]);
         for (unsigned j = 0; j < add->dest.dest.ssa.num_components; j++)
            ffma->src[i].swizzle[j] = mul->src[i].swizzle[swizzle[j]];
      }
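      /* The third ffma source is the fadd operand that did not feed the
       * multiply.
       */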
      nir_alu_src_copy(&ffma->src[2], &add->src[1 - add_mul_src], ffma);

      assert(add->dest.dest.is_ssa);

      nir_ssa_dest_init(&ffma->instr, &ffma->dest.dest,
                        add->dest.dest.ssa.num_components,
                        bit_size,
                        add->dest.dest.ssa.name);
      nir_ssa_def_rewrite_uses(&add->dest.dest.ssa,
                               nir_src_for_ssa(&ffma->dest.dest.ssa));

      nir_instr_insert_before(&add->instr, &ffma->instr);
      assert(list_empty(&add->dest.dest.ssa.uses));
      nir_instr_remove(&add->instr);

      progress = true;
   }

   return progress;
}

static bool
brw_nir_opt_peephole_ffma_impl(nir_function_impl *impl)
{
   bool progress = false;
   void *mem_ctx = ralloc_parent(impl);

   nir_foreach_block(block, impl) {
      progress |= brw_nir_opt_peephole_ffma_block(block, mem_ctx);
   }
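
   /* This pass does not touch the CFG, so block indices and dominance
    * information remain valid.
    */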
   if (progress)
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);

   return progress;
}

bool
brw_nir_opt_peephole_ffma(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= brw_nir_opt_peephole_ffma_impl(function->impl);
   }

   return progress;
}