/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
#include "brw_nir.h"

/*
 * Implements a small peephole optimization that looks for a multiply that
 * is only ever used in an add and replaces both with an fma.
 */
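/* Illustrative sketch of the rewrite, in NIR-style pseudocode:
 *
 *    ssa_2 = fmul ssa_0, ssa_1
 *    ssa_3 = fadd ssa_2, ssa_4
 *
 * becomes, when ssa_2 has no other users,
 *
 *    ssa_3 = ffma ssa_0, ssa_1, ssa_4
 */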
struct peephole_ffma_state {
   void *mem_ctx;
   nir_function_impl *impl;
   bool progress;
};
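/* Returns true if every use of "def" is, possibly through a chain of moves
 * and fneg/fabs source modifiers, as a source of an fadd.
 */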
static inline bool
are_all_uses_fadd(nir_ssa_def *def)
{
   if (!list_empty(&def->if_uses))
      return false;

   nir_foreach_use(def, use_src) {
      nir_instr *use_instr = use_src->parent_instr;

      if (use_instr->type != nir_instr_type_alu)
         return false;

      nir_alu_instr *use_alu = nir_instr_as_alu(use_instr);
      switch (use_alu->op) {
      case nir_op_fadd:
         break; /* This one's ok */

      case nir_op_imov:
      case nir_op_fmov:
      case nir_op_fneg:
      case nir_op_fabs:
         /* Moves and source modifiers aren't real uses; look through
          * them at the uses of their result instead.
          */
         assert(use_alu->dest.dest.is_ssa);
         if (!are_all_uses_fadd(&use_alu->dest.dest.ssa))
            return false;
         break;

      default:
         return false;
      }
   }

   return true;
}
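/* Chases "src" through any chain of moves, fneg, and fabs instructions
 * looking for an fmul that can be fused.  Negations and absolute values
 * encountered along the way are folded into *negate and *abs, and each
 * instruction's swizzle is composed into swizzle[].  Returns NULL if no
 * fusable multiply is found.
 */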
static nir_alu_instr *
get_mul_for_src(nir_alu_src *src, int num_components,
                uint8_t swizzle[4], bool *negate, bool *abs)
{
   uint8_t swizzle_tmp[4];
   assert(src->src.is_ssa && !src->abs && !src->negate);
   nir_instr *instr = src->src.ssa->parent_instr;
   if (instr->type != nir_instr_type_alu)
      return NULL;

   nir_alu_instr *alu = nir_instr_as_alu(instr);
   switch (alu->op) {
   case nir_op_imov:
   case nir_op_fmov:
      alu = get_mul_for_src(&alu->src[0], num_components, swizzle, negate, abs);
      break;

   case nir_op_fneg:
      alu = get_mul_for_src(&alu->src[0], num_components, swizzle, negate, abs);
      *negate = !*negate;
      break;

   case nir_op_fabs:
      alu = get_mul_for_src(&alu->src[0], num_components, swizzle, negate, abs);
      *negate = false;
      *abs = true;
      break;

   case nir_op_fmul:
      /* Only absorb a fmul into a ffma if the fmul is only used in fadd
       * operations.  This prevents us from being too aggressive with our
       * fusing, which can actually lead to more instructions.
       */
      if (!are_all_uses_fadd(&alu->dest.dest.ssa))
         return NULL;
      break;

   default:
      return NULL;
   }

   if (!alu)
      return NULL;
   /* Copy swizzle data before overwriting it to avoid setting a wrong swizzle.
    *
    * Example:
    *   Former swizzle[] = xyzw
    *   src->swizzle[] = zyxx
    *
    *   Expected output swizzle = zyxx
    *   If we reuse swizzle in the loop, then output swizzle would be zyzz.
    */
   memcpy(swizzle_tmp, swizzle, 4*sizeof(uint8_t));
   for (int i = 0; i < num_components; i++)
      swizzle[i] = swizzle_tmp[src->swizzle[i]];

   return alu;
}
/**
 * Given a list of (at least two) nir_alu_src's, tells if any of them is a
 * constant value and is used only once.
 */
static bool
any_alu_src_is_a_constant(nir_alu_src srcs[])
{
   for (unsigned i = 0; i < 2; i++) {
      if (srcs[i].src.ssa->parent_instr->type == nir_instr_type_load_const) {
         nir_load_const_instr *load_const =
            nir_instr_as_load_const(srcs[i].src.ssa->parent_instr);

         if (list_is_singular(&load_const->def.uses) &&
             list_empty(&load_const->def.if_uses)) {
            return true;
         }
      }
   }

   return false;
}
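/* Scans a block for fadd instructions whose sources lead back to a fusable
 * fmul and rewrites each such multiply/add pair into a single ffma.
 */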
static bool
brw_nir_opt_peephole_ffma_block(nir_block *block, void *void_state)
{
   struct peephole_ffma_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *add = nir_instr_as_alu(instr);
      if (add->op != nir_op_fadd)
         continue;
      /* TODO: Maybe bail if this expression is considered "precise"? */
      assert(add->src[0].src.is_ssa && add->src[1].src.is_ssa);
      /* This is the case a + a.  We would rather handle this with an
       * algebraic reduction than fuse it.  Also, we want to only fuse
       * things where the multiply is used only once and, in this case,
       * it would be used twice by the same instruction.
       */
      if (add->src[0].src.ssa == add->src[1].src.ssa)
         continue;
      nir_alu_instr *mul;
      uint8_t add_mul_src, swizzle[4];
      bool negate, abs;

      for (add_mul_src = 0; add_mul_src < 2; add_mul_src++) {
         /* Reset the swizzle to identity and the modifiers to none before
          * searching each of the add's two sources for a multiply.
          */
         for (unsigned i = 0; i < 4; i++)
            swizzle[i] = i;

         negate = false;
         abs = false;

         mul = get_mul_for_src(&add->src[add_mul_src],
                               add->dest.dest.ssa.num_components,
                               swizzle, &negate, &abs);

         if (mul != NULL)
            break;
      }

      if (mul == NULL)
         continue;
      nir_ssa_def *mul_src[2];
      mul_src[0] = mul->src[0].src.ssa;
      mul_src[1] = mul->src[1].src.ssa;
      /* If any operand of the fmul and any operand of the fadd is a constant,
       * we bypass because it will be more efficient as the constants will be
       * propagated as operands, potentially saving two load_const instructions.
       */
      if (any_alu_src_is_a_constant(mul->src) &&
          any_alu_src_is_a_constant(add->src)) {
         continue;
      }
      if (abs) {
         for (unsigned i = 0; i < 2; i++) {
            nir_alu_instr *abs = nir_alu_instr_create(state->mem_ctx,
                                                      nir_op_fabs);
            abs->src[0].src = nir_src_for_ssa(mul_src[i]);
            nir_ssa_dest_init(&abs->instr, &abs->dest.dest,
                              mul_src[i]->num_components, NULL);
            abs->dest.write_mask = (1 << mul_src[i]->num_components) - 1;
            nir_instr_insert_before(&add->instr, &abs->instr);
            mul_src[i] = &abs->dest.dest.ssa;
         }
      }
      if (negate) {
         nir_alu_instr *neg = nir_alu_instr_create(state->mem_ctx,
                                                   nir_op_fneg);
         neg->src[0].src = nir_src_for_ssa(mul_src[0]);
         nir_ssa_dest_init(&neg->instr, &neg->dest.dest,
                           mul_src[0]->num_components, NULL);
         neg->dest.write_mask = (1 << mul_src[0]->num_components) - 1;
         nir_instr_insert_before(&add->instr, &neg->instr);
         mul_src[0] = &neg->dest.dest.ssa;
      }
      nir_alu_instr *ffma = nir_alu_instr_create(state->mem_ctx, nir_op_ffma);
      ffma->dest.saturate = add->dest.saturate;
      ffma->dest.write_mask = add->dest.write_mask;
      for (unsigned i = 0; i < 2; i++) {
         ffma->src[i].src = nir_src_for_ssa(mul_src[i]);
         for (unsigned j = 0; j < add->dest.dest.ssa.num_components; j++)
            ffma->src[i].swizzle[j] = mul->src[i].swizzle[swizzle[j]];
      }
      nir_alu_src_copy(&ffma->src[2], &add->src[1 - add_mul_src], ffma);
      assert(add->dest.dest.is_ssa);
      nir_ssa_dest_init(&ffma->instr, &ffma->dest.dest,
                        add->dest.dest.ssa.num_components,
                        add->dest.dest.ssa.name);
      nir_ssa_def_rewrite_uses(&add->dest.dest.ssa,
                               nir_src_for_ssa(&ffma->dest.dest.ssa));
      nir_instr_insert_before(&add->instr, &ffma->instr);
      assert(list_empty(&add->dest.dest.ssa.uses));
      nir_instr_remove(&add->instr);
      state->progress = true;
   }

   return true;
}
static bool
brw_nir_opt_peephole_ffma_impl(nir_function_impl *impl)
{
   struct peephole_ffma_state state;

   state.mem_ctx = ralloc_parent(impl);
   state.impl = impl;
   state.progress = false;

   nir_foreach_block(impl, brw_nir_opt_peephole_ffma_block, &state);

   if (state.progress)
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);

   return state.progress;
}
bool
brw_nir_opt_peephole_ffma(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_overload(shader, overload) {
      if (overload->impl)
         progress |= brw_nir_opt_peephole_ffma_impl(overload->impl);
   }

   return progress;
}