/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */
#include "nir.h"

/*
 * Implements a small peephole optimization that looks for a multiply that
 * is only ever used in an add and replaces both with an fma.
 */
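/*
 * Illustrative sketch (hand-written, not actual NIR disassembly): the pass
 * rewrites
 *
 *    ssa_2 = fmul ssa_0, ssa_1
 *    ssa_3 = fadd ssa_2, ssa_4
 *
 * into
 *
 *    ssa_3 = ffma ssa_0, ssa_1, ssa_4
 *
 * as long as ssa_2 feeds nothing but fadds (possibly through chains of
 * mov/fneg/fabs instructions).
 */
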
struct peephole_ffma_state {
   void *mem_ctx;
   nir_function_impl *impl;
   bool progress;
};

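/*
 * Returns true if every use of the given SSA def is (possibly through a
 * chain of mov/fneg/fabs instructions) an fadd.  Any use as an "if"
 * condition disqualifies the def immediately.
 */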
static inline bool
are_all_uses_fadd(nir_ssa_def *def)
{
   if (def->if_uses->entries > 0)
      return false;

   struct set_entry *use_iter;
   set_foreach(def->uses, use_iter) {
      nir_instr *use_instr = (nir_instr *)use_iter->key;

      if (use_instr->type != nir_instr_type_alu)
         return false;

      nir_alu_instr *use_alu = nir_instr_as_alu(use_instr);
      switch (use_alu->op) {
      case nir_op_fadd:
         break; /* This one's ok */

      case nir_op_imov:
      case nir_op_fmov:
      case nir_op_fneg:
      case nir_op_fabs:
         assert(use_alu->dest.dest.is_ssa);
         if (!are_all_uses_fadd(&use_alu->dest.dest.ssa))
            return false;
         break;

      default:
         return false;
      }
   }

   return true;
}

static nir_alu_instr *
get_mul_for_src(nir_alu_src *src, uint8_t swizzle[4], bool *negate, bool *abs)
{
   assert(src->src.is_ssa && !src->abs && !src->negate);

   nir_instr *instr = src->src.ssa->parent_instr;
   if (instr->type != nir_instr_type_alu)
      return NULL;

   nir_alu_instr *alu = nir_instr_as_alu(instr);
   switch (alu->op) {
   case nir_op_imov:
   case nir_op_fmov:
      alu = get_mul_for_src(&alu->src[0], swizzle, negate, abs);
      break;

   case nir_op_fneg:
      alu = get_mul_for_src(&alu->src[0], swizzle, negate, abs);
      *negate = !*negate;
      break;

   case nir_op_fabs:
      alu = get_mul_for_src(&alu->src[0], swizzle, negate, abs);
      *negate = false;
      *abs = true;
      break;

   case nir_op_fmul:
      /* Only absorb a fmul into a ffma if the fmul is only used in fadd
       * operations.  This prevents us from being too aggressive with our
       * fusing which can actually lead to more instructions.
       */
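      /* One way this can go wrong: if the fmul result is also consumed by
       * something that is not an fadd, the fmul cannot be deleted after
       * fusing, so we would carry the original fmul plus a new ffma (and
       * any fneg/fabs copies inserted below) instead of the cheaper
       * fmul + fadd pair.
       */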
      if (!are_all_uses_fadd(&alu->dest.dest.ssa))
         return NULL;
      break;

   default:
      return NULL;
   }

   if (!alu)
      return NULL;

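   /* Compose this source's swizzle on top of whatever the recursive calls
    * above accumulated, so that swizzle[i] ends up naming the component of
    * the multiply's destination that feeds component i of this source.
    */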
   for (unsigned i = 0; i < 4; i++) {
      if (!(alu->dest.write_mask & (1 << i)))
         break;

      swizzle[i] = swizzle[src->swizzle[i]];
   }

   return alu;
}

static bool
nir_opt_peephole_ffma_block(nir_block *block, void *void_state)
{
   struct peephole_ffma_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *add = nir_instr_as_alu(instr);
      if (add->op != nir_op_fadd)
         continue;

      /* TODO: Maybe bail if this expression is considered "precise"? */

      assert(add->src[0].src.is_ssa && add->src[1].src.is_ssa);

      /* This is the case a + a.  We would rather handle this with an
       * algebraic reduction than fuse it.  Also, we want to only fuse
       * things where the multiply is used only once and, in this case,
       * it would be used twice by the same instruction.
       */
      if (add->src[0].src.ssa == add->src[1].src.ssa)
         continue;

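      /* Look behind each of the add's two sources for a multiply we can
       * absorb; the first source that yields one wins, and the swizzle,
       * negate, and abs accumulated along the way describe how to rebuild
       * that source on the ffma.
       */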
      nir_alu_instr *mul;
      uint8_t add_mul_src, swizzle[4];
      bool negate, abs;
      for (add_mul_src = 0; add_mul_src < 2; add_mul_src++) {
         for (unsigned i = 0; i < 4; i++)
            swizzle[i] = i;

         negate = false;
         abs = false;

         mul = get_mul_for_src(&add->src[add_mul_src], swizzle, &negate, &abs);

         if (mul != NULL)
            break;
      }

      if (mul == NULL)
         continue;

      nir_ssa_def *mul_src[2];
      mul_src[0] = mul->src[0].src.ssa;
      mul_src[1] = mul->src[1].src.ssa;

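      /* Any abs/negate collected while walking down to the multiply is
       * re-applied here as explicit fabs/fneg instructions on the multiply
       * operands, so the ffma built below consumes plain SSA values.
       */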
      if (abs) {
         for (unsigned i = 0; i < 2; i++) {
            nir_alu_instr *abs = nir_alu_instr_create(state->mem_ctx,
                                                      nir_op_fabs);
            abs->src[0].src = nir_src_for_ssa(mul_src[i]);
            nir_ssa_dest_init(&abs->instr, &abs->dest.dest,
                              mul_src[i]->num_components, NULL);
            abs->dest.write_mask = (1 << mul_src[i]->num_components) - 1;
            nir_instr_insert_before(&add->instr, &abs->instr);
            mul_src[i] = &abs->dest.dest.ssa;
         }
      }

      if (negate) {
         nir_alu_instr *neg = nir_alu_instr_create(state->mem_ctx,
                                                   nir_op_fneg);
         neg->src[0].src = nir_src_for_ssa(mul_src[0]);
         nir_ssa_dest_init(&neg->instr, &neg->dest.dest,
                           mul_src[0]->num_components, NULL);
         neg->dest.write_mask = (1 << mul_src[0]->num_components) - 1;
         nir_instr_insert_before(&add->instr, &neg->instr);
         mul_src[0] = &neg->dest.dest.ssa;
      }

      nir_alu_instr *ffma = nir_alu_instr_create(state->mem_ctx, nir_op_ffma);
      ffma->dest.saturate = add->dest.saturate;
      ffma->dest.write_mask = add->dest.write_mask;

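      /* The ffma's first two sources are the multiply's operands with the
       * swizzle accumulated through the mov/fneg/fabs chain applied on top
       * of the multiply's own source swizzles; the third source is simply
       * the add's other operand.
       */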
      for (unsigned i = 0; i < 2; i++) {
         ffma->src[i].src = nir_src_for_ssa(mul_src[i]);
         for (unsigned j = 0; j < add->dest.dest.ssa.num_components; j++)
            ffma->src[i].swizzle[j] = mul->src[i].swizzle[swizzle[j]];
      }
      nir_alu_src_copy(&ffma->src[2], &add->src[1 - add_mul_src],
                       state->mem_ctx);

      assert(add->dest.dest.is_ssa);

      nir_ssa_dest_init(&ffma->instr, &ffma->dest.dest,
                        add->dest.dest.ssa.num_components,
                        add->dest.dest.ssa.name);
      nir_ssa_def_rewrite_uses(&add->dest.dest.ssa,
                               nir_src_for_ssa(&ffma->dest.dest.ssa),
                               state->mem_ctx);

      nir_instr_insert_before(&add->instr, &ffma->instr);
      assert(add->dest.dest.ssa.uses->entries == 0);
      nir_instr_remove(&add->instr);

      state->progress = true;
   }

   return true;
}

static bool
nir_opt_peephole_ffma_impl(nir_function_impl *impl)
{
   struct peephole_ffma_state state;

   state.mem_ctx = ralloc_parent(impl);
   state.impl = impl;
   state.progress = false;

   nir_foreach_block(impl, nir_opt_peephole_ffma_block, &state);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return state.progress;
}

bool
nir_opt_peephole_ffma(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_overload(shader, overload) {
      if (overload->impl)
         progress |= nir_opt_peephole_ffma_impl(overload->impl);
   }

   return progress;
}