/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "nir.h"

/*
 * Implements a small peephole optimization that looks for a multiply that
 * is only ever used in an add and replaces both with an fma.
 */
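
/* In NIR terms, the intended transform looks roughly like:
 *
 *    ssa_3 = fmul ssa_1, ssa_2
 *    ssa_5 = fadd ssa_3, ssa_4
 *
 * becoming
 *
 *    ssa_5 = ffma ssa_1, ssa_2, ssa_4
 *
 * where the fmul result is only consumed by the fadd.
 */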

struct peephole_ffma_state {
   void *mem_ctx;
   nir_function_impl *impl;
   bool progress;
};
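
/* Walks up the use-def chain from the given ALU source, looking through
 * fmov/fneg/fabs instructions, to find the fmul (if any) that feeds it.
 * Swizzles seen along the way are folded into swizzle[], and any
 * negate/absolute-value modifiers are recorded in *negate and *abs.
 * Returns NULL if the source is not ultimately a multiply.
 */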
static nir_alu_instr *
get_mul_for_src(nir_alu_src *src, uint8_t swizzle[4], bool *negate, bool *abs)
{
   assert(src->src.is_ssa && !src->abs && !src->negate);

   nir_instr *instr = src->src.ssa->parent_instr;
   if (instr->type != nir_instr_type_alu)
      return NULL;

   nir_alu_instr *alu = nir_instr_as_alu(instr);
   switch (alu->op) {
   case nir_op_imov:
   case nir_op_fmov:
      alu = get_mul_for_src(&alu->src[0], swizzle, negate, abs);
      break;

   case nir_op_fneg:
      alu = get_mul_for_src(&alu->src[0], swizzle, negate, abs);
      *negate = !*negate;
      break;

   case nir_op_fabs:
      alu = get_mul_for_src(&alu->src[0], swizzle, negate, abs);
      *negate = false;
      *abs = true;
      break;

   case nir_op_fmul:
      break;

   default:
      return NULL;
   }

   if (!alu)
      return NULL;

   for (unsigned i = 0; i < 4; i++) {
      if (!(alu->dest.write_mask & (1 << i)))
         break;

      swizzle[i] = swizzle[src->swizzle[i]];
   }

   return alu;
}
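
/* Called once per basic block: looks for fadd instructions whose source is
 * (a possibly negated or absolute-valued) fmul and fuses each such pair into
 * a single ffma.
 */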
static bool
nir_opt_peephole_ffma_block(nir_block *block, void *void_state)
{
   struct peephole_ffma_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *add = nir_instr_as_alu(instr);
      if (add->op != nir_op_fadd)
         continue;

      /* TODO: Maybe bail if this expression is considered "precise"? */

      assert(add->src[0].src.is_ssa && add->src[1].src.is_ssa);

      /* This is the case a + a.  We would rather handle this with an
       * algebraic reduction than fuse it.  Also, we want to only fuse
       * things where the multiply is used only once and, in this case,
       * it would be used twice by the same instruction.
       */
      if (add->src[0].src.ssa == add->src[1].src.ssa)
         continue;

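      /* Try each of the add's two sources in turn and take the first one
       * that turns out to be fed by a multiply.
       */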
      nir_alu_instr *mul;
      uint8_t add_mul_src, swizzle[4];
      bool negate, abs;
      for (add_mul_src = 0; add_mul_src < 2; add_mul_src++) {
         for (unsigned i = 0; i < 4; i++)
            swizzle[i] = i;

         negate = false;
         abs = false;

         mul = get_mul_for_src(&add->src[add_mul_src], swizzle,
                               &negate, &abs);

         if (mul != NULL)
            break;
      }

      if (mul == NULL)
         continue;

      nir_ssa_def *mul_src[2];
      mul_src[0] = mul->src[0].src.ssa;
      mul_src[1] = mul->src[1].src.ssa;

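      /* Negate/abs modifiers picked up on the way to the multiply are
       * materialized as explicit instructions before the ffma: an fabs on
       * each multiply operand and, for a negate, an fneg on the first one.
       */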
      if (abs) {
         for (unsigned i = 0; i < 2; i++) {
            nir_alu_instr *abs = nir_alu_instr_create(state->mem_ctx,
                                                      nir_op_fabs);
            abs->src[0].src = nir_src_for_ssa(mul_src[i]);
            nir_ssa_dest_init(&abs->instr, &abs->dest.dest,
                              mul_src[i]->num_components, NULL);
            abs->dest.write_mask = (1 << mul_src[i]->num_components) - 1;
            nir_instr_insert_before(&add->instr, &abs->instr);
            mul_src[i] = &abs->dest.dest.ssa;
         }
      }

      if (negate) {
         nir_alu_instr *neg = nir_alu_instr_create(state->mem_ctx,
                                                   nir_op_fneg);
         neg->src[0].src = nir_src_for_ssa(mul_src[0]);
         nir_ssa_dest_init(&neg->instr, &neg->dest.dest,
                           mul_src[0]->num_components, NULL);
         neg->dest.write_mask = (1 << mul_src[0]->num_components) - 1;
         nir_instr_insert_before(&add->instr, &neg->instr);
         mul_src[0] = &neg->dest.dest.ssa;
      }

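      /* Build the ffma: it inherits the add's saturate flag and write mask,
       * takes the (possibly wrapped) multiply operands as its first two
       * sources, and the add's other operand as the third.
       */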
      nir_alu_instr *ffma = nir_alu_instr_create(state->mem_ctx, nir_op_ffma);
      ffma->dest.saturate = add->dest.saturate;
      ffma->dest.write_mask = add->dest.write_mask;

      for (unsigned i = 0; i < 2; i++) {
         ffma->src[i].src = nir_src_for_ssa(mul_src[i]);
         for (unsigned j = 0; j < add->dest.dest.ssa.num_components; j++)
            ffma->src[i].swizzle[j] = mul->src[i].swizzle[swizzle[j]];
      }
      nir_alu_src_copy(&ffma->src[2], &add->src[1 - add_mul_src],
                       state->mem_ctx);

      assert(add->dest.dest.is_ssa);

      nir_ssa_dest_init(&ffma->instr, &ffma->dest.dest,
                        add->dest.dest.ssa.num_components,
                        add->dest.dest.ssa.name);
      nir_ssa_def_rewrite_uses(&add->dest.dest.ssa,
                               nir_src_for_ssa(&ffma->dest.dest.ssa),
                               state->mem_ctx);

      nir_instr_insert_before(&add->instr, &ffma->instr);
      assert(add->dest.dest.ssa.uses->entries == 0);
      nir_instr_remove(&add->instr);

      state->progress = true;
   }

   return true;
}
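
/* Runs the peephole over every block of one function implementation and
 * reports whether any instructions were fused.
 */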
static bool
nir_opt_peephole_ffma_impl(nir_function_impl *impl)
{
   struct peephole_ffma_state state;

   state.mem_ctx = ralloc_parent(impl);
   state.impl = impl;
   state.progress = false;

   nir_foreach_block(impl, nir_opt_peephole_ffma_block, &state);

   if (state.progress)
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);

   return state.progress;
}
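
/* Pass entry point: applies the ffma peephole to every function overload in
 * the shader and returns whether anything changed.
 */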
bool
nir_opt_peephole_ffma(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_overload(shader, overload) {
      if (overload->impl)
         progress |= nir_opt_peephole_ffma_impl(overload->impl);
   }

   return progress;
}