/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir.h"
#include "nir_builder.h"
/** @file nir_lower_alu_to_scalar.c
 *
 * Replaces nir_alu_instr operations with more than one channel used in the
 * arguments with individual per-channel operations.
 */
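/* For example (the notation below is illustrative rather than exact NIR
 * print output), a 4-wide instruction such as
 *
 *    vec4 ssa_2 = fadd ssa_0, ssa_1
 *
 * becomes one scalar op per enabled destination channel, regrouped by a
 * vec4:
 *
 *    ssa_3 = fadd ssa_0.x, ssa_1.x
 *    ssa_4 = fadd ssa_0.y, ssa_1.y
 *    ssa_5 = fadd ssa_0.z, ssa_1.z
 *    ssa_6 = fadd ssa_0.w, ssa_1.w
 *    vec4 ssa_7 = vec4 ssa_3, ssa_4, ssa_5, ssa_6
 */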
static void
nir_alu_ssa_dest_init(nir_alu_instr *instr, unsigned num_components,
                      unsigned bit_size)
{
   nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_components,
                     bit_size, NULL);
   instr->dest.write_mask = (1 << num_components) - 1;
}
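/* Lowers a reduction operation (one that folds an N-component source down to
 * a single scalar, e.g. fdot4 or ball_iequal3): chan_op is emitted once per
 * source channel and the per-channel results are combined with merge_op.
 */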
static void
lower_reduction(nir_alu_instr *instr, nir_op chan_op, nir_op merge_op,
                nir_builder *builder)
{
   unsigned num_components = nir_op_infos[instr->op].input_sizes[0];

   nir_ssa_def *last = NULL;
   for (unsigned i = 0; i < num_components; i++) {
      nir_alu_instr *chan = nir_alu_instr_create(builder->shader, chan_op);
      nir_alu_ssa_dest_init(chan, 1, instr->dest.dest.ssa.bit_size);
      nir_alu_src_copy(&chan->src[0], &instr->src[0], chan);
      chan->src[0].swizzle[0] = chan->src[0].swizzle[i];
      if (nir_op_infos[chan_op].num_inputs > 1) {
         assert(nir_op_infos[chan_op].num_inputs == 2);
         nir_alu_src_copy(&chan->src[1], &instr->src[1], chan);
         chan->src[1].swizzle[0] = chan->src[1].swizzle[i];
      }

      nir_builder_instr_insert(builder, &chan->instr);

      if (i == 0) {
         last = &chan->dest.dest.ssa;
      } else {
         last = nir_build_alu(builder, merge_op,
                              last, &chan->dest.dest.ssa, NULL, NULL);
      }
   }

   assert(instr->dest.write_mask == 1);
   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(last));
   nir_instr_remove(&instr->instr);
}
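/* Scalarizes a single ALU instruction: a few opcodes get dedicated
 * lowerings, reductions go through lower_reduction(), and everything else
 * takes the generic per-channel path at the end of the function.
 */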
static void
lower_alu_instr_scalar(nir_alu_instr *instr, nir_builder *b)
{
   unsigned num_src = nir_op_infos[instr->op].num_inputs;
   unsigned i, chan;

   assert(instr->dest.dest.is_ssa);
   assert(instr->dest.write_mask != 0);

   b->cursor = nir_before_instr(&instr->instr);
   b->exact = instr->exact;
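/* Expands to the case labels for the 2-, 3- and 4-component variants of a
 * reduction opcode and routes them through lower_reduction().
 */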
#define LOWER_REDUCTION(name, chan, merge) \
   case name##2: \
   case name##3: \
   case name##4: \
      lower_reduction(instr, chan, merge, b); \
      return;
   switch (instr->op) {
   case nir_op_vec4:
   case nir_op_vec3:
   case nir_op_vec2:
      /* We don't need to scalarize these ops, they're the ones generated to
       * group up outputs into a value that can be SSAed.
       */
      return;
   case nir_op_pack_half_2x16:
      if (!b->shader->options->lower_pack_half_2x16)
         return;

      nir_ssa_def *val =
         nir_pack_half_2x16_split(b, nir_channel(b, instr->src[0].src.ssa,
                                                 instr->src[0].swizzle[0]),
                                     nir_channel(b, instr->src[0].src.ssa,
                                                 instr->src[0].swizzle[1]));

      nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(val));
      nir_instr_remove(&instr->instr);
      return;
   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_snorm_2x16:
      /* There is no scalar version of these ops, unless we were to break it
       * down to bitshifts and math (which is definitely not intended).
       */
      return;
   case nir_op_unpack_half_2x16: {
      if (!b->shader->options->lower_unpack_half_2x16)
         return;

      nir_ssa_def *comps[2];
      comps[0] = nir_unpack_half_2x16_split_x(b, instr->src[0].src.ssa);
      comps[1] = nir_unpack_half_2x16_split_y(b, instr->src[0].src.ssa);
      nir_ssa_def *vec = nir_vec(b, comps, 2);

      nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(vec));
      nir_instr_remove(&instr->instr);
      return;
   }
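   /* Packs the low 16 bits of each of the two source components into the
    * low and high halves of a single 32-bit result.
    */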
   case nir_op_pack_uvec2_to_uint: {
      assert(b->shader->options->lower_pack_snorm_2x16 ||
             b->shader->options->lower_pack_unorm_2x16);

      nir_ssa_def *word =
         nir_extract_u16(b, instr->src[0].src.ssa, nir_imm_int(b, 0));
      nir_ssa_def *val =
         nir_ior(b, nir_ishl(b, nir_channel(b, word, 1), nir_imm_int(b, 16)),
                    nir_channel(b, word, 0));

      nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(val));
      nir_instr_remove(&instr->instr);
      break;
   }
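   /* Packs the low byte of each of the four source components into one
    * 32-bit result, component 0 in the least significant byte.
    */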
   case nir_op_pack_uvec4_to_uint: {
      assert(b->shader->options->lower_pack_snorm_4x8 ||
             b->shader->options->lower_pack_unorm_4x8);

      nir_ssa_def *byte =
         nir_extract_u8(b, instr->src[0].src.ssa, nir_imm_int(b, 0));
      nir_ssa_def *val =
         nir_ior(b, nir_ior(b, nir_ishl(b, nir_channel(b, byte, 3), nir_imm_int(b, 24)),
                               nir_ishl(b, nir_channel(b, byte, 2), nir_imm_int(b, 16))),
                    nir_ior(b, nir_ishl(b, nir_channel(b, byte, 1), nir_imm_int(b, 8)),
                               nir_channel(b, byte, 0)));

      nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(val));
      nir_instr_remove(&instr->instr);
      break;
   }
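   /* Homogeneous dot product: sums the products of the first three channels
    * and adds the fourth channel of the second source, i.e.
    * fdph(a, b) = a.x * b.x + a.y * b.y + a.z * b.z + b.w.
    */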
   case nir_op_fdph: {
      nir_ssa_def *sum[4];
      for (unsigned i = 0; i < 3; i++) {
         sum[i] = nir_fmul(b, nir_channel(b, instr->src[0].src.ssa,
                                          instr->src[0].swizzle[i]),
                              nir_channel(b, instr->src[1].src.ssa,
                                          instr->src[1].swizzle[i]));
      }
      sum[3] = nir_channel(b, instr->src[1].src.ssa, instr->src[1].swizzle[3]);

      nir_ssa_def *val = nir_fadd(b, nir_fadd(b, sum[0], sum[1]),
                                     nir_fadd(b, sum[2], sum[3]));

      nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(val));
      nir_instr_remove(&instr->instr);
      return;
   }
      LOWER_REDUCTION(nir_op_fdot, nir_op_fmul, nir_op_fadd);
      LOWER_REDUCTION(nir_op_ball_fequal, nir_op_feq, nir_op_iand);
      LOWER_REDUCTION(nir_op_ball_iequal, nir_op_ieq, nir_op_iand);
      LOWER_REDUCTION(nir_op_bany_fnequal, nir_op_fne, nir_op_ior);
      LOWER_REDUCTION(nir_op_bany_inequal, nir_op_ine, nir_op_ior);
      LOWER_REDUCTION(nir_op_fall_equal, nir_op_seq, nir_op_fand);
      LOWER_REDUCTION(nir_op_fany_nequal, nir_op_sne, nir_op_for);
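   /* Everything else falls through to the generic per-channel lowering
    * below.
    */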
   default:
      break;
   }

   if (instr->dest.dest.ssa.num_components == 1)
      return;

   unsigned num_components = instr->dest.dest.ssa.num_components;
   nir_ssa_def *comps[] = { NULL, NULL, NULL, NULL };
   for (chan = 0; chan < 4; chan++) {
      if (!(instr->dest.write_mask & (1 << chan)))
         continue;

      nir_alu_instr *lower = nir_alu_instr_create(b->shader, instr->op);
      for (i = 0; i < num_src; i++) {
         /* We only handle same-size-as-dest (input_sizes[] == 0) or scalar
          * args (input_sizes[] == 1).
          */
         assert(nir_op_infos[instr->op].input_sizes[i] < 2);
         unsigned src_chan = (nir_op_infos[instr->op].input_sizes[i] == 1 ?
                              0 : chan);

         nir_alu_src_copy(&lower->src[i], &instr->src[i], lower);
         for (int j = 0; j < 4; j++)
            lower->src[i].swizzle[j] = instr->src[i].swizzle[src_chan];
      }

      nir_alu_ssa_dest_init(lower, 1, instr->dest.dest.ssa.bit_size);
      lower->dest.saturate = instr->dest.saturate;
      comps[chan] = &lower->dest.dest.ssa;

      nir_builder_instr_insert(b, &lower->instr);
   }

   nir_ssa_def *vec = nir_vec(b, comps, num_components);

   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(vec));

   nir_instr_remove(&instr->instr);
}
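/* Per-block callback for nir_foreach_block(): scalarizes every ALU
 * instruction in the block.
 */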
static bool
lower_alu_to_scalar_block(nir_block *block, void *builder)
{
   nir_foreach_instr_safe(block, instr) {
      if (instr->type == nir_instr_type_alu)
         lower_alu_instr_scalar(nir_instr_as_alu(instr), builder);
   }

   return true;
}
static void
nir_lower_alu_to_scalar_impl(nir_function_impl *impl)
{
   nir_builder builder;
   nir_builder_init(&builder, impl);

   nir_foreach_block(impl, lower_alu_to_scalar_block, &builder);
}
void
nir_lower_alu_to_scalar(nir_shader *shader)
{
   nir_foreach_function(shader, function) {
      if (function->impl)
         nir_lower_alu_to_scalar_impl(function->impl);
   }
}