/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

struct alu_to_scalar_data {
   nir_instr_filter_cb cb;
   const void *data;
};

/** @file nir_lower_alu_to_scalar.c
 *
 * Replaces nir_alu_instr operations with more than one channel used in the
 * arguments with individual per-channel operations.
 */

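/* As a rough sketch (NIR-assembly-like pseudocode, not exact syntax), a
 * two-component add such as
 *
 *    vec2 ssa_2 = fadd ssa_0, ssa_1
 *
 * is rewritten as one scalar op per channel, regrouped with a vec op:
 *
 *    ssa_3 = fadd ssa_0.x, ssa_1.x
 *    ssa_4 = fadd ssa_0.y, ssa_1.y
 *    vec2 ssa_2 = vec2 ssa_3, ssa_4
 */
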
static bool
inst_is_vector_alu(const nir_instr *instr, const void *_state)
{
   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(instr);

   /* There is no ALU instruction which has a scalar destination, scalar
    * src[0], and some other vector source.
    */
   assert(alu->dest.dest.is_ssa);
   assert(alu->src[0].src.is_ssa);
   return alu->dest.dest.ssa.num_components > 1 ||
          nir_op_infos[alu->op].input_sizes[0] > 1;
}

static void
nir_alu_ssa_dest_init(nir_alu_instr *alu, unsigned num_components,
                      unsigned bit_size)
{
   nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components,
                     bit_size, NULL);
   alu->dest.write_mask = (1 << num_components) - 1;
}

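/* Lowers a horizontal reduction to a chain of scalar ops: each channel is
 * combined with chan_op and the per-channel results are folded together with
 * merge_op, left-associated.  For example, an fdot3 conceptually becomes
 *
 *    fadd(fadd(fmul(a.x, b.x), fmul(a.y, b.y)), fmul(a.z, b.z))
 */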
static nir_ssa_def *
lower_reduction(nir_alu_instr *alu, nir_op chan_op, nir_op merge_op,
                nir_builder *builder)
{
   unsigned num_components = nir_op_infos[alu->op].input_sizes[0];

   nir_ssa_def *last = NULL;
   for (unsigned i = 0; i < num_components; i++) {
      nir_alu_instr *chan = nir_alu_instr_create(builder->shader, chan_op);
      nir_alu_ssa_dest_init(chan, 1, alu->dest.dest.ssa.bit_size);
      nir_alu_src_copy(&chan->src[0], &alu->src[0], chan);
      chan->src[0].swizzle[0] = chan->src[0].swizzle[i];
      if (nir_op_infos[chan_op].num_inputs > 1) {
         assert(nir_op_infos[chan_op].num_inputs == 2);
         nir_alu_src_copy(&chan->src[1], &alu->src[1], chan);
         chan->src[1].swizzle[0] = chan->src[1].swizzle[i];
      }
      chan->exact = alu->exact;

      nir_builder_instr_insert(builder, &chan->instr);

      if (i == 0) {
         last = &chan->dest.dest.ssa;
      } else {
         last = nir_build_alu(builder, merge_op,
                              last, &chan->dest.dest.ssa, NULL, NULL);
      }
   }

   return last;
}

static nir_ssa_def *
lower_alu_instr_scalar(nir_builder *b, nir_instr *instr, void *_data)
{
   struct alu_to_scalar_data *data = _data;
   nir_alu_instr *alu = nir_instr_as_alu(instr);
   unsigned num_src = nir_op_infos[alu->op].num_inputs;
   unsigned i, chan;

   assert(alu->dest.dest.is_ssa);
   assert(alu->dest.write_mask != 0);

   b->cursor = nir_before_instr(&alu->instr);
   b->exact = alu->exact;

   if (data->cb && !data->cb(instr, data->data))
      return NULL;

#define LOWER_REDUCTION(name, chan, merge) \
   case name##2: \
   case name##3: \
   case name##4: \
      return lower_reduction(alu, chan, merge, b); \

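/* For example, LOWER_REDUCTION(nir_op_fdot, nir_op_fmul, nir_op_fadd)
 * expands to case labels for nir_op_fdot2, nir_op_fdot3 and nir_op_fdot4,
 * each dispatching to lower_reduction() with fmul as the per-channel op and
 * fadd as the merge op.
 */
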
   switch (alu->op) {
   case nir_op_vec4:
   case nir_op_vec3:
   case nir_op_vec2:
   case nir_op_cube_face_coord:
   case nir_op_cube_face_index:
      /* We don't need to scalarize these ops, they're the ones generated to
       * group up outputs into a value that can be SSAed.
       */
      return NULL;

   case nir_op_pack_half_2x16: {
      if (!b->shader->options->lower_pack_half_2x16)
         return NULL;

      nir_ssa_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
      return nir_pack_half_2x16_split(b, nir_channel(b, src_vec2, 0),
                                         nir_channel(b, src_vec2, 1));
   }

   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_snorm_2x16:
      /* There is no scalar version of these ops, unless we were to break it
       * down to bitshifts and math (which is definitely not intended).
       */
      return NULL;

   case nir_op_unpack_half_2x16_flush_to_zero:
   case nir_op_unpack_half_2x16: {
      if (!b->shader->options->lower_unpack_half_2x16)
         return NULL;

      nir_ssa_def *packed = nir_ssa_for_alu_src(b, alu, 0);
      if (alu->op == nir_op_unpack_half_2x16_flush_to_zero) {
         return nir_vec2(b,
                         nir_unpack_half_2x16_split_x_flush_to_zero(b,
                                                                    packed),
                         nir_unpack_half_2x16_split_y_flush_to_zero(b,
                                                                    packed));
      } else {
         return nir_vec2(b,
                         nir_unpack_half_2x16_split_x(b, packed),
                         nir_unpack_half_2x16_split_y(b, packed));
      }
   }

   case nir_op_pack_uvec2_to_uint: {
      assert(b->shader->options->lower_pack_snorm_2x16 ||
             b->shader->options->lower_pack_unorm_2x16);

      nir_ssa_def *word = nir_extract_u16(b, nir_ssa_for_alu_src(b, alu, 0),
                                             nir_imm_int(b, 0));
      return nir_ior(b, nir_ishl(b, nir_channel(b, word, 1),
                                    nir_imm_int(b, 16)),
                        nir_channel(b, word, 0));
   }

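   /* Same idea for a uvec4: the low byte of each channel is packed as
    * (b.w << 24) | (b.z << 16) | (b.y << 8) | b.x.
    */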
   case nir_op_pack_uvec4_to_uint: {
      assert(b->shader->options->lower_pack_snorm_4x8 ||
             b->shader->options->lower_pack_unorm_4x8);

      nir_ssa_def *byte = nir_extract_u8(b, nir_ssa_for_alu_src(b, alu, 0),
                                            nir_imm_int(b, 0));
      return nir_ior(b, nir_ior(b, nir_ishl(b, nir_channel(b, byte, 3),
                                               nir_imm_int(b, 24)),
                                   nir_ishl(b, nir_channel(b, byte, 2),
                                               nir_imm_int(b, 16))),
                        nir_ior(b, nir_ishl(b, nir_channel(b, byte, 1),
                                               nir_imm_int(b, 8)),
                                   nir_channel(b, byte, 0)));
   }

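   /* fdph is the homogeneous dot product dot(src0.xyz, src1.xyz) + src1.w,
    * built here from scalar fmuls and fadds.
    */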
   case nir_op_fdph: {
      nir_ssa_def *src0_vec = nir_ssa_for_alu_src(b, alu, 0);
      nir_ssa_def *src1_vec = nir_ssa_for_alu_src(b, alu, 1);

      nir_ssa_def *sum[4];
      for (unsigned i = 0; i < 3; i++) {
         sum[i] = nir_fmul(b, nir_channel(b, src0_vec, i),
                              nir_channel(b, src1_vec, i));
      }
      sum[3] = nir_channel(b, src1_vec, 3);

      return nir_fadd(b, nir_fadd(b, sum[0], sum[1]),
                         nir_fadd(b, sum[2], sum[3]));
   }

   case nir_op_unpack_64_2x32:
   case nir_op_unpack_32_2x16:
      return NULL;

      LOWER_REDUCTION(nir_op_fdot, nir_op_fmul, nir_op_fadd);
      LOWER_REDUCTION(nir_op_ball_fequal, nir_op_feq, nir_op_iand);
      LOWER_REDUCTION(nir_op_ball_iequal, nir_op_ieq, nir_op_iand);
      LOWER_REDUCTION(nir_op_bany_fnequal, nir_op_fne, nir_op_ior);
      LOWER_REDUCTION(nir_op_bany_inequal, nir_op_ine, nir_op_ior);
      LOWER_REDUCTION(nir_op_b32all_fequal, nir_op_feq32, nir_op_iand);
      LOWER_REDUCTION(nir_op_b32all_iequal, nir_op_ieq32, nir_op_iand);
      LOWER_REDUCTION(nir_op_b32any_fnequal, nir_op_fne32, nir_op_ior);
      LOWER_REDUCTION(nir_op_b32any_inequal, nir_op_ine32, nir_op_ior);
      LOWER_REDUCTION(nir_op_fall_equal, nir_op_seq, nir_op_fmin);
      LOWER_REDUCTION(nir_op_fany_nequal, nir_op_sne, nir_op_fmax);

   default:
      break;
   }

   if (alu->dest.dest.ssa.num_components == 1)
      return NULL;

   unsigned num_components = alu->dest.dest.ssa.num_components;
   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS] = { NULL };

   for (chan = 0; chan < NIR_MAX_VEC_COMPONENTS; chan++) {
      if (!(alu->dest.write_mask & (1 << chan)))
         continue;

      nir_alu_instr *lower = nir_alu_instr_create(b->shader, alu->op);
      for (i = 0; i < num_src; i++) {
         /* We only handle same-size-as-dest (input_sizes[] == 0) or scalar
          * args (input_sizes[] == 1).
          */
         assert(nir_op_infos[alu->op].input_sizes[i] < 2);
         unsigned src_chan = (nir_op_infos[alu->op].input_sizes[i] == 1 ?
                              0 : chan);

         nir_alu_src_copy(&lower->src[i], &alu->src[i], lower);
         for (int j = 0; j < NIR_MAX_VEC_COMPONENTS; j++)
            lower->src[i].swizzle[j] = alu->src[i].swizzle[src_chan];
      }

      nir_alu_ssa_dest_init(lower, 1, alu->dest.dest.ssa.bit_size);
      lower->dest.saturate = alu->dest.saturate;
      comps[chan] = &lower->dest.dest.ssa;
      lower->exact = alu->exact;

      nir_builder_instr_insert(b, &lower->instr);
   }

   return nir_vec(b, comps, num_components);
}

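/* Entry point.  With cb == NULL every vector ALU instruction is scalarized;
 * otherwise only instructions for which the filter callback returns true are
 * lowered (see the data->cb check in lower_alu_instr_scalar).
 */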
bool
nir_lower_alu_to_scalar(nir_shader *shader, nir_instr_filter_cb cb, const void *_data)
{
   struct alu_to_scalar_data data = {
      .cb = cb,
      .data = _data,
   };

   return nir_shader_lower_instructions(shader,
                                        inst_is_vector_alu,
                                        lower_alu_instr_scalar,
                                        &data);
}