/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "nir_builder.h"
/**
 * \file nir_opt_intrinsics.c
 *
 * NOTE(review): the \file name looks stale — this file lowers subgroup
 * intrinsics (likely nir_lower_subgroups.c); confirm against the build.
 */
31 static nir_intrinsic_instr
*
32 lower_subgroups_64bit_split_intrinsic(nir_builder
*b
, nir_intrinsic_instr
*intrin
,
33 unsigned int component
)
37 comp
= nir_unpack_64_2x32_split_x(b
, intrin
->src
[0].ssa
);
39 comp
= nir_unpack_64_2x32_split_y(b
, intrin
->src
[0].ssa
);
41 nir_intrinsic_instr
*intr
= nir_intrinsic_instr_create(b
->shader
, intrin
->intrinsic
);
42 nir_ssa_dest_init(&intr
->instr
, &intr
->dest
, 1, 32, NULL
);
43 intr
->const_index
[0] = intrin
->const_index
[0];
44 intr
->const_index
[1] = intrin
->const_index
[1];
45 intr
->src
[0] = nir_src_for_ssa(comp
);
46 if (nir_intrinsic_infos
[intrin
->intrinsic
].num_srcs
== 2)
47 nir_src_copy(&intr
->src
[1], &intrin
->src
[1], intr
);
49 intr
->num_components
= 1;
50 nir_builder_instr_insert(b
, &intr
->instr
);
55 lower_subgroup_op_to_32bit(nir_builder
*b
, nir_intrinsic_instr
*intrin
)
57 assert(intrin
->src
[0].ssa
->bit_size
== 64);
58 nir_intrinsic_instr
*intr_x
= lower_subgroups_64bit_split_intrinsic(b
, intrin
, 0);
59 nir_intrinsic_instr
*intr_y
= lower_subgroups_64bit_split_intrinsic(b
, intrin
, 1);
60 return nir_pack_64_2x32_split(b
, &intr_x
->dest
.ssa
, &intr_y
->dest
.ssa
);
64 ballot_type_to_uint(nir_builder
*b
, nir_ssa_def
*value
, unsigned bit_size
)
66 /* We only use this on uvec4 types */
67 assert(value
->num_components
== 4 && value
->bit_size
== 32);
70 return nir_channel(b
, value
, 0);
72 assert(bit_size
== 64);
73 return nir_pack_64_2x32_split(b
, nir_channel(b
, value
, 0),
74 nir_channel(b
, value
, 1));
78 /* Converts a uint32_t or uint64_t value to uint64_t or uvec4 */
80 uint_to_ballot_type(nir_builder
*b
, nir_ssa_def
*value
,
81 unsigned num_components
, unsigned bit_size
)
83 assert(value
->num_components
== 1);
84 assert(value
->bit_size
== 32 || value
->bit_size
== 64);
86 nir_ssa_def
*zero
= nir_imm_int(b
, 0);
87 if (num_components
> 1) {
88 /* SPIR-V uses a uvec4 for ballot values */
89 assert(num_components
== 4);
90 assert(bit_size
== 32);
92 if (value
->bit_size
== 32) {
93 return nir_vec4(b
, value
, zero
, zero
, zero
);
95 assert(value
->bit_size
== 64);
96 return nir_vec4(b
, nir_unpack_64_2x32_split_x(b
, value
),
97 nir_unpack_64_2x32_split_y(b
, value
),
101 /* GLSL uses a uint64_t for ballot values */
102 assert(num_components
== 1);
103 assert(bit_size
== 64);
105 if (value
->bit_size
== 32) {
106 return nir_pack_64_2x32_split(b
, value
, zero
);
108 assert(value
->bit_size
== 64);
115 lower_subgroup_op_to_scalar(nir_builder
*b
, nir_intrinsic_instr
*intrin
,
118 /* This is safe to call on scalar things but it would be silly */
119 assert(intrin
->dest
.ssa
.num_components
> 1);
121 nir_ssa_def
*value
= nir_ssa_for_src(b
, intrin
->src
[0],
122 intrin
->num_components
);
123 nir_ssa_def
*reads
[4];
125 for (unsigned i
= 0; i
< intrin
->num_components
; i
++) {
126 nir_intrinsic_instr
*chan_intrin
=
127 nir_intrinsic_instr_create(b
->shader
, intrin
->intrinsic
);
128 nir_ssa_dest_init(&chan_intrin
->instr
, &chan_intrin
->dest
,
129 1, intrin
->dest
.ssa
.bit_size
, NULL
);
130 chan_intrin
->num_components
= 1;
133 chan_intrin
->src
[0] = nir_src_for_ssa(nir_channel(b
, value
, i
));
135 if (nir_intrinsic_infos
[intrin
->intrinsic
].num_srcs
> 1) {
136 assert(nir_intrinsic_infos
[intrin
->intrinsic
].num_srcs
== 2);
137 nir_src_copy(&chan_intrin
->src
[1], &intrin
->src
[1], chan_intrin
);
140 chan_intrin
->const_index
[0] = intrin
->const_index
[0];
141 chan_intrin
->const_index
[1] = intrin
->const_index
[1];
143 if (lower_to_32bit
&& chan_intrin
->src
[0].ssa
->bit_size
== 64) {
144 reads
[i
] = lower_subgroup_op_to_32bit(b
, chan_intrin
);
146 nir_builder_instr_insert(b
, &chan_intrin
->instr
);
147 reads
[i
] = &chan_intrin
->dest
.ssa
;
151 return nir_vec(b
, reads
, intrin
->num_components
);
155 lower_vote_eq_to_scalar(nir_builder
*b
, nir_intrinsic_instr
*intrin
)
157 assert(intrin
->src
[0].is_ssa
);
158 nir_ssa_def
*value
= intrin
->src
[0].ssa
;
160 nir_ssa_def
*result
= NULL
;
161 for (unsigned i
= 0; i
< intrin
->num_components
; i
++) {
162 nir_intrinsic_instr
*chan_intrin
=
163 nir_intrinsic_instr_create(b
->shader
, intrin
->intrinsic
);
164 nir_ssa_dest_init(&chan_intrin
->instr
, &chan_intrin
->dest
,
165 1, intrin
->dest
.ssa
.bit_size
, NULL
);
166 chan_intrin
->num_components
= 1;
167 chan_intrin
->src
[0] = nir_src_for_ssa(nir_channel(b
, value
, i
));
168 nir_builder_instr_insert(b
, &chan_intrin
->instr
);
171 result
= nir_iand(b
, result
, &chan_intrin
->dest
.ssa
);
173 result
= &chan_intrin
->dest
.ssa
;
181 lower_vote_eq_to_ballot(nir_builder
*b
, nir_intrinsic_instr
*intrin
,
182 const nir_lower_subgroups_options
*options
)
184 assert(intrin
->src
[0].is_ssa
);
185 nir_ssa_def
*value
= intrin
->src
[0].ssa
;
187 /* We have to implicitly lower to scalar */
188 nir_ssa_def
*all_eq
= NULL
;
189 for (unsigned i
= 0; i
< intrin
->num_components
; i
++) {
190 nir_intrinsic_instr
*rfi
=
191 nir_intrinsic_instr_create(b
->shader
,
192 nir_intrinsic_read_first_invocation
);
193 nir_ssa_dest_init(&rfi
->instr
, &rfi
->dest
,
194 1, value
->bit_size
, NULL
);
195 rfi
->num_components
= 1;
196 rfi
->src
[0] = nir_src_for_ssa(nir_channel(b
, value
, i
));
197 nir_builder_instr_insert(b
, &rfi
->instr
);
200 if (intrin
->intrinsic
== nir_intrinsic_vote_feq
) {
201 is_eq
= nir_feq(b
, &rfi
->dest
.ssa
, nir_channel(b
, value
, i
));
203 is_eq
= nir_ieq(b
, &rfi
->dest
.ssa
, nir_channel(b
, value
, i
));
206 if (all_eq
== NULL
) {
209 all_eq
= nir_iand(b
, all_eq
, is_eq
);
213 nir_intrinsic_instr
*ballot
=
214 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_ballot
);
215 nir_ssa_dest_init(&ballot
->instr
, &ballot
->dest
,
216 1, options
->ballot_bit_size
, NULL
);
217 ballot
->num_components
= 1;
218 ballot
->src
[0] = nir_src_for_ssa(nir_inot(b
, all_eq
));
219 nir_builder_instr_insert(b
, &ballot
->instr
);
221 return nir_ieq(b
, &ballot
->dest
.ssa
,
222 nir_imm_intN_t(b
, 0, options
->ballot_bit_size
));
226 lower_shuffle(nir_builder
*b
, nir_intrinsic_instr
*intrin
,
227 bool lower_to_scalar
, bool lower_to_32bit
)
229 nir_ssa_def
*index
= nir_load_subgroup_invocation(b
);
230 switch (intrin
->intrinsic
) {
231 case nir_intrinsic_shuffle_xor
:
232 assert(intrin
->src
[1].is_ssa
);
233 index
= nir_ixor(b
, index
, intrin
->src
[1].ssa
);
235 case nir_intrinsic_shuffle_up
:
236 assert(intrin
->src
[1].is_ssa
);
237 index
= nir_isub(b
, index
, intrin
->src
[1].ssa
);
239 case nir_intrinsic_shuffle_down
:
240 assert(intrin
->src
[1].is_ssa
);
241 index
= nir_iadd(b
, index
, intrin
->src
[1].ssa
);
243 case nir_intrinsic_quad_broadcast
:
244 assert(intrin
->src
[1].is_ssa
);
245 index
= nir_ior(b
, nir_iand(b
, index
, nir_imm_int(b
, ~0x3)),
248 case nir_intrinsic_quad_swap_horizontal
:
249 /* For Quad operations, subgroups are divided into quads where
250 * (invocation % 4) is the index to a square arranged as follows:
258 index
= nir_ixor(b
, index
, nir_imm_int(b
, 0x1));
260 case nir_intrinsic_quad_swap_vertical
:
261 index
= nir_ixor(b
, index
, nir_imm_int(b
, 0x2));
263 case nir_intrinsic_quad_swap_diagonal
:
264 index
= nir_ixor(b
, index
, nir_imm_int(b
, 0x3));
267 unreachable("Invalid intrinsic");
270 nir_intrinsic_instr
*shuffle
=
271 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_shuffle
);
272 shuffle
->num_components
= intrin
->num_components
;
273 nir_src_copy(&shuffle
->src
[0], &intrin
->src
[0], shuffle
);
274 shuffle
->src
[1] = nir_src_for_ssa(index
);
275 nir_ssa_dest_init(&shuffle
->instr
, &shuffle
->dest
,
276 intrin
->dest
.ssa
.num_components
,
277 intrin
->dest
.ssa
.bit_size
, NULL
);
279 if (lower_to_scalar
&& shuffle
->num_components
> 1) {
280 return lower_subgroup_op_to_scalar(b
, shuffle
, lower_to_32bit
);
281 } else if (lower_to_32bit
&& shuffle
->src
[0].ssa
->bit_size
== 64) {
282 return lower_subgroup_op_to_32bit(b
, shuffle
);
284 nir_builder_instr_insert(b
, &shuffle
->instr
);
285 return &shuffle
->dest
.ssa
;
290 lower_subgroups_filter(const nir_instr
*instr
, const void *_options
)
292 return instr
->type
== nir_instr_type_intrinsic
;
296 lower_subgroups_instr(nir_builder
*b
, nir_instr
*instr
, void *_options
)
298 const nir_lower_subgroups_options
*options
= _options
;
300 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
301 switch (intrin
->intrinsic
) {
302 case nir_intrinsic_vote_any
:
303 case nir_intrinsic_vote_all
:
304 if (options
->lower_vote_trivial
)
305 return nir_ssa_for_src(b
, intrin
->src
[0], 1);
308 case nir_intrinsic_vote_feq
:
309 case nir_intrinsic_vote_ieq
:
310 if (options
->lower_vote_trivial
)
311 return nir_imm_true(b
);
313 if (options
->lower_vote_eq_to_ballot
)
314 return lower_vote_eq_to_ballot(b
, intrin
, options
);
316 if (options
->lower_to_scalar
&& intrin
->num_components
> 1)
317 return lower_vote_eq_to_scalar(b
, intrin
);
320 case nir_intrinsic_load_subgroup_size
:
321 if (options
->subgroup_size
)
322 return nir_imm_int(b
, options
->subgroup_size
);
325 case nir_intrinsic_read_invocation
:
326 case nir_intrinsic_read_first_invocation
:
327 if (options
->lower_to_scalar
&& intrin
->num_components
> 1)
328 return lower_subgroup_op_to_scalar(b
, intrin
, false);
331 case nir_intrinsic_load_subgroup_eq_mask
:
332 case nir_intrinsic_load_subgroup_ge_mask
:
333 case nir_intrinsic_load_subgroup_gt_mask
:
334 case nir_intrinsic_load_subgroup_le_mask
:
335 case nir_intrinsic_load_subgroup_lt_mask
: {
336 if (!options
->lower_subgroup_masks
)
339 /* If either the result or the requested bit size is 64-bits then we
340 * know that we have 64-bit types and using them will probably be more
341 * efficient than messing around with 32-bit shifts and packing.
343 const unsigned bit_size
= MAX2(options
->ballot_bit_size
,
344 intrin
->dest
.ssa
.bit_size
);
346 assert(options
->subgroup_size
<= 64);
347 uint64_t group_mask
= ~0ull >> (64 - options
->subgroup_size
);
349 nir_ssa_def
*count
= nir_load_subgroup_invocation(b
);
351 switch (intrin
->intrinsic
) {
352 case nir_intrinsic_load_subgroup_eq_mask
:
353 val
= nir_ishl(b
, nir_imm_intN_t(b
, 1ull, bit_size
), count
);
355 case nir_intrinsic_load_subgroup_ge_mask
:
356 val
= nir_iand(b
, nir_ishl(b
, nir_imm_intN_t(b
, ~0ull, bit_size
), count
),
357 nir_imm_intN_t(b
, group_mask
, bit_size
));
359 case nir_intrinsic_load_subgroup_gt_mask
:
360 val
= nir_iand(b
, nir_ishl(b
, nir_imm_intN_t(b
, ~1ull, bit_size
), count
),
361 nir_imm_intN_t(b
, group_mask
, bit_size
));
363 case nir_intrinsic_load_subgroup_le_mask
:
364 val
= nir_inot(b
, nir_ishl(b
, nir_imm_intN_t(b
, ~1ull, bit_size
), count
));
366 case nir_intrinsic_load_subgroup_lt_mask
:
367 val
= nir_inot(b
, nir_ishl(b
, nir_imm_intN_t(b
, ~0ull, bit_size
), count
));
370 unreachable("you seriously can't tell this is unreachable?");
373 return uint_to_ballot_type(b
, val
,
374 intrin
->dest
.ssa
.num_components
,
375 intrin
->dest
.ssa
.bit_size
);
378 case nir_intrinsic_ballot
: {
379 if (intrin
->dest
.ssa
.num_components
== 1 &&
380 intrin
->dest
.ssa
.bit_size
== options
->ballot_bit_size
)
383 nir_intrinsic_instr
*ballot
=
384 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_ballot
);
385 ballot
->num_components
= 1;
386 nir_ssa_dest_init(&ballot
->instr
, &ballot
->dest
,
387 1, options
->ballot_bit_size
, NULL
);
388 nir_src_copy(&ballot
->src
[0], &intrin
->src
[0], ballot
);
389 nir_builder_instr_insert(b
, &ballot
->instr
);
391 return uint_to_ballot_type(b
, &ballot
->dest
.ssa
,
392 intrin
->dest
.ssa
.num_components
,
393 intrin
->dest
.ssa
.bit_size
);
396 case nir_intrinsic_ballot_bitfield_extract
:
397 case nir_intrinsic_ballot_bit_count_reduce
:
398 case nir_intrinsic_ballot_find_lsb
:
399 case nir_intrinsic_ballot_find_msb
: {
400 assert(intrin
->src
[0].is_ssa
);
401 nir_ssa_def
*int_val
= ballot_type_to_uint(b
, intrin
->src
[0].ssa
,
402 options
->ballot_bit_size
);
403 switch (intrin
->intrinsic
) {
404 case nir_intrinsic_ballot_bitfield_extract
:
405 assert(intrin
->src
[1].is_ssa
);
406 return nir_i2b(b
, nir_iand(b
, nir_ushr(b
, int_val
,
408 nir_imm_intN_t(b
, 1, options
->ballot_bit_size
)));
409 case nir_intrinsic_ballot_bit_count_reduce
:
410 return nir_bit_count(b
, int_val
);
411 case nir_intrinsic_ballot_find_lsb
:
412 return nir_find_lsb(b
, int_val
);
413 case nir_intrinsic_ballot_find_msb
:
414 return nir_ufind_msb(b
, int_val
);
416 unreachable("you seriously can't tell this is unreachable?");
420 case nir_intrinsic_ballot_bit_count_exclusive
:
421 case nir_intrinsic_ballot_bit_count_inclusive
: {
422 nir_ssa_def
*count
= nir_load_subgroup_invocation(b
);
423 nir_ssa_def
*mask
= nir_imm_intN_t(b
, ~0ull, options
->ballot_bit_size
);
424 if (intrin
->intrinsic
== nir_intrinsic_ballot_bit_count_inclusive
) {
425 const unsigned bits
= options
->ballot_bit_size
;
426 mask
= nir_ushr(b
, mask
, nir_isub(b
, nir_imm_int(b
, bits
- 1), count
));
428 mask
= nir_inot(b
, nir_ishl(b
, mask
, count
));
431 assert(intrin
->src
[0].is_ssa
);
432 nir_ssa_def
*int_val
= ballot_type_to_uint(b
, intrin
->src
[0].ssa
,
433 options
->ballot_bit_size
);
435 return nir_bit_count(b
, nir_iand(b
, int_val
, mask
));
438 case nir_intrinsic_elect
: {
439 nir_intrinsic_instr
*first
=
440 nir_intrinsic_instr_create(b
->shader
,
441 nir_intrinsic_first_invocation
);
442 nir_ssa_dest_init(&first
->instr
, &first
->dest
, 1, 32, NULL
);
443 nir_builder_instr_insert(b
, &first
->instr
);
445 return nir_ieq(b
, nir_load_subgroup_invocation(b
), &first
->dest
.ssa
);
448 case nir_intrinsic_shuffle
:
449 if (options
->lower_to_scalar
&& intrin
->num_components
> 1)
450 return lower_subgroup_op_to_scalar(b
, intrin
, options
->lower_shuffle_to_32bit
);
451 else if (options
->lower_shuffle_to_32bit
&& intrin
->src
[0].ssa
->bit_size
== 64)
452 return lower_subgroup_op_to_32bit(b
, intrin
);
455 case nir_intrinsic_shuffle_xor
:
456 case nir_intrinsic_shuffle_up
:
457 case nir_intrinsic_shuffle_down
:
458 if (options
->lower_shuffle
)
459 return lower_shuffle(b
, intrin
, options
->lower_to_scalar
, options
->lower_shuffle_to_32bit
);
460 else if (options
->lower_to_scalar
&& intrin
->num_components
> 1)
461 return lower_subgroup_op_to_scalar(b
, intrin
, options
->lower_shuffle_to_32bit
);
462 else if (options
->lower_shuffle_to_32bit
&& intrin
->src
[0].ssa
->bit_size
== 64)
463 return lower_subgroup_op_to_32bit(b
, intrin
);
466 case nir_intrinsic_quad_broadcast
:
467 case nir_intrinsic_quad_swap_horizontal
:
468 case nir_intrinsic_quad_swap_vertical
:
469 case nir_intrinsic_quad_swap_diagonal
:
470 if (options
->lower_quad
)
471 return lower_shuffle(b
, intrin
, options
->lower_to_scalar
, false);
472 else if (options
->lower_to_scalar
&& intrin
->num_components
> 1)
473 return lower_subgroup_op_to_scalar(b
, intrin
, false);
476 case nir_intrinsic_reduce
:
477 case nir_intrinsic_inclusive_scan
:
478 case nir_intrinsic_exclusive_scan
:
479 if (options
->lower_to_scalar
&& intrin
->num_components
> 1)
480 return lower_subgroup_op_to_scalar(b
, intrin
, false);
491 nir_lower_subgroups(nir_shader
*shader
,
492 const nir_lower_subgroups_options
*options
)
494 return nir_shader_lower_instructions(shader
,
495 lower_subgroups_filter
,
496 lower_subgroups_instr
,