/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
/**
 * \file nir_lower_subgroups.c
 */
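/* Builds a single 32-bit copy of a 64-bit subgroup intrinsic that operates
 * on either the low (component == 0) or high (component == 1) half of the
 * 64-bit source, and inserts it into the shader.
 */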
static nir_intrinsic_instr *
lower_subgroups_64bit_split_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
                                      unsigned int component)
{
   nir_ssa_def *comp;
   if (component == 0)
      comp = nir_unpack_64_2x32_split_x(b, intrin->src[0].ssa);
   else
      comp = nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa);

   nir_intrinsic_instr *intr = nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
   nir_ssa_dest_init(&intr->instr, &intr->dest, 1, 32, NULL);
   intr->const_index[0] = intrin->const_index[0];
   intr->const_index[1] = intrin->const_index[1];
   intr->src[0] = nir_src_for_ssa(comp);
   if (nir_intrinsic_infos[intrin->intrinsic].num_srcs == 2)
      nir_src_copy(&intr->src[1], &intrin->src[1], intr);

   intr->num_components = 1;
   nir_builder_instr_insert(b, &intr->instr);
   return intr;
}
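/* Lowers a subgroup intrinsic with a 64-bit source to two 32-bit intrinsics,
 * one per half, and packs the two 32-bit results back into a 64-bit value.
 */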
static nir_ssa_def *
lower_subgroup_op_to_32bit(nir_builder *b, nir_intrinsic_instr *intrin)
{
   assert(intrin->src[0].ssa->bit_size == 64);
   nir_intrinsic_instr *intr_x = lower_subgroups_64bit_split_intrinsic(b, intrin, 0);
   nir_intrinsic_instr *intr_y = lower_subgroups_64bit_split_intrinsic(b, intrin, 1);
   return nir_pack_64_2x32_split(b, &intr_x->dest.ssa, &intr_y->dest.ssa);
}
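/* Converts a uvec4 ballot value to a uint32_t or uint64_t scalar */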
static nir_ssa_def *
ballot_type_to_uint(nir_builder *b, nir_ssa_def *value, unsigned bit_size)
{
   /* We only use this on uvec4 types */
   assert(value->num_components == 4 && value->bit_size == 32);

   if (bit_size == 32) {
      return nir_channel(b, value, 0);
   } else {
      assert(bit_size == 64);
      return nir_pack_64_2x32_split(b, nir_channel(b, value, 0),
                                       nir_channel(b, value, 1));
   }
}
/* Converts a uint32_t or uint64_t value to uint64_t or uvec4 */
static nir_ssa_def *
uint_to_ballot_type(nir_builder *b, nir_ssa_def *value,
                    unsigned num_components, unsigned bit_size)
{
   assert(value->num_components == 1);
   assert(value->bit_size == 32 || value->bit_size == 64);

   nir_ssa_def *zero = nir_imm_int(b, 0);
   if (num_components > 1) {
      /* SPIR-V uses a uvec4 for ballot values */
      assert(num_components == 4);
      assert(bit_size == 32);

      if (value->bit_size == 32) {
         return nir_vec4(b, value, zero, zero, zero);
      } else {
         assert(value->bit_size == 64);
         return nir_vec4(b, nir_unpack_64_2x32_split_x(b, value),
                            nir_unpack_64_2x32_split_y(b, value),
                            zero, zero);
      }
   } else {
      /* GLSL uses a uint64_t for ballot values */
      assert(num_components == 1);
      assert(bit_size == 64);

      if (value->bit_size == 32) {
         return nir_pack_64_2x32_split(b, value, zero);
      } else {
         assert(value->bit_size == 64);
         return value;
      }
   }
}
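/* Splits a vector subgroup operation into one scalar operation per channel
 * and recombines the per-channel results with a vec.  When lower_to_32bit is
 * set, 64-bit channels are further split with lower_subgroup_op_to_32bit().
 */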
static nir_ssa_def *
lower_subgroup_op_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin,
                            bool lower_to_32bit)
{
   /* This is safe to call on scalar things but it would be silly */
   assert(intrin->dest.ssa.num_components > 1);

   nir_ssa_def *value = nir_ssa_for_src(b, intrin->src[0],
                                        intrin->num_components);
   nir_ssa_def *reads[4];

   for (unsigned i = 0; i < intrin->num_components; i++) {
      nir_intrinsic_instr *chan_intrin =
         nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
      nir_ssa_dest_init(&chan_intrin->instr, &chan_intrin->dest,
                        1, intrin->dest.ssa.bit_size, NULL);
      chan_intrin->num_components = 1;

      chan_intrin->src[0] = nir_src_for_ssa(nir_channel(b, value, i));

      if (nir_intrinsic_infos[intrin->intrinsic].num_srcs > 1) {
         assert(nir_intrinsic_infos[intrin->intrinsic].num_srcs == 2);
         nir_src_copy(&chan_intrin->src[1], &intrin->src[1], chan_intrin);
      }

      chan_intrin->const_index[0] = intrin->const_index[0];
      chan_intrin->const_index[1] = intrin->const_index[1];

      if (lower_to_32bit && chan_intrin->src[0].ssa->bit_size == 64) {
         reads[i] = lower_subgroup_op_to_32bit(b, chan_intrin);
      } else {
         nir_builder_instr_insert(b, &chan_intrin->instr);
         reads[i] = &chan_intrin->dest.ssa;
      }
   }

   return nir_vec(b, reads, intrin->num_components);
}
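/* Lowers a vector vote_ieq/vote_feq to one scalar vote per channel and ANDs
 * the per-channel results together.
 */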
static nir_ssa_def *
lower_vote_eq_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin)
{
   assert(intrin->src[0].is_ssa);
   nir_ssa_def *value = intrin->src[0].ssa;

   nir_ssa_def *result = NULL;
   for (unsigned i = 0; i < intrin->num_components; i++) {
      nir_intrinsic_instr *chan_intrin =
         nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
      nir_ssa_dest_init(&chan_intrin->instr, &chan_intrin->dest,
                        1, intrin->dest.ssa.bit_size, NULL);
      chan_intrin->num_components = 1;
      chan_intrin->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
      nir_builder_instr_insert(b, &chan_intrin->instr);

      if (result) {
         result = nir_iand(b, result, &chan_intrin->dest.ssa);
      } else {
         result = &chan_intrin->dest.ssa;
      }
   }

   return result;
}
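/* Implements vote_ieq/vote_feq in terms of ballot: each channel is compared
 * against the value read from the first invocation, the per-channel results
 * are ANDed, and the vote passes iff the ballot of the inverted result is
 * zero (i.e. no invocation disagrees).
 */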
static nir_ssa_def *
lower_vote_eq_to_ballot(nir_builder *b, nir_intrinsic_instr *intrin,
                        const nir_lower_subgroups_options *options)
{
   assert(intrin->src[0].is_ssa);
   nir_ssa_def *value = intrin->src[0].ssa;

   /* We have to implicitly lower to scalar */
   nir_ssa_def *all_eq = NULL;
   for (unsigned i = 0; i < intrin->num_components; i++) {
      nir_intrinsic_instr *rfi =
         nir_intrinsic_instr_create(b->shader,
                                    nir_intrinsic_read_first_invocation);
      nir_ssa_dest_init(&rfi->instr, &rfi->dest,
                        1, value->bit_size, NULL);
      rfi->num_components = 1;
      rfi->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
      nir_builder_instr_insert(b, &rfi->instr);

      nir_ssa_def *is_eq;
      if (intrin->intrinsic == nir_intrinsic_vote_feq) {
         is_eq = nir_feq(b, &rfi->dest.ssa, nir_channel(b, value, i));
      } else {
         is_eq = nir_ieq(b, &rfi->dest.ssa, nir_channel(b, value, i));
      }

      if (all_eq == NULL) {
         all_eq = is_eq;
      } else {
         all_eq = nir_iand(b, all_eq, is_eq);
      }
   }

   nir_intrinsic_instr *ballot =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_ballot);
   nir_ssa_dest_init(&ballot->instr, &ballot->dest,
                     1, options->ballot_bit_size, NULL);
   ballot->num_components = 1;
   ballot->src[0] = nir_src_for_ssa(nir_inot(b, all_eq));
   nir_builder_instr_insert(b, &ballot->instr);

   return nir_ieq(b, &ballot->dest.ssa,
                  nir_imm_intN_t(b, 0, options->ballot_bit_size));
}
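/* Lowers shuffle_xor/up/down, quad_broadcast and the quad swaps to a generic
 * nir_intrinsic_shuffle by computing the source invocation index explicitly
 * from the current subgroup invocation.
 */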
static nir_ssa_def *
lower_shuffle(nir_builder *b, nir_intrinsic_instr *intrin,
              bool lower_to_scalar, bool lower_to_32bit)
{
   nir_ssa_def *index = nir_load_subgroup_invocation(b);
   switch (intrin->intrinsic) {
   case nir_intrinsic_shuffle_xor:
      assert(intrin->src[1].is_ssa);
      index = nir_ixor(b, index, intrin->src[1].ssa);
      break;
   case nir_intrinsic_shuffle_up:
      assert(intrin->src[1].is_ssa);
      index = nir_isub(b, index, intrin->src[1].ssa);
      break;
   case nir_intrinsic_shuffle_down:
      assert(intrin->src[1].is_ssa);
      index = nir_iadd(b, index, intrin->src[1].ssa);
      break;
   case nir_intrinsic_quad_broadcast:
      assert(intrin->src[1].is_ssa);
      index = nir_ior(b, nir_iand(b, index, nir_imm_int(b, ~0x3)),
                         intrin->src[1].ssa);
      break;
   case nir_intrinsic_quad_swap_horizontal:
      /* For Quad operations, subgroups are divided into quads where
       * (invocation % 4) is the index to a square arranged as follows:
       *
       *    +---+---+
       *    | 0 | 1 |
       *    +---+---+
       *    | 2 | 3 |
       *    +---+---+
       */
      index = nir_ixor(b, index, nir_imm_int(b, 0x1));
      break;
   case nir_intrinsic_quad_swap_vertical:
      index = nir_ixor(b, index, nir_imm_int(b, 0x2));
      break;
   case nir_intrinsic_quad_swap_diagonal:
      index = nir_ixor(b, index, nir_imm_int(b, 0x3));
      break;
   default:
      unreachable("Invalid intrinsic");
   }

   nir_intrinsic_instr *shuffle =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_shuffle);
   shuffle->num_components = intrin->num_components;
   nir_src_copy(&shuffle->src[0], &intrin->src[0], shuffle);
   shuffle->src[1] = nir_src_for_ssa(index);
   nir_ssa_dest_init(&shuffle->instr, &shuffle->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);

   if (lower_to_scalar && shuffle->num_components > 1) {
      return lower_subgroup_op_to_scalar(b, shuffle, lower_to_32bit);
   } else if (lower_to_32bit && shuffle->src[0].ssa->bit_size == 64) {
      return lower_subgroup_op_to_32bit(b, shuffle);
   } else {
      nir_builder_instr_insert(b, &shuffle->instr);
      return &shuffle->dest.ssa;
   }
}
static bool
lower_subgroups_filter(const nir_instr *instr, const void *_options)
{
   return instr->type == nir_instr_type_intrinsic;
}
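/* Returns a mask with one bit set per invocation in the subgroup, i.e.
 * ~0 >> (bit_size - subgroup_size).
 */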
static nir_ssa_def *
build_subgroup_mask(nir_builder *b, unsigned bit_size,
                    const nir_lower_subgroups_options *options)
{
   return nir_ushr(b, nir_imm_intN_t(b, ~0ull, bit_size),
                      nir_isub(b, nir_imm_int(b, bit_size),
                                  nir_load_subgroup_size(b)));
}
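/* Lowers a single subgroup intrinsic according to the given options.
 * Returns the replacement SSA value, or NULL if the instruction is left
 * untouched.
 */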
static nir_ssa_def *
lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
{
   const nir_lower_subgroups_options *options = _options;

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
   switch (intrin->intrinsic) {
   case nir_intrinsic_vote_any:
   case nir_intrinsic_vote_all:
      if (options->lower_vote_trivial)
         return nir_ssa_for_src(b, intrin->src[0], 1);
      break;

   case nir_intrinsic_vote_feq:
   case nir_intrinsic_vote_ieq:
      if (options->lower_vote_trivial)
         return nir_imm_true(b);

      if (options->lower_vote_eq_to_ballot)
         return lower_vote_eq_to_ballot(b, intrin, options);

      if (options->lower_to_scalar && intrin->num_components > 1)
         return lower_vote_eq_to_scalar(b, intrin);
      break;

   case nir_intrinsic_load_subgroup_size:
      if (options->subgroup_size)
         return nir_imm_int(b, options->subgroup_size);
      break;

   case nir_intrinsic_read_invocation:
   case nir_intrinsic_read_first_invocation:
      if (options->lower_to_scalar && intrin->num_components > 1)
         return lower_subgroup_op_to_scalar(b, intrin, false);
      break;

   case nir_intrinsic_load_subgroup_eq_mask:
   case nir_intrinsic_load_subgroup_ge_mask:
   case nir_intrinsic_load_subgroup_gt_mask:
   case nir_intrinsic_load_subgroup_le_mask:
   case nir_intrinsic_load_subgroup_lt_mask: {
      if (!options->lower_subgroup_masks)
         return NULL;

      /* If either the result or the requested bit size is 64-bits then we
       * know that we have 64-bit types and using them will probably be more
       * efficient than messing around with 32-bit shifts and packing.
       */
      const unsigned bit_size = MAX2(options->ballot_bit_size,
                                     intrin->dest.ssa.bit_size);

      nir_ssa_def *count = nir_load_subgroup_invocation(b);
      nir_ssa_def *val;
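      /* Each mask is derived from the invocation index with shifts.  For
       * example, for invocation 3 in an 8-wide subgroup (low 8 bits shown):
       * eq = 00001000, ge = 11111000, gt = 11110000, le = 00001111,
       * lt = 00000111.
       */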
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_subgroup_eq_mask:
         val = nir_ishl(b, nir_imm_intN_t(b, 1ull, bit_size), count);
         break;
      case nir_intrinsic_load_subgroup_ge_mask:
         val = nir_iand(b, nir_ishl(b, nir_imm_intN_t(b, ~0ull, bit_size), count),
                           build_subgroup_mask(b, bit_size, options));
         break;
      case nir_intrinsic_load_subgroup_gt_mask:
         val = nir_iand(b, nir_ishl(b, nir_imm_intN_t(b, ~1ull, bit_size), count),
                           build_subgroup_mask(b, bit_size, options));
         break;
      case nir_intrinsic_load_subgroup_le_mask:
         val = nir_inot(b, nir_ishl(b, nir_imm_intN_t(b, ~1ull, bit_size), count));
         break;
      case nir_intrinsic_load_subgroup_lt_mask:
         val = nir_inot(b, nir_ishl(b, nir_imm_intN_t(b, ~0ull, bit_size), count));
         break;
      default:
         unreachable("you seriously can't tell this is unreachable?");
      }

      return uint_to_ballot_type(b, val,
                                 intrin->dest.ssa.num_components,
                                 intrin->dest.ssa.bit_size);
   }
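   /* If the native ballot already has the requested shape, leave it alone;
    * otherwise emit a scalar ballot of ballot_bit_size bits and convert it
    * to the type the shader expects (uint64_t or uvec4).
    */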
   case nir_intrinsic_ballot: {
      if (intrin->dest.ssa.num_components == 1 &&
          intrin->dest.ssa.bit_size == options->ballot_bit_size)
         return NULL;

      nir_intrinsic_instr *ballot =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_ballot);
      ballot->num_components = 1;
      nir_ssa_dest_init(&ballot->instr, &ballot->dest,
                        1, options->ballot_bit_size, NULL);
      nir_src_copy(&ballot->src[0], &intrin->src[0], ballot);
      nir_builder_instr_insert(b, &ballot->instr);

      return uint_to_ballot_type(b, &ballot->dest.ssa,
                                 intrin->dest.ssa.num_components,
                                 intrin->dest.ssa.bit_size);
   }
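   /* The ballot read operations are lowered by converting the uvec4 ballot
    * to a single ballot_bit_size integer and using ordinary bit operations.
    */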
   case nir_intrinsic_ballot_bitfield_extract:
   case nir_intrinsic_ballot_bit_count_reduce:
   case nir_intrinsic_ballot_find_lsb:
   case nir_intrinsic_ballot_find_msb: {
      assert(intrin->src[0].is_ssa);
      nir_ssa_def *int_val = ballot_type_to_uint(b, intrin->src[0].ssa,
                                                 options->ballot_bit_size);
      switch (intrin->intrinsic) {
      case nir_intrinsic_ballot_bitfield_extract:
         assert(intrin->src[1].is_ssa);
         return nir_i2b(b, nir_iand(b, nir_ushr(b, int_val,
                                                   intrin->src[1].ssa),
                                       nir_imm_intN_t(b, 1, options->ballot_bit_size)));
      case nir_intrinsic_ballot_bit_count_reduce:
         return nir_bit_count(b, int_val);
      case nir_intrinsic_ballot_find_lsb:
         return nir_find_lsb(b, int_val);
      case nir_intrinsic_ballot_find_msb:
         return nir_ufind_msb(b, int_val);
      default:
         unreachable("you seriously can't tell this is unreachable?");
      }
   }
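   /* bit_count_inclusive counts the set ballot bits at positions <= the
    * current invocation; bit_count_exclusive counts those strictly below it.
    * The mask below selects exactly those positions before the popcount.
    */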
   case nir_intrinsic_ballot_bit_count_exclusive:
   case nir_intrinsic_ballot_bit_count_inclusive: {
      nir_ssa_def *count = nir_load_subgroup_invocation(b);
      nir_ssa_def *mask = nir_imm_intN_t(b, ~0ull, options->ballot_bit_size);
      if (intrin->intrinsic == nir_intrinsic_ballot_bit_count_inclusive) {
         const unsigned bits = options->ballot_bit_size;
         mask = nir_ushr(b, mask, nir_isub(b, nir_imm_int(b, bits - 1), count));
      } else {
         mask = nir_inot(b, nir_ishl(b, mask, count));
      }

      assert(intrin->src[0].is_ssa);
      nir_ssa_def *int_val = ballot_type_to_uint(b, intrin->src[0].ssa,
                                                 options->ballot_bit_size);

      return nir_bit_count(b, nir_iand(b, int_val, mask));
   }
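   /* elect: true exactly for the first active invocation, implemented as
    * subgroup_invocation == first_invocation.
    */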
   case nir_intrinsic_elect: {
      nir_intrinsic_instr *first =
         nir_intrinsic_instr_create(b->shader,
                                    nir_intrinsic_first_invocation);
      nir_ssa_dest_init(&first->instr, &first->dest, 1, 32, NULL);
      nir_builder_instr_insert(b, &first->instr);

      return nir_ieq(b, nir_load_subgroup_invocation(b), &first->dest.ssa);
   }
   case nir_intrinsic_shuffle:
      if (options->lower_to_scalar && intrin->num_components > 1)
         return lower_subgroup_op_to_scalar(b, intrin, options->lower_shuffle_to_32bit);
      else if (options->lower_shuffle_to_32bit && intrin->src[0].ssa->bit_size == 64)
         return lower_subgroup_op_to_32bit(b, intrin);
      break;

   case nir_intrinsic_shuffle_xor:
   case nir_intrinsic_shuffle_up:
   case nir_intrinsic_shuffle_down:
      if (options->lower_shuffle)
         return lower_shuffle(b, intrin, options->lower_to_scalar, options->lower_shuffle_to_32bit);
      else if (options->lower_to_scalar && intrin->num_components > 1)
         return lower_subgroup_op_to_scalar(b, intrin, options->lower_shuffle_to_32bit);
      else if (options->lower_shuffle_to_32bit && intrin->src[0].ssa->bit_size == 64)
         return lower_subgroup_op_to_32bit(b, intrin);
      break;

   case nir_intrinsic_quad_broadcast:
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
      if (options->lower_quad)
         return lower_shuffle(b, intrin, options->lower_to_scalar, false);
      else if (options->lower_to_scalar && intrin->num_components > 1)
         return lower_subgroup_op_to_scalar(b, intrin, false);
      break;

   case nir_intrinsic_reduce: {
      nir_ssa_def *ret = NULL;
      /* A cluster size greater than the subgroup size is implementation-defined */
      if (options->subgroup_size &&
          nir_intrinsic_cluster_size(intrin) >= options->subgroup_size) {
         nir_intrinsic_set_cluster_size(intrin, 0);
         ret = NIR_LOWER_INSTR_PROGRESS;
      }
      if (options->lower_to_scalar && intrin->num_components > 1)
         ret = lower_subgroup_op_to_scalar(b, intrin, false);
      return ret;
   }
   case nir_intrinsic_inclusive_scan:
   case nir_intrinsic_exclusive_scan:
      if (options->lower_to_scalar && intrin->num_components > 1)
         return lower_subgroup_op_to_scalar(b, intrin, false);
      break;

   default:
      break;
   }

   return NULL;
}
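/* Runs the lowering on every intrinsic instruction in the shader and returns
 * true if any instruction was changed.
 */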
bool
nir_lower_subgroups(nir_shader *shader,
                    const nir_lower_subgroups_options *options)
{
   return nir_shader_lower_instructions(shader,
                                        lower_subgroups_filter,
                                        lower_subgroups_instr,
                                        (void *)options);
}