nir/loop_analyze: Use new eval_const_* helpers in test_iterations
[mesa.git] / src / compiler / spirv / vtn_subgroup.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "vtn_private.h"
25
26 static void
27 vtn_build_subgroup_instr(struct vtn_builder *b,
28 nir_intrinsic_op nir_op,
29 struct vtn_ssa_value *dst,
30 struct vtn_ssa_value *src0,
31 nir_ssa_def *index,
32 unsigned const_idx0,
33 unsigned const_idx1)
34 {
35 /* Some of the subgroup operations take an index. SPIR-V allows this to be
36 * any integer type. To make things simpler for drivers, we only support
37 * 32-bit indices.
38 */
39 if (index && index->bit_size != 32)
40 index = nir_u2u32(&b->nb, index);
41
42 vtn_assert(dst->type == src0->type);
43 if (!glsl_type_is_vector_or_scalar(dst->type)) {
44 for (unsigned i = 0; i < glsl_get_length(dst->type); i++) {
45 vtn_build_subgroup_instr(b, nir_op, dst->elems[i],
46 src0->elems[i], index,
47 const_idx0, const_idx1);
48 }
49 return;
50 }
51
52 nir_intrinsic_instr *intrin =
53 nir_intrinsic_instr_create(b->nb.shader, nir_op);
54 nir_ssa_dest_init_for_type(&intrin->instr, &intrin->dest,
55 dst->type, NULL);
56 intrin->num_components = intrin->dest.ssa.num_components;
57
58 intrin->src[0] = nir_src_for_ssa(src0->def);
59 if (index)
60 intrin->src[1] = nir_src_for_ssa(index);
61
62 intrin->const_index[0] = const_idx0;
63 intrin->const_index[1] = const_idx1;
64
65 nir_builder_instr_insert(&b->nb, &intrin->instr);
66
67 dst->def = &intrin->dest.ssa;
68 }
69
/* Translates one SPIR-V subgroup/group instruction into NIR.
 *
 * w points at the instruction's words (w[1] = result type id, w[2] = result
 * id); count is the instruction's word count.  Several cases below do
 * "++w;" and then fall through: the non-uniform/core forms carry an extra
 * Execution-Scope operand that the KHR forms lack, so bumping w by one
 * makes the remaining operands line up at the same w[] offsets for both
 * encodings.
 */
void
vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);

   val->ssa = vtn_create_ssa_value(b, val->type->type);

   switch (opcode) {
   case SpvOpGroupNonUniformElect: {
      vtn_fail_if(val->type->type != glsl_bool_type(),
                  "OpGroupNonUniformElect must return a Bool");
      /* elect takes no sources; it just produces a per-invocation bool. */
      nir_intrinsic_instr *elect =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_elect);
      nir_ssa_dest_init_for_type(&elect->instr, &elect->dest,
                                 val->type->type, NULL);
      nir_builder_instr_insert(&b->nb, &elect->instr);
      val->ssa->def = &elect->dest.ssa;
      break;
   }

   /* Skip the Execution scope word so w[3] is the predicate for both
    * encodings.
    */
   case SpvOpGroupNonUniformBallot: ++w;
      /* fallthrough */
   case SpvOpSubgroupBallotKHR: {
      vtn_fail_if(val->type->type != glsl_vector_type(GLSL_TYPE_UINT, 4),
                  "OpGroupNonUniformBallot must return a uvec4");
      nir_intrinsic_instr *ballot =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_ballot);
      ballot->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
      nir_ssa_dest_init(&ballot->instr, &ballot->dest, 4, 32, NULL);
      ballot->num_components = 4;
      nir_builder_instr_insert(&b->nb, &ballot->instr);
      val->ssa->def = &ballot->dest.ssa;
      break;
   }

   case SpvOpGroupNonUniformInverseBallot: {
      /* This one is just a BallotBitfieldExtract with subgroup invocation.
       * We could add a NIR intrinsic but it's easier to just lower it on the
       * spot.
       */
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_ballot_bitfield_extract);

      /* w[3] is the Execution scope; w[4] is the ballot value. */
      intrin->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
      intrin->src[1] = nir_src_for_ssa(nir_load_subgroup_invocation(&b->nb));

      nir_ssa_dest_init_for_type(&intrin->instr, &intrin->dest,
                                 val->type->type, NULL);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      val->ssa->def = &intrin->dest.ssa;
      break;
   }

   case SpvOpGroupNonUniformBallotBitExtract:
   case SpvOpGroupNonUniformBallotBitCount:
   case SpvOpGroupNonUniformBallotFindLSB:
   case SpvOpGroupNonUniformBallotFindMSB: {
      nir_ssa_def *src0, *src1 = NULL;
      nir_intrinsic_op op;
      switch (opcode) {
      case SpvOpGroupNonUniformBallotBitExtract:
         op = nir_intrinsic_ballot_bitfield_extract;
         src0 = vtn_ssa_value(b, w[4])->def;
         src1 = vtn_ssa_value(b, w[5])->def;
         break;
      case SpvOpGroupNonUniformBallotBitCount:
         /* BitCount carries a GroupOperation word (w[4]) selecting
          * reduce/inclusive/exclusive; the ballot value is then w[5].
          */
         switch ((SpvGroupOperation)w[4]) {
         case SpvGroupOperationReduce:
            op = nir_intrinsic_ballot_bit_count_reduce;
            break;
         case SpvGroupOperationInclusiveScan:
            op = nir_intrinsic_ballot_bit_count_inclusive;
            break;
         case SpvGroupOperationExclusiveScan:
            op = nir_intrinsic_ballot_bit_count_exclusive;
            break;
         default:
            unreachable("Invalid group operation");
         }
         src0 = vtn_ssa_value(b, w[5])->def;
         break;
      case SpvOpGroupNonUniformBallotFindLSB:
         op = nir_intrinsic_ballot_find_lsb;
         src0 = vtn_ssa_value(b, w[4])->def;
         break;
      case SpvOpGroupNonUniformBallotFindMSB:
         op = nir_intrinsic_ballot_find_msb;
         src0 = vtn_ssa_value(b, w[4])->def;
         break;
      default:
         unreachable("Unhandled opcode");
      }

      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->nb.shader, op);

      intrin->src[0] = nir_src_for_ssa(src0);
      if (src1)
         intrin->src[1] = nir_src_for_ssa(src1);

      nir_ssa_dest_init_for_type(&intrin->instr, &intrin->dest,
                                 val->type->type, NULL);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      val->ssa->def = &intrin->dest.ssa;
      break;
   }

   /* Skip the Execution scope word so w[3] is the value operand for both
    * encodings.
    */
   case SpvOpGroupNonUniformBroadcastFirst: ++w;
      /* fallthrough */
   case SpvOpSubgroupFirstInvocationKHR:
      vtn_build_subgroup_instr(b, nir_intrinsic_read_first_invocation,
                               val->ssa, vtn_ssa_value(b, w[3]), NULL, 0, 0);
      break;

   case SpvOpGroupNonUniformBroadcast:
      /* fallthrough */
   case SpvOpGroupBroadcast: ++w;
      /* fallthrough: after the bump, w[3] = value and w[4] = index, same as
       * the KHR encoding.
       */
   case SpvOpSubgroupReadInvocationKHR:
      vtn_build_subgroup_instr(b, nir_intrinsic_read_invocation,
                               val->ssa, vtn_ssa_value(b, w[3]),
                               vtn_ssa_value(b, w[4])->def, 0, 0);
      break;

   case SpvOpGroupNonUniformAll:
   case SpvOpGroupNonUniformAny:
   case SpvOpGroupNonUniformAllEqual:
   case SpvOpGroupAll:
   case SpvOpGroupAny:
   case SpvOpSubgroupAllKHR:
   case SpvOpSubgroupAnyKHR:
   case SpvOpSubgroupAllEqualKHR: {
      vtn_fail_if(val->type->type != glsl_bool_type(),
                  "OpGroupNonUniform(All|Any|AllEqual) must return a bool");
      nir_intrinsic_op op;
      switch (opcode) {
      case SpvOpGroupNonUniformAll:
      case SpvOpGroupAll:
      case SpvOpSubgroupAllKHR:
         op = nir_intrinsic_vote_all;
         break;
      case SpvOpGroupNonUniformAny:
      case SpvOpGroupAny:
      case SpvOpSubgroupAnyKHR:
         op = nir_intrinsic_vote_any;
         break;
      case SpvOpGroupNonUniformAllEqual:
      case SpvOpSubgroupAllEqualKHR: {
         /* Pick the float- or integer-equality vote based on the operand's
          * base type.
          */
         switch (glsl_get_base_type(val->type->type)) {
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_DOUBLE:
            op = nir_intrinsic_vote_feq;
            break;
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_BOOL:
            op = nir_intrinsic_vote_ieq;
            break;
         default:
            unreachable("Unhandled type");
         }
         break;
      }
      default:
         unreachable("Unhandled opcode");
      }

      /* The Group/NonUniform forms carry an Execution-Scope word before the
       * value (so the value is w[4]); the KHR forms do not (value is w[3]).
       */
      nir_ssa_def *src0;
      if (opcode == SpvOpGroupNonUniformAll || opcode == SpvOpGroupAll ||
          opcode == SpvOpGroupNonUniformAny || opcode == SpvOpGroupAny ||
          opcode == SpvOpGroupNonUniformAllEqual) {
         src0 = vtn_ssa_value(b, w[4])->def;
      } else {
         src0 = vtn_ssa_value(b, w[3])->def;
      }
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->nb.shader, op);
      intrin->num_components = src0->num_components;
      intrin->src[0] = nir_src_for_ssa(src0);
      nir_ssa_dest_init_for_type(&intrin->instr, &intrin->dest,
                                 val->type->type, NULL);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      val->ssa->def = &intrin->dest.ssa;
      break;
   }

   case SpvOpGroupNonUniformShuffle:
   case SpvOpGroupNonUniformShuffleXor:
   case SpvOpGroupNonUniformShuffleUp:
   case SpvOpGroupNonUniformShuffleDown: {
      nir_intrinsic_op op;
      switch (opcode) {
      case SpvOpGroupNonUniformShuffle:
         op = nir_intrinsic_shuffle;
         break;
      case SpvOpGroupNonUniformShuffleXor:
         op = nir_intrinsic_shuffle_xor;
         break;
      case SpvOpGroupNonUniformShuffleUp:
         op = nir_intrinsic_shuffle_up;
         break;
      case SpvOpGroupNonUniformShuffleDown:
         op = nir_intrinsic_shuffle_down;
         break;
      default:
         unreachable("Invalid opcode");
      }
      /* w[4] = value, w[5] = id/mask/delta depending on the variant. */
      vtn_build_subgroup_instr(b, op, val->ssa, vtn_ssa_value(b, w[4]),
                               vtn_ssa_value(b, w[5])->def, 0, 0);
      break;
   }

   case SpvOpGroupNonUniformQuadBroadcast:
      vtn_build_subgroup_instr(b, nir_intrinsic_quad_broadcast,
                               val->ssa, vtn_ssa_value(b, w[4]),
                               vtn_ssa_value(b, w[5])->def, 0, 0);
      break;

   case SpvOpGroupNonUniformQuadSwap: {
      /* The swap direction (w[5]) must be a constant; map it onto the three
       * dedicated quad-swap intrinsics.
       */
      unsigned direction = vtn_constant_uint(b, w[5]);
      nir_intrinsic_op op;
      switch (direction) {
      case 0:
         op = nir_intrinsic_quad_swap_horizontal;
         break;
      case 1:
         op = nir_intrinsic_quad_swap_vertical;
         break;
      case 2:
         op = nir_intrinsic_quad_swap_diagonal;
         break;
      default:
         vtn_fail("Invalid constant value in OpGroupNonUniformQuadSwap");
      }
      vtn_build_subgroup_instr(b, op, val->ssa, vtn_ssa_value(b, w[4]),
                               NULL, 0, 0);
      break;
   }

   case SpvOpGroupNonUniformIAdd:
   case SpvOpGroupNonUniformFAdd:
   case SpvOpGroupNonUniformIMul:
   case SpvOpGroupNonUniformFMul:
   case SpvOpGroupNonUniformSMin:
   case SpvOpGroupNonUniformUMin:
   case SpvOpGroupNonUniformFMin:
   case SpvOpGroupNonUniformSMax:
   case SpvOpGroupNonUniformUMax:
   case SpvOpGroupNonUniformFMax:
   case SpvOpGroupNonUniformBitwiseAnd:
   case SpvOpGroupNonUniformBitwiseOr:
   case SpvOpGroupNonUniformBitwiseXor:
   case SpvOpGroupNonUniformLogicalAnd:
   case SpvOpGroupNonUniformLogicalOr:
   case SpvOpGroupNonUniformLogicalXor:
   case SpvOpGroupIAdd:
   case SpvOpGroupFAdd:
   case SpvOpGroupFMin:
   case SpvOpGroupUMin:
   case SpvOpGroupSMin:
   case SpvOpGroupFMax:
   case SpvOpGroupUMax:
   case SpvOpGroupSMax:
   case SpvOpGroupIAddNonUniformAMD:
   case SpvOpGroupFAddNonUniformAMD:
   case SpvOpGroupFMinNonUniformAMD:
   case SpvOpGroupUMinNonUniformAMD:
   case SpvOpGroupSMinNonUniformAMD:
   case SpvOpGroupFMaxNonUniformAMD:
   case SpvOpGroupUMaxNonUniformAMD:
   case SpvOpGroupSMaxNonUniformAMD: {
      /* Reductions/scans: first pick the ALU op that combines values, then
       * pick the scan flavor from the GroupOperation word.  Both are passed
       * to vtn_build_subgroup_instr as const_index values.
       */
      nir_op reduction_op;
      switch (opcode) {
      case SpvOpGroupNonUniformIAdd:
      case SpvOpGroupIAdd:
      case SpvOpGroupIAddNonUniformAMD:
         reduction_op = nir_op_iadd;
         break;
      case SpvOpGroupNonUniformFAdd:
      case SpvOpGroupFAdd:
      case SpvOpGroupFAddNonUniformAMD:
         reduction_op = nir_op_fadd;
         break;
      case SpvOpGroupNonUniformIMul:
         reduction_op = nir_op_imul;
         break;
      case SpvOpGroupNonUniformFMul:
         reduction_op = nir_op_fmul;
         break;
      case SpvOpGroupNonUniformSMin:
      case SpvOpGroupSMin:
      case SpvOpGroupSMinNonUniformAMD:
         reduction_op = nir_op_imin;
         break;
      case SpvOpGroupNonUniformUMin:
      case SpvOpGroupUMin:
      case SpvOpGroupUMinNonUniformAMD:
         reduction_op = nir_op_umin;
         break;
      case SpvOpGroupNonUniformFMin:
      case SpvOpGroupFMin:
      case SpvOpGroupFMinNonUniformAMD:
         reduction_op = nir_op_fmin;
         break;
      case SpvOpGroupNonUniformSMax:
      case SpvOpGroupSMax:
      case SpvOpGroupSMaxNonUniformAMD:
         reduction_op = nir_op_imax;
         break;
      case SpvOpGroupNonUniformUMax:
      case SpvOpGroupUMax:
      case SpvOpGroupUMaxNonUniformAMD:
         reduction_op = nir_op_umax;
         break;
      case SpvOpGroupNonUniformFMax:
      case SpvOpGroupFMax:
      case SpvOpGroupFMaxNonUniformAMD:
         reduction_op = nir_op_fmax;
         break;
      case SpvOpGroupNonUniformBitwiseAnd:
      case SpvOpGroupNonUniformLogicalAnd:
         reduction_op = nir_op_iand;
         break;
      case SpvOpGroupNonUniformBitwiseOr:
      case SpvOpGroupNonUniformLogicalOr:
         reduction_op = nir_op_ior;
         break;
      case SpvOpGroupNonUniformBitwiseXor:
      case SpvOpGroupNonUniformLogicalXor:
         reduction_op = nir_op_ixor;
         break;
      default:
         unreachable("Invalid reduction operation");
      }

      nir_intrinsic_op op;
      unsigned cluster_size = 0;
      switch ((SpvGroupOperation)w[4]) {
      case SpvGroupOperationReduce:
         op = nir_intrinsic_reduce;
         break;
      case SpvGroupOperationInclusiveScan:
         op = nir_intrinsic_inclusive_scan;
         break;
      case SpvGroupOperationExclusiveScan:
         op = nir_intrinsic_exclusive_scan;
         break;
      case SpvGroupOperationClusteredReduce:
         /* Clustered reduce is a plain reduce plus a constant cluster size
          * in w[6]; cluster_size == 0 (the default above) means the whole
          * subgroup.
          */
         op = nir_intrinsic_reduce;
         assert(count == 7);
         cluster_size = vtn_constant_uint(b, w[6]);
         break;
      default:
         unreachable("Invalid group operation");
      }

      vtn_build_subgroup_instr(b, op, val->ssa, vtn_ssa_value(b, w[5]),
                               NULL, reduction_op, cluster_size);
      break;
   }

   default:
      unreachable("Invalid SPIR-V opcode");
   }
}