/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "nir/nir_deref.h"
#include "spirv_info.h"

#include "util/format/u_format.h"
#include "util/u_math.h"

#include <stdio.h>

void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

#ifndef NDEBUG
   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
#endif
}
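
/* Illustrative sketch (not part of this translator): a driver embedding
 * spirv_to_nir() can capture these messages by filling in the debug
 * callback in the options struct.  The callback and private_data below
 * are hypothetical examples.
 *
 *    static void
 *    my_spirv_debug(void *private_data, enum nir_spirv_debug_level level,
 *                   size_t spirv_offset, const char *message)
 *    {
 *       fprintf((FILE *)private_data, "[+%zu] %s\n", spirv_offset, message);
 *    }
 *
 *    // options.debug.func = my_spirv_debug;
 *    // options.debug.private_data = stderr;
 *
 * When no callback is installed, warnings and errors still reach stderr
 * in debug builds via the #ifndef NDEBUG branch above.
 */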

void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}

static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

#ifndef NDEBUG
   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
#endif

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}

static void
vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
{
   static int idx = 0;

   char filename[1024];
   int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
                      path, prefix, idx++);
   if (len < 0 || len >= sizeof(filename))
      return;

   FILE *f = fopen(filename, "w");
   if (f == NULL)
      return;

   fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
   fclose(f);

   vtn_info("SPIR-V shader dumped to %s", filename);
}

void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}

void
_vtn_err(struct vtn_builder *b, const char *file, unsigned line,
         const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
               file, line, fmt, args);
   va_end(args);
}

void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);
   va_end(args);

   const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
   if (dump_path)
      vtn_dump_shader(b, dump_path, "fail");

   longjmp(b->fail_jump, 1);
}
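
/* A note on the error-handling contract (sketch, simplified from the real
 * setup in spirv_to_nir()): _vtn_fail() never returns.  The top-level
 * entry point arms b->fail_jump with setjmp() before walking the binary,
 * roughly:
 *
 *    if (setjmp(b->fail_jump)) {
 *       // some handler below called vtn_fail(); clean up and bail
 *       return NULL;
 *    }
 *
 * This is why instruction handlers deep in this file can call vtn_fail()
 * freely without threading error codes back through every return value.
 */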

static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}

static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);

   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(type);
      nir_load_const_instr *load =
         nir_load_const_instr_create(b->shader, num_components, bit_size);

      memcpy(load->value, constant->values,
             sizeof(nir_const_value) * num_components);

      nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
      val->def = &load->def;
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++) {
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                elem_type);
         }
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                elem_type);
         }
      }
   }

   return val;
}

struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->type->type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer:
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;

   default:
      vtn_fail("Invalid type for an SSA value");
   }
}

struct vtn_value *
vtn_push_ssa_value(struct vtn_builder *b, uint32_t value_id,
                   struct vtn_ssa_value *ssa)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);

   /* See vtn_create_ssa_value */
   vtn_fail_if(ssa->type != glsl_get_bare_type(type->type),
               "Type mismatch for SPIR-V SSA value");

   struct vtn_value *val;
   if (type->base_type == vtn_base_type_pointer) {
      val = vtn_push_pointer(b, value_id, vtn_pointer_from_ssa(b, ssa->def, type));
   } else {
      /* Don't trip the value_type_ssa check in vtn_push_value */
      val = vtn_push_value(b, value_id, vtn_value_type_invalid);
      val->value_type = vtn_value_type_ssa;
      val->ssa = ssa;
   }

   return val;
}

nir_ssa_def *
vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_ssa_value *ssa = vtn_ssa_value(b, value_id);
   vtn_fail_if(!glsl_type_is_vector_or_scalar(ssa->type),
               "Expected a vector or scalar type");
   return ssa->def;
}

struct vtn_value *
vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id, nir_ssa_def *def)
{
   /* Types for all SPIR-V SSA values are set as part of a pre-pass so the
    * type will be valid by the time we get here.
    */
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_fail_if(def->num_components != glsl_get_vector_elements(type->type) ||
               def->bit_size != glsl_get_bit_size(type->type),
               "Mismatch between NIR and SPIR-V type.");
   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);
   ssa->def = def;
   return vtn_push_ssa_value(b, value_id, ssa);
}
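
/* Typical handler usage (illustrative only): opcode handlers pull operands
 * out as nir_ssa_defs, emit NIR through b->nb, and push the result under
 * the SPIR-V result id.  For a hypothetical two-source integer add:
 *
 *    nir_ssa_def *src0 = vtn_get_nir_ssa(b, w[3]);
 *    nir_ssa_def *src1 = vtn_get_nir_ssa(b, w[4]);
 *    vtn_push_nir_ssa(b, w[2], nir_iadd(&b->nb, src0, src1));
 *
 * The type pre-pass mentioned above guarantees that vtn_get_value_type()
 * already knows the result type for w[2] when the handler runs.
 */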

static nir_deref_instr *
vtn_get_image(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_image);
   return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
                               nir_var_uniform, type->glsl_image, 0);
}

static void
vtn_push_image(struct vtn_builder *b, uint32_t value_id,
               nir_deref_instr *deref)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_image);
   vtn_push_nir_ssa(b, value_id, &deref->dest.ssa);
}

static nir_deref_instr *
vtn_get_sampler(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampler);
   return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
                               nir_var_uniform, glsl_bare_sampler_type(), 0);
}

nir_ssa_def *
vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
                             struct vtn_sampled_image si)
{
   return nir_vec2(&b->nb, &si.image->dest.ssa, &si.sampler->dest.ssa);
}
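
/* A sampled image is therefore a vec2 of "deref addresses": component 0 is
 * the image deref and component 1 the sampler deref.
 * vtn_get_sampled_image() below is the inverse, so the round trip looks
 * like this (sketch, assuming img and samp are valid uniform derefs):
 *
 *    struct vtn_sampled_image si = { .image = img, .sampler = samp };
 *    vtn_push_sampled_image(b, result_id, si);
 *    struct vtn_sampled_image si2 = vtn_get_sampled_image(b, result_id);
 *    // si2.image and si2.sampler are deref casts of the same addresses
 */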

static void
vtn_push_sampled_image(struct vtn_builder *b, uint32_t value_id,
                       struct vtn_sampled_image si)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampled_image);
   vtn_push_nir_ssa(b, value_id, vtn_sampled_image_to_nir_ssa(b, si));
}

static struct vtn_sampled_image
vtn_get_sampled_image(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampled_image);
   nir_ssa_def *si_vec2 = vtn_get_nir_ssa(b, value_id);

   struct vtn_sampled_image si = { NULL, };
   si.image = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 0),
                                   nir_var_uniform,
                                   type->image->glsl_image, 0);
   si.sampler = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 1),
                                     nir_var_uniform,
                                     glsl_bare_sampler_type(), 0);
   return si;
}

static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }
   return dup;
}
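
/* Worked example: the string "abcd" occupies 5 bytes including the
 * terminating NUL, so with 4-byte SPIR-V words
 *
 *    *words_used = DIV_ROUND_UP(5, 4);   // == 2
 *
 * i.e. string literals always consume a whole number of words, with the
 * last word zero-padded.
 */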

const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->spirv_offset = 0;
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   assert(w == end);
   return w;
}
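
/* Shape of a pass over the binary (sketch; the handler name is made up).
 * Returning false from a handler stops the walk early and makes
 * vtn_foreach_instruction() return the word pointer it stopped at:
 *
 *    static bool
 *    my_handler(struct vtn_builder *b, SpvOp opcode,
 *               const uint32_t *w, unsigned count)
 *    {
 *       // w[0] holds the opcode and word count already decoded above
 *       return true;   // keep walking
 *    }
 *
 *    vtn_foreach_instruction(b, words, words_end, my_handler);
 *
 * OpLine/OpNoLine are consumed here so handlers never see them; they only
 * update b->file/b->line/b->col for error reporting.
 */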

static bool
vtn_handle_non_semantic_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                    const uint32_t *w, unsigned count)
{
   /* Do nothing. */
   return true;
}

static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   const char *ext = (const char *)&w[2];
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp(ext, "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0)
                && (b->options && b->options->caps.amd_gcn_shader)) {
         val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_ballot") == 0)
                && (b->options && b->options->caps.amd_shader_ballot)) {
         val->ext_handler = vtn_handle_amd_shader_ballot_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
                && (b->options && b->options->caps.amd_trinary_minmax)) {
         val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_explicit_vertex_parameter") == 0)
                && (b->options && b->options->caps.amd_shader_explicit_vertex_parameter)) {
         val->ext_handler = vtn_handle_amd_shader_explicit_vertex_parameter_instruction;
      } else if (strcmp(ext, "OpenCL.std") == 0) {
         val->ext_handler = vtn_handle_opencl_instruction;
      } else if (strstr(ext, "NonSemantic.") == ext) {
         val->ext_handler = vtn_handle_non_semantic_instruction;
      } else {
         vtn_fail("Unsupported extension: %s", ext);
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      vtn_assert(handled);
      break;
   }

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
}

static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_fail_if(value->value_type != vtn_value_type_type ||
                     value->type->base_type != vtn_base_type_struct,
                     "OpMemberDecorate and OpGroupMemberDecorate are only "
                     "allowed on OpTypeStruct");
         /* This means we haven't recursed yet */
         assert(value == base_value);

         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;

         vtn_fail_if(member >= base_value->type->length,
                     "OpMemberDecorate specifies member %d but the "
                     "OpTypeStruct has only %u members",
                     member, base_value->type->length);
      } else {
         /* Not a decoration */
         assert(dec->scope == VTN_DEC_EXECUTION_MODE);
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}

/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
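
/* Sketch of a decoration callback (the callback name is hypothetical):
 * `member` is -1 for decorations on the value itself and a struct-member
 * index for OpMemberDecorate-style decorations.
 *
 *    static void
 *    find_builtin_cb(struct vtn_builder *b, struct vtn_value *val,
 *                    int member, const struct vtn_decoration *dec,
 *                    void *data)
 *    {
 *       if (member == -1 && dec->decoration == SpvDecorationBuiltIn)
 *          *(SpvBuiltIn *)data = dec->operands[0];
 *    }
 *
 *    SpvBuiltIn builtin = SpvBuiltInMax;
 *    vtn_foreach_decoration(b, val, find_builtin_cb, &builtin);
 */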

void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}

void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
   case SpvOpExecutionMode:
   case SpvOpExecutionModeId: {
      struct vtn_value *val = vtn_untyped_value(b, target);

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
      case SpvOpDecorateId:
      case SpvOpDecorateString:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
      case SpvOpMemberDecorateString:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
                     "Member argument of OpMemberDecorate too large");
         break;
      case SpvOpExecutionMode:
      case SpvOpExecutionModeId:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->operands = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
            vtn_fail_if(dec->scope < 0, /* Check for overflow */
                        "Member argument of OpGroupMemberDecorate too large");
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
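
/* How dec->scope encodes the cases above (informative): plain decorations
 * and execution modes use the negative sentinels VTN_DEC_DECORATION and
 * VTN_DEC_EXECUTION_MODE, while member decorations store the member index
 * biased by VTN_DEC_STRUCT_MEMBER0, e.g. OpMemberDecorate on member 3
 * yields
 *
 *    dec->scope == VTN_DEC_STRUCT_MEMBER0 + 3
 *
 * which is what the overflow checks above rely on.
 */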

struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};

/**
 * Returns true if the given type contains a struct decorated Block or
 * BufferBlock
 */
bool
vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type)
{
   switch (type->base_type) {
   case vtn_base_type_array:
      return vtn_type_contains_block(b, type->array_element);
   case vtn_base_type_struct:
      if (type->block || type->buffer_block)
         return true;
      for (unsigned i = 0; i < type->length; i++) {
         if (vtn_type_contains_block(b, type->members[i]))
            return true;
      }
      return false;
   default:
      return false;
   }
}

/** Returns true if two types are "compatible", i.e. you can do an OpLoad,
 * OpStore, or OpCopyMemory between them without breaking anything.
 * Technically, the SPIR-V rules require the exact same type ID but this lets
 * us internally be a bit looser.
 */
bool
vtn_types_compatible(struct vtn_builder *b,
                     struct vtn_type *t1, struct vtn_type *t2)
{
   if (t1->id == t2->id)
      return true;

   if (t1->base_type != t2->base_type)
      return false;

   switch (t1->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      return t1->type == t2->type;

   case vtn_base_type_array:
      return t1->length == t2->length &&
             vtn_types_compatible(b, t1->array_element, t2->array_element);

   case vtn_base_type_pointer:
      return vtn_types_compatible(b, t1->deref, t2->deref);

   case vtn_base_type_struct:
      if (t1->length != t2->length)
         return false;

      for (unsigned i = 0; i < t1->length; i++) {
         if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))
            return false;
      }
      return true;

   case vtn_base_type_function:
      /* This case shouldn't get hit since you can't copy around function
       * types.  Just require them to be identical.
       */
      return false;
   }

   vtn_fail("Invalid base type");
}

struct vtn_type *
vtn_type_without_array(struct vtn_type *type)
{
   while (type->base_type == vtn_base_type_array)
      type = type->array_element;
   return type;
}

/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}

static const struct glsl_type *
wrap_type_in_array(const struct glsl_type *type,
                   const struct glsl_type *array_type)
{
   if (!glsl_type_is_array(array_type))
      return type;

   const struct glsl_type *elem_type =
      wrap_type_in_array(type, glsl_get_array_element(array_type));
   return glsl_array_type(elem_type, glsl_get_length(array_type),
                          glsl_get_explicit_stride(array_type));
}
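
/* Worked example (sketch): wrap_type_in_array() rebuilds the array shape
 * outside-in around a new element type, so
 *
 *    wrap_type_in_array(glsl_atomic_uint_type(), uint[4][2])
 *
 * yields atomic_uint[4][2], preserving each level's length and explicit
 * stride.  vtn_type_get_nir_type() below uses exactly this for
 * AtomicCounter variables.
 */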

static bool
vtn_type_needs_explicit_layout(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   /* For OpenCL we never want to strip the info from the types, and it makes
    * type comparisons easier in later stages.
    */
   if (b->options->environment == NIR_SPIRV_OPENCL)
      return true;

   switch (mode) {
   case vtn_variable_mode_input:
   case vtn_variable_mode_output:
      /* Layout decorations kept because we need offsets for XFB arrays of
       * blocks.
       */
      return b->shader->info.has_transform_feedback_varyings;

   case vtn_variable_mode_ssbo:
   case vtn_variable_mode_phys_ssbo:
   case vtn_variable_mode_ubo:
      return true;

   default:
      return false;
   }
}

const struct glsl_type *
vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
                      enum vtn_variable_mode mode)
{
   if (mode == vtn_variable_mode_atomic_counter) {
      vtn_fail_if(glsl_without_array(type->type) != glsl_uint_type(),
                  "Variables in the AtomicCounter storage class should be "
                  "(possibly arrays of arrays of) uint.");
      return wrap_type_in_array(glsl_atomic_uint_type(), type->type);
   }

   if (mode == vtn_variable_mode_uniform) {
      switch (type->base_type) {
      case vtn_base_type_array: {
         const struct glsl_type *elem_type =
            vtn_type_get_nir_type(b, type->array_element, mode);

         return glsl_array_type(elem_type, type->length,
                                glsl_get_explicit_stride(type->type));
      }

      case vtn_base_type_struct: {
         bool need_new_struct = false;
         const uint32_t num_fields = type->length;
         NIR_VLA(struct glsl_struct_field, fields, num_fields);
         for (unsigned i = 0; i < num_fields; i++) {
            fields[i] = *glsl_get_struct_field_data(type->type, i);
            const struct glsl_type *field_nir_type =
               vtn_type_get_nir_type(b, type->members[i], mode);
            if (fields[i].type != field_nir_type) {
               fields[i].type = field_nir_type;
               need_new_struct = true;
            }
         }
         if (need_new_struct) {
            if (glsl_type_is_interface(type->type)) {
               return glsl_interface_type(fields, num_fields,
                                          /* packing */ 0, false,
                                          glsl_get_type_name(type->type));
            } else {
               return glsl_struct_type(fields, num_fields,
                                       glsl_get_type_name(type->type),
                                       glsl_struct_type_is_packed(type->type));
            }
         } else {
            /* No changes, just pass it on */
            return type->type;
         }
      }

      case vtn_base_type_image:
         return type->glsl_image;

      case vtn_base_type_sampler:
         return glsl_bare_sampler_type();

      case vtn_base_type_sampled_image:
         return type->image->glsl_image;

      default:
         return type->type;
      }
   }

   /* Layout decorations are allowed but ignored in certain conditions, to
    * allow SPIR-V generators to perform type deduplication.  Discard
    * unnecessary ones when passing to NIR.
    */
   if (!vtn_type_needs_explicit_layout(b, mode))
      return glsl_get_bare_type(type->type);

   return type->type;
}

static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   vtn_assert(glsl_type_is_matrix(type->type));

   return type;
}

static void
vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
                            int member, enum gl_access_qualifier access)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   type->access |= access;
}

static void
array_stride_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *void_ctx)
{
   struct vtn_type *type = val->type;

   if (dec->decoration == SpvDecorationArrayStride) {
      if (vtn_type_contains_block(b, type)) {
         vtn_warn("The ArrayStride decoration cannot be applied to an array "
                  "type which contains a structure type decorated Block "
                  "or BufferBlock");
         /* Ignore the decoration */
      } else {
         vtn_fail_if(dec->operands[0] == 0, "ArrayStride must be non-zero");
         type->stride = dec->operands[0];
      }
   }
}

static void
struct_member_decoration_cb(struct vtn_builder *b,
                            UNUSED struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNonWritable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
      break;
   case SpvDecorationNonReadable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
      break;
   case SpvDecorationVolatile:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
      break;
   case SpvDecorationCoherent:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
      break;
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      ctx->fields[member].interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->operands[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->operands[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->operands[0];
      ctx->fields[member].offset = dec->operands[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         ctx->type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}

/** Chases the array type all the way down to the tail and rewrites the
 * glsl_types to be based off the tail's glsl_type.
 */
static void
vtn_array_type_rewrite_glsl_type(struct vtn_type *type)
{
   if (type->base_type != vtn_base_type_array)
      return;

   vtn_array_type_rewrite_glsl_type(type->array_element);

   type->type = glsl_array_type(type->array_element->type,
                                type->length, type->stride);
}

/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               UNUSED struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   vtn_fail_if(member < 0,
               "The MatrixStride decoration is only allowed on members "
               "of OpTypeStruct");
   vtn_fail_if(dec->operands[0] == 0, "MatrixStride must be non-zero");

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], true);
      mat_type->array_element->type = glsl_get_column_type(mat_type->type);
   } else {
      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], false);
   }

   /* Now that we've replaced the glsl_type with a properly strided matrix
    * type, rewrite the member type so that it's an array of the proper kind
    * of glsl_type.
    */
   vtn_array_type_rewrite_glsl_type(ctx->type->members[member]);
   ctx->fields[member].type = ctx->type->members[member]->type;
}
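
/* Informative note on the two branches above: SPIR-V's MatrixStride is the
 * distance between rows of a RowMajor matrix and between columns otherwise,
 * while vtn_type always models a matrix as an array of columns.  The
 * column-major branch can therefore store the decoration directly, but the
 * row-major branch swaps strides: the decoration becomes the column
 * vector's stride and the vector's natural stride moves up to the matrix.
 */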

static void
struct_block_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *ctx)
{
   if (member != -1)
      return;

   struct vtn_type *type = val->type;
   if (dec->decoration == SpvDecorationBlock)
      type->block = true;
   else if (dec->decoration == SpvDecorationBufferBlock)
      type->buffer_block = true;
}

static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, UNUSED void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1) {
      /* This should have been handled by OpTypeStruct */
      assert(val->type->base_type == vtn_base_type_struct);
      assert(member >= 0 && member < val->type->length);
      return;
   }

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      break;
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->block);
      break;
   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->buffer_block);
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationExplicitInterpAMD:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
   case SpvDecorationUserSemantic:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationStream:
      /* We don't need to do anything here: stream is filled in when
       * applying the decoration to a variable.  Just check that, when the
       * decoration is not on a struct member, it is at least on a struct.
       */
      vtn_assert(type->base_type == vtn_base_type_struct);
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}

static unsigned
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown: return PIPE_FORMAT_NONE;
   case SpvImageFormatRgba32f: return PIPE_FORMAT_R32G32B32A32_FLOAT;
   case SpvImageFormatRgba16f: return PIPE_FORMAT_R16G16B16A16_FLOAT;
   case SpvImageFormatR32f: return PIPE_FORMAT_R32_FLOAT;
   case SpvImageFormatRgba8: return PIPE_FORMAT_R8G8B8A8_UNORM;
   case SpvImageFormatRgba8Snorm: return PIPE_FORMAT_R8G8B8A8_SNORM;
   case SpvImageFormatRg32f: return PIPE_FORMAT_R32G32_FLOAT;
   case SpvImageFormatRg16f: return PIPE_FORMAT_R16G16_FLOAT;
   case SpvImageFormatR11fG11fB10f: return PIPE_FORMAT_R11G11B10_FLOAT;
   case SpvImageFormatR16f: return PIPE_FORMAT_R16_FLOAT;
   case SpvImageFormatRgba16: return PIPE_FORMAT_R16G16B16A16_UNORM;
   case SpvImageFormatRgb10A2: return PIPE_FORMAT_R10G10B10A2_UNORM;
   case SpvImageFormatRg16: return PIPE_FORMAT_R16G16_UNORM;
   case SpvImageFormatRg8: return PIPE_FORMAT_R8G8_UNORM;
   case SpvImageFormatR16: return PIPE_FORMAT_R16_UNORM;
   case SpvImageFormatR8: return PIPE_FORMAT_R8_UNORM;
   case SpvImageFormatRgba16Snorm: return PIPE_FORMAT_R16G16B16A16_SNORM;
   case SpvImageFormatRg16Snorm: return PIPE_FORMAT_R16G16_SNORM;
   case SpvImageFormatRg8Snorm: return PIPE_FORMAT_R8G8_SNORM;
   case SpvImageFormatR16Snorm: return PIPE_FORMAT_R16_SNORM;
   case SpvImageFormatR8Snorm: return PIPE_FORMAT_R8_SNORM;
   case SpvImageFormatRgba32i: return PIPE_FORMAT_R32G32B32A32_SINT;
   case SpvImageFormatRgba16i: return PIPE_FORMAT_R16G16B16A16_SINT;
   case SpvImageFormatRgba8i: return PIPE_FORMAT_R8G8B8A8_SINT;
   case SpvImageFormatR32i: return PIPE_FORMAT_R32_SINT;
   case SpvImageFormatRg32i: return PIPE_FORMAT_R32G32_SINT;
   case SpvImageFormatRg16i: return PIPE_FORMAT_R16G16_SINT;
   case SpvImageFormatRg8i: return PIPE_FORMAT_R8G8_SINT;
   case SpvImageFormatR16i: return PIPE_FORMAT_R16_SINT;
   case SpvImageFormatR8i: return PIPE_FORMAT_R8_SINT;
   case SpvImageFormatRgba32ui: return PIPE_FORMAT_R32G32B32A32_UINT;
   case SpvImageFormatRgba16ui: return PIPE_FORMAT_R16G16B16A16_UINT;
   case SpvImageFormatRgba8ui: return PIPE_FORMAT_R8G8B8A8_UINT;
   case SpvImageFormatR32ui: return PIPE_FORMAT_R32_UINT;
   case SpvImageFormatRgb10a2ui: return PIPE_FORMAT_R10G10B10A2_UINT;
   case SpvImageFormatRg32ui: return PIPE_FORMAT_R32G32_UINT;
   case SpvImageFormatRg16ui: return PIPE_FORMAT_R16G16_UINT;
   case SpvImageFormatRg8ui: return PIPE_FORMAT_R8G8_UINT;
   case SpvImageFormatR16ui: return PIPE_FORMAT_R16_UINT;
   case SpvImageFormatR8ui: return PIPE_FORMAT_R8_UINT;
   default:
      vtn_fail("Invalid image format: %s (%u)",
               spirv_imageformat_to_string(format), format);
   }
}

static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = NULL;

   /* In order to properly handle forward declarations, we have to defer
    * allocation for pointer types.
    */
   if (opcode != SpvOpTypePointer && opcode != SpvOpTypeForwardPointer) {
      val = vtn_push_value(b, w[1], vtn_value_type_type);
      vtn_fail_if(val->type != NULL,
                  "Only pointers can have forward declarations");
      val->type = rzalloc(b, struct vtn_type);
      val->type->id = w[1];
   }

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      val->type->length = 1;
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 64:
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
         break;
      case 32:
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
         break;
      case 16:
         val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         break;
      case 8:
         val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
         break;
      default:
         vtn_fail("Invalid int bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 16:
         val->type->type = glsl_float16_t_type();
         break;
      case 32:
         val->type->type = glsl_float_type();
         break;
      case 64:
         val->type->type = glsl_double_type();
         break;
      default:
         vtn_fail("Invalid float bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned elems = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_scalar,
                  "Base type for OpTypeVector must be a scalar");
      vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16),
                  "Invalid component count for OpTypeVector");

      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->length = elems;
      val->type->stride = glsl_type_is_boolean(val->type->type)
         ? 4 : glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned columns = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_vector,
                  "Base type for OpTypeMatrix must be a vector");
      vtn_fail_if(columns < 2 || columns > 4,
                  "Invalid column count for OpTypeMatrix");

      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      vtn_fail_if(glsl_type_is_error(val->type->type),
                  "Unsupported base type for OpTypeMatrix");
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element = vtn_get_type(b, w[2]);

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length = vtn_constant_uint(b, w[3]);
      }

      val->type->base_type = vtn_base_type_array;
      val->type->array_element = array_element;
      if (b->shader->info.stage == MESA_SHADER_KERNEL)
         val->type->stride = glsl_get_cl_size(array_element->type);

      vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
      val->type->type = glsl_array_type(array_element->type, val->type->length,
                                        val->type->stride);
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);
      val->type->packed = false;

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] = vtn_get_type(b, w[i + 2]);
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
            .offset = -1,
         };
      }

      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         unsigned offset = 0;
         for (unsigned i = 0; i < num_fields; i++) {
            offset = align(offset, glsl_get_cl_alignment(fields[i].type));
            fields[i].offset = offset;
            offset += glsl_get_cl_size(fields[i].type);
         }
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL);

      const char *name = val->name;

      if (val->type->block || val->type->buffer_block) {
         /* Packing will be ignored since types coming from SPIR-V are
          * explicitly laid out.
          */
         val->type->type = glsl_interface_type(fields, num_fields,
                                               /* packing */ 0, false,
                                               name ? name : "block");
      } else {
         val->type->type = glsl_struct_type(fields, num_fields,
                                            name ? name : "struct", false);
      }
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_get_type(b, w[2]);

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] = vtn_get_type(b, w[i + 3]);
      }
      break;
   }

   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer: {
      /* We can't blindly push the value because it might be a forward
       * declaration.
       */
      val = vtn_untyped_value(b, w[1]);

      SpvStorageClass storage_class = w[2];

      if (val->value_type == vtn_value_type_invalid) {
         val->value_type = vtn_value_type_type;
         val->type = rzalloc(b, struct vtn_type);
         val->type->id = w[1];
         val->type->base_type = vtn_base_type_pointer;
         val->type->storage_class = storage_class;

         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         enum vtn_variable_mode mode = vtn_storage_class_to_mode(
            b, storage_class, NULL, NULL);
         val->type->type = nir_address_format_to_glsl_type(
            vtn_mode_to_address_format(b, mode));
      } else {
         vtn_fail_if(val->type->storage_class != storage_class,
                     "The storage classes of an OpTypePointer and any "
                     "OpTypeForwardPointers that provide forward "
                     "declarations of it must match.");
      }

      if (opcode == SpvOpTypePointer) {
         vtn_fail_if(val->type->deref != NULL,
                     "While OpTypeForwardPointer can be used to provide a "
                     "forward declaration of a pointer, OpTypePointer can "
                     "only be used once for a given id.");

         val->type->deref = vtn_get_type(b, w[3]);

         /* Only certain storage classes use ArrayStride.  The others (in
          * particular Workgroup) are expected to be laid out by the driver.
          */
         switch (storage_class) {
         case SpvStorageClassUniform:
         case SpvStorageClassPushConstant:
         case SpvStorageClassStorageBuffer:
         case SpvStorageClassPhysicalStorageBuffer:
            vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
            break;
         default:
            /* Nothing to do. */
            break;
         }

         if (b->physical_ptrs) {
            switch (storage_class) {
            case SpvStorageClassFunction:
            case SpvStorageClassWorkgroup:
            case SpvStorageClassCrossWorkgroup:
            case SpvStorageClassUniformConstant:
               val->type->stride = align(glsl_get_cl_size(val->type->deref->type),
                                         glsl_get_cl_alignment(val->type->deref->type));
               break;
            default:
               break;
            }
         }
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      /* Images are represented in NIR as a scalar SSA value that is the
       * result of a deref instruction.  An OpLoad on an OpTypeImage pointer
       * from UniformConstant memory just takes the NIR deref from the pointer
       * and turns it into an SSA value.
       */
      val->type->type = nir_address_format_to_glsl_type(
         vtn_mode_to_address_format(b, vtn_variable_mode_function));

      const struct vtn_type *sampled_type = vtn_get_type(b, w[2]);
      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         vtn_fail_if(sampled_type->base_type != vtn_base_type_void,
                     "Sampled type of OpTypeImage must be void for kernels");
      } else {
         vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                     glsl_get_bit_size(sampled_type->type) != 32,
                     "Sampled type of OpTypeImage must be a 32-bit scalar");
      }

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D: dim = GLSL_SAMPLER_DIM_1D; break;
      case SpvDim2D: dim = GLSL_SAMPLER_DIM_2D; break;
      case SpvDim3D: dim = GLSL_SAMPLER_DIM_3D; break;
      case SpvDimCube: dim = GLSL_SAMPLER_DIM_CUBE; break;
      case SpvDimRect: dim = GLSL_SAMPLER_DIM_RECT; break;
      case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF; break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         vtn_fail("Invalid SPIR-V image dimensionality: %s (%u)",
                  spirv_dim_to_string((SpvDim)w[3]), w[3]);
      }

      /* w[4]: as per Vulkan spec "Validation Rules within a Module",
       * The “Depth” operand of OpTypeImage is ignored.
       */
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else if (b->shader->info.stage == MESA_SHADER_KERNEL)
         /* Per the CL C spec: If no qualifier is provided, read_only is assumed. */
         val->type->access_qualifier = SpvAccessQualifierReadOnly;
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            vtn_fail("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(b, format);

      enum glsl_base_type sampled_base_type =
         glsl_get_base_type(sampled_type->type);
      if (sampled == 1) {
         val->type->glsl_image = glsl_sampler_type(dim, false, is_array,
                                                   sampled_base_type);
      } else if (sampled == 2) {
         val->type->glsl_image = glsl_image_type(dim, is_array,
                                                 sampled_base_type);
      } else if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         val->type->glsl_image = glsl_image_type(dim, is_array,
                                                 GLSL_TYPE_VOID);
      } else {
         vtn_fail("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage: {
      val->type->base_type = vtn_base_type_sampled_image;
      val->type->image = vtn_get_type(b, w[2]);

      /* Sampled images are represented in NIR as a vec2 SSA value where
       * each component is the result of a deref instruction.  The first
       * component is the image and the second is the sampler.  An OpLoad
       * on an OpTypeSampledImage pointer from UniformConstant memory just
       * takes the NIR deref from the pointer and duplicates it to both
       * vector components.
       */
1650 nir_address_format addr_format =
1651 vtn_mode_to_address_format(b, vtn_variable_mode_function);
1652 assert(nir_address_format_num_components(addr_format) == 1);
1653 unsigned bit_size = nir_address_format_bit_size(addr_format);
1654 assert(bit_size == 32 || bit_size == 64);
1655
1656 enum glsl_base_type base_type =
1657 bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64;
1658 val->type->type = glsl_vector_type(base_type, 2);
1659 break;
1660 }
1661
1662 case SpvOpTypeSampler:
1663 val->type->base_type = vtn_base_type_sampler;
1664
1665 /* Samplers are represented in NIR as a scalar SSA value that is the
1666 * result of a deref instruction. An OpLoad on an OpTypeSampler pointer
1667 * from UniformConstant memory just takes the NIR deref from the pointer
1668 * and turns it into an SSA value.
1669 */
1670 val->type->type = nir_address_format_to_glsl_type(
1671 vtn_mode_to_address_format(b, vtn_variable_mode_function));
1672 break;
1673
1674 case SpvOpTypeOpaque:
1675 case SpvOpTypeEvent:
1676 case SpvOpTypeDeviceEvent:
1677 case SpvOpTypeReserveId:
1678 case SpvOpTypeQueue:
1679 case SpvOpTypePipe:
1680 default:
1681 vtn_fail_with_opcode("Unhandled opcode", opcode);
1682 }
1683
1684 vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
1685
1686 if (val->type->base_type == vtn_base_type_struct &&
1687 (val->type->block || val->type->buffer_block)) {
1688 for (unsigned i = 0; i < val->type->length; i++) {
1689 vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]),
1690 "Block and BufferBlock decorations cannot decorate a "
1691 "structure type that is nested at any level inside "
1692 "another structure type decorated with Block or "
1693 "BufferBlock.");
1694 }
1695 }
1696 }
1697
1698 static nir_constant *
1699 vtn_null_constant(struct vtn_builder *b, struct vtn_type *type)
1700 {
1701 nir_constant *c = rzalloc(b, nir_constant);
1702
1703 switch (type->base_type) {
1704 case vtn_base_type_scalar:
1705 case vtn_base_type_vector:
1706 /* Nothing to do here. It's already initialized to zero */
1707 break;
1708
1709 case vtn_base_type_pointer: {
1710 enum vtn_variable_mode mode = vtn_storage_class_to_mode(
1711 b, type->storage_class, type->deref, NULL);
1712 nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
1713
1714 const nir_const_value *null_value = nir_address_format_null_value(addr_format);
1715 memcpy(c->values, null_value,
1716 sizeof(nir_const_value) * nir_address_format_num_components(addr_format));
1717 break;
1718 }
1719
1720 case vtn_base_type_void:
1721 case vtn_base_type_image:
1722 case vtn_base_type_sampler:
1723 case vtn_base_type_sampled_image:
1724 case vtn_base_type_function:
1725 /* For those we have to return something but it doesn't matter what. */
1726 break;
1727
1728 case vtn_base_type_matrix:
1729 case vtn_base_type_array:
1730 vtn_assert(type->length > 0);
1731 c->num_elements = type->length;
1732 c->elements = ralloc_array(b, nir_constant *, c->num_elements);
1733
1734 c->elements[0] = vtn_null_constant(b, type->array_element);
1735 for (unsigned i = 1; i < c->num_elements; i++)
1736 c->elements[i] = c->elements[0];
1737 break;
1738
1739 case vtn_base_type_struct:
1740 c->num_elements = type->length;
1741 c->elements = ralloc_array(b, nir_constant *, c->num_elements);
1742 for (unsigned i = 0; i < c->num_elements; i++)
1743 c->elements[i] = vtn_null_constant(b, type->members[i]);
1744 break;
1745
1746 default:
1747 vtn_fail("Invalid type for null constant");
1748 }
1749
1750 return c;
1751 }
1752
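/* Decoration callback for specialization constants: if the value carries a
 * SpecId decoration and the API supplied a specialization for that ID,
 * overwrite the default constant with the specialized value.
 */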
1753 static void
1754 spec_constant_decoration_cb(struct vtn_builder *b, UNUSED struct vtn_value *val,
1755 ASSERTED int member,
1756 const struct vtn_decoration *dec, void *data)
1757 {
1758 vtn_assert(member == -1);
1759 if (dec->decoration != SpvDecorationSpecId)
1760 return;
1761
1762 nir_const_value *value = data;
1763 for (unsigned i = 0; i < b->num_specializations; i++) {
1764 if (b->specializations[i].id == dec->operands[0]) {
1765 *value = b->specializations[i].value;
1766 return;
1767 }
1768 }
1769 }
1770
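/* Records the constant decorated as the WorkgroupSize builtin so that the
 * local workgroup size can later be derived from its value.
 */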
1771 static void
1772 handle_workgroup_size_decoration_cb(struct vtn_builder *b,
1773 struct vtn_value *val,
1774 ASSERTED int member,
1775 const struct vtn_decoration *dec,
1776 UNUSED void *data)
1777 {
1778 vtn_assert(member == -1);
1779 if (dec->decoration != SpvDecorationBuiltIn ||
1780 dec->operands[0] != SpvBuiltInWorkgroupSize)
1781 return;
1782
1783 vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));
1784 b->workgroup_size_builtin = val;
1785 }
1786
1787 static void
1788 vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
1789 const uint32_t *w, unsigned count)
1790 {
1791 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
1792 val->constant = rzalloc(b, nir_constant);
1793 switch (opcode) {
1794 case SpvOpConstantTrue:
1795 case SpvOpConstantFalse:
1796 case SpvOpSpecConstantTrue:
1797 case SpvOpSpecConstantFalse: {
1798 vtn_fail_if(val->type->type != glsl_bool_type(),
1799 "Result type of %s must be OpTypeBool",
1800 spirv_op_to_string(opcode));
1801
1802 bool bval = (opcode == SpvOpConstantTrue ||
1803 opcode == SpvOpSpecConstantTrue);
1804
1805 nir_const_value u32val = nir_const_value_for_uint(bval, 32);
1806
1807 if (opcode == SpvOpSpecConstantTrue ||
1808 opcode == SpvOpSpecConstantFalse)
1809 vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32val);
1810
1811 val->constant->values[0].b = u32val.u32 != 0;
1812 break;
1813 }
1814
1815 case SpvOpConstant:
1816 case SpvOpSpecConstant: {
1817 vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
1818 "Result type of %s must be a scalar",
1819 spirv_op_to_string(opcode));
1820 int bit_size = glsl_get_bit_size(val->type->type);
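/* Per the SPIR-V spec, literal words are stored low-order word first, so a
 * 64-bit literal occupies w[3] and w[4] while smaller literals sit in the
 * low bits of w[3].
 */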
1821 switch (bit_size) {
1822 case 64:
1823 val->constant->values[0].u64 = vtn_u64_literal(&w[3]);
1824 break;
1825 case 32:
1826 val->constant->values[0].u32 = w[3];
1827 break;
1828 case 16:
1829 val->constant->values[0].u16 = w[3];
1830 break;
1831 case 8:
1832 val->constant->values[0].u8 = w[3];
1833 break;
1834 default:
1835 vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size);
1836 }
1837
1838 if (opcode == SpvOpSpecConstant)
1839 vtn_foreach_decoration(b, val, spec_constant_decoration_cb,
1840 &val->constant->values[0]);
1841 break;
1842 }
1843
1844 case SpvOpSpecConstantComposite:
1845 case SpvOpConstantComposite: {
1846 unsigned elem_count = count - 3;
1847 vtn_fail_if(elem_count != val->type->length,
1848 "%s has %u constituents, expected %u",
1849 spirv_op_to_string(opcode), elem_count, val->type->length);
1850
1851 nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
1852 for (unsigned i = 0; i < elem_count; i++) {
1853 struct vtn_value *val = vtn_untyped_value(b, w[i + 3]);
1854
1855 if (val->value_type == vtn_value_type_constant) {
1856 elems[i] = val->constant;
1857 } else {
1858 vtn_fail_if(val->value_type != vtn_value_type_undef,
1859 "only constants or undefs allowed for "
1860 "SpvOpConstantComposite");
1861 /* to make it easier, just insert a NULL constant for now */
1862 elems[i] = vtn_null_constant(b, val->type);
1863 }
1864 }
1865
1866 switch (val->type->base_type) {
1867 case vtn_base_type_vector: {
1868 assert(glsl_type_is_vector(val->type->type));
1869 for (unsigned i = 0; i < elem_count; i++)
1870 val->constant->values[i] = elems[i]->values[0];
1871 break;
1872 }
1873
1874 case vtn_base_type_matrix:
1875 case vtn_base_type_struct:
1876 case vtn_base_type_array:
1877 ralloc_steal(val->constant, elems);
1878 val->constant->num_elements = elem_count;
1879 val->constant->elements = elems;
1880 break;
1881
1882 default:
1883 vtn_fail("Result type of %s must be a composite type",
1884 spirv_op_to_string(opcode));
1885 }
1886 break;
1887 }
1888
1889 case SpvOpSpecConstantOp: {
1890 nir_const_value u32op = nir_const_value_for_uint(w[3], 32);
1891 vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32op);
1892 SpvOp opcode = u32op.u32;
1893 switch (opcode) {
1894 case SpvOpVectorShuffle: {
1895 struct vtn_value *v0 = &b->values[w[4]];
1896 struct vtn_value *v1 = &b->values[w[5]];
1897
1898 vtn_assert(v0->value_type == vtn_value_type_constant ||
1899 v0->value_type == vtn_value_type_undef);
1900 vtn_assert(v1->value_type == vtn_value_type_constant ||
1901 v1->value_type == vtn_value_type_undef);
1902
1903 unsigned len0 = glsl_get_vector_elements(v0->type->type);
1904 unsigned len1 = glsl_get_vector_elements(v1->type->type);
1905
1906 vtn_assert(len0 + len1 < 16);
1907
1908 unsigned bit_size = glsl_get_bit_size(val->type->type);
1909 unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
1910 unsigned bit_size1 = glsl_get_bit_size(v1->type->type);
1911
1912 vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
1913 (void)bit_size0; (void)bit_size1;
1914
1915 nir_const_value undef = { .u64 = 0xdeadbeefdeadbeef };
1916 nir_const_value combined[NIR_MAX_VEC_COMPONENTS * 2];
1917
1918 if (v0->value_type == vtn_value_type_constant) {
1919 for (unsigned i = 0; i < len0; i++)
1920 combined[i] = v0->constant->values[i];
1921 }
1922 if (v1->value_type == vtn_value_type_constant) {
1923 for (unsigned i = 0; i < len1; i++)
1924 combined[len0 + i] = v1->constant->values[i];
1925 }
1926
1927 for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
1928 uint32_t comp = w[i + 6];
1929 if (comp == (uint32_t)-1) {
1930 /* If the component is not used, set the value to a known constant
1931 * so that incorrect uses of it can be detected.
1932 */
1933 val->constant->values[j] = undef;
1934 } else {
1935 vtn_fail_if(comp >= len0 + len1,
1936 "All Component literals must either be FFFFFFFF "
1937 "or in [0, N - 1] (inclusive).");
1938 val->constant->values[j] = combined[comp];
1939 }
1940 }
1941 break;
1942 }
1943
1944 case SpvOpCompositeExtract:
1945 case SpvOpCompositeInsert: {
1946 struct vtn_value *comp;
1947 unsigned deref_start;
1948 struct nir_constant **c;
1949 if (opcode == SpvOpCompositeExtract) {
1950 comp = vtn_value(b, w[4], vtn_value_type_constant);
1951 deref_start = 5;
1952 c = &comp->constant;
1953 } else {
1954 comp = vtn_value(b, w[5], vtn_value_type_constant);
1955 deref_start = 6;
1956 val->constant = nir_constant_clone(comp->constant,
1957 (nir_variable *)b);
1958 c = &val->constant;
1959 }
1960
1961 int elem = -1;
1962 const struct vtn_type *type = comp->type;
1963 for (unsigned i = deref_start; i < count; i++) {
1964 vtn_fail_if(w[i] >= type->length,
1965 "%uth index of %s is %u but the type has only "
1966 "%u elements", i - deref_start,
1967 spirv_op_to_string(opcode), w[i], type->length);
1968
1969 switch (type->base_type) {
1970 case vtn_base_type_vector:
1971 elem = w[i];
1972 type = type->array_element;
1973 break;
1974
1975 case vtn_base_type_matrix:
1976 case vtn_base_type_array:
1977 c = &(*c)->elements[w[i]];
1978 type = type->array_element;
1979 break;
1980
1981 case vtn_base_type_struct:
1982 c = &(*c)->elements[w[i]];
1983 type = type->members[w[i]];
1984 break;
1985
1986 default:
1987 vtn_fail("%s must only index into composite types",
1988 spirv_op_to_string(opcode));
1989 }
1990 }
1991
1992 if (opcode == SpvOpCompositeExtract) {
1993 if (elem == -1) {
1994 val->constant = *c;
1995 } else {
1996 unsigned num_components = type->length;
1997 for (unsigned i = 0; i < num_components; i++)
1998 val->constant->values[i] = (*c)->values[elem + i];
1999 }
2000 } else {
2001 struct vtn_value *insert =
2002 vtn_value(b, w[4], vtn_value_type_constant);
2003 vtn_assert(insert->type == type);
2004 if (elem == -1) {
2005 *c = insert->constant;
2006 } else {
2007 unsigned num_components = type->length;
2008 for (unsigned i = 0; i < num_components; i++)
2009 (*c)->values[elem + i] = insert->constant->values[i];
2010 }
2011 }
2012 break;
2013 }
2014
2015 default: {
2016 bool swap;
2017 nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
2018 nir_alu_type src_alu_type = dst_alu_type;
2019 unsigned num_components = glsl_get_vector_elements(val->type->type);
2020 unsigned bit_size;
2021
2022 vtn_assert(count <= 7);
2023
2024 switch (opcode) {
2025 case SpvOpSConvert:
2026 case SpvOpFConvert:
2027 case SpvOpUConvert:
2028 /* We have a source in a conversion */
2029 src_alu_type =
2030 nir_get_nir_type_for_glsl_type(vtn_get_value_type(b, w[4])->type);
2031 /* We use the bit size of the conversion source to evaluate the opcode later */
2032 bit_size = glsl_get_bit_size(vtn_get_value_type(b, w[4])->type);
2033 break;
2034 default:
2035 bit_size = glsl_get_bit_size(val->type->type);
2036 }
2037
2038 nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
2039 nir_alu_type_get_type_size(src_alu_type),
2040 nir_alu_type_get_type_size(dst_alu_type));
2041 nir_const_value src[3][NIR_MAX_VEC_COMPONENTS];
2042
2043 for (unsigned i = 0; i < count - 4; i++) {
2044 struct vtn_value *src_val =
2045 vtn_value(b, w[4 + i], vtn_value_type_constant);
2046
2047 /* If this is an unsized source, pull the bit size from the
2048 * source; otherwise, we'll use the bit size from the destination.
2049 */
2050 if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
2051 bit_size = glsl_get_bit_size(src_val->type->type);
2052
2053 unsigned src_comps = nir_op_infos[op].input_sizes[i] ?
2054 nir_op_infos[op].input_sizes[i] :
2055 num_components;
2056
2057 unsigned j = swap ? 1 - i : i;
2058 for (unsigned c = 0; c < src_comps; c++)
2059 src[j][c] = src_val->constant->values[c];
2060 }
2061
2062 /* Fix up fixed-size sources: NIR shift opcodes always take a 32-bit shift count, so convert the shift amount to 32 bits. */
2063 switch (op) {
2064 case nir_op_ishl:
2065 case nir_op_ishr:
2066 case nir_op_ushr: {
2067 if (bit_size == 32)
2068 break;
2069 for (unsigned i = 0; i < num_components; ++i) {
2070 switch (bit_size) {
2071 case 64: src[1][i].u32 = src[1][i].u64; break;
2072 case 16: src[1][i].u32 = src[1][i].u16; break;
2073 case 8: src[1][i].u32 = src[1][i].u8; break;
2074 }
2075 }
2076 break;
2077 }
2078 default:
2079 break;
2080 }
2081
2082 nir_const_value *srcs[3] = {
2083 src[0], src[1], src[2],
2084 };
2085 nir_eval_const_opcode(op, val->constant->values,
2086 num_components, bit_size, srcs,
2087 b->shader->info.float_controls_execution_mode);
2088 break;
2089 } /* default */
2090 }
2091 break;
2092 }
2093
2094 case SpvOpConstantNull:
2095 val->constant = vtn_null_constant(b, val->type);
2096 break;
2097
2098 case SpvOpConstantSampler:
2099 vtn_fail("OpConstantSampler requires Kernel Capability");
2100 break;
2101
2102 default:
2103 vtn_fail_with_opcode("Unhandled opcode", opcode);
2104 }
2105
2106 /* Now that we have the value, update the workgroup size if needed */
2107 vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
2108 }
2109
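/* Returns the memory semantics bits that implicitly cover a given storage
 * class. Atomics widen their explicit semantics with this so that the
 * ordering also applies to their own storage class.
 */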
2110 SpvMemorySemanticsMask
2111 vtn_storage_class_to_memory_semantics(SpvStorageClass sc)
2112 {
2113 switch (sc) {
2114 case SpvStorageClassStorageBuffer:
2115 case SpvStorageClassPhysicalStorageBuffer:
2116 return SpvMemorySemanticsUniformMemoryMask;
2117 case SpvStorageClassWorkgroup:
2118 return SpvMemorySemanticsWorkgroupMemoryMask;
2119 default:
2120 return SpvMemorySemanticsMaskNone;
2121 }
2122 }
2123
2124 static void
2125 vtn_split_barrier_semantics(struct vtn_builder *b,
2126 SpvMemorySemanticsMask semantics,
2127 SpvMemorySemanticsMask *before,
2128 SpvMemorySemanticsMask *after)
2129 {
2130 /* For memory semantics embedded in operations, we split them into up to
2131 * two barriers, to be added before and after the operation. This is less
2132 * strict than if we propagated them until the final backend stage, but it
2133 * still results in correct execution.
2134 *
2135 * A further improvement would be to pipe this information (and use it!)
2136 * into the next compiler layers, at the expense of making the handling of
2137 * barriers more complicated.
2138 */
2139
2140 *before = SpvMemorySemanticsMaskNone;
2141 *after = SpvMemorySemanticsMaskNone;
2142
2143 SpvMemorySemanticsMask order_semantics =
2144 semantics & (SpvMemorySemanticsAcquireMask |
2145 SpvMemorySemanticsReleaseMask |
2146 SpvMemorySemanticsAcquireReleaseMask |
2147 SpvMemorySemanticsSequentiallyConsistentMask);
2148
2149 if (util_bitcount(order_semantics) > 1) {
2150 /* Old GLSLang versions incorrectly set all the ordering bits. This was
2151 * fixed in commit c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of the glslang
2152 * repo, and has been in GLSLang since revision "SPIRV99.1321" (Jul-2016).
2153 */
2154 vtn_warn("Multiple memory ordering semantics specified, "
2155 "assuming AcquireRelease.");
2156 order_semantics = SpvMemorySemanticsAcquireReleaseMask;
2157 }
2158
2159 const SpvMemorySemanticsMask av_vis_semantics =
2160 semantics & (SpvMemorySemanticsMakeAvailableMask |
2161 SpvMemorySemanticsMakeVisibleMask);
2162
2163 const SpvMemorySemanticsMask storage_semantics =
2164 semantics & (SpvMemorySemanticsUniformMemoryMask |
2165 SpvMemorySemanticsSubgroupMemoryMask |
2166 SpvMemorySemanticsWorkgroupMemoryMask |
2167 SpvMemorySemanticsCrossWorkgroupMemoryMask |
2168 SpvMemorySemanticsAtomicCounterMemoryMask |
2169 SpvMemorySemanticsImageMemoryMask |
2170 SpvMemorySemanticsOutputMemoryMask);
2171
2172 const SpvMemorySemanticsMask other_semantics =
2173 semantics & ~(order_semantics | av_vis_semantics | storage_semantics);
2174
2175 if (other_semantics)
2176 vtn_warn("Ignoring unhandled memory semantics: %u\n", other_semantics);
2177
2178 /* SequentiallyConsistent is treated as AcquireRelease. */
2179
2180 /* The RELEASE barrier happens BEFORE the operation, and it is usually
2181 * associated with a Store. All the write operations with matching
2182 * semantics will not be reordered to after the Store.
2183 */
2184 if (order_semantics & (SpvMemorySemanticsReleaseMask |
2185 SpvMemorySemanticsAcquireReleaseMask |
2186 SpvMemorySemanticsSequentiallyConsistentMask)) {
2187 *before |= SpvMemorySemanticsReleaseMask | storage_semantics;
2188 }
2189
2190 /* The ACQUIRE barrier happens AFTER the operation, and it is usually
2191 * associated with a Load. All the operations with matching semantics
2192 * will not be reordered to before the Load.
2193 */
2194 if (order_semantics & (SpvMemorySemanticsAcquireMask |
2195 SpvMemorySemanticsAcquireReleaseMask |
2196 SpvMemorySemanticsSequentiallyConsistentMask)) {
2197 *after |= SpvMemorySemanticsAcquireMask | storage_semantics;
2198 }
2199
2200 if (av_vis_semantics & SpvMemorySemanticsMakeVisibleMask)
2201 *before |= SpvMemorySemanticsMakeVisibleMask | storage_semantics;
2202
2203 if (av_vis_semantics & SpvMemorySemanticsMakeAvailableMask)
2204 *after |= SpvMemorySemanticsMakeAvailableMask | storage_semantics;
2205 }
2206
2207 static nir_memory_semantics
2208 vtn_mem_semantics_to_nir_mem_semantics(struct vtn_builder *b,
2209 SpvMemorySemanticsMask semantics)
2210 {
2211 nir_memory_semantics nir_semantics = 0;
2212
2213 SpvMemorySemanticsMask order_semantics =
2214 semantics & (SpvMemorySemanticsAcquireMask |
2215 SpvMemorySemanticsReleaseMask |
2216 SpvMemorySemanticsAcquireReleaseMask |
2217 SpvMemorySemanticsSequentiallyConsistentMask);
2218
2219 if (util_bitcount(order_semantics) > 1) {
2220 /* Old GLSLang versions incorrectly set all the ordering bits. This was
2221 * fixed in commit c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of the glslang
2222 * repo, and has been in GLSLang since revision "SPIRV99.1321" (Jul-2016).
2223 */
2224 vtn_warn("Multiple memory ordering semantics bits specified, "
2225 "assuming AcquireRelease.");
2226 order_semantics = SpvMemorySemanticsAcquireReleaseMask;
2227 }
2228
2229 switch (order_semantics) {
2230 case 0:
2231 /* Not an ordering barrier. */
2232 break;
2233
2234 case SpvMemorySemanticsAcquireMask:
2235 nir_semantics = NIR_MEMORY_ACQUIRE;
2236 break;
2237
2238 case SpvMemorySemanticsReleaseMask:
2239 nir_semantics = NIR_MEMORY_RELEASE;
2240 break;
2241
2242 case SpvMemorySemanticsSequentiallyConsistentMask:
2243 /* Fall through. Treated as AcquireRelease in Vulkan. */
2244 case SpvMemorySemanticsAcquireReleaseMask:
2245 nir_semantics = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE;
2246 break;
2247
2248 default:
2249 unreachable("Invalid memory order semantics");
2250 }
2251
2252 if (semantics & SpvMemorySemanticsMakeAvailableMask) {
2253 vtn_fail_if(!b->options->caps.vk_memory_model,
2254 "To use MakeAvailable memory semantics the VulkanMemoryModel "
2255 "capability must be declared.");
2256 nir_semantics |= NIR_MEMORY_MAKE_AVAILABLE;
2257 }
2258
2259 if (semantics & SpvMemorySemanticsMakeVisibleMask) {
2260 vtn_fail_if(!b->options->caps.vk_memory_model,
2261 "To use MakeVisible memory semantics the VulkanMemoryModel "
2262 "capability must be declared.");
2263 nir_semantics |= NIR_MEMORY_MAKE_VISIBLE;
2264 }
2265
2266 return nir_semantics;
2267 }
2268
2269 static nir_variable_mode
2270 vtn_mem_semantics_to_nir_var_modes(struct vtn_builder *b,
2271 SpvMemorySemanticsMask semantics)
2272 {
2273 /* Vulkan Environment for SPIR-V says "SubgroupMemory, CrossWorkgroupMemory,
2274 * and AtomicCounterMemory are ignored".
2275 */
2276 semantics &= ~(SpvMemorySemanticsSubgroupMemoryMask |
2277 SpvMemorySemanticsCrossWorkgroupMemoryMask |
2278 SpvMemorySemanticsAtomicCounterMemoryMask);
2279
2280 /* TODO: Consider adding nir_var_mem_image mode to NIR so it can be used
2281 * for SpvMemorySemanticsImageMemoryMask.
2282 */
2283
2284 nir_variable_mode modes = 0;
2285 if (semantics & (SpvMemorySemanticsUniformMemoryMask |
2286 SpvMemorySemanticsImageMemoryMask)) {
2287 modes |= nir_var_uniform |
2288 nir_var_mem_ubo |
2289 nir_var_mem_ssbo |
2290 nir_var_mem_global;
2291 }
2292 if (semantics & SpvMemorySemanticsWorkgroupMemoryMask)
2293 modes |= nir_var_mem_shared;
2294 if (semantics & SpvMemorySemanticsOutputMemoryMask) {
2295 modes |= nir_var_shader_out;
2296 }
2297
2298 return modes;
2299 }
2300
2301 static nir_scope
2302 vtn_scope_to_nir_scope(struct vtn_builder *b, SpvScope scope)
2303 {
2304 nir_scope nir_scope;
2305 switch (scope) {
2306 case SpvScopeDevice:
2307 vtn_fail_if(b->options->caps.vk_memory_model &&
2308 !b->options->caps.vk_memory_model_device_scope,
2309 "If the Vulkan memory model is declared and any instruction "
2310 "uses Device scope, the VulkanMemoryModelDeviceScope "
2311 "capability must be declared.");
2312 nir_scope = NIR_SCOPE_DEVICE;
2313 break;
2314
2315 case SpvScopeQueueFamily:
2316 vtn_fail_if(!b->options->caps.vk_memory_model,
2317 "To use Queue Family scope, the VulkanMemoryModel capability "
2318 "must be declared.");
2319 nir_scope = NIR_SCOPE_QUEUE_FAMILY;
2320 break;
2321
2322 case SpvScopeWorkgroup:
2323 nir_scope = NIR_SCOPE_WORKGROUP;
2324 break;
2325
2326 case SpvScopeSubgroup:
2327 nir_scope = NIR_SCOPE_SUBGROUP;
2328 break;
2329
2330 case SpvScopeInvocation:
2331 nir_scope = NIR_SCOPE_INVOCATION;
2332 break;
2333
2334 default:
2335 vtn_fail("Invalid memory scope");
2336 }
2337
2338 return nir_scope;
2339 }
2340
2341 static void
2342 vtn_emit_scoped_control_barrier(struct vtn_builder *b, SpvScope exec_scope,
2343 SpvScope mem_scope,
2344 SpvMemorySemanticsMask semantics)
2345 {
2346 nir_memory_semantics nir_semantics =
2347 vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
2348 nir_variable_mode modes = vtn_mem_semantics_to_nir_var_modes(b, semantics);
2349 nir_scope nir_exec_scope = vtn_scope_to_nir_scope(b, exec_scope);
2350
2351 /* Memory semantics is optional for OpControlBarrier. */
2352 nir_scope nir_mem_scope;
2353 if (nir_semantics == 0 || modes == 0)
2354 nir_mem_scope = NIR_SCOPE_NONE;
2355 else
2356 nir_mem_scope = vtn_scope_to_nir_scope(b, mem_scope);
2357
2358 nir_scoped_barrier(&b->nb, nir_exec_scope, nir_mem_scope, nir_semantics, modes);
2359 }
2360
2361 static void
2362 vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
2363 SpvMemorySemanticsMask semantics)
2364 {
2365 nir_variable_mode modes = vtn_mem_semantics_to_nir_var_modes(b, semantics);
2366 nir_memory_semantics nir_semantics =
2367 vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
2368
2369 /* No barrier to add. */
2370 if (nir_semantics == 0 || modes == 0)
2371 return;
2372
2373 nir_scope nir_mem_scope = vtn_scope_to_nir_scope(b, scope);
2374 nir_scoped_barrier(&b->nb, NIR_SCOPE_NONE, nir_mem_scope, nir_semantics, modes);
2375 }
2376
2377 struct vtn_ssa_value *
2378 vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
2379 {
2380 /* Always use bare types for SSA values for a couple of reasons:
2381 *
2382 * 1. Code which emits deref chains should never listen to the explicit
2383 * layout information on the SSA value if any exists. If we've
2384 * accidentally been relying on this, we want to find those bugs.
2385 *
2386 * 2. We want to be able to quickly check that an SSA value being assigned
2387 * to a SPIR-V value has the right type. Using bare types everywhere
2388 * ensures that we can pointer-compare.
2389 */
2390 struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
2391 val->type = glsl_get_bare_type(type);
2392
2393
2394 if (!glsl_type_is_vector_or_scalar(type)) {
2395 unsigned elems = glsl_get_length(val->type);
2396 val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
2397 if (glsl_type_is_array_or_matrix(type)) {
2398 const struct glsl_type *elem_type = glsl_get_array_element(type);
2399 for (unsigned i = 0; i < elems; i++)
2400 val->elems[i] = vtn_create_ssa_value(b, elem_type);
2401 } else {
2402 vtn_assert(glsl_type_is_struct_or_ifc(type));
2403 for (unsigned i = 0; i < elems; i++) {
2404 const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
2405 val->elems[i] = vtn_create_ssa_value(b, elem_type);
2406 }
2407 }
2408 }
2409
2410 return val;
2411 }
2412
2413 static nir_tex_src
2414 vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
2415 {
2416 nir_tex_src src;
2417 src.src = nir_src_for_ssa(vtn_get_nir_ssa(b, index));
2418 src.src_type = type;
2419 return src;
2420 }
2421
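/* Returns the index into w[] of the argument belonging to the given image
 * operand. Arguments follow the operand mask in mask-bit order, so the
 * index is found by counting the argument-carrying operands whose bits are
 * below op, with Grad counted twice because it carries two arguments.
 */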
2422 static uint32_t
2423 image_operand_arg(struct vtn_builder *b, const uint32_t *w, uint32_t count,
2424 uint32_t mask_idx, SpvImageOperandsMask op)
2425 {
2426 static const SpvImageOperandsMask ops_with_arg =
2427 SpvImageOperandsBiasMask |
2428 SpvImageOperandsLodMask |
2429 SpvImageOperandsGradMask |
2430 SpvImageOperandsConstOffsetMask |
2431 SpvImageOperandsOffsetMask |
2432 SpvImageOperandsConstOffsetsMask |
2433 SpvImageOperandsSampleMask |
2434 SpvImageOperandsMinLodMask |
2435 SpvImageOperandsMakeTexelAvailableMask |
2436 SpvImageOperandsMakeTexelVisibleMask;
2437
2438 assert(util_bitcount(op) == 1);
2439 assert(w[mask_idx] & op);
2440 assert(op & ops_with_arg);
2441
2442 uint32_t idx = util_bitcount(w[mask_idx] & (op - 1) & ops_with_arg) + 1;
2443
2444 /* Adjust indices for operands with two arguments. */
2445 static const SpvImageOperandsMask ops_with_two_args =
2446 SpvImageOperandsGradMask;
2447 idx += util_bitcount(w[mask_idx] & (op - 1) & ops_with_two_args);
2448
2449 idx += mask_idx;
2450
2451 vtn_fail_if(idx + (op & ops_with_two_args ? 1 : 0) >= count,
2452 "Image op claims to have %s but does not enough "
2453 "following operands", spirv_imageoperands_to_string(op));
2454
2455 return idx;
2456 }
2457
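/* Decoration callback that ORs ACCESS_NON_UNIFORM into the access
 * qualifier when the value is decorated NonUniformEXT.
 */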
2458 static void
2459 non_uniform_decoration_cb(struct vtn_builder *b,
2460 struct vtn_value *val, int member,
2461 const struct vtn_decoration *dec, void *void_ctx)
2462 {
2463 enum gl_access_qualifier *access = void_ctx;
2464 switch (dec->decoration) {
2465 case SpvDecorationNonUniformEXT:
2466 *access |= ACCESS_NON_UNIFORM;
2467 break;
2468
2469 default:
2470 break;
2471 }
2472 }
2473
2474 static void
2475 vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
2476 const uint32_t *w, unsigned count)
2477 {
2478 struct vtn_type *ret_type = vtn_get_type(b, w[1]);
2479
2480 if (opcode == SpvOpSampledImage) {
2481 struct vtn_sampled_image si = {
2482 .image = vtn_get_image(b, w[3]),
2483 .sampler = vtn_get_sampler(b, w[4]),
2484 };
2485 vtn_push_sampled_image(b, w[2], si);
2486 return;
2487 } else if (opcode == SpvOpImage) {
2488 struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
2489 vtn_push_image(b, w[2], si.image);
2490 return;
2491 }
2492
2493 nir_deref_instr *image = NULL, *sampler = NULL;
2494 struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
2495 if (sampled_val->type->base_type == vtn_base_type_sampled_image) {
2496 struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
2497 image = si.image;
2498 sampler = si.sampler;
2499 } else {
2500 image = vtn_get_image(b, w[3]);
2501 }
2502
2503 const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image->type);
2504 const bool is_array = glsl_sampler_type_is_array(image->type);
2505 nir_alu_type dest_type = nir_type_invalid;
2506
2507 /* Figure out the base texture operation */
2508 nir_texop texop;
2509 switch (opcode) {
2510 case SpvOpImageSampleImplicitLod:
2511 case SpvOpImageSampleDrefImplicitLod:
2512 case SpvOpImageSampleProjImplicitLod:
2513 case SpvOpImageSampleProjDrefImplicitLod:
2514 texop = nir_texop_tex;
2515 break;
2516
2517 case SpvOpImageSampleExplicitLod:
2518 case SpvOpImageSampleDrefExplicitLod:
2519 case SpvOpImageSampleProjExplicitLod:
2520 case SpvOpImageSampleProjDrefExplicitLod:
2521 texop = nir_texop_txl;
2522 break;
2523
2524 case SpvOpImageFetch:
2525 if (sampler_dim == GLSL_SAMPLER_DIM_MS) {
2526 texop = nir_texop_txf_ms;
2527 } else {
2528 texop = nir_texop_txf;
2529 }
2530 break;
2531
2532 case SpvOpImageGather:
2533 case SpvOpImageDrefGather:
2534 texop = nir_texop_tg4;
2535 break;
2536
2537 case SpvOpImageQuerySizeLod:
2538 case SpvOpImageQuerySize:
2539 texop = nir_texop_txs;
2540 dest_type = nir_type_int;
2541 break;
2542
2543 case SpvOpImageQueryLod:
2544 texop = nir_texop_lod;
2545 dest_type = nir_type_float;
2546 break;
2547
2548 case SpvOpImageQueryLevels:
2549 texop = nir_texop_query_levels;
2550 dest_type = nir_type_int;
2551 break;
2552
2553 case SpvOpImageQuerySamples:
2554 texop = nir_texop_texture_samples;
2555 dest_type = nir_type_int;
2556 break;
2557
2558 case SpvOpFragmentFetchAMD:
2559 texop = nir_texop_fragment_fetch;
2560 break;
2561
2562 case SpvOpFragmentMaskFetchAMD:
2563 texop = nir_texop_fragment_mask_fetch;
2564 break;
2565
2566 default:
2567 vtn_fail_with_opcode("Unhandled opcode", opcode);
2568 }
2569
2570 nir_tex_src srcs[10]; /* 10 should be enough */
2571 nir_tex_src *p = srcs;
2572
2573 p->src = nir_src_for_ssa(&image->dest.ssa);
2574 p->src_type = nir_tex_src_texture_deref;
2575 p++;
2576
2577 switch (texop) {
2578 case nir_texop_tex:
2579 case nir_texop_txb:
2580 case nir_texop_txl:
2581 case nir_texop_txd:
2582 case nir_texop_tg4:
2583 case nir_texop_lod:
2584 vtn_fail_if(sampler == NULL,
2585 "%s requires an image of type OpTypeSampledImage",
2586 spirv_op_to_string(opcode));
2587 p->src = nir_src_for_ssa(&sampler->dest.ssa);
2588 p->src_type = nir_tex_src_sampler_deref;
2589 p++;
2590 break;
2591 case nir_texop_txf:
2592 case nir_texop_txf_ms:
2593 case nir_texop_txs:
2594 case nir_texop_query_levels:
2595 case nir_texop_texture_samples:
2596 case nir_texop_samples_identical:
2597 case nir_texop_fragment_fetch:
2598 case nir_texop_fragment_mask_fetch:
2599 /* These don't require a sampler */
2600 break;
2601 case nir_texop_txf_ms_fb:
2602 vtn_fail("unexpected nir_texop_txf_ms_fb");
2603 break;
2604 case nir_texop_txf_ms_mcs:
2605 vtn_fail("unexpected nir_texop_txf_ms_mcs");
2606 case nir_texop_tex_prefetch:
2607 vtn_fail("unexpected nir_texop_tex_prefetch");
2608 }
2609
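/* w[1] is the result type, w[2] the result id, and w[3] the (sampled)
 * image, so the remaining texture arguments start at w[4].
 */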
2610 unsigned idx = 4;
2611
2612 struct nir_ssa_def *coord;
2613 unsigned coord_components;
2614 switch (opcode) {
2615 case SpvOpImageSampleImplicitLod:
2616 case SpvOpImageSampleExplicitLod:
2617 case SpvOpImageSampleDrefImplicitLod:
2618 case SpvOpImageSampleDrefExplicitLod:
2619 case SpvOpImageSampleProjImplicitLod:
2620 case SpvOpImageSampleProjExplicitLod:
2621 case SpvOpImageSampleProjDrefImplicitLod:
2622 case SpvOpImageSampleProjDrefExplicitLod:
2623 case SpvOpImageFetch:
2624 case SpvOpImageGather:
2625 case SpvOpImageDrefGather:
2626 case SpvOpImageQueryLod:
2627 case SpvOpFragmentFetchAMD:
2628 case SpvOpFragmentMaskFetchAMD: {
2629 /* All these types have the coordinate as their first real argument */
2630 coord_components = glsl_get_sampler_dim_coordinate_components(sampler_dim);
2631
2632 if (is_array && texop != nir_texop_lod)
2633 coord_components++;
2634
2635 struct vtn_ssa_value *coord_val = vtn_ssa_value(b, w[idx++]);
2636 coord = coord_val->def;
2637 p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
2638 (1 << coord_components) - 1));
2639
2640 /* OpenCL allows integer sampling coordinates */
2641 if (glsl_type_is_integer(coord_val->type) &&
2642 opcode == SpvOpImageSampleExplicitLod) {
2643 vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
2644 "Unless the Kernel capability is being used, the coordinate parameter "
2645 "OpImageSampleExplicitLod must be floating point.");
2646
2647 p->src = nir_src_for_ssa(nir_i2f32(&b->nb, p->src.ssa));
2648 }
2649
2650 p->src_type = nir_tex_src_coord;
2651 p++;
2652 break;
2653 }
2654
2655 default:
2656 coord = NULL;
2657 coord_components = 0;
2658 break;
2659 }
2660
2661 switch (opcode) {
2662 case SpvOpImageSampleProjImplicitLod:
2663 case SpvOpImageSampleProjExplicitLod:
2664 case SpvOpImageSampleProjDrefImplicitLod:
2665 case SpvOpImageSampleProjDrefExplicitLod:
2666 /* These have the projector as the last coordinate component */
2667 p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
2668 p->src_type = nir_tex_src_projector;
2669 p++;
2670 break;
2671
2672 default:
2673 break;
2674 }
2675
2676 bool is_shadow = false;
2677 unsigned gather_component = 0;
2678 switch (opcode) {
2679 case SpvOpImageSampleDrefImplicitLod:
2680 case SpvOpImageSampleDrefExplicitLod:
2681 case SpvOpImageSampleProjDrefImplicitLod:
2682 case SpvOpImageSampleProjDrefExplicitLod:
2683 case SpvOpImageDrefGather:
2684 /* These all have an explicit depth value as their next source */
2685 is_shadow = true;
2686 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
2687 break;
2688
2689 case SpvOpImageGather:
2690 /* This has a component as its next source */
2691 gather_component = vtn_constant_uint(b, w[idx++]);
2692 break;
2693
2694 default:
2695 break;
2696 }
2697
2698 /* For OpImageQuerySizeLod, we always have an LOD */
2699 if (opcode == SpvOpImageQuerySizeLod)
2700 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
2701
2702 /* For OpFragmentFetchAMD, we always have a multisample index */
2703 if (opcode == SpvOpFragmentFetchAMD)
2704 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
2705
2706 /* Now we need to handle some number of optional arguments */
2707 struct vtn_value *gather_offsets = NULL;
2708 if (idx < count) {
2709 uint32_t operands = w[idx];
2710
2711 if (operands & SpvImageOperandsBiasMask) {
2712 vtn_assert(texop == nir_texop_tex ||
2713 texop == nir_texop_tg4);
2714 if (texop == nir_texop_tex)
2715 texop = nir_texop_txb;
2716 uint32_t arg = image_operand_arg(b, w, count, idx,
2717 SpvImageOperandsBiasMask);
2718 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_bias);
2719 }
2720
2721 if (operands & SpvImageOperandsLodMask) {
2722 vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
2723 texop == nir_texop_txs || texop == nir_texop_tg4);
2724 uint32_t arg = image_operand_arg(b, w, count, idx,
2725 SpvImageOperandsLodMask);
2726 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_lod);
2727 }
2728
2729 if (operands & SpvImageOperandsGradMask) {
2730 vtn_assert(texop == nir_texop_txl);
2731 texop = nir_texop_txd;
2732 uint32_t arg = image_operand_arg(b, w, count, idx,
2733 SpvImageOperandsGradMask);
2734 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ddx);
2735 (*p++) = vtn_tex_src(b, w[arg + 1], nir_tex_src_ddy);
2736 }
2737
2738 vtn_fail_if(util_bitcount(operands & (SpvImageOperandsConstOffsetsMask |
2739 SpvImageOperandsOffsetMask |
2740 SpvImageOperandsConstOffsetMask)) > 1,
2741 "At most one of the ConstOffset, Offset, and ConstOffsets "
2742 "image operands can be used on a given instruction.");
2743
2744 if (operands & SpvImageOperandsOffsetMask) {
2745 uint32_t arg = image_operand_arg(b, w, count, idx,
2746 SpvImageOperandsOffsetMask);
2747 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
2748 }
2749
2750 if (operands & SpvImageOperandsConstOffsetMask) {
2751 uint32_t arg = image_operand_arg(b, w, count, idx,
2752 SpvImageOperandsConstOffsetMask);
2753 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
2754 }
2755
2756 if (operands & SpvImageOperandsConstOffsetsMask) {
2757 vtn_assert(texop == nir_texop_tg4);
2758 uint32_t arg = image_operand_arg(b, w, count, idx,
2759 SpvImageOperandsConstOffsetsMask);
2760 gather_offsets = vtn_value(b, w[arg], vtn_value_type_constant);
2761 }
2762
2763 if (operands & SpvImageOperandsSampleMask) {
2764 vtn_assert(texop == nir_texop_txf_ms);
2765 uint32_t arg = image_operand_arg(b, w, count, idx,
2766 SpvImageOperandsSampleMask);
2767 texop = nir_texop_txf_ms;
2768 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ms_index);
2769 }
2770
2771 if (operands & SpvImageOperandsMinLodMask) {
2772 vtn_assert(texop == nir_texop_tex ||
2773 texop == nir_texop_txb ||
2774 texop == nir_texop_txd);
2775 uint32_t arg = image_operand_arg(b, w, count, idx,
2776 SpvImageOperandsMinLodMask);
2777 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_min_lod);
2778 }
2779 }
2780
2781 nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
2782 instr->op = texop;
2783
2784 memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
2785
2786 instr->coord_components = coord_components;
2787 instr->sampler_dim = sampler_dim;
2788 instr->is_array = is_array;
2789 instr->is_shadow = is_shadow;
2790 instr->is_new_style_shadow =
2791 is_shadow && glsl_get_components(ret_type->type) == 1;
2792 instr->component = gather_component;
2793
2794 /* The Vulkan spec says:
2795 *
2796 * "If an instruction loads from or stores to a resource (including
2797 * atomics and image instructions) and the resource descriptor being
2798 * accessed is not dynamically uniform, then the operand corresponding
2799 * to that resource (e.g. the pointer or sampled image operand) must be
2800 * decorated with NonUniform."
2801 *
2802 * It's very careful to specify that the exact operand must be decorated
2803 * NonUniform. The SPIR-V parser is not expected to chase through long
2804 * chains to find the NonUniform decoration. It's either right there or we
2805 * can assume it doesn't exist.
2806 */
2807 enum gl_access_qualifier access = 0;
2808 vtn_foreach_decoration(b, sampled_val, non_uniform_decoration_cb, &access);
2809
2810 if (image && (access & ACCESS_NON_UNIFORM))
2811 instr->texture_non_uniform = true;
2812
2813 if (sampler && (access & ACCESS_NON_UNIFORM))
2814 instr->sampler_non_uniform = true;
2815
2816 /* for non-query ops, get dest_type from SPIR-V return type */
2817 if (dest_type == nir_type_invalid) {
2818 /* The return type should match the image type, unless the image type is
2819 * VOID (CL image), in which case the return type dictates the result.
2820 */
2821 enum glsl_base_type sampler_base =
2822 glsl_get_sampler_result_type(image->type);
2823 enum glsl_base_type ret_base = glsl_get_base_type(ret_type->type);
2824 vtn_fail_if(sampler_base != ret_base && sampler_base != GLSL_TYPE_VOID,
2825 "SPIR-V return type mismatches image type. This is only valid "
2826 "for untyped images (OpenCL).");
2827 switch (ret_base) {
2828 case GLSL_TYPE_FLOAT: dest_type = nir_type_float; break;
2829 case GLSL_TYPE_INT: dest_type = nir_type_int; break;
2830 case GLSL_TYPE_UINT: dest_type = nir_type_uint; break;
2831 case GLSL_TYPE_BOOL: dest_type = nir_type_bool; break;
2832 default:
2833 vtn_fail("Invalid base type for sampler result");
2834 }
2835 }
2836
2837 instr->dest_type = dest_type;
2838
2839 nir_ssa_dest_init(&instr->instr, &instr->dest,
2840 nir_tex_instr_dest_size(instr), 32, NULL);
2841
2842 vtn_assert(glsl_get_vector_elements(ret_type->type) ==
2843 nir_tex_instr_dest_size(instr));
2844
2845 if (gather_offsets) {
2846 vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array ||
2847 gather_offsets->type->length != 4,
2848 "ConstOffsets must be an array of size four of vectors "
2849 "of two integer components");
2850
2851 struct vtn_type *vec_type = gather_offsets->type->array_element;
2852 vtn_fail_if(vec_type->base_type != vtn_base_type_vector ||
2853 vec_type->length != 2 ||
2854 !glsl_type_is_integer(vec_type->type),
2855 "ConstOffsets must be an array of size four of vectors "
2856 "of two integer components");
2857
2858 unsigned bit_size = glsl_get_bit_size(vec_type->type);
2859 for (uint32_t i = 0; i < 4; i++) {
2860 const nir_const_value *cvec =
2861 gather_offsets->constant->elements[i]->values;
2862 for (uint32_t j = 0; j < 2; j++) {
2863 switch (bit_size) {
2864 case 8: instr->tg4_offsets[i][j] = cvec[j].i8; break;
2865 case 16: instr->tg4_offsets[i][j] = cvec[j].i16; break;
2866 case 32: instr->tg4_offsets[i][j] = cvec[j].i32; break;
2867 case 64: instr->tg4_offsets[i][j] = cvec[j].i64; break;
2868 default:
2869 vtn_fail("Unsupported bit size: %u", bit_size);
2870 }
2871 }
2872 }
2873 }
2874
2875 nir_builder_instr_insert(&b->nb, &instr->instr);
2876
2877 vtn_push_nir_ssa(b, w[2], &instr->dest.ssa);
2878 }
2879
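/* Gathers the extra data sources shared by the atomic intrinsics.
 * IIncrement, IDecrement and ISub are expressed as atomic adds of 1, -1
 * and the negated operand, and compare-exchange passes the comparator
 * (w[8]) before the new value (w[7]) to match the NIR source order.
 */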
2880 static void
2881 fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
2882 const uint32_t *w, nir_src *src)
2883 {
2884 switch (opcode) {
2885 case SpvOpAtomicIIncrement:
2886 src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
2887 break;
2888
2889 case SpvOpAtomicIDecrement:
2890 src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
2891 break;
2892
2893 case SpvOpAtomicISub:
2894 src[0] =
2895 nir_src_for_ssa(nir_ineg(&b->nb, vtn_get_nir_ssa(b, w[6])));
2896 break;
2897
2898 case SpvOpAtomicCompareExchange:
2899 case SpvOpAtomicCompareExchangeWeak:
2900 src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[8]));
2901 src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[7]));
2902 break;
2903
2904 case SpvOpAtomicExchange:
2905 case SpvOpAtomicIAdd:
2906 case SpvOpAtomicSMin:
2907 case SpvOpAtomicUMin:
2908 case SpvOpAtomicSMax:
2909 case SpvOpAtomicUMax:
2910 case SpvOpAtomicAnd:
2911 case SpvOpAtomicOr:
2912 case SpvOpAtomicXor:
2913 case SpvOpAtomicFAddEXT:
2914 src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[6]));
2915 break;
2916
2917 default:
2918 vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
2919 }
2920 }
2921
2922 static nir_ssa_def *
2923 get_image_coord(struct vtn_builder *b, uint32_t value)
2924 {
2925 nir_ssa_def *coord = vtn_get_nir_ssa(b, value);
2926
2927 /* The image_load_store intrinsics assume a 4-dim coordinate */
2928 unsigned swizzle[4];
2929 for (unsigned i = 0; i < 4; i++)
2930 swizzle[i] = MIN2(i, coord->num_components - 1);
2931
2932 return nir_swizzle(&b->nb, coord, swizzle, 4);
2933 }
2934
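/* Pads a value out to four components by repeating component zero; the
 * image intrinsics always take vec4 coordinates and values.
 */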
2935 static nir_ssa_def *
2936 expand_to_vec4(nir_builder *b, nir_ssa_def *value)
2937 {
2938 if (value->num_components == 4)
2939 return value;
2940
2941 unsigned swiz[4];
2942 for (unsigned i = 0; i < 4; i++)
2943 swiz[i] = i < value->num_components ? i : 0;
2944 return nir_swizzle(b, value, swiz, 4);
2945 }
2946
2947 static void
2948 vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
2949 const uint32_t *w, unsigned count)
2950 {
2951 /* Just get this one out of the way */
2952 if (opcode == SpvOpImageTexelPointer) {
2953 struct vtn_value *val =
2954 vtn_push_value(b, w[2], vtn_value_type_image_pointer);
2955 val->image = ralloc(b, struct vtn_image_pointer);
2956
2957 val->image->image = vtn_nir_deref(b, w[3]);
2958 val->image->coord = get_image_coord(b, w[4]);
2959 val->image->sample = vtn_get_nir_ssa(b, w[5]);
2960 val->image->lod = nir_imm_int(&b->nb, 0);
2961 return;
2962 }
2963
2964 struct vtn_image_pointer image;
2965 SpvScope scope = SpvScopeInvocation;
2966 SpvMemorySemanticsMask semantics = 0;
2967
2968 enum gl_access_qualifier access = 0;
2969
2970 struct vtn_value *res_val;
2971 switch (opcode) {
2972 case SpvOpAtomicExchange:
2973 case SpvOpAtomicCompareExchange:
2974 case SpvOpAtomicCompareExchangeWeak:
2975 case SpvOpAtomicIIncrement:
2976 case SpvOpAtomicIDecrement:
2977 case SpvOpAtomicIAdd:
2978 case SpvOpAtomicISub:
2979 case SpvOpAtomicLoad:
2980 case SpvOpAtomicSMin:
2981 case SpvOpAtomicUMin:
2982 case SpvOpAtomicSMax:
2983 case SpvOpAtomicUMax:
2984 case SpvOpAtomicAnd:
2985 case SpvOpAtomicOr:
2986 case SpvOpAtomicXor:
2987 case SpvOpAtomicFAddEXT:
2988 res_val = vtn_value(b, w[3], vtn_value_type_image_pointer);
2989 image = *res_val->image;
2990 scope = vtn_constant_uint(b, w[4]);
2991 semantics = vtn_constant_uint(b, w[5]);
2992 access |= ACCESS_COHERENT;
2993 break;
2994
2995 case SpvOpAtomicStore:
2996 res_val = vtn_value(b, w[1], vtn_value_type_image_pointer);
2997 image = *res_val->image;
2998 scope = vtn_constant_uint(b, w[2]);
2999 semantics = vtn_constant_uint(b, w[3]);
3000 access |= ACCESS_COHERENT;
3001 break;
3002
3003 case SpvOpImageQuerySize:
3004 res_val = vtn_untyped_value(b, w[3]);
3005 image.image = vtn_get_image(b, w[3]);
3006 image.coord = NULL;
3007 image.sample = NULL;
3008 image.lod = NULL;
3009 break;
3010
3011 case SpvOpImageRead: {
3012 res_val = vtn_untyped_value(b, w[3]);
3013 image.image = vtn_get_image(b, w[3]);
3014 image.coord = get_image_coord(b, w[4]);
3015
3016 const SpvImageOperandsMask operands =
3017 count > 5 ? w[5] : SpvImageOperandsMaskNone;
3018
3019 if (operands & SpvImageOperandsSampleMask) {
3020 uint32_t arg = image_operand_arg(b, w, count, 5,
3021 SpvImageOperandsSampleMask);
3022 image.sample = vtn_get_nir_ssa(b, w[arg]);
3023 } else {
3024 image.sample = nir_ssa_undef(&b->nb, 1, 32);
3025 }
3026
3027 if (operands & SpvImageOperandsMakeTexelVisibleMask) {
3028 vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
3029 "MakeTexelVisible requires NonPrivateTexel to also be set.");
3030 uint32_t arg = image_operand_arg(b, w, count, 5,
3031 SpvImageOperandsMakeTexelVisibleMask);
3032 semantics = SpvMemorySemanticsMakeVisibleMask;
3033 scope = vtn_constant_uint(b, w[arg]);
3034 }
3035
3036 if (operands & SpvImageOperandsLodMask) {
3037 uint32_t arg = image_operand_arg(b, w, count, 5,
3038 SpvImageOperandsLodMask);
3039 image.lod = vtn_get_nir_ssa(b, w[arg]);
3040 } else {
3041 image.lod = nir_imm_int(&b->nb, 0);
3042 }
3043
3044 /* TODO: Volatile. */
3045
3046 break;
3047 }
3048
3049 case SpvOpImageWrite: {
3050 res_val = vtn_untyped_value(b, w[1]);
3051 image.image = vtn_get_image(b, w[1]);
3052 image.coord = get_image_coord(b, w[2]);
3053
3054 /* texel = w[3] */
3055
3056 const SpvImageOperandsMask operands =
3057 count > 4 ? w[4] : SpvImageOperandsMaskNone;
3058
3059 if (operands & SpvImageOperandsSampleMask) {
3060 uint32_t arg = image_operand_arg(b, w, count, 4,
3061 SpvImageOperandsSampleMask);
3062 image.sample = vtn_get_nir_ssa(b, w[arg]);
3063 } else {
3064 image.sample = nir_ssa_undef(&b->nb, 1, 32);
3065 }
3066
3067 if (operands & SpvImageOperandsMakeTexelAvailableMask) {
3068 vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
3069 "MakeTexelAvailable requires NonPrivateTexel to also be set.");
3070 uint32_t arg = image_operand_arg(b, w, count, 4,
3071 SpvImageOperandsMakeTexelAvailableMask);
3072 semantics = SpvMemorySemanticsMakeAvailableMask;
3073 scope = vtn_constant_uint(b, w[arg]);
3074 }
3075
3076 if (operands & SpvImageOperandsLodMask) {
3077 uint32_t arg = image_operand_arg(b, w, count, 4,
3078 SpvImageOperandsLodMask);
3079 image.lod = vtn_get_nir_ssa(b, w[arg]);
3080 } else {
3081 image.lod = nir_imm_int(&b->nb, 0);
3082 }
3083
3084 /* TODO: Volatile. */
3085
3086 break;
3087 }
3088
3089 default:
3090 vtn_fail_with_opcode("Invalid image opcode", opcode);
3091 }
3092
3093 nir_intrinsic_op op;
3094 switch (opcode) {
3095 #define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
3096 OP(ImageQuerySize, size)
3097 OP(ImageRead, load)
3098 OP(ImageWrite, store)
3099 OP(AtomicLoad, load)
3100 OP(AtomicStore, store)
3101 OP(AtomicExchange, atomic_exchange)
3102 OP(AtomicCompareExchange, atomic_comp_swap)
3103 OP(AtomicCompareExchangeWeak, atomic_comp_swap)
3104 OP(AtomicIIncrement, atomic_add)
3105 OP(AtomicIDecrement, atomic_add)
3106 OP(AtomicIAdd, atomic_add)
3107 OP(AtomicISub, atomic_add)
3108 OP(AtomicSMin, atomic_imin)
3109 OP(AtomicUMin, atomic_umin)
3110 OP(AtomicSMax, atomic_imax)
3111 OP(AtomicUMax, atomic_umax)
3112 OP(AtomicAnd, atomic_and)
3113 OP(AtomicOr, atomic_or)
3114 OP(AtomicXor, atomic_xor)
3115 OP(AtomicFAddEXT, atomic_fadd)
3116 #undef OP
3117 default:
3118 vtn_fail_with_opcode("Invalid image opcode", opcode);
3119 }
3120
3121 nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
3122
3123 intrin->src[0] = nir_src_for_ssa(&image.image->dest.ssa);
3124
3125 if (opcode == SpvOpImageQuerySize) {
3126 /* ImageQuerySize only has an LOD which is currently always 0 */
3127 intrin->src[1] = nir_src_for_ssa(nir_imm_int(&b->nb, 0));
3128 } else {
3129 /* The image coordinate is always 4 components but we may not have that
3130 * many. Swizzle to compensate.
3131 */
3132 intrin->src[1] = nir_src_for_ssa(expand_to_vec4(&b->nb, image.coord));
3133 intrin->src[2] = nir_src_for_ssa(image.sample);
3134 }
3135
3136 /* The Vulkan spec says:
3137 *
3138 * "If an instruction loads from or stores to a resource (including
3139 * atomics and image instructions) and the resource descriptor being
3140 * accessed is not dynamically uniform, then the operand corresponding
3141 * to that resource (e.g. the pointer or sampled image operand) must be
3142 * decorated with NonUniform."
3143 *
3144 * It's very careful to specify that the exact operand must be decorated
3145 * NonUniform. The SPIR-V parser is not expected to chase through long
3146 * chains to find the NonUniform decoration. It's either right there or we
3147 * can assume it doesn't exist.
3148 */
3149 vtn_foreach_decoration(b, res_val, non_uniform_decoration_cb, &access);
3150 nir_intrinsic_set_access(intrin, access);
3151
3152 switch (opcode) {
3153 case SpvOpAtomicLoad:
3154 case SpvOpImageQuerySize:
3155 case SpvOpImageRead:
3156 if (opcode == SpvOpImageRead || opcode == SpvOpAtomicLoad) {
3157 /* Only OpImageRead can take an LOD parameter (when
3158 * SPV_AMD_shader_image_load_store_lod is used), but the current NIR
3159 * intrinsics definition for atomics requires us to set it for
3160 * OpAtomicLoad as well.
3161 */
3162 intrin->src[3] = nir_src_for_ssa(image.lod);
3163 }
3164 break;
3165 case SpvOpAtomicStore:
3166 case SpvOpImageWrite: {
3167 const uint32_t value_id = opcode == SpvOpAtomicStore ? w[4] : w[3];
3168 struct vtn_ssa_value *value = vtn_ssa_value(b, value_id);
3169 /* nir_intrinsic_image_deref_store always takes a vec4 value */
3170 assert(op == nir_intrinsic_image_deref_store);
3171 intrin->num_components = 4;
3172 intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value->def));
3173 /* Only OpImageWrite can take an LOD parameter (when
3174 * SPV_AMD_shader_image_load_store_lod is used), but the current NIR
3175 * intrinsics definition for atomics requires us to set it for
3176 * OpAtomicStore as well.
3177 */
3178 intrin->src[4] = nir_src_for_ssa(image.lod);
3179
3180 if (opcode == SpvOpImageWrite)
3181 nir_intrinsic_set_type(intrin, nir_get_nir_type_for_glsl_type(value->type));
3182 break;
3183 }
3184
3185 case SpvOpAtomicCompareExchange:
3186 case SpvOpAtomicCompareExchangeWeak:
3187 case SpvOpAtomicIIncrement:
3188 case SpvOpAtomicIDecrement:
3189 case SpvOpAtomicExchange:
3190 case SpvOpAtomicIAdd:
3191 case SpvOpAtomicISub:
3192 case SpvOpAtomicSMin:
3193 case SpvOpAtomicUMin:
3194 case SpvOpAtomicSMax:
3195 case SpvOpAtomicUMax:
3196 case SpvOpAtomicAnd:
3197 case SpvOpAtomicOr:
3198 case SpvOpAtomicXor:
3199 case SpvOpAtomicFAddEXT:
3200 fill_common_atomic_sources(b, opcode, w, &intrin->src[3]);
3201 break;
3202
3203 default:
3204 vtn_fail_with_opcode("Invalid image opcode", opcode);
3205 }
3206
3207 /* Image operations implicitly have the Image storage memory semantics. */
3208 semantics |= SpvMemorySemanticsImageMemoryMask;
3209
3210 SpvMemorySemanticsMask before_semantics;
3211 SpvMemorySemanticsMask after_semantics;
3212 vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);
3213
3214 if (before_semantics)
3215 vtn_emit_memory_barrier(b, scope, before_semantics);
3216
3217 if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
3218 struct vtn_type *type = vtn_get_type(b, w[1]);
3219
3220 unsigned dest_components = glsl_get_vector_elements(type->type);
3221 if (nir_intrinsic_infos[op].dest_components == 0)
3222 intrin->num_components = dest_components;
3223
3224 nir_ssa_dest_init(&intrin->instr, &intrin->dest,
3225 nir_intrinsic_dest_components(intrin), 32, NULL);
3226
3227 nir_builder_instr_insert(&b->nb, &intrin->instr);
3228
3229 nir_ssa_def *result = &intrin->dest.ssa;
3230 if (nir_intrinsic_dest_components(intrin) != dest_components)
3231 result = nir_channels(&b->nb, result, (1 << dest_components) - 1);
3232
3233 vtn_push_nir_ssa(b, w[2], result);
3234
3235 if (opcode == SpvOpImageRead)
3236 nir_intrinsic_set_type(intrin, nir_get_nir_type_for_glsl_type(type->type));
3237 } else {
3238 nir_builder_instr_insert(&b->nb, &intrin->instr);
3239 }
3240
3241 if (after_semantics)
3242 vtn_emit_memory_barrier(b, scope, after_semantics);
3243 }
3244
3245 static nir_intrinsic_op
3246 get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
3247 {
3248 switch (opcode) {
3249 case SpvOpAtomicLoad: return nir_intrinsic_load_ssbo;
3250 case SpvOpAtomicStore: return nir_intrinsic_store_ssbo;
3251 #define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
3252 OP(AtomicExchange, atomic_exchange)
3253 OP(AtomicCompareExchange, atomic_comp_swap)
3254 OP(AtomicCompareExchangeWeak, atomic_comp_swap)
3255 OP(AtomicIIncrement, atomic_add)
3256 OP(AtomicIDecrement, atomic_add)
3257 OP(AtomicIAdd, atomic_add)
3258 OP(AtomicISub, atomic_add)
3259 OP(AtomicSMin, atomic_imin)
3260 OP(AtomicUMin, atomic_umin)
3261 OP(AtomicSMax, atomic_imax)
3262 OP(AtomicUMax, atomic_umax)
3263 OP(AtomicAnd, atomic_and)
3264 OP(AtomicOr, atomic_or)
3265 OP(AtomicXor, atomic_xor)
3266 OP(AtomicFAddEXT, atomic_fadd)
3267 #undef OP
3268 default:
3269 vtn_fail_with_opcode("Invalid SSBO atomic", opcode);
3270 }
3271 }
3272
3273 static nir_intrinsic_op
3274 get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
3275 {
3276 switch (opcode) {
3277 #define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
3278 OP(AtomicLoad, read_deref)
3279 OP(AtomicExchange, exchange)
3280 OP(AtomicCompareExchange, comp_swap)
3281 OP(AtomicCompareExchangeWeak, comp_swap)
3282 OP(AtomicIIncrement, inc_deref)
3283 OP(AtomicIDecrement, post_dec_deref)
3284 OP(AtomicIAdd, add_deref)
3285 OP(AtomicISub, add_deref)
3286 OP(AtomicUMin, min_deref)
3287 OP(AtomicUMax, max_deref)
3288 OP(AtomicAnd, and_deref)
3289 OP(AtomicOr, or_deref)
3290 OP(AtomicXor, xor_deref)
3291 #undef OP
3292 default:
3293 /* We left the following out: AtomicStore, AtomicSMin and
3294 * AtomicSMax. Right now there are no NIR intrinsics for them. At the
3295 * moment atomic counter support is only needed for ARB_gl_spirv, so we
3296 * only need to support GLSL atomic counters, which are uints and don't
3297 * allow direct storage.
3298 */
3299 vtn_fail("Invalid uniform atomic");
3300 }
3301 }
3302
3303 static nir_intrinsic_op
3304 get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
3305 {
3306 switch (opcode) {
3307 case SpvOpAtomicLoad: return nir_intrinsic_load_deref;
3308 case SpvOpAtomicStore: return nir_intrinsic_store_deref;
3309 #define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
3310 OP(AtomicExchange, atomic_exchange)
3311 OP(AtomicCompareExchange, atomic_comp_swap)
3312 OP(AtomicCompareExchangeWeak, atomic_comp_swap)
3313 OP(AtomicIIncrement, atomic_add)
3314 OP(AtomicIDecrement, atomic_add)
3315 OP(AtomicIAdd, atomic_add)
3316 OP(AtomicISub, atomic_add)
3317 OP(AtomicSMin, atomic_imin)
3318 OP(AtomicUMin, atomic_umin)
3319 OP(AtomicSMax, atomic_imax)
3320 OP(AtomicUMax, atomic_umax)
3321 OP(AtomicAnd, atomic_and)
3322 OP(AtomicOr, atomic_or)
3323 OP(AtomicXor, atomic_xor)
3324 OP(AtomicFAddEXT, atomic_fadd)
3325 #undef OP
3326 default:
3327 vtn_fail_with_opcode("Invalid shared atomic", opcode);
3328 }
3329 }
3330
3331 /*
3332 * Handles shared atomics, SSBO atomics and atomic counters.
3333 */
3334 static void
3335 vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
3336 const uint32_t *w, UNUSED unsigned count)
3337 {
3338 struct vtn_pointer *ptr;
3339 nir_intrinsic_instr *atomic;
3340
3341 SpvScope scope = SpvScopeInvocation;
3342 SpvMemorySemanticsMask semantics = 0;
3343
3344 switch (opcode) {
3345 case SpvOpAtomicLoad:
3346 case SpvOpAtomicExchange:
3347 case SpvOpAtomicCompareExchange:
3348 case SpvOpAtomicCompareExchangeWeak:
3349 case SpvOpAtomicIIncrement:
3350 case SpvOpAtomicIDecrement:
3351 case SpvOpAtomicIAdd:
3352 case SpvOpAtomicISub:
3353 case SpvOpAtomicSMin:
3354 case SpvOpAtomicUMin:
3355 case SpvOpAtomicSMax:
3356 case SpvOpAtomicUMax:
3357 case SpvOpAtomicAnd:
3358 case SpvOpAtomicOr:
3359 case SpvOpAtomicXor:
3360 case SpvOpAtomicFAddEXT:
3361 ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
3362 scope = vtn_constant_uint(b, w[4]);
3363 semantics = vtn_constant_uint(b, w[5]);
3364 break;
3365
3366 case SpvOpAtomicStore:
3367 ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
3368 scope = vtn_constant_uint(b, w[2]);
3369 semantics = vtn_constant_uint(b, w[3]);
3370 break;
3371
3372 default:
3373 vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
3374 }
3375
3376 /* "uniform" here means an atomic counter uniform */
3377 if (ptr->mode == vtn_variable_mode_atomic_counter) {
3378 nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
3379 nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
3380 atomic = nir_intrinsic_instr_create(b->nb.shader, op);
3381 atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
3382
3383 /* SSBO atomics need their index/offset sources initialized. Here we
3384 * don't need to, as that info is already stored on the ptr->var->var
3385 * nir_variable (see vtn_create_variable).
3386 */
3387
3388 switch (opcode) {
3389 case SpvOpAtomicLoad:
3390 case SpvOpAtomicExchange:
3391 case SpvOpAtomicCompareExchange:
3392 case SpvOpAtomicCompareExchangeWeak:
3393 case SpvOpAtomicIIncrement:
3394 case SpvOpAtomicIDecrement:
3395 case SpvOpAtomicIAdd:
3396 case SpvOpAtomicISub:
3397 case SpvOpAtomicSMin:
3398 case SpvOpAtomicUMin:
3399 case SpvOpAtomicSMax:
3400 case SpvOpAtomicUMax:
3401 case SpvOpAtomicAnd:
3402 case SpvOpAtomicOr:
3403 case SpvOpAtomicXor:
3404 /* Nothing to do: we don't need to call fill_common_atomic_sources
3405 * here, as atomic counter uniforms don't have sources.
3406 */
3407 break;
3408
3409 default:
3410 unreachable("Invalid SPIR-V atomic");
3411
3412 }
3413 } else if (vtn_pointer_uses_ssa_offset(b, ptr)) {
3414 nir_ssa_def *offset, *index;
3415 offset = vtn_pointer_to_offset(b, ptr, &index);
3416
3417 assert(ptr->mode == vtn_variable_mode_ssbo);
3418
3419 nir_intrinsic_op op = get_ssbo_nir_atomic_op(b, opcode);
3420 atomic = nir_intrinsic_instr_create(b->nb.shader, op);
3421
3422 nir_intrinsic_set_access(atomic, ACCESS_COHERENT);
3423
3424 int src = 0;
3425 switch (opcode) {
3426 case SpvOpAtomicLoad:
3427 atomic->num_components = glsl_get_vector_elements(ptr->type->type);
3428 nir_intrinsic_set_align(atomic, 4, 0);
3429 if (ptr->mode == vtn_variable_mode_ssbo)
3430 atomic->src[src++] = nir_src_for_ssa(index);
3431 atomic->src[src++] = nir_src_for_ssa(offset);
3432 break;
3433
3434 case SpvOpAtomicStore:
3435 atomic->num_components = glsl_get_vector_elements(ptr->type->type);
3436 nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
3437 nir_intrinsic_set_align(atomic, 4, 0);
3438 atomic->src[src++] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[4]));
3439 if (ptr->mode == vtn_variable_mode_ssbo)
3440 atomic->src[src++] = nir_src_for_ssa(index);
3441 atomic->src[src++] = nir_src_for_ssa(offset);
3442 break;
3443
3444 case SpvOpAtomicExchange:
3445 case SpvOpAtomicCompareExchange:
3446 case SpvOpAtomicCompareExchangeWeak:
3447 case SpvOpAtomicIIncrement:
3448 case SpvOpAtomicIDecrement:
3449 case SpvOpAtomicIAdd:
3450 case SpvOpAtomicISub:
3451 case SpvOpAtomicSMin:
3452 case SpvOpAtomicUMin:
3453 case SpvOpAtomicSMax:
3454 case SpvOpAtomicUMax:
3455 case SpvOpAtomicAnd:
3456 case SpvOpAtomicOr:
3457 case SpvOpAtomicXor:
3458 case SpvOpAtomicFAddEXT:
3459 if (ptr->mode == vtn_variable_mode_ssbo)
3460 atomic->src[src++] = nir_src_for_ssa(index);
3461 atomic->src[src++] = nir_src_for_ssa(offset);
3462 fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
3463 break;
3464
3465 default:
3466 vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
3467 }
3468 } else {
3469 nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
3470 const struct glsl_type *deref_type = deref->type;
3471 nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
3472 atomic = nir_intrinsic_instr_create(b->nb.shader, op);
3473 atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
3474
3475 if (ptr->mode != vtn_variable_mode_workgroup)
3476 nir_intrinsic_set_access(atomic, ACCESS_COHERENT);
3477
3478 switch (opcode) {
3479 case SpvOpAtomicLoad:
3480 atomic->num_components = glsl_get_vector_elements(deref_type);
3481 break;
3482
3483 case SpvOpAtomicStore:
3484 atomic->num_components = glsl_get_vector_elements(deref_type);
3485 nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
3486 atomic->src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[4]));
3487 break;
3488
3489 case SpvOpAtomicExchange:
3490 case SpvOpAtomicCompareExchange:
3491 case SpvOpAtomicCompareExchangeWeak:
3492 case SpvOpAtomicIIncrement:
3493 case SpvOpAtomicIDecrement:
3494 case SpvOpAtomicIAdd:
3495 case SpvOpAtomicISub:
3496 case SpvOpAtomicSMin:
3497 case SpvOpAtomicUMin:
3498 case SpvOpAtomicSMax:
3499 case SpvOpAtomicUMax:
3500 case SpvOpAtomicAnd:
3501 case SpvOpAtomicOr:
3502 case SpvOpAtomicXor:
3503 case SpvOpAtomicFAddEXT:
3504 fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
3505 break;
3506
3507 default:
3508 vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
3509 }
3510 }
3511
3512    /* The atomic ordering implicitly applies to the storage class of the
3513     * atomic operation itself, so include that storage class's semantics too.
3514 */
3515 semantics |= vtn_storage_class_to_memory_semantics(ptr->ptr_type->storage_class);
3516
3517 SpvMemorySemanticsMask before_semantics;
3518 SpvMemorySemanticsMask after_semantics;
3519 vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);
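   /* A rough sketch of the split (assuming the usual acquire/release mapping):
    * the Release half of e.g. AcquireRelease semantics lands in
    * before_semantics and is emitted as a barrier before the atomic, while
    * the Acquire half goes in after_semantics and is emitted after it.
    */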
3520
3521 if (before_semantics)
3522 vtn_emit_memory_barrier(b, scope, before_semantics);
3523
3524 if (opcode != SpvOpAtomicStore) {
3525 struct vtn_type *type = vtn_get_type(b, w[1]);
3526
3527 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
3528 glsl_get_vector_elements(type->type),
3529 glsl_get_bit_size(type->type), NULL);
3530
3531 vtn_push_nir_ssa(b, w[2], &atomic->dest.ssa);
3532 }
3533
3534 nir_builder_instr_insert(&b->nb, &atomic->instr);
3535
3536 if (after_semantics)
3537 vtn_emit_memory_barrier(b, scope, after_semantics);
3538 }
3539
3540 static nir_alu_instr *
3541 create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
3542 {
3543 nir_op op = nir_op_vec(num_components);
3544 nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
3545 nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
3546 bit_size, NULL);
3547 vec->dest.write_mask = (1 << num_components) - 1;
3548
3549 return vec;
3550 }
3551
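/* Illustration: the transpose below gathers components, so component j of
 * destination column i is component i of source column j; the result is
 * cached on src->transposed for reuse.
 */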
3552 struct vtn_ssa_value *
3553 vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
3554 {
3555 if (src->transposed)
3556 return src->transposed;
3557
3558 struct vtn_ssa_value *dest =
3559 vtn_create_ssa_value(b, glsl_transposed_type(src->type));
3560
3561 for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
3562 nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type),
3563 glsl_get_bit_size(src->type));
3564 if (glsl_type_is_vector_or_scalar(src->type)) {
3565 vec->src[0].src = nir_src_for_ssa(src->def);
3566 vec->src[0].swizzle[0] = i;
3567 } else {
3568 for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
3569 vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
3570 vec->src[j].swizzle[0] = i;
3571 }
3572 }
3573 nir_builder_instr_insert(&b->nb, &vec->instr);
3574 dest->elems[i]->def = &vec->dest.dest.ssa;
3575 }
3576
3577 dest->transposed = src;
3578
3579 return dest;
3580 }
3581
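/* Illustration: with src0 = vec4 a and src1 = vec4 b, indices
 * {4, 1, 0xffffffff, 5} select b.x, a.y, an undefined component, and b.y
 * (0xffffffff is the SPIR-V literal for an undefined shuffle component).
 */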
3582 static nir_ssa_def *
3583 vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
3584 nir_ssa_def *src0, nir_ssa_def *src1,
3585 const uint32_t *indices)
3586 {
3587 nir_alu_instr *vec = create_vec(b, num_components, src0->bit_size);
3588
3589 for (unsigned i = 0; i < num_components; i++) {
3590 uint32_t index = indices[i];
3591 if (index == 0xffffffff) {
3592 vec->src[i].src =
3593 nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
3594 } else if (index < src0->num_components) {
3595 vec->src[i].src = nir_src_for_ssa(src0);
3596 vec->src[i].swizzle[0] = index;
3597 } else {
3598 vec->src[i].src = nir_src_for_ssa(src1);
3599 vec->src[i].swizzle[0] = index - src0->num_components;
3600 }
3601 }
3602
3603 nir_builder_instr_insert(&b->nb, &vec->instr);
3604
3605 return &vec->dest.dest.ssa;
3606 }
3607
3608 /*
3609  * Concatenates a number of vectors/scalars together to produce a vector.
3610 */
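/* Illustration: OpCompositeConstruct %v4float %v2 %x %y expands its
 * constituents component-wise, so dest_idx below walks 0..3 copying
 * v2.x, v2.y, x, and y in that order.
 */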
3611 static nir_ssa_def *
3612 vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
3613 unsigned num_srcs, nir_ssa_def **srcs)
3614 {
3615 nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);
3616
3617 /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
3618 *
3619 * "When constructing a vector, there must be at least two Constituent
3620 * operands."
3621 */
3622 vtn_assert(num_srcs >= 2);
3623
3624 unsigned dest_idx = 0;
3625 for (unsigned i = 0; i < num_srcs; i++) {
3626 nir_ssa_def *src = srcs[i];
3627 vtn_assert(dest_idx + src->num_components <= num_components);
3628 for (unsigned j = 0; j < src->num_components; j++) {
3629 vec->src[dest_idx].src = nir_src_for_ssa(src);
3630 vec->src[dest_idx].swizzle[0] = j;
3631 dest_idx++;
3632 }
3633 }
3634
3635 /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
3636 *
3637 * "When constructing a vector, the total number of components in all
3638 * the operands must equal the number of components in Result Type."
3639 */
3640 vtn_assert(dest_idx == num_components);
3641
3642 nir_builder_instr_insert(&b->nb, &vec->instr);
3643
3644 return &vec->dest.dest.ssa;
3645 }
3646
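/* Note: this is a deep copy of the vtn_ssa_value tree only; the leaf
 * nir_ssa_def handles are shared with the source rather than duplicated.
 */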
3647 static struct vtn_ssa_value *
3648 vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
3649 {
3650 struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
3651 dest->type = src->type;
3652
3653 if (glsl_type_is_vector_or_scalar(src->type)) {
3654 dest->def = src->def;
3655 } else {
3656 unsigned elems = glsl_get_length(src->type);
3657
3658 dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
3659 for (unsigned i = 0; i < elems; i++)
3660 dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
3661 }
3662
3663 return dest;
3664 }
3665
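/* Illustration: for a struct { vec4 v; float f; }, indices {0, 2} walk to
 * elems[0] (the vec4) and replace component 2 via nir_vector_insert_imm,
 * whereas a single index {1} swaps out the float element wholesale.
 */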
3666 static struct vtn_ssa_value *
3667 vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
3668 struct vtn_ssa_value *insert, const uint32_t *indices,
3669 unsigned num_indices)
3670 {
3671 struct vtn_ssa_value *dest = vtn_composite_copy(b, src);
3672
3673 struct vtn_ssa_value *cur = dest;
3674 unsigned i;
3675 for (i = 0; i < num_indices - 1; i++) {
3676 /* If we got a vector here, that means the next index will be trying to
3677 * dereference a scalar.
3678 */
3679 vtn_fail_if(glsl_type_is_vector_or_scalar(cur->type),
3680 "OpCompositeInsert has too many indices.");
3681 vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
3682 "All indices in an OpCompositeInsert must be in-bounds");
3683 cur = cur->elems[indices[i]];
3684 }
3685
3686 if (glsl_type_is_vector_or_scalar(cur->type)) {
3687 vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
3688 "All indices in an OpCompositeInsert must be in-bounds");
3689
3690 /* According to the SPIR-V spec, OpCompositeInsert may work down to
3691        * the component granularity. In that case, the last index is the
3692        * component index at which to insert the scalar into the vector.
3693 */
3694
3695 cur->def = nir_vector_insert_imm(&b->nb, cur->def, insert->def, indices[i]);
3696 } else {
3697 vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
3698 "All indices in an OpCompositeInsert must be in-bounds");
3699 cur->elems[indices[i]] = insert;
3700 }
3701
3702 return dest;
3703 }
3704
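/* Illustration: OpCompositeExtract %float %mat 1 2 first walks elems[] to
 * select column 1 and then extracts component 2 of that vector with
 * nir_channel.
 */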
3705 static struct vtn_ssa_value *
3706 vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
3707 const uint32_t *indices, unsigned num_indices)
3708 {
3709 struct vtn_ssa_value *cur = src;
3710 for (unsigned i = 0; i < num_indices; i++) {
3711 if (glsl_type_is_vector_or_scalar(cur->type)) {
3712 vtn_assert(i == num_indices - 1);
3713 vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
3714 "All indices in an OpCompositeExtract must be in-bounds");
3715
3716 /* According to the SPIR-V spec, OpCompositeExtract may work down to
3717           * the component granularity. The last index is then the index of the
3718           * component to extract from the vector.
3719 */
3720
3721 const struct glsl_type *scalar_type =
3722 glsl_scalar_type(glsl_get_base_type(cur->type));
3723 struct vtn_ssa_value *ret = vtn_create_ssa_value(b, scalar_type);
3724 ret->def = nir_channel(&b->nb, cur->def, indices[i]);
3725 return ret;
3726 } else {
3727 vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
3728 "All indices in an OpCompositeExtract must be in-bounds");
3729 cur = cur->elems[indices[i]];
3730 }
3731 }
3732
3733 return cur;
3734 }
3735
3736 static void
3737 vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
3738 const uint32_t *w, unsigned count)
3739 {
3740 struct vtn_type *type = vtn_get_type(b, w[1]);
3741 struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);
3742
3743 switch (opcode) {
3744 case SpvOpVectorExtractDynamic:
3745 ssa->def = nir_vector_extract(&b->nb, vtn_get_nir_ssa(b, w[3]),
3746 vtn_get_nir_ssa(b, w[4]));
3747 break;
3748
3749 case SpvOpVectorInsertDynamic:
3750 ssa->def = nir_vector_insert(&b->nb, vtn_get_nir_ssa(b, w[3]),
3751 vtn_get_nir_ssa(b, w[4]),
3752 vtn_get_nir_ssa(b, w[5]));
3753 break;
3754
3755 case SpvOpVectorShuffle:
3756 ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type->type),
3757 vtn_get_nir_ssa(b, w[3]),
3758 vtn_get_nir_ssa(b, w[4]),
3759 w + 5);
3760 break;
3761
3762 case SpvOpCompositeConstruct: {
3763 unsigned elems = count - 3;
3764 assume(elems >= 1);
3765 if (glsl_type_is_vector_or_scalar(type->type)) {
3766 nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
3767 for (unsigned i = 0; i < elems; i++)
3768 srcs[i] = vtn_get_nir_ssa(b, w[3 + i]);
3769 ssa->def =
3770 vtn_vector_construct(b, glsl_get_vector_elements(type->type),
3771 elems, srcs);
3772 } else {
3773 ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
3774 for (unsigned i = 0; i < elems; i++)
3775 ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
3776 }
3777 break;
3778 }
3779 case SpvOpCompositeExtract:
3780 ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
3781 w + 4, count - 4);
3782 break;
3783
3784 case SpvOpCompositeInsert:
3785 ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
3786 vtn_ssa_value(b, w[3]),
3787 w + 5, count - 5);
3788 break;
3789
3790 case SpvOpCopyLogical:
3791 ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
3792 break;
3793 case SpvOpCopyObject:
3794 vtn_copy_value(b, w[3], w[2]);
3795 return;
3796
3797 default:
3798 vtn_fail_with_opcode("unknown composite operation", opcode);
3799 }
3800
3801 vtn_push_ssa_value(b, w[2], ssa);
3802 }
3803
3804 static void
3805 vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
3806 {
3807 nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
3808 nir_builder_instr_insert(&b->nb, &intrin->instr);
3809 }
3810
3811 void
3812 vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
3813 SpvMemorySemanticsMask semantics)
3814 {
3815 if (b->shader->options->use_scoped_barrier) {
3816 vtn_emit_scoped_memory_barrier(b, scope, semantics);
3817 return;
3818 }
3819
3820 static const SpvMemorySemanticsMask all_memory_semantics =
3821 SpvMemorySemanticsUniformMemoryMask |
3822 SpvMemorySemanticsWorkgroupMemoryMask |
3823 SpvMemorySemanticsAtomicCounterMemoryMask |
3824 SpvMemorySemanticsImageMemoryMask |
3825 SpvMemorySemanticsOutputMemoryMask;
3826
3827 /* If we're not actually doing a memory barrier, bail */
3828 if (!(semantics & all_memory_semantics))
3829 return;
3830
3831 /* GL and Vulkan don't have these */
3832 vtn_assert(scope != SpvScopeCrossDevice);
3833
3834 if (scope == SpvScopeSubgroup)
3835 return; /* Nothing to do here */
3836
3837 if (scope == SpvScopeWorkgroup) {
3838 vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
3839 return;
3840 }
3841
3842    /* There are only two scopes left. */
3843 vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);
3844
3845 /* Map the GLSL memoryBarrier() construct and any barriers with more than one
3846 * semantic to the corresponding NIR one.
3847 */
3848 if (util_bitcount(semantics & all_memory_semantics) > 1) {
3849 vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
3850 if (semantics & SpvMemorySemanticsOutputMemoryMask) {
3851 /* GLSL memoryBarrier() (and the corresponding NIR one) doesn't include
3852        * TCS outputs, so we have to emit a separate intrinsic for those. We
3853 * then need to emit another memory_barrier to prevent moving
3854 * non-output operations to before the tcs_patch barrier.
3855 */
3856 vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
3857 vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
3858 }
3859 return;
3860 }
3861
3862 /* Issue a more specific barrier */
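   /* For example, semantics naming only WorkgroupMemory (GLSL's
    * memoryBarrierShared()) become nir_intrinsic_memory_barrier_shared.
    */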
3863 switch (semantics & all_memory_semantics) {
3864 case SpvMemorySemanticsUniformMemoryMask:
3865 vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
3866 break;
3867 case SpvMemorySemanticsWorkgroupMemoryMask:
3868 vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
3869 break;
3870 case SpvMemorySemanticsAtomicCounterMemoryMask:
3871 vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
3872 break;
3873 case SpvMemorySemanticsImageMemoryMask:
3874 vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
3875 break;
3876 case SpvMemorySemanticsOutputMemoryMask:
3877 if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL)
3878 vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
3879 break;
3880 default:
3881 break;
3882 }
3883 }
3884
3885 static void
3886 vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
3887 const uint32_t *w, UNUSED unsigned count)
3888 {
3889 switch (opcode) {
3890 case SpvOpEmitVertex:
3891 case SpvOpEmitStreamVertex:
3892 case SpvOpEndPrimitive:
3893 case SpvOpEndStreamPrimitive: {
3894 nir_intrinsic_op intrinsic_op;
3895 switch (opcode) {
3896 case SpvOpEmitVertex:
3897 case SpvOpEmitStreamVertex:
3898 intrinsic_op = nir_intrinsic_emit_vertex;
3899 break;
3900 case SpvOpEndPrimitive:
3901 case SpvOpEndStreamPrimitive:
3902 intrinsic_op = nir_intrinsic_end_primitive;
3903 break;
3904 default:
3905 unreachable("Invalid opcode");
3906 }
3907
3908 nir_intrinsic_instr *intrin =
3909 nir_intrinsic_instr_create(b->shader, intrinsic_op);
3910
3911 switch (opcode) {
3912 case SpvOpEmitStreamVertex:
3913 case SpvOpEndStreamPrimitive: {
3914 unsigned stream = vtn_constant_uint(b, w[1]);
3915 nir_intrinsic_set_stream_id(intrin, stream);
3916 break;
3917 }
3918
3919 default:
3920 break;
3921 }
3922
3923 nir_builder_instr_insert(&b->nb, &intrin->instr);
3924 break;
3925 }
3926
3927 case SpvOpMemoryBarrier: {
3928 SpvScope scope = vtn_constant_uint(b, w[1]);
3929 SpvMemorySemanticsMask semantics = vtn_constant_uint(b, w[2]);
3930 vtn_emit_memory_barrier(b, scope, semantics);
3931 return;
3932 }
3933
3934 case SpvOpControlBarrier: {
3935 SpvScope execution_scope = vtn_constant_uint(b, w[1]);
3936 SpvScope memory_scope = vtn_constant_uint(b, w[2]);
3937 SpvMemorySemanticsMask memory_semantics = vtn_constant_uint(b, w[3]);
3938
3939 /* GLSLang, prior to commit 8297936dd6eb3, emitted OpControlBarrier with
3940 * memory semantics of None for GLSL barrier().
3941        * And before that, prior to c3f1cdfa, it emitted OpControlBarrier with
3942        * Device instead of Workgroup for the execution scope.
3943 */
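      /* In other words, a plain barrier() that old glslang emitted with None
       * memory semantics is upgraded below to a workgroup-scoped
       * acquire/release on workgroup memory.
       */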
3944 if (b->wa_glslang_cs_barrier &&
3945 b->nb.shader->info.stage == MESA_SHADER_COMPUTE &&
3946 (execution_scope == SpvScopeWorkgroup ||
3947 execution_scope == SpvScopeDevice) &&
3948 memory_semantics == SpvMemorySemanticsMaskNone) {
3949 execution_scope = SpvScopeWorkgroup;
3950 memory_scope = SpvScopeWorkgroup;
3951 memory_semantics = SpvMemorySemanticsAcquireReleaseMask |
3952 SpvMemorySemanticsWorkgroupMemoryMask;
3953 }
3954
3955 /* From the SPIR-V spec:
3956 *
3957 * "When used with the TessellationControl execution model, it also
3958 * implicitly synchronizes the Output Storage Class: Writes to Output
3959 * variables performed by any invocation executed prior to a
3960 * OpControlBarrier will be visible to any other invocation after
3961 * return from that OpControlBarrier."
3962 */
3963 if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL) {
3964 memory_semantics &= ~(SpvMemorySemanticsAcquireMask |
3965 SpvMemorySemanticsReleaseMask |
3966 SpvMemorySemanticsAcquireReleaseMask |
3967 SpvMemorySemanticsSequentiallyConsistentMask);
3968 memory_semantics |= SpvMemorySemanticsAcquireReleaseMask |
3969 SpvMemorySemanticsOutputMemoryMask;
3970 }
3971
3972 if (b->shader->options->use_scoped_barrier) {
3973 vtn_emit_scoped_control_barrier(b, execution_scope, memory_scope,
3974 memory_semantics);
3975 } else {
3976 vtn_emit_memory_barrier(b, memory_scope, memory_semantics);
3977
3978 if (execution_scope == SpvScopeWorkgroup)
3979 vtn_emit_barrier(b, nir_intrinsic_control_barrier);
3980 }
3981 break;
3982 }
3983
3984 default:
3985 unreachable("unknown barrier instruction");
3986 }
3987 }
3988
3989 static unsigned
3990 gl_primitive_from_spv_execution_mode(struct vtn_builder *b,
3991 SpvExecutionMode mode)
3992 {
3993 switch (mode) {
3994 case SpvExecutionModeInputPoints:
3995 case SpvExecutionModeOutputPoints:
3996 return 0; /* GL_POINTS */
3997 case SpvExecutionModeInputLines:
3998 return 1; /* GL_LINES */
3999 case SpvExecutionModeInputLinesAdjacency:
4000 return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
4001 case SpvExecutionModeTriangles:
4002 return 4; /* GL_TRIANGLES */
4003 case SpvExecutionModeInputTrianglesAdjacency:
4004 return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
4005 case SpvExecutionModeQuads:
4006 return 7; /* GL_QUADS */
4007 case SpvExecutionModeIsolines:
4008 return 0x8E7A; /* GL_ISOLINES */
4009 case SpvExecutionModeOutputLineStrip:
4010 return 3; /* GL_LINE_STRIP */
4011 case SpvExecutionModeOutputTriangleStrip:
4012 return 5; /* GL_TRIANGLE_STRIP */
4013 default:
4014 vtn_fail("Invalid primitive type: %s (%u)",
4015 spirv_executionmode_to_string(mode), mode);
4016 }
4017 }
4018
4019 static unsigned
4020 vertices_in_from_spv_execution_mode(struct vtn_builder *b,
4021 SpvExecutionMode mode)
4022 {
4023 switch (mode) {
4024 case SpvExecutionModeInputPoints:
4025 return 1;
4026 case SpvExecutionModeInputLines:
4027 return 2;
4028 case SpvExecutionModeInputLinesAdjacency:
4029 return 4;
4030 case SpvExecutionModeTriangles:
4031 return 3;
4032 case SpvExecutionModeInputTrianglesAdjacency:
4033 return 6;
4034 default:
4035 vtn_fail("Invalid GS input mode: %s (%u)",
4036 spirv_executionmode_to_string(mode), mode);
4037 }
4038 }
4039
4040 static gl_shader_stage
4041 stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model)
4042 {
4043 switch (model) {
4044 case SpvExecutionModelVertex:
4045 return MESA_SHADER_VERTEX;
4046 case SpvExecutionModelTessellationControl:
4047 return MESA_SHADER_TESS_CTRL;
4048 case SpvExecutionModelTessellationEvaluation:
4049 return MESA_SHADER_TESS_EVAL;
4050 case SpvExecutionModelGeometry:
4051 return MESA_SHADER_GEOMETRY;
4052 case SpvExecutionModelFragment:
4053 return MESA_SHADER_FRAGMENT;
4054 case SpvExecutionModelGLCompute:
4055 return MESA_SHADER_COMPUTE;
4056 case SpvExecutionModelKernel:
4057 return MESA_SHADER_KERNEL;
4058 default:
4059 vtn_fail("Unsupported execution model: %s (%u)",
4060 spirv_executionmodel_to_string(model), model);
4061 }
4062 }
4063
4064 #define spv_check_supported(name, cap) do { \
4065 if (!(b->options && b->options->caps.name)) \
4066 vtn_warn("Unsupported SPIR-V capability: %s (%u)", \
4067 spirv_capability_to_string(cap), cap); \
4068 } while(0)
4069
4070
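/* Word layout assumed here, per the SPIR-V spec for OpEntryPoint: w[1] is
 * the execution model, w[2] the entry point's <id>, and w[3..] the literal
 * name string followed by the interface <id>s.
 */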
4071 void
4072 vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
4073 unsigned count)
4074 {
4075 struct vtn_value *entry_point = &b->values[w[2]];
4076    /* Record the name on the value regardless of whether this is our entry point. */
4077 unsigned name_words;
4078 entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);
4079
4080 if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
4081 stage_for_execution_model(b, w[1]) != b->entry_point_stage)
4082 return;
4083
4084 vtn_assert(b->entry_point == NULL);
4085 b->entry_point = entry_point;
4086 }
4087
4088 static bool
4089 vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
4090 const uint32_t *w, unsigned count)
4091 {
4092 switch (opcode) {
4093 case SpvOpSource: {
4094 const char *lang;
4095 switch (w[1]) {
4096 default:
4097 case SpvSourceLanguageUnknown: lang = "unknown"; break;
4098 case SpvSourceLanguageESSL: lang = "ESSL"; break;
4099 case SpvSourceLanguageGLSL: lang = "GLSL"; break;
4100 case SpvSourceLanguageOpenCL_C: lang = "OpenCL C"; break;
4101 case SpvSourceLanguageOpenCL_CPP: lang = "OpenCL C++"; break;
4102 case SpvSourceLanguageHLSL: lang = "HLSL"; break;
4103 }
4104
4105 uint32_t version = w[2];
4106
4107 const char *file =
4108 (count > 3) ? vtn_value(b, w[3], vtn_value_type_string)->str : "";
4109
4110 vtn_info("Parsing SPIR-V from %s %u source file %s", lang, version, file);
4111 break;
4112 }
4113
4114 case SpvOpSourceExtension:
4115 case SpvOpSourceContinued:
4116 case SpvOpExtension:
4117 case SpvOpModuleProcessed:
4118 /* Unhandled, but these are for debug so that's ok. */
4119 break;
4120
4121 case SpvOpCapability: {
4122 SpvCapability cap = w[1];
4123 switch (cap) {
4124 case SpvCapabilityMatrix:
4125 case SpvCapabilityShader:
4126 case SpvCapabilityGeometry:
4127 case SpvCapabilityGeometryPointSize:
4128 case SpvCapabilityUniformBufferArrayDynamicIndexing:
4129 case SpvCapabilitySampledImageArrayDynamicIndexing:
4130 case SpvCapabilityStorageBufferArrayDynamicIndexing:
4131 case SpvCapabilityStorageImageArrayDynamicIndexing:
4132 case SpvCapabilityImageRect:
4133 case SpvCapabilitySampledRect:
4134 case SpvCapabilitySampled1D:
4135 case SpvCapabilityImage1D:
4136 case SpvCapabilitySampledCubeArray:
4137 case SpvCapabilityImageCubeArray:
4138 case SpvCapabilitySampledBuffer:
4139 case SpvCapabilityImageBuffer:
4140 case SpvCapabilityImageQuery:
4141 case SpvCapabilityDerivativeControl:
4142 case SpvCapabilityInterpolationFunction:
4143 case SpvCapabilityMultiViewport:
4144 case SpvCapabilitySampleRateShading:
4145 case SpvCapabilityClipDistance:
4146 case SpvCapabilityCullDistance:
4147 case SpvCapabilityInputAttachment:
4148 case SpvCapabilityImageGatherExtended:
4149 case SpvCapabilityStorageImageExtendedFormats:
4150 case SpvCapabilityVector16:
4151 break;
4152
4153 case SpvCapabilityLinkage:
4154 case SpvCapabilityFloat16Buffer:
4155 case SpvCapabilitySparseResidency:
4156 vtn_warn("Unsupported SPIR-V capability: %s",
4157 spirv_capability_to_string(cap));
4158 break;
4159
4160 case SpvCapabilityMinLod:
4161 spv_check_supported(min_lod, cap);
4162 break;
4163
4164 case SpvCapabilityAtomicStorage:
4165 spv_check_supported(atomic_storage, cap);
4166 break;
4167
4168 case SpvCapabilityFloat64:
4169 spv_check_supported(float64, cap);
4170 break;
4171 case SpvCapabilityInt64:
4172 spv_check_supported(int64, cap);
4173 break;
4174 case SpvCapabilityInt16:
4175 spv_check_supported(int16, cap);
4176 break;
4177 case SpvCapabilityInt8:
4178 spv_check_supported(int8, cap);
4179 break;
4180
4181 case SpvCapabilityTransformFeedback:
4182 spv_check_supported(transform_feedback, cap);
4183 break;
4184
4185 case SpvCapabilityGeometryStreams:
4186 spv_check_supported(geometry_streams, cap);
4187 break;
4188
4189 case SpvCapabilityInt64Atomics:
4190 spv_check_supported(int64_atomics, cap);
4191 break;
4192
4193 case SpvCapabilityStorageImageMultisample:
4194 spv_check_supported(storage_image_ms, cap);
4195 break;
4196
4197 case SpvCapabilityAddresses:
4198 spv_check_supported(address, cap);
4199 break;
4200
4201 case SpvCapabilityKernel:
4202 spv_check_supported(kernel, cap);
4203 break;
4204
4205 case SpvCapabilityImageBasic:
4206 spv_check_supported(kernel_image, cap);
4207 break;
4208
4209 case SpvCapabilityImageReadWrite:
4210 case SpvCapabilityImageMipmap:
4211 case SpvCapabilityPipes:
4212 case SpvCapabilityDeviceEnqueue:
4213 case SpvCapabilityLiteralSampler:
4214 case SpvCapabilityGenericPointer:
4215 vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
4216 spirv_capability_to_string(cap));
4217 break;
4218
4219 case SpvCapabilityImageMSArray:
4220 spv_check_supported(image_ms_array, cap);
4221 break;
4222
4223 case SpvCapabilityTessellation:
4224 case SpvCapabilityTessellationPointSize:
4225 spv_check_supported(tessellation, cap);
4226 break;
4227
4228 case SpvCapabilityDrawParameters:
4229 spv_check_supported(draw_parameters, cap);
4230 break;
4231
4232 case SpvCapabilityStorageImageReadWithoutFormat:
4233 spv_check_supported(image_read_without_format, cap);
4234 break;
4235
4236 case SpvCapabilityStorageImageWriteWithoutFormat:
4237 spv_check_supported(image_write_without_format, cap);
4238 break;
4239
4240 case SpvCapabilityDeviceGroup:
4241 spv_check_supported(device_group, cap);
4242 break;
4243
4244 case SpvCapabilityMultiView:
4245 spv_check_supported(multiview, cap);
4246 break;
4247
4248 case SpvCapabilityGroupNonUniform:
4249 spv_check_supported(subgroup_basic, cap);
4250 break;
4251
4252 case SpvCapabilitySubgroupVoteKHR:
4253 case SpvCapabilityGroupNonUniformVote:
4254 spv_check_supported(subgroup_vote, cap);
4255 break;
4256
4257 case SpvCapabilitySubgroupBallotKHR:
4258 case SpvCapabilityGroupNonUniformBallot:
4259 spv_check_supported(subgroup_ballot, cap);
4260 break;
4261
4262 case SpvCapabilityGroupNonUniformShuffle:
4263 case SpvCapabilityGroupNonUniformShuffleRelative:
4264 spv_check_supported(subgroup_shuffle, cap);
4265 break;
4266
4267 case SpvCapabilityGroupNonUniformQuad:
4268 spv_check_supported(subgroup_quad, cap);
4269 break;
4270
4271 case SpvCapabilityGroupNonUniformArithmetic:
4272 case SpvCapabilityGroupNonUniformClustered:
4273 spv_check_supported(subgroup_arithmetic, cap);
4274 break;
4275
4276 case SpvCapabilityGroups:
4277 spv_check_supported(amd_shader_ballot, cap);
4278 break;
4279
4280 case SpvCapabilityVariablePointersStorageBuffer:
4281 case SpvCapabilityVariablePointers:
4282 spv_check_supported(variable_pointers, cap);
4283 b->variable_pointers = true;
4284 break;
4285
4286 case SpvCapabilityStorageUniformBufferBlock16:
4287 case SpvCapabilityStorageUniform16:
4288 case SpvCapabilityStoragePushConstant16:
4289 case SpvCapabilityStorageInputOutput16:
4290 spv_check_supported(storage_16bit, cap);
4291 break;
4292
4293 case SpvCapabilityShaderLayer:
4294 case SpvCapabilityShaderViewportIndex:
4295 case SpvCapabilityShaderViewportIndexLayerEXT:
4296 spv_check_supported(shader_viewport_index_layer, cap);
4297 break;
4298
4299 case SpvCapabilityStorageBuffer8BitAccess:
4300 case SpvCapabilityUniformAndStorageBuffer8BitAccess:
4301 case SpvCapabilityStoragePushConstant8:
4302 spv_check_supported(storage_8bit, cap);
4303 break;
4304
4305 case SpvCapabilityShaderNonUniformEXT:
4306 spv_check_supported(descriptor_indexing, cap);
4307 break;
4308
4309 case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
4310 case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
4311 case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
4312 spv_check_supported(descriptor_array_dynamic_indexing, cap);
4313 break;
4314
4315 case SpvCapabilityUniformBufferArrayNonUniformIndexingEXT:
4316 case SpvCapabilitySampledImageArrayNonUniformIndexingEXT:
4317 case SpvCapabilityStorageBufferArrayNonUniformIndexingEXT:
4318 case SpvCapabilityStorageImageArrayNonUniformIndexingEXT:
4319 case SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT:
4320 case SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT:
4321 case SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT:
4322 spv_check_supported(descriptor_array_non_uniform_indexing, cap);
4323 break;
4324
4325 case SpvCapabilityRuntimeDescriptorArrayEXT:
4326 spv_check_supported(runtime_descriptor_array, cap);
4327 break;
4328
4329 case SpvCapabilityStencilExportEXT:
4330 spv_check_supported(stencil_export, cap);
4331 break;
4332
4333 case SpvCapabilitySampleMaskPostDepthCoverage:
4334 spv_check_supported(post_depth_coverage, cap);
4335 break;
4336
4337 case SpvCapabilityDenormFlushToZero:
4338 case SpvCapabilityDenormPreserve:
4339 case SpvCapabilitySignedZeroInfNanPreserve:
4340 case SpvCapabilityRoundingModeRTE:
4341 case SpvCapabilityRoundingModeRTZ:
4342 spv_check_supported(float_controls, cap);
4343 break;
4344
4345 case SpvCapabilityPhysicalStorageBufferAddresses:
4346 spv_check_supported(physical_storage_buffer_address, cap);
4347 break;
4348
4349 case SpvCapabilityComputeDerivativeGroupQuadsNV:
4350 case SpvCapabilityComputeDerivativeGroupLinearNV:
4351 spv_check_supported(derivative_group, cap);
4352 break;
4353
4354 case SpvCapabilityFloat16:
4355 spv_check_supported(float16, cap);
4356 break;
4357
4358 case SpvCapabilityFragmentShaderSampleInterlockEXT:
4359 spv_check_supported(fragment_shader_sample_interlock, cap);
4360 break;
4361
4362 case SpvCapabilityFragmentShaderPixelInterlockEXT:
4363 spv_check_supported(fragment_shader_pixel_interlock, cap);
4364 break;
4365
4366 case SpvCapabilityDemoteToHelperInvocationEXT:
4367 spv_check_supported(demote_to_helper_invocation, cap);
4368 break;
4369
4370 case SpvCapabilityShaderClockKHR:
4371 spv_check_supported(shader_clock, cap);
4372 break;
4373
4374 case SpvCapabilityVulkanMemoryModel:
4375 spv_check_supported(vk_memory_model, cap);
4376 break;
4377
4378 case SpvCapabilityVulkanMemoryModelDeviceScope:
4379 spv_check_supported(vk_memory_model_device_scope, cap);
4380 break;
4381
4382 case SpvCapabilityImageReadWriteLodAMD:
4383 spv_check_supported(amd_image_read_write_lod, cap);
4384 break;
4385
4386 case SpvCapabilityIntegerFunctions2INTEL:
4387 spv_check_supported(integer_functions2, cap);
4388 break;
4389
4390 case SpvCapabilityFragmentMaskAMD:
4391 spv_check_supported(amd_fragment_mask, cap);
4392 break;
4393
4394 case SpvCapabilityImageGatherBiasLodAMD:
4395 spv_check_supported(amd_image_gather_bias_lod, cap);
4396 break;
4397
4398 case SpvCapabilityAtomicFloat32AddEXT:
4399 spv_check_supported(float32_atomic_add, cap);
4400 break;
4401
4402 case SpvCapabilityAtomicFloat64AddEXT:
4403 spv_check_supported(float64_atomic_add, cap);
4404 break;
4405
4406 default:
4407 vtn_fail("Unhandled capability: %s (%u)",
4408 spirv_capability_to_string(cap), cap);
4409 }
4410 break;
4411 }
4412
4413 case SpvOpExtInstImport:
4414 vtn_handle_extension(b, opcode, w, count);
4415 break;
4416
4417 case SpvOpMemoryModel:
4418 switch (w[1]) {
4419 case SpvAddressingModelPhysical32:
4420 vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
4421 "AddressingModelPhysical32 only supported for kernels");
4422 b->shader->info.cs.ptr_size = 32;
4423 b->physical_ptrs = true;
4424 assert(nir_address_format_bit_size(b->options->global_addr_format) == 32);
4425 assert(nir_address_format_num_components(b->options->global_addr_format) == 1);
4426 assert(nir_address_format_bit_size(b->options->shared_addr_format) == 32);
4427 assert(nir_address_format_num_components(b->options->shared_addr_format) == 1);
4428 if (!b->options->constant_as_global) {
4429 assert(nir_address_format_bit_size(b->options->ubo_addr_format) == 32);
4430 assert(nir_address_format_num_components(b->options->ubo_addr_format) == 1);
4431 }
4432 break;
4433 case SpvAddressingModelPhysical64:
4434 vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
4435 "AddressingModelPhysical64 only supported for kernels");
4436 b->shader->info.cs.ptr_size = 64;
4437 b->physical_ptrs = true;
4438 assert(nir_address_format_bit_size(b->options->global_addr_format) == 64);
4439 assert(nir_address_format_num_components(b->options->global_addr_format) == 1);
4440 assert(nir_address_format_bit_size(b->options->shared_addr_format) == 64);
4441 assert(nir_address_format_num_components(b->options->shared_addr_format) == 1);
4442 if (!b->options->constant_as_global) {
4443 assert(nir_address_format_bit_size(b->options->ubo_addr_format) == 64);
4444 assert(nir_address_format_num_components(b->options->ubo_addr_format) == 1);
4445 }
4446 break;
4447 case SpvAddressingModelLogical:
4448 vtn_fail_if(b->shader->info.stage == MESA_SHADER_KERNEL,
4449 "AddressingModelLogical only supported for shaders");
4450 b->physical_ptrs = false;
4451 break;
4452 case SpvAddressingModelPhysicalStorageBuffer64:
4453 vtn_fail_if(!b->options ||
4454 !b->options->caps.physical_storage_buffer_address,
4455 "AddressingModelPhysicalStorageBuffer64 not supported");
4456 break;
4457 default:
4458 vtn_fail("Unknown addressing model: %s (%u)",
4459 spirv_addressingmodel_to_string(w[1]), w[1]);
4460 break;
4461 }
4462
4463 b->mem_model = w[2];
4464 switch (w[2]) {
4465 case SpvMemoryModelSimple:
4466 case SpvMemoryModelGLSL450:
4467 case SpvMemoryModelOpenCL:
4468 break;
4469 case SpvMemoryModelVulkan:
4470 vtn_fail_if(!b->options->caps.vk_memory_model,
4471 "Vulkan memory model is unsupported by this driver");
4472 break;
4473 default:
4474 vtn_fail("Unsupported memory model: %s",
4475 spirv_memorymodel_to_string(w[2]));
4476 break;
4477 }
4478 break;
4479
4480 case SpvOpEntryPoint:
4481 vtn_handle_entry_point(b, w, count);
4482 break;
4483
4484 case SpvOpString:
4485 vtn_push_value(b, w[1], vtn_value_type_string)->str =
4486 vtn_string_literal(b, &w[2], count - 2, NULL);
4487 break;
4488
4489 case SpvOpName:
4490 b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
4491 break;
4492
4493 case SpvOpMemberName:
4494 /* TODO */
4495 break;
4496
4497 case SpvOpExecutionMode:
4498 case SpvOpExecutionModeId:
4499 case SpvOpDecorationGroup:
4500 case SpvOpDecorate:
4501 case SpvOpDecorateId:
4502 case SpvOpMemberDecorate:
4503 case SpvOpGroupDecorate:
4504 case SpvOpGroupMemberDecorate:
4505 case SpvOpDecorateString:
4506 case SpvOpMemberDecorateString:
4507 vtn_handle_decoration(b, opcode, w, count);
4508 break;
4509
4510 case SpvOpExtInst: {
4511 struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
4512 if (val->ext_handler == vtn_handle_non_semantic_instruction) {
4513 /* NonSemantic extended instructions are acceptable in preamble. */
4514 vtn_handle_non_semantic_instruction(b, w[4], w, count);
4515 return true;
4516 } else {
4517 return false; /* End of preamble. */
4518 }
4519 }
4520
4521 default:
4522 return false; /* End of preamble */
4523 }
4524
4525 return true;
4526 }
4527
4528 static void
4529 vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
4530 const struct vtn_decoration *mode, UNUSED void *data)
4531 {
4532 vtn_assert(b->entry_point == entry_point);
4533
4534 switch(mode->exec_mode) {
4535 case SpvExecutionModeOriginUpperLeft:
4536 case SpvExecutionModeOriginLowerLeft:
4537 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4538 b->shader->info.fs.origin_upper_left =
4539 (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
4540 break;
4541
4542 case SpvExecutionModeEarlyFragmentTests:
4543 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4544 b->shader->info.fs.early_fragment_tests = true;
4545 break;
4546
4547 case SpvExecutionModePostDepthCoverage:
4548 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4549 b->shader->info.fs.post_depth_coverage = true;
4550 break;
4551
4552 case SpvExecutionModeInvocations:
4553 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
4554 b->shader->info.gs.invocations = MAX2(1, mode->operands[0]);
4555 break;
4556
4557 case SpvExecutionModeDepthReplacing:
4558 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4559 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
4560 break;
4561 case SpvExecutionModeDepthGreater:
4562 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4563 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
4564 break;
4565 case SpvExecutionModeDepthLess:
4566 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4567 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
4568 break;
4569 case SpvExecutionModeDepthUnchanged:
4570 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4571 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
4572 break;
4573
4574 case SpvExecutionModeLocalSize:
4575 vtn_assert(gl_shader_stage_is_compute(b->shader->info.stage));
4576 b->shader->info.cs.local_size[0] = mode->operands[0];
4577 b->shader->info.cs.local_size[1] = mode->operands[1];
4578 b->shader->info.cs.local_size[2] = mode->operands[2];
4579 break;
4580
4581 case SpvExecutionModeLocalSizeHint:
4582 break; /* Nothing to do with this */
4583
4584 case SpvExecutionModeOutputVertices:
4585 if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4586 b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
4587 b->shader->info.tess.tcs_vertices_out = mode->operands[0];
4588 } else {
4589 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
4590 b->shader->info.gs.vertices_out = mode->operands[0];
4591 }
4592 break;
4593
4594 case SpvExecutionModeInputPoints:
4595 case SpvExecutionModeInputLines:
4596 case SpvExecutionModeInputLinesAdjacency:
4597 case SpvExecutionModeTriangles:
4598 case SpvExecutionModeInputTrianglesAdjacency:
4599 case SpvExecutionModeQuads:
4600 case SpvExecutionModeIsolines:
4601 if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4602 b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
4603 b->shader->info.tess.primitive_mode =
4604 gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
4605 } else {
4606 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
4607 b->shader->info.gs.vertices_in =
4608 vertices_in_from_spv_execution_mode(b, mode->exec_mode);
4609 b->shader->info.gs.input_primitive =
4610 gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
4611 }
4612 break;
4613
4614 case SpvExecutionModeOutputPoints:
4615 case SpvExecutionModeOutputLineStrip:
4616 case SpvExecutionModeOutputTriangleStrip:
4617 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
4618 b->shader->info.gs.output_primitive =
4619 gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
4620 break;
4621
4622 case SpvExecutionModeSpacingEqual:
4623 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4624 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4625 b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
4626 break;
4627 case SpvExecutionModeSpacingFractionalEven:
4628 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4629 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4630 b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
4631 break;
4632 case SpvExecutionModeSpacingFractionalOdd:
4633 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4634 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4635 b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
4636 break;
4637 case SpvExecutionModeVertexOrderCw:
4638 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4639 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4640 b->shader->info.tess.ccw = false;
4641 break;
4642 case SpvExecutionModeVertexOrderCcw:
4643 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4644 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4645 b->shader->info.tess.ccw = true;
4646 break;
4647 case SpvExecutionModePointMode:
4648 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4649 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4650 b->shader->info.tess.point_mode = true;
4651 break;
4652
4653 case SpvExecutionModePixelCenterInteger:
4654 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4655 b->shader->info.fs.pixel_center_integer = true;
4656 break;
4657
4658 case SpvExecutionModeXfb:
4659 b->shader->info.has_transform_feedback_varyings = true;
4660 break;
4661
4662 case SpvExecutionModeVecTypeHint:
4663 break; /* OpenCL */
4664
4665 case SpvExecutionModeContractionOff:
4666 if (b->shader->info.stage != MESA_SHADER_KERNEL)
4667          vtn_warn("ExecutionMode only allowed for CL-style kernels: %s",
4668 spirv_executionmode_to_string(mode->exec_mode));
4669 else
4670 b->exact = true;
4671 break;
4672
4673 case SpvExecutionModeStencilRefReplacingEXT:
4674 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4675 break;
4676
4677 case SpvExecutionModeDerivativeGroupQuadsNV:
4678 vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
4679 b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_QUADS;
4680 break;
4681
4682 case SpvExecutionModeDerivativeGroupLinearNV:
4683 vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
4684 b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_LINEAR;
4685 break;
4686
4687 case SpvExecutionModePixelInterlockOrderedEXT:
4688 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4689 b->shader->info.fs.pixel_interlock_ordered = true;
4690 break;
4691
4692 case SpvExecutionModePixelInterlockUnorderedEXT:
4693 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4694 b->shader->info.fs.pixel_interlock_unordered = true;
4695 break;
4696
4697 case SpvExecutionModeSampleInterlockOrderedEXT:
4698 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4699 b->shader->info.fs.sample_interlock_ordered = true;
4700 break;
4701
4702 case SpvExecutionModeSampleInterlockUnorderedEXT:
4703 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4704 b->shader->info.fs.sample_interlock_unordered = true;
4705 break;
4706
4707 case SpvExecutionModeDenormPreserve:
4708 case SpvExecutionModeDenormFlushToZero:
4709 case SpvExecutionModeSignedZeroInfNanPreserve:
4710 case SpvExecutionModeRoundingModeRTE:
4711 case SpvExecutionModeRoundingModeRTZ: {
4712 unsigned execution_mode = 0;
4713 switch (mode->exec_mode) {
4714 case SpvExecutionModeDenormPreserve:
4715 switch (mode->operands[0]) {
4716 case 16: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP16; break;
4717 case 32: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP32; break;
4718 case 64: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP64; break;
4719 default: vtn_fail("Floating point type not supported");
4720 }
4721 break;
4722 case SpvExecutionModeDenormFlushToZero:
4723 switch (mode->operands[0]) {
4724 case 16: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16; break;
4725 case 32: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32; break;
4726 case 64: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64; break;
4727 default: vtn_fail("Floating point type not supported");
4728 }
4729 break;
4730 case SpvExecutionModeSignedZeroInfNanPreserve:
4731 switch (mode->operands[0]) {
4732 case 16: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16; break;
4733 case 32: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32; break;
4734 case 64: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64; break;
4735 default: vtn_fail("Floating point type not supported");
4736 }
4737 break;
4738 case SpvExecutionModeRoundingModeRTE:
4739 switch (mode->operands[0]) {
4740 case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16; break;
4741 case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32; break;
4742 case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64; break;
4743 default: vtn_fail("Floating point type not supported");
4744 }
4745 break;
4746 case SpvExecutionModeRoundingModeRTZ:
4747 switch (mode->operands[0]) {
4748 case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16; break;
4749 case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32; break;
4750 case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64; break;
4751 default: vtn_fail("Floating point type not supported");
4752 }
4753 break;
4754 default:
4755 break;
4756 }
4757
4758 b->shader->info.float_controls_execution_mode |= execution_mode;
4759 break;
4760 }
4761
4762 case SpvExecutionModeLocalSizeId:
4763 case SpvExecutionModeLocalSizeHintId:
4764 /* Handled later by vtn_handle_execution_mode_id(). */
4765 break;
4766
4767 default:
4768 vtn_fail("Unhandled execution mode: %s (%u)",
4769 spirv_executionmode_to_string(mode->exec_mode),
4770 mode->exec_mode);
4771 }
4772 }
4773
4774 static void
4775 vtn_handle_execution_mode_id(struct vtn_builder *b, struct vtn_value *entry_point,
4776 const struct vtn_decoration *mode, UNUSED void *data)
4777 {
4778
4779 vtn_assert(b->entry_point == entry_point);
4780
4781 switch (mode->exec_mode) {
4782 case SpvExecutionModeLocalSizeId:
4783 b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
4784 b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
4785 b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
4786 break;
4787
4788 case SpvExecutionModeLocalSizeHintId:
4789 /* Nothing to do with this hint. */
4790 break;
4791
4792 default:
4793 /* Nothing to do. Literal execution modes already handled by
4794 * vtn_handle_execution_mode(). */
4795 break;
4796 }
4797 }
4798
4799 static bool
4800 vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
4801 const uint32_t *w, unsigned count)
4802 {
4803 vtn_set_instruction_result_type(b, opcode, w, count);
4804
4805 switch (opcode) {
4806 case SpvOpSource:
4807 case SpvOpSourceContinued:
4808 case SpvOpSourceExtension:
4809 case SpvOpExtension:
4810 case SpvOpCapability:
4811 case SpvOpExtInstImport:
4812 case SpvOpMemoryModel:
4813 case SpvOpEntryPoint:
4814 case SpvOpExecutionMode:
4815 case SpvOpString:
4816 case SpvOpName:
4817 case SpvOpMemberName:
4818 case SpvOpDecorationGroup:
4819 case SpvOpDecorate:
4820 case SpvOpDecorateId:
4821 case SpvOpMemberDecorate:
4822 case SpvOpGroupDecorate:
4823 case SpvOpGroupMemberDecorate:
4824 case SpvOpDecorateString:
4825 case SpvOpMemberDecorateString:
4826       vtn_fail("Invalid opcode in types and variables section");
4827 break;
4828
4829 case SpvOpTypeVoid:
4830 case SpvOpTypeBool:
4831 case SpvOpTypeInt:
4832 case SpvOpTypeFloat:
4833 case SpvOpTypeVector:
4834 case SpvOpTypeMatrix:
4835 case SpvOpTypeImage:
4836 case SpvOpTypeSampler:
4837 case SpvOpTypeSampledImage:
4838 case SpvOpTypeArray:
4839 case SpvOpTypeRuntimeArray:
4840 case SpvOpTypeStruct:
4841 case SpvOpTypeOpaque:
4842 case SpvOpTypePointer:
4843 case SpvOpTypeForwardPointer:
4844 case SpvOpTypeFunction:
4845 case SpvOpTypeEvent:
4846 case SpvOpTypeDeviceEvent:
4847 case SpvOpTypeReserveId:
4848 case SpvOpTypeQueue:
4849 case SpvOpTypePipe:
4850 vtn_handle_type(b, opcode, w, count);
4851 break;
4852
4853 case SpvOpConstantTrue:
4854 case SpvOpConstantFalse:
4855 case SpvOpConstant:
4856 case SpvOpConstantComposite:
4857 case SpvOpConstantSampler:
4858 case SpvOpConstantNull:
4859 case SpvOpSpecConstantTrue:
4860 case SpvOpSpecConstantFalse:
4861 case SpvOpSpecConstant:
4862 case SpvOpSpecConstantComposite:
4863 case SpvOpSpecConstantOp:
4864 vtn_handle_constant(b, opcode, w, count);
4865 break;
4866
4867 case SpvOpUndef:
4868 case SpvOpVariable:
4869 vtn_handle_variables(b, opcode, w, count);
4870 break;
4871
4872 case SpvOpExtInst: {
4873 struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
4874 /* NonSemantic extended instructions are acceptable in preamble, others
4875 * will indicate the end of preamble.
4876 */
4877 return val->ext_handler == vtn_handle_non_semantic_instruction;
4878 }
4879
4880 default:
4881 return false; /* End of preamble */
4882 }
4883
4884 return true;
4885 }
4886
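/* Illustration: for a composite result such as a struct of two vec4s,
 * OpSelect recurses element-wise below, so every leaf becomes a bcsel on
 * the same scalar condition.
 */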
4887 static struct vtn_ssa_value *
4888 vtn_nir_select(struct vtn_builder *b, struct vtn_ssa_value *src0,
4889 struct vtn_ssa_value *src1, struct vtn_ssa_value *src2)
4890 {
4891 struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
4892 dest->type = src1->type;
4893
4894 if (glsl_type_is_vector_or_scalar(src1->type)) {
4895 dest->def = nir_bcsel(&b->nb, src0->def, src1->def, src2->def);
4896 } else {
4897 unsigned elems = glsl_get_length(src1->type);
4898
4899 dest->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
4900 for (unsigned i = 0; i < elems; i++) {
4901 dest->elems[i] = vtn_nir_select(b, src0,
4902 src1->elems[i], src2->elems[i]);
4903 }
4904 }
4905
4906 return dest;
4907 }
4908
4909 static void
4910 vtn_handle_select(struct vtn_builder *b, SpvOp opcode,
4911 const uint32_t *w, unsigned count)
4912 {
4913 /* Handle OpSelect up-front here because it needs to be able to handle
4914 * pointers and not just regular vectors and scalars.
4915 */
4916 struct vtn_value *res_val = vtn_untyped_value(b, w[2]);
4917 struct vtn_value *cond_val = vtn_untyped_value(b, w[3]);
4918 struct vtn_value *obj1_val = vtn_untyped_value(b, w[4]);
4919 struct vtn_value *obj2_val = vtn_untyped_value(b, w[5]);
4920
4921 vtn_fail_if(obj1_val->type != res_val->type ||
4922 obj2_val->type != res_val->type,
4923 "Object types must match the result type in OpSelect");
4924
4925 vtn_fail_if((cond_val->type->base_type != vtn_base_type_scalar &&
4926 cond_val->type->base_type != vtn_base_type_vector) ||
4927 !glsl_type_is_boolean(cond_val->type->type),
4928 "OpSelect must have either a vector of booleans or "
4929 "a boolean as Condition type");
4930
4931 vtn_fail_if(cond_val->type->base_type == vtn_base_type_vector &&
4932 (res_val->type->base_type != vtn_base_type_vector ||
4933 res_val->type->length != cond_val->type->length),
4934 "When Condition type in OpSelect is a vector, the Result "
4935 "type must be a vector of the same length");
4936
4937 switch (res_val->type->base_type) {
4938 case vtn_base_type_scalar:
4939 case vtn_base_type_vector:
4940 case vtn_base_type_matrix:
4941 case vtn_base_type_array:
4942 case vtn_base_type_struct:
4943 /* OK. */
4944 break;
4945 case vtn_base_type_pointer:
4946 /* We need to have actual storage for pointer types. */
4947 vtn_fail_if(res_val->type->type == NULL,
4948 "Invalid pointer result type for OpSelect");
4949 break;
4950 default:
4951 vtn_fail("Result type of OpSelect must be a scalar, composite, or pointer");
4952 }
4953
4954 vtn_push_ssa_value(b, w[2],
4955 vtn_nir_select(b, vtn_ssa_value(b, w[3]),
4956 vtn_ssa_value(b, w[4]),
4957 vtn_ssa_value(b, w[5])));
4958 }
4959
4960 static void
4961 vtn_handle_ptr(struct vtn_builder *b, SpvOp opcode,
4962 const uint32_t *w, unsigned count)
4963 {
4964 struct vtn_type *type1 = vtn_get_value_type(b, w[3]);
4965 struct vtn_type *type2 = vtn_get_value_type(b, w[4]);
4966 vtn_fail_if(type1->base_type != vtn_base_type_pointer ||
4967 type2->base_type != vtn_base_type_pointer,
4968 "%s operands must have pointer types",
4969 spirv_op_to_string(opcode));
4970 vtn_fail_if(type1->storage_class != type2->storage_class,
4971 "%s operands must have the same storage class",
4972 spirv_op_to_string(opcode));
4973
4974 struct vtn_type *vtn_type = vtn_get_type(b, w[1]);
4975 const struct glsl_type *type = vtn_type->type;
4976
4977 nir_address_format addr_format = vtn_mode_to_address_format(
4978 b, vtn_storage_class_to_mode(b, type1->storage_class, NULL, NULL));
4979
4980 nir_ssa_def *def;
4981
4982 switch (opcode) {
4983 case SpvOpPtrDiff: {
4984 /* OpPtrDiff returns the difference in number of elements (not byte offset). */
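      /* Worked example (assuming natural layout): for two pointers into a
       * float array 48 bytes apart, the address subtraction yields 48, and
       * dividing by elem_size == 4 gives a difference of 12 elements.
       */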
4985 unsigned elem_size, elem_align;
4986 glsl_get_natural_size_align_bytes(type1->deref->type,
4987 &elem_size, &elem_align);
4988
4989 def = nir_build_addr_isub(&b->nb,
4990 vtn_get_nir_ssa(b, w[3]),
4991 vtn_get_nir_ssa(b, w[4]),
4992 addr_format);
4993 def = nir_idiv(&b->nb, def, nir_imm_intN_t(&b->nb, elem_size, def->bit_size));
4994 def = nir_i2i(&b->nb, def, glsl_get_bit_size(type));
4995 break;
4996 }
4997
4998 case SpvOpPtrEqual:
4999 case SpvOpPtrNotEqual: {
5000 def = nir_build_addr_ieq(&b->nb,
5001 vtn_get_nir_ssa(b, w[3]),
5002 vtn_get_nir_ssa(b, w[4]),
5003 addr_format);
5004 if (opcode == SpvOpPtrNotEqual)
5005 def = nir_inot(&b->nb, def);
5006 break;
5007 }
5008
5009 default:
5010 unreachable("Invalid ptr operation");
5011 }
5012
5013 vtn_push_nir_ssa(b, w[2], def);
5014 }
5015
5016 static bool
5017 vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
5018 const uint32_t *w, unsigned count)
5019 {
5020 switch (opcode) {
5021 case SpvOpLabel:
5022 break;
5023
5024 case SpvOpLoopMerge:
5025 case SpvOpSelectionMerge:
5026 /* This is handled by cfg pre-pass and walk_blocks */
5027 break;
5028
5029 case SpvOpUndef: {
5030 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
5031 val->type = vtn_get_type(b, w[1]);
5032 break;
5033 }
5034
5035 case SpvOpExtInst:
5036 vtn_handle_extension(b, opcode, w, count);
5037 break;
5038
5039 case SpvOpVariable:
5040 case SpvOpLoad:
5041 case SpvOpStore:
5042 case SpvOpCopyMemory:
5043 case SpvOpCopyMemorySized:
5044 case SpvOpAccessChain:
5045 case SpvOpPtrAccessChain:
5046 case SpvOpInBoundsAccessChain:
5047 case SpvOpInBoundsPtrAccessChain:
5048 case SpvOpArrayLength:
5049 case SpvOpConvertPtrToU:
5050 case SpvOpConvertUToPtr:
5051 vtn_handle_variables(b, opcode, w, count);
5052 break;
5053
5054 case SpvOpFunctionCall:
5055 vtn_handle_function_call(b, opcode, w, count);
5056 break;
5057
5058 case SpvOpSampledImage:
5059 case SpvOpImage:
5060 case SpvOpImageSampleImplicitLod:
5061 case SpvOpImageSampleExplicitLod:
5062 case SpvOpImageSampleDrefImplicitLod:
5063 case SpvOpImageSampleDrefExplicitLod:
5064 case SpvOpImageSampleProjImplicitLod:
5065 case SpvOpImageSampleProjExplicitLod:
5066 case SpvOpImageSampleProjDrefImplicitLod:
5067 case SpvOpImageSampleProjDrefExplicitLod:
5068 case SpvOpImageFetch:
5069 case SpvOpImageGather:
5070 case SpvOpImageDrefGather:
5071 case SpvOpImageQuerySizeLod:
5072 case SpvOpImageQueryLod:
5073 case SpvOpImageQueryLevels:
5074 case SpvOpImageQuerySamples:
5075 vtn_handle_texture(b, opcode, w, count);
5076 break;
5077
5078 case SpvOpImageRead:
5079 case SpvOpImageWrite:
5080 case SpvOpImageTexelPointer:
5081 vtn_handle_image(b, opcode, w, count);
5082 break;
5083
5084 case SpvOpImageQuerySize: {
5085 struct vtn_type *image_type = vtn_get_value_type(b, w[3]);
5086 vtn_assert(image_type->base_type == vtn_base_type_image);
5087 if (glsl_type_is_image(image_type->glsl_image)) {
5088 vtn_handle_image(b, opcode, w, count);
5089 } else {
5090 vtn_assert(glsl_type_is_sampler(image_type->glsl_image));
5091 vtn_handle_texture(b, opcode, w, count);
5092 }
5093 break;
5094 }
5095
5096 case SpvOpFragmentMaskFetchAMD:
5097 case SpvOpFragmentFetchAMD:
5098 vtn_handle_texture(b, opcode, w, count);
5099 break;
5100
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }

   case SpvOpSelect:
      vtn_handle_select(b, opcode, w, count);
      break;

   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpQuantizeToF16:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSRem:
   case SpvOpSMod:
   case SpvOpFRem:
   case SpvOpFMod:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpFwidth:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpBitCount:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
   case SpvOpUCountLeadingZerosINTEL:
   case SpvOpUCountTrailingZerosINTEL:
   case SpvOpAbsISubINTEL:
   case SpvOpAbsUSubINTEL:
   case SpvOpIAddSatINTEL:
   case SpvOpUAddSatINTEL:
   case SpvOpIAverageINTEL:
   case SpvOpUAverageINTEL:
   case SpvOpIAverageRoundedINTEL:
   case SpvOpUAverageRoundedINTEL:
   case SpvOpISubSatINTEL:
   case SpvOpUSubSatINTEL:
   case SpvOpIMul32x16INTEL:
   case SpvOpUMul32x16INTEL:
      vtn_handle_alu(b, opcode, w, count);
      break;

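   /* OpBitcast is handled separately from the other ALU ops because it can
    * also convert between pointers and (vectors of) integers, which needs
    * the vtn type information rather than a single nir_op.
    */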
   case SpvOpBitcast:
      vtn_handle_bitcast(b, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyLogical:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   case SpvOpGroupNonUniformElect:
   case SpvOpGroupNonUniformAll:
   case SpvOpGroupNonUniformAny:
   case SpvOpGroupNonUniformAllEqual:
   case SpvOpGroupNonUniformBroadcast:
   case SpvOpGroupNonUniformBroadcastFirst:
   case SpvOpGroupNonUniformBallot:
   case SpvOpGroupNonUniformInverseBallot:
   case SpvOpGroupNonUniformBallotBitExtract:
   case SpvOpGroupNonUniformBallotBitCount:
   case SpvOpGroupNonUniformBallotFindLSB:
   case SpvOpGroupNonUniformBallotFindMSB:
   case SpvOpGroupNonUniformShuffle:
   case SpvOpGroupNonUniformShuffleXor:
   case SpvOpGroupNonUniformShuffleUp:
   case SpvOpGroupNonUniformShuffleDown:
   case SpvOpGroupNonUniformIAdd:
   case SpvOpGroupNonUniformFAdd:
   case SpvOpGroupNonUniformIMul:
   case SpvOpGroupNonUniformFMul:
   case SpvOpGroupNonUniformSMin:
   case SpvOpGroupNonUniformUMin:
   case SpvOpGroupNonUniformFMin:
   case SpvOpGroupNonUniformSMax:
   case SpvOpGroupNonUniformUMax:
   case SpvOpGroupNonUniformFMax:
   case SpvOpGroupNonUniformBitwiseAnd:
   case SpvOpGroupNonUniformBitwiseOr:
   case SpvOpGroupNonUniformBitwiseXor:
   case SpvOpGroupNonUniformLogicalAnd:
   case SpvOpGroupNonUniformLogicalOr:
   case SpvOpGroupNonUniformLogicalXor:
   case SpvOpGroupNonUniformQuadBroadcast:
   case SpvOpGroupNonUniformQuadSwap:
   case SpvOpGroupAll:
   case SpvOpGroupAny:
   case SpvOpGroupBroadcast:
   case SpvOpGroupIAdd:
   case SpvOpGroupFAdd:
   case SpvOpGroupFMin:
   case SpvOpGroupUMin:
   case SpvOpGroupSMin:
   case SpvOpGroupFMax:
   case SpvOpGroupUMax:
   case SpvOpGroupSMax:
   case SpvOpSubgroupBallotKHR:
   case SpvOpSubgroupFirstInvocationKHR:
   case SpvOpSubgroupReadInvocationKHR:
   case SpvOpSubgroupAllKHR:
   case SpvOpSubgroupAnyKHR:
   case SpvOpSubgroupAllEqualKHR:
   case SpvOpGroupIAddNonUniformAMD:
   case SpvOpGroupFAddNonUniformAMD:
   case SpvOpGroupFMinNonUniformAMD:
   case SpvOpGroupUMinNonUniformAMD:
   case SpvOpGroupSMinNonUniformAMD:
   case SpvOpGroupFMaxNonUniformAMD:
   case SpvOpGroupUMaxNonUniformAMD:
   case SpvOpGroupSMaxNonUniformAMD:
      vtn_handle_subgroup(b, opcode, w, count);
      break;

   case SpvOpPtrDiff:
   case SpvOpPtrEqual:
   case SpvOpPtrNotEqual:
      vtn_handle_ptr(b, opcode, w, count);
      break;

   case SpvOpBeginInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_begin_invocation_interlock);
      break;

   case SpvOpEndInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_end_invocation_interlock);
      break;

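   /* Demote turns the invocation into a helper invocation: its stores and
    * outputs are discarded from this point on but, unlike OpKill, control
    * flow keeps executing, so derivatives in the quad remain defined.
    */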
   case SpvOpDemoteToHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_demote);
      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }

   case SpvOpIsHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_is_helper_invocation);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1, NULL);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa);
      break;
   }

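   /* SPV_KHR_shader_clock only allows Subgroup and Device scope on
    * OpReadClockKHR; anything else is invalid SPIR-V.
    */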
   case SpvOpReadClockKHR: {
      SpvScope scope = vtn_constant_uint(b, w[3]);
      nir_scope nir_scope;

      switch (scope) {
      case SpvScopeDevice:
         nir_scope = NIR_SCOPE_DEVICE;
         break;
      case SpvScopeSubgroup:
         nir_scope = NIR_SCOPE_SUBGROUP;
         break;
      default:
         vtn_fail("invalid read clock scope");
      }

      /* The operation supports two result types: uvec2 and uint64_t.  The
       * NIR intrinsic produces uvec2, so pack the result into a 64-bit
       * scalar in the other case.
       */
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_shader_clock);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 2, 32, NULL);
      nir_intrinsic_set_memory_scope(intrin, nir_scope);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      struct vtn_type *type = vtn_get_type(b, w[1]);
      const struct glsl_type *dest_type = type->type;
      nir_ssa_def *result;

      if (glsl_type_is_vector(dest_type)) {
         assert(dest_type == glsl_vector_type(GLSL_TYPE_UINT, 2));
         result = &intrin->dest.ssa;
      } else {
         assert(glsl_type_is_scalar(dest_type));
         assert(glsl_get_base_type(dest_type) == GLSL_TYPE_UINT64);
         result = nir_pack_64_2x32(&b->nb, &intrin->dest.ssa);
      }

      vtn_push_nir_ssa(b, w[2], result);
      break;
   }

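   /* OpLifetimeStart/Stop are only hints about when a pointer's contents
    * become valid or stale; they carry no semantics we need, so ignore
    * them.
    */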
   case SpvOpLifetimeStart:
   case SpvOpLifetimeStop:
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   return true;
}

struct vtn_builder*
vtn_create_builder(const uint32_t *words, size_t word_count,
                   gl_shader_stage stage, const char *entry_point_name,
                   const struct spirv_to_nir_options *options)
{
   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   struct spirv_to_nir_options *dup_options =
      ralloc(b, struct spirv_to_nir_options);
   *dup_options = *options;

   b->spirv = words;
   b->spirv_word_count = word_count;
   b->file = NULL;
   b->line = -1;
   b->col = -1;
   list_inithead(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->options = dup_options;

   /*
    * Handle the SPIR-V header (first 5 dwords): the magic number, the
    * version, the generator magic, the value ID bound, and the reserved
    * schema word, which must be zero.
    * Can't use vtn_assert() as the setjmp(3) target isn't initialized yet.
    */
   if (word_count <= 5)
      goto fail;

   if (words[0] != SpvMagicNumber) {
      vtn_err("words[0] was 0x%x, want 0x%x", words[0], SpvMagicNumber);
      goto fail;
   }
   if (words[1] < 0x10000) {
      vtn_err("words[1] was 0x%x, want >= 0x10000", words[1]);
      goto fail;
   }

   uint16_t generator_id = words[2] >> 16;
   uint16_t generator_version = words[2];

   /* In GLSLang commit 8297936dd6eb3, their handling of barrier() was fixed
    * to provide correct memory semantics on compute shader barrier()
    * commands.  Prior to that, we need to fix them up ourselves.  This
    * GLSLang fix caused them to bump to generator version 3.
    */
   b->wa_glslang_cs_barrier = (generator_id == 8 && generator_version < 3);

   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   if (words[4] != 0) {
      vtn_err("words[4] was %u, want 0", words[4]);
      goto fail;
   }

   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);

   return b;
fail:
   ralloc_free(b);
   return NULL;
}

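/* OpenCL kernel entry points, unlike graphics entry points, take
 * parameters.  NIR has no way to pass arguments to an entry point, so we
 * wrap the kernel in a parameterless function which declares one uniform
 * input variable per parameter, loads (or takes a deref of) each one, and
 * calls the real entry point with the results.
 */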
static nir_function *
vtn_emit_kernel_entry_point_wrapper(struct vtn_builder *b,
                                    nir_function *entry_point)
{
   vtn_assert(entry_point == b->entry_point->func->impl->function);
   vtn_fail_if(!entry_point->name, "entry points are required to have a name");
   const char *func_name =
      ralloc_asprintf(b->shader, "__wrapped_%s", entry_point->name);

   /* we shouldn't have any inputs yet */
   vtn_assert(!entry_point->shader->num_inputs);
   vtn_assert(b->shader->info.stage == MESA_SHADER_KERNEL);

   nir_function *main_entry_point = nir_function_create(b->shader, func_name);
   main_entry_point->impl = nir_function_impl_create(main_entry_point);
   nir_builder_init(&b->nb, main_entry_point->impl);
   b->nb.cursor = nir_after_cf_list(&main_entry_point->impl->body);
   b->func_param_idx = 0;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, entry_point);

   for (unsigned i = 0; i < entry_point->num_params; ++i) {
      struct vtn_type *param_type = b->entry_point->func->type->params[i];

      /* consider all pointers to function memory to be parameters passed
       * by value
       */
      bool is_by_val = param_type->base_type == vtn_base_type_pointer &&
         param_type->storage_class == SpvStorageClassFunction;

      /* input variable */
      nir_variable *in_var = rzalloc(b->nb.shader, nir_variable);
      in_var->data.mode = nir_var_uniform;
      in_var->data.read_only = true;
      in_var->data.location = i;
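      /* Map the OpenCL image access qualifier onto NIR access flags:
       * read_only images may not be written and write_only images may not
       * be read.
       */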
      if (param_type->base_type == vtn_base_type_image) {
         in_var->data.access = 0;
         if (param_type->access_qualifier & SpvAccessQualifierReadOnly)
            in_var->data.access |= ACCESS_NON_WRITEABLE;
         if (param_type->access_qualifier & SpvAccessQualifierWriteOnly)
            in_var->data.access |= ACCESS_NON_READABLE;
      }

      if (is_by_val)
         in_var->type = param_type->deref->type;
      else if (param_type->base_type == vtn_base_type_image)
         in_var->type = param_type->glsl_image;
      else if (param_type->base_type == vtn_base_type_sampler)
         in_var->type = glsl_bare_sampler_type();
      else
         in_var->type = param_type->type;

      nir_shader_add_variable(b->nb.shader, in_var);
      b->nb.shader->num_inputs++;

      /* we have to copy the entire variable into function memory */
      if (is_by_val) {
         nir_variable *copy_var =
            nir_local_variable_create(main_entry_point->impl, in_var->type,
                                      "copy_in");
         nir_copy_var(&b->nb, copy_var, in_var);
         call->params[i] =
            nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->dest.ssa);
      } else if (param_type->base_type == vtn_base_type_image ||
                 param_type->base_type == vtn_base_type_sampler) {
         /* Don't load the var, just pass a deref of it */
         call->params[i] =
            nir_src_for_ssa(&nir_build_deref_var(&b->nb, in_var)->dest.ssa);
      } else {
         call->params[i] = nir_src_for_ssa(nir_load_var(&b->nb, in_var));
      }
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   return main_entry_point;
}

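/* A minimal caller-side sketch of how spirv_to_nir() is typically used,
 * assuming the SPIR-V binary is already in `words`/`word_count` and that
 * `spirv_options` and `nir_options` (hypothetical names here) come from
 * the driver:
 *
 *    nir_shader *nir = spirv_to_nir(words, word_count,
 *                                   NULL, 0, // no specialization constants
 *                                   MESA_SHADER_FRAGMENT, "main",
 *                                   &spirv_options, nir_options);
 *    if (nir == NULL)
 *       return error; // the binary failed validation or translation
 */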
nir_shader *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct spirv_to_nir_options *options,
             const nir_shader_compiler_options *nir_options)
{
   const uint32_t *word_end = words + word_count;

   struct vtn_builder *b = vtn_create_builder(words, word_count,
                                              stage, entry_point_name,
                                              options);

   if (b == NULL)
      return NULL;

   /* _vtn_fail() longjmps back to here on any error encountered during
    * parsing, so the builder gets cleaned up in one place.
    */
   if (setjmp(b->fail_jump)) {
      ralloc_free(b);
      return NULL;
   }

   /* Skip the SPIR-V header, handled in vtn_create_builder */
   words += 5;

   b->shader = nir_shader_create(b, stage, nir_options, NULL);

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      vtn_fail("Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   /* Ensure a sane address mode is being used for function temps */
   assert(nir_address_format_bit_size(b->options->temp_addr_format) ==
          nir_get_ptr_bitsize(b->shader));
   assert(nir_address_format_num_components(b->options->temp_addr_format) == 1);

   /* Set shader info defaults */
   if (stage == MESA_SHADER_GEOMETRY)
      b->shader->info.gs.invocations = 1;

   /* Parse execution modes. */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   /* Parse execution modes that depend on IDs.  This must happen after the
    * constants have been parsed.
    */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode_id, NULL);

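   /* If the shader declared a constant decorated with the WorkgroupSize
    * built-in, it overrides the LocalSize execution mode, so copy its value
    * into the shader info.
    */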
   if (b->workgroup_size_builtin) {
      vtn_assert(b->workgroup_size_builtin->type->type ==
                 glsl_vector_type(GLSL_TYPE_UINT, 3));

      nir_const_value *const_size =
         b->workgroup_size_builtin->constant->values;

      b->shader->info.cs.local_size[0] = const_size[0].u32;
      b->shader->info.cs.local_size[1] = const_size[1].u32;
      b->shader->info.cs.local_size[2] = const_size[2].u32;
   }

   /* Set types on all vtn_values */
   vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type);

   vtn_build_cfg(b, words, word_end);

   assert(b->entry_point->value_type == vtn_value_type_function);
   b->entry_point->func->referenced = true;

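   /* Emit every referenced function.  Emitting a body can discover calls to
    * functions that haven't been emitted yet, marking them referenced, so
    * iterate until we reach a fixed point.
    */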
   bool progress;
   do {
      progress = false;
      vtn_foreach_cf_node(node, &b->functions) {
         struct vtn_function *func = vtn_cf_node_as_function(node);
         if (func->referenced && !func->emitted) {
            b->const_table = _mesa_pointer_hash_table_create(b);

            vtn_function_emit(b, func, vtn_handle_body_instruction);
            progress = true;
         }
      }
   } while (progress);

   vtn_assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   vtn_assert(entry_point);

   /* post process entry_points with input params */
   if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL)
      entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point);

   /* structurize the CFG */
   nir_lower_goto_ifs(b->shader);

   entry_point->is_entrypoint = true;

   /* When multiple shader stages exist in the same SPIR-V module, we
    * generate input and output variables for every stage, in the same
    * NIR program.  These dead variables can be invalid NIR.  For example,
    * TCS outputs must be per-vertex arrays (or decorated 'patch'), while
    * VS output variables wouldn't be.
    *
    * To ensure we have valid NIR, we eliminate any dead inputs and outputs
    * right away.  In order to do so, we must lower any constant initializers
    * on outputs so nir_remove_dead_variables sees that they're written to.
    */
   nir_lower_variable_initializers(b->shader, nir_var_shader_out);
   nir_remove_dead_variables(b->shader,
                             nir_var_shader_in | nir_var_shader_out, NULL);

   /* We sometimes generate bogus derefs that, while never used, give the
    * validator a bit of heartburn.  Run dead code elimination to get rid of
    * them.
    */
   nir_opt_dce(b->shader);

   /* Unparent the shader from the vtn_builder before we delete the builder */
   ralloc_steal(NULL, b->shader);

   nir_shader *shader = b->shader;
   ralloc_free(b);

   return shader;
}