spirv: Silence a bunch of unused parameter warnings
[mesa.git] / src / compiler / spirv / spirv_to_nir.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jason Ekstrand (jason@jlekstrand.net)
25 *
26 */
27
28 #include "vtn_private.h"
29 #include "nir/nir_vla.h"
30 #include "nir/nir_control_flow.h"
31 #include "nir/nir_constant_expressions.h"
32 #include "nir/nir_deref.h"
33 #include "spirv_info.h"
34
35 #include "util/u_math.h"
36
37 #include <stdio.h>
38
39 void
40 vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
41 size_t spirv_offset, const char *message)
42 {
43 if (b->options->debug.func) {
44 b->options->debug.func(b->options->debug.private_data,
45 level, spirv_offset, message);
46 }
47
48 #ifndef NDEBUG
49 if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
50 fprintf(stderr, "%s\n", message);
51 #endif
52 }
53
54 void
55 vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
56 size_t spirv_offset, const char *fmt, ...)
57 {
58 va_list args;
59 char *msg;
60
61 va_start(args, fmt);
62 msg = ralloc_vasprintf(NULL, fmt, args);
63 va_end(args);
64
65 vtn_log(b, level, spirv_offset, msg);
66
67 ralloc_free(msg);
68 }
69
70 static void
71 vtn_log_err(struct vtn_builder *b,
72 enum nir_spirv_debug_level level, const char *prefix,
73 const char *file, unsigned line,
74 const char *fmt, va_list args)
75 {
76 char *msg;
77
78 msg = ralloc_strdup(NULL, prefix);
79
80 #ifndef NDEBUG
81 ralloc_asprintf_append(&msg, " In file %s:%u\n", file, line);
82 #endif
83
84 ralloc_asprintf_append(&msg, " ");
85
86 ralloc_vasprintf_append(&msg, fmt, args);
87
88 ralloc_asprintf_append(&msg, "\n %zu bytes into the SPIR-V binary",
89 b->spirv_offset);
90
91 if (b->file) {
92 ralloc_asprintf_append(&msg,
93 "\n in SPIR-V source file %s, line %d, col %d",
94 b->file, b->line, b->col);
95 }
96
97 vtn_log(b, level, b->spirv_offset, msg);
98
99 ralloc_free(msg);
100 }
101
102 static void
103 vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
104 {
105 static int idx = 0;
106
107 char filename[1024];
108 int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
109 path, prefix, idx++);
110 if (len < 0 || len >= sizeof(filename))
111 return;
112
113 FILE *f = fopen(filename, "w");
114 if (f == NULL)
115 return;
116
117 fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
118 fclose(f);
119
120 vtn_info("SPIR-V shader dumped to %s", filename);
121 }
122
123 void
124 _vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
125 const char *fmt, ...)
126 {
127 va_list args;
128
129 va_start(args, fmt);
130 vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
131 file, line, fmt, args);
132 va_end(args);
133 }
134
135 void
136 _vtn_err(struct vtn_builder *b, const char *file, unsigned line,
137 const char *fmt, ...)
138 {
139 va_list args;
140
141 va_start(args, fmt);
142 vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
143 file, line, fmt, args);
144 va_end(args);
145 }
146
147 void
148 _vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
149 const char *fmt, ...)
150 {
151 va_list args;
152
153 va_start(args, fmt);
154 vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
155 file, line, fmt, args);
156 va_end(args);
157
158 const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
159 if (dump_path)
160 vtn_dump_shader(b, dump_path, "fail");
161
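/* Unwind to the setjmp() on b->fail_jump set up by the top-level entry
 * point; vtn_fail() therefore never returns to its caller. */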
162 longjmp(b->fail_jump, 1);
163 }
164
165 struct spec_constant_value {
166 bool is_double;
167 union {
168 uint32_t data32;
169 uint64_t data64;
170 };
171 };
172
173 static struct vtn_ssa_value *
174 vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
175 {
176 struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
177 val->type = type;
178
179 if (glsl_type_is_vector_or_scalar(type)) {
180 unsigned num_components = glsl_get_vector_elements(val->type);
181 unsigned bit_size = glsl_get_bit_size(val->type);
182 val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
183 } else {
184 unsigned elems = glsl_get_length(val->type);
185 val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
186 if (glsl_type_is_matrix(type)) {
187 const struct glsl_type *elem_type =
188 glsl_vector_type(glsl_get_base_type(type),
189 glsl_get_vector_elements(type));
190
191 for (unsigned i = 0; i < elems; i++)
192 val->elems[i] = vtn_undef_ssa_value(b, elem_type);
193 } else if (glsl_type_is_array(type)) {
194 const struct glsl_type *elem_type = glsl_get_array_element(type);
195 for (unsigned i = 0; i < elems; i++)
196 val->elems[i] = vtn_undef_ssa_value(b, elem_type);
197 } else {
198 for (unsigned i = 0; i < elems; i++) {
199 const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
200 val->elems[i] = vtn_undef_ssa_value(b, elem_type);
201 }
202 }
203 }
204
205 return val;
206 }
207
208 static struct vtn_ssa_value *
209 vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
210 const struct glsl_type *type)
211 {
212 struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
213
214 if (entry)
215 return entry->data;
216
217 struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
218 val->type = type;
219
220 switch (glsl_get_base_type(type)) {
221 case GLSL_TYPE_INT:
222 case GLSL_TYPE_UINT:
223 case GLSL_TYPE_INT16:
224 case GLSL_TYPE_UINT16:
225 case GLSL_TYPE_UINT8:
226 case GLSL_TYPE_INT8:
227 case GLSL_TYPE_INT64:
228 case GLSL_TYPE_UINT64:
229 case GLSL_TYPE_BOOL:
230 case GLSL_TYPE_FLOAT:
231 case GLSL_TYPE_FLOAT16:
232 case GLSL_TYPE_DOUBLE: {
233 int bit_size = glsl_get_bit_size(type);
234 if (glsl_type_is_vector_or_scalar(type)) {
235 unsigned num_components = glsl_get_vector_elements(val->type);
236 nir_load_const_instr *load =
237 nir_load_const_instr_create(b->shader, num_components, bit_size);
238
239 memcpy(load->value, constant->values,
240 sizeof(nir_const_value) * load->def.num_components);
241
242 nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
243 val->def = &load->def;
244 } else {
245 assert(glsl_type_is_matrix(type));
246 unsigned columns = glsl_get_matrix_columns(val->type);
247 val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);
248 const struct glsl_type *column_type = glsl_get_column_type(val->type);
249 for (unsigned i = 0; i < columns; i++)
250 val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
251 column_type);
252 }
253 break;
254 }
255
256 case GLSL_TYPE_ARRAY: {
257 unsigned elems = glsl_get_length(val->type);
258 val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
259 const struct glsl_type *elem_type = glsl_get_array_element(val->type);
260 for (unsigned i = 0; i < elems; i++)
261 val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
262 elem_type);
263 break;
264 }
265
266 case GLSL_TYPE_STRUCT: {
267 unsigned elems = glsl_get_length(val->type);
268 val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
269 for (unsigned i = 0; i < elems; i++) {
270 const struct glsl_type *elem_type =
271 glsl_get_struct_field(val->type, i);
272 val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
273 elem_type);
274 }
275 break;
276 }
277
278 default:
279 vtn_fail("bad constant type");
280 }
281
282 return val;
283 }
284
285 struct vtn_ssa_value *
286 vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
287 {
288 struct vtn_value *val = vtn_untyped_value(b, value_id);
289 switch (val->value_type) {
290 case vtn_value_type_undef:
291 return vtn_undef_ssa_value(b, val->type->type);
292
293 case vtn_value_type_constant:
294 return vtn_const_ssa_value(b, val->constant, val->type->type);
295
296 case vtn_value_type_ssa:
297 return val->ssa;
298
299 case vtn_value_type_pointer:
300 vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
301 struct vtn_ssa_value *ssa =
302 vtn_create_ssa_value(b, val->pointer->ptr_type->type);
303 ssa->def = vtn_pointer_to_ssa(b, val->pointer);
304 return ssa;
305
306 default:
307 vtn_fail("Invalid type for an SSA value");
308 }
309 }
310
311 static char *
312 vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
313 unsigned word_count, unsigned *words_used)
314 {
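/* SPIR-V literal strings are nul-terminated UTF-8, zero-padded out to a
 * 32-bit word boundary, so e.g. "abc" plus its terminator occupies
 * exactly one word. */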
315 char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
316 if (words_used) {
317 /* Amount of space taken by the string (including the null) */
318 unsigned len = strlen(dup) + 1;
319 *words_used = DIV_ROUND_UP(len, sizeof(*words));
320 }
321 return dup;
322 }
323
324 const uint32_t *
325 vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
326 const uint32_t *end, vtn_instruction_handler handler)
327 {
328 b->file = NULL;
329 b->line = -1;
330 b->col = -1;
331
332 const uint32_t *w = start;
333 while (w < end) {
334 SpvOp opcode = w[0] & SpvOpCodeMask;
335 unsigned count = w[0] >> SpvWordCountShift;
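/* Per the SPIR-V spec, the first word of every instruction packs the
 * total word count (including this word) in the high 16 bits and the
 * opcode in the low 16 bits; e.g. 0x0004003d is a 4-word OpLoad. */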
336 vtn_assert(count >= 1 && w + count <= end);
337
338 b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;
339
340 switch (opcode) {
341 case SpvOpNop:
342 break; /* Do nothing */
343
344 case SpvOpLine:
345 b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
346 b->line = w[2];
347 b->col = w[3];
348 break;
349
350 case SpvOpNoLine:
351 b->file = NULL;
352 b->line = -1;
353 b->col = -1;
354 break;
355
356 default:
357 if (!handler(b, opcode, w, count))
358 return w;
359 break;
360 }
361
362 w += count;
363 }
364
365 b->spirv_offset = 0;
366 b->file = NULL;
367 b->line = -1;
368 b->col = -1;
369
370 assert(w == end);
371 return w;
372 }
373
374 static bool
375 vtn_handle_non_semantic_instruction(struct vtn_builder *b, SpvOp ext_opcode,
376 const uint32_t *w, unsigned count)
377 {
378 /* Do nothing. */
379 return true;
380 }
381
382 static void
383 vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
384 const uint32_t *w, unsigned count)
385 {
386 const char *ext = (const char *)&w[2];
387 switch (opcode) {
388 case SpvOpExtInstImport: {
389 struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
390 if (strcmp(ext, "GLSL.std.450") == 0) {
391 val->ext_handler = vtn_handle_glsl450_instruction;
392 } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0)
393 && (b->options && b->options->caps.amd_gcn_shader)) {
394 val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
395 } else if ((strcmp(ext, "SPV_AMD_shader_ballot") == 0)
396 && (b->options && b->options->caps.amd_shader_ballot)) {
397 val->ext_handler = vtn_handle_amd_shader_ballot_instruction;
398 } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
399 && (b->options && b->options->caps.amd_trinary_minmax)) {
400 val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
401 } else if (strcmp(ext, "OpenCL.std") == 0) {
402 val->ext_handler = vtn_handle_opencl_instruction;
403 } else if (strstr(ext, "NonSemantic.") == ext) {
404 val->ext_handler = vtn_handle_non_semantic_instruction;
405 } else {
406 vtn_fail("Unsupported extension: %s", ext);
407 }
408 break;
409 }
410
411 case SpvOpExtInst: {
412 struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
413 bool handled = val->ext_handler(b, w[4], w, count);
414 vtn_assert(handled);
415 break;
416 }
417
418 default:
419 vtn_fail_with_opcode("Unhandled opcode", opcode);
420 }
421 }
422
423 static void
424 _foreach_decoration_helper(struct vtn_builder *b,
425 struct vtn_value *base_value,
426 int parent_member,
427 struct vtn_value *value,
428 vtn_decoration_foreach_cb cb, void *data)
429 {
430 for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
431 int member;
432 if (dec->scope == VTN_DEC_DECORATION) {
433 member = parent_member;
434 } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
435 vtn_fail_if(value->value_type != vtn_value_type_type ||
436 value->type->base_type != vtn_base_type_struct,
437 "OpMemberDecorate and OpGroupMemberDecorate are only "
438 "allowed on OpTypeStruct");
439 /* This means we haven't recursed yet */
440 assert(value == base_value);
441
442 member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
443
444 vtn_fail_if(member >= base_value->type->length,
445 "OpMemberDecorate specifies member %d but the "
446 "OpTypeStruct has only %u members",
447 member, base_value->type->length);
448 } else {
449 /* Not a decoration */
450 assert(dec->scope == VTN_DEC_EXECUTION_MODE);
451 continue;
452 }
453
454 if (dec->group) {
455 assert(dec->group->value_type == vtn_value_type_decoration_group);
456 _foreach_decoration_helper(b, base_value, member, dec->group,
457 cb, data);
458 } else {
459 cb(b, base_value, member, dec, data);
460 }
461 }
462 }
463
464 /** Iterates (recursively if needed) over all of the decorations on a value
465 *
466 * This function iterates over all of the decorations applied to a given
467 * value. If it encounters a decoration group, it recurses into the group
468 * and iterates over all of those decorations as well.
469 */
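/* A minimal usage sketch (count_decorations_cb is a hypothetical callback):
 *
 *    static void
 *    count_decorations_cb(struct vtn_builder *b, struct vtn_value *val,
 *                         int member, const struct vtn_decoration *dec,
 *                         void *data)
 *    {
 *       (*(unsigned *)data)++;
 *    }
 *
 *    unsigned count = 0;
 *    vtn_foreach_decoration(b, val, count_decorations_cb, &count);
 */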
470 void
471 vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
472 vtn_decoration_foreach_cb cb, void *data)
473 {
474 _foreach_decoration_helper(b, value, -1, value, cb, data);
475 }
476
477 void
478 vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
479 vtn_execution_mode_foreach_cb cb, void *data)
480 {
481 for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
482 if (dec->scope != VTN_DEC_EXECUTION_MODE)
483 continue;
484
485 assert(dec->group == NULL);
486 cb(b, value, dec, data);
487 }
488 }
489
490 void
491 vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
492 const uint32_t *w, unsigned count)
493 {
494 const uint32_t *w_end = w + count;
495 const uint32_t target = w[1];
496 w += 2;
497
498 switch (opcode) {
499 case SpvOpDecorationGroup:
500 vtn_push_value(b, target, vtn_value_type_decoration_group);
501 break;
502
503 case SpvOpDecorate:
504 case SpvOpDecorateId:
505 case SpvOpMemberDecorate:
506 case SpvOpDecorateString:
507 case SpvOpMemberDecorateString:
508 case SpvOpExecutionMode:
509 case SpvOpExecutionModeId: {
510 struct vtn_value *val = vtn_untyped_value(b, target);
511
512 struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
513 switch (opcode) {
514 case SpvOpDecorate:
515 case SpvOpDecorateId:
516 case SpvOpDecorateString:
517 dec->scope = VTN_DEC_DECORATION;
518 break;
519 case SpvOpMemberDecorate:
520 case SpvOpMemberDecorateString:
521 dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
522 vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
523 "Member argument of OpMemberDecorate too large");
524 break;
525 case SpvOpExecutionMode:
526 case SpvOpExecutionModeId:
527 dec->scope = VTN_DEC_EXECUTION_MODE;
528 break;
529 default:
530 unreachable("Invalid decoration opcode");
531 }
532 dec->decoration = *(w++);
533 dec->operands = w;
534
535 /* Link into the list */
536 dec->next = val->decoration;
537 val->decoration = dec;
538 break;
539 }
540
541 case SpvOpGroupMemberDecorate:
542 case SpvOpGroupDecorate: {
543 struct vtn_value *group =
544 vtn_value(b, target, vtn_value_type_decoration_group);
545
546 for (; w < w_end; w++) {
547 struct vtn_value *val = vtn_untyped_value(b, *w);
548 struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
549
550 dec->group = group;
551 if (opcode == SpvOpGroupDecorate) {
552 dec->scope = VTN_DEC_DECORATION;
553 } else {
554 dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
555 vtn_fail_if(dec->scope < 0, /* Check for overflow */
556 "Member argument of OpGroupMemberDecorate too large");
557 }
558
559 /* Link into the list */
560 dec->next = val->decoration;
561 val->decoration = dec;
562 }
563 break;
564 }
565
566 default:
567 unreachable("Unhandled opcode");
568 }
569 }
570
571 struct member_decoration_ctx {
572 unsigned num_fields;
573 struct glsl_struct_field *fields;
574 struct vtn_type *type;
575 };
576
577 /**
578 * Returns true if the given type contains a struct decorated with Block
579 * or BufferBlock.
580 */
581 bool
582 vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type)
583 {
584 switch (type->base_type) {
585 case vtn_base_type_array:
586 return vtn_type_contains_block(b, type->array_element);
587 case vtn_base_type_struct:
588 if (type->block || type->buffer_block)
589 return true;
590 for (unsigned i = 0; i < type->length; i++) {
591 if (vtn_type_contains_block(b, type->members[i]))
592 return true;
593 }
594 return false;
595 default:
596 return false;
597 }
598 }
599
600 /** Returns true if two types are "compatible", i.e. you can do an OpLoad,
601 * OpStore, or OpCopyMemory between them without breaking anything.
602 * Technically, the SPIR-V rules require the exact same type ID but this lets
603 * us internally be a bit looser.
604 */
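/* For example, two OpTypeArray declarations with distinct ids but the same
 * length and compatible element types compare equal here, even though
 * strict SPIR-V typing would treat them as different types. */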
605 bool
606 vtn_types_compatible(struct vtn_builder *b,
607 struct vtn_type *t1, struct vtn_type *t2)
608 {
609 if (t1->id == t2->id)
610 return true;
611
612 if (t1->base_type != t2->base_type)
613 return false;
614
615 switch (t1->base_type) {
616 case vtn_base_type_void:
617 case vtn_base_type_scalar:
618 case vtn_base_type_vector:
619 case vtn_base_type_matrix:
620 case vtn_base_type_image:
621 case vtn_base_type_sampler:
622 case vtn_base_type_sampled_image:
623 return t1->type == t2->type;
624
625 case vtn_base_type_array:
626 return t1->length == t2->length &&
627 vtn_types_compatible(b, t1->array_element, t2->array_element);
628
629 case vtn_base_type_pointer:
630 return vtn_types_compatible(b, t1->deref, t2->deref);
631
632 case vtn_base_type_struct:
633 if (t1->length != t2->length)
634 return false;
635
636 for (unsigned i = 0; i < t1->length; i++) {
637 if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))
638 return false;
639 }
640 return true;
641
642 case vtn_base_type_function:
643 /* This case shouldn't get hit since you can't copy around function
644 * types. Just require them to be identical.
645 */
646 return false;
647 }
648
649 vtn_fail("Invalid base type");
650 }
651
652 struct vtn_type *
653 vtn_type_without_array(struct vtn_type *type)
654 {
655 while (type->base_type == vtn_base_type_array)
656 type = type->array_element;
657 return type;
658 }
659
660 /* Does a shallow copy of a vtn_type. */
661
662 static struct vtn_type *
663 vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
664 {
665 struct vtn_type *dest = ralloc(b, struct vtn_type);
666 *dest = *src;
667
668 switch (src->base_type) {
669 case vtn_base_type_void:
670 case vtn_base_type_scalar:
671 case vtn_base_type_vector:
672 case vtn_base_type_matrix:
673 case vtn_base_type_array:
674 case vtn_base_type_pointer:
675 case vtn_base_type_image:
676 case vtn_base_type_sampler:
677 case vtn_base_type_sampled_image:
678 /* Nothing more to do */
679 break;
680
681 case vtn_base_type_struct:
682 dest->members = ralloc_array(b, struct vtn_type *, src->length);
683 memcpy(dest->members, src->members,
684 src->length * sizeof(src->members[0]));
685
686 dest->offsets = ralloc_array(b, unsigned, src->length);
687 memcpy(dest->offsets, src->offsets,
688 src->length * sizeof(src->offsets[0]));
689 break;
690
691 case vtn_base_type_function:
692 dest->params = ralloc_array(b, struct vtn_type *, src->length);
693 memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
694 break;
695 }
696
697 return dest;
698 }
699
700 static struct vtn_type *
701 mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
702 {
703 type->members[member] = vtn_type_copy(b, type->members[member]);
704 type = type->members[member];
705
706 /* We may have an array of matrices.... Oh, joy! */
707 while (glsl_type_is_array(type->type)) {
708 type->array_element = vtn_type_copy(b, type->array_element);
709 type = type->array_element;
710 }
711
712 vtn_assert(glsl_type_is_matrix(type->type));
713
714 return type;
715 }
716
717 static void
718 vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
719 int member, enum gl_access_qualifier access)
720 {
721 type->members[member] = vtn_type_copy(b, type->members[member]);
722 type = type->members[member];
723
724 type->access |= access;
725 }
726
727 static void
728 array_stride_decoration_cb(struct vtn_builder *b,
729 struct vtn_value *val, int member,
730 const struct vtn_decoration *dec, void *void_ctx)
731 {
732 struct vtn_type *type = val->type;
733
734 if (dec->decoration == SpvDecorationArrayStride) {
735 if (vtn_type_contains_block(b, type)) {
736 vtn_warn("The ArrayStride decoration cannot be applied to an array "
737 "type which contains a structure type decorated Block "
738 "or BufferBlock");
739 /* Ignore the decoration */
740 } else {
741 vtn_fail_if(dec->operands[0] == 0, "ArrayStride must be non-zero");
742 type->stride = dec->operands[0];
743 }
744 }
745 }
746
747 static void
748 struct_member_decoration_cb(struct vtn_builder *b,
749 UNUSED struct vtn_value *val, int member,
750 const struct vtn_decoration *dec, void *void_ctx)
751 {
752 struct member_decoration_ctx *ctx = void_ctx;
753
754 if (member < 0)
755 return;
756
757 assert(member < ctx->num_fields);
758
759 switch (dec->decoration) {
760 case SpvDecorationRelaxedPrecision:
761 case SpvDecorationUniform:
762 case SpvDecorationUniformId:
763 break; /* FIXME: Do nothing with this for now. */
764 case SpvDecorationNonWritable:
765 vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
766 break;
767 case SpvDecorationNonReadable:
768 vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
769 break;
770 case SpvDecorationVolatile:
771 vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
772 break;
773 case SpvDecorationCoherent:
774 vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
775 break;
776 case SpvDecorationNoPerspective:
777 ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
778 break;
779 case SpvDecorationFlat:
780 ctx->fields[member].interpolation = INTERP_MODE_FLAT;
781 break;
782 case SpvDecorationCentroid:
783 ctx->fields[member].centroid = true;
784 break;
785 case SpvDecorationSample:
786 ctx->fields[member].sample = true;
787 break;
788 case SpvDecorationStream:
789 /* Vulkan only allows one GS stream */
790 vtn_assert(dec->operands[0] == 0);
791 break;
792 case SpvDecorationLocation:
793 ctx->fields[member].location = dec->operands[0];
794 break;
795 case SpvDecorationComponent:
796 break; /* FIXME: What should we do with these? */
797 case SpvDecorationBuiltIn:
798 ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
799 ctx->type->members[member]->is_builtin = true;
800 ctx->type->members[member]->builtin = dec->operands[0];
801 ctx->type->builtin_block = true;
802 break;
803 case SpvDecorationOffset:
804 ctx->type->offsets[member] = dec->operands[0];
805 ctx->fields[member].offset = dec->operands[0];
806 break;
807 case SpvDecorationMatrixStride:
808 /* Handled as a second pass */
809 break;
810 case SpvDecorationColMajor:
811 break; /* Nothing to do here. Column-major is the default. */
812 case SpvDecorationRowMajor:
813 mutable_matrix_member(b, ctx->type, member)->row_major = true;
814 break;
815
816 case SpvDecorationPatch:
817 break;
818
819 case SpvDecorationSpecId:
820 case SpvDecorationBlock:
821 case SpvDecorationBufferBlock:
822 case SpvDecorationArrayStride:
823 case SpvDecorationGLSLShared:
824 case SpvDecorationGLSLPacked:
825 case SpvDecorationInvariant:
826 case SpvDecorationRestrict:
827 case SpvDecorationAliased:
828 case SpvDecorationConstant:
829 case SpvDecorationIndex:
830 case SpvDecorationBinding:
831 case SpvDecorationDescriptorSet:
832 case SpvDecorationLinkageAttributes:
833 case SpvDecorationNoContraction:
834 case SpvDecorationInputAttachmentIndex:
835 vtn_warn("Decoration not allowed on struct members: %s",
836 spirv_decoration_to_string(dec->decoration));
837 break;
838
839 case SpvDecorationXfbBuffer:
840 case SpvDecorationXfbStride:
841 vtn_warn("Vulkan does not have transform feedback");
842 break;
843
844 case SpvDecorationCPacked:
845 if (b->shader->info.stage != MESA_SHADER_KERNEL)
846 vtn_warn("Decoration only allowed for CL-style kernels: %s",
847 spirv_decoration_to_string(dec->decoration));
848 else
849 ctx->type->packed = true;
850 break;
851
852 case SpvDecorationSaturatedConversion:
853 case SpvDecorationFuncParamAttr:
854 case SpvDecorationFPRoundingMode:
855 case SpvDecorationFPFastMathMode:
856 case SpvDecorationAlignment:
857 if (b->shader->info.stage != MESA_SHADER_KERNEL) {
858 vtn_warn("Decoration only allowed for CL-style kernels: %s",
859 spirv_decoration_to_string(dec->decoration));
860 }
861 break;
862
863 case SpvDecorationUserSemantic:
864 /* User semantic decorations can safely be ignored by the driver. */
865 break;
866
867 default:
868 vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
869 }
870 }
871
872 /** Chases the array type all the way down to the tail and rewrites the
873 * glsl_types to be based on the tail's glsl_type.
874 */
875 static void
876 vtn_array_type_rewrite_glsl_type(struct vtn_type *type)
877 {
878 if (type->base_type != vtn_base_type_array)
879 return;
880
881 vtn_array_type_rewrite_glsl_type(type->array_element);
882
883 type->type = glsl_array_type(type->array_element->type,
884 type->length, type->stride);
885 }
886
887 /* Matrix strides are handled as a separate pass because we need to know
888 * whether the matrix is row-major or not first.
889 */
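/* As a sketch of the layouts involved: for a column-major matrix,
 * MatrixStride gives the byte stride between columns, so a mat2x4
 * (two vec4 columns) with MatrixStride 16 has tightly packed columns;
 * with RowMajor, the same literal instead spaces the rows. */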
890 static void
891 struct_member_matrix_stride_cb(struct vtn_builder *b,
892 UNUSED struct vtn_value *val, int member,
893 const struct vtn_decoration *dec,
894 void *void_ctx)
895 {
896 if (dec->decoration != SpvDecorationMatrixStride)
897 return;
898
899 vtn_fail_if(member < 0,
900 "The MatrixStride decoration is only allowed on members "
901 "of OpTypeStruct");
902 vtn_fail_if(dec->operands[0] == 0, "MatrixStride must be non-zero");
903
904 struct member_decoration_ctx *ctx = void_ctx;
905
906 struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
907 if (mat_type->row_major) {
908 mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
909 mat_type->stride = mat_type->array_element->stride;
910 mat_type->array_element->stride = dec->operands[0];
911
912 mat_type->type = glsl_explicit_matrix_type(mat_type->type,
913 dec->operands[0], true);
914 mat_type->array_element->type = glsl_get_column_type(mat_type->type);
915 } else {
916 vtn_assert(mat_type->array_element->stride > 0);
917 mat_type->stride = dec->operands[0];
918
919 mat_type->type = glsl_explicit_matrix_type(mat_type->type,
920 dec->operands[0], false);
921 }
922
923 /* Now that we've replaced the glsl_type with a properly strided matrix
924 * type, rewrite the member type so that it's an array of the proper kind
925 * of glsl_type.
926 */
927 vtn_array_type_rewrite_glsl_type(ctx->type->members[member]);
928 ctx->fields[member].type = ctx->type->members[member]->type;
929 }
930
931 static void
932 struct_block_decoration_cb(struct vtn_builder *b,
933 struct vtn_value *val, int member,
934 const struct vtn_decoration *dec, void *ctx)
935 {
936 if (member != -1)
937 return;
938
939 struct vtn_type *type = val->type;
940 if (dec->decoration == SpvDecorationBlock)
941 type->block = true;
942 else if (dec->decoration == SpvDecorationBufferBlock)
943 type->buffer_block = true;
944 }
945
946 static void
947 type_decoration_cb(struct vtn_builder *b,
948 struct vtn_value *val, int member,
949 const struct vtn_decoration *dec, UNUSED void *ctx)
950 {
951 struct vtn_type *type = val->type;
952
953 if (member != -1) {
954 /* This should have been handled by OpTypeStruct */
955 assert(val->type->base_type == vtn_base_type_struct);
956 assert(member >= 0 && member < val->type->length);
957 return;
958 }
959
960 switch (dec->decoration) {
961 case SpvDecorationArrayStride:
962 vtn_assert(type->base_type == vtn_base_type_array ||
963 type->base_type == vtn_base_type_pointer);
964 break;
965 case SpvDecorationBlock:
966 vtn_assert(type->base_type == vtn_base_type_struct);
967 vtn_assert(type->block);
968 break;
969 case SpvDecorationBufferBlock:
970 vtn_assert(type->base_type == vtn_base_type_struct);
971 vtn_assert(type->buffer_block);
972 break;
973 case SpvDecorationGLSLShared:
974 case SpvDecorationGLSLPacked:
975 /* Ignore these, since we get explicit offsets anyway */
976 break;
977
978 case SpvDecorationRowMajor:
979 case SpvDecorationColMajor:
980 case SpvDecorationMatrixStride:
981 case SpvDecorationBuiltIn:
982 case SpvDecorationNoPerspective:
983 case SpvDecorationFlat:
984 case SpvDecorationPatch:
985 case SpvDecorationCentroid:
986 case SpvDecorationSample:
987 case SpvDecorationVolatile:
988 case SpvDecorationCoherent:
989 case SpvDecorationNonWritable:
990 case SpvDecorationNonReadable:
991 case SpvDecorationUniform:
992 case SpvDecorationUniformId:
993 case SpvDecorationLocation:
994 case SpvDecorationComponent:
995 case SpvDecorationOffset:
996 case SpvDecorationXfbBuffer:
997 case SpvDecorationXfbStride:
998 case SpvDecorationUserSemantic:
999 vtn_warn("Decoration only allowed for struct members: %s",
1000 spirv_decoration_to_string(dec->decoration));
1001 break;
1002
1003 case SpvDecorationStream:
1004 /* We don't need to do anything here; the stream is filled in when
1005 * applying the decoration to a variable. Just check that, when the
1006 * decoration is not on a struct member, it is on a struct type.
1007 */
1008 vtn_assert(type->base_type == vtn_base_type_struct);
1009 break;
1010
1011 case SpvDecorationRelaxedPrecision:
1012 case SpvDecorationSpecId:
1013 case SpvDecorationInvariant:
1014 case SpvDecorationRestrict:
1015 case SpvDecorationAliased:
1016 case SpvDecorationConstant:
1017 case SpvDecorationIndex:
1018 case SpvDecorationBinding:
1019 case SpvDecorationDescriptorSet:
1020 case SpvDecorationLinkageAttributes:
1021 case SpvDecorationNoContraction:
1022 case SpvDecorationInputAttachmentIndex:
1023 vtn_warn("Decoration not allowed on types: %s",
1024 spirv_decoration_to_string(dec->decoration));
1025 break;
1026
1027 case SpvDecorationCPacked:
1028 if (b->shader->info.stage != MESA_SHADER_KERNEL)
1029 vtn_warn("Decoration only allowed for CL-style kernels: %s",
1030 spirv_decoration_to_string(dec->decoration));
1031 else
1032 type->packed = true;
1033 break;
1034
1035 case SpvDecorationSaturatedConversion:
1036 case SpvDecorationFuncParamAttr:
1037 case SpvDecorationFPRoundingMode:
1038 case SpvDecorationFPFastMathMode:
1039 case SpvDecorationAlignment:
1040 vtn_warn("Decoration only allowed for CL-style kernels: %s",
1041 spirv_decoration_to_string(dec->decoration));
1042 break;
1043
1044 default:
1045 vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
1046 }
1047 }
1048
1049 static unsigned
1050 translate_image_format(struct vtn_builder *b, SpvImageFormat format)
1051 {
1052 switch (format) {
1053 case SpvImageFormatUnknown: return 0; /* GL_NONE */
1054 case SpvImageFormatRgba32f: return 0x8814; /* GL_RGBA32F */
1055 case SpvImageFormatRgba16f: return 0x881A; /* GL_RGBA16F */
1056 case SpvImageFormatR32f: return 0x822E; /* GL_R32F */
1057 case SpvImageFormatRgba8: return 0x8058; /* GL_RGBA8 */
1058 case SpvImageFormatRgba8Snorm: return 0x8F97; /* GL_RGBA8_SNORM */
1059 case SpvImageFormatRg32f: return 0x8230; /* GL_RG32F */
1060 case SpvImageFormatRg16f: return 0x822F; /* GL_RG16F */
1061 case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
1062 case SpvImageFormatR16f: return 0x822D; /* GL_R16F */
1063 case SpvImageFormatRgba16: return 0x805B; /* GL_RGBA16 */
1064 case SpvImageFormatRgb10A2: return 0x8059; /* GL_RGB10_A2 */
1065 case SpvImageFormatRg16: return 0x822C; /* GL_RG16 */
1066 case SpvImageFormatRg8: return 0x822B; /* GL_RG8 */
1067 case SpvImageFormatR16: return 0x822A; /* GL_R16 */
1068 case SpvImageFormatR8: return 0x8229; /* GL_R8 */
1069 case SpvImageFormatRgba16Snorm: return 0x8F9B; /* GL_RGBA16_SNORM */
1070 case SpvImageFormatRg16Snorm: return 0x8F99; /* GL_RG16_SNORM */
1071 case SpvImageFormatRg8Snorm: return 0x8F95; /* GL_RG8_SNORM */
1072 case SpvImageFormatR16Snorm: return 0x8F98; /* GL_R16_SNORM */
1073 case SpvImageFormatR8Snorm: return 0x8F94; /* GL_R8_SNORM */
1074 case SpvImageFormatRgba32i: return 0x8D82; /* GL_RGBA32I */
1075 case SpvImageFormatRgba16i: return 0x8D88; /* GL_RGBA16I */
1076 case SpvImageFormatRgba8i: return 0x8D8E; /* GL_RGBA8I */
1077 case SpvImageFormatR32i: return 0x8235; /* GL_R32I */
1078 case SpvImageFormatRg32i: return 0x823B; /* GL_RG32I */
1079 case SpvImageFormatRg16i: return 0x8239; /* GL_RG16I */
1080 case SpvImageFormatRg8i: return 0x8237; /* GL_RG8I */
1081 case SpvImageFormatR16i: return 0x8233; /* GL_R16I */
1082 case SpvImageFormatR8i: return 0x8231; /* GL_R8I */
1083 case SpvImageFormatRgba32ui: return 0x8D70; /* GL_RGBA32UI */
1084 case SpvImageFormatRgba16ui: return 0x8D76; /* GL_RGBA16UI */
1085 case SpvImageFormatRgba8ui: return 0x8D7C; /* GL_RGBA8UI */
1086 case SpvImageFormatR32ui: return 0x8236; /* GL_R32UI */
1087 case SpvImageFormatRgb10a2ui: return 0x906F; /* GL_RGB10_A2UI */
1088 case SpvImageFormatRg32ui: return 0x823C; /* GL_RG32UI */
1089 case SpvImageFormatRg16ui: return 0x823A; /* GL_RG16UI */
1090 case SpvImageFormatRg8ui: return 0x8238; /* GL_RG8UI */
1091 case SpvImageFormatR16ui: return 0x8234; /* GL_R16UI */
1092 case SpvImageFormatR8ui: return 0x8232; /* GL_R8UI */
1093 default:
1094 vtn_fail("Invalid image format: %s (%u)",
1095 spirv_imageformat_to_string(format), format);
1096 }
1097 }
1098
1099 static void
1100 vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
1101 const uint32_t *w, unsigned count)
1102 {
1103 struct vtn_value *val = NULL;
1104
1105 /* In order to properly handle forward declarations, we have to defer
1106 * allocation for pointer types.
1107 */
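/* For example, a module may declare
 *
 *    OpTypeForwardPointer %ptr PhysicalStorageBuffer
 *
 * long before the matching
 *
 *    %ptr = OpTypePointer PhysicalStorageBuffer %struct
 *
 * so %ptr must be allocated exactly once, whichever opcode appears first. */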
1108 if (opcode != SpvOpTypePointer && opcode != SpvOpTypeForwardPointer) {
1109 val = vtn_push_value(b, w[1], vtn_value_type_type);
1110 vtn_fail_if(val->type != NULL,
1111 "Only pointers can have forward declarations");
1112 val->type = rzalloc(b, struct vtn_type);
1113 val->type->id = w[1];
1114 }
1115
1116 switch (opcode) {
1117 case SpvOpTypeVoid:
1118 val->type->base_type = vtn_base_type_void;
1119 val->type->type = glsl_void_type();
1120 break;
1121 case SpvOpTypeBool:
1122 val->type->base_type = vtn_base_type_scalar;
1123 val->type->type = glsl_bool_type();
1124 val->type->length = 1;
1125 break;
1126 case SpvOpTypeInt: {
1127 int bit_size = w[2];
1128 const bool signedness = w[3];
1129 val->type->base_type = vtn_base_type_scalar;
1130 switch (bit_size) {
1131 case 64:
1132 val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
1133 break;
1134 case 32:
1135 val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
1136 break;
1137 case 16:
1138 val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
1139 break;
1140 case 8:
1141 val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
1142 break;
1143 default:
1144 vtn_fail("Invalid int bit size: %u", bit_size);
1145 }
1146 val->type->length = 1;
1147 break;
1148 }
1149
1150 case SpvOpTypeFloat: {
1151 int bit_size = w[2];
1152 val->type->base_type = vtn_base_type_scalar;
1153 switch (bit_size) {
1154 case 16:
1155 val->type->type = glsl_float16_t_type();
1156 break;
1157 case 32:
1158 val->type->type = glsl_float_type();
1159 break;
1160 case 64:
1161 val->type->type = glsl_double_type();
1162 break;
1163 default:
1164 vtn_fail("Invalid float bit size: %u", bit_size);
1165 }
1166 val->type->length = 1;
1167 break;
1168 }
1169
1170 case SpvOpTypeVector: {
1171 struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
1172 unsigned elems = w[3];
1173
1174 vtn_fail_if(base->base_type != vtn_base_type_scalar,
1175 "Base type for OpTypeVector must be a scalar");
1176 vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16),
1177 "Invalid component count for OpTypeVector");
1178
1179 val->type->base_type = vtn_base_type_vector;
1180 val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
1181 val->type->length = elems;
1182 val->type->stride = glsl_type_is_boolean(val->type->type)
1183 ? 4 : glsl_get_bit_size(base->type) / 8;
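/* The boolean special case above exists because glsl_get_bit_size() of a
 * boolean does not describe a memory footprint; booleans are assumed to
 * occupy four bytes. */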
1184 val->type->array_element = base;
1185 break;
1186 }
1187
1188 case SpvOpTypeMatrix: {
1189 struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
1190 unsigned columns = w[3];
1191
1192 vtn_fail_if(base->base_type != vtn_base_type_vector,
1193 "Base type for OpTypeMatrix must be a vector");
1194 vtn_fail_if(columns < 2 || columns > 4,
1195 "Invalid column count for OpTypeMatrix");
1196
1197 val->type->base_type = vtn_base_type_matrix;
1198 val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
1199 glsl_get_vector_elements(base->type),
1200 columns);
1201 vtn_fail_if(glsl_type_is_error(val->type->type),
1202 "Unsupported base type for OpTypeMatrix");
1203 assert(!glsl_type_is_error(val->type->type));
1204 val->type->length = columns;
1205 val->type->array_element = base;
1206 val->type->row_major = false;
1207 val->type->stride = 0;
1208 break;
1209 }
1210
1211 case SpvOpTypeRuntimeArray:
1212 case SpvOpTypeArray: {
1213 struct vtn_type *array_element =
1214 vtn_value(b, w[2], vtn_value_type_type)->type;
1215
1216 if (opcode == SpvOpTypeRuntimeArray) {
1217 /* A length of 0 is used to denote unsized arrays */
1218 val->type->length = 0;
1219 } else {
1220 val->type->length = vtn_constant_uint(b, w[3]);
1221 }
1222
1223 val->type->base_type = vtn_base_type_array;
1224 val->type->array_element = array_element;
1225 if (b->shader->info.stage == MESA_SHADER_KERNEL)
1226 val->type->stride = glsl_get_cl_size(array_element->type);
1227
1228 vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
1229 val->type->type = glsl_array_type(array_element->type, val->type->length,
1230 val->type->stride);
1231 break;
1232 }
1233
1234 case SpvOpTypeStruct: {
1235 unsigned num_fields = count - 2;
1236 val->type->base_type = vtn_base_type_struct;
1237 val->type->length = num_fields;
1238 val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
1239 val->type->offsets = ralloc_array(b, unsigned, num_fields);
1240 val->type->packed = false;
1241
1242 NIR_VLA(struct glsl_struct_field, fields, count);
1243 for (unsigned i = 0; i < num_fields; i++) {
1244 val->type->members[i] =
1245 vtn_value(b, w[i + 2], vtn_value_type_type)->type;
1246 fields[i] = (struct glsl_struct_field) {
1247 .type = val->type->members[i]->type,
1248 .name = ralloc_asprintf(b, "field%d", i),
1249 .location = -1,
1250 .offset = -1,
1251 };
1252 }
1253
1254 if (b->shader->info.stage == MESA_SHADER_KERNEL) {
1255 unsigned offset = 0;
1256 for (unsigned i = 0; i < num_fields; i++) {
1257 offset = align(offset, glsl_get_cl_alignment(fields[i].type));
1258 fields[i].offset = offset;
1259 offset += glsl_get_cl_size(fields[i].type);
1260 }
1261 }
1262
1263 struct member_decoration_ctx ctx = {
1264 .num_fields = num_fields,
1265 .fields = fields,
1266 .type = val->type
1267 };
1268
1269 vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
1270 vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);
1271
1272 vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL);
1273
1274 const char *name = val->name;
1275
1276 if (val->type->block || val->type->buffer_block) {
1277 /* Packing will be ignored since types coming from SPIR-V are
1278 * explicitly laid out.
1279 */
1280 val->type->type = glsl_interface_type(fields, num_fields,
1281 /* packing */ 0, false,
1282 name ? name : "block");
1283 } else {
1284 val->type->type = glsl_struct_type(fields, num_fields,
1285 name ? name : "struct", false);
1286 }
1287 break;
1288 }
1289
1290 case SpvOpTypeFunction: {
1291 val->type->base_type = vtn_base_type_function;
1292 val->type->type = NULL;
1293
1294 val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;
1295
1296 const unsigned num_params = count - 3;
1297 val->type->length = num_params;
1298 val->type->params = ralloc_array(b, struct vtn_type *, num_params);
1299 for (unsigned i = 0; i < num_params; i++) {
1300 val->type->params[i] =
1301 vtn_value(b, w[i + 3], vtn_value_type_type)->type;
1302 }
1303 break;
1304 }
1305
1306 case SpvOpTypePointer:
1307 case SpvOpTypeForwardPointer: {
1308 /* We can't blindly push the value because it might be a forward
1309 * declaration.
1310 */
1311 val = vtn_untyped_value(b, w[1]);
1312
1313 SpvStorageClass storage_class = w[2];
1314
1315 if (val->value_type == vtn_value_type_invalid) {
1316 val->value_type = vtn_value_type_type;
1317 val->type = rzalloc(b, struct vtn_type);
1318 val->type->id = w[1];
1319 val->type->base_type = vtn_base_type_pointer;
1320 val->type->storage_class = storage_class;
1321
1322 /* These can actually be stored to nir_variables and used as SSA
1323 * values so they need a real glsl_type.
1324 */
1325 enum vtn_variable_mode mode = vtn_storage_class_to_mode(
1326 b, storage_class, NULL, NULL);
1327 val->type->type = nir_address_format_to_glsl_type(
1328 vtn_mode_to_address_format(b, mode));
1329 } else {
1330 vtn_fail_if(val->type->storage_class != storage_class,
1331 "The storage classes of an OpTypePointer and any "
1332 "OpTypeForwardPointers that provide forward "
1333 "declarations of it must match.");
1334 }
1335
1336 if (opcode == SpvOpTypePointer) {
1337 vtn_fail_if(val->type->deref != NULL,
1338 "While OpTypeForwardPointer can be used to provide a "
1339 "forward declaration of a pointer, OpTypePointer can "
1340 "only be used once for a given id.");
1341
1342 val->type->deref = vtn_value(b, w[3], vtn_value_type_type)->type;
1343
1344 /* Only certain storage classes use ArrayStride. The others (in
1345 * particular Workgroup) are expected to be laid out by the driver.
1346 */
1347 switch (storage_class) {
1348 case SpvStorageClassUniform:
1349 case SpvStorageClassPushConstant:
1350 case SpvStorageClassStorageBuffer:
1351 case SpvStorageClassPhysicalStorageBuffer:
1352 vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
1353 break;
1354 default:
1355 /* Nothing to do. */
1356 break;
1357 }
1358
1359 if (b->physical_ptrs) {
1360 switch (storage_class) {
1361 case SpvStorageClassFunction:
1362 case SpvStorageClassWorkgroup:
1363 case SpvStorageClassCrossWorkgroup:
1364 case SpvStorageClassUniformConstant:
1365 val->type->stride = align(glsl_get_cl_size(val->type->deref->type),
1366 glsl_get_cl_alignment(val->type->deref->type));
1367 break;
1368 default:
1369 break;
1370 }
1371 }
1372 }
1373 break;
1374 }
1375
1376 case SpvOpTypeImage: {
1377 val->type->base_type = vtn_base_type_image;
1378
1379 const struct vtn_type *sampled_type =
1380 vtn_value(b, w[2], vtn_value_type_type)->type;
1381
1382 vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
1383 glsl_get_bit_size(sampled_type->type) != 32,
1384 "Sampled type of OpTypeImage must be a 32-bit scalar");
1385
1386 enum glsl_sampler_dim dim;
1387 switch ((SpvDim)w[3]) {
1388 case SpvDim1D: dim = GLSL_SAMPLER_DIM_1D; break;
1389 case SpvDim2D: dim = GLSL_SAMPLER_DIM_2D; break;
1390 case SpvDim3D: dim = GLSL_SAMPLER_DIM_3D; break;
1391 case SpvDimCube: dim = GLSL_SAMPLER_DIM_CUBE; break;
1392 case SpvDimRect: dim = GLSL_SAMPLER_DIM_RECT; break;
1393 case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF; break;
1394 case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
1395 default:
1396 vtn_fail("Invalid SPIR-V image dimensionality: %s (%u)",
1397 spirv_dim_to_string((SpvDim)w[3]), w[3]);
1398 }
1399
1400 /* w[4]: as per Vulkan spec "Validation Rules within a Module",
1401 * The “Depth” operand of OpTypeImage is ignored.
1402 */
1403 bool is_array = w[5];
1404 bool multisampled = w[6];
1405 unsigned sampled = w[7];
1406 SpvImageFormat format = w[8];
1407
1408 if (count > 9)
1409 val->type->access_qualifier = w[9];
1410 else
1411 val->type->access_qualifier = SpvAccessQualifierReadWrite;
1412
1413 if (multisampled) {
1414 if (dim == GLSL_SAMPLER_DIM_2D)
1415 dim = GLSL_SAMPLER_DIM_MS;
1416 else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
1417 dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
1418 else
1419 vtn_fail("Unsupported multisampled image type");
1420 }
1421
1422 val->type->image_format = translate_image_format(b, format);
1423
1424 enum glsl_base_type sampled_base_type =
1425 glsl_get_base_type(sampled_type->type);
1426 if (sampled == 1) {
1427 val->type->sampled = true;
1428 val->type->type = glsl_sampler_type(dim, false, is_array,
1429 sampled_base_type);
1430 } else if (sampled == 2) {
1431 val->type->sampled = false;
1432 val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
1433 } else {
1434 vtn_fail("We need to know if the image will be sampled");
1435 }
1436 break;
1437 }
1438
1439 case SpvOpTypeSampledImage:
1440 val->type->base_type = vtn_base_type_sampled_image;
1441 val->type->image = vtn_value(b, w[2], vtn_value_type_type)->type;
1442 val->type->type = val->type->image->type;
1443 break;
1444
1445 case SpvOpTypeSampler:
1446 /* The actual sampler type here doesn't really matter. It gets
1447 * thrown away the moment you combine it with an image. What really
1448 * matters is that it's a sampler type as opposed to an integer type
1449 * so the backend knows what to do.
1450 */
1451 val->type->base_type = vtn_base_type_sampler;
1452 val->type->type = glsl_bare_sampler_type();
1453 break;
1454
1455 case SpvOpTypeOpaque:
1456 case SpvOpTypeEvent:
1457 case SpvOpTypeDeviceEvent:
1458 case SpvOpTypeReserveId:
1459 case SpvOpTypeQueue:
1460 case SpvOpTypePipe:
1461 default:
1462 vtn_fail_with_opcode("Unhandled opcode", opcode);
1463 }
1464
1465 vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
1466
1467 if (val->type->base_type == vtn_base_type_struct &&
1468 (val->type->block || val->type->buffer_block)) {
1469 for (unsigned i = 0; i < val->type->length; i++) {
1470 vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]),
1471 "Block and BufferBlock decorations cannot decorate a "
1472 "structure type that is nested at any level inside "
1473 "another structure type decorated with Block or "
1474 "BufferBlock.");
1475 }
1476 }
1477 }
1478
1479 static nir_constant *
1480 vtn_null_constant(struct vtn_builder *b, struct vtn_type *type)
1481 {
1482 nir_constant *c = rzalloc(b, nir_constant);
1483
1484 switch (type->base_type) {
1485 case vtn_base_type_scalar:
1486 case vtn_base_type_vector:
1487 /* Nothing to do here. It's already initialized to zero */
1488 break;
1489
1490 case vtn_base_type_pointer: {
1491 enum vtn_variable_mode mode = vtn_storage_class_to_mode(
1492 b, type->storage_class, type->deref, NULL);
1493 nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
1494
1495 const nir_const_value *null_value = nir_address_format_null_value(addr_format);
1496 memcpy(c->values, null_value,
1497 sizeof(nir_const_value) * nir_address_format_num_components(addr_format));
1498 break;
1499 }
1500
1501 case vtn_base_type_void:
1502 case vtn_base_type_image:
1503 case vtn_base_type_sampler:
1504 case vtn_base_type_sampled_image:
1505 case vtn_base_type_function:
1506 /* For those we have to return something but it doesn't matter what. */
1507 break;
1508
1509 case vtn_base_type_matrix:
1510 case vtn_base_type_array:
1511 vtn_assert(type->length > 0);
1512 c->num_elements = type->length;
1513 c->elements = ralloc_array(b, nir_constant *, c->num_elements);
1514
1515 c->elements[0] = vtn_null_constant(b, type->array_element);
1516 for (unsigned i = 1; i < c->num_elements; i++)
1517 c->elements[i] = c->elements[0];
1518 break;
1519
1520 case vtn_base_type_struct:
1521 c->num_elements = type->length;
1522 c->elements = ralloc_array(b, nir_constant *, c->num_elements);
1523 for (unsigned i = 0; i < c->num_elements; i++)
1524 c->elements[i] = vtn_null_constant(b, type->members[i]);
1525 break;
1526
1527 default:
1528 vtn_fail("Invalid type for null constant");
1529 }
1530
1531 return c;
1532 }
1533
1534 static void
1535 spec_constant_decoration_cb(struct vtn_builder *b, UNUSED struct vtn_value *val,
1536 ASSERTED int member,
1537 const struct vtn_decoration *dec, void *data)
1538 {
1539 vtn_assert(member == -1);
1540 if (dec->decoration != SpvDecorationSpecId)
1541 return;
1542
1543 struct spec_constant_value *const_value = data;
1544
1545 for (unsigned i = 0; i < b->num_specializations; i++) {
1546 if (b->specializations[i].id == dec->operands[0]) {
1547 if (const_value->is_double)
1548 const_value->data64 = b->specializations[i].data64;
1549 else
1550 const_value->data32 = b->specializations[i].data32;
1551 return;
1552 }
1553 }
1554 }
1555
1556 static uint32_t
1557 get_specialization(struct vtn_builder *b, struct vtn_value *val,
1558 uint32_t const_value)
1559 {
1560 struct spec_constant_value data;
1561 data.is_double = false;
1562 data.data32 = const_value;
1563 vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
1564 return data.data32;
1565 }
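/* A consumer overrides a spec constant by supplying an entry in
 * b->specializations whose id matches the value's SpecId decoration;
 * otherwise the default literal encoded in the instruction is kept. */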
1566
1567 static uint64_t
1568 get_specialization64(struct vtn_builder *b, struct vtn_value *val,
1569 uint64_t const_value)
1570 {
1571 struct spec_constant_value data;
1572 data.is_double = true;
1573 data.data64 = const_value;
1574 vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
1575 return data.data64;
1576 }
1577
1578 static void
1579 handle_workgroup_size_decoration_cb(struct vtn_builder *b,
1580 struct vtn_value *val,
1581 ASSERTED int member,
1582 const struct vtn_decoration *dec,
1583 UNUSED void *data)
1584 {
1585 vtn_assert(member == -1);
1586 if (dec->decoration != SpvDecorationBuiltIn ||
1587 dec->operands[0] != SpvBuiltInWorkgroupSize)
1588 return;
1589
1590 vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));
1591 b->workgroup_size_builtin = val;
1592 }
1593
1594 static void
1595 vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
1596 const uint32_t *w, unsigned count)
1597 {
1598 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
1599 val->constant = rzalloc(b, nir_constant);
1600 switch (opcode) {
1601 case SpvOpConstantTrue:
1602 case SpvOpConstantFalse:
1603 case SpvOpSpecConstantTrue:
1604 case SpvOpSpecConstantFalse: {
1605 vtn_fail_if(val->type->type != glsl_bool_type(),
1606 "Result type of %s must be OpTypeBool",
1607 spirv_op_to_string(opcode));
1608
1609 uint32_t int_val = (opcode == SpvOpConstantTrue ||
1610 opcode == SpvOpSpecConstantTrue);
1611
1612 if (opcode == SpvOpSpecConstantTrue ||
1613 opcode == SpvOpSpecConstantFalse)
1614 int_val = get_specialization(b, val, int_val);
1615
1616 val->constant->values[0].b = int_val != 0;
1617 break;
1618 }
1619
1620 case SpvOpConstant: {
1621 vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
1622 "Result type of %s must be a scalar",
1623 spirv_op_to_string(opcode));
1624 int bit_size = glsl_get_bit_size(val->type->type);
1625 switch (bit_size) {
1626 case 64:
1627 val->constant->values[0].u64 = vtn_u64_literal(&w[3]);
1628 break;
1629 case 32:
1630 val->constant->values[0].u32 = w[3];
1631 break;
1632 case 16:
1633 val->constant->values[0].u16 = w[3];
1634 break;
1635 case 8:
1636 val->constant->values[0].u8 = w[3];
1637 break;
1638 default:
1639 vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size);
1640 }
1641 break;
1642 }
1643
1644 case SpvOpSpecConstant: {
1645 vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
1646 "Result type of %s must be a scalar",
1647 spirv_op_to_string(opcode));
1648 int bit_size = glsl_get_bit_size(val->type->type);
1649 switch (bit_size) {
1650 case 64:
1651 val->constant->values[0].u64 =
1652 get_specialization64(b, val, vtn_u64_literal(&w[3]));
1653 break;
1654 case 32:
1655 val->constant->values[0].u32 = get_specialization(b, val, w[3]);
1656 break;
1657 case 16:
1658 val->constant->values[0].u16 = get_specialization(b, val, w[3]);
1659 break;
1660 case 8:
1661 val->constant->values[0].u8 = get_specialization(b, val, w[3]);
1662 break;
1663 default:
1664 vtn_fail("Unsupported SpvOpSpecConstant bit size");
1665 }
1666 break;
1667 }
1668
1669 case SpvOpSpecConstantComposite:
1670 case SpvOpConstantComposite: {
1671 unsigned elem_count = count - 3;
1672 vtn_fail_if(elem_count != val->type->length,
1673 "%s has %u constituents, expected %u",
1674 spirv_op_to_string(opcode), elem_count, val->type->length);
1675
1676 nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
1677 for (unsigned i = 0; i < elem_count; i++) {
1678 struct vtn_value *val = vtn_untyped_value(b, w[i + 3]);
1679
1680 if (val->value_type == vtn_value_type_constant) {
1681 elems[i] = val->constant;
1682 } else {
1683 vtn_fail_if(val->value_type != vtn_value_type_undef,
1684 "only constants or undefs allowed for "
1685 "SpvOpConstantComposite");
1686 /* to make it easier, just insert a NULL constant for now */
1687 elems[i] = vtn_null_constant(b, val->type);
1688 }
1689 }
1690
1691 switch (val->type->base_type) {
1692 case vtn_base_type_vector: {
1693 assert(glsl_type_is_vector(val->type->type));
1694 for (unsigned i = 0; i < elem_count; i++)
1695 val->constant->values[i] = elems[i]->values[0];
1696 break;
1697 }
1698
1699 case vtn_base_type_matrix:
1700 case vtn_base_type_struct:
1701 case vtn_base_type_array:
1702 ralloc_steal(val->constant, elems);
1703 val->constant->num_elements = elem_count;
1704 val->constant->elements = elems;
1705 break;
1706
1707 default:
1708 vtn_fail("Result type of %s must be a composite type",
1709 spirv_op_to_string(opcode));
1710 }
1711 break;
1712 }
1713
1714 case SpvOpSpecConstantOp: {
1715 SpvOp opcode = get_specialization(b, val, w[3]);
1716 switch (opcode) {
1717 case SpvOpVectorShuffle: {
1718 struct vtn_value *v0 = &b->values[w[4]];
1719 struct vtn_value *v1 = &b->values[w[5]];
1720
1721 vtn_assert(v0->value_type == vtn_value_type_constant ||
1722 v0->value_type == vtn_value_type_undef);
1723 vtn_assert(v1->value_type == vtn_value_type_constant ||
1724 v1->value_type == vtn_value_type_undef);
1725
1726 unsigned len0 = glsl_get_vector_elements(v0->type->type);
1727 unsigned len1 = glsl_get_vector_elements(v1->type->type);
1728
1729 vtn_assert(len0 + len1 < 16);
1730
1731 unsigned bit_size = glsl_get_bit_size(val->type->type);
1732 unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
1733 unsigned bit_size1 = glsl_get_bit_size(v1->type->type);
1734
1735 vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
1736 (void)bit_size0; (void)bit_size1;
1737
1738 nir_const_value undef = { .u64 = 0xdeadbeefdeadbeef };
1739 nir_const_value combined[NIR_MAX_VEC_COMPONENTS * 2];
1740
1741 if (v0->value_type == vtn_value_type_constant) {
1742 for (unsigned i = 0; i < len0; i++)
1743 combined[i] = v0->constant->values[i];
1744 }
1745 if (v1->value_type == vtn_value_type_constant) {
1746 for (unsigned i = 0; i < len1; i++)
1747 combined[len0 + i] = v1->constant->values[i];
1748 }
1749
1750 for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
1751 uint32_t comp = w[i + 6];
1752 if (comp == (uint32_t)-1) {
1753 /* If the component is not used, set the value to a known
1754 * constant so that accidental use is easy to detect.
1755 */
1756 val->constant->values[j] = undef;
1757 } else {
1758 vtn_fail_if(comp >= len0 + len1,
1759 "All Component literals must either be FFFFFFFF "
1760 "or in [0, N - 1] (inclusive).");
1761 val->constant->values[j] = combined[comp];
1762 }
1763 }
1764 break;
1765 }
1766
1767 case SpvOpCompositeExtract:
1768 case SpvOpCompositeInsert: {
1769 struct vtn_value *comp;
1770 unsigned deref_start;
1771 struct nir_constant **c;
1772 if (opcode == SpvOpCompositeExtract) {
1773 comp = vtn_value(b, w[4], vtn_value_type_constant);
1774 deref_start = 5;
1775 c = &comp->constant;
1776 } else {
1777 comp = vtn_value(b, w[5], vtn_value_type_constant);
1778 deref_start = 6;
1779 val->constant = nir_constant_clone(comp->constant,
1780 (nir_variable *)b);
1781 c = &val->constant;
1782 }
1783
1784 int elem = -1;
1785 const struct vtn_type *type = comp->type;
1786 for (unsigned i = deref_start; i < count; i++) {
1787 vtn_fail_if(w[i] > type->length,
1788 "%uth index of %s is %u but the type has only "
1789 "%u elements", i - deref_start,
1790 spirv_op_to_string(opcode), w[i], type->length);
1791
1792 switch (type->base_type) {
1793 case vtn_base_type_vector:
1794 elem = w[i];
1795 type = type->array_element;
1796 break;
1797
1798 case vtn_base_type_matrix:
1799 case vtn_base_type_array:
1800 c = &(*c)->elements[w[i]];
1801 type = type->array_element;
1802 break;
1803
1804 case vtn_base_type_struct:
1805 c = &(*c)->elements[w[i]];
1806 type = type->members[w[i]];
1807 break;
1808
1809 default:
1810 vtn_fail("%s must only index into composite types",
1811 spirv_op_to_string(opcode));
1812 }
1813 }
1814
1815 if (opcode == SpvOpCompositeExtract) {
1816 if (elem == -1) {
1817 val->constant = *c;
1818 } else {
1819 unsigned num_components = type->length;
1820 for (unsigned i = 0; i < num_components; i++)
1821 val->constant->values[i] = (*c)->values[elem + i];
1822 }
1823 } else {
1824 struct vtn_value *insert =
1825 vtn_value(b, w[4], vtn_value_type_constant);
1826 vtn_assert(insert->type == type);
1827 if (elem == -1) {
1828 *c = insert->constant;
1829 } else {
1830 unsigned num_components = type->length;
1831 for (unsigned i = 0; i < num_components; i++)
1832 (*c)->values[elem + i] = insert->constant->values[i];
1833 }
1834 }
1835 break;
1836 }
1837
1838 default: {
1839 bool swap;
1840 nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
1841 nir_alu_type src_alu_type = dst_alu_type;
1842 unsigned num_components = glsl_get_vector_elements(val->type->type);
1843 unsigned bit_size;
1844
1845 vtn_assert(count <= 7);
1846
1847 switch (opcode) {
1848 case SpvOpSConvert:
1849 case SpvOpFConvert:
1850 case SpvOpUConvert:
1851 /* We have a source in a conversion */
1852 src_alu_type =
1853 nir_get_nir_type_for_glsl_type(
1854 vtn_value(b, w[4], vtn_value_type_constant)->type->type);
1855             /* We use the bit size of the conversion source to evaluate the opcode later */
1856 bit_size = glsl_get_bit_size(
1857 vtn_value(b, w[4], vtn_value_type_constant)->type->type);
1858 break;
1859 default:
1860 bit_size = glsl_get_bit_size(val->type->type);
1861          }
1862
1863 nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
1864 nir_alu_type_get_type_size(src_alu_type),
1865 nir_alu_type_get_type_size(dst_alu_type));
1866 nir_const_value src[3][NIR_MAX_VEC_COMPONENTS];
1867
1868 for (unsigned i = 0; i < count - 4; i++) {
1869 struct vtn_value *src_val =
1870 vtn_value(b, w[4 + i], vtn_value_type_constant);
1871
1872 /* If this is an unsized source, pull the bit size from the
1873 * source; otherwise, we'll use the bit size from the destination.
1874 */
1875 if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
1876 bit_size = glsl_get_bit_size(src_val->type->type);
1877
1878 unsigned src_comps = nir_op_infos[op].input_sizes[i] ?
1879 nir_op_infos[op].input_sizes[i] :
1880 num_components;
1881
1882 unsigned j = swap ? 1 - i : i;
1883 for (unsigned c = 0; c < src_comps; c++)
1884 src[j][c] = src_val->constant->values[c];
1885 }
1886
1887 /* fix up fixed size sources */
1888          /* Fix up fixed size sources: NIR shift ops always take a 32-bit shift count, so convert the shift amount to 32 bits here. */
1889 case nir_op_ishl:
1890 case nir_op_ishr:
1891 case nir_op_ushr: {
1892 if (bit_size == 32)
1893 break;
1894 for (unsigned i = 0; i < num_components; ++i) {
1895 switch (bit_size) {
1896 case 64: src[1][i].u32 = src[1][i].u64; break;
1897 case 16: src[1][i].u32 = src[1][i].u16; break;
1898 case 8: src[1][i].u32 = src[1][i].u8; break;
1899 }
1900 }
1901 break;
1902 }
1903 default:
1904 break;
1905 }
1906
1907 nir_const_value *srcs[3] = {
1908 src[0], src[1], src[2],
1909 };
1910 nir_eval_const_opcode(op, val->constant->values,
1911 num_components, bit_size, srcs,
1912 b->shader->info.float_controls_execution_mode);
1913 break;
1914 } /* default */
1915 }
1916 break;
1917 }
1918
1919 case SpvOpConstantNull:
1920 val->constant = vtn_null_constant(b, val->type);
1921 break;
1922
1923 case SpvOpConstantSampler:
1924 vtn_fail("OpConstantSampler requires Kernel Capability");
1925 break;
1926
1927 default:
1928 vtn_fail_with_opcode("Unhandled opcode", opcode);
1929 }
1930
1931 /* Now that we have the value, update the workgroup size if needed */
1932 vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
1933 }
1934
1935 SpvMemorySemanticsMask
1936 vtn_storage_class_to_memory_semantics(SpvStorageClass sc)
1937 {
1938 switch (sc) {
1939 case SpvStorageClassStorageBuffer:
1940 case SpvStorageClassPhysicalStorageBuffer:
1941 return SpvMemorySemanticsUniformMemoryMask;
1942 case SpvStorageClassWorkgroup:
1943 return SpvMemorySemanticsWorkgroupMemoryMask;
1944 default:
1945 return SpvMemorySemanticsMaskNone;
1946 }
1947 }
1948
1949 static void
1950 vtn_split_barrier_semantics(struct vtn_builder *b,
1951 SpvMemorySemanticsMask semantics,
1952 SpvMemorySemanticsMask *before,
1953 SpvMemorySemanticsMask *after)
1954 {
1955    /* For memory semantics embedded in operations, we split them into up to
1956     * two barriers, to be added before and after the operation.  This is less
1957     * strict than if we propagated them down to the final backend stage, but
1958     * it still results in correct execution.
1959     *
1960     * A further improvement would be to pipe this information (and use it!)
1961     * into the next compiler layers, at the expense of making the handling
1962     * of barriers more complicated.
1963     */
1964
1965 *before = SpvMemorySemanticsMaskNone;
1966 *after = SpvMemorySemanticsMaskNone;
1967
1968 SpvMemorySemanticsMask order_semantics =
1969 semantics & (SpvMemorySemanticsAcquireMask |
1970 SpvMemorySemanticsReleaseMask |
1971 SpvMemorySemanticsAcquireReleaseMask |
1972 SpvMemorySemanticsSequentiallyConsistentMask);
1973
1974 if (util_bitcount(order_semantics) > 1) {
1975       /* Old GLSLang versions incorrectly set all the ordering bits.  This was
1976        * fixed in commit c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of the glslang
1977        * repo, and has been in GLSLang since revision "SPIRV99.1321" (Jul-2016).
1978        */
1979 vtn_warn("Multiple memory ordering semantics specified, "
1980 "assuming AcquireRelease.");
1981 order_semantics = SpvMemorySemanticsAcquireReleaseMask;
1982 }
1983
1984 const SpvMemorySemanticsMask av_vis_semantics =
1985 semantics & (SpvMemorySemanticsMakeAvailableMask |
1986 SpvMemorySemanticsMakeVisibleMask);
1987
1988 const SpvMemorySemanticsMask storage_semantics =
1989 semantics & (SpvMemorySemanticsUniformMemoryMask |
1990 SpvMemorySemanticsSubgroupMemoryMask |
1991 SpvMemorySemanticsWorkgroupMemoryMask |
1992 SpvMemorySemanticsCrossWorkgroupMemoryMask |
1993 SpvMemorySemanticsAtomicCounterMemoryMask |
1994 SpvMemorySemanticsImageMemoryMask |
1995 SpvMemorySemanticsOutputMemoryMask);
1996
1997 const SpvMemorySemanticsMask other_semantics =
1998 semantics & ~(order_semantics | av_vis_semantics | storage_semantics);
1999
2000 if (other_semantics)
2001       vtn_warn("Ignoring unhandled memory semantics: %u", other_semantics);
2002
2003 /* SequentiallyConsistent is treated as AcquireRelease. */
2004
2005    /* The RELEASE barrier happens BEFORE the operation, and it is usually
2006     * associated with a Store.  All write operations with matching
2007     * semantics will not be reordered after the Store.
2008     */
2009 if (order_semantics & (SpvMemorySemanticsReleaseMask |
2010 SpvMemorySemanticsAcquireReleaseMask |
2011 SpvMemorySemanticsSequentiallyConsistentMask)) {
2012 *before |= SpvMemorySemanticsReleaseMask | storage_semantics;
2013 }
2014
2015    /* The ACQUIRE barrier happens AFTER the operation, and it is usually
2016     * associated with a Load.  All operations with matching semantics
2017     * will not be reordered before the Load.
2018     */
2019 if (order_semantics & (SpvMemorySemanticsAcquireMask |
2020 SpvMemorySemanticsAcquireReleaseMask |
2021 SpvMemorySemanticsSequentiallyConsistentMask)) {
2022 *after |= SpvMemorySemanticsAcquireMask | storage_semantics;
2023 }
2024
2025 if (av_vis_semantics & SpvMemorySemanticsMakeVisibleMask)
2026 *before |= SpvMemorySemanticsMakeVisibleMask | storage_semantics;
2027
2028 if (av_vis_semantics & SpvMemorySemanticsMakeAvailableMask)
2029 *after |= SpvMemorySemanticsMakeAvailableMask | storage_semantics;
2030 }
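
/* As an illustrative aside (values not taken from the spec): an atomic with
 * AcquireRelease | WorkgroupMemory semantics is split into
 *
 *    *before = Release | WorkgroupMemory   (emitted before the operation)
 *    *after  = Acquire | WorkgroupMemory   (emitted after the operation)
 *
 * so the operation itself no longer carries embedded ordering.
 */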
2031
2032 static void
2033 vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
2034 SpvMemorySemanticsMask semantics)
2035 {
2036 nir_memory_semantics nir_semantics = 0;
2037
2038 SpvMemorySemanticsMask order_semantics =
2039 semantics & (SpvMemorySemanticsAcquireMask |
2040 SpvMemorySemanticsReleaseMask |
2041 SpvMemorySemanticsAcquireReleaseMask |
2042 SpvMemorySemanticsSequentiallyConsistentMask);
2043
2044 if (util_bitcount(order_semantics) > 1) {
2045       /* Old GLSLang versions incorrectly set all the ordering bits.  This was
2046        * fixed in commit c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of the glslang
2047        * repo, and has been in GLSLang since revision "SPIRV99.1321" (Jul-2016).
2048        */
2049 vtn_warn("Multiple memory ordering semantics bits specified, "
2050 "assuming AcquireRelease.");
2051 order_semantics = SpvMemorySemanticsAcquireReleaseMask;
2052 }
2053
2054 switch (order_semantics) {
2055 case 0:
2056 /* Not an ordering barrier. */
2057 break;
2058
2059 case SpvMemorySemanticsAcquireMask:
2060 nir_semantics = NIR_MEMORY_ACQUIRE;
2061 break;
2062
2063 case SpvMemorySemanticsReleaseMask:
2064 nir_semantics = NIR_MEMORY_RELEASE;
2065 break;
2066
2067 case SpvMemorySemanticsSequentiallyConsistentMask:
2068 /* Fall through. Treated as AcquireRelease in Vulkan. */
2069 case SpvMemorySemanticsAcquireReleaseMask:
2070 nir_semantics = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE;
2071 break;
2072
2073 default:
2074 unreachable("Invalid memory order semantics");
2075 }
2076
2077 if (semantics & SpvMemorySemanticsMakeAvailableMask) {
2078 vtn_fail_if(!b->options->caps.vk_memory_model,
2079 "To use MakeAvailable memory semantics the VulkanMemoryModel "
2080 "capability must be declared.");
2081 nir_semantics |= NIR_MEMORY_MAKE_AVAILABLE;
2082 }
2083
2084 if (semantics & SpvMemorySemanticsMakeVisibleMask) {
2085 vtn_fail_if(!b->options->caps.vk_memory_model,
2086 "To use MakeVisible memory semantics the VulkanMemoryModel "
2087 "capability must be declared.");
2088 nir_semantics |= NIR_MEMORY_MAKE_VISIBLE;
2089 }
2090
2091 /* Vulkan Environment for SPIR-V says "SubgroupMemory, CrossWorkgroupMemory,
2092 * and AtomicCounterMemory are ignored".
2093 */
2094 semantics &= ~(SpvMemorySemanticsSubgroupMemoryMask |
2095 SpvMemorySemanticsCrossWorkgroupMemoryMask |
2096 SpvMemorySemanticsAtomicCounterMemoryMask);
2097
2098 /* TODO: Consider adding nir_var_mem_image mode to NIR so it can be used
2099 * for SpvMemorySemanticsImageMemoryMask.
2100 */
2101
2102 nir_variable_mode modes = 0;
2103 if (semantics & (SpvMemorySemanticsUniformMemoryMask |
2104 SpvMemorySemanticsImageMemoryMask)) {
2105 modes |= nir_var_uniform |
2106 nir_var_mem_ubo |
2107 nir_var_mem_ssbo |
2108 nir_var_mem_global;
2109 }
2110 if (semantics & SpvMemorySemanticsWorkgroupMemoryMask)
2111 modes |= nir_var_mem_shared;
2112 if (semantics & SpvMemorySemanticsOutputMemoryMask) {
2113 modes |= nir_var_shader_out;
2114 }
2115
2116 /* No barrier to add. */
2117 if (nir_semantics == 0 || modes == 0)
2118 return;
2119
2120 nir_scope nir_scope;
2121 switch (scope) {
2122 case SpvScopeDevice:
2123 vtn_fail_if(b->options->caps.vk_memory_model &&
2124 !b->options->caps.vk_memory_model_device_scope,
2125 "If the Vulkan memory model is declared and any instruction "
2126 "uses Device scope, the VulkanMemoryModelDeviceScope "
2127 "capability must be declared.");
2128 nir_scope = NIR_SCOPE_DEVICE;
2129 break;
2130
2131 case SpvScopeQueueFamily:
2132 vtn_fail_if(!b->options->caps.vk_memory_model,
2133 "To use Queue Family scope, the VulkanMemoryModel capability "
2134 "must be declared.");
2135 nir_scope = NIR_SCOPE_QUEUE_FAMILY;
2136 break;
2137
2138 case SpvScopeWorkgroup:
2139 nir_scope = NIR_SCOPE_WORKGROUP;
2140 break;
2141
2142 case SpvScopeSubgroup:
2143 nir_scope = NIR_SCOPE_SUBGROUP;
2144 break;
2145
2146 case SpvScopeInvocation:
2147 nir_scope = NIR_SCOPE_INVOCATION;
2148 break;
2149
2150 default:
2151 vtn_fail("Invalid memory scope");
2152 }
2153
2154 nir_intrinsic_instr *intrin =
2155 nir_intrinsic_instr_create(b->shader, nir_intrinsic_scoped_memory_barrier);
2156 nir_intrinsic_set_memory_semantics(intrin, nir_semantics);
2157
2158 nir_intrinsic_set_memory_modes(intrin, modes);
2159 nir_intrinsic_set_memory_scope(intrin, nir_scope);
2160 nir_builder_instr_insert(&b->nb, &intrin->instr);
2161 }
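
/* For example, an OpMemoryBarrier with Workgroup scope and
 * AcquireRelease | WorkgroupMemory semantics becomes a single
 * scoped_memory_barrier intrinsic with ACQUIRE | RELEASE semantics,
 * the nir_var_mem_shared mode and NIR_SCOPE_WORKGROUP.
 */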
2162
2163 struct vtn_ssa_value *
2164 vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
2165 {
2166 struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
2167 val->type = type;
2168
2169 if (!glsl_type_is_vector_or_scalar(type)) {
2170 unsigned elems = glsl_get_length(type);
2171 val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
2172 for (unsigned i = 0; i < elems; i++) {
2173 const struct glsl_type *child_type;
2174
2175 switch (glsl_get_base_type(type)) {
2176 case GLSL_TYPE_INT:
2177 case GLSL_TYPE_UINT:
2178 case GLSL_TYPE_INT16:
2179 case GLSL_TYPE_UINT16:
2180 case GLSL_TYPE_UINT8:
2181 case GLSL_TYPE_INT8:
2182 case GLSL_TYPE_INT64:
2183 case GLSL_TYPE_UINT64:
2184 case GLSL_TYPE_BOOL:
2185 case GLSL_TYPE_FLOAT:
2186 case GLSL_TYPE_FLOAT16:
2187 case GLSL_TYPE_DOUBLE:
2188 child_type = glsl_get_column_type(type);
2189 break;
2190 case GLSL_TYPE_ARRAY:
2191 child_type = glsl_get_array_element(type);
2192 break;
2193 case GLSL_TYPE_STRUCT:
2194 case GLSL_TYPE_INTERFACE:
2195 child_type = glsl_get_struct_field(type, i);
2196 break;
2197 default:
2198             vtn_fail("unknown base type");
2199 }
2200
2201 val->elems[i] = vtn_create_ssa_value(b, child_type);
2202 }
2203 }
2204
2205 return val;
2206 }
2207
2208 static nir_tex_src
2209 vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
2210 {
2211 nir_tex_src src;
2212 src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
2213 src.src_type = type;
2214 return src;
2215 }
2216
2217 static uint32_t
2218 image_operand_arg(struct vtn_builder *b, const uint32_t *w, uint32_t count,
2219 uint32_t mask_idx, SpvImageOperandsMask op)
2220 {
2221 static const SpvImageOperandsMask ops_with_arg =
2222 SpvImageOperandsBiasMask |
2223 SpvImageOperandsLodMask |
2224 SpvImageOperandsGradMask |
2225 SpvImageOperandsConstOffsetMask |
2226 SpvImageOperandsOffsetMask |
2227 SpvImageOperandsConstOffsetsMask |
2228 SpvImageOperandsSampleMask |
2229 SpvImageOperandsMinLodMask |
2230 SpvImageOperandsMakeTexelAvailableMask |
2231 SpvImageOperandsMakeTexelVisibleMask;
2232
2233 assert(util_bitcount(op) == 1);
2234 assert(w[mask_idx] & op);
2235 assert(op & ops_with_arg);
2236
2237 uint32_t idx = util_bitcount(w[mask_idx] & (op - 1) & ops_with_arg) + 1;
2238
2239 /* Adjust indices for operands with two arguments. */
2240 static const SpvImageOperandsMask ops_with_two_args =
2241 SpvImageOperandsGradMask;
2242 idx += util_bitcount(w[mask_idx] & (op - 1) & ops_with_two_args);
2243
2244 idx += mask_idx;
2245
2246 vtn_fail_if(idx + (op & ops_with_two_args ? 1 : 0) >= count,
2247                "Image op claims to have %s but does not have enough "
2248                "following operands", spirv_imageoperands_to_string(op));
2249
2250 return idx;
2251 }
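
/* Worked example with a hypothetical operand mask: for
 * Bias | Lod | ConstOffset and mask_idx == 4, asking for ConstOffset
 * counts the two lower set bits that take an argument (Bias and Lod),
 * giving idx = 2 + 1 + 4 = 7, i.e. w[7] holds the ConstOffset argument.
 */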
2252
2253 static void
2254 vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
2255 const uint32_t *w, unsigned count)
2256 {
2257 if (opcode == SpvOpSampledImage) {
2258 struct vtn_value *val =
2259 vtn_push_value(b, w[2], vtn_value_type_sampled_image);
2260 val->sampled_image = ralloc(b, struct vtn_sampled_image);
2261 val->sampled_image->image =
2262 vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2263 val->sampled_image->sampler =
2264 vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
2265 return;
2266 } else if (opcode == SpvOpImage) {
2267 struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
2268 if (src_val->value_type == vtn_value_type_sampled_image) {
2269 vtn_push_value_pointer(b, w[2], src_val->sampled_image->image);
2270 } else {
2271 vtn_assert(src_val->value_type == vtn_value_type_pointer);
2272 vtn_push_value_pointer(b, w[2], src_val->pointer);
2273 }
2274 return;
2275 }
2276
2277 struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
2278
2279 struct vtn_pointer *image = NULL, *sampler = NULL;
2280 struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
2281 if (sampled_val->value_type == vtn_value_type_sampled_image) {
2282 image = sampled_val->sampled_image->image;
2283 sampler = sampled_val->sampled_image->sampler;
2284 } else {
2285 vtn_assert(sampled_val->value_type == vtn_value_type_pointer);
2286 image = sampled_val->pointer;
2287 }
2288
2289 nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image);
2290 nir_deref_instr *sampler_deref =
2291 sampler ? vtn_pointer_to_deref(b, sampler) : NULL;
2292
2293 const struct glsl_type *image_type = sampled_val->type->type;
2294 const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
2295 const bool is_array = glsl_sampler_type_is_array(image_type);
2296 nir_alu_type dest_type = nir_type_invalid;
2297
2298 /* Figure out the base texture operation */
2299 nir_texop texop;
2300 switch (opcode) {
2301 case SpvOpImageSampleImplicitLod:
2302 case SpvOpImageSampleDrefImplicitLod:
2303 case SpvOpImageSampleProjImplicitLod:
2304 case SpvOpImageSampleProjDrefImplicitLod:
2305 texop = nir_texop_tex;
2306 break;
2307
2308 case SpvOpImageSampleExplicitLod:
2309 case SpvOpImageSampleDrefExplicitLod:
2310 case SpvOpImageSampleProjExplicitLod:
2311 case SpvOpImageSampleProjDrefExplicitLod:
2312 texop = nir_texop_txl;
2313 break;
2314
2315 case SpvOpImageFetch:
2316 if (sampler_dim == GLSL_SAMPLER_DIM_MS) {
2317 texop = nir_texop_txf_ms;
2318 } else {
2319 texop = nir_texop_txf;
2320 }
2321 break;
2322
2323 case SpvOpImageGather:
2324 case SpvOpImageDrefGather:
2325 texop = nir_texop_tg4;
2326 break;
2327
2328 case SpvOpImageQuerySizeLod:
2329 case SpvOpImageQuerySize:
2330 texop = nir_texop_txs;
2331 dest_type = nir_type_int;
2332 break;
2333
2334 case SpvOpImageQueryLod:
2335 texop = nir_texop_lod;
2336 dest_type = nir_type_float;
2337 break;
2338
2339 case SpvOpImageQueryLevels:
2340 texop = nir_texop_query_levels;
2341 dest_type = nir_type_int;
2342 break;
2343
2344 case SpvOpImageQuerySamples:
2345 texop = nir_texop_texture_samples;
2346 dest_type = nir_type_int;
2347 break;
2348
2349 default:
2350 vtn_fail_with_opcode("Unhandled opcode", opcode);
2351 }
2352
2353 nir_tex_src srcs[10]; /* 10 should be enough */
2354 nir_tex_src *p = srcs;
2355
2356 p->src = nir_src_for_ssa(&image_deref->dest.ssa);
2357 p->src_type = nir_tex_src_texture_deref;
2358 p++;
2359
2360 switch (texop) {
2361 case nir_texop_tex:
2362 case nir_texop_txb:
2363 case nir_texop_txl:
2364 case nir_texop_txd:
2365 case nir_texop_tg4:
2366 case nir_texop_lod:
2367 vtn_fail_if(sampler == NULL,
2368 "%s requires an image of type OpTypeSampledImage",
2369 spirv_op_to_string(opcode));
2370 p->src = nir_src_for_ssa(&sampler_deref->dest.ssa);
2371 p->src_type = nir_tex_src_sampler_deref;
2372 p++;
2373 break;
2374 case nir_texop_txf:
2375 case nir_texop_txf_ms:
2376 case nir_texop_txs:
2377 case nir_texop_query_levels:
2378 case nir_texop_texture_samples:
2379 case nir_texop_samples_identical:
2380       /* These don't need a sampler */
2381 break;
2382 case nir_texop_txf_ms_fb:
2383 vtn_fail("unexpected nir_texop_txf_ms_fb");
2384 break;
2385 case nir_texop_txf_ms_mcs:
2386 vtn_fail("unexpected nir_texop_txf_ms_mcs");
2387 case nir_texop_tex_prefetch:
2388 vtn_fail("unexpected nir_texop_tex_prefetch");
2389 }
2390
2391 unsigned idx = 4;
2392
2393 struct nir_ssa_def *coord;
2394 unsigned coord_components;
2395 switch (opcode) {
2396 case SpvOpImageSampleImplicitLod:
2397 case SpvOpImageSampleExplicitLod:
2398 case SpvOpImageSampleDrefImplicitLod:
2399 case SpvOpImageSampleDrefExplicitLod:
2400 case SpvOpImageSampleProjImplicitLod:
2401 case SpvOpImageSampleProjExplicitLod:
2402 case SpvOpImageSampleProjDrefImplicitLod:
2403 case SpvOpImageSampleProjDrefExplicitLod:
2404 case SpvOpImageFetch:
2405 case SpvOpImageGather:
2406 case SpvOpImageDrefGather:
2407 case SpvOpImageQueryLod: {
2408 /* All these types have the coordinate as their first real argument */
2409 switch (sampler_dim) {
2410 case GLSL_SAMPLER_DIM_1D:
2411 case GLSL_SAMPLER_DIM_BUF:
2412 coord_components = 1;
2413 break;
2414 case GLSL_SAMPLER_DIM_2D:
2415 case GLSL_SAMPLER_DIM_RECT:
2416 case GLSL_SAMPLER_DIM_MS:
2417 coord_components = 2;
2418 break;
2419 case GLSL_SAMPLER_DIM_3D:
2420 case GLSL_SAMPLER_DIM_CUBE:
2421 coord_components = 3;
2422 break;
2423 default:
2424 vtn_fail("Invalid sampler type");
2425 }
2426
2427 if (is_array && texop != nir_texop_lod)
2428 coord_components++;
2429
2430 coord = vtn_ssa_value(b, w[idx++])->def;
2431 p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
2432 (1 << coord_components) - 1));
2433 p->src_type = nir_tex_src_coord;
2434 p++;
2435 break;
2436 }
2437
2438 default:
2439 coord = NULL;
2440 coord_components = 0;
2441 break;
2442 }
2443
2444 switch (opcode) {
2445 case SpvOpImageSampleProjImplicitLod:
2446 case SpvOpImageSampleProjExplicitLod:
2447 case SpvOpImageSampleProjDrefImplicitLod:
2448 case SpvOpImageSampleProjDrefExplicitLod:
2449 /* These have the projector as the last coordinate component */
2450 p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
2451 p->src_type = nir_tex_src_projector;
2452 p++;
2453 break;
2454
2455 default:
2456 break;
2457 }
2458
2459 bool is_shadow = false;
2460 unsigned gather_component = 0;
2461 switch (opcode) {
2462 case SpvOpImageSampleDrefImplicitLod:
2463 case SpvOpImageSampleDrefExplicitLod:
2464 case SpvOpImageSampleProjDrefImplicitLod:
2465 case SpvOpImageSampleProjDrefExplicitLod:
2466 case SpvOpImageDrefGather:
2467 /* These all have an explicit depth value as their next source */
2468 is_shadow = true;
2469 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
2470 break;
2471
2472 case SpvOpImageGather:
2473 /* This has a component as its next source */
2474 gather_component = vtn_constant_uint(b, w[idx++]);
2475 break;
2476
2477 default:
2478 break;
2479 }
2480
2481 /* For OpImageQuerySizeLod, we always have an LOD */
2482 if (opcode == SpvOpImageQuerySizeLod)
2483 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
2484
2485 /* Now we need to handle some number of optional arguments */
2486 struct vtn_value *gather_offsets = NULL;
2487 if (idx < count) {
2488 uint32_t operands = w[idx];
2489
2490 if (operands & SpvImageOperandsBiasMask) {
2491 vtn_assert(texop == nir_texop_tex);
2492 texop = nir_texop_txb;
2493 uint32_t arg = image_operand_arg(b, w, count, idx,
2494 SpvImageOperandsBiasMask);
2495 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_bias);
2496 }
2497
2498 if (operands & SpvImageOperandsLodMask) {
2499 vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
2500 texop == nir_texop_txs);
2501 uint32_t arg = image_operand_arg(b, w, count, idx,
2502 SpvImageOperandsLodMask);
2503 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_lod);
2504 }
2505
2506 if (operands & SpvImageOperandsGradMask) {
2507 vtn_assert(texop == nir_texop_txl);
2508 texop = nir_texop_txd;
2509 uint32_t arg = image_operand_arg(b, w, count, idx,
2510 SpvImageOperandsGradMask);
2511 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ddx);
2512 (*p++) = vtn_tex_src(b, w[arg + 1], nir_tex_src_ddy);
2513 }
2514
2515 vtn_fail_if(util_bitcount(operands & (SpvImageOperandsConstOffsetsMask |
2516 SpvImageOperandsOffsetMask |
2517 SpvImageOperandsConstOffsetMask)) > 1,
2518 "At most one of the ConstOffset, Offset, and ConstOffsets "
2519 "image operands can be used on a given instruction.");
2520
2521 if (operands & SpvImageOperandsOffsetMask) {
2522 uint32_t arg = image_operand_arg(b, w, count, idx,
2523 SpvImageOperandsOffsetMask);
2524 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
2525 }
2526
2527 if (operands & SpvImageOperandsConstOffsetMask) {
2528 uint32_t arg = image_operand_arg(b, w, count, idx,
2529 SpvImageOperandsConstOffsetMask);
2530 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
2531 }
2532
2533 if (operands & SpvImageOperandsConstOffsetsMask) {
2534 vtn_assert(texop == nir_texop_tg4);
2535 uint32_t arg = image_operand_arg(b, w, count, idx,
2536 SpvImageOperandsConstOffsetsMask);
2537 gather_offsets = vtn_value(b, w[arg], vtn_value_type_constant);
2538 }
2539
2540 if (operands & SpvImageOperandsSampleMask) {
2541 vtn_assert(texop == nir_texop_txf_ms);
2542 uint32_t arg = image_operand_arg(b, w, count, idx,
2543 SpvImageOperandsSampleMask);
2544 texop = nir_texop_txf_ms;
2545 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ms_index);
2546 }
2547
2548 if (operands & SpvImageOperandsMinLodMask) {
2549 vtn_assert(texop == nir_texop_tex ||
2550 texop == nir_texop_txb ||
2551 texop == nir_texop_txd);
2552 uint32_t arg = image_operand_arg(b, w, count, idx,
2553 SpvImageOperandsMinLodMask);
2554 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_min_lod);
2555 }
2556 }
2557
2558 nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
2559 instr->op = texop;
2560
2561 memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
2562
2563 instr->coord_components = coord_components;
2564 instr->sampler_dim = sampler_dim;
2565 instr->is_array = is_array;
2566 instr->is_shadow = is_shadow;
2567 instr->is_new_style_shadow =
2568 is_shadow && glsl_get_components(ret_type->type) == 1;
2569 instr->component = gather_component;
2570
2571 if (image && (image->access & ACCESS_NON_UNIFORM))
2572 instr->texture_non_uniform = true;
2573
2574 if (sampler && (sampler->access & ACCESS_NON_UNIFORM))
2575 instr->sampler_non_uniform = true;
2576
2577    /* For non-query ops, get dest_type from the sampler's result type */
2578 if (dest_type == nir_type_invalid) {
2579 switch (glsl_get_sampler_result_type(image_type)) {
2580 case GLSL_TYPE_FLOAT: dest_type = nir_type_float; break;
2581 case GLSL_TYPE_INT: dest_type = nir_type_int; break;
2582 case GLSL_TYPE_UINT: dest_type = nir_type_uint; break;
2583 case GLSL_TYPE_BOOL: dest_type = nir_type_bool; break;
2584 default:
2585 vtn_fail("Invalid base type for sampler result");
2586 }
2587 }
2588
2589 instr->dest_type = dest_type;
2590
2591 nir_ssa_dest_init(&instr->instr, &instr->dest,
2592 nir_tex_instr_dest_size(instr), 32, NULL);
2593
2594 vtn_assert(glsl_get_vector_elements(ret_type->type) ==
2595 nir_tex_instr_dest_size(instr));
2596
2597 if (gather_offsets) {
2598 vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array ||
2599 gather_offsets->type->length != 4,
2600 "ConstOffsets must be an array of size four of vectors "
2601 "of two integer components");
2602
2603 struct vtn_type *vec_type = gather_offsets->type->array_element;
2604 vtn_fail_if(vec_type->base_type != vtn_base_type_vector ||
2605 vec_type->length != 2 ||
2606 !glsl_type_is_integer(vec_type->type),
2607 "ConstOffsets must be an array of size four of vectors "
2608 "of two integer components");
2609
2610 unsigned bit_size = glsl_get_bit_size(vec_type->type);
2611 for (uint32_t i = 0; i < 4; i++) {
2612 const nir_const_value *cvec =
2613 gather_offsets->constant->elements[i]->values;
2614 for (uint32_t j = 0; j < 2; j++) {
2615 switch (bit_size) {
2616 case 8: instr->tg4_offsets[i][j] = cvec[j].i8; break;
2617 case 16: instr->tg4_offsets[i][j] = cvec[j].i16; break;
2618 case 32: instr->tg4_offsets[i][j] = cvec[j].i32; break;
2619 case 64: instr->tg4_offsets[i][j] = cvec[j].i64; break;
2620 default:
2621 vtn_fail("Unsupported bit size: %u", bit_size);
2622 }
2623 }
2624 }
2625 }
2626
2627 struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, ret_type->type);
2628 ssa->def = &instr->dest.ssa;
2629 vtn_push_ssa(b, w[2], ret_type, ssa);
2630
2631 nir_builder_instr_insert(&b->nb, &instr->instr);
2632 }
2633
2634 static void
2635 fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
2636 const uint32_t *w, nir_src *src)
2637 {
2638 switch (opcode) {
2639 case SpvOpAtomicIIncrement:
2640 src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
2641 break;
2642
2643 case SpvOpAtomicIDecrement:
2644 src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
2645 break;
2646
2647 case SpvOpAtomicISub:
2648 src[0] =
2649 nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
2650 break;
2651
2652 case SpvOpAtomicCompareExchange:
2653 case SpvOpAtomicCompareExchangeWeak:
2654 src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
2655 src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
2656 break;
2657
2658 case SpvOpAtomicExchange:
2659 case SpvOpAtomicIAdd:
2660 case SpvOpAtomicSMin:
2661 case SpvOpAtomicUMin:
2662 case SpvOpAtomicSMax:
2663 case SpvOpAtomicUMax:
2664 case SpvOpAtomicAnd:
2665 case SpvOpAtomicOr:
2666 case SpvOpAtomicXor:
2667 src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
2668 break;
2669
2670 default:
2671 vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
2672 }
2673 }
2674
2675 static nir_ssa_def *
2676 get_image_coord(struct vtn_builder *b, uint32_t value)
2677 {
2678 struct vtn_ssa_value *coord = vtn_ssa_value(b, value);
2679
2680 /* The image_load_store intrinsics assume a 4-dim coordinate */
2681 unsigned dim = glsl_get_vector_elements(coord->type);
2682 unsigned swizzle[4];
2683 for (unsigned i = 0; i < 4; i++)
2684 swizzle[i] = MIN2(i, dim - 1);
2685
2686 return nir_swizzle(&b->nb, coord->def, swizzle, 4);
2687 }
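
/* E.g. a 2-component coordinate (x, y) becomes (x, y, y, y): components
 * past the source dimension just repeat the last valid one, which the
 * image intrinsics ignore.  (Example values for illustration only.)
 */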
2688
2689 static nir_ssa_def *
2690 expand_to_vec4(nir_builder *b, nir_ssa_def *value)
2691 {
2692 if (value->num_components == 4)
2693 return value;
2694
2695 unsigned swiz[4];
2696 for (unsigned i = 0; i < 4; i++)
2697 swiz[i] = i < value->num_components ? i : 0;
2698 return nir_swizzle(b, value, swiz, 4);
2699 }
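
/* E.g. a vec2 (a, b) expands to (a, b, a, a): the missing components are
 * filled by replicating component 0.  (Example values for illustration.)
 */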
2700
2701 static void
2702 vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
2703 const uint32_t *w, unsigned count)
2704 {
2705 /* Just get this one out of the way */
2706 if (opcode == SpvOpImageTexelPointer) {
2707 struct vtn_value *val =
2708 vtn_push_value(b, w[2], vtn_value_type_image_pointer);
2709 val->image = ralloc(b, struct vtn_image_pointer);
2710
2711 val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2712 val->image->coord = get_image_coord(b, w[4]);
2713 val->image->sample = vtn_ssa_value(b, w[5])->def;
2714 val->image->lod = nir_imm_int(&b->nb, 0);
2715 return;
2716 }
2717
2718 struct vtn_image_pointer image;
2719 SpvScope scope = SpvScopeInvocation;
2720 SpvMemorySemanticsMask semantics = 0;
2721
2722 switch (opcode) {
2723 case SpvOpAtomicExchange:
2724 case SpvOpAtomicCompareExchange:
2725 case SpvOpAtomicCompareExchangeWeak:
2726 case SpvOpAtomicIIncrement:
2727 case SpvOpAtomicIDecrement:
2728 case SpvOpAtomicIAdd:
2729 case SpvOpAtomicISub:
2730 case SpvOpAtomicLoad:
2731 case SpvOpAtomicSMin:
2732 case SpvOpAtomicUMin:
2733 case SpvOpAtomicSMax:
2734 case SpvOpAtomicUMax:
2735 case SpvOpAtomicAnd:
2736 case SpvOpAtomicOr:
2737 case SpvOpAtomicXor:
2738 image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
2739 scope = vtn_constant_uint(b, w[4]);
2740 semantics = vtn_constant_uint(b, w[5]);
2741 break;
2742
2743 case SpvOpAtomicStore:
2744 image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
2745 scope = vtn_constant_uint(b, w[2]);
2746 semantics = vtn_constant_uint(b, w[3]);
2747 break;
2748
2749 case SpvOpImageQuerySize:
2750 image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2751 image.coord = NULL;
2752 image.sample = NULL;
2753 image.lod = NULL;
2754 break;
2755
2756 case SpvOpImageRead: {
2757 image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2758 image.coord = get_image_coord(b, w[4]);
2759
2760 const SpvImageOperandsMask operands =
2761 count > 5 ? w[5] : SpvImageOperandsMaskNone;
2762
2763 if (operands & SpvImageOperandsSampleMask) {
2764 uint32_t arg = image_operand_arg(b, w, count, 5,
2765 SpvImageOperandsSampleMask);
2766 image.sample = vtn_ssa_value(b, w[arg])->def;
2767 } else {
2768 image.sample = nir_ssa_undef(&b->nb, 1, 32);
2769 }
2770
2771 if (operands & SpvImageOperandsMakeTexelVisibleMask) {
2772 vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
2773 "MakeTexelVisible requires NonPrivateTexel to also be set.");
2774 uint32_t arg = image_operand_arg(b, w, count, 5,
2775 SpvImageOperandsMakeTexelVisibleMask);
2776 semantics = SpvMemorySemanticsMakeVisibleMask;
2777 scope = vtn_constant_uint(b, w[arg]);
2778 }
2779
2780 if (operands & SpvImageOperandsLodMask) {
2781 uint32_t arg = image_operand_arg(b, w, count, 5,
2782 SpvImageOperandsLodMask);
2783 image.lod = vtn_ssa_value(b, w[arg])->def;
2784 } else {
2785 image.lod = nir_imm_int(&b->nb, 0);
2786 }
2787
2788 /* TODO: Volatile. */
2789
2790 break;
2791 }
2792
2793 case SpvOpImageWrite: {
2794 image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
2795 image.coord = get_image_coord(b, w[2]);
2796
2797 /* texel = w[3] */
2798
2799 const SpvImageOperandsMask operands =
2800 count > 4 ? w[4] : SpvImageOperandsMaskNone;
2801
2802 if (operands & SpvImageOperandsSampleMask) {
2803 uint32_t arg = image_operand_arg(b, w, count, 4,
2804 SpvImageOperandsSampleMask);
2805 image.sample = vtn_ssa_value(b, w[arg])->def;
2806 } else {
2807 image.sample = nir_ssa_undef(&b->nb, 1, 32);
2808 }
2809
2810 if (operands & SpvImageOperandsMakeTexelAvailableMask) {
2811 vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
2812 "MakeTexelAvailable requires NonPrivateTexel to also be set.");
2813 uint32_t arg = image_operand_arg(b, w, count, 4,
2814 SpvImageOperandsMakeTexelAvailableMask);
2815 semantics = SpvMemorySemanticsMakeAvailableMask;
2816 scope = vtn_constant_uint(b, w[arg]);
2817 }
2818
2819 if (operands & SpvImageOperandsLodMask) {
2820 uint32_t arg = image_operand_arg(b, w, count, 4,
2821 SpvImageOperandsLodMask);
2822 image.lod = vtn_ssa_value(b, w[arg])->def;
2823 } else {
2824 image.lod = nir_imm_int(&b->nb, 0);
2825 }
2826
2827 /* TODO: Volatile. */
2828
2829 break;
2830 }
2831
2832 default:
2833 vtn_fail_with_opcode("Invalid image opcode", opcode);
2834 }
2835
2836 nir_intrinsic_op op;
2837 switch (opcode) {
2838 #define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
2839 OP(ImageQuerySize, size)
2840 OP(ImageRead, load)
2841 OP(ImageWrite, store)
2842 OP(AtomicLoad, load)
2843 OP(AtomicStore, store)
2844 OP(AtomicExchange, atomic_exchange)
2845 OP(AtomicCompareExchange, atomic_comp_swap)
2846 OP(AtomicCompareExchangeWeak, atomic_comp_swap)
2847 OP(AtomicIIncrement, atomic_add)
2848 OP(AtomicIDecrement, atomic_add)
2849 OP(AtomicIAdd, atomic_add)
2850 OP(AtomicISub, atomic_add)
2851 OP(AtomicSMin, atomic_imin)
2852 OP(AtomicUMin, atomic_umin)
2853 OP(AtomicSMax, atomic_imax)
2854 OP(AtomicUMax, atomic_umax)
2855 OP(AtomicAnd, atomic_and)
2856 OP(AtomicOr, atomic_or)
2857 OP(AtomicXor, atomic_xor)
2858 #undef OP
2859 default:
2860 vtn_fail_with_opcode("Invalid image opcode", opcode);
2861 }
2862
2863 nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
2864
2865 nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image.image);
2866 intrin->src[0] = nir_src_for_ssa(&image_deref->dest.ssa);
2867
2868 /* ImageQuerySize doesn't take any extra parameters */
2869 if (opcode != SpvOpImageQuerySize) {
2870 /* The image coordinate is always 4 components but we may not have that
2871 * many. Swizzle to compensate.
2872 */
2873 intrin->src[1] = nir_src_for_ssa(expand_to_vec4(&b->nb, image.coord));
2874 intrin->src[2] = nir_src_for_ssa(image.sample);
2875 }
2876
2877 nir_intrinsic_set_access(intrin, image.image->access);
2878
2879 switch (opcode) {
2880 case SpvOpAtomicLoad:
2881 case SpvOpImageQuerySize:
2882 case SpvOpImageRead:
2883 if (opcode == SpvOpImageRead || opcode == SpvOpAtomicLoad) {
2884          /* Only OpImageRead can take an LOD parameter (when
2885           * SPV_AMD_shader_image_load_store_lod is used), but the current
2886           * NIR intrinsic definition for atomics requires us to set it
2887           * for OpAtomicLoad as well.
2888           */
2889 intrin->src[3] = nir_src_for_ssa(image.lod);
2890 }
2891 break;
2892 case SpvOpAtomicStore:
2893 case SpvOpImageWrite: {
2894 const uint32_t value_id = opcode == SpvOpAtomicStore ? w[4] : w[3];
2895 nir_ssa_def *value = vtn_ssa_value(b, value_id)->def;
2896 /* nir_intrinsic_image_deref_store always takes a vec4 value */
2897 assert(op == nir_intrinsic_image_deref_store);
2898 intrin->num_components = 4;
2899 intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value));
2900       /* Only OpImageWrite can take an LOD parameter (when
2901        * SPV_AMD_shader_image_load_store_lod is used), but the current
2902        * NIR intrinsic definition for atomics requires us to set it
2903        * for OpAtomicStore as well.
2904        */
2905 intrin->src[4] = nir_src_for_ssa(image.lod);
2906 break;
2907 }
2908
2909 case SpvOpAtomicCompareExchange:
2910 case SpvOpAtomicCompareExchangeWeak:
2911 case SpvOpAtomicIIncrement:
2912 case SpvOpAtomicIDecrement:
2913 case SpvOpAtomicExchange:
2914 case SpvOpAtomicIAdd:
2915 case SpvOpAtomicISub:
2916 case SpvOpAtomicSMin:
2917 case SpvOpAtomicUMin:
2918 case SpvOpAtomicSMax:
2919 case SpvOpAtomicUMax:
2920 case SpvOpAtomicAnd:
2921 case SpvOpAtomicOr:
2922 case SpvOpAtomicXor:
2923 fill_common_atomic_sources(b, opcode, w, &intrin->src[3]);
2924 break;
2925
2926 default:
2927 vtn_fail_with_opcode("Invalid image opcode", opcode);
2928 }
2929
2930 /* Image operations implicitly have the Image storage memory semantics. */
2931 semantics |= SpvMemorySemanticsImageMemoryMask;
2932
2933 SpvMemorySemanticsMask before_semantics;
2934 SpvMemorySemanticsMask after_semantics;
2935 vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);
2936
2937 if (before_semantics)
2938 vtn_emit_memory_barrier(b, scope, before_semantics);
2939
2940 if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
2941 struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
2942
2943 unsigned dest_components = glsl_get_vector_elements(type->type);
2944 intrin->num_components = nir_intrinsic_infos[op].dest_components;
2945 if (intrin->num_components == 0)
2946 intrin->num_components = dest_components;
2947
2948 nir_ssa_dest_init(&intrin->instr, &intrin->dest,
2949 intrin->num_components, 32, NULL);
2950
2951 nir_builder_instr_insert(&b->nb, &intrin->instr);
2952
2953 nir_ssa_def *result = &intrin->dest.ssa;
2954 if (intrin->num_components != dest_components)
2955 result = nir_channels(&b->nb, result, (1 << dest_components) - 1);
2956
2957 struct vtn_value *val =
2958 vtn_push_ssa(b, w[2], type, vtn_create_ssa_value(b, type->type));
2959 val->ssa->def = result;
2960 } else {
2961 nir_builder_instr_insert(&b->nb, &intrin->instr);
2962 }
2963
2964 if (after_semantics)
2965 vtn_emit_memory_barrier(b, scope, after_semantics);
2966 }
2967
2968 static nir_intrinsic_op
2969 get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
2970 {
2971 switch (opcode) {
2972 case SpvOpAtomicLoad: return nir_intrinsic_load_ssbo;
2973 case SpvOpAtomicStore: return nir_intrinsic_store_ssbo;
2974 #define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
2975 OP(AtomicExchange, atomic_exchange)
2976 OP(AtomicCompareExchange, atomic_comp_swap)
2977 OP(AtomicCompareExchangeWeak, atomic_comp_swap)
2978 OP(AtomicIIncrement, atomic_add)
2979 OP(AtomicIDecrement, atomic_add)
2980 OP(AtomicIAdd, atomic_add)
2981 OP(AtomicISub, atomic_add)
2982 OP(AtomicSMin, atomic_imin)
2983 OP(AtomicUMin, atomic_umin)
2984 OP(AtomicSMax, atomic_imax)
2985 OP(AtomicUMax, atomic_umax)
2986 OP(AtomicAnd, atomic_and)
2987 OP(AtomicOr, atomic_or)
2988 OP(AtomicXor, atomic_xor)
2989 #undef OP
2990 default:
2991 vtn_fail_with_opcode("Invalid SSBO atomic", opcode);
2992 }
2993 }
2994
2995 static nir_intrinsic_op
2996 get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
2997 {
2998 switch (opcode) {
2999 #define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
3000 OP(AtomicLoad, read_deref)
3001 OP(AtomicExchange, exchange)
3002 OP(AtomicCompareExchange, comp_swap)
3003 OP(AtomicCompareExchangeWeak, comp_swap)
3004 OP(AtomicIIncrement, inc_deref)
3005 OP(AtomicIDecrement, post_dec_deref)
3006 OP(AtomicIAdd, add_deref)
3007 OP(AtomicISub, add_deref)
3008 OP(AtomicUMin, min_deref)
3009 OP(AtomicUMax, max_deref)
3010 OP(AtomicAnd, and_deref)
3011 OP(AtomicOr, or_deref)
3012 OP(AtomicXor, xor_deref)
3013 #undef OP
3014 default:
3015       /* We left out AtomicStore, AtomicSMin and AtomicSMax; right now
3016        * there are no NIR intrinsics for them.  At the moment, atomic
3017        * counter support is only needed for ARB_spirv, so we only need to
3018        * support GLSL atomic counters, which are uints and don't allow
3019        * direct storage.
3020        */
3021 vtn_fail("Invalid uniform atomic");
3022 }
3023 }
3024
3025 static nir_intrinsic_op
3026 get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
3027 {
3028 switch (opcode) {
3029 case SpvOpAtomicLoad: return nir_intrinsic_load_deref;
3030 case SpvOpAtomicStore: return nir_intrinsic_store_deref;
3031 #define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
3032 OP(AtomicExchange, atomic_exchange)
3033 OP(AtomicCompareExchange, atomic_comp_swap)
3034 OP(AtomicCompareExchangeWeak, atomic_comp_swap)
3035 OP(AtomicIIncrement, atomic_add)
3036 OP(AtomicIDecrement, atomic_add)
3037 OP(AtomicIAdd, atomic_add)
3038 OP(AtomicISub, atomic_add)
3039 OP(AtomicSMin, atomic_imin)
3040 OP(AtomicUMin, atomic_umin)
3041 OP(AtomicSMax, atomic_imax)
3042 OP(AtomicUMax, atomic_umax)
3043 OP(AtomicAnd, atomic_and)
3044 OP(AtomicOr, atomic_or)
3045 OP(AtomicXor, atomic_xor)
3046 #undef OP
3047 default:
3048 vtn_fail_with_opcode("Invalid shared atomic", opcode);
3049 }
3050 }
3051
3052 /*
3053  * Handles shared atomics, SSBO atomics and atomic counters.
3054 */
3055 static void
3056 vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
3057 const uint32_t *w, UNUSED unsigned count)
3058 {
3059 struct vtn_pointer *ptr;
3060 nir_intrinsic_instr *atomic;
3061
3062 SpvScope scope = SpvScopeInvocation;
3063 SpvMemorySemanticsMask semantics = 0;
3064
3065 switch (opcode) {
3066 case SpvOpAtomicLoad:
3067 case SpvOpAtomicExchange:
3068 case SpvOpAtomicCompareExchange:
3069 case SpvOpAtomicCompareExchangeWeak:
3070 case SpvOpAtomicIIncrement:
3071 case SpvOpAtomicIDecrement:
3072 case SpvOpAtomicIAdd:
3073 case SpvOpAtomicISub:
3074 case SpvOpAtomicSMin:
3075 case SpvOpAtomicUMin:
3076 case SpvOpAtomicSMax:
3077 case SpvOpAtomicUMax:
3078 case SpvOpAtomicAnd:
3079 case SpvOpAtomicOr:
3080 case SpvOpAtomicXor:
3081 ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
3082 scope = vtn_constant_uint(b, w[4]);
3083 semantics = vtn_constant_uint(b, w[5]);
3084 break;
3085
3086 case SpvOpAtomicStore:
3087 ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
3088 scope = vtn_constant_uint(b, w[2]);
3089 semantics = vtn_constant_uint(b, w[3]);
3090 break;
3091
3092 default:
3093 vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
3094 }
3095
3096    /* Here, "uniform" means an atomic counter uniform */
3097 if (ptr->mode == vtn_variable_mode_uniform) {
3098 nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
3099 const struct glsl_type *deref_type = deref->type;
3100 nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
3101 atomic = nir_intrinsic_instr_create(b->nb.shader, op);
3102 atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
3103
3104       /* The SSBO path needs to initialize index/offset.  Here we don't,
3105        * because that info is already stored on the ptr->var->var
3106        * nir_variable (see vtn_create_variable).
3107        */
3108
3109 switch (opcode) {
3110 case SpvOpAtomicLoad:
3111 atomic->num_components = glsl_get_vector_elements(deref_type);
3112 break;
3113
3114 case SpvOpAtomicStore:
3115 atomic->num_components = glsl_get_vector_elements(deref_type);
3116 nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
3117 break;
3118
3119 case SpvOpAtomicExchange:
3120 case SpvOpAtomicCompareExchange:
3121 case SpvOpAtomicCompareExchangeWeak:
3122 case SpvOpAtomicIIncrement:
3123 case SpvOpAtomicIDecrement:
3124 case SpvOpAtomicIAdd:
3125 case SpvOpAtomicISub:
3126 case SpvOpAtomicSMin:
3127 case SpvOpAtomicUMin:
3128 case SpvOpAtomicSMax:
3129 case SpvOpAtomicUMax:
3130 case SpvOpAtomicAnd:
3131 case SpvOpAtomicOr:
3132 case SpvOpAtomicXor:
3133          /* Nothing to do: we don't need to call fill_common_atomic_sources
3134           * here, as atomic counter uniforms don't have sources.
3135           */
3136 break;
3137
3138 default:
3139 unreachable("Invalid SPIR-V atomic");
3140
3141 }
3142 } else if (vtn_pointer_uses_ssa_offset(b, ptr)) {
3143 nir_ssa_def *offset, *index;
3144 offset = vtn_pointer_to_offset(b, ptr, &index);
3145
3146 assert(ptr->mode == vtn_variable_mode_ssbo);
3147
3148 nir_intrinsic_op op = get_ssbo_nir_atomic_op(b, opcode);
3149 atomic = nir_intrinsic_instr_create(b->nb.shader, op);
3150
3151 int src = 0;
3152 switch (opcode) {
3153 case SpvOpAtomicLoad:
3154 atomic->num_components = glsl_get_vector_elements(ptr->type->type);
3155 nir_intrinsic_set_align(atomic, 4, 0);
3156 if (ptr->mode == vtn_variable_mode_ssbo)
3157 atomic->src[src++] = nir_src_for_ssa(index);
3158 atomic->src[src++] = nir_src_for_ssa(offset);
3159 break;
3160
3161 case SpvOpAtomicStore:
3162 atomic->num_components = glsl_get_vector_elements(ptr->type->type);
3163 nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
3164 nir_intrinsic_set_align(atomic, 4, 0);
3165 atomic->src[src++] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
3166 if (ptr->mode == vtn_variable_mode_ssbo)
3167 atomic->src[src++] = nir_src_for_ssa(index);
3168 atomic->src[src++] = nir_src_for_ssa(offset);
3169 break;
3170
3171 case SpvOpAtomicExchange:
3172 case SpvOpAtomicCompareExchange:
3173 case SpvOpAtomicCompareExchangeWeak:
3174 case SpvOpAtomicIIncrement:
3175 case SpvOpAtomicIDecrement:
3176 case SpvOpAtomicIAdd:
3177 case SpvOpAtomicISub:
3178 case SpvOpAtomicSMin:
3179 case SpvOpAtomicUMin:
3180 case SpvOpAtomicSMax:
3181 case SpvOpAtomicUMax:
3182 case SpvOpAtomicAnd:
3183 case SpvOpAtomicOr:
3184 case SpvOpAtomicXor:
3185 if (ptr->mode == vtn_variable_mode_ssbo)
3186 atomic->src[src++] = nir_src_for_ssa(index);
3187 atomic->src[src++] = nir_src_for_ssa(offset);
3188 fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
3189 break;
3190
3191 default:
3192 vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
3193 }
3194 } else {
3195 nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
3196 const struct glsl_type *deref_type = deref->type;
3197 nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
3198 atomic = nir_intrinsic_instr_create(b->nb.shader, op);
3199 atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
3200
3201 switch (opcode) {
3202 case SpvOpAtomicLoad:
3203 atomic->num_components = glsl_get_vector_elements(deref_type);
3204 break;
3205
3206 case SpvOpAtomicStore:
3207 atomic->num_components = glsl_get_vector_elements(deref_type);
3208 nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
3209 atomic->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
3210 break;
3211
3212 case SpvOpAtomicExchange:
3213 case SpvOpAtomicCompareExchange:
3214 case SpvOpAtomicCompareExchangeWeak:
3215 case SpvOpAtomicIIncrement:
3216 case SpvOpAtomicIDecrement:
3217 case SpvOpAtomicIAdd:
3218 case SpvOpAtomicISub:
3219 case SpvOpAtomicSMin:
3220 case SpvOpAtomicUMin:
3221 case SpvOpAtomicSMax:
3222 case SpvOpAtomicUMax:
3223 case SpvOpAtomicAnd:
3224 case SpvOpAtomicOr:
3225 case SpvOpAtomicXor:
3226 fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
3227 break;
3228
3229 default:
3230 vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
3231 }
3232 }
3233
3234    /* Atomic ordering operations implicitly apply to the storage class of
3235     * the atomic operation, so include that too.
3236     */
3237 semantics |= vtn_storage_class_to_memory_semantics(ptr->ptr_type->storage_class);
3238
3239 SpvMemorySemanticsMask before_semantics;
3240 SpvMemorySemanticsMask after_semantics;
3241 vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);
3242
3243 if (before_semantics)
3244 vtn_emit_memory_barrier(b, scope, before_semantics);
3245
3246 if (opcode != SpvOpAtomicStore) {
3247 struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
3248
3249 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
3250 glsl_get_vector_elements(type->type),
3251 glsl_get_bit_size(type->type), NULL);
3252
3253 struct vtn_ssa_value *ssa = rzalloc(b, struct vtn_ssa_value);
3254 ssa->def = &atomic->dest.ssa;
3255 ssa->type = type->type;
3256 vtn_push_ssa(b, w[2], type, ssa);
3257 }
3258
3259 nir_builder_instr_insert(&b->nb, &atomic->instr);
3260
3261 if (after_semantics)
3262 vtn_emit_memory_barrier(b, scope, after_semantics);
3263 }
3264
3265 static nir_alu_instr *
3266 create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
3267 {
3268 nir_op op = nir_op_vec(num_components);
3269 nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
3270 nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
3271 bit_size, NULL);
3272 vec->dest.write_mask = (1 << num_components) - 1;
3273
3274 return vec;
3275 }
3276
3277 struct vtn_ssa_value *
3278 vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
3279 {
3280 if (src->transposed)
3281 return src->transposed;
3282
3283 struct vtn_ssa_value *dest =
3284 vtn_create_ssa_value(b, glsl_transposed_type(src->type));
3285
3286 for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
3287 nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type),
3288 glsl_get_bit_size(src->type));
3289 if (glsl_type_is_vector_or_scalar(src->type)) {
3290 vec->src[0].src = nir_src_for_ssa(src->def);
3291 vec->src[0].swizzle[0] = i;
3292 } else {
3293 for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
3294 vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
3295 vec->src[j].swizzle[0] = i;
3296 }
3297 }
3298 nir_builder_instr_insert(&b->nb, &vec->instr);
3299 dest->elems[i]->def = &vec->dest.dest.ssa;
3300 }
3301
3302 dest->transposed = src;
3303
3304 return dest;
3305 }
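
/* Each transposed column i gathers component i of every source column;
 * e.g. a 2x2 matrix with columns (a, b) and (c, d) yields columns
 * (a, c) and (b, d).  (Example values for illustration only.)
 */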
3306
3307 nir_ssa_def *
3308 vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
3309 {
3310 return nir_channel(&b->nb, src, index);
3311 }
3312
3313 nir_ssa_def *
3314 vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
3315 unsigned index)
3316 {
3317 nir_alu_instr *vec = create_vec(b, src->num_components,
3318 src->bit_size);
3319
3320 for (unsigned i = 0; i < src->num_components; i++) {
3321 if (i == index) {
3322 vec->src[i].src = nir_src_for_ssa(insert);
3323 } else {
3324 vec->src[i].src = nir_src_for_ssa(src);
3325 vec->src[i].swizzle[0] = i;
3326 }
3327 }
3328
3329 nir_builder_instr_insert(&b->nb, &vec->instr);
3330
3331 return &vec->dest.dest.ssa;
3332 }
3333
3334 static nir_ssa_def *
3335 nir_ieq_imm(nir_builder *b, nir_ssa_def *x, uint64_t i)
3336 {
3337 return nir_ieq(b, x, nir_imm_intN_t(b, i, x->bit_size));
3338 }
3339
3340 nir_ssa_def *
3341 vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
3342 nir_ssa_def *index)
3343 {
3344 return nir_vector_extract(&b->nb, src, nir_i2i(&b->nb, index, 32));
3345 }
3346
3347 nir_ssa_def *
3348 vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
3349 nir_ssa_def *insert, nir_ssa_def *index)
3350 {
3351 nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
3352 for (unsigned i = 1; i < src->num_components; i++)
3353 dest = nir_bcsel(&b->nb, nir_ieq_imm(&b->nb, index, i),
3354 vtn_vector_insert(b, src, insert, i), dest);
3355
3356 return dest;
3357 }
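
/* With no constant index available, the insert is lowered to a chain of
 * bcsels over the i == 0 base case, one per remaining component; e.g. a
 * vec4 insert emits three bcsels.
 */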
3358
3359 static nir_ssa_def *
3360 vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
3361 nir_ssa_def *src0, nir_ssa_def *src1,
3362 const uint32_t *indices)
3363 {
3364 nir_alu_instr *vec = create_vec(b, num_components, src0->bit_size);
3365
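   /* Per the SPIR-V spec, a Component literal of 0xFFFFFFFF means the
    * result component has no source and is undefined, so it is realized
    * here as a fresh 1-component undef.
    */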
3366 for (unsigned i = 0; i < num_components; i++) {
3367 uint32_t index = indices[i];
3368 if (index == 0xffffffff) {
3369 vec->src[i].src =
3370 nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
3371 } else if (index < src0->num_components) {
3372 vec->src[i].src = nir_src_for_ssa(src0);
3373 vec->src[i].swizzle[0] = index;
3374 } else {
3375 vec->src[i].src = nir_src_for_ssa(src1);
3376 vec->src[i].swizzle[0] = index - src0->num_components;
3377 }
3378 }
3379
3380 nir_builder_instr_insert(&b->nb, &vec->instr);
3381
3382 return &vec->dest.dest.ssa;
3383 }
3384
3385 /*
3386  * Concatenates a number of vectors/scalars together to produce a vector.
3387 */
3388 static nir_ssa_def *
3389 vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
3390 unsigned num_srcs, nir_ssa_def **srcs)
3391 {
3392 nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);
3393
3394 /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
3395 *
3396 * "When constructing a vector, there must be at least two Constituent
3397 * operands."
3398 */
3399 vtn_assert(num_srcs >= 2);
3400
3401 unsigned dest_idx = 0;
3402 for (unsigned i = 0; i < num_srcs; i++) {
3403 nir_ssa_def *src = srcs[i];
3404 vtn_assert(dest_idx + src->num_components <= num_components);
3405 for (unsigned j = 0; j < src->num_components; j++) {
3406 vec->src[dest_idx].src = nir_src_for_ssa(src);
3407 vec->src[dest_idx].swizzle[0] = j;
3408 dest_idx++;
3409 }
3410 }
3411
3412 /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
3413 *
3414 * "When constructing a vector, the total number of components in all
3415 * the operands must equal the number of components in Result Type."
3416 */
3417 vtn_assert(dest_idx == num_components);
3418
3419 nir_builder_instr_insert(&b->nb, &vec->instr);
3420
3421 return &vec->dest.dest.ssa;
3422 }
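
/* E.g. constructing a vec4 from (vec2 ab, float c, float d) emits a
 * single vec4 ALU op sourcing a, b, c and d in order.  (Example types
 * for illustration only.)
 */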
3423
3424 static struct vtn_ssa_value *
3425 vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
3426 {
3427 struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
3428 dest->type = src->type;
3429
3430 if (glsl_type_is_vector_or_scalar(src->type)) {
3431 dest->def = src->def;
3432 } else {
3433 unsigned elems = glsl_get_length(src->type);
3434
3435 dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
3436 for (unsigned i = 0; i < elems; i++)
3437 dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
3438 }
3439
3440 return dest;
3441 }
3442
3443 static struct vtn_ssa_value *
3444 vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
3445 struct vtn_ssa_value *insert, const uint32_t *indices,
3446 unsigned num_indices)
3447 {
3448 struct vtn_ssa_value *dest = vtn_composite_copy(b, src);
3449
3450 struct vtn_ssa_value *cur = dest;
3451 unsigned i;
3452 for (i = 0; i < num_indices - 1; i++) {
3453 cur = cur->elems[indices[i]];
3454 }
3455
3456 if (glsl_type_is_vector_or_scalar(cur->type)) {
3457       /* According to the SPIR-V spec, OpCompositeInsert may work down to
3458        * component granularity.  In that case, the last index is the
3459        * component at which to insert the scalar into the vector.
3460        */
3461
3462 cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
3463 } else {
3464 cur->elems[indices[i]] = insert;
3465 }
3466
3467 return dest;
3468 }
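
/* E.g. with the hypothetical index path {2, 0} into an array of vec4s,
 * the composite is copied, the walk stops at array element 2, and the
 * final index does a vector insert at component 0 of that vec4.
 */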
3469
3470 static struct vtn_ssa_value *
3471 vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
3472 const uint32_t *indices, unsigned num_indices)
3473 {
3474 struct vtn_ssa_value *cur = src;
3475 for (unsigned i = 0; i < num_indices; i++) {
3476 if (glsl_type_is_vector_or_scalar(cur->type)) {
3477 vtn_assert(i == num_indices - 1);
3478       /* According to the SPIR-V spec, OpCompositeExtract may work down to
3479        * component granularity.  The last index is the component of the
3480        * vector to extract.
3481        */
3482
3483 struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
3484 ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
3485 ret->def = vtn_vector_extract(b, cur->def, indices[i]);
3486 return ret;
3487 } else {
3488 cur = cur->elems[indices[i]];
3489 }
3490 }
3491
3492 return cur;
3493 }
3494
3495 static void
3496 vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
3497 const uint32_t *w, unsigned count)
3498 {
3499 struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
3500 struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);
3501
3502 switch (opcode) {
3503 case SpvOpVectorExtractDynamic:
3504 ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
3505 vtn_ssa_value(b, w[4])->def);
3506 break;
3507
3508 case SpvOpVectorInsertDynamic:
3509 ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
3510 vtn_ssa_value(b, w[4])->def,
3511 vtn_ssa_value(b, w[5])->def);
3512 break;
3513
3514 case SpvOpVectorShuffle:
3515 ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type->type),
3516 vtn_ssa_value(b, w[3])->def,
3517 vtn_ssa_value(b, w[4])->def,
3518 w + 5);
3519 break;
3520
3521 case SpvOpCompositeConstruct: {
3522 unsigned elems = count - 3;
3523 assume(elems >= 1);
3524 if (glsl_type_is_vector_or_scalar(type->type)) {
3525 nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
3526 for (unsigned i = 0; i < elems; i++)
3527 srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
3528 ssa->def =
3529 vtn_vector_construct(b, glsl_get_vector_elements(type->type),
3530 elems, srcs);
3531 } else {
3532 ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
3533 for (unsigned i = 0; i < elems; i++)
3534 ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
3535 }
3536 break;
3537 }
3538 case SpvOpCompositeExtract:
3539 ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
3540 w + 4, count - 4);
3541 break;
3542
3543 case SpvOpCompositeInsert:
3544 ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
3545 vtn_ssa_value(b, w[3]),
3546 w + 5, count - 5);
3547 break;
3548
3549 case SpvOpCopyLogical:
3550 case SpvOpCopyObject:
3551 ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
3552 break;
3553
3554 default:
3555 vtn_fail_with_opcode("unknown composite operation", opcode);
3556 }
3557
3558 vtn_push_ssa(b, w[2], type, ssa);
3559 }
3560
3561 static void
3562 vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
3563 {
3564 nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
3565 nir_builder_instr_insert(&b->nb, &intrin->instr);
3566 }
3567
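/* Lowers a SPIR-V memory barrier to NIR. When the driver supports scoped
 * barriers we emit one directly; otherwise we fall back to the legacy
 * all-memory or per-storage-class barrier intrinsics below.
 */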
3568 void
3569 vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
3570 SpvMemorySemanticsMask semantics)
3571 {
3572 if (b->options->use_scoped_memory_barrier) {
3573 vtn_emit_scoped_memory_barrier(b, scope, semantics);
3574 return;
3575 }
3576
3577 static const SpvMemorySemanticsMask all_memory_semantics =
3578 SpvMemorySemanticsUniformMemoryMask |
3579 SpvMemorySemanticsWorkgroupMemoryMask |
3580 SpvMemorySemanticsAtomicCounterMemoryMask |
3581 SpvMemorySemanticsImageMemoryMask;
3582
3583 /* If we're not actually doing a memory barrier, bail */
3584 if (!(semantics & all_memory_semantics))
3585 return;
3586
3587    /* GL and Vulkan don't have this scope */
3588 vtn_assert(scope != SpvScopeCrossDevice);
3589
3590 if (scope == SpvScopeSubgroup)
3591 return; /* Nothing to do here */
3592
3593 if (scope == SpvScopeWorkgroup) {
3594 vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
3595 return;
3596 }
3597
3598    /* There are only two scopes left */
3599 vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);
3600
3601 if ((semantics & all_memory_semantics) == all_memory_semantics) {
3602 vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
3603 return;
3604 }
3605
3606 /* Issue a bunch of more specific barriers */
3607 uint32_t bits = semantics;
3608 while (bits) {
3609 SpvMemorySemanticsMask semantic = 1 << u_bit_scan(&bits);
3610 switch (semantic) {
3611 case SpvMemorySemanticsUniformMemoryMask:
3612 vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
3613 break;
3614 case SpvMemorySemanticsWorkgroupMemoryMask:
3615 vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
3616 break;
3617 case SpvMemorySemanticsAtomicCounterMemoryMask:
3618 vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
3619 break;
3620 case SpvMemorySemanticsImageMemoryMask:
3621 vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
3622 break;
3623 case SpvMemorySemanticsOutputMemoryMask:
3624 if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL)
3625 vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
3626 break;
3627 default:
3628          break;
3629 }
3630 }
3631 }
3632
3633 static void
3634 vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
3635 const uint32_t *w, UNUSED unsigned count)
3636 {
3637 switch (opcode) {
3638 case SpvOpEmitVertex:
3639 case SpvOpEmitStreamVertex:
3640 case SpvOpEndPrimitive:
3641 case SpvOpEndStreamPrimitive: {
3642 nir_intrinsic_op intrinsic_op;
3643 switch (opcode) {
3644 case SpvOpEmitVertex:
3645 case SpvOpEmitStreamVertex:
3646 intrinsic_op = nir_intrinsic_emit_vertex;
3647 break;
3648 case SpvOpEndPrimitive:
3649 case SpvOpEndStreamPrimitive:
3650 intrinsic_op = nir_intrinsic_end_primitive;
3651 break;
3652 default:
3653 unreachable("Invalid opcode");
3654 }
3655
3656 nir_intrinsic_instr *intrin =
3657 nir_intrinsic_instr_create(b->shader, intrinsic_op);
3658
3659 switch (opcode) {
3660 case SpvOpEmitStreamVertex:
3661 case SpvOpEndStreamPrimitive: {
3662 unsigned stream = vtn_constant_uint(b, w[1]);
3663 nir_intrinsic_set_stream_id(intrin, stream);
3664 break;
3665 }
3666
3667 default:
3668 break;
3669 }
3670
3671 nir_builder_instr_insert(&b->nb, &intrin->instr);
3672 break;
3673 }
3674
3675 case SpvOpMemoryBarrier: {
3676 SpvScope scope = vtn_constant_uint(b, w[1]);
3677 SpvMemorySemanticsMask semantics = vtn_constant_uint(b, w[2]);
3678 vtn_emit_memory_barrier(b, scope, semantics);
3679 return;
3680 }
3681
3682 case SpvOpControlBarrier: {
3683 SpvScope execution_scope = vtn_constant_uint(b, w[1]);
3684 SpvScope memory_scope = vtn_constant_uint(b, w[2]);
3685 SpvMemorySemanticsMask memory_semantics = vtn_constant_uint(b, w[3]);
3686
3687 /* GLSLang, prior to commit 8297936dd6eb3, emitted OpControlBarrier with
3688 * memory semantics of None for GLSL barrier().
3689 */
3690 if (b->wa_glslang_cs_barrier &&
3691 b->nb.shader->info.stage == MESA_SHADER_COMPUTE &&
3692 execution_scope == SpvScopeWorkgroup &&
3693 memory_semantics == SpvMemorySemanticsMaskNone) {
3694 memory_scope = SpvScopeWorkgroup;
3695 memory_semantics = SpvMemorySemanticsAcquireReleaseMask |
3696 SpvMemorySemanticsWorkgroupMemoryMask;
3697 }
3698
3699 /* From the SPIR-V spec:
3700 *
3701 * "When used with the TessellationControl execution model, it also
3702 * implicitly synchronizes the Output Storage Class: Writes to Output
3703 * variables performed by any invocation executed prior to a
3704 * OpControlBarrier will be visible to any other invocation after
3705 * return from that OpControlBarrier."
3706 */
3707 if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL) {
3708 memory_semantics &= ~(SpvMemorySemanticsAcquireMask |
3709 SpvMemorySemanticsReleaseMask |
3710 SpvMemorySemanticsAcquireReleaseMask |
3711 SpvMemorySemanticsSequentiallyConsistentMask);
3712 memory_semantics |= SpvMemorySemanticsAcquireReleaseMask |
3713 SpvMemorySemanticsOutputMemoryMask;
3714 }
3715
3716 vtn_emit_memory_barrier(b, memory_scope, memory_semantics);
3717
3718 if (execution_scope == SpvScopeWorkgroup)
3719 vtn_emit_barrier(b, nir_intrinsic_control_barrier);
3720 break;
3721 }
3722
3723 default:
3724 unreachable("unknown barrier instruction");
3725 }
3726 }
3727
3728 static unsigned
3729 gl_primitive_from_spv_execution_mode(struct vtn_builder *b,
3730 SpvExecutionMode mode)
3731 {
3732 switch (mode) {
3733 case SpvExecutionModeInputPoints:
3734 case SpvExecutionModeOutputPoints:
3735 return 0; /* GL_POINTS */
3736 case SpvExecutionModeInputLines:
3737 return 1; /* GL_LINES */
3738 case SpvExecutionModeInputLinesAdjacency:
3739       return 0x000A; /* GL_LINES_ADJACENCY_ARB */
3740 case SpvExecutionModeTriangles:
3741 return 4; /* GL_TRIANGLES */
3742 case SpvExecutionModeInputTrianglesAdjacency:
3743 return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
3744 case SpvExecutionModeQuads:
3745 return 7; /* GL_QUADS */
3746 case SpvExecutionModeIsolines:
3747 return 0x8E7A; /* GL_ISOLINES */
3748 case SpvExecutionModeOutputLineStrip:
3749 return 3; /* GL_LINE_STRIP */
3750 case SpvExecutionModeOutputTriangleStrip:
3751 return 5; /* GL_TRIANGLE_STRIP */
3752 default:
3753 vtn_fail("Invalid primitive type: %s (%u)",
3754 spirv_executionmode_to_string(mode), mode);
3755 }
3756 }
3757
3758 static unsigned
3759 vertices_in_from_spv_execution_mode(struct vtn_builder *b,
3760 SpvExecutionMode mode)
3761 {
3762 switch (mode) {
3763 case SpvExecutionModeInputPoints:
3764 return 1;
3765 case SpvExecutionModeInputLines:
3766 return 2;
3767 case SpvExecutionModeInputLinesAdjacency:
3768 return 4;
3769 case SpvExecutionModeTriangles:
3770 return 3;
3771 case SpvExecutionModeInputTrianglesAdjacency:
3772 return 6;
3773 default:
3774 vtn_fail("Invalid GS input mode: %s (%u)",
3775 spirv_executionmode_to_string(mode), mode);
3776 }
3777 }
3778
3779 static gl_shader_stage
3780 stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model)
3781 {
3782 switch (model) {
3783 case SpvExecutionModelVertex:
3784 return MESA_SHADER_VERTEX;
3785 case SpvExecutionModelTessellationControl:
3786 return MESA_SHADER_TESS_CTRL;
3787 case SpvExecutionModelTessellationEvaluation:
3788 return MESA_SHADER_TESS_EVAL;
3789 case SpvExecutionModelGeometry:
3790 return MESA_SHADER_GEOMETRY;
3791 case SpvExecutionModelFragment:
3792 return MESA_SHADER_FRAGMENT;
3793 case SpvExecutionModelGLCompute:
3794 return MESA_SHADER_COMPUTE;
3795 case SpvExecutionModelKernel:
3796 return MESA_SHADER_KERNEL;
3797 default:
3798 vtn_fail("Unsupported execution model: %s (%u)",
3799 spirv_executionmodel_to_string(model), model);
3800 }
3801 }
3802
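/* Note: this only warns instead of failing hard; in practice some SPIR-V
 * producers declare capabilities that the module never actually uses.
 */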
3803 #define spv_check_supported(name, cap) do { \
3804 if (!(b->options && b->options->caps.name)) \
3805 vtn_warn("Unsupported SPIR-V capability: %s (%u)", \
3806 spirv_capability_to_string(cap), cap); \
3807 } while(0)
3808
3809
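/* Records the OpEntryPoint whose name and execution model match the ones
 * the caller asked for; any other entry points in the module only get
 * their name recorded and are otherwise ignored.
 */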
3810 void
3811 vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
3812 unsigned count)
3813 {
3814 struct vtn_value *entry_point = &b->values[w[2]];
3815    /* Record the name on the value regardless of whether this is our entry point */
3816 unsigned name_words;
3817 entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);
3818
3819 if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
3820 stage_for_execution_model(b, w[1]) != b->entry_point_stage)
3821 return;
3822
3823 vtn_assert(b->entry_point == NULL);
3824 b->entry_point = entry_point;
3825 }
3826
3827 static bool
3828 vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
3829 const uint32_t *w, unsigned count)
3830 {
3831 switch (opcode) {
3832 case SpvOpSource: {
3833 const char *lang;
3834 switch (w[1]) {
3835 default:
3836 case SpvSourceLanguageUnknown: lang = "unknown"; break;
3837 case SpvSourceLanguageESSL: lang = "ESSL"; break;
3838 case SpvSourceLanguageGLSL: lang = "GLSL"; break;
3839 case SpvSourceLanguageOpenCL_C: lang = "OpenCL C"; break;
3840 case SpvSourceLanguageOpenCL_CPP: lang = "OpenCL C++"; break;
3841 case SpvSourceLanguageHLSL: lang = "HLSL"; break;
3842 }
3843
3844 uint32_t version = w[2];
3845
3846 const char *file =
3847 (count > 3) ? vtn_value(b, w[3], vtn_value_type_string)->str : "";
3848
3849 vtn_info("Parsing SPIR-V from %s %u source file %s", lang, version, file);
3850 break;
3851 }
3852
3853 case SpvOpSourceExtension:
3854 case SpvOpSourceContinued:
3855 case SpvOpExtension:
3856 case SpvOpModuleProcessed:
3857 /* Unhandled, but these are for debug so that's ok. */
3858 break;
3859
3860 case SpvOpCapability: {
3861 SpvCapability cap = w[1];
3862 switch (cap) {
3863 case SpvCapabilityMatrix:
3864 case SpvCapabilityShader:
3865 case SpvCapabilityGeometry:
3866 case SpvCapabilityGeometryPointSize:
3867 case SpvCapabilityUniformBufferArrayDynamicIndexing:
3868 case SpvCapabilitySampledImageArrayDynamicIndexing:
3869 case SpvCapabilityStorageBufferArrayDynamicIndexing:
3870 case SpvCapabilityStorageImageArrayDynamicIndexing:
3871 case SpvCapabilityImageRect:
3872 case SpvCapabilitySampledRect:
3873 case SpvCapabilitySampled1D:
3874 case SpvCapabilityImage1D:
3875 case SpvCapabilitySampledCubeArray:
3876 case SpvCapabilityImageCubeArray:
3877 case SpvCapabilitySampledBuffer:
3878 case SpvCapabilityImageBuffer:
3879 case SpvCapabilityImageQuery:
3880 case SpvCapabilityDerivativeControl:
3881 case SpvCapabilityInterpolationFunction:
3882 case SpvCapabilityMultiViewport:
3883 case SpvCapabilitySampleRateShading:
3884 case SpvCapabilityClipDistance:
3885 case SpvCapabilityCullDistance:
3886 case SpvCapabilityInputAttachment:
3887 case SpvCapabilityImageGatherExtended:
3888 case SpvCapabilityStorageImageExtendedFormats:
3889 case SpvCapabilityVector16:
3890 break;
3891
3892 case SpvCapabilityLinkage:
3893 case SpvCapabilityFloat16Buffer:
3894 case SpvCapabilitySparseResidency:
3895 vtn_warn("Unsupported SPIR-V capability: %s",
3896 spirv_capability_to_string(cap));
3897 break;
3898
3899 case SpvCapabilityMinLod:
3900 spv_check_supported(min_lod, cap);
3901 break;
3902
3903 case SpvCapabilityAtomicStorage:
3904 spv_check_supported(atomic_storage, cap);
3905 break;
3906
3907 case SpvCapabilityFloat64:
3908 spv_check_supported(float64, cap);
3909 break;
3910 case SpvCapabilityInt64:
3911 spv_check_supported(int64, cap);
3912 break;
3913 case SpvCapabilityInt16:
3914 spv_check_supported(int16, cap);
3915 break;
3916 case SpvCapabilityInt8:
3917 spv_check_supported(int8, cap);
3918 break;
3919
3920 case SpvCapabilityTransformFeedback:
3921 spv_check_supported(transform_feedback, cap);
3922 break;
3923
3924 case SpvCapabilityGeometryStreams:
3925 spv_check_supported(geometry_streams, cap);
3926 break;
3927
3928 case SpvCapabilityInt64Atomics:
3929 spv_check_supported(int64_atomics, cap);
3930 break;
3931
3932 case SpvCapabilityStorageImageMultisample:
3933 spv_check_supported(storage_image_ms, cap);
3934 break;
3935
3936 case SpvCapabilityAddresses:
3937 spv_check_supported(address, cap);
3938 break;
3939
3940 case SpvCapabilityKernel:
3941 spv_check_supported(kernel, cap);
3942 break;
3943
3944 case SpvCapabilityImageBasic:
3945 case SpvCapabilityImageReadWrite:
3946 case SpvCapabilityImageMipmap:
3947 case SpvCapabilityPipes:
3948 case SpvCapabilityDeviceEnqueue:
3949 case SpvCapabilityLiteralSampler:
3950 case SpvCapabilityGenericPointer:
3951 vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
3952 spirv_capability_to_string(cap));
3953 break;
3954
3955 case SpvCapabilityImageMSArray:
3956 spv_check_supported(image_ms_array, cap);
3957 break;
3958
3959 case SpvCapabilityTessellation:
3960 case SpvCapabilityTessellationPointSize:
3961 spv_check_supported(tessellation, cap);
3962 break;
3963
3964 case SpvCapabilityDrawParameters:
3965 spv_check_supported(draw_parameters, cap);
3966 break;
3967
3968 case SpvCapabilityStorageImageReadWithoutFormat:
3969 spv_check_supported(image_read_without_format, cap);
3970 break;
3971
3972 case SpvCapabilityStorageImageWriteWithoutFormat:
3973 spv_check_supported(image_write_without_format, cap);
3974 break;
3975
3976 case SpvCapabilityDeviceGroup:
3977 spv_check_supported(device_group, cap);
3978 break;
3979
3980 case SpvCapabilityMultiView:
3981 spv_check_supported(multiview, cap);
3982 break;
3983
3984 case SpvCapabilityGroupNonUniform:
3985 spv_check_supported(subgroup_basic, cap);
3986 break;
3987
3988 case SpvCapabilitySubgroupVoteKHR:
3989 case SpvCapabilityGroupNonUniformVote:
3990 spv_check_supported(subgroup_vote, cap);
3991 break;
3992
3993 case SpvCapabilitySubgroupBallotKHR:
3994 case SpvCapabilityGroupNonUniformBallot:
3995 spv_check_supported(subgroup_ballot, cap);
3996 break;
3997
3998 case SpvCapabilityGroupNonUniformShuffle:
3999 case SpvCapabilityGroupNonUniformShuffleRelative:
4000 spv_check_supported(subgroup_shuffle, cap);
4001 break;
4002
4003 case SpvCapabilityGroupNonUniformQuad:
4004 spv_check_supported(subgroup_quad, cap);
4005 break;
4006
4007 case SpvCapabilityGroupNonUniformArithmetic:
4008 case SpvCapabilityGroupNonUniformClustered:
4009 spv_check_supported(subgroup_arithmetic, cap);
4010 break;
4011
4012 case SpvCapabilityGroups:
4013 spv_check_supported(amd_shader_ballot, cap);
4014 break;
4015
4016 case SpvCapabilityVariablePointersStorageBuffer:
4017 case SpvCapabilityVariablePointers:
4018 spv_check_supported(variable_pointers, cap);
4019 b->variable_pointers = true;
4020 break;
4021
4022 case SpvCapabilityStorageUniformBufferBlock16:
4023 case SpvCapabilityStorageUniform16:
4024 case SpvCapabilityStoragePushConstant16:
4025 case SpvCapabilityStorageInputOutput16:
4026 spv_check_supported(storage_16bit, cap);
4027 break;
4028
4029 case SpvCapabilityShaderLayer:
4030 case SpvCapabilityShaderViewportIndex:
4031 case SpvCapabilityShaderViewportIndexLayerEXT:
4032 spv_check_supported(shader_viewport_index_layer, cap);
4033 break;
4034
4035 case SpvCapabilityStorageBuffer8BitAccess:
4036 case SpvCapabilityUniformAndStorageBuffer8BitAccess:
4037 case SpvCapabilityStoragePushConstant8:
4038 spv_check_supported(storage_8bit, cap);
4039 break;
4040
4041 case SpvCapabilityShaderNonUniformEXT:
4042 spv_check_supported(descriptor_indexing, cap);
4043 break;
4044
4045 case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
4046 case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
4047 case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
4048 spv_check_supported(descriptor_array_dynamic_indexing, cap);
4049 break;
4050
4051 case SpvCapabilityUniformBufferArrayNonUniformIndexingEXT:
4052 case SpvCapabilitySampledImageArrayNonUniformIndexingEXT:
4053 case SpvCapabilityStorageBufferArrayNonUniformIndexingEXT:
4054 case SpvCapabilityStorageImageArrayNonUniformIndexingEXT:
4055 case SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT:
4056 case SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT:
4057 case SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT:
4058 spv_check_supported(descriptor_array_non_uniform_indexing, cap);
4059 break;
4060
4061 case SpvCapabilityRuntimeDescriptorArrayEXT:
4062 spv_check_supported(runtime_descriptor_array, cap);
4063 break;
4064
4065 case SpvCapabilityStencilExportEXT:
4066 spv_check_supported(stencil_export, cap);
4067 break;
4068
4069 case SpvCapabilitySampleMaskPostDepthCoverage:
4070 spv_check_supported(post_depth_coverage, cap);
4071 break;
4072
4073 case SpvCapabilityDenormFlushToZero:
4074 case SpvCapabilityDenormPreserve:
4075 case SpvCapabilitySignedZeroInfNanPreserve:
4076 case SpvCapabilityRoundingModeRTE:
4077 case SpvCapabilityRoundingModeRTZ:
4078 spv_check_supported(float_controls, cap);
4079 break;
4080
4081 case SpvCapabilityPhysicalStorageBufferAddresses:
4082 spv_check_supported(physical_storage_buffer_address, cap);
4083 break;
4084
4085 case SpvCapabilityComputeDerivativeGroupQuadsNV:
4086 case SpvCapabilityComputeDerivativeGroupLinearNV:
4087 spv_check_supported(derivative_group, cap);
4088 break;
4089
4090 case SpvCapabilityFloat16:
4091 spv_check_supported(float16, cap);
4092 break;
4093
4094 case SpvCapabilityFragmentShaderSampleInterlockEXT:
4095 spv_check_supported(fragment_shader_sample_interlock, cap);
4096 break;
4097
4098 case SpvCapabilityFragmentShaderPixelInterlockEXT:
4099 spv_check_supported(fragment_shader_pixel_interlock, cap);
4100 break;
4101
4102 case SpvCapabilityDemoteToHelperInvocationEXT:
4103 spv_check_supported(demote_to_helper_invocation, cap);
4104 break;
4105
4106 case SpvCapabilityShaderClockKHR:
4107 spv_check_supported(shader_clock, cap);
4108 break;
4109
4110 case SpvCapabilityVulkanMemoryModel:
4111 spv_check_supported(vk_memory_model, cap);
4112 break;
4113
4114 case SpvCapabilityVulkanMemoryModelDeviceScope:
4115 spv_check_supported(vk_memory_model_device_scope, cap);
4116 break;
4117
4118 case SpvCapabilityImageReadWriteLodAMD:
4119 spv_check_supported(amd_image_read_write_lod, cap);
4120 break;
4121
4122 default:
4123 vtn_fail("Unhandled capability: %s (%u)",
4124 spirv_capability_to_string(cap), cap);
4125 }
4126 break;
4127 }
4128
4129 case SpvOpExtInstImport:
4130 vtn_handle_extension(b, opcode, w, count);
4131 break;
4132
4133 case SpvOpMemoryModel:
4134 switch (w[1]) {
4135 case SpvAddressingModelPhysical32:
4136 vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
4137 "AddressingModelPhysical32 only supported for kernels");
4138 b->shader->info.cs.ptr_size = 32;
4139 b->physical_ptrs = true;
4140 b->options->shared_addr_format = nir_address_format_32bit_global;
4141 b->options->global_addr_format = nir_address_format_32bit_global;
4142 b->options->temp_addr_format = nir_address_format_32bit_global;
4143 break;
4144 case SpvAddressingModelPhysical64:
4145 vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
4146 "AddressingModelPhysical64 only supported for kernels");
4147 b->shader->info.cs.ptr_size = 64;
4148 b->physical_ptrs = true;
4149 b->options->shared_addr_format = nir_address_format_64bit_global;
4150 b->options->global_addr_format = nir_address_format_64bit_global;
4151 b->options->temp_addr_format = nir_address_format_64bit_global;
4152 break;
4153 case SpvAddressingModelLogical:
4154 vtn_fail_if(b->shader->info.stage >= MESA_SHADER_STAGES,
4155 "AddressingModelLogical only supported for shaders");
4156 b->physical_ptrs = false;
4157 break;
4158 case SpvAddressingModelPhysicalStorageBuffer64:
4159 vtn_fail_if(!b->options ||
4160 !b->options->caps.physical_storage_buffer_address,
4161 "AddressingModelPhysicalStorageBuffer64 not supported");
4162 break;
4163 default:
4164 vtn_fail("Unknown addressing model: %s (%u)",
4165 spirv_addressingmodel_to_string(w[1]), w[1]);
4166 break;
4167 }
4168
4169 switch (w[2]) {
4170 case SpvMemoryModelSimple:
4171 case SpvMemoryModelGLSL450:
4172 case SpvMemoryModelOpenCL:
4173 break;
4174 case SpvMemoryModelVulkan:
4175 vtn_fail_if(!b->options->caps.vk_memory_model,
4176 "Vulkan memory model is unsupported by this driver");
4177 break;
4178 default:
4179 vtn_fail("Unsupported memory model: %s",
4180 spirv_memorymodel_to_string(w[2]));
4181 break;
4182 }
4183 break;
4184
4185 case SpvOpEntryPoint:
4186 vtn_handle_entry_point(b, w, count);
4187 break;
4188
4189 case SpvOpString:
4190 vtn_push_value(b, w[1], vtn_value_type_string)->str =
4191 vtn_string_literal(b, &w[2], count - 2, NULL);
4192 break;
4193
4194 case SpvOpName:
4195 b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
4196 break;
4197
4198 case SpvOpMemberName:
4199 /* TODO */
4200 break;
4201
4202 case SpvOpExecutionMode:
4203 case SpvOpExecutionModeId:
4204 case SpvOpDecorationGroup:
4205 case SpvOpDecorate:
4206 case SpvOpDecorateId:
4207 case SpvOpMemberDecorate:
4208 case SpvOpGroupDecorate:
4209 case SpvOpGroupMemberDecorate:
4210 case SpvOpDecorateString:
4211 case SpvOpMemberDecorateString:
4212 vtn_handle_decoration(b, opcode, w, count);
4213 break;
4214
4215 case SpvOpExtInst: {
4216 struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
4217 if (val->ext_handler == vtn_handle_non_semantic_instruction) {
4218          /* NonSemantic extended instructions are acceptable in the preamble. */
4219 vtn_handle_non_semantic_instruction(b, w[4], w, count);
4220 return true;
4221 } else {
4222 return false; /* End of preamble. */
4223 }
4224 }
4225
4226 default:
4227 return false; /* End of preamble */
4228 }
4229
4230 return true;
4231 }
4232
4233 static void
4234 vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
4235 const struct vtn_decoration *mode, UNUSED void *data)
4236 {
4237 vtn_assert(b->entry_point == entry_point);
4238
4239    switch (mode->exec_mode) {
4240 case SpvExecutionModeOriginUpperLeft:
4241 case SpvExecutionModeOriginLowerLeft:
4242 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4243 b->shader->info.fs.origin_upper_left =
4244 (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
4245 break;
4246
4247 case SpvExecutionModeEarlyFragmentTests:
4248 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4249 b->shader->info.fs.early_fragment_tests = true;
4250 break;
4251
4252 case SpvExecutionModePostDepthCoverage:
4253 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4254 b->shader->info.fs.post_depth_coverage = true;
4255 break;
4256
4257 case SpvExecutionModeInvocations:
4258 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
4259 b->shader->info.gs.invocations = MAX2(1, mode->operands[0]);
4260 break;
4261
4262 case SpvExecutionModeDepthReplacing:
4263 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4264 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
4265 break;
4266 case SpvExecutionModeDepthGreater:
4267 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4268 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
4269 break;
4270 case SpvExecutionModeDepthLess:
4271 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4272 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
4273 break;
4274 case SpvExecutionModeDepthUnchanged:
4275 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4276 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
4277 break;
4278
4279 case SpvExecutionModeLocalSize:
4280 vtn_assert(gl_shader_stage_is_compute(b->shader->info.stage));
4281 b->shader->info.cs.local_size[0] = mode->operands[0];
4282 b->shader->info.cs.local_size[1] = mode->operands[1];
4283 b->shader->info.cs.local_size[2] = mode->operands[2];
4284 break;
4285
4286 case SpvExecutionModeLocalSizeId:
4287 b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
4288 b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
4289 b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
4290 break;
4291
4292 case SpvExecutionModeLocalSizeHint:
4293 case SpvExecutionModeLocalSizeHintId:
4294 break; /* Nothing to do with this */
4295
4296 case SpvExecutionModeOutputVertices:
4297 if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4298 b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
4299 b->shader->info.tess.tcs_vertices_out = mode->operands[0];
4300 } else {
4301 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
4302 b->shader->info.gs.vertices_out = mode->operands[0];
4303 }
4304 break;
4305
4306 case SpvExecutionModeInputPoints:
4307 case SpvExecutionModeInputLines:
4308 case SpvExecutionModeInputLinesAdjacency:
4309 case SpvExecutionModeTriangles:
4310 case SpvExecutionModeInputTrianglesAdjacency:
4311 case SpvExecutionModeQuads:
4312 case SpvExecutionModeIsolines:
4313 if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4314 b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
4315 b->shader->info.tess.primitive_mode =
4316 gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
4317 } else {
4318 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
4319 b->shader->info.gs.vertices_in =
4320 vertices_in_from_spv_execution_mode(b, mode->exec_mode);
4321 b->shader->info.gs.input_primitive =
4322 gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
4323 }
4324 break;
4325
4326 case SpvExecutionModeOutputPoints:
4327 case SpvExecutionModeOutputLineStrip:
4328 case SpvExecutionModeOutputTriangleStrip:
4329 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
4330 b->shader->info.gs.output_primitive =
4331 gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
4332 break;
4333
4334 case SpvExecutionModeSpacingEqual:
4335 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4336 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4337 b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
4338 break;
4339 case SpvExecutionModeSpacingFractionalEven:
4340 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4341 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4342 b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
4343 break;
4344 case SpvExecutionModeSpacingFractionalOdd:
4345 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4346 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4347 b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
4348 break;
4349 case SpvExecutionModeVertexOrderCw:
4350 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4351 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4352 b->shader->info.tess.ccw = false;
4353 break;
4354 case SpvExecutionModeVertexOrderCcw:
4355 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4356 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4357 b->shader->info.tess.ccw = true;
4358 break;
4359 case SpvExecutionModePointMode:
4360 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4361 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4362 b->shader->info.tess.point_mode = true;
4363 break;
4364
4365 case SpvExecutionModePixelCenterInteger:
4366 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4367 b->shader->info.fs.pixel_center_integer = true;
4368 break;
4369
4370 case SpvExecutionModeXfb:
4371 b->shader->info.has_transform_feedback_varyings = true;
4372 break;
4373
4374 case SpvExecutionModeVecTypeHint:
4375 break; /* OpenCL */
4376
4377 case SpvExecutionModeContractionOff:
4378 if (b->shader->info.stage != MESA_SHADER_KERNEL)
4379 vtn_warn("ExectionMode only allowed for CL-style kernels: %s",
4380 spirv_executionmode_to_string(mode->exec_mode));
4381 else
4382 b->exact = true;
4383 break;
4384
4385 case SpvExecutionModeStencilRefReplacingEXT:
4386 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4387 break;
4388
4389 case SpvExecutionModeDerivativeGroupQuadsNV:
4390 vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
4391 b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_QUADS;
4392 break;
4393
4394 case SpvExecutionModeDerivativeGroupLinearNV:
4395 vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
4396 b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_LINEAR;
4397 break;
4398
4399 case SpvExecutionModePixelInterlockOrderedEXT:
4400 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4401 b->shader->info.fs.pixel_interlock_ordered = true;
4402 break;
4403
4404 case SpvExecutionModePixelInterlockUnorderedEXT:
4405 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4406 b->shader->info.fs.pixel_interlock_unordered = true;
4407 break;
4408
4409 case SpvExecutionModeSampleInterlockOrderedEXT:
4410 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4411 b->shader->info.fs.sample_interlock_ordered = true;
4412 break;
4413
4414 case SpvExecutionModeSampleInterlockUnorderedEXT:
4415 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4416 b->shader->info.fs.sample_interlock_unordered = true;
4417 break;
4418
4419 case SpvExecutionModeDenormPreserve:
4420 case SpvExecutionModeDenormFlushToZero:
4421 case SpvExecutionModeSignedZeroInfNanPreserve:
4422 case SpvExecutionModeRoundingModeRTE:
4423 case SpvExecutionModeRoundingModeRTZ:
4424 /* Already handled in vtn_handle_rounding_mode_in_execution_mode() */
4425 break;
4426
4427 default:
4428 vtn_fail("Unhandled execution mode: %s (%u)",
4429 spirv_executionmode_to_string(mode->exec_mode),
4430 mode->exec_mode);
4431 }
4432 }
4433
4434 static void
4435 vtn_handle_rounding_mode_in_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
4436 const struct vtn_decoration *mode, void *data)
4437 {
4438 vtn_assert(b->entry_point == entry_point);
4439
4440 unsigned execution_mode = 0;
4441
4442    switch (mode->exec_mode) {
4443 case SpvExecutionModeDenormPreserve:
4444 switch (mode->operands[0]) {
4445 case 16: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP16; break;
4446 case 32: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP32; break;
4447 case 64: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP64; break;
4448 default: vtn_fail("Floating point type not supported");
4449 }
4450 break;
4451 case SpvExecutionModeDenormFlushToZero:
4452 switch (mode->operands[0]) {
4453 case 16: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16; break;
4454 case 32: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32; break;
4455 case 64: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64; break;
4456 default: vtn_fail("Floating point type not supported");
4457 }
4458 break;
4459 case SpvExecutionModeSignedZeroInfNanPreserve:
4460 switch (mode->operands[0]) {
4461 case 16: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16; break;
4462 case 32: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32; break;
4463 case 64: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64; break;
4464 default: vtn_fail("Floating point type not supported");
4465 }
4466 break;
4467 case SpvExecutionModeRoundingModeRTE:
4468 switch (mode->operands[0]) {
4469 case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16; break;
4470 case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32; break;
4471 case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64; break;
4472 default: vtn_fail("Floating point type not supported");
4473 }
4474 break;
4475 case SpvExecutionModeRoundingModeRTZ:
4476 switch (mode->operands[0]) {
4477 case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16; break;
4478 case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32; break;
4479 case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64; break;
4480 default: vtn_fail("Floating point type not supported");
4481 }
4482 break;
4483
4484 default:
4485 break;
4486 }
4487
4488 b->shader->info.float_controls_execution_mode |= execution_mode;
4489 }
4490
4491 static bool
4492 vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
4493 const uint32_t *w, unsigned count)
4494 {
4495 vtn_set_instruction_result_type(b, opcode, w, count);
4496
4497 switch (opcode) {
4498 case SpvOpSource:
4499 case SpvOpSourceContinued:
4500 case SpvOpSourceExtension:
4501 case SpvOpExtension:
4502 case SpvOpCapability:
4503 case SpvOpExtInstImport:
4504 case SpvOpMemoryModel:
4505 case SpvOpEntryPoint:
4506 case SpvOpExecutionMode:
4507 case SpvOpString:
4508 case SpvOpName:
4509 case SpvOpMemberName:
4510 case SpvOpDecorationGroup:
4511 case SpvOpDecorate:
4512 case SpvOpDecorateId:
4513 case SpvOpMemberDecorate:
4514 case SpvOpGroupDecorate:
4515 case SpvOpGroupMemberDecorate:
4516 case SpvOpDecorateString:
4517 case SpvOpMemberDecorateString:
4518 vtn_fail("Invalid opcode types and variables section");
4519 break;
4520
4521 case SpvOpTypeVoid:
4522 case SpvOpTypeBool:
4523 case SpvOpTypeInt:
4524 case SpvOpTypeFloat:
4525 case SpvOpTypeVector:
4526 case SpvOpTypeMatrix:
4527 case SpvOpTypeImage:
4528 case SpvOpTypeSampler:
4529 case SpvOpTypeSampledImage:
4530 case SpvOpTypeArray:
4531 case SpvOpTypeRuntimeArray:
4532 case SpvOpTypeStruct:
4533 case SpvOpTypeOpaque:
4534 case SpvOpTypePointer:
4535 case SpvOpTypeForwardPointer:
4536 case SpvOpTypeFunction:
4537 case SpvOpTypeEvent:
4538 case SpvOpTypeDeviceEvent:
4539 case SpvOpTypeReserveId:
4540 case SpvOpTypeQueue:
4541 case SpvOpTypePipe:
4542 vtn_handle_type(b, opcode, w, count);
4543 break;
4544
4545 case SpvOpConstantTrue:
4546 case SpvOpConstantFalse:
4547 case SpvOpConstant:
4548 case SpvOpConstantComposite:
4549 case SpvOpConstantSampler:
4550 case SpvOpConstantNull:
4551 case SpvOpSpecConstantTrue:
4552 case SpvOpSpecConstantFalse:
4553 case SpvOpSpecConstant:
4554 case SpvOpSpecConstantComposite:
4555 case SpvOpSpecConstantOp:
4556 vtn_handle_constant(b, opcode, w, count);
4557 break;
4558
4559 case SpvOpUndef:
4560 case SpvOpVariable:
4561 vtn_handle_variables(b, opcode, w, count);
4562 break;
4563
4564 case SpvOpExtInst: {
4565 struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
4566       /* NonSemantic extended instructions are acceptable in the preamble; any
4567        * other extended instruction marks the end of the preamble.
4568        */
4569 return val->ext_handler == vtn_handle_non_semantic_instruction;
4570 }
4571
4572 default:
4573 return false; /* End of preamble */
4574 }
4575
4576 return true;
4577 }
4578
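/* Recursively applies nir_bcsel over a composite: the same (scalar or
 * vector) condition selects between corresponding leaves of src1 and
 * src2.
 */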
4579 static struct vtn_ssa_value *
4580 vtn_nir_select(struct vtn_builder *b, struct vtn_ssa_value *src0,
4581 struct vtn_ssa_value *src1, struct vtn_ssa_value *src2)
4582 {
4583 struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
4584 dest->type = src1->type;
4585
4586 if (glsl_type_is_vector_or_scalar(src1->type)) {
4587 dest->def = nir_bcsel(&b->nb, src0->def, src1->def, src2->def);
4588 } else {
4589 unsigned elems = glsl_get_length(src1->type);
4590
4591 dest->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
4592 for (unsigned i = 0; i < elems; i++) {
4593 dest->elems[i] = vtn_nir_select(b, src0,
4594 src1->elems[i], src2->elems[i]);
4595 }
4596 }
4597
4598 return dest;
4599 }
4600
4601 static void
4602 vtn_handle_select(struct vtn_builder *b, SpvOp opcode,
4603 const uint32_t *w, unsigned count)
4604 {
4605 /* Handle OpSelect up-front here because it needs to be able to handle
4606 * pointers and not just regular vectors and scalars.
4607 */
4608 struct vtn_value *res_val = vtn_untyped_value(b, w[2]);
4609 struct vtn_value *cond_val = vtn_untyped_value(b, w[3]);
4610 struct vtn_value *obj1_val = vtn_untyped_value(b, w[4]);
4611 struct vtn_value *obj2_val = vtn_untyped_value(b, w[5]);
4612
4613 vtn_fail_if(obj1_val->type != res_val->type ||
4614 obj2_val->type != res_val->type,
4615 "Object types must match the result type in OpSelect");
4616
4617 vtn_fail_if((cond_val->type->base_type != vtn_base_type_scalar &&
4618 cond_val->type->base_type != vtn_base_type_vector) ||
4619 !glsl_type_is_boolean(cond_val->type->type),
4620 "OpSelect must have either a vector of booleans or "
4621 "a boolean as Condition type");
4622
4623 vtn_fail_if(cond_val->type->base_type == vtn_base_type_vector &&
4624 (res_val->type->base_type != vtn_base_type_vector ||
4625 res_val->type->length != cond_val->type->length),
4626 "When Condition type in OpSelect is a vector, the Result "
4627 "type must be a vector of the same length");
4628
4629 switch (res_val->type->base_type) {
4630 case vtn_base_type_scalar:
4631 case vtn_base_type_vector:
4632 case vtn_base_type_matrix:
4633 case vtn_base_type_array:
4634 case vtn_base_type_struct:
4635 /* OK. */
4636 break;
4637 case vtn_base_type_pointer:
4638 /* We need to have actual storage for pointer types. */
4639 vtn_fail_if(res_val->type->type == NULL,
4640 "Invalid pointer result type for OpSelect");
4641 break;
4642 default:
4643 vtn_fail("Result type of OpSelect must be a scalar, composite, or pointer");
4644 }
4645
4646 struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
4647 struct vtn_ssa_value *ssa = vtn_nir_select(b,
4648 vtn_ssa_value(b, w[3]), vtn_ssa_value(b, w[4]), vtn_ssa_value(b, w[5]));
4649
4650 vtn_push_ssa(b, w[2], res_type, ssa);
4651 }
4652
4653 static void
4654 vtn_handle_ptr(struct vtn_builder *b, SpvOp opcode,
4655 const uint32_t *w, unsigned count)
4656 {
4657 struct vtn_type *type1 = vtn_untyped_value(b, w[3])->type;
4658 struct vtn_type *type2 = vtn_untyped_value(b, w[4])->type;
4659 vtn_fail_if(type1->base_type != vtn_base_type_pointer ||
4660 type2->base_type != vtn_base_type_pointer,
4661 "%s operands must have pointer types",
4662 spirv_op_to_string(opcode));
4663 vtn_fail_if(type1->storage_class != type2->storage_class,
4664 "%s operands must have the same storage class",
4665 spirv_op_to_string(opcode));
4666
4667 struct vtn_type *vtn_type =
4668 vtn_value(b, w[1], vtn_value_type_type)->type;
4669 const struct glsl_type *type = vtn_type->type;
4670
4671 nir_address_format addr_format = vtn_mode_to_address_format(
4672 b, vtn_storage_class_to_mode(b, type1->storage_class, NULL, NULL));
4673
4674 nir_ssa_def *def;
4675
4676 switch (opcode) {
4677 case SpvOpPtrDiff: {
4678 /* OpPtrDiff returns the difference in number of elements (not byte offset). */
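/* For example, two pointers one vec4 apart in an array of vec4s differ
 * by 1, not by 16 bytes.
 */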
4679 unsigned elem_size, elem_align;
4680 glsl_get_natural_size_align_bytes(type1->deref->type,
4681 &elem_size, &elem_align);
4682
4683 def = nir_build_addr_isub(&b->nb,
4684 vtn_ssa_value(b, w[3])->def,
4685 vtn_ssa_value(b, w[4])->def,
4686 addr_format);
4687 def = nir_idiv(&b->nb, def, nir_imm_intN_t(&b->nb, elem_size, def->bit_size));
4688 def = nir_i2i(&b->nb, def, glsl_get_bit_size(type));
4689 break;
4690 }
4691
4692 case SpvOpPtrEqual:
4693 case SpvOpPtrNotEqual: {
4694 def = nir_build_addr_ieq(&b->nb,
4695 vtn_ssa_value(b, w[3])->def,
4696 vtn_ssa_value(b, w[4])->def,
4697 addr_format);
4698 if (opcode == SpvOpPtrNotEqual)
4699 def = nir_inot(&b->nb, def);
4700 break;
4701 }
4702
4703 default:
4704 unreachable("Invalid ptr operation");
4705 }
4706
4707 struct vtn_ssa_value *ssa_value = vtn_create_ssa_value(b, type);
4708 ssa_value->def = def;
4709 vtn_push_ssa(b, w[2], vtn_type, ssa_value);
4710 }
4711
4712 static bool
4713 vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
4714 const uint32_t *w, unsigned count)
4715 {
4716 switch (opcode) {
4717 case SpvOpLabel:
4718 break;
4719
4720 case SpvOpLoopMerge:
4721 case SpvOpSelectionMerge:
4722 /* This is handled by cfg pre-pass and walk_blocks */
4723 break;
4724
4725 case SpvOpUndef: {
4726 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
4727 val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
4728 break;
4729 }
4730
4731 case SpvOpExtInst:
4732 vtn_handle_extension(b, opcode, w, count);
4733 break;
4734
4735 case SpvOpVariable:
4736 case SpvOpLoad:
4737 case SpvOpStore:
4738 case SpvOpCopyMemory:
4739 case SpvOpCopyMemorySized:
4740 case SpvOpAccessChain:
4741 case SpvOpPtrAccessChain:
4742 case SpvOpInBoundsAccessChain:
4743 case SpvOpInBoundsPtrAccessChain:
4744 case SpvOpArrayLength:
4745 case SpvOpConvertPtrToU:
4746 case SpvOpConvertUToPtr:
4747 vtn_handle_variables(b, opcode, w, count);
4748 break;
4749
4750 case SpvOpFunctionCall:
4751 vtn_handle_function_call(b, opcode, w, count);
4752 break;
4753
4754 case SpvOpSampledImage:
4755 case SpvOpImage:
4756 case SpvOpImageSampleImplicitLod:
4757 case SpvOpImageSampleExplicitLod:
4758 case SpvOpImageSampleDrefImplicitLod:
4759 case SpvOpImageSampleDrefExplicitLod:
4760 case SpvOpImageSampleProjImplicitLod:
4761 case SpvOpImageSampleProjExplicitLod:
4762 case SpvOpImageSampleProjDrefImplicitLod:
4763 case SpvOpImageSampleProjDrefExplicitLod:
4764 case SpvOpImageFetch:
4765 case SpvOpImageGather:
4766 case SpvOpImageDrefGather:
4767 case SpvOpImageQuerySizeLod:
4768 case SpvOpImageQueryLod:
4769 case SpvOpImageQueryLevels:
4770 case SpvOpImageQuerySamples:
4771 vtn_handle_texture(b, opcode, w, count);
4772 break;
4773
4774 case SpvOpImageRead:
4775 case SpvOpImageWrite:
4776 case SpvOpImageTexelPointer:
4777 vtn_handle_image(b, opcode, w, count);
4778 break;
4779
4780 case SpvOpImageQuerySize: {
4781 struct vtn_pointer *image =
4782 vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
4783 if (glsl_type_is_image(image->type->type)) {
4784 vtn_handle_image(b, opcode, w, count);
4785 } else {
4786 vtn_assert(glsl_type_is_sampler(image->type->type));
4787 vtn_handle_texture(b, opcode, w, count);
4788 }
4789 break;
4790 }
4791
4792 case SpvOpAtomicLoad:
4793 case SpvOpAtomicExchange:
4794 case SpvOpAtomicCompareExchange:
4795 case SpvOpAtomicCompareExchangeWeak:
4796 case SpvOpAtomicIIncrement:
4797 case SpvOpAtomicIDecrement:
4798 case SpvOpAtomicIAdd:
4799 case SpvOpAtomicISub:
4800 case SpvOpAtomicSMin:
4801 case SpvOpAtomicUMin:
4802 case SpvOpAtomicSMax:
4803 case SpvOpAtomicUMax:
4804 case SpvOpAtomicAnd:
4805 case SpvOpAtomicOr:
4806 case SpvOpAtomicXor: {
4807 struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
4808 if (pointer->value_type == vtn_value_type_image_pointer) {
4809 vtn_handle_image(b, opcode, w, count);
4810 } else {
4811 vtn_assert(pointer->value_type == vtn_value_type_pointer);
4812 vtn_handle_atomics(b, opcode, w, count);
4813 }
4814 break;
4815 }
4816
4817 case SpvOpAtomicStore: {
4818 struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
4819 if (pointer->value_type == vtn_value_type_image_pointer) {
4820 vtn_handle_image(b, opcode, w, count);
4821 } else {
4822 vtn_assert(pointer->value_type == vtn_value_type_pointer);
4823 vtn_handle_atomics(b, opcode, w, count);
4824 }
4825 break;
4826 }
4827
4828 case SpvOpSelect:
4829 vtn_handle_select(b, opcode, w, count);
4830 break;
4831
4832 case SpvOpSNegate:
4833 case SpvOpFNegate:
4834 case SpvOpNot:
4835 case SpvOpAny:
4836 case SpvOpAll:
4837 case SpvOpConvertFToU:
4838 case SpvOpConvertFToS:
4839 case SpvOpConvertSToF:
4840 case SpvOpConvertUToF:
4841 case SpvOpUConvert:
4842 case SpvOpSConvert:
4843 case SpvOpFConvert:
4844 case SpvOpQuantizeToF16:
4845 case SpvOpPtrCastToGeneric:
4846 case SpvOpGenericCastToPtr:
4847 case SpvOpIsNan:
4848 case SpvOpIsInf:
4849 case SpvOpIsFinite:
4850 case SpvOpIsNormal:
4851 case SpvOpSignBitSet:
4852 case SpvOpLessOrGreater:
4853 case SpvOpOrdered:
4854 case SpvOpUnordered:
4855 case SpvOpIAdd:
4856 case SpvOpFAdd:
4857 case SpvOpISub:
4858 case SpvOpFSub:
4859 case SpvOpIMul:
4860 case SpvOpFMul:
4861 case SpvOpUDiv:
4862 case SpvOpSDiv:
4863 case SpvOpFDiv:
4864 case SpvOpUMod:
4865 case SpvOpSRem:
4866 case SpvOpSMod:
4867 case SpvOpFRem:
4868 case SpvOpFMod:
4869 case SpvOpVectorTimesScalar:
4870 case SpvOpDot:
4871 case SpvOpIAddCarry:
4872 case SpvOpISubBorrow:
4873 case SpvOpUMulExtended:
4874 case SpvOpSMulExtended:
4875 case SpvOpShiftRightLogical:
4876 case SpvOpShiftRightArithmetic:
4877 case SpvOpShiftLeftLogical:
4878 case SpvOpLogicalEqual:
4879 case SpvOpLogicalNotEqual:
4880 case SpvOpLogicalOr:
4881 case SpvOpLogicalAnd:
4882 case SpvOpLogicalNot:
4883 case SpvOpBitwiseOr:
4884 case SpvOpBitwiseXor:
4885 case SpvOpBitwiseAnd:
4886 case SpvOpIEqual:
4887 case SpvOpFOrdEqual:
4888 case SpvOpFUnordEqual:
4889 case SpvOpINotEqual:
4890 case SpvOpFOrdNotEqual:
4891 case SpvOpFUnordNotEqual:
4892 case SpvOpULessThan:
4893 case SpvOpSLessThan:
4894 case SpvOpFOrdLessThan:
4895 case SpvOpFUnordLessThan:
4896 case SpvOpUGreaterThan:
4897 case SpvOpSGreaterThan:
4898 case SpvOpFOrdGreaterThan:
4899 case SpvOpFUnordGreaterThan:
4900 case SpvOpULessThanEqual:
4901 case SpvOpSLessThanEqual:
4902 case SpvOpFOrdLessThanEqual:
4903 case SpvOpFUnordLessThanEqual:
4904 case SpvOpUGreaterThanEqual:
4905 case SpvOpSGreaterThanEqual:
4906 case SpvOpFOrdGreaterThanEqual:
4907 case SpvOpFUnordGreaterThanEqual:
4908 case SpvOpDPdx:
4909 case SpvOpDPdy:
4910 case SpvOpFwidth:
4911 case SpvOpDPdxFine:
4912 case SpvOpDPdyFine:
4913 case SpvOpFwidthFine:
4914 case SpvOpDPdxCoarse:
4915 case SpvOpDPdyCoarse:
4916 case SpvOpFwidthCoarse:
4917 case SpvOpBitFieldInsert:
4918 case SpvOpBitFieldSExtract:
4919 case SpvOpBitFieldUExtract:
4920 case SpvOpBitReverse:
4921 case SpvOpBitCount:
4922 case SpvOpTranspose:
4923 case SpvOpOuterProduct:
4924 case SpvOpMatrixTimesScalar:
4925 case SpvOpVectorTimesMatrix:
4926 case SpvOpMatrixTimesVector:
4927 case SpvOpMatrixTimesMatrix:
4928 case SpvOpUCountLeadingZerosINTEL:
4929 case SpvOpUCountTrailingZerosINTEL:
4930 case SpvOpAbsISubINTEL:
4931 case SpvOpAbsUSubINTEL:
4932 case SpvOpIAddSatINTEL:
4933 case SpvOpUAddSatINTEL:
4934 case SpvOpIAverageINTEL:
4935 case SpvOpUAverageINTEL:
4936 case SpvOpIAverageRoundedINTEL:
4937 case SpvOpUAverageRoundedINTEL:
4938 case SpvOpISubSatINTEL:
4939 case SpvOpUSubSatINTEL:
4940 case SpvOpIMul32x16INTEL:
4941 case SpvOpUMul32x16INTEL:
4942 vtn_handle_alu(b, opcode, w, count);
4943 break;
4944
4945 case SpvOpBitcast:
4946 vtn_handle_bitcast(b, w, count);
4947 break;
4948
4949 case SpvOpVectorExtractDynamic:
4950 case SpvOpVectorInsertDynamic:
4951 case SpvOpVectorShuffle:
4952 case SpvOpCompositeConstruct:
4953 case SpvOpCompositeExtract:
4954 case SpvOpCompositeInsert:
4955 case SpvOpCopyLogical:
4956 case SpvOpCopyObject:
4957 vtn_handle_composite(b, opcode, w, count);
4958 break;
4959
4960 case SpvOpEmitVertex:
4961 case SpvOpEndPrimitive:
4962 case SpvOpEmitStreamVertex:
4963 case SpvOpEndStreamPrimitive:
4964 case SpvOpControlBarrier:
4965 case SpvOpMemoryBarrier:
4966 vtn_handle_barrier(b, opcode, w, count);
4967 break;
4968
4969 case SpvOpGroupNonUniformElect:
4970 case SpvOpGroupNonUniformAll:
4971 case SpvOpGroupNonUniformAny:
4972 case SpvOpGroupNonUniformAllEqual:
4973 case SpvOpGroupNonUniformBroadcast:
4974 case SpvOpGroupNonUniformBroadcastFirst:
4975 case SpvOpGroupNonUniformBallot:
4976 case SpvOpGroupNonUniformInverseBallot:
4977 case SpvOpGroupNonUniformBallotBitExtract:
4978 case SpvOpGroupNonUniformBallotBitCount:
4979 case SpvOpGroupNonUniformBallotFindLSB:
4980 case SpvOpGroupNonUniformBallotFindMSB:
4981 case SpvOpGroupNonUniformShuffle:
4982 case SpvOpGroupNonUniformShuffleXor:
4983 case SpvOpGroupNonUniformShuffleUp:
4984 case SpvOpGroupNonUniformShuffleDown:
4985 case SpvOpGroupNonUniformIAdd:
4986 case SpvOpGroupNonUniformFAdd:
4987 case SpvOpGroupNonUniformIMul:
4988 case SpvOpGroupNonUniformFMul:
4989 case SpvOpGroupNonUniformSMin:
4990 case SpvOpGroupNonUniformUMin:
4991 case SpvOpGroupNonUniformFMin:
4992 case SpvOpGroupNonUniformSMax:
4993 case SpvOpGroupNonUniformUMax:
4994 case SpvOpGroupNonUniformFMax:
4995 case SpvOpGroupNonUniformBitwiseAnd:
4996 case SpvOpGroupNonUniformBitwiseOr:
4997 case SpvOpGroupNonUniformBitwiseXor:
4998 case SpvOpGroupNonUniformLogicalAnd:
4999 case SpvOpGroupNonUniformLogicalOr:
5000 case SpvOpGroupNonUniformLogicalXor:
5001 case SpvOpGroupNonUniformQuadBroadcast:
5002 case SpvOpGroupNonUniformQuadSwap:
5003 case SpvOpGroupAll:
5004 case SpvOpGroupAny:
5005 case SpvOpGroupBroadcast:
5006 case SpvOpGroupIAdd:
5007 case SpvOpGroupFAdd:
5008 case SpvOpGroupFMin:
5009 case SpvOpGroupUMin:
5010 case SpvOpGroupSMin:
5011 case SpvOpGroupFMax:
5012 case SpvOpGroupUMax:
5013 case SpvOpGroupSMax:
5014 case SpvOpSubgroupBallotKHR:
5015 case SpvOpSubgroupFirstInvocationKHR:
5016 case SpvOpSubgroupReadInvocationKHR:
5017 case SpvOpSubgroupAllKHR:
5018 case SpvOpSubgroupAnyKHR:
5019 case SpvOpSubgroupAllEqualKHR:
5020 case SpvOpGroupIAddNonUniformAMD:
5021 case SpvOpGroupFAddNonUniformAMD:
5022 case SpvOpGroupFMinNonUniformAMD:
5023 case SpvOpGroupUMinNonUniformAMD:
5024 case SpvOpGroupSMinNonUniformAMD:
5025 case SpvOpGroupFMaxNonUniformAMD:
5026 case SpvOpGroupUMaxNonUniformAMD:
5027 case SpvOpGroupSMaxNonUniformAMD:
5028 vtn_handle_subgroup(b, opcode, w, count);
5029 break;
5030
5031 case SpvOpPtrDiff:
5032 case SpvOpPtrEqual:
5033 case SpvOpPtrNotEqual:
5034 vtn_handle_ptr(b, opcode, w, count);
5035 break;
5036
5037 case SpvOpBeginInvocationInterlockEXT:
5038 vtn_emit_barrier(b, nir_intrinsic_begin_invocation_interlock);
5039 break;
5040
5041 case SpvOpEndInvocationInterlockEXT:
5042 vtn_emit_barrier(b, nir_intrinsic_end_invocation_interlock);
5043 break;
5044
5045 case SpvOpDemoteToHelperInvocationEXT: {
5046 nir_intrinsic_instr *intrin =
5047 nir_intrinsic_instr_create(b->shader, nir_intrinsic_demote);
5048 nir_builder_instr_insert(&b->nb, &intrin->instr);
5049 break;
5050 }
5051
5052 case SpvOpIsHelperInvocationEXT: {
5053 nir_intrinsic_instr *intrin =
5054 nir_intrinsic_instr_create(b->shader, nir_intrinsic_is_helper_invocation);
5055 nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1, NULL);
5056 nir_builder_instr_insert(&b->nb, &intrin->instr);
5057
5058 struct vtn_type *res_type =
5059 vtn_value(b, w[1], vtn_value_type_type)->type;
5060 struct vtn_ssa_value *val = vtn_create_ssa_value(b, res_type->type);
5061 val->def = &intrin->dest.ssa;
5062
5063 vtn_push_ssa(b, w[2], res_type, val);
5064 break;
5065 }
5066
5067 case SpvOpReadClockKHR: {
5068 assert(vtn_constant_uint(b, w[3]) == SpvScopeSubgroup);
5069
5070 /* Operation supports two result types: uvec2 and uint64_t. The NIR
5071 * intrinsic gives uvec2, so pack the result for the other case.
5072 */
5073 nir_intrinsic_instr *intrin =
5074 nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_shader_clock);
5075 nir_ssa_dest_init(&intrin->instr, &intrin->dest, 2, 32, NULL);
5076 nir_builder_instr_insert(&b->nb, &intrin->instr);
5077
5078 struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
5079 const struct glsl_type *dest_type = type->type;
5080 nir_ssa_def *result;
5081
5082 if (glsl_type_is_vector(dest_type)) {
5083 assert(dest_type == glsl_vector_type(GLSL_TYPE_UINT, 2));
5084 result = &intrin->dest.ssa;
5085 } else {
5086 assert(glsl_type_is_scalar(dest_type));
5087 assert(glsl_get_base_type(dest_type) == GLSL_TYPE_UINT64);
5088 result = nir_pack_64_2x32(&b->nb, &intrin->dest.ssa);
5089 }
5090
5091 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
5092 val->type = type;
5093 val->ssa = vtn_create_ssa_value(b, dest_type);
5094 val->ssa->def = result;
5095 break;
5096 }
5097
5098 case SpvOpLifetimeStart:
5099 case SpvOpLifetimeStop:
5100 break;
5101
5102 default:
5103 vtn_fail_with_opcode("Unhandled opcode", opcode);
5104 }
5105
5106 return true;
5107 }
5108
5109 struct vtn_builder*
5110 vtn_create_builder(const uint32_t *words, size_t word_count,
5111 gl_shader_stage stage, const char *entry_point_name,
5112 const struct spirv_to_nir_options *options)
5113 {
5114 /* Initialize the vtn_builder object */
5115 struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
5116 struct spirv_to_nir_options *dup_options =
5117 ralloc(b, struct spirv_to_nir_options);
5118 *dup_options = *options;
5119
5120 b->spirv = words;
5121 b->spirv_word_count = word_count;
5122 b->file = NULL;
5123 b->line = -1;
5124 b->col = -1;
5125 exec_list_make_empty(&b->functions);
5126 b->entry_point_stage = stage;
5127 b->entry_point_name = entry_point_name;
5128 b->options = dup_options;
5129
5130 /*
5131 * Handle the SPIR-V header (first 5 dwords).
5132     * Can't use vtn_assert() as the setjmp(3) target isn't initialized yet.
5133 */
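/* For reference, the five header words are laid out as:
 *   words[0]: magic number (0x07230203)
 *   words[1]: SPIR-V version
 *   words[2]: generator magic (tool ID in the high 16 bits, tool
 *             version in the low 16 bits)
 *   words[3]: bound on value IDs
 *   words[4]: schema, must be 0
 */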
5134 if (word_count <= 5)
5135 goto fail;
5136
5137 if (words[0] != SpvMagicNumber) {
5138 vtn_err("words[0] was 0x%x, want 0x%x", words[0], SpvMagicNumber);
5139 goto fail;
5140 }
5141 if (words[1] < 0x10000) {
5142 vtn_err("words[1] was 0x%x, want >= 0x10000", words[1]);
5143 goto fail;
5144 }
5145
5146 uint16_t generator_id = words[2] >> 16;
5147 uint16_t generator_version = words[2];
5148
5149    /* The first GLSLang version bump actually happened 1.5 years after #179
5150     * was fixed, but this should at least let us shut the workaround off for
5151     * modern versions of GLSLang.
5152 */
5153 b->wa_glslang_179 = (generator_id == 8 && generator_version == 1);
5154
5155 /* In GLSLang commit 8297936dd6eb3, their handling of barrier() was fixed
5156 * to provide correct memory semantics on compute shader barrier()
5157 * commands. Prior to that, we need to fix them up ourselves. This
5158 * GLSLang fix caused them to bump to generator version 3.
5159 */
5160 b->wa_glslang_cs_barrier = (generator_id == 8 && generator_version < 3);
5161
5162    /* words[2] (the generator magic) was handled above */
5163 unsigned value_id_bound = words[3];
5164 if (words[4] != 0) {
5165 vtn_err("words[4] was %u, want 0", words[4]);
5166 goto fail;
5167 }
5168
5169 b->value_id_bound = value_id_bound;
5170 b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
5171
5172 return b;
5173 fail:
5174 ralloc_free(b);
5175 return NULL;
5176 }
5177
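/* OpenCL kernels take their arguments as explicit function parameters, so
 * wrap the entry point in a function that loads each argument from a
 * shader input (copying by-value Function-storage pointers into local
 * memory first) and then calls the real kernel.
 */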
5178 static nir_function *
5179 vtn_emit_kernel_entry_point_wrapper(struct vtn_builder *b,
5180 nir_function *entry_point)
5181 {
5182 vtn_assert(entry_point == b->entry_point->func->impl->function);
5183 vtn_fail_if(!entry_point->name, "entry points are required to have a name");
5184 const char *func_name =
5185 ralloc_asprintf(b->shader, "__wrapped_%s", entry_point->name);
5186
5187 /* we shouldn't have any inputs yet */
5188 vtn_assert(!entry_point->shader->num_inputs);
5189 vtn_assert(b->shader->info.stage == MESA_SHADER_KERNEL);
5190
5191 nir_function *main_entry_point = nir_function_create(b->shader, func_name);
5192 main_entry_point->impl = nir_function_impl_create(main_entry_point);
5193 nir_builder_init(&b->nb, main_entry_point->impl);
5194 b->nb.cursor = nir_after_cf_list(&main_entry_point->impl->body);
5195 b->func_param_idx = 0;
5196
5197 nir_call_instr *call = nir_call_instr_create(b->nb.shader, entry_point);
5198
5199 for (unsigned i = 0; i < entry_point->num_params; ++i) {
5200 struct vtn_type *param_type = b->entry_point->func->type->params[i];
5201
5202 /* consider all pointers to function memory to be parameters passed
5203 * by value
5204 */
5205 bool is_by_val = param_type->base_type == vtn_base_type_pointer &&
5206 param_type->storage_class == SpvStorageClassFunction;
5207
5208 /* input variable */
5209 nir_variable *in_var = rzalloc(b->nb.shader, nir_variable);
5210 in_var->data.mode = nir_var_shader_in;
5211 in_var->data.read_only = true;
5212 in_var->data.location = i;
5213
5214 if (is_by_val)
5215 in_var->type = param_type->deref->type;
5216 else
5217 in_var->type = param_type->type;
5218
5219 nir_shader_add_variable(b->nb.shader, in_var);
5220 b->nb.shader->num_inputs++;
5221
5222 /* we have to copy the entire variable into function memory */
5223 if (is_by_val) {
5224 nir_variable *copy_var =
5225 nir_local_variable_create(main_entry_point->impl, in_var->type,
5226 "copy_in");
5227 nir_copy_var(&b->nb, copy_var, in_var);
5228 call->params[i] =
5229 nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->dest.ssa);
5230 } else {
5231 call->params[i] = nir_src_for_ssa(nir_load_var(&b->nb, in_var));
5232 }
5233 }
5234
5235 nir_builder_instr_insert(&b->nb, &call->instr);
5236
5237 return main_entry_point;
5238 }
5239
nir_shader *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct spirv_to_nir_options *options,
             const nir_shader_compiler_options *nir_options)
{
   const uint32_t *word_end = words + word_count;

   struct vtn_builder *b = vtn_create_builder(words, word_count,
                                              stage, entry_point_name,
                                              options);

   if (b == NULL)
      return NULL;

   /* See also _vtn_fail(): every vtn_fail*() call longjmps back here so we
    * can free the builder and bail.
    */
   if (setjmp(b->fail_jump)) {
      ralloc_free(b);
      return NULL;
   }

   /* Skip the SPIR-V header; it was handled in vtn_create_builder. */
   words += 5;

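   /* The NIR shader is ralloc-parented to the builder for now; it is
    * unparented (ralloc_steal) before the builder is freed at the end.
    */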
   b->shader = nir_shader_create(b, stage, nir_options, NULL);

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      vtn_fail("Entry point not found");
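      /* Note: vtn_fail() longjmps to the setjmp handler above, which
       * already frees b, so the cleanup below is effectively unreachable.
       */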
      ralloc_free(b);
      return NULL;
   }

   /* Set shader info defaults */
   if (stage == MESA_SHADER_GEOMETRY)
      b->shader->info.gs.invocations = 1;

   /* Parse the rounding-mode execution modes first.  They have to be
    * handled before the other execution modes since they can affect, for
    * example, the evaluation of floating-point constants.
    */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_rounding_mode_in_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

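   /* A constant decorated with the WorkgroupSize builtin overrides any
    * workgroup size specified by the execution modes.
    */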
   if (b->workgroup_size_builtin) {
      vtn_assert(b->workgroup_size_builtin->type->type ==
                 glsl_vector_type(GLSL_TYPE_UINT, 3));

      nir_const_value *const_size =
         b->workgroup_size_builtin->constant->values;

      b->shader->info.cs.local_size[0] = const_size[0].u32;
      b->shader->info.cs.local_size[1] = const_size[1].u32;
      b->shader->info.cs.local_size[2] = const_size[2].u32;
   }

   /* Set types on all vtn_values */
   vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type);

   vtn_build_cfg(b, words, word_end);

   vtn_assert(b->entry_point->value_type == vtn_value_type_function);
   b->entry_point->func->referenced = true;

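   /* Emit every function reachable from the entry point.  Emitting one
    * body can mark new callees as referenced, so iterate to a fixed point.
    */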
   bool progress;
   do {
      progress = false;
      foreach_list_typed(struct vtn_function, func, node, &b->functions) {
         if (func->referenced && !func->emitted) {
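            /* Start a fresh constant table for this function; the previous
             * one stays ralloc'd to b and is freed along with it.
             */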
            b->const_table = _mesa_pointer_hash_table_create(b);

            vtn_function_emit(b, func, vtn_handle_body_instruction);
            progress = true;
         }
      }
   } while (progress);

   vtn_assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   vtn_assert(entry_point);

   /* Post-process entry points with input parameters: kernel entry points
    * are wrapped so their parameters are fed from shader inputs.
    */
   if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL)
      entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point);

   entry_point->is_entrypoint = true;

   /* When multiple shader stages exist in the same SPIR-V module, we
    * generate input and output variables for every stage, in the same
    * NIR program.  These dead variables can be invalid NIR.  For example,
    * TCS outputs must be per-vertex arrays (or decorated 'patch'), while
    * VS output variables wouldn't be.
    *
    * To ensure we have valid NIR, we eliminate any dead inputs and outputs
    * right away.  In order to do so, we must lower any constant initializers
    * on outputs so nir_remove_dead_variables sees that they're written to.
    */
   nir_lower_constant_initializers(b->shader, nir_var_shader_out);
   nir_remove_dead_variables(b->shader,
                             nir_var_shader_in | nir_var_shader_out);

   /* We sometimes generate bogus derefs that, while never used, give the
    * validator a bit of heartburn.  Run dead code to get rid of them.
    */
   nir_opt_dce(b->shader);

   /* Unparent the shader from the vtn_builder before we delete the builder. */
   ralloc_steal(NULL, b->shader);

   nir_shader *shader = b->shader;
   ralloc_free(b);

   return shader;
}