spirv: Don't accept CPacked decoration on struct members
[mesa.git] / src/compiler/spirv/spirv_to_nir.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "nir/nir_deref.h"
#include "spirv_info.h"

#include "util/format/u_format.h"
#include "util/u_math.h"

#include <stdio.h>
#if UTIL_ARCH_BIG_ENDIAN
#include <byteswap.h>
#endif

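/* Forward a debug message to the client-provided callback, if any.  In
 * debug builds, warnings and errors are also mirrored to stderr.
 */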
void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

#ifndef NDEBUG
   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
#endif
}

void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}

static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

#ifndef NDEBUG
   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
#endif

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}

static void
vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
{
   static int idx = 0;

   char filename[1024];
   int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
                      path, prefix, idx++);
   if (len < 0 || len >= sizeof(filename))
      return;

   FILE *f = fopen(filename, "w");
   if (f == NULL)
      return;

   fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
   fclose(f);

   vtn_info("SPIR-V shader dumped to %s", filename);
}

void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}

void
_vtn_err(struct vtn_builder *b, const char *file, unsigned line,
         const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
               file, line, fmt, args);
   va_end(args);
}

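/* Report a fatal SPIR-V parsing error and abandon the translation.  This
 * never returns: it longjmps back through b->fail_jump, presumably to a
 * setjmp() placed around the top-level parse.  Setting
 * MESA_SPIRV_FAIL_DUMP_PATH dumps the offending binary for offline triage.
 */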
void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);
   va_end(args);

   const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
   if (dump_path)
      vtn_dump_shader(b, dump_path, "fail");

   longjmp(b->fail_jump, 1);
}

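/* Builds a vtn_ssa_value tree of NIR undef instructions that mirrors the
 * shape of the given type.
 */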
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}

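/* Materializes a nir_constant as a tree of load_const instructions inserted
 * at the top of the current impl.  b->const_table is consulted first so a
 * previously materialized constant can be reused.
 */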
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);

   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(type);
      nir_load_const_instr *load =
         nir_load_const_instr_create(b->shader, num_components, bit_size);

      memcpy(load->value, constant->values,
             sizeof(nir_const_value) * num_components);

      nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
      val->def = &load->def;
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++) {
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                elem_type);
         }
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                elem_type);
         }
      }
   }

   return val;
}

struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->type->type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer:
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;

   default:
      vtn_fail("Invalid type for an SSA value");
   }
}

struct vtn_value *
vtn_push_ssa_value(struct vtn_builder *b, uint32_t value_id,
                   struct vtn_ssa_value *ssa)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);

   /* See vtn_create_ssa_value */
   vtn_fail_if(ssa->type != glsl_get_bare_type(type->type),
               "Type mismatch for SPIR-V SSA value");

   struct vtn_value *val;
   if (type->base_type == vtn_base_type_pointer) {
      val = vtn_push_pointer(b, value_id, vtn_pointer_from_ssa(b, ssa->def, type));
   } else {
      /* Don't trip the value_type_ssa check in vtn_push_value */
      val = vtn_push_value(b, value_id, vtn_value_type_invalid);
      val->value_type = vtn_value_type_ssa;
      val->ssa = ssa;
   }

   return val;
}

nir_ssa_def *
vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_ssa_value *ssa = vtn_ssa_value(b, value_id);
   vtn_fail_if(!glsl_type_is_vector_or_scalar(ssa->type),
               "Expected a vector or scalar type");
   return ssa->def;
}

struct vtn_value *
vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id, nir_ssa_def *def)
{
   /* Types for all SPIR-V SSA values are set as part of a pre-pass so the
    * type will be valid by the time we get here.
    */
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_fail_if(def->num_components != glsl_get_vector_elements(type->type) ||
               def->bit_size != glsl_get_bit_size(type->type),
               "Mismatch between NIR and SPIR-V type.");
   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);
   ssa->def = def;
   return vtn_push_ssa_value(b, value_id, ssa);
}

static nir_deref_instr *
vtn_get_image(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_image);
   return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
                               nir_var_uniform, type->glsl_image, 0);
}

static void
vtn_push_image(struct vtn_builder *b, uint32_t value_id,
               nir_deref_instr *deref, bool propagate_non_uniform)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_image);
   struct vtn_value *value = vtn_push_nir_ssa(b, value_id, &deref->dest.ssa);
   value->propagated_non_uniform = propagate_non_uniform;
}

static nir_deref_instr *
vtn_get_sampler(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampler);
   return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
                               nir_var_uniform, glsl_bare_sampler_type(), 0);
}

nir_ssa_def *
vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
                             struct vtn_sampled_image si)
{
   return nir_vec2(&b->nb, &si.image->dest.ssa, &si.sampler->dest.ssa);
}

static void
vtn_push_sampled_image(struct vtn_builder *b, uint32_t value_id,
                       struct vtn_sampled_image si, bool propagate_non_uniform)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampled_image);
   struct vtn_value *value = vtn_push_nir_ssa(b, value_id,
                                              vtn_sampled_image_to_nir_ssa(b, si));
   value->propagated_non_uniform = propagate_non_uniform;
}

static struct vtn_sampled_image
vtn_get_sampled_image(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampled_image);
   nir_ssa_def *si_vec2 = vtn_get_nir_ssa(b, value_id);

   struct vtn_sampled_image si = { NULL, };
   si.image = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 0),
                                   nir_var_uniform,
                                   type->image->glsl_image, 0);
   si.sampler = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 1),
                                     nir_var_uniform,
                                     glsl_bare_sampler_type(), 0);
   return si;
}

static const char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   /* From the SPIR-V spec:
    *
    *    "A string is interpreted as a nul-terminated stream of characters.
    *    The character set is Unicode in the UTF-8 encoding scheme. The UTF-8
    *    octets (8-bit bytes) are packed four per word, following the
    *    little-endian convention (i.e., the first octet is in the
    *    lowest-order 8 bits of the word). The final word contains the
    *    string’s nul-termination character (0), and all contents past the
    *    end of the string in the final word are padded with 0."
    *
    * On big-endian, we need to byte-swap.
    */
#if UTIL_ARCH_BIG_ENDIAN
   {
      uint32_t *copy = ralloc_array(b, uint32_t, word_count);
      for (unsigned i = 0; i < word_count; i++)
         copy[i] = bswap_32(words[i]);
      words = copy;
   }
#endif

   const char *str = (char *)words;
   const char *end = memchr(str, 0, word_count * 4);
   vtn_fail_if(end == NULL, "String is not null-terminated");

   if (words_used)
      *words_used = DIV_ROUND_UP(end - str + 1, sizeof(*words));

   return str;
}

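/* Walks the instruction stream between start and end, invoking handler for
 * each instruction.  OpLine/OpNoLine are consumed here to keep the source
 * location in the builder current.  If the handler returns false, iteration
 * stops early and the pointer to the offending instruction is returned.
 */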
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->spirv_offset = 0;
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   assert(w == end);
   return w;
}

static bool
vtn_handle_non_semantic_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                    const uint32_t *w, unsigned count)
{
   /* Do nothing. */
   return true;
}

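/* OpExtInstImport binds an extended instruction set to an id and selects a
 * handler for it; OpExtInst dispatches to the handler recorded for that id.
 */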
static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      const char *ext = vtn_string_literal(b, &w[2], count - 2, NULL);
      if (strcmp(ext, "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0)
                 && (b->options && b->options->caps.amd_gcn_shader)) {
         val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_ballot") == 0)
                 && (b->options && b->options->caps.amd_shader_ballot)) {
         val->ext_handler = vtn_handle_amd_shader_ballot_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
                 && (b->options && b->options->caps.amd_trinary_minmax)) {
         val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_explicit_vertex_parameter") == 0)
                 && (b->options && b->options->caps.amd_shader_explicit_vertex_parameter)) {
         val->ext_handler = vtn_handle_amd_shader_explicit_vertex_parameter_instruction;
      } else if (strcmp(ext, "OpenCL.std") == 0) {
         val->ext_handler = vtn_handle_opencl_instruction;
      } else if (strstr(ext, "NonSemantic.") == ext) {
         val->ext_handler = vtn_handle_non_semantic_instruction;
      } else {
         vtn_fail("Unsupported extension: %s", ext);
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      vtn_assert(handled);
      break;
   }

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
}

static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_fail_if(value->value_type != vtn_value_type_type ||
                     value->type->base_type != vtn_base_type_struct,
                     "OpMemberDecorate and OpGroupMemberDecorate are only "
                     "allowed on OpTypeStruct");
         /* This means we haven't recursed yet */
         assert(value == base_value);

         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;

         vtn_fail_if(member >= base_value->type->length,
                     "OpMemberDecorate specifies member %d but the "
                     "OpTypeStruct has only %u members",
                     member, base_value->type->length);
      } else {
         /* Not a decoration */
         assert(dec->scope == VTN_DEC_EXECUTION_MODE);
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}

/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}

void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}

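/* Records decorations, member decorations, and execution modes on the
 * target value.  Nothing is interpreted at this point; decorations are just
 * linked into a per-value list that is replayed later through
 * vtn_foreach_decoration() and vtn_foreach_execution_mode().
 */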
void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
   case SpvOpExecutionMode:
   case SpvOpExecutionModeId: {
      struct vtn_value *val = vtn_untyped_value(b, target);

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
      case SpvOpDecorateId:
      case SpvOpDecorateString:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
      case SpvOpMemberDecorateString:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
                     "Member argument of OpMemberDecorate too large");
         break;
      case SpvOpExecutionMode:
      case SpvOpExecutionModeId:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->operands = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
            vtn_fail_if(dec->scope < 0, /* Check for overflow */
                        "Member argument of OpGroupMemberDecorate too large");
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}

struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};

/**
 * Returns true if the given type contains a struct decorated Block or
 * BufferBlock
 */
bool
vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type)
{
   switch (type->base_type) {
   case vtn_base_type_array:
      return vtn_type_contains_block(b, type->array_element);
   case vtn_base_type_struct:
      if (type->block || type->buffer_block)
         return true;
      for (unsigned i = 0; i < type->length; i++) {
         if (vtn_type_contains_block(b, type->members[i]))
            return true;
      }
      return false;
   default:
      return false;
   }
}

/** Returns true if two types are "compatible", i.e. you can do an OpLoad,
 * OpStore, or OpCopyMemory between them without breaking anything.
 * Technically, the SPIR-V rules require the exact same type ID but this lets
 * us internally be a bit looser.
 */
bool
vtn_types_compatible(struct vtn_builder *b,
                     struct vtn_type *t1, struct vtn_type *t2)
{
   if (t1->id == t2->id)
      return true;

   if (t1->base_type != t2->base_type)
      return false;

   switch (t1->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      return t1->type == t2->type;

   case vtn_base_type_array:
      return t1->length == t2->length &&
             vtn_types_compatible(b, t1->array_element, t2->array_element);

   case vtn_base_type_pointer:
      return vtn_types_compatible(b, t1->deref, t2->deref);

   case vtn_base_type_struct:
      if (t1->length != t2->length)
         return false;

      for (unsigned i = 0; i < t1->length; i++) {
         if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))
            return false;
      }
      return true;

   case vtn_base_type_function:
      /* This case shouldn't get hit since you can't copy around function
       * types.  Just require them to be identical.
       */
      return false;
   }

   vtn_fail("Invalid base type");
}

struct vtn_type *
vtn_type_without_array(struct vtn_type *type)
{
   while (type->base_type == vtn_base_type_array)
      type = type->array_element;
   return type;
}

/* Does a shallow copy of a vtn_type: the member/param arrays are duplicated
 * so they can be modified, but the types they point to are shared.
 */
static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}

static const struct glsl_type *
wrap_type_in_array(const struct glsl_type *type,
                   const struct glsl_type *array_type)
{
   if (!glsl_type_is_array(array_type))
      return type;

   const struct glsl_type *elem_type =
      wrap_type_in_array(type, glsl_get_array_element(array_type));
   return glsl_array_type(elem_type, glsl_get_length(array_type),
                          glsl_get_explicit_stride(array_type));
}

static bool
vtn_type_needs_explicit_layout(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   /* For OpenCL we never want to strip the info from the types, and it makes
    * type comparisons easier in later stages.
    */
   if (b->options->environment == NIR_SPIRV_OPENCL)
      return true;

   switch (mode) {
   case vtn_variable_mode_input:
   case vtn_variable_mode_output:
      /* Layout decorations are kept because we need offsets for XFB arrays
       * of blocks.
       */
      return b->shader->info.has_transform_feedback_varyings;

   case vtn_variable_mode_ssbo:
   case vtn_variable_mode_phys_ssbo:
   case vtn_variable_mode_ubo:
      return true;

   default:
      return false;
   }
}

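/* Returns the glsl_type to hand to NIR for a vtn_type, specialized for the
 * variable mode it will be used with: atomic counters get atomic_uint,
 * uniform images/samplers get their bare GLSL sampler types, and explicit
 * layout information is stripped whenever the mode doesn't need it.
 */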
const struct glsl_type *
vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
                      enum vtn_variable_mode mode)
{
   if (mode == vtn_variable_mode_atomic_counter) {
      vtn_fail_if(glsl_without_array(type->type) != glsl_uint_type(),
                  "Variables in the AtomicCounter storage class should be "
                  "(possibly arrays of arrays of) uint.");
      return wrap_type_in_array(glsl_atomic_uint_type(), type->type);
   }

   if (mode == vtn_variable_mode_uniform) {
      switch (type->base_type) {
      case vtn_base_type_array: {
         const struct glsl_type *elem_type =
            vtn_type_get_nir_type(b, type->array_element, mode);

         return glsl_array_type(elem_type, type->length,
                                glsl_get_explicit_stride(type->type));
      }

      case vtn_base_type_struct: {
         bool need_new_struct = false;
         const uint32_t num_fields = type->length;
         NIR_VLA(struct glsl_struct_field, fields, num_fields);
         for (unsigned i = 0; i < num_fields; i++) {
            fields[i] = *glsl_get_struct_field_data(type->type, i);
            const struct glsl_type *field_nir_type =
               vtn_type_get_nir_type(b, type->members[i], mode);
            if (fields[i].type != field_nir_type) {
               fields[i].type = field_nir_type;
               need_new_struct = true;
            }
         }
         if (need_new_struct) {
            if (glsl_type_is_interface(type->type)) {
               return glsl_interface_type(fields, num_fields,
                                          /* packing */ 0, false,
                                          glsl_get_type_name(type->type));
            } else {
               return glsl_struct_type(fields, num_fields,
                                       glsl_get_type_name(type->type),
                                       glsl_struct_type_is_packed(type->type));
            }
         } else {
            /* No changes, just pass it on */
            return type->type;
         }
      }

      case vtn_base_type_image:
         return type->glsl_image;

      case vtn_base_type_sampler:
         return glsl_bare_sampler_type();

      case vtn_base_type_sampled_image:
         return type->image->glsl_image;

      default:
         return type->type;
      }
   }

   /* Layout decorations are allowed but ignored in certain conditions,
    * to allow SPIR-V generators to perform type deduplication.  Discard
    * unnecessary ones when passing to NIR.
    */
   if (!vtn_type_needs_explicit_layout(b, mode))
      return glsl_get_bare_type(type->type);

   return type->type;
}

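/* Copy-on-write helper for matrix decorations: clones the struct member
 * (and any array types wrapping it) so the decoration can be applied
 * without mutating vtn_types shared with other values, then returns the
 * innermost matrix type.
 */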
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   vtn_assert(glsl_type_is_matrix(type->type));

   return type;
}

static void
vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
                            int member, enum gl_access_qualifier access)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   type->access |= access;
}

static void
array_stride_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *void_ctx)
{
   struct vtn_type *type = val->type;

   if (dec->decoration == SpvDecorationArrayStride) {
      if (vtn_type_contains_block(b, type)) {
         vtn_warn("The ArrayStride decoration cannot be applied to an array "
                  "type which contains a structure type decorated Block "
                  "or BufferBlock");
         /* Ignore the decoration */
      } else {
         vtn_fail_if(dec->operands[0] == 0, "ArrayStride must be non-zero");
         type->stride = dec->operands[0];
      }
   }
}

static void
struct_member_decoration_cb(struct vtn_builder *b,
                            UNUSED struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNonWritable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
      break;
   case SpvDecorationNonReadable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
      break;
   case SpvDecorationVolatile:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
      break;
   case SpvDecorationCoherent:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
      break;
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      ctx->fields[member].interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->operands[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->operands[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->operands[0];
      ctx->fields[member].offset = dec->operands[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
   case SpvDecorationCPacked:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}

/** Chases the array type all the way down to the tail and rewrites the
 * glsl_types to be based on the tail's glsl_type.
 */
static void
vtn_array_type_rewrite_glsl_type(struct vtn_type *type)
{
   if (type->base_type != vtn_base_type_array)
      return;

   vtn_array_type_rewrite_glsl_type(type->array_element);

   type->type = glsl_array_type(type->array_element->type,
                                type->length, type->stride);
}

/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               UNUSED struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   vtn_fail_if(member < 0,
               "The MatrixStride decoration is only allowed on members "
               "of OpTypeStruct");
   vtn_fail_if(dec->operands[0] == 0, "MatrixStride must be non-zero");

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], true);
      mat_type->array_element->type = glsl_get_column_type(mat_type->type);
   } else {
      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], false);
   }

   /* Now that we've replaced the glsl_type with a properly strided matrix
    * type, rewrite the member type so that it's an array of the proper kind
    * of glsl_type.
    */
   vtn_array_type_rewrite_glsl_type(ctx->type->members[member]);
   ctx->fields[member].type = ctx->type->members[member]->type;
}

static void
struct_block_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *ctx)
{
   if (member != -1)
      return;

   struct vtn_type *type = val->type;
   if (dec->decoration == SpvDecorationBlock)
      type->block = true;
   else if (dec->decoration == SpvDecorationBufferBlock)
      type->buffer_block = true;
}

static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, UNUSED void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1) {
      /* This should have been handled by OpTypeStruct */
      assert(val->type->base_type == vtn_base_type_struct);
      assert(member >= 0 && member < val->type->length);
      return;
   }

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      break;
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->block);
      break;
   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->buffer_block);
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationExplicitInterpAMD:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
   case SpvDecorationUserSemantic:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationStream:
      /* We don't need to do anything here; the Stream decoration is handled
       * when it is applied to a variable.  Since this is not a struct
       * member, just check that the target is a struct.
       */
      vtn_assert(type->base_type == vtn_base_type_struct);
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}

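/* Maps a SpvImageFormat onto the corresponding pipe_format, failing the
 * parse for formats we don't support.
 */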
static unsigned
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return PIPE_FORMAT_NONE;
   case SpvImageFormatRgba32f:      return PIPE_FORMAT_R32G32B32A32_FLOAT;
   case SpvImageFormatRgba16f:      return PIPE_FORMAT_R16G16B16A16_FLOAT;
   case SpvImageFormatR32f:         return PIPE_FORMAT_R32_FLOAT;
   case SpvImageFormatRgba8:        return PIPE_FORMAT_R8G8B8A8_UNORM;
   case SpvImageFormatRgba8Snorm:   return PIPE_FORMAT_R8G8B8A8_SNORM;
   case SpvImageFormatRg32f:        return PIPE_FORMAT_R32G32_FLOAT;
   case SpvImageFormatRg16f:        return PIPE_FORMAT_R16G16_FLOAT;
   case SpvImageFormatR11fG11fB10f: return PIPE_FORMAT_R11G11B10_FLOAT;
   case SpvImageFormatR16f:         return PIPE_FORMAT_R16_FLOAT;
   case SpvImageFormatRgba16:       return PIPE_FORMAT_R16G16B16A16_UNORM;
   case SpvImageFormatRgb10A2:      return PIPE_FORMAT_R10G10B10A2_UNORM;
   case SpvImageFormatRg16:         return PIPE_FORMAT_R16G16_UNORM;
   case SpvImageFormatRg8:          return PIPE_FORMAT_R8G8_UNORM;
   case SpvImageFormatR16:          return PIPE_FORMAT_R16_UNORM;
   case SpvImageFormatR8:           return PIPE_FORMAT_R8_UNORM;
   case SpvImageFormatRgba16Snorm:  return PIPE_FORMAT_R16G16B16A16_SNORM;
   case SpvImageFormatRg16Snorm:    return PIPE_FORMAT_R16G16_SNORM;
   case SpvImageFormatRg8Snorm:     return PIPE_FORMAT_R8G8_SNORM;
   case SpvImageFormatR16Snorm:     return PIPE_FORMAT_R16_SNORM;
   case SpvImageFormatR8Snorm:      return PIPE_FORMAT_R8_SNORM;
   case SpvImageFormatRgba32i:      return PIPE_FORMAT_R32G32B32A32_SINT;
   case SpvImageFormatRgba16i:      return PIPE_FORMAT_R16G16B16A16_SINT;
   case SpvImageFormatRgba8i:       return PIPE_FORMAT_R8G8B8A8_SINT;
   case SpvImageFormatR32i:         return PIPE_FORMAT_R32_SINT;
   case SpvImageFormatRg32i:        return PIPE_FORMAT_R32G32_SINT;
   case SpvImageFormatRg16i:        return PIPE_FORMAT_R16G16_SINT;
   case SpvImageFormatRg8i:         return PIPE_FORMAT_R8G8_SINT;
   case SpvImageFormatR16i:         return PIPE_FORMAT_R16_SINT;
   case SpvImageFormatR8i:          return PIPE_FORMAT_R8_SINT;
   case SpvImageFormatRgba32ui:     return PIPE_FORMAT_R32G32B32A32_UINT;
   case SpvImageFormatRgba16ui:     return PIPE_FORMAT_R16G16B16A16_UINT;
   case SpvImageFormatRgba8ui:      return PIPE_FORMAT_R8G8B8A8_UINT;
   case SpvImageFormatR32ui:        return PIPE_FORMAT_R32_UINT;
   case SpvImageFormatRgb10a2ui:    return PIPE_FORMAT_R10G10B10A2_UINT;
   case SpvImageFormatRg32ui:       return PIPE_FORMAT_R32G32_UINT;
   case SpvImageFormatRg16ui:       return PIPE_FORMAT_R16G16_UINT;
   case SpvImageFormatRg8ui:        return PIPE_FORMAT_R8G8_UINT;
   case SpvImageFormatR16ui:        return PIPE_FORMAT_R16_UINT;
   case SpvImageFormatR8ui:         return PIPE_FORMAT_R8_UINT;
   default:
      vtn_fail("Invalid image format: %s (%u)",
               spirv_imageformat_to_string(format), format);
   }
}

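/* Translates one OpType* instruction into a vtn_type and, where applicable,
 * a glsl_type.  Pointer types are handled specially so that
 * OpTypeForwardPointer forward declarations resolve to the same value.
 */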
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = NULL;

   /* In order to properly handle forward declarations, we have to defer
    * allocation for pointer types.
    */
   if (opcode != SpvOpTypePointer && opcode != SpvOpTypeForwardPointer) {
      val = vtn_push_value(b, w[1], vtn_value_type_type);
      vtn_fail_if(val->type != NULL,
                  "Only pointers can have forward declarations");
      val->type = rzalloc(b, struct vtn_type);
      val->type->id = w[1];
   }

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      val->type->length = 1;
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 64:
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
         break;
      case 32:
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
         break;
      case 16:
         val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         break;
      case 8:
         val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
         break;
      default:
         vtn_fail("Invalid int bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 16:
         val->type->type = glsl_float16_t_type();
         break;
      case 32:
         val->type->type = glsl_float_type();
         break;
      case 64:
         val->type->type = glsl_double_type();
         break;
      default:
         vtn_fail("Invalid float bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned elems = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_scalar,
                  "Base type for OpTypeVector must be a scalar");
      vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16),
                  "Invalid component count for OpTypeVector");

      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->length = elems;
      val->type->stride = glsl_type_is_boolean(val->type->type)
         ? 4 : glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned columns = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_vector,
                  "Base type for OpTypeMatrix must be a vector");
      vtn_fail_if(columns < 2 || columns > 4,
                  "Invalid column count for OpTypeMatrix");

      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      vtn_fail_if(glsl_type_is_error(val->type->type),
                  "Unsupported base type for OpTypeMatrix");
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element = vtn_get_type(b, w[2]);

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length = vtn_constant_uint(b, w[3]);
      }

      val->type->base_type = vtn_base_type_array;
      val->type->array_element = array_element;
      if (b->shader->info.stage == MESA_SHADER_KERNEL)
         val->type->stride = glsl_get_cl_size(array_element->type);

      vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
      val->type->type = glsl_array_type(array_element->type, val->type->length,
                                        val->type->stride);
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);
      val->type->packed = false;

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] = vtn_get_type(b, w[i + 2]);
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
            .offset = -1,
         };
      }

      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         unsigned offset = 0;
         for (unsigned i = 0; i < num_fields; i++) {
            offset = align(offset, glsl_get_cl_alignment(fields[i].type));
            fields[i].offset = offset;
            offset += glsl_get_cl_size(fields[i].type);
         }
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL);

      const char *name = val->name;

      if (val->type->block || val->type->buffer_block) {
         /* Packing will be ignored since types coming from SPIR-V are
          * explicitly laid out.
          */
         val->type->type = glsl_interface_type(fields, num_fields,
                                               /* packing */ 0, false,
                                               name ? name : "block");
      } else {
         val->type->type = glsl_struct_type(fields, num_fields,
                                            name ? name : "struct", false);
      }
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_get_type(b, w[2]);

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < num_params; i++) {
         val->type->params[i] = vtn_get_type(b, w[i + 3]);
      }
      break;
   }

   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer: {
      /* We can't blindly push the value because it might be a forward
       * declaration.
       */
      val = vtn_untyped_value(b, w[1]);

      SpvStorageClass storage_class = w[2];

      if (val->value_type == vtn_value_type_invalid) {
         val->value_type = vtn_value_type_type;
         val->type = rzalloc(b, struct vtn_type);
         val->type->id = w[1];
         val->type->base_type = vtn_base_type_pointer;
         val->type->storage_class = storage_class;

         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         enum vtn_variable_mode mode = vtn_storage_class_to_mode(
            b, storage_class, NULL, NULL);
         val->type->type = nir_address_format_to_glsl_type(
            vtn_mode_to_address_format(b, mode));
      } else {
         vtn_fail_if(val->type->storage_class != storage_class,
                     "The storage classes of an OpTypePointer and any "
                     "OpTypeForwardPointers that provide forward "
                     "declarations of it must match.");
      }

      if (opcode == SpvOpTypePointer) {
         vtn_fail_if(val->type->deref != NULL,
                     "While OpTypeForwardPointer can be used to provide a "
                     "forward declaration of a pointer, OpTypePointer can "
                     "only be used once for a given id.");

         val->type->deref = vtn_get_type(b, w[3]);

         /* Only certain storage classes use ArrayStride.  The others (in
          * particular Workgroup) are expected to be laid out by the driver.
          */
         switch (storage_class) {
         case SpvStorageClassUniform:
         case SpvStorageClassPushConstant:
         case SpvStorageClassStorageBuffer:
         case SpvStorageClassPhysicalStorageBuffer:
            vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
            break;
         default:
            /* Nothing to do. */
            break;
         }

         if (b->physical_ptrs) {
            switch (storage_class) {
            case SpvStorageClassFunction:
            case SpvStorageClassWorkgroup:
            case SpvStorageClassCrossWorkgroup:
            case SpvStorageClassUniformConstant:
               val->type->stride = align(glsl_get_cl_size(val->type->deref->type),
                                         glsl_get_cl_alignment(val->type->deref->type));
               break;
            default:
               break;
            }
         }
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      /* Images are represented in NIR as a scalar SSA value that is the
       * result of a deref instruction.  An OpLoad on an OpTypeImage pointer
       * from UniformConstant memory just takes the NIR deref from the pointer
       * and turns it into an SSA value.
       */
      val->type->type = nir_address_format_to_glsl_type(
         vtn_mode_to_address_format(b, vtn_variable_mode_function));

      const struct vtn_type *sampled_type = vtn_get_type(b, w[2]);
      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         vtn_fail_if(sampled_type->base_type != vtn_base_type_void,
                     "Sampled type of OpTypeImage must be void for kernels");
      } else {
         vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                     glsl_get_bit_size(sampled_type->type) != 32,
                     "Sampled type of OpTypeImage must be a 32-bit scalar");
      }

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         vtn_fail("Invalid SPIR-V image dimensionality: %s (%u)",
                  spirv_dim_to_string((SpvDim)w[3]), w[3]);
      }

      /* w[4]: as per the Vulkan spec "Validation Rules within a Module",
       * the “Depth” operand of OpTypeImage is ignored.
       */
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else if (b->shader->info.stage == MESA_SHADER_KERNEL)
         /* Per the CL C spec: if no qualifier is provided, read_only is assumed. */
         val->type->access_qualifier = SpvAccessQualifierReadOnly;
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            vtn_fail("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(b, format);

      enum glsl_base_type sampled_base_type =
         glsl_get_base_type(sampled_type->type);
      if (sampled == 1) {
         val->type->glsl_image = glsl_sampler_type(dim, false, is_array,
                                                   sampled_base_type);
      } else if (sampled == 2) {
         val->type->glsl_image = glsl_image_type(dim, is_array,
                                                 sampled_base_type);
      } else if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         val->type->glsl_image = glsl_image_type(dim, is_array,
                                                 GLSL_TYPE_VOID);
      } else {
         vtn_fail("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage: {
      val->type->base_type = vtn_base_type_sampled_image;
      val->type->image = vtn_get_type(b, w[2]);

      /* Sampled images are represented in NIR as a vec2 SSA value where each
       * component is the result of a deref instruction.  The first component
       * is the image and the second is the sampler.  An OpLoad on an
       * OpTypeSampledImage pointer from UniformConstant memory just takes
       * the NIR deref from the pointer and duplicates it to both vector
       * components.
       */
      nir_address_format addr_format =
         vtn_mode_to_address_format(b, vtn_variable_mode_function);
      assert(nir_address_format_num_components(addr_format) == 1);
      unsigned bit_size = nir_address_format_bit_size(addr_format);
      assert(bit_size == 32 || bit_size == 64);

      enum glsl_base_type base_type =
         bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64;
      val->type->type = glsl_vector_type(base_type, 2);
      break;
   }

   case SpvOpTypeSampler:
      val->type->base_type = vtn_base_type_sampler;

      /* Samplers are represented in NIR as a scalar SSA value that is the
       * result of a deref instruction.  An OpLoad on an OpTypeSampler pointer
       * from UniformConstant memory just takes the NIR deref from the pointer
       * and turns it into an SSA value.
       */
      val->type->type = nir_address_format_to_glsl_type(
         vtn_mode_to_address_format(b, vtn_variable_mode_function));
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);

   if (val->type->base_type == vtn_base_type_struct &&
       (val->type->block || val->type->buffer_block)) {
      for (unsigned i = 0; i < val->type->length; i++) {
         vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]),
                     "Block and BufferBlock decorations cannot decorate a "
                     "structure type that is nested at any level inside "
                     "another structure type decorated with Block or "
                     "BufferBlock.");
      }
   }
}

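/* Builds the constant produced by OpConstantNull for a type: zero for
 * scalars and vectors, the address format's null value for pointers, and
 * element-wise null for matrices, arrays, and structs.
 */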
1719 static nir_constant *
1720 vtn_null_constant(struct vtn_builder *b, struct vtn_type *type)
1721 {
1722 nir_constant *c = rzalloc(b, nir_constant);
1723
1724 switch (type->base_type) {
1725 case vtn_base_type_scalar:
1726 case vtn_base_type_vector:
1727 /* Nothing to do here. It's already initialized to zero */
1728 break;
1729
1730 case vtn_base_type_pointer: {
1731 enum vtn_variable_mode mode = vtn_storage_class_to_mode(
1732 b, type->storage_class, type->deref, NULL);
1733 nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
1734
1735 const nir_const_value *null_value = nir_address_format_null_value(addr_format);
1736 memcpy(c->values, null_value,
1737 sizeof(nir_const_value) * nir_address_format_num_components(addr_format));
1738 break;
1739 }
1740
1741 case vtn_base_type_void:
1742 case vtn_base_type_image:
1743 case vtn_base_type_sampler:
1744 case vtn_base_type_sampled_image:
1745 case vtn_base_type_function:
1746 /* For those we have to return something but it doesn't matter what. */
1747 break;
1748
1749 case vtn_base_type_matrix:
1750 case vtn_base_type_array:
1751 vtn_assert(type->length > 0);
1752 c->num_elements = type->length;
1753 c->elements = ralloc_array(b, nir_constant *, c->num_elements);
1754
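      /* Null constants are immutable from here on, so a single null element
       * can be shared across every index of the array.
       */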
1755 c->elements[0] = vtn_null_constant(b, type->array_element);
1756 for (unsigned i = 1; i < c->num_elements; i++)
1757 c->elements[i] = c->elements[0];
1758 break;
1759
1760 case vtn_base_type_struct:
1761 c->num_elements = type->length;
1762 c->elements = ralloc_array(b, nir_constant *, c->num_elements);
1763 for (unsigned i = 0; i < c->num_elements; i++)
1764 c->elements[i] = vtn_null_constant(b, type->members[i]);
1765 break;
1766
1767 default:
1768 vtn_fail("Invalid type for null constant");
1769 }
1770
1771 return c;
1772 }
1773
1774 static void
1775 spec_constant_decoration_cb(struct vtn_builder *b, UNUSED struct vtn_value *val,
1776 ASSERTED int member,
1777 const struct vtn_decoration *dec, void *data)
1778 {
1779 vtn_assert(member == -1);
1780 if (dec->decoration != SpvDecorationSpecId)
1781 return;
1782
1783 nir_const_value *value = data;
1784 for (unsigned i = 0; i < b->num_specializations; i++) {
1785 if (b->specializations[i].id == dec->operands[0]) {
1786 *value = b->specializations[i].value;
1787 return;
1788 }
1789 }
1790 }
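/* Illustrative sketch (hypothetical values): a driver that wants the
 * constant decorated "SpecId 7" to become 4 supplies an entry like
 *
 *    struct nir_spirv_specialization spec = { .id = 7, .value.u32 = 4 };
 *
 * in b->specializations; the loop above then replaces the default literal
 * with that value.
 */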
1791
1792 static void
1793 handle_workgroup_size_decoration_cb(struct vtn_builder *b,
1794 struct vtn_value *val,
1795 ASSERTED int member,
1796 const struct vtn_decoration *dec,
1797 UNUSED void *data)
1798 {
1799 vtn_assert(member == -1);
1800 if (dec->decoration != SpvDecorationBuiltIn ||
1801 dec->operands[0] != SpvBuiltInWorkgroupSize)
1802 return;
1803
1804 vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));
1805 b->workgroup_size_builtin = val;
1806 }
1807
1808 static void
1809 vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
1810 const uint32_t *w, unsigned count)
1811 {
1812 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
1813 val->constant = rzalloc(b, nir_constant);
1814 switch (opcode) {
1815 case SpvOpConstantTrue:
1816 case SpvOpConstantFalse:
1817 case SpvOpSpecConstantTrue:
1818 case SpvOpSpecConstantFalse: {
1819 vtn_fail_if(val->type->type != glsl_bool_type(),
1820 "Result type of %s must be OpTypeBool",
1821 spirv_op_to_string(opcode));
1822
1823 bool bval = (opcode == SpvOpConstantTrue ||
1824 opcode == SpvOpSpecConstantTrue);
1825
1826 nir_const_value u32val = nir_const_value_for_uint(bval, 32);
1827
1828 if (opcode == SpvOpSpecConstantTrue ||
1829 opcode == SpvOpSpecConstantFalse)
1830 vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32val);
1831
1832 val->constant->values[0].b = u32val.u32 != 0;
1833 break;
1834 }
1835
1836 case SpvOpConstant:
1837 case SpvOpSpecConstant: {
1838 vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
1839 "Result type of %s must be a scalar",
1840 spirv_op_to_string(opcode));
1841 int bit_size = glsl_get_bit_size(val->type->type);
1842 switch (bit_size) {
1843 case 64:
1844 val->constant->values[0].u64 = vtn_u64_literal(&w[3]);
1845 break;
1846 case 32:
1847 val->constant->values[0].u32 = w[3];
1848 break;
1849 case 16:
1850 val->constant->values[0].u16 = w[3];
1851 break;
1852 case 8:
1853 val->constant->values[0].u8 = w[3];
1854 break;
1855 default:
1856 vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size);
1857 }
1858
1859 if (opcode == SpvOpSpecConstant)
1860 vtn_foreach_decoration(b, val, spec_constant_decoration_cb,
1861 &val->constant->values[0]);
1862 break;
1863 }
1864
1865 case SpvOpSpecConstantComposite:
1866 case SpvOpConstantComposite: {
1867 unsigned elem_count = count - 3;
1868 vtn_fail_if(elem_count != val->type->length,
1869 "%s has %u constituents, expected %u",
1870 spirv_op_to_string(opcode), elem_count, val->type->length);
1871
1872 nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
1873 for (unsigned i = 0; i < elem_count; i++) {
1874 struct vtn_value *val = vtn_untyped_value(b, w[i + 3]);
1875
1876 if (val->value_type == vtn_value_type_constant) {
1877 elems[i] = val->constant;
1878 } else {
1879 vtn_fail_if(val->value_type != vtn_value_type_undef,
1880 "only constants or undefs allowed for "
1881 "SpvOpConstantComposite");
1882 /* to make it easier, just insert a NULL constant for now */
1883 elems[i] = vtn_null_constant(b, val->type);
1884 }
1885 }
1886
1887 switch (val->type->base_type) {
1888 case vtn_base_type_vector: {
1889 assert(glsl_type_is_vector(val->type->type));
1890 for (unsigned i = 0; i < elem_count; i++)
1891 val->constant->values[i] = elems[i]->values[0];
1892 break;
1893 }
1894
1895 case vtn_base_type_matrix:
1896 case vtn_base_type_struct:
1897 case vtn_base_type_array:
1898 ralloc_steal(val->constant, elems);
1899 val->constant->num_elements = elem_count;
1900 val->constant->elements = elems;
1901 break;
1902
1903 default:
1904 vtn_fail("Result type of %s must be a composite type",
1905 spirv_op_to_string(opcode));
1906 }
1907 break;
1908 }
1909
1910 case SpvOpSpecConstantOp: {
1911 nir_const_value u32op = nir_const_value_for_uint(w[3], 32);
1912 vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32op);
1913 SpvOp opcode = u32op.u32;
1914 switch (opcode) {
1915 case SpvOpVectorShuffle: {
1916 struct vtn_value *v0 = &b->values[w[4]];
1917 struct vtn_value *v1 = &b->values[w[5]];
1918
1919 vtn_assert(v0->value_type == vtn_value_type_constant ||
1920 v0->value_type == vtn_value_type_undef);
1921 vtn_assert(v1->value_type == vtn_value_type_constant ||
1922 v1->value_type == vtn_value_type_undef);
1923
1924 unsigned len0 = glsl_get_vector_elements(v0->type->type);
1925 unsigned len1 = glsl_get_vector_elements(v1->type->type);
1926
1927 vtn_assert(len0 + len1 < 16);
1928
1929 unsigned bit_size = glsl_get_bit_size(val->type->type);
1930 unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
1931 unsigned bit_size1 = glsl_get_bit_size(v1->type->type);
1932
1933 vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
1934 (void)bit_size0; (void)bit_size1;
1935
1936 nir_const_value undef = { .u64 = 0xdeadbeefdeadbeef };
1937 nir_const_value combined[NIR_MAX_VEC_COMPONENTS * 2];
1938
1939 if (v0->value_type == vtn_value_type_constant) {
1940 for (unsigned i = 0; i < len0; i++)
1941 combined[i] = v0->constant->values[i];
1942 }
1943 if (v1->value_type == vtn_value_type_constant) {
1944 for (unsigned i = 0; i < len1; i++)
1945 combined[len0 + i] = v1->constant->values[i];
1946 }
1947
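            /* e.g. shuffling two vec2s %a and %b with Component literals
             * (0, 3) picks combined[0] = a.x and combined[3] = b.y.
             */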
1948 for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
1949 uint32_t comp = w[i + 6];
1950 if (comp == (uint32_t)-1) {
1951 /* If component is not used, set the value to a known constant
1952 * to detect if it is wrongly used.
1953 */
1954 val->constant->values[j] = undef;
1955 } else {
1956 vtn_fail_if(comp >= len0 + len1,
1957 "All Component literals must either be FFFFFFFF "
1958 "or in [0, N - 1] (inclusive).");
1959 val->constant->values[j] = combined[comp];
1960 }
1961 }
1962 break;
1963 }
1964
1965 case SpvOpCompositeExtract:
1966 case SpvOpCompositeInsert: {
1967 struct vtn_value *comp;
1968 unsigned deref_start;
1969 struct nir_constant **c;
1970 if (opcode == SpvOpCompositeExtract) {
1971 comp = vtn_value(b, w[4], vtn_value_type_constant);
1972 deref_start = 5;
1973 c = &comp->constant;
1974 } else {
1975 comp = vtn_value(b, w[5], vtn_value_type_constant);
1976 deref_start = 6;
1977 val->constant = nir_constant_clone(comp->constant,
1978 (nir_variable *)b);
1979 c = &val->constant;
1980 }
1981
1982 int elem = -1;
1983 const struct vtn_type *type = comp->type;
1984 for (unsigned i = deref_start; i < count; i++) {
1985             vtn_fail_if(w[i] >= type->length,
1986 "%uth index of %s is %u but the type has only "
1987 "%u elements", i - deref_start,
1988 spirv_op_to_string(opcode), w[i], type->length);
1989
1990 switch (type->base_type) {
1991 case vtn_base_type_vector:
1992 elem = w[i];
1993 type = type->array_element;
1994 break;
1995
1996 case vtn_base_type_matrix:
1997 case vtn_base_type_array:
1998 c = &(*c)->elements[w[i]];
1999 type = type->array_element;
2000 break;
2001
2002 case vtn_base_type_struct:
2003 c = &(*c)->elements[w[i]];
2004 type = type->members[w[i]];
2005 break;
2006
2007 default:
2008 vtn_fail("%s must only index into composite types",
2009 spirv_op_to_string(opcode));
2010 }
2011 }
2012
2013 if (opcode == SpvOpCompositeExtract) {
2014 if (elem == -1) {
2015 val->constant = *c;
2016 } else {
2017 unsigned num_components = type->length;
2018 for (unsigned i = 0; i < num_components; i++)
2019 val->constant->values[i] = (*c)->values[elem + i];
2020 }
2021 } else {
2022 struct vtn_value *insert =
2023 vtn_value(b, w[4], vtn_value_type_constant);
2024 vtn_assert(insert->type == type);
2025 if (elem == -1) {
2026 *c = insert->constant;
2027 } else {
2028 unsigned num_components = type->length;
2029 for (unsigned i = 0; i < num_components; i++)
2030 (*c)->values[elem + i] = insert->constant->values[i];
2031 }
2032 }
2033 break;
2034 }
2035
2036 default: {
2037 bool swap;
2038 nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
2039 nir_alu_type src_alu_type = dst_alu_type;
2040 unsigned num_components = glsl_get_vector_elements(val->type->type);
2041 unsigned bit_size;
2042
2043 vtn_assert(count <= 7);
2044
2045 switch (opcode) {
2046 case SpvOpSConvert:
2047 case SpvOpFConvert:
2048 case SpvOpUConvert:
2049 /* We have a source in a conversion */
2050 src_alu_type =
2051 nir_get_nir_type_for_glsl_type(vtn_get_value_type(b, w[4])->type);
2052 /* We use the bitsize of the conversion source to evaluate the opcode later */
2053 bit_size = glsl_get_bit_size(vtn_get_value_type(b, w[4])->type);
2054 break;
2055 default:
2056 bit_size = glsl_get_bit_size(val->type->type);
2057          }
2058
2059 nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
2060 nir_alu_type_get_type_size(src_alu_type),
2061 nir_alu_type_get_type_size(dst_alu_type));
2062 nir_const_value src[3][NIR_MAX_VEC_COMPONENTS];
2063
2064 for (unsigned i = 0; i < count - 4; i++) {
2065 struct vtn_value *src_val =
2066 vtn_value(b, w[4 + i], vtn_value_type_constant);
2067
2068 /* If this is an unsized source, pull the bit size from the
2069 * source; otherwise, we'll use the bit size from the destination.
2070 */
2071 if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
2072 bit_size = glsl_get_bit_size(src_val->type->type);
2073
2074 unsigned src_comps = nir_op_infos[op].input_sizes[i] ?
2075 nir_op_infos[op].input_sizes[i] :
2076 num_components;
2077
2078 unsigned j = swap ? 1 - i : i;
2079 for (unsigned c = 0; c < src_comps; c++)
2080 src[j][c] = src_val->constant->values[c];
2081 }
2082
2083          /* Fix up fixed-size sources: NIR shift counts are always 32-bit. */
2084 switch (op) {
2085 case nir_op_ishl:
2086 case nir_op_ishr:
2087 case nir_op_ushr: {
2088 if (bit_size == 32)
2089 break;
2090 for (unsigned i = 0; i < num_components; ++i) {
2091 switch (bit_size) {
2092 case 64: src[1][i].u32 = src[1][i].u64; break;
2093 case 16: src[1][i].u32 = src[1][i].u16; break;
2094 case 8: src[1][i].u32 = src[1][i].u8; break;
2095 }
2096 }
2097 break;
2098 }
2099 default:
2100 break;
2101 }
2102
2103 nir_const_value *srcs[3] = {
2104 src[0], src[1], src[2],
2105 };
2106 nir_eval_const_opcode(op, val->constant->values,
2107 num_components, bit_size, srcs,
2108 b->shader->info.float_controls_execution_mode);
2109 break;
2110 } /* default */
2111 }
2112 break;
2113 }
2114
2115 case SpvOpConstantNull:
2116 val->constant = vtn_null_constant(b, val->type);
2117 break;
2118
2119 default:
2120 vtn_fail_with_opcode("Unhandled opcode", opcode);
2121 }
2122
2123 /* Now that we have the value, update the workgroup size if needed */
2124 vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
2125 }
2126
2127 static void
2128 vtn_split_barrier_semantics(struct vtn_builder *b,
2129 SpvMemorySemanticsMask semantics,
2130 SpvMemorySemanticsMask *before,
2131 SpvMemorySemanticsMask *after)
2132 {
2133    /* For memory semantics embedded in operations, we split them into up to
2134     * two barriers, one added before and one after the operation. This is
2135     * less strict than propagating the semantics all the way to the final
2136     * backend stage, but it still results in correct execution.
2137     *
2138     * A further improvement would be to pipe this information (and make use
2139     * of it) through the next compiler layers, at the expense of making the
2140     * handling of barriers more complicated.
2141     */
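   /* Worked example (illustrative): an atomic carrying
    * AcquireRelease | WorkgroupMemory is split into a Release barrier on
    * workgroup memory emitted before the operation and an Acquire barrier
    * on workgroup memory emitted after it.
    */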
2142
2143 *before = SpvMemorySemanticsMaskNone;
2144 *after = SpvMemorySemanticsMaskNone;
2145
2146 SpvMemorySemanticsMask order_semantics =
2147 semantics & (SpvMemorySemanticsAcquireMask |
2148 SpvMemorySemanticsReleaseMask |
2149 SpvMemorySemanticsAcquireReleaseMask |
2150 SpvMemorySemanticsSequentiallyConsistentMask);
2151
2152 if (util_bitcount(order_semantics) > 1) {
2153       /* Old GLSLang versions incorrectly set all the ordering bits. This was
2154        * fixed in glslang commit c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 and
2155        * has been in GLSLang since revision "SPIRV99.1321" (from Jul 2016).
2156 */
2157 vtn_warn("Multiple memory ordering semantics specified, "
2158 "assuming AcquireRelease.");
2159 order_semantics = SpvMemorySemanticsAcquireReleaseMask;
2160 }
2161
2162 const SpvMemorySemanticsMask av_vis_semantics =
2163 semantics & (SpvMemorySemanticsMakeAvailableMask |
2164 SpvMemorySemanticsMakeVisibleMask);
2165
2166 const SpvMemorySemanticsMask storage_semantics =
2167 semantics & (SpvMemorySemanticsUniformMemoryMask |
2168 SpvMemorySemanticsSubgroupMemoryMask |
2169 SpvMemorySemanticsWorkgroupMemoryMask |
2170 SpvMemorySemanticsCrossWorkgroupMemoryMask |
2171 SpvMemorySemanticsAtomicCounterMemoryMask |
2172 SpvMemorySemanticsImageMemoryMask |
2173 SpvMemorySemanticsOutputMemoryMask);
2174
2175 const SpvMemorySemanticsMask other_semantics =
2176 semantics & ~(order_semantics | av_vis_semantics | storage_semantics |
2177 SpvMemorySemanticsVolatileMask);
2178
2179 if (other_semantics)
2180 vtn_warn("Ignoring unhandled memory semantics: %u\n", other_semantics);
2181
2182 /* SequentiallyConsistent is treated as AcquireRelease. */
2183
2184 /* The RELEASE barrier happens BEFORE the operation, and it is usually
2185     * associated with a Store. Write operations with matching semantics
2186     * will not be reordered after the Store.
2187 */
2188 if (order_semantics & (SpvMemorySemanticsReleaseMask |
2189 SpvMemorySemanticsAcquireReleaseMask |
2190 SpvMemorySemanticsSequentiallyConsistentMask)) {
2191 *before |= SpvMemorySemanticsReleaseMask | storage_semantics;
2192 }
2193
2194 /* The ACQUIRE barrier happens AFTER the operation, and it is usually
2195     * associated with a Load. Operations with matching semantics will not
2196     * be reordered before the Load.
2197 */
2198 if (order_semantics & (SpvMemorySemanticsAcquireMask |
2199 SpvMemorySemanticsAcquireReleaseMask |
2200 SpvMemorySemanticsSequentiallyConsistentMask)) {
2201 *after |= SpvMemorySemanticsAcquireMask | storage_semantics;
2202 }
2203
2204 if (av_vis_semantics & SpvMemorySemanticsMakeVisibleMask)
2205 *before |= SpvMemorySemanticsMakeVisibleMask | storage_semantics;
2206
2207 if (av_vis_semantics & SpvMemorySemanticsMakeAvailableMask)
2208 *after |= SpvMemorySemanticsMakeAvailableMask | storage_semantics;
2209 }
2210
2211 static nir_memory_semantics
2212 vtn_mem_semantics_to_nir_mem_semantics(struct vtn_builder *b,
2213 SpvMemorySemanticsMask semantics)
2214 {
2215 nir_memory_semantics nir_semantics = 0;
2216
2217 SpvMemorySemanticsMask order_semantics =
2218 semantics & (SpvMemorySemanticsAcquireMask |
2219 SpvMemorySemanticsReleaseMask |
2220 SpvMemorySemanticsAcquireReleaseMask |
2221 SpvMemorySemanticsSequentiallyConsistentMask);
2222
2223 if (util_bitcount(order_semantics) > 1) {
2224       /* Old GLSLang versions incorrectly set all the ordering bits. This was
2225        * fixed in glslang commit c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 and
2226        * has been in GLSLang since revision "SPIRV99.1321" (from Jul 2016).
2227 */
2228 vtn_warn("Multiple memory ordering semantics bits specified, "
2229 "assuming AcquireRelease.");
2230 order_semantics = SpvMemorySemanticsAcquireReleaseMask;
2231 }
2232
2233 switch (order_semantics) {
2234 case 0:
2235 /* Not an ordering barrier. */
2236 break;
2237
2238 case SpvMemorySemanticsAcquireMask:
2239 nir_semantics = NIR_MEMORY_ACQUIRE;
2240 break;
2241
2242 case SpvMemorySemanticsReleaseMask:
2243 nir_semantics = NIR_MEMORY_RELEASE;
2244 break;
2245
2246 case SpvMemorySemanticsSequentiallyConsistentMask:
2247 /* Fall through. Treated as AcquireRelease in Vulkan. */
2248 case SpvMemorySemanticsAcquireReleaseMask:
2249 nir_semantics = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE;
2250 break;
2251
2252 default:
2253 unreachable("Invalid memory order semantics");
2254 }
2255
2256 if (semantics & SpvMemorySemanticsMakeAvailableMask) {
2257 vtn_fail_if(!b->options->caps.vk_memory_model,
2258 "To use MakeAvailable memory semantics the VulkanMemoryModel "
2259 "capability must be declared.");
2260 nir_semantics |= NIR_MEMORY_MAKE_AVAILABLE;
2261 }
2262
2263 if (semantics & SpvMemorySemanticsMakeVisibleMask) {
2264 vtn_fail_if(!b->options->caps.vk_memory_model,
2265 "To use MakeVisible memory semantics the VulkanMemoryModel "
2266 "capability must be declared.");
2267 nir_semantics |= NIR_MEMORY_MAKE_VISIBLE;
2268 }
2269
2270 return nir_semantics;
2271 }
2272
2273 static nir_variable_mode
2274 vtn_mem_semantics_to_nir_var_modes(struct vtn_builder *b,
2275 SpvMemorySemanticsMask semantics)
2276 {
2277 /* Vulkan Environment for SPIR-V says "SubgroupMemory, CrossWorkgroupMemory,
2278 * and AtomicCounterMemory are ignored".
2279 */
2280 semantics &= ~(SpvMemorySemanticsSubgroupMemoryMask |
2281 SpvMemorySemanticsCrossWorkgroupMemoryMask |
2282 SpvMemorySemanticsAtomicCounterMemoryMask);
2283
2284 /* TODO: Consider adding nir_var_mem_image mode to NIR so it can be used
2285 * for SpvMemorySemanticsImageMemoryMask.
2286 */
2287
2288 nir_variable_mode modes = 0;
2289 if (semantics & (SpvMemorySemanticsUniformMemoryMask |
2290 SpvMemorySemanticsImageMemoryMask)) {
2291 modes |= nir_var_uniform |
2292 nir_var_mem_ubo |
2293 nir_var_mem_ssbo |
2294 nir_var_mem_global;
2295 }
2296 if (semantics & SpvMemorySemanticsWorkgroupMemoryMask)
2297 modes |= nir_var_mem_shared;
2298    if (semantics & SpvMemorySemanticsOutputMemoryMask)
2299       modes |= nir_var_shader_out;
2301
2302 return modes;
2303 }
2304
2305 static nir_scope
2306 vtn_scope_to_nir_scope(struct vtn_builder *b, SpvScope scope)
2307 {
2308 nir_scope nir_scope;
2309 switch (scope) {
2310 case SpvScopeDevice:
2311 vtn_fail_if(b->options->caps.vk_memory_model &&
2312 !b->options->caps.vk_memory_model_device_scope,
2313 "If the Vulkan memory model is declared and any instruction "
2314 "uses Device scope, the VulkanMemoryModelDeviceScope "
2315 "capability must be declared.");
2316 nir_scope = NIR_SCOPE_DEVICE;
2317 break;
2318
2319 case SpvScopeQueueFamily:
2320 vtn_fail_if(!b->options->caps.vk_memory_model,
2321 "To use Queue Family scope, the VulkanMemoryModel capability "
2322 "must be declared.");
2323 nir_scope = NIR_SCOPE_QUEUE_FAMILY;
2324 break;
2325
2326 case SpvScopeWorkgroup:
2327 nir_scope = NIR_SCOPE_WORKGROUP;
2328 break;
2329
2330 case SpvScopeSubgroup:
2331 nir_scope = NIR_SCOPE_SUBGROUP;
2332 break;
2333
2334 case SpvScopeInvocation:
2335 nir_scope = NIR_SCOPE_INVOCATION;
2336 break;
2337
2338 default:
2339 vtn_fail("Invalid memory scope");
2340 }
2341
2342 return nir_scope;
2343 }
2344
2345 static void
2346 vtn_emit_scoped_control_barrier(struct vtn_builder *b, SpvScope exec_scope,
2347 SpvScope mem_scope,
2348 SpvMemorySemanticsMask semantics)
2349 {
2350 nir_memory_semantics nir_semantics =
2351 vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
2352    nir_variable_mode modes = vtn_mem_semantics_to_nir_var_modes(b, semantics);
2353 nir_scope nir_exec_scope = vtn_scope_to_nir_scope(b, exec_scope);
2354
2355 /* Memory semantics is optional for OpControlBarrier. */
2356 nir_scope nir_mem_scope;
2357 if (nir_semantics == 0 || modes == 0)
2358 nir_mem_scope = NIR_SCOPE_NONE;
2359 else
2360 nir_mem_scope = vtn_scope_to_nir_scope(b, mem_scope);
2361
2362 nir_scoped_barrier(&b->nb, nir_exec_scope, nir_mem_scope, nir_semantics, modes);
2363 }
2364
2365 static void
2366 vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
2367 SpvMemorySemanticsMask semantics)
2368 {
2369    nir_variable_mode modes = vtn_mem_semantics_to_nir_var_modes(b, semantics);
2370 nir_memory_semantics nir_semantics =
2371 vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
2372
2373 /* No barrier to add. */
2374 if (nir_semantics == 0 || modes == 0)
2375 return;
2376
2377 nir_scope nir_mem_scope = vtn_scope_to_nir_scope(b, scope);
2378 nir_scoped_barrier(&b->nb, NIR_SCOPE_NONE, nir_mem_scope, nir_semantics, modes);
2379 }
2380
2381 struct vtn_ssa_value *
2382 vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
2383 {
2384 /* Always use bare types for SSA values for a couple of reasons:
2385 *
2386 * 1. Code which emits deref chains should never listen to the explicit
2387 * layout information on the SSA value if any exists. If we've
2388 * accidentally been relying on this, we want to find those bugs.
2389 *
2390 * 2. We want to be able to quickly check that an SSA value being assigned
2391 * to a SPIR-V value has the right type. Using bare types everywhere
2392 * ensures that we can pointer-compare.
2393 */
2394 struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
2395 val->type = glsl_get_bare_type(type);
2396
2398 if (!glsl_type_is_vector_or_scalar(type)) {
2399 unsigned elems = glsl_get_length(val->type);
2400 val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
2401 if (glsl_type_is_array_or_matrix(type)) {
2402 const struct glsl_type *elem_type = glsl_get_array_element(type);
2403 for (unsigned i = 0; i < elems; i++)
2404 val->elems[i] = vtn_create_ssa_value(b, elem_type);
2405 } else {
2406 vtn_assert(glsl_type_is_struct_or_ifc(type));
2407 for (unsigned i = 0; i < elems; i++) {
2408 const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
2409 val->elems[i] = vtn_create_ssa_value(b, elem_type);
2410 }
2411 }
2412 }
2413
2414 return val;
2415 }
2416
2417 static nir_tex_src
2418 vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
2419 {
2420 nir_tex_src src;
2421 src.src = nir_src_for_ssa(vtn_get_nir_ssa(b, index));
2422 src.src_type = type;
2423 return src;
2424 }
2425
2426 static uint32_t
2427 image_operand_arg(struct vtn_builder *b, const uint32_t *w, uint32_t count,
2428 uint32_t mask_idx, SpvImageOperandsMask op)
2429 {
2430 static const SpvImageOperandsMask ops_with_arg =
2431 SpvImageOperandsBiasMask |
2432 SpvImageOperandsLodMask |
2433 SpvImageOperandsGradMask |
2434 SpvImageOperandsConstOffsetMask |
2435 SpvImageOperandsOffsetMask |
2436 SpvImageOperandsConstOffsetsMask |
2437 SpvImageOperandsSampleMask |
2438 SpvImageOperandsMinLodMask |
2439 SpvImageOperandsMakeTexelAvailableMask |
2440 SpvImageOperandsMakeTexelVisibleMask;
2441
2442 assert(util_bitcount(op) == 1);
2443 assert(w[mask_idx] & op);
2444 assert(op & ops_with_arg);
2445
2446 uint32_t idx = util_bitcount(w[mask_idx] & (op - 1) & ops_with_arg) + 1;
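   /* Worked example (illustrative): if w[mask_idx] == (Lod | ConstOffset |
    * Sample) and op == Sample, the lower bits select Lod and ConstOffset,
    * so the Sample argument lives three words after the mask.
    */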
2447
2448 /* Adjust indices for operands with two arguments. */
2449 static const SpvImageOperandsMask ops_with_two_args =
2450 SpvImageOperandsGradMask;
2451 idx += util_bitcount(w[mask_idx] & (op - 1) & ops_with_two_args);
2452
2453 idx += mask_idx;
2454
2455 vtn_fail_if(idx + (op & ops_with_two_args ? 1 : 0) >= count,
2456                "Image op claims to have %s but does not have enough "
2457 "following operands", spirv_imageoperands_to_string(op));
2458
2459 return idx;
2460 }
2461
2462 static void
2463 non_uniform_decoration_cb(struct vtn_builder *b,
2464 struct vtn_value *val, int member,
2465 const struct vtn_decoration *dec, void *void_ctx)
2466 {
2467 enum gl_access_qualifier *access = void_ctx;
2468 switch (dec->decoration) {
2469 case SpvDecorationNonUniformEXT:
2470 *access |= ACCESS_NON_UNIFORM;
2471 break;
2472
2473 default:
2474 break;
2475 }
2476 }
2477
2478 static void
2479 vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
2480 const uint32_t *w, unsigned count)
2481 {
2482 struct vtn_type *ret_type = vtn_get_type(b, w[1]);
2483
2484 if (opcode == SpvOpSampledImage) {
2485 struct vtn_sampled_image si = {
2486 .image = vtn_get_image(b, w[3]),
2487 .sampler = vtn_get_sampler(b, w[4]),
2488 };
2489
2490 enum gl_access_qualifier access = 0;
2491 vtn_foreach_decoration(b, vtn_untyped_value(b, w[3]),
2492 non_uniform_decoration_cb, &access);
2493 vtn_foreach_decoration(b, vtn_untyped_value(b, w[4]),
2494 non_uniform_decoration_cb, &access);
2495
2496 vtn_push_sampled_image(b, w[2], si, access & ACCESS_NON_UNIFORM);
2497 return;
2498 } else if (opcode == SpvOpImage) {
2499 struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
2500
2501 enum gl_access_qualifier access = 0;
2502 vtn_foreach_decoration(b, vtn_untyped_value(b, w[3]),
2503 non_uniform_decoration_cb, &access);
2504
2505 vtn_push_image(b, w[2], si.image, access & ACCESS_NON_UNIFORM);
2506 return;
2507 }
2508
2509 nir_deref_instr *image = NULL, *sampler = NULL;
2510 struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
2511 if (sampled_val->type->base_type == vtn_base_type_sampled_image) {
2512 struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
2513 image = si.image;
2514 sampler = si.sampler;
2515 } else {
2516 image = vtn_get_image(b, w[3]);
2517 }
2518
2519 const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image->type);
2520 const bool is_array = glsl_sampler_type_is_array(image->type);
2521 nir_alu_type dest_type = nir_type_invalid;
2522
2523 /* Figure out the base texture operation */
2524 nir_texop texop;
2525 switch (opcode) {
2526 case SpvOpImageSampleImplicitLod:
2527 case SpvOpImageSampleDrefImplicitLod:
2528 case SpvOpImageSampleProjImplicitLod:
2529 case SpvOpImageSampleProjDrefImplicitLod:
2530 texop = nir_texop_tex;
2531 break;
2532
2533 case SpvOpImageSampleExplicitLod:
2534 case SpvOpImageSampleDrefExplicitLod:
2535 case SpvOpImageSampleProjExplicitLod:
2536 case SpvOpImageSampleProjDrefExplicitLod:
2537 texop = nir_texop_txl;
2538 break;
2539
2540 case SpvOpImageFetch:
2541 if (sampler_dim == GLSL_SAMPLER_DIM_MS) {
2542 texop = nir_texop_txf_ms;
2543 } else {
2544 texop = nir_texop_txf;
2545 }
2546 break;
2547
2548 case SpvOpImageGather:
2549 case SpvOpImageDrefGather:
2550 texop = nir_texop_tg4;
2551 break;
2552
2553 case SpvOpImageQuerySizeLod:
2554 case SpvOpImageQuerySize:
2555 texop = nir_texop_txs;
2556 dest_type = nir_type_int;
2557 break;
2558
2559 case SpvOpImageQueryLod:
2560 texop = nir_texop_lod;
2561 dest_type = nir_type_float;
2562 break;
2563
2564 case SpvOpImageQueryLevels:
2565 texop = nir_texop_query_levels;
2566 dest_type = nir_type_int;
2567 break;
2568
2569 case SpvOpImageQuerySamples:
2570 texop = nir_texop_texture_samples;
2571 dest_type = nir_type_int;
2572 break;
2573
2574 case SpvOpFragmentFetchAMD:
2575 texop = nir_texop_fragment_fetch;
2576 break;
2577
2578 case SpvOpFragmentMaskFetchAMD:
2579 texop = nir_texop_fragment_mask_fetch;
2580 dest_type = nir_type_uint;
2581 break;
2582
2583 default:
2584 vtn_fail_with_opcode("Unhandled opcode", opcode);
2585 }
2586
2587 nir_tex_src srcs[10]; /* 10 should be enough */
2588 nir_tex_src *p = srcs;
2589
2590 p->src = nir_src_for_ssa(&image->dest.ssa);
2591 p->src_type = nir_tex_src_texture_deref;
2592 p++;
2593
2594 switch (texop) {
2595 case nir_texop_tex:
2596 case nir_texop_txb:
2597 case nir_texop_txl:
2598 case nir_texop_txd:
2599 case nir_texop_tg4:
2600 case nir_texop_lod:
2601 vtn_fail_if(sampler == NULL,
2602 "%s requires an image of type OpTypeSampledImage",
2603 spirv_op_to_string(opcode));
2604 p->src = nir_src_for_ssa(&sampler->dest.ssa);
2605 p->src_type = nir_tex_src_sampler_deref;
2606 p++;
2607 break;
2608 case nir_texop_txf:
2609 case nir_texop_txf_ms:
2610 case nir_texop_txs:
2611 case nir_texop_query_levels:
2612 case nir_texop_texture_samples:
2613 case nir_texop_samples_identical:
2614 case nir_texop_fragment_fetch:
2615 case nir_texop_fragment_mask_fetch:
2616       /* These don't take a sampler */
2617 break;
2618 case nir_texop_txf_ms_fb:
2619 vtn_fail("unexpected nir_texop_txf_ms_fb");
2620 break;
2621 case nir_texop_txf_ms_mcs:
2622 vtn_fail("unexpected nir_texop_txf_ms_mcs");
2623 case nir_texop_tex_prefetch:
2624 vtn_fail("unexpected nir_texop_tex_prefetch");
2625 }
2626
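   /* For the opcodes below, w[1] is the result type, w[2] the result id and
    * w[3] the image, so the first texture operand lives at w[4].
    */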
2627 unsigned idx = 4;
2628
2629 struct nir_ssa_def *coord;
2630 unsigned coord_components;
2631 switch (opcode) {
2632 case SpvOpImageSampleImplicitLod:
2633 case SpvOpImageSampleExplicitLod:
2634 case SpvOpImageSampleDrefImplicitLod:
2635 case SpvOpImageSampleDrefExplicitLod:
2636 case SpvOpImageSampleProjImplicitLod:
2637 case SpvOpImageSampleProjExplicitLod:
2638 case SpvOpImageSampleProjDrefImplicitLod:
2639 case SpvOpImageSampleProjDrefExplicitLod:
2640 case SpvOpImageFetch:
2641 case SpvOpImageGather:
2642 case SpvOpImageDrefGather:
2643 case SpvOpImageQueryLod:
2644 case SpvOpFragmentFetchAMD:
2645 case SpvOpFragmentMaskFetchAMD: {
2646 /* All these types have the coordinate as their first real argument */
2647 coord_components = glsl_get_sampler_dim_coordinate_components(sampler_dim);
2648
2649 if (is_array && texop != nir_texop_lod)
2650 coord_components++;
2651
2652 struct vtn_ssa_value *coord_val = vtn_ssa_value(b, w[idx++]);
2653 coord = coord_val->def;
2654 p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
2655 (1 << coord_components) - 1));
2656
2657 /* OpenCL allows integer sampling coordinates */
2658 if (glsl_type_is_integer(coord_val->type) &&
2659 opcode == SpvOpImageSampleExplicitLod) {
2660 vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
2661 "Unless the Kernel capability is being used, the coordinate parameter "
2662                   "of OpImageSampleExplicitLod must be floating point.");
2663
2664 p->src = nir_src_for_ssa(nir_i2f32(&b->nb, p->src.ssa));
2665 }
2666
2667 p->src_type = nir_tex_src_coord;
2668 p++;
2669 break;
2670 }
2671
2672 default:
2673 coord = NULL;
2674 coord_components = 0;
2675 break;
2676 }
2677
2678 switch (opcode) {
2679 case SpvOpImageSampleProjImplicitLod:
2680 case SpvOpImageSampleProjExplicitLod:
2681 case SpvOpImageSampleProjDrefImplicitLod:
2682 case SpvOpImageSampleProjDrefExplicitLod:
2683 /* These have the projector as the last coordinate component */
2684 p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
2685 p->src_type = nir_tex_src_projector;
2686 p++;
2687 break;
2688
2689 default:
2690 break;
2691 }
2692
2693 bool is_shadow = false;
2694 unsigned gather_component = 0;
2695 switch (opcode) {
2696 case SpvOpImageSampleDrefImplicitLod:
2697 case SpvOpImageSampleDrefExplicitLod:
2698 case SpvOpImageSampleProjDrefImplicitLod:
2699 case SpvOpImageSampleProjDrefExplicitLod:
2700 case SpvOpImageDrefGather:
2701 /* These all have an explicit depth value as their next source */
2702 is_shadow = true;
2703 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
2704 break;
2705
2706 case SpvOpImageGather:
2707 /* This has a component as its next source */
2708 gather_component = vtn_constant_uint(b, w[idx++]);
2709 break;
2710
2711 default:
2712 break;
2713 }
2714
2715 /* For OpImageQuerySizeLod, we always have an LOD */
2716 if (opcode == SpvOpImageQuerySizeLod)
2717 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
2718
2719 /* For OpFragmentFetchAMD, we always have a multisample index */
2720 if (opcode == SpvOpFragmentFetchAMD)
2721 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
2722
2723 /* Now we need to handle some number of optional arguments */
2724 struct vtn_value *gather_offsets = NULL;
2725 if (idx < count) {
2726 uint32_t operands = w[idx];
2727
2728 if (operands & SpvImageOperandsBiasMask) {
2729 vtn_assert(texop == nir_texop_tex ||
2730 texop == nir_texop_tg4);
2731 if (texop == nir_texop_tex)
2732 texop = nir_texop_txb;
2733 uint32_t arg = image_operand_arg(b, w, count, idx,
2734 SpvImageOperandsBiasMask);
2735 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_bias);
2736 }
2737
2738 if (operands & SpvImageOperandsLodMask) {
2739 vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
2740 texop == nir_texop_txs || texop == nir_texop_tg4);
2741 uint32_t arg = image_operand_arg(b, w, count, idx,
2742 SpvImageOperandsLodMask);
2743 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_lod);
2744 }
2745
2746 if (operands & SpvImageOperandsGradMask) {
2747 vtn_assert(texop == nir_texop_txl);
2748 texop = nir_texop_txd;
2749 uint32_t arg = image_operand_arg(b, w, count, idx,
2750 SpvImageOperandsGradMask);
2751 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ddx);
2752 (*p++) = vtn_tex_src(b, w[arg + 1], nir_tex_src_ddy);
2753 }
2754
2755 vtn_fail_if(util_bitcount(operands & (SpvImageOperandsConstOffsetsMask |
2756 SpvImageOperandsOffsetMask |
2757 SpvImageOperandsConstOffsetMask)) > 1,
2758 "At most one of the ConstOffset, Offset, and ConstOffsets "
2759 "image operands can be used on a given instruction.");
2760
2761 if (operands & SpvImageOperandsOffsetMask) {
2762 uint32_t arg = image_operand_arg(b, w, count, idx,
2763 SpvImageOperandsOffsetMask);
2764 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
2765 }
2766
2767 if (operands & SpvImageOperandsConstOffsetMask) {
2768 uint32_t arg = image_operand_arg(b, w, count, idx,
2769 SpvImageOperandsConstOffsetMask);
2770 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
2771 }
2772
2773 if (operands & SpvImageOperandsConstOffsetsMask) {
2774 vtn_assert(texop == nir_texop_tg4);
2775 uint32_t arg = image_operand_arg(b, w, count, idx,
2776 SpvImageOperandsConstOffsetsMask);
2777 gather_offsets = vtn_value(b, w[arg], vtn_value_type_constant);
2778 }
2779
2780 if (operands & SpvImageOperandsSampleMask) {
2781 vtn_assert(texop == nir_texop_txf_ms);
2782 uint32_t arg = image_operand_arg(b, w, count, idx,
2783 SpvImageOperandsSampleMask);
2784 texop = nir_texop_txf_ms;
2785 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ms_index);
2786 }
2787
2788 if (operands & SpvImageOperandsMinLodMask) {
2789 vtn_assert(texop == nir_texop_tex ||
2790 texop == nir_texop_txb ||
2791 texop == nir_texop_txd);
2792 uint32_t arg = image_operand_arg(b, w, count, idx,
2793 SpvImageOperandsMinLodMask);
2794 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_min_lod);
2795 }
2796 }
2797
2798 nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
2799 instr->op = texop;
2800
2801 memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
2802
2803 instr->coord_components = coord_components;
2804 instr->sampler_dim = sampler_dim;
2805 instr->is_array = is_array;
2806 instr->is_shadow = is_shadow;
2807 instr->is_new_style_shadow =
2808 is_shadow && glsl_get_components(ret_type->type) == 1;
2809 instr->component = gather_component;
2810
2811 /* The Vulkan spec says:
2812 *
2813 * "If an instruction loads from or stores to a resource (including
2814 * atomics and image instructions) and the resource descriptor being
2815 * accessed is not dynamically uniform, then the operand corresponding
2816 * to that resource (e.g. the pointer or sampled image operand) must be
2817 * decorated with NonUniform."
2818 *
2819 * It's very careful to specify that the exact operand must be decorated
2820 * NonUniform. The SPIR-V parser is not expected to chase through long
2821 * chains to find the NonUniform decoration. It's either right there or we
2822 * can assume it doesn't exist.
2823 */
2824 enum gl_access_qualifier access = 0;
2825 vtn_foreach_decoration(b, sampled_val, non_uniform_decoration_cb, &access);
2826
2827 if (sampled_val->propagated_non_uniform)
2828 access |= ACCESS_NON_UNIFORM;
2829
2830 if (image && (access & ACCESS_NON_UNIFORM))
2831 instr->texture_non_uniform = true;
2832
2833 if (sampler && (access & ACCESS_NON_UNIFORM))
2834 instr->sampler_non_uniform = true;
2835
2836 /* for non-query ops, get dest_type from SPIR-V return type */
2837 if (dest_type == nir_type_invalid) {
2838 /* the return type should match the image type, unless the image type is
2839     * VOID (CL image), in which case the return type determines the result type.
2840     */
2841 enum glsl_base_type sampler_base =
2842 glsl_get_sampler_result_type(image->type);
2843 enum glsl_base_type ret_base = glsl_get_base_type(ret_type->type);
2844 vtn_fail_if(sampler_base != ret_base && sampler_base != GLSL_TYPE_VOID,
2845                "SPIR-V return type does not match the image type. This is "
2846                "only valid for untyped images (OpenCL).");
2847 switch (ret_base) {
2848 case GLSL_TYPE_FLOAT: dest_type = nir_type_float; break;
2849 case GLSL_TYPE_INT: dest_type = nir_type_int; break;
2850 case GLSL_TYPE_UINT: dest_type = nir_type_uint; break;
2851 case GLSL_TYPE_BOOL: dest_type = nir_type_bool; break;
2852 default:
2853 vtn_fail("Invalid base type for sampler result");
2854 }
2855 }
2856
2857 instr->dest_type = dest_type;
2858
2859 nir_ssa_dest_init(&instr->instr, &instr->dest,
2860 nir_tex_instr_dest_size(instr), 32, NULL);
2861
2862 vtn_assert(glsl_get_vector_elements(ret_type->type) ==
2863 nir_tex_instr_dest_size(instr));
2864
2865 if (gather_offsets) {
2866 vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array ||
2867 gather_offsets->type->length != 4,
2868 "ConstOffsets must be an array of size four of vectors "
2869 "of two integer components");
2870
2871 struct vtn_type *vec_type = gather_offsets->type->array_element;
2872 vtn_fail_if(vec_type->base_type != vtn_base_type_vector ||
2873 vec_type->length != 2 ||
2874 !glsl_type_is_integer(vec_type->type),
2875 "ConstOffsets must be an array of size four of vectors "
2876 "of two integer components");
2877
2878 unsigned bit_size = glsl_get_bit_size(vec_type->type);
2879 for (uint32_t i = 0; i < 4; i++) {
2880 const nir_const_value *cvec =
2881 gather_offsets->constant->elements[i]->values;
2882 for (uint32_t j = 0; j < 2; j++) {
2883 switch (bit_size) {
2884 case 8: instr->tg4_offsets[i][j] = cvec[j].i8; break;
2885 case 16: instr->tg4_offsets[i][j] = cvec[j].i16; break;
2886 case 32: instr->tg4_offsets[i][j] = cvec[j].i32; break;
2887 case 64: instr->tg4_offsets[i][j] = cvec[j].i64; break;
2888 default:
2889 vtn_fail("Unsupported bit size: %u", bit_size);
2890 }
2891 }
2892 }
2893 }
2894
2895 nir_builder_instr_insert(&b->nb, &instr->instr);
2896
2897 vtn_push_nir_ssa(b, w[2], &instr->dest.ssa);
2898 }
2899
2900 static void
2901 fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
2902 const uint32_t *w, nir_src *src)
2903 {
2904 const struct glsl_type *type = vtn_get_type(b, w[1])->type;
2905 unsigned bit_size = glsl_get_bit_size(type);
2906
2907 switch (opcode) {
2908 case SpvOpAtomicIIncrement:
2909 src[0] = nir_src_for_ssa(nir_imm_intN_t(&b->nb, 1, bit_size));
2910 break;
2911
2912 case SpvOpAtomicIDecrement:
2913 src[0] = nir_src_for_ssa(nir_imm_intN_t(&b->nb, -1, bit_size));
2914 break;
2915
2916 case SpvOpAtomicISub:
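      /* ISub is implemented as an atomic add of the negated operand. */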
2917 src[0] =
2918 nir_src_for_ssa(nir_ineg(&b->nb, vtn_get_nir_ssa(b, w[6])));
2919 break;
2920
2921 case SpvOpAtomicCompareExchange:
2922 case SpvOpAtomicCompareExchangeWeak:
2923 src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[8]));
2924 src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[7]));
2925 break;
2926
2927 case SpvOpAtomicExchange:
2928 case SpvOpAtomicIAdd:
2929 case SpvOpAtomicSMin:
2930 case SpvOpAtomicUMin:
2931 case SpvOpAtomicSMax:
2932 case SpvOpAtomicUMax:
2933 case SpvOpAtomicAnd:
2934 case SpvOpAtomicOr:
2935 case SpvOpAtomicXor:
2936 case SpvOpAtomicFAddEXT:
2937 src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[6]));
2938 break;
2939
2940 default:
2941 vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
2942 }
2943 }
2944
2945 static nir_ssa_def *
2946 get_image_coord(struct vtn_builder *b, uint32_t value)
2947 {
2948 nir_ssa_def *coord = vtn_get_nir_ssa(b, value);
2949
2950 /* The image_load_store intrinsics assume a 4-dim coordinate */
2951 unsigned swizzle[4];
2952 for (unsigned i = 0; i < 4; i++)
2953 swizzle[i] = MIN2(i, coord->num_components - 1);
2954
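   /* e.g. a two-component (x, y) coordinate becomes (x, y, y, y) */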
2955 return nir_swizzle(&b->nb, coord, swizzle, 4);
2956 }
2957
2958 static nir_ssa_def *
2959 expand_to_vec4(nir_builder *b, nir_ssa_def *value)
2960 {
2961 if (value->num_components == 4)
2962 return value;
2963
2964 unsigned swiz[4];
2965 for (unsigned i = 0; i < 4; i++)
2966 swiz[i] = i < value->num_components ? i : 0;
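   /* e.g. a two-component (x, y) value expands to (x, y, x, x) */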
2967 return nir_swizzle(b, value, swiz, 4);
2968 }
2969
2970 static void
2971 vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
2972 const uint32_t *w, unsigned count)
2973 {
2974 /* Just get this one out of the way */
2975 if (opcode == SpvOpImageTexelPointer) {
2976 struct vtn_value *val =
2977 vtn_push_value(b, w[2], vtn_value_type_image_pointer);
2978 val->image = ralloc(b, struct vtn_image_pointer);
2979
2980 val->image->image = vtn_nir_deref(b, w[3]);
2981 val->image->coord = get_image_coord(b, w[4]);
2982 val->image->sample = vtn_get_nir_ssa(b, w[5]);
2983 val->image->lod = nir_imm_int(&b->nb, 0);
2984 return;
2985 }
2986
2987 struct vtn_image_pointer image;
2988 SpvScope scope = SpvScopeInvocation;
2989 SpvMemorySemanticsMask semantics = 0;
2990
2991 enum gl_access_qualifier access = 0;
2992
2993 struct vtn_value *res_val;
2994 switch (opcode) {
2995 case SpvOpAtomicExchange:
2996 case SpvOpAtomicCompareExchange:
2997 case SpvOpAtomicCompareExchangeWeak:
2998 case SpvOpAtomicIIncrement:
2999 case SpvOpAtomicIDecrement:
3000 case SpvOpAtomicIAdd:
3001 case SpvOpAtomicISub:
3002 case SpvOpAtomicLoad:
3003 case SpvOpAtomicSMin:
3004 case SpvOpAtomicUMin:
3005 case SpvOpAtomicSMax:
3006 case SpvOpAtomicUMax:
3007 case SpvOpAtomicAnd:
3008 case SpvOpAtomicOr:
3009 case SpvOpAtomicXor:
3010 case SpvOpAtomicFAddEXT:
3011 res_val = vtn_value(b, w[3], vtn_value_type_image_pointer);
3012 image = *res_val->image;
3013 scope = vtn_constant_uint(b, w[4]);
3014 semantics = vtn_constant_uint(b, w[5]);
3015 access |= ACCESS_COHERENT;
3016 break;
3017
3018 case SpvOpAtomicStore:
3019 res_val = vtn_value(b, w[1], vtn_value_type_image_pointer);
3020 image = *res_val->image;
3021 scope = vtn_constant_uint(b, w[2]);
3022 semantics = vtn_constant_uint(b, w[3]);
3023 access |= ACCESS_COHERENT;
3024 break;
3025
3026 case SpvOpImageQuerySizeLod:
3027 res_val = vtn_untyped_value(b, w[3]);
3028 image.image = vtn_get_image(b, w[3]);
3029 image.coord = NULL;
3030 image.sample = NULL;
3031 image.lod = vtn_ssa_value(b, w[4])->def;
3032 break;
3033
3034 case SpvOpImageQuerySize:
3035 res_val = vtn_untyped_value(b, w[3]);
3036 image.image = vtn_get_image(b, w[3]);
3037 image.coord = NULL;
3038 image.sample = NULL;
3039 image.lod = NULL;
3040 break;
3041
3042 case SpvOpImageQueryFormat:
3043 case SpvOpImageQueryOrder:
3044 res_val = vtn_untyped_value(b, w[3]);
3045 image.image = vtn_get_image(b, w[3]);
3046 image.coord = NULL;
3047 image.sample = NULL;
3048 image.lod = NULL;
3049 break;
3050
3051 case SpvOpImageRead: {
3052 res_val = vtn_untyped_value(b, w[3]);
3053 image.image = vtn_get_image(b, w[3]);
3054 image.coord = get_image_coord(b, w[4]);
3055
3056 const SpvImageOperandsMask operands =
3057 count > 5 ? w[5] : SpvImageOperandsMaskNone;
3058
3059 if (operands & SpvImageOperandsSampleMask) {
3060 uint32_t arg = image_operand_arg(b, w, count, 5,
3061 SpvImageOperandsSampleMask);
3062 image.sample = vtn_get_nir_ssa(b, w[arg]);
3063 } else {
3064 image.sample = nir_ssa_undef(&b->nb, 1, 32);
3065 }
3066
3067 if (operands & SpvImageOperandsMakeTexelVisibleMask) {
3068 vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
3069 "MakeTexelVisible requires NonPrivateTexel to also be set.");
3070 uint32_t arg = image_operand_arg(b, w, count, 5,
3071 SpvImageOperandsMakeTexelVisibleMask);
3072 semantics = SpvMemorySemanticsMakeVisibleMask;
3073 scope = vtn_constant_uint(b, w[arg]);
3074 }
3075
3076 if (operands & SpvImageOperandsLodMask) {
3077 uint32_t arg = image_operand_arg(b, w, count, 5,
3078 SpvImageOperandsLodMask);
3079 image.lod = vtn_get_nir_ssa(b, w[arg]);
3080 } else {
3081 image.lod = nir_imm_int(&b->nb, 0);
3082 }
3083
3084 if (operands & SpvImageOperandsVolatileTexelMask)
3085 access |= ACCESS_VOLATILE;
3086
3087 break;
3088 }
3089
3090 case SpvOpImageWrite: {
3091 res_val = vtn_untyped_value(b, w[1]);
3092 image.image = vtn_get_image(b, w[1]);
3093 image.coord = get_image_coord(b, w[2]);
3094
3095 /* texel = w[3] */
3096
3097 const SpvImageOperandsMask operands =
3098 count > 4 ? w[4] : SpvImageOperandsMaskNone;
3099
3100 if (operands & SpvImageOperandsSampleMask) {
3101 uint32_t arg = image_operand_arg(b, w, count, 4,