/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "nir/nir_deref.h"
#include "spirv_info.h"

#include "util/format/u_format.h"
#include "util/u_math.h"

#include <stdio.h>
#if UTIL_ARCH_BIG_ENDIAN
#include <byteswap.h>
#endif

void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

#ifndef NDEBUG
   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
#endif
}

void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}

static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

#ifndef NDEBUG
   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
#endif

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}

static void
vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
{
   static int idx = 0;

   char filename[1024];
   int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
                      path, prefix, idx++);
   if (len < 0 || len >= sizeof(filename))
      return;

   FILE *f = fopen(filename, "w");
   if (f == NULL)
      return;

   fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
   fclose(f);

   vtn_info("SPIR-V shader dumped to %s", filename);
}

void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}

void
_vtn_err(struct vtn_builder *b, const char *file, unsigned line,
         const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
               file, line, fmt, args);
   va_end(args);
}

void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);
   va_end(args);

   const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
   if (dump_path)
      vtn_dump_shader(b, dump_path, "fail");

   longjmp(b->fail_jump, 1);
}

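/* Usage sketch (not part of the translator): pointing the dump path at a
 * writable directory, e.g.
 *
 *    MESA_SPIRV_FAIL_DUMP_PATH=/tmp ./my_vulkan_app   (hypothetical app)
 *
 * makes a failed parse write the offending binary to /tmp/fail-0.spirv
 * (the counter increments per dump) before longjmp'ing back out of the
 * parser via b->fail_jump.
 */
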
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}

static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);

   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(type);
      nir_load_const_instr *load =
         nir_load_const_instr_create(b->shader, num_components, bit_size);

      memcpy(load->value, constant->values,
             sizeof(nir_const_value) * num_components);

      nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
      val->def = &load->def;
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++) {
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                elem_type);
         }
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                elem_type);
         }
      }
   }

   return val;
}

struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->type->type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer:
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;

   default:
      vtn_fail("Invalid type for an SSA value");
   }
}

struct vtn_value *
vtn_push_ssa_value(struct vtn_builder *b, uint32_t value_id,
                   struct vtn_ssa_value *ssa)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);

   /* See vtn_create_ssa_value */
   vtn_fail_if(ssa->type != glsl_get_bare_type(type->type),
               "Type mismatch for SPIR-V SSA value");

   struct vtn_value *val;
   if (type->base_type == vtn_base_type_pointer) {
      val = vtn_push_pointer(b, value_id, vtn_pointer_from_ssa(b, ssa->def, type));
   } else {
      /* Don't trip the value_type_ssa check in vtn_push_value */
      val = vtn_push_value(b, value_id, vtn_value_type_invalid);
      val->value_type = vtn_value_type_ssa;
      val->ssa = ssa;
   }

   return val;
}

nir_ssa_def *
vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_ssa_value *ssa = vtn_ssa_value(b, value_id);
   vtn_fail_if(!glsl_type_is_vector_or_scalar(ssa->type),
               "Expected a vector or scalar type");
   return ssa->def;
}

struct vtn_value *
vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id, nir_ssa_def *def)
{
   /* Types for all SPIR-V SSA values are set as part of a pre-pass so the
    * type will be valid by the time we get here.
    */
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_fail_if(def->num_components != glsl_get_vector_elements(type->type) ||
               def->bit_size != glsl_get_bit_size(type->type),
               "Mismatch between NIR and SPIR-V type.");
   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);
   ssa->def = def;
   return vtn_push_ssa_value(b, value_id, ssa);
}

static nir_deref_instr *
vtn_get_image(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_image);
   return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
                               nir_var_uniform, type->glsl_image, 0);
}

static void
vtn_push_image(struct vtn_builder *b, uint32_t value_id,
               nir_deref_instr *deref)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_image);
   vtn_push_nir_ssa(b, value_id, &deref->dest.ssa);
}

static nir_deref_instr *
vtn_get_sampler(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampler);
   return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
                               nir_var_uniform, glsl_bare_sampler_type(), 0);
}

nir_ssa_def *
vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
                             struct vtn_sampled_image si)
{
   return nir_vec2(&b->nb, &si.image->dest.ssa, &si.sampler->dest.ssa);
}

static void
vtn_push_sampled_image(struct vtn_builder *b, uint32_t value_id,
                       struct vtn_sampled_image si)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampled_image);
   vtn_push_nir_ssa(b, value_id, vtn_sampled_image_to_nir_ssa(b, si));
}

static struct vtn_sampled_image
vtn_get_sampled_image(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampled_image);
   nir_ssa_def *si_vec2 = vtn_get_nir_ssa(b, value_id);

   struct vtn_sampled_image si = { NULL, };
   si.image = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 0),
                                   nir_var_uniform,
                                   type->image->glsl_image, 0);
   si.sampler = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 1),
                                     nir_var_uniform,
                                     glsl_bare_sampler_type(), 0);
   return si;
}

static const char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   /* From the SPIR-V spec:
    *
    *    "A string is interpreted as a nul-terminated stream of characters.
    *    The character set is Unicode in the UTF-8 encoding scheme. The UTF-8
    *    octets (8-bit bytes) are packed four per word, following the
    *    little-endian convention (i.e., the first octet is in the
    *    lowest-order 8 bits of the word). The final word contains the
    *    string’s nul-termination character (0), and all contents past the
    *    end of the string in the final word are padded with 0."
    *
    * On big-endian, we need to byte-swap.
    */
#if UTIL_ARCH_BIG_ENDIAN
   {
      uint32_t *copy = ralloc_array(b, uint32_t, word_count);
      for (unsigned i = 0; i < word_count; i++)
         copy[i] = bswap_32(words[i]);
      words = copy;
   }
#endif

   const char *str = (char *)words;
   const char *end = memchr(str, 0, word_count * 4);
   vtn_fail_if(end == NULL, "String is not null-terminated");

   if (words_used)
      *words_used = DIV_ROUND_UP(end - str + 1, sizeof(*words));

   return str;
}

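/* Illustrative decode of the rule above (not used by the translator): the
 * literal "abc" fits in one little-endian word, with the nul terminator in
 * the top octet.
 *
 *    const uint32_t words[] = { 0x00636261 };  // 'a'=0x61, 'b'=0x62, 'c'=0x63
 *    unsigned words_used;
 *    const char *s = vtn_string_literal(b, words, 1, &words_used);
 *    // s == "abc" and words_used == 1 == DIV_ROUND_UP(3 + 1, 4)
 */
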
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->spirv_offset = 0;
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   assert(w == end);
   return w;
}

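/* Sketch of the word layout consumed by the loop above: every SPIR-V
 * instruction begins with a word whose low 16 bits are the opcode and
 * whose high 16 bits are the total word count.  For example,
 *
 *    w[0] == 0x0004003b   // opcode 59 (OpVariable), count 4
 *
 * so the next instruction starts at w + 4.
 */
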
static bool
vtn_handle_non_semantic_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                    const uint32_t *w, unsigned count)
{
   /* Do nothing. */
   return true;
}

static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      const char *ext = vtn_string_literal(b, &w[2], count - 2, NULL);
      if (strcmp(ext, "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0)
                && (b->options && b->options->caps.amd_gcn_shader)) {
         val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_ballot") == 0)
                && (b->options && b->options->caps.amd_shader_ballot)) {
         val->ext_handler = vtn_handle_amd_shader_ballot_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
                && (b->options && b->options->caps.amd_trinary_minmax)) {
         val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_explicit_vertex_parameter") == 0)
                && (b->options && b->options->caps.amd_shader_explicit_vertex_parameter)) {
         val->ext_handler = vtn_handle_amd_shader_explicit_vertex_parameter_instruction;
      } else if (strcmp(ext, "OpenCL.std") == 0) {
         val->ext_handler = vtn_handle_opencl_instruction;
      } else if (strstr(ext, "NonSemantic.") == ext) {
         val->ext_handler = vtn_handle_non_semantic_instruction;
      } else {
         vtn_fail("Unsupported extension: %s", ext);
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      vtn_assert(handled);
      break;
   }

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
}

static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_fail_if(value->value_type != vtn_value_type_type ||
                     value->type->base_type != vtn_base_type_struct,
                     "OpMemberDecorate and OpGroupMemberDecorate are only "
                     "allowed on OpTypeStruct");
         /* This means we haven't recursed yet */
         assert(value == base_value);

         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;

         vtn_fail_if(member >= base_value->type->length,
                     "OpMemberDecorate specifies member %d but the "
                     "OpTypeStruct has only %u members",
                     member, base_value->type->length);
      } else {
         /* Not a decoration */
         assert(dec->scope == VTN_DEC_EXECUTION_MODE);
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}

/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value. If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}

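/* Minimal usage sketch with a hypothetical callback (not part of this
 * file):
 *
 *    static void
 *    count_builtins_cb(struct vtn_builder *b, struct vtn_value *val,
 *                      int member, const struct vtn_decoration *dec,
 *                      void *data)
 *    {
 *       if (dec->decoration == SpvDecorationBuiltIn)
 *          (*(unsigned *)data)++;
 *    }
 *
 *    unsigned num_builtins = 0;
 *    vtn_foreach_decoration(b, val, count_builtins_cb, &num_builtins);
 */
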
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}

void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
   case SpvOpExecutionMode:
   case SpvOpExecutionModeId: {
      struct vtn_value *val = vtn_untyped_value(b, target);

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
      case SpvOpDecorateId:
      case SpvOpDecorateString:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
      case SpvOpMemberDecorateString:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
                     "Member argument of OpMemberDecorate too large");
         break;
      case SpvOpExecutionMode:
      case SpvOpExecutionModeId:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->operands = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
            vtn_fail_if(dec->scope < 0, /* Check for overflow */
                        "Member argument of OpGroupMemberDecorate too large");
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}

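/* Illustrative word stream for the member case above:
 * "OpMemberDecorate %S 1 Offset 16" arrives as the five words
 *
 *    { 0x00050048, %S, 1, 35, 16 }   // (count 5 << 16) | opcode 72
 *
 * so after w += 2 the code reads member 1, then decoration 35
 * (SpvDecorationOffset), and leaves dec->operands pointing at { 16 }.
 */
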
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};

/**
 * Returns true if the given type contains a struct decorated Block or
 * BufferBlock
 */
bool
vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type)
{
   switch (type->base_type) {
   case vtn_base_type_array:
      return vtn_type_contains_block(b, type->array_element);
   case vtn_base_type_struct:
      if (type->block || type->buffer_block)
         return true;
      for (unsigned i = 0; i < type->length; i++) {
         if (vtn_type_contains_block(b, type->members[i]))
            return true;
      }
      return false;
   default:
      return false;
   }
}

/** Returns true if two types are "compatible", i.e. you can do an OpLoad,
 * OpStore, or OpCopyMemory between them without breaking anything.
 * Technically, the SPIR-V rules require the exact same type ID but this lets
 * us internally be a bit looser.
 */
bool
vtn_types_compatible(struct vtn_builder *b,
                     struct vtn_type *t1, struct vtn_type *t2)
{
   if (t1->id == t2->id)
      return true;

   if (t1->base_type != t2->base_type)
      return false;

   switch (t1->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      return t1->type == t2->type;

   case vtn_base_type_array:
      return t1->length == t2->length &&
             vtn_types_compatible(b, t1->array_element, t2->array_element);

   case vtn_base_type_pointer:
      return vtn_types_compatible(b, t1->deref, t2->deref);

   case vtn_base_type_struct:
      if (t1->length != t2->length)
         return false;

      for (unsigned i = 0; i < t1->length; i++) {
         if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))
            return false;
      }
      return true;

   case vtn_base_type_function:
      /* This case shouldn't get hit since you can't copy around function
       * types. Just require them to be identical.
       */
      return false;
   }

   vtn_fail("Invalid base type");
}

struct vtn_type *
vtn_type_without_array(struct vtn_type *type)
{
   while (type->base_type == vtn_base_type_array)
      type = type->array_element;
   return type;
}

/* Does a shallow copy of a vtn_type. */
static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}

static const struct glsl_type *
wrap_type_in_array(const struct glsl_type *type,
                   const struct glsl_type *array_type)
{
   if (!glsl_type_is_array(array_type))
      return type;

   const struct glsl_type *elem_type =
      wrap_type_in_array(type, glsl_get_array_element(array_type));
   return glsl_array_type(elem_type, glsl_get_length(array_type),
                          glsl_get_explicit_stride(array_type));
}

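/* Illustrative expansion of the recursion above: for a two-level array
 * uint[4][2] (outer length 4, inner length 2),
 *
 *    wrap_type_in_array(atomic_uint, uint[4][2])
 *       -> glsl_array_type(wrap_type_in_array(atomic_uint, uint[2]), 4, ...)
 *       -> glsl_array_type(atomic_uint[2], 4, ...)
 *
 * i.e. atomic_uint[4][2]: the same dimensions and explicit strides with the
 * element type swapped out.  vtn_type_get_nir_type() below uses this for
 * AtomicCounter variables.
 */
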
static bool
vtn_type_needs_explicit_layout(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   /* For OpenCL we never want to strip the info from the types, and it makes
    * type comparisons easier in later stages.
    */
   if (b->options->environment == NIR_SPIRV_OPENCL)
      return true;

   switch (mode) {
   case vtn_variable_mode_input:
   case vtn_variable_mode_output:
      /* Layout decorations kept because we need offsets for XFB arrays of
       * blocks.
       */
      return b->shader->info.has_transform_feedback_varyings;

   case vtn_variable_mode_ssbo:
   case vtn_variable_mode_phys_ssbo:
   case vtn_variable_mode_ubo:
      return true;

   default:
      return false;
   }
}

const struct glsl_type *
vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
                      enum vtn_variable_mode mode)
{
   if (mode == vtn_variable_mode_atomic_counter) {
      vtn_fail_if(glsl_without_array(type->type) != glsl_uint_type(),
                  "Variables in the AtomicCounter storage class should be "
                  "(possibly arrays of arrays of) uint.");
      return wrap_type_in_array(glsl_atomic_uint_type(), type->type);
   }

   if (mode == vtn_variable_mode_uniform) {
      switch (type->base_type) {
      case vtn_base_type_array: {
         const struct glsl_type *elem_type =
            vtn_type_get_nir_type(b, type->array_element, mode);

         return glsl_array_type(elem_type, type->length,
                                glsl_get_explicit_stride(type->type));
      }

      case vtn_base_type_struct: {
         bool need_new_struct = false;
         const uint32_t num_fields = type->length;
         NIR_VLA(struct glsl_struct_field, fields, num_fields);
         for (unsigned i = 0; i < num_fields; i++) {
            fields[i] = *glsl_get_struct_field_data(type->type, i);
            const struct glsl_type *field_nir_type =
               vtn_type_get_nir_type(b, type->members[i], mode);
            if (fields[i].type != field_nir_type) {
               fields[i].type = field_nir_type;
               need_new_struct = true;
            }
         }
         if (need_new_struct) {
            if (glsl_type_is_interface(type->type)) {
               return glsl_interface_type(fields, num_fields,
                                          /* packing */ 0, false,
                                          glsl_get_type_name(type->type));
            } else {
               return glsl_struct_type(fields, num_fields,
                                       glsl_get_type_name(type->type),
                                       glsl_struct_type_is_packed(type->type));
            }
         } else {
            /* No changes, just pass it on */
            return type->type;
         }
      }

      case vtn_base_type_image:
         return type->glsl_image;

      case vtn_base_type_sampler:
         return glsl_bare_sampler_type();

      case vtn_base_type_sampled_image:
         return type->image->glsl_image;

      default:
         return type->type;
      }
   }

   /* Layout decorations are allowed but ignored in certain conditions,
    * to allow SPIR-V generators to perform type deduplication. Discard
    * unnecessary ones when passing to NIR.
    */
   if (!vtn_type_needs_explicit_layout(b, mode))
      return glsl_get_bare_type(type->type);

   return type->type;
}

static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   vtn_assert(glsl_type_is_matrix(type->type));

   return type;
}

static void
vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
                            int member, enum gl_access_qualifier access)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   type->access |= access;
}

static void
array_stride_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *void_ctx)
{
   struct vtn_type *type = val->type;

   if (dec->decoration == SpvDecorationArrayStride) {
      if (vtn_type_contains_block(b, type)) {
         vtn_warn("The ArrayStride decoration cannot be applied to an array "
                  "type which contains a structure type decorated Block "
                  "or BufferBlock");
         /* Ignore the decoration */
      } else {
         vtn_fail_if(dec->operands[0] == 0, "ArrayStride must be non-zero");
         type->stride = dec->operands[0];
      }
   }
}

static void
struct_member_decoration_cb(struct vtn_builder *b,
                            UNUSED struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNonWritable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
      break;
   case SpvDecorationNonReadable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
      break;
   case SpvDecorationVolatile:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
      break;
   case SpvDecorationCoherent:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
      break;
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      ctx->fields[member].interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->operands[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->operands[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->operands[0];
      ctx->fields[member].offset = dec->operands[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here. Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         ctx->type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}

/** Chases the array type all the way down to the tail and rewrites the
 * glsl_types to be based off the tail's glsl_type.
 */
static void
vtn_array_type_rewrite_glsl_type(struct vtn_type *type)
{
   if (type->base_type != vtn_base_type_array)
      return;

   vtn_array_type_rewrite_glsl_type(type->array_element);

   type->type = glsl_array_type(type->array_element->type,
                                type->length, type->stride);
}

/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               UNUSED struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   vtn_fail_if(member < 0,
               "The MatrixStride decoration is only allowed on members "
               "of OpTypeStruct");
   vtn_fail_if(dec->operands[0] == 0, "MatrixStride must be non-zero");

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], true);
      mat_type->array_element->type = glsl_get_column_type(mat_type->type);
   } else {
      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], false);
   }

   /* Now that we've replaced the glsl_type with a properly strided matrix
    * type, rewrite the member type so that it's an array of the proper kind
    * of glsl_type.
    */
   vtn_array_type_rewrite_glsl_type(ctx->type->members[member]);
   ctx->fields[member].type = ctx->type->members[member]->type;
}

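/* Worked example (illustrative) of the row-major branch above: a float
 * mat2 member decorated RowMajor with MatrixStride 8.  The decorated
 * stride is the distance between rows, so it lands on array_element (the
 * column-vector type) while the matrix takes the vector's old component
 * stride:
 *
 *    before: mat_type->stride == 0, column stride == 4
 *    after:  mat_type->stride == 4, column stride == 8
 *
 * In memory the two floats of a row then sit 4 bytes apart and the rows
 * 8 bytes apart.
 */
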
static void
struct_block_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *ctx)
{
   if (member != -1)
      return;

   struct vtn_type *type = val->type;
   if (dec->decoration == SpvDecorationBlock)
      type->block = true;
   else if (dec->decoration == SpvDecorationBufferBlock)
      type->buffer_block = true;
}

static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, UNUSED void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1) {
      /* This should have been handled by OpTypeStruct */
      assert(val->type->base_type == vtn_base_type_struct);
      assert(member >= 0 && member < val->type->length);
      return;
   }

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      break;
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->block);
      break;
   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->buffer_block);
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationExplicitInterpAMD:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
   case SpvDecorationUserSemantic:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationStream:
      /* We don't need to do anything here: the stream is recorded when the
       * decoration is applied to a variable. If it is not on a struct
       * member, just check that the type is a struct.
       */
      vtn_assert(type->base_type == vtn_base_type_struct);
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}

static unsigned
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return PIPE_FORMAT_NONE;
   case SpvImageFormatRgba32f:      return PIPE_FORMAT_R32G32B32A32_FLOAT;
   case SpvImageFormatRgba16f:      return PIPE_FORMAT_R16G16B16A16_FLOAT;
   case SpvImageFormatR32f:         return PIPE_FORMAT_R32_FLOAT;
   case SpvImageFormatRgba8:        return PIPE_FORMAT_R8G8B8A8_UNORM;
   case SpvImageFormatRgba8Snorm:   return PIPE_FORMAT_R8G8B8A8_SNORM;
   case SpvImageFormatRg32f:        return PIPE_FORMAT_R32G32_FLOAT;
   case SpvImageFormatRg16f:        return PIPE_FORMAT_R16G16_FLOAT;
   case SpvImageFormatR11fG11fB10f: return PIPE_FORMAT_R11G11B10_FLOAT;
   case SpvImageFormatR16f:         return PIPE_FORMAT_R16_FLOAT;
   case SpvImageFormatRgba16:       return PIPE_FORMAT_R16G16B16A16_UNORM;
   case SpvImageFormatRgb10A2:      return PIPE_FORMAT_R10G10B10A2_UNORM;
   case SpvImageFormatRg16:         return PIPE_FORMAT_R16G16_UNORM;
   case SpvImageFormatRg8:          return PIPE_FORMAT_R8G8_UNORM;
   case SpvImageFormatR16:          return PIPE_FORMAT_R16_UNORM;
   case SpvImageFormatR8:           return PIPE_FORMAT_R8_UNORM;
   case SpvImageFormatRgba16Snorm:  return PIPE_FORMAT_R16G16B16A16_SNORM;
   case SpvImageFormatRg16Snorm:    return PIPE_FORMAT_R16G16_SNORM;
   case SpvImageFormatRg8Snorm:     return PIPE_FORMAT_R8G8_SNORM;
   case SpvImageFormatR16Snorm:     return PIPE_FORMAT_R16_SNORM;
   case SpvImageFormatR8Snorm:      return PIPE_FORMAT_R8_SNORM;
   case SpvImageFormatRgba32i:      return PIPE_FORMAT_R32G32B32A32_SINT;
   case SpvImageFormatRgba16i:      return PIPE_FORMAT_R16G16B16A16_SINT;
   case SpvImageFormatRgba8i:       return PIPE_FORMAT_R8G8B8A8_SINT;
   case SpvImageFormatR32i:         return PIPE_FORMAT_R32_SINT;
   case SpvImageFormatRg32i:        return PIPE_FORMAT_R32G32_SINT;
   case SpvImageFormatRg16i:        return PIPE_FORMAT_R16G16_SINT;
   case SpvImageFormatRg8i:         return PIPE_FORMAT_R8G8_SINT;
   case SpvImageFormatR16i:         return PIPE_FORMAT_R16_SINT;
   case SpvImageFormatR8i:          return PIPE_FORMAT_R8_SINT;
   case SpvImageFormatRgba32ui:     return PIPE_FORMAT_R32G32B32A32_UINT;
   case SpvImageFormatRgba16ui:     return PIPE_FORMAT_R16G16B16A16_UINT;
   case SpvImageFormatRgba8ui:      return PIPE_FORMAT_R8G8B8A8_UINT;
   case SpvImageFormatR32ui:        return PIPE_FORMAT_R32_UINT;
   case SpvImageFormatRgb10a2ui:    return PIPE_FORMAT_R10G10B10A2_UINT;
   case SpvImageFormatRg32ui:       return PIPE_FORMAT_R32G32_UINT;
   case SpvImageFormatRg16ui:       return PIPE_FORMAT_R16G16_UINT;
   case SpvImageFormatRg8ui:        return PIPE_FORMAT_R8G8_UINT;
   case SpvImageFormatR16ui:        return PIPE_FORMAT_R16_UINT;
   case SpvImageFormatR8ui:         return PIPE_FORMAT_R8_UINT;
   default:
      vtn_fail("Invalid image format: %s (%u)",
               spirv_imageformat_to_string(format), format);
   }
}

static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = NULL;

   /* In order to properly handle forward declarations, we have to defer
    * allocation for pointer types.
    */
   if (opcode != SpvOpTypePointer && opcode != SpvOpTypeForwardPointer) {
      val = vtn_push_value(b, w[1], vtn_value_type_type);
      vtn_fail_if(val->type != NULL,
                  "Only pointers can have forward declarations");
      val->type = rzalloc(b, struct vtn_type);
      val->type->id = w[1];
   }

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      val->type->length = 1;
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 64:
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
         break;
      case 32:
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
         break;
      case 16:
         val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         break;
      case 8:
         val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
         break;
      default:
         vtn_fail("Invalid int bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 16:
         val->type->type = glsl_float16_t_type();
         break;
      case 32:
         val->type->type = glsl_float_type();
         break;
      case 64:
         val->type->type = glsl_double_type();
         break;
      default:
         vtn_fail("Invalid float bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned elems = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_scalar,
                  "Base type for OpTypeVector must be a scalar");
      vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16),
                  "Invalid component count for OpTypeVector");

      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->length = elems;
      val->type->stride = glsl_type_is_boolean(val->type->type)
         ? 4 : glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned columns = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_vector,
                  "Base type for OpTypeMatrix must be a vector");
      vtn_fail_if(columns < 2 || columns > 4,
                  "Invalid column count for OpTypeMatrix");

      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      vtn_fail_if(glsl_type_is_error(val->type->type),
                  "Unsupported base type for OpTypeMatrix");
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element = vtn_get_type(b, w[2]);

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length = vtn_constant_uint(b, w[3]);
      }

      val->type->base_type = vtn_base_type_array;
      val->type->array_element = array_element;
      if (b->shader->info.stage == MESA_SHADER_KERNEL)
         val->type->stride = glsl_get_cl_size(array_element->type);

      vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
      val->type->type = glsl_array_type(array_element->type, val->type->length,
                                        val->type->stride);
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);
      val->type->packed = false;

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] = vtn_get_type(b, w[i + 2]);
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
            .offset = -1,
         };
      }

      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         unsigned offset = 0;
         for (unsigned i = 0; i < num_fields; i++) {
            offset = align(offset, glsl_get_cl_alignment(fields[i].type));
            fields[i].offset = offset;
            offset += glsl_get_cl_size(fields[i].type);
         }
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL);

      const char *name = val->name;

      if (val->type->block || val->type->buffer_block) {
         /* Packing will be ignored since types coming from SPIR-V are
          * explicitly laid out.
          */
         val->type->type = glsl_interface_type(fields, num_fields,
                                               /* packing */ 0, false,
                                               name ? name : "block");
      } else {
         val->type->type = glsl_struct_type(fields, num_fields,
                                            name ? name : "struct", false);
      }
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_get_type(b, w[2]);

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] = vtn_get_type(b, w[i + 3]);
      }
      break;
   }

   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer: {
      /* We can't blindly push the value because it might be a forward
       * declaration.
       */
      val = vtn_untyped_value(b, w[1]);

      SpvStorageClass storage_class = w[2];

      if (val->value_type == vtn_value_type_invalid) {
         val->value_type = vtn_value_type_type;
         val->type = rzalloc(b, struct vtn_type);
         val->type->id = w[1];
         val->type->base_type = vtn_base_type_pointer;
         val->type->storage_class = storage_class;

         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         enum vtn_variable_mode mode = vtn_storage_class_to_mode(
            b, storage_class, NULL, NULL);
         val->type->type = nir_address_format_to_glsl_type(
            vtn_mode_to_address_format(b, mode));
      } else {
         vtn_fail_if(val->type->storage_class != storage_class,
                     "The storage classes of an OpTypePointer and any "
                     "OpTypeForwardPointers that provide forward "
                     "declarations of it must match.");
      }

      if (opcode == SpvOpTypePointer) {
         vtn_fail_if(val->type->deref != NULL,
                     "While OpTypeForwardPointer can be used to provide a "
                     "forward declaration of a pointer, OpTypePointer can "
                     "only be used once for a given id.");

         val->type->deref = vtn_get_type(b, w[3]);

         /* Only certain storage classes use ArrayStride. The others (in
          * particular Workgroup) are expected to be laid out by the driver.
          */
         switch (storage_class) {
         case SpvStorageClassUniform:
         case SpvStorageClassPushConstant:
         case SpvStorageClassStorageBuffer:
         case SpvStorageClassPhysicalStorageBuffer:
            vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
            break;
         default:
            /* Nothing to do. */
            break;
         }

         if (b->physical_ptrs) {
            switch (storage_class) {
            case SpvStorageClassFunction:
            case SpvStorageClassWorkgroup:
            case SpvStorageClassCrossWorkgroup:
            case SpvStorageClassUniformConstant:
               val->type->stride = align(glsl_get_cl_size(val->type->deref->type),
                                         glsl_get_cl_alignment(val->type->deref->type));
               break;
            default:
               break;
            }
         }
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      /* Images are represented in NIR as a scalar SSA value that is the
       * result of a deref instruction. An OpLoad on an OpTypeImage pointer
       * from UniformConstant memory just takes the NIR deref from the pointer
       * and turns it into an SSA value.
       */
      val->type->type = nir_address_format_to_glsl_type(
         vtn_mode_to_address_format(b, vtn_variable_mode_function));

      const struct vtn_type *sampled_type = vtn_get_type(b, w[2]);
      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         vtn_fail_if(sampled_type->base_type != vtn_base_type_void,
                     "Sampled type of OpTypeImage must be void for kernels");
      } else {
         vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                     glsl_get_bit_size(sampled_type->type) != 32,
                     "Sampled type of OpTypeImage must be a 32-bit scalar");
      }

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         vtn_fail("Invalid SPIR-V image dimensionality: %s (%u)",
                  spirv_dim_to_string((SpvDim)w[3]), w[3]);
      }

      /* w[4]: as per Vulkan spec "Validation Rules within a Module",
       * the “Depth” operand of OpTypeImage is ignored.
       */
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else if (b->shader->info.stage == MESA_SHADER_KERNEL)
         /* Per the CL C spec: If no qualifier is provided, read_only is assumed. */
         val->type->access_qualifier = SpvAccessQualifierReadOnly;
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            vtn_fail("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(b, format);

      enum glsl_base_type sampled_base_type =
         glsl_get_base_type(sampled_type->type);
      if (sampled == 1) {
         val->type->glsl_image = glsl_sampler_type(dim, false, is_array,
                                                   sampled_base_type);
      } else if (sampled == 2) {
         val->type->glsl_image = glsl_image_type(dim, is_array,
                                                 sampled_base_type);
      } else if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         val->type->glsl_image = glsl_image_type(dim, is_array,
                                                 GLSL_TYPE_VOID);
      } else {
         vtn_fail("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage: {
      val->type->base_type = vtn_base_type_sampled_image;
      val->type->image = vtn_get_type(b, w[2]);

      /* Sampled images are represented in NIR as a vec2 SSA value where each
       * component is the result of a deref instruction. The first component
       * is the image and the second is the sampler. An OpLoad on an
       * OpTypeSampledImage pointer from UniformConstant memory just takes
       * the NIR deref from the pointer and duplicates it to both vector
       * components.
       */
      nir_address_format addr_format =
         vtn_mode_to_address_format(b, vtn_variable_mode_function);
      assert(nir_address_format_num_components(addr_format) == 1);
      unsigned bit_size = nir_address_format_bit_size(addr_format);
      assert(bit_size == 32 || bit_size == 64);

      enum glsl_base_type base_type =
         bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64;
      val->type->type = glsl_vector_type(base_type, 2);
      break;
   }

   case SpvOpTypeSampler:
      val->type->base_type = vtn_base_type_sampler;

      /* Samplers are represented in NIR as a scalar SSA value that is the
       * result of a deref instruction. An OpLoad on an OpTypeSampler pointer
       * from UniformConstant memory just takes the NIR deref from the pointer
       * and turns it into an SSA value.
       */
      val->type->type = nir_address_format_to_glsl_type(
         vtn_mode_to_address_format(b, vtn_variable_mode_function));
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);

   if (val->type->base_type == vtn_base_type_struct &&
       (val->type->block || val->type->buffer_block)) {
      for (unsigned i = 0; i < val->type->length; i++) {
         vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]),
                     "Block and BufferBlock decorations cannot decorate a "
                     "structure type that is nested at any level inside "
                     "another structure type decorated with Block or "
                     "BufferBlock.");
      }
   }
}

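/* Illustrative SPIR-V (assembly) exercising the forward-pointer path
 * handled above:
 *
 *            OpTypeForwardPointer %ptr PhysicalStorageBuffer
 *    %node = OpTypeStruct %ptr
 *    %ptr  = OpTypePointer PhysicalStorageBuffer %node
 *
 * The OpTypeForwardPointer creates the vtn_value with only a storage
 * class; the later OpTypePointer fills in the deref type and must name
 * the same storage class.
 */
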
1723 static nir_constant *
1724 vtn_null_constant(struct vtn_builder *b, struct vtn_type *type)
1725 {
1726 nir_constant *c = rzalloc(b, nir_constant);
1727
1728 switch (type->base_type) {
1729 case vtn_base_type_scalar:
1730 case vtn_base_type_vector:
1731 /* Nothing to do here. It's already initialized to zero */
1732 break;
1733
1734 case vtn_base_type_pointer: {
1735 enum vtn_variable_mode mode = vtn_storage_class_to_mode(
1736 b, type->storage_class, type->deref, NULL);
1737 nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
1738
1739 const nir_const_value *null_value = nir_address_format_null_value(addr_format);
1740 memcpy(c->values, null_value,
1741 sizeof(nir_const_value) * nir_address_format_num_components(addr_format));
1742 break;
1743 }
1744
1745 case vtn_base_type_void:
1746 case vtn_base_type_image:
1747 case vtn_base_type_sampler:
1748 case vtn_base_type_sampled_image:
1749 case vtn_base_type_function:
1750 /* For those we have to return something but it doesn't matter what. */
1751 break;
1752
1753 case vtn_base_type_matrix:
1754 case vtn_base_type_array:
1755 vtn_assert(type->length > 0);
1756 c->num_elements = type->length;
1757 c->elements = ralloc_array(b, nir_constant *, c->num_elements);
1758
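/* Every element of a null array or matrix has the same value, so build the
 * null constant once and share the pointer across all elements. */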
1759 c->elements[0] = vtn_null_constant(b, type->array_element);
1760 for (unsigned i = 1; i < c->num_elements; i++)
1761 c->elements[i] = c->elements[0];
1762 break;
1763
1764 case vtn_base_type_struct:
1765 c->num_elements = type->length;
1766 c->elements = ralloc_array(b, nir_constant *, c->num_elements);
1767 for (unsigned i = 0; i < c->num_elements; i++)
1768 c->elements[i] = vtn_null_constant(b, type->members[i]);
1769 break;
1770
1771 default:
1772 vtn_fail("Invalid type for null constant");
1773 }
1774
1775 return c;
1776 }
1777
1778 static void
1779 spec_constant_decoration_cb(struct vtn_builder *b, UNUSED struct vtn_value *val,
1780 ASSERTED int member,
1781 const struct vtn_decoration *dec, void *data)
1782 {
1783 vtn_assert(member == -1);
1784 if (dec->decoration != SpvDecorationSpecId)
1785 return;
1786
1787 nir_const_value *value = data;
1788 for (unsigned i = 0; i < b->num_specializations; i++) {
1789 if (b->specializations[i].id == dec->operands[0]) {
1790 *value = b->specializations[i].value;
1791 return;
1792 }
1793 }
1794 }
1795
1796 static void
1797 handle_workgroup_size_decoration_cb(struct vtn_builder *b,
1798 struct vtn_value *val,
1799 ASSERTED int member,
1800 const struct vtn_decoration *dec,
1801 UNUSED void *data)
1802 {
1803 vtn_assert(member == -1);
1804 if (dec->decoration != SpvDecorationBuiltIn ||
1805 dec->operands[0] != SpvBuiltInWorkgroupSize)
1806 return;
1807
1808 vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));
1809 b->workgroup_size_builtin = val;
1810 }
1811
1812 static void
1813 vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
1814 const uint32_t *w, unsigned count)
1815 {
1816 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
1817 val->constant = rzalloc(b, nir_constant);
1818 switch (opcode) {
1819 case SpvOpConstantTrue:
1820 case SpvOpConstantFalse:
1821 case SpvOpSpecConstantTrue:
1822 case SpvOpSpecConstantFalse: {
1823 vtn_fail_if(val->type->type != glsl_bool_type(),
1824 "Result type of %s must be OpTypeBool",
1825 spirv_op_to_string(opcode));
1826
1827 bool bval = (opcode == SpvOpConstantTrue ||
1828 opcode == SpvOpSpecConstantTrue);
1829
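/* Boolean specializations are handled as 32-bit words here, so apply the
 * substitution in u32 space and convert back to a NIR boolean below. */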
1830 nir_const_value u32val = nir_const_value_for_uint(bval, 32);
1831
1832 if (opcode == SpvOpSpecConstantTrue ||
1833 opcode == SpvOpSpecConstantFalse)
1834 vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32val);
1835
1836 val->constant->values[0].b = u32val.u32 != 0;
1837 break;
1838 }
1839
1840 case SpvOpConstant:
1841 case SpvOpSpecConstant: {
1842 vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
1843 "Result type of %s must be a scalar",
1844 spirv_op_to_string(opcode));
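/* SPIR-V packs literals of up to 32 bits into a single word, with narrower
 * values in the low-order bits; 64-bit literals span two words, low-order
 * word first, hence the single w[3] read and vtn_u64_literal() below. */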
1845 int bit_size = glsl_get_bit_size(val->type->type);
1846 switch (bit_size) {
1847 case 64:
1848 val->constant->values[0].u64 = vtn_u64_literal(&w[3]);
1849 break;
1850 case 32:
1851 val->constant->values[0].u32 = w[3];
1852 break;
1853 case 16:
1854 val->constant->values[0].u16 = w[3];
1855 break;
1856 case 8:
1857 val->constant->values[0].u8 = w[3];
1858 break;
1859 default:
1860 vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size);
1861 }
1862
1863 if (opcode == SpvOpSpecConstant)
1864 vtn_foreach_decoration(b, val, spec_constant_decoration_cb,
1865 &val->constant->values[0]);
1866 break;
1867 }
1868
1869 case SpvOpSpecConstantComposite:
1870 case SpvOpConstantComposite: {
1871 unsigned elem_count = count - 3;
1872 vtn_fail_if(elem_count != val->type->length,
1873 "%s has %u constituents, expected %u",
1874 spirv_op_to_string(opcode), elem_count, val->type->length);
1875
1876 nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
1877 for (unsigned i = 0; i < elem_count; i++) {
1878 struct vtn_value *val = vtn_untyped_value(b, w[i + 3]);
1879
1880 if (val->value_type == vtn_value_type_constant) {
1881 elems[i] = val->constant;
1882 } else {
1883 vtn_fail_if(val->value_type != vtn_value_type_undef,
1884 "only constants or undefs allowed for "
1885 "SpvOpConstantComposite");
1886 /* to make it easier, just insert a NULL constant for now */
1887 elems[i] = vtn_null_constant(b, val->type);
1888 }
1889 }
1890
1891 switch (val->type->base_type) {
1892 case vtn_base_type_vector: {
1893 assert(glsl_type_is_vector(val->type->type));
1894 for (unsigned i = 0; i < elem_count; i++)
1895 val->constant->values[i] = elems[i]->values[0];
1896 break;
1897 }
1898
1899 case vtn_base_type_matrix:
1900 case vtn_base_type_struct:
1901 case vtn_base_type_array:
1902 ralloc_steal(val->constant, elems);
1903 val->constant->num_elements = elem_count;
1904 val->constant->elements = elems;
1905 break;
1906
1907 default:
1908 vtn_fail("Result type of %s must be a composite type",
1909 spirv_op_to_string(opcode));
1910 }
1911 break;
1912 }
1913
1914 case SpvOpSpecConstantOp: {
1915 nir_const_value u32op = nir_const_value_for_uint(w[3], 32);
1916 vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32op);
1917 SpvOp opcode = u32op.u32;
1918 switch (opcode) {
1919 case SpvOpVectorShuffle: {
1920 struct vtn_value *v0 = &b->values[w[4]];
1921 struct vtn_value *v1 = &b->values[w[5]];
1922
1923 vtn_assert(v0->value_type == vtn_value_type_constant ||
1924 v0->value_type == vtn_value_type_undef);
1925 vtn_assert(v1->value_type == vtn_value_type_constant ||
1926 v1->value_type == vtn_value_type_undef);
1927
1928 unsigned len0 = glsl_get_vector_elements(v0->type->type);
1929 unsigned len1 = glsl_get_vector_elements(v1->type->type);
1930
1931 vtn_assert(len0 + len1 < 16);
1932
1933 unsigned bit_size = glsl_get_bit_size(val->type->type);
1934 unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
1935 unsigned bit_size1 = glsl_get_bit_size(v1->type->type);
1936
1937 vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
1938 (void)bit_size0; (void)bit_size1;
1939
1940 nir_const_value undef = { .u64 = 0xdeadbeefdeadbeef };
1941 nir_const_value combined[NIR_MAX_VEC_COMPONENTS * 2];
1942
1943 if (v0->value_type == vtn_value_type_constant) {
1944 for (unsigned i = 0; i < len0; i++)
1945 combined[i] = v0->constant->values[i];
1946 }
1947 if (v1->value_type == vtn_value_type_constant) {
1948 for (unsigned i = 0; i < len1; i++)
1949 combined[len0 + i] = v1->constant->values[i];
1950 }
1951
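/* Each Component literal selects from the concatenation
 * (v0.x, ..., v1.x, ...) gathered in combined[]; 0xFFFFFFFF marks a
 * component whose value is unused. */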
1952 for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
1953 uint32_t comp = w[i + 6];
1954 if (comp == (uint32_t)-1) {
1955 /* If component is not used, set the value to a known constant
1956 * to detect if it is wrongly used.
1957 */
1958 val->constant->values[j] = undef;
1959 } else {
1960 vtn_fail_if(comp >= len0 + len1,
1961 "All Component literals must either be FFFFFFFF "
1962 "or in [0, N - 1] (inclusive).");
1963 val->constant->values[j] = combined[comp];
1964 }
1965 }
1966 break;
1967 }
1968
1969 case SpvOpCompositeExtract:
1970 case SpvOpCompositeInsert: {
1971 struct vtn_value *comp;
1972 unsigned deref_start;
1973 struct nir_constant **c;
1974 if (opcode == SpvOpCompositeExtract) {
1975 comp = vtn_value(b, w[4], vtn_value_type_constant);
1976 deref_start = 5;
1977 c = &comp->constant;
1978 } else {
1979 comp = vtn_value(b, w[5], vtn_value_type_constant);
1980 deref_start = 6;
1981 val->constant = nir_constant_clone(comp->constant,
1982 (nir_variable *)b);
1983 c = &val->constant;
1984 }
1985
1986 int elem = -1;
1987 const struct vtn_type *type = comp->type;
1988 for (unsigned i = deref_start; i < count; i++) {
1989 vtn_fail_if(w[i] >= type->length,
1990 "%uth index of %s is %u but the type has only "
1991 "%u elements", i - deref_start,
1992 spirv_op_to_string(opcode), w[i], type->length);
1993
1994 switch (type->base_type) {
1995 case vtn_base_type_vector:
1996 elem = w[i];
1997 type = type->array_element;
1998 break;
1999
2000 case vtn_base_type_matrix:
2001 case vtn_base_type_array:
2002 c = &(*c)->elements[w[i]];
2003 type = type->array_element;
2004 break;
2005
2006 case vtn_base_type_struct:
2007 c = &(*c)->elements[w[i]];
2008 type = type->members[w[i]];
2009 break;
2010
2011 default:
2012 vtn_fail("%s must only index into composite types",
2013 spirv_op_to_string(opcode));
2014 }
2015 }
2016
2017 if (opcode == SpvOpCompositeExtract) {
2018 if (elem == -1) {
2019 val->constant = *c;
2020 } else {
2021 unsigned num_components = type->length;
2022 for (unsigned i = 0; i < num_components; i++)
2023 val->constant->values[i] = (*c)->values[elem + i];
2024 }
2025 } else {
2026 struct vtn_value *insert =
2027 vtn_value(b, w[4], vtn_value_type_constant);
2028 vtn_assert(insert->type == type);
2029 if (elem == -1) {
2030 *c = insert->constant;
2031 } else {
2032 unsigned num_components = type->length;
2033 for (unsigned i = 0; i < num_components; i++)
2034 (*c)->values[elem + i] = insert->constant->values[i];
2035 }
2036 }
2037 break;
2038 }
2039
2040 default: {
2041 bool swap;
2042 nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
2043 nir_alu_type src_alu_type = dst_alu_type;
2044 unsigned num_components = glsl_get_vector_elements(val->type->type);
2045 unsigned bit_size;
2046
2047 vtn_assert(count <= 7);
2048
2049 switch (opcode) {
2050 case SpvOpSConvert:
2051 case SpvOpFConvert:
2052 case SpvOpUConvert:
2053 /* We have a source in a conversion */
2054 src_alu_type =
2055 nir_get_nir_type_for_glsl_type(vtn_get_value_type(b, w[4])->type);
2056 /* We use the bitsize of the conversion source to evaluate the opcode later */
2057 bit_size = glsl_get_bit_size(vtn_get_value_type(b, w[4])->type);
2058 break;
2059 default:
2060 bit_size = glsl_get_bit_size(val->type->type);
2061 }
2062
2063 nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
2064 nir_alu_type_get_type_size(src_alu_type),
2065 nir_alu_type_get_type_size(dst_alu_type));
2066 nir_const_value src[3][NIR_MAX_VEC_COMPONENTS];
2067
2068 for (unsigned i = 0; i < count - 4; i++) {
2069 struct vtn_value *src_val =
2070 vtn_value(b, w[4 + i], vtn_value_type_constant);
2071
2072 /* If this is an unsized source, pull the bit size from the
2073 * source; otherwise, we'll use the bit size from the destination.
2074 */
2075 if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
2076 bit_size = glsl_get_bit_size(src_val->type->type);
2077
2078 unsigned src_comps = nir_op_infos[op].input_sizes[i] ?
2079 nir_op_infos[op].input_sizes[i] :
2080 num_components;
2081
2082 unsigned j = swap ? 1 - i : i;
2083 for (unsigned c = 0; c < src_comps; c++)
2084 src[j][c] = src_val->constant->values[c];
2085 }
2086
2087 /* fix up fixed size sources */
2088 switch (op) {
2089 case nir_op_ishl:
2090 case nir_op_ishr:
2091 case nir_op_ushr: {
2092 if (bit_size == 32)
2093 break;
2094 for (unsigned i = 0; i < num_components; ++i) {
2095 switch (bit_size) {
2096 case 64: src[1][i].u32 = src[1][i].u64; break;
2097 case 16: src[1][i].u32 = src[1][i].u16; break;
2098 case 8: src[1][i].u32 = src[1][i].u8; break;
2099 }
2100 }
2101 break;
2102 }
2103 default:
2104 break;
2105 }
2106
2107 nir_const_value *srcs[3] = {
2108 src[0], src[1], src[2],
2109 };
2110 nir_eval_const_opcode(op, val->constant->values,
2111 num_components, bit_size, srcs,
2112 b->shader->info.float_controls_execution_mode);
2113 break;
2114 } /* default */
2115 }
2116 break;
2117 }
2118
2119 case SpvOpConstantNull:
2120 val->constant = vtn_null_constant(b, val->type);
2121 break;
2122
2123 default:
2124 vtn_fail_with_opcode("Unhandled opcode", opcode);
2125 }
2126
2127 /* Now that we have the value, update the workgroup size if needed */
2128 vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
2129 }
2130
2131 static void
2132 vtn_split_barrier_semantics(struct vtn_builder *b,
2133 SpvMemorySemanticsMask semantics,
2134 SpvMemorySemanticsMask *before,
2135 SpvMemorySemanticsMask *after)
2136 {
2137 /* For memory semantics embedded in operations, we split them into up to
2138 * two barriers, to be added before and after the operation. This is less
2139 * strict than propagating them until the final backend stage, but still
2140 * results in correct execution.
2141 *
2142 * A further improvement would be to pipe this information (and use it) into
2143 * the next compiler layers, at the expense of making the handling of barriers
2144 * more complicated.
2145 */
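/* For example, an atomic carrying AcquireRelease | UniformMemory semantics
 * splits into
 *
 *    *before = Release | UniformMemory
 *    *after  = Acquire | UniformMemory
 *
 * so prior writes cannot be reordered past the operation and later reads
 * cannot be reordered before it.
 */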
2146
2147 *before = SpvMemorySemanticsMaskNone;
2148 *after = SpvMemorySemanticsMaskNone;
2149
2150 SpvMemorySemanticsMask order_semantics =
2151 semantics & (SpvMemorySemanticsAcquireMask |
2152 SpvMemorySemanticsReleaseMask |
2153 SpvMemorySemanticsAcquireReleaseMask |
2154 SpvMemorySemanticsSequentiallyConsistentMask);
2155
2156 if (util_bitcount(order_semantics) > 1) {
2157 /* Old GLSLang versions incorrectly set all the ordering bits. This was
2158 * fixed in commit c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of the glslang
2159 * repo, and has been in GLSLang since revision "SPIRV99.1321" (Jul-2016).
2160 */
2161 vtn_warn("Multiple memory ordering semantics specified, "
2162 "assuming AcquireRelease.");
2163 order_semantics = SpvMemorySemanticsAcquireReleaseMask;
2164 }
2165
2166 const SpvMemorySemanticsMask av_vis_semantics =
2167 semantics & (SpvMemorySemanticsMakeAvailableMask |
2168 SpvMemorySemanticsMakeVisibleMask);
2169
2170 const SpvMemorySemanticsMask storage_semantics =
2171 semantics & (SpvMemorySemanticsUniformMemoryMask |
2172 SpvMemorySemanticsSubgroupMemoryMask |
2173 SpvMemorySemanticsWorkgroupMemoryMask |
2174 SpvMemorySemanticsCrossWorkgroupMemoryMask |
2175 SpvMemorySemanticsAtomicCounterMemoryMask |
2176 SpvMemorySemanticsImageMemoryMask |
2177 SpvMemorySemanticsOutputMemoryMask);
2178
2179 const SpvMemorySemanticsMask other_semantics =
2180 semantics & ~(order_semantics | av_vis_semantics | storage_semantics |
2181 SpvMemorySemanticsVolatileMask);
2182
2183 if (other_semantics)
2184 vtn_warn("Ignoring unhandled memory semantics: %u\n", other_semantics);
2185
2186 /* SequentiallyConsistent is treated as AcquireRelease. */
2187
2188 /* The RELEASE barrier happens BEFORE the operation, and it is usually
2189 * associated with a Store. Write operations with matching semantics
2190 * will not be reordered after the Store.
2191 */
2192 if (order_semantics & (SpvMemorySemanticsReleaseMask |
2193 SpvMemorySemanticsAcquireReleaseMask |
2194 SpvMemorySemanticsSequentiallyConsistentMask)) {
2195 *before |= SpvMemorySemanticsReleaseMask | storage_semantics;
2196 }
2197
2198 /* The ACQUIRE barrier happens AFTER the operation, and it is usually
2199 * associated with a Load. Operations with matching semantics will not
2200 * be reordered before the Load.
2201 */
2202 if (order_semantics & (SpvMemorySemanticsAcquireMask |
2203 SpvMemorySemanticsAcquireReleaseMask |
2204 SpvMemorySemanticsSequentiallyConsistentMask)) {
2205 *after |= SpvMemorySemanticsAcquireMask | storage_semantics;
2206 }
2207
2208 if (av_vis_semantics & SpvMemorySemanticsMakeVisibleMask)
2209 *before |= SpvMemorySemanticsMakeVisibleMask | storage_semantics;
2210
2211 if (av_vis_semantics & SpvMemorySemanticsMakeAvailableMask)
2212 *after |= SpvMemorySemanticsMakeAvailableMask | storage_semantics;
2213 }
2214
2215 static nir_memory_semantics
2216 vtn_mem_semantics_to_nir_mem_semantics(struct vtn_builder *b,
2217 SpvMemorySemanticsMask semantics)
2218 {
2219 nir_memory_semantics nir_semantics = 0;
2220
2221 SpvMemorySemanticsMask order_semantics =
2222 semantics & (SpvMemorySemanticsAcquireMask |
2223 SpvMemorySemanticsReleaseMask |
2224 SpvMemorySemanticsAcquireReleaseMask |
2225 SpvMemorySemanticsSequentiallyConsistentMask);
2226
2227 if (util_bitcount(order_semantics) > 1) {
2228 /* Old GLSLang versions incorrectly set all the ordering bits. This was
2229 * fixed in commit c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of the glslang
2230 * repo, and has been in GLSLang since revision "SPIRV99.1321" (Jul-2016).
2231 */
2232 vtn_warn("Multiple memory ordering semantics bits specified, "
2233 "assuming AcquireRelease.");
2234 order_semantics = SpvMemorySemanticsAcquireReleaseMask;
2235 }
2236
2237 switch (order_semantics) {
2238 case 0:
2239 /* Not an ordering barrier. */
2240 break;
2241
2242 case SpvMemorySemanticsAcquireMask:
2243 nir_semantics = NIR_MEMORY_ACQUIRE;
2244 break;
2245
2246 case SpvMemorySemanticsReleaseMask:
2247 nir_semantics = NIR_MEMORY_RELEASE;
2248 break;
2249
2250 case SpvMemorySemanticsSequentiallyConsistentMask:
2251 /* Fall through. Treated as AcquireRelease in Vulkan. */
2252 case SpvMemorySemanticsAcquireReleaseMask:
2253 nir_semantics = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE;
2254 break;
2255
2256 default:
2257 unreachable("Invalid memory order semantics");
2258 }
2259
2260 if (semantics & SpvMemorySemanticsMakeAvailableMask) {
2261 vtn_fail_if(!b->options->caps.vk_memory_model,
2262 "To use MakeAvailable memory semantics the VulkanMemoryModel "
2263 "capability must be declared.");
2264 nir_semantics |= NIR_MEMORY_MAKE_AVAILABLE;
2265 }
2266
2267 if (semantics & SpvMemorySemanticsMakeVisibleMask) {
2268 vtn_fail_if(!b->options->caps.vk_memory_model,
2269 "To use MakeVisible memory semantics the VulkanMemoryModel "
2270 "capability must be declared.");
2271 nir_semantics |= NIR_MEMORY_MAKE_VISIBLE;
2272 }
2273
2274 return nir_semantics;
2275 }
2276
2277 static nir_variable_mode
2278 vtn_mem_sematics_to_nir_var_modes(struct vtn_builder *b,
2279 SpvMemorySemanticsMask semantics)
2280 {
2281 /* Vulkan Environment for SPIR-V says "SubgroupMemory, CrossWorkgroupMemory,
2282 * and AtomicCounterMemory are ignored".
2283 */
2284 semantics &= ~(SpvMemorySemanticsSubgroupMemoryMask |
2285 SpvMemorySemanticsCrossWorkgroupMemoryMask |
2286 SpvMemorySemanticsAtomicCounterMemoryMask);
2287
2288 /* TODO: Consider adding nir_var_mem_image mode to NIR so it can be used
2289 * for SpvMemorySemanticsImageMemoryMask.
2290 */
2291
2292 nir_variable_mode modes = 0;
2293 if (semantics & (SpvMemorySemanticsUniformMemoryMask |
2294 SpvMemorySemanticsImageMemoryMask)) {
2295 modes |= nir_var_uniform |
2296 nir_var_mem_ubo |
2297 nir_var_mem_ssbo |
2298 nir_var_mem_global;
2299 }
2300 if (semantics & SpvMemorySemanticsWorkgroupMemoryMask)
2301 modes |= nir_var_mem_shared;
2302 if (semantics & SpvMemorySemanticsOutputMemoryMask) {
2303 modes |= nir_var_shader_out;
2304 }
2305
2306 return modes;
2307 }
2308
2309 static nir_scope
2310 vtn_scope_to_nir_scope(struct vtn_builder *b, SpvScope scope)
2311 {
2312 nir_scope nir_scope;
2313 switch (scope) {
2314 case SpvScopeDevice:
2315 vtn_fail_if(b->options->caps.vk_memory_model &&
2316 !b->options->caps.vk_memory_model_device_scope,
2317 "If the Vulkan memory model is declared and any instruction "
2318 "uses Device scope, the VulkanMemoryModelDeviceScope "
2319 "capability must be declared.");
2320 nir_scope = NIR_SCOPE_DEVICE;
2321 break;
2322
2323 case SpvScopeQueueFamily:
2324 vtn_fail_if(!b->options->caps.vk_memory_model,
2325 "To use Queue Family scope, the VulkanMemoryModel capability "
2326 "must be declared.");
2327 nir_scope = NIR_SCOPE_QUEUE_FAMILY;
2328 break;
2329
2330 case SpvScopeWorkgroup:
2331 nir_scope = NIR_SCOPE_WORKGROUP;
2332 break;
2333
2334 case SpvScopeSubgroup:
2335 nir_scope = NIR_SCOPE_SUBGROUP;
2336 break;
2337
2338 case SpvScopeInvocation:
2339 nir_scope = NIR_SCOPE_INVOCATION;
2340 break;
2341
2342 default:
2343 vtn_fail("Invalid memory scope");
2344 }
2345
2346 return nir_scope;
2347 }
2348
2349 static void
2350 vtn_emit_scoped_control_barrier(struct vtn_builder *b, SpvScope exec_scope,
2351 SpvScope mem_scope,
2352 SpvMemorySemanticsMask semantics)
2353 {
2354 nir_memory_semantics nir_semantics =
2355 vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
2356 nir_variable_mode modes = vtn_mem_sematics_to_nir_var_modes(b, semantics);
2357 nir_scope nir_exec_scope = vtn_scope_to_nir_scope(b, exec_scope);
2358
2359 /* Memory semantics are optional for OpControlBarrier. */
2360 nir_scope nir_mem_scope;
2361 if (nir_semantics == 0 || modes == 0)
2362 nir_mem_scope = NIR_SCOPE_NONE;
2363 else
2364 nir_mem_scope = vtn_scope_to_nir_scope(b, mem_scope);
2365
2366 nir_scoped_barrier(&b->nb, nir_exec_scope, nir_mem_scope, nir_semantics, modes);
2367 }
2368
2369 static void
2370 vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
2371 SpvMemorySemanticsMask semantics)
2372 {
2373 nir_variable_mode modes = vtn_mem_sematics_to_nir_var_modes(b, semantics);
2374 nir_memory_semantics nir_semantics =
2375 vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
2376
2377 /* No barrier to add. */
2378 if (nir_semantics == 0 || modes == 0)
2379 return;
2380
2381 nir_scope nir_mem_scope = vtn_scope_to_nir_scope(b, scope);
2382 nir_scoped_barrier(&b->nb, NIR_SCOPE_NONE, nir_mem_scope, nir_semantics, modes);
2383 }
2384
2385 struct vtn_ssa_value *
2386 vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
2387 {
2388 /* Always use bare types for SSA values for a couple of reasons:
2389 *
2390 * 1. Code which emits deref chains should never listen to the explicit
2391 * layout information on the SSA value if any exists. If we've
2392 * accidentally been relying on this, we want to find those bugs.
2393 *
2394 * 2. We want to be able to quickly check that an SSA value being assigned
2395 * to a SPIR-V value has the right type. Using bare types everywhere
2396 * ensures that we can pointer-compare.
2397 */
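/* For example, a matrix type carrying an explicit std140 layout and its
 * bare counterpart resolve to the same interned glsl_type object, so a
 * plain pointer comparison is sufficient. */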
2398 struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
2399 val->type = glsl_get_bare_type(type);
2400
2401
2402 if (!glsl_type_is_vector_or_scalar(type)) {
2403 unsigned elems = glsl_get_length(val->type);
2404 val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
2405 if (glsl_type_is_array_or_matrix(type)) {
2406 const struct glsl_type *elem_type = glsl_get_array_element(type);
2407 for (unsigned i = 0; i < elems; i++)
2408 val->elems[i] = vtn_create_ssa_value(b, elem_type);
2409 } else {
2410 vtn_assert(glsl_type_is_struct_or_ifc(type));
2411 for (unsigned i = 0; i < elems; i++) {
2412 const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
2413 val->elems[i] = vtn_create_ssa_value(b, elem_type);
2414 }
2415 }
2416 }
2417
2418 return val;
2419 }
2420
2421 static nir_tex_src
2422 vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
2423 {
2424 nir_tex_src src;
2425 src.src = nir_src_for_ssa(vtn_get_nir_ssa(b, index));
2426 src.src_type = type;
2427 return src;
2428 }
2429
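/* Returns the index into w[] of the first argument for image operand "op":
 * one past the mask word, plus one slot for each argument-taking operand
 * whose bit lies below op, plus an extra slot for each two-argument operand
 * (Grad) below it.
 *
 * Worked example: with w[mask_idx] == Bias|Lod|Grad and op == Grad, the two
 * single-argument operands Bias and Lod precede it, so ddx is found at
 * w[mask_idx + 3] and ddy at w[mask_idx + 4].
 */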
2430 static uint32_t
2431 image_operand_arg(struct vtn_builder *b, const uint32_t *w, uint32_t count,
2432 uint32_t mask_idx, SpvImageOperandsMask op)
2433 {
2434 static const SpvImageOperandsMask ops_with_arg =
2435 SpvImageOperandsBiasMask |
2436 SpvImageOperandsLodMask |
2437 SpvImageOperandsGradMask |
2438 SpvImageOperandsConstOffsetMask |
2439 SpvImageOperandsOffsetMask |
2440 SpvImageOperandsConstOffsetsMask |
2441 SpvImageOperandsSampleMask |
2442 SpvImageOperandsMinLodMask |
2443 SpvImageOperandsMakeTexelAvailableMask |
2444 SpvImageOperandsMakeTexelVisibleMask;
2445
2446 assert(util_bitcount(op) == 1);
2447 assert(w[mask_idx] & op);
2448 assert(op & ops_with_arg);
2449
2450 uint32_t idx = util_bitcount(w[mask_idx] & (op - 1) & ops_with_arg) + 1;
2451
2452 /* Adjust indices for operands with two arguments. */
2453 static const SpvImageOperandsMask ops_with_two_args =
2454 SpvImageOperandsGradMask;
2455 idx += util_bitcount(w[mask_idx] & (op - 1) & ops_with_two_args);
2456
2457 idx += mask_idx;
2458
2459 vtn_fail_if(idx + (op & ops_with_two_args ? 1 : 0) >= count,
2460 "Image op claims to have %s but does not enough "
2461 "following operands", spirv_imageoperands_to_string(op));
2462
2463 return idx;
2464 }
2465
2466 static void
2467 non_uniform_decoration_cb(struct vtn_builder *b,
2468 struct vtn_value *val, int member,
2469 const struct vtn_decoration *dec, void *void_ctx)
2470 {
2471 enum gl_access_qualifier *access = void_ctx;
2472 switch (dec->decoration) {
2473 case SpvDecorationNonUniformEXT:
2474 *access |= ACCESS_NON_UNIFORM;
2475 break;
2476
2477 default:
2478 break;
2479 }
2480 }
2481
2482 static void
2483 vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
2484 const uint32_t *w, unsigned count)
2485 {
2486 struct vtn_type *ret_type = vtn_get_type(b, w[1]);
2487
2488 if (opcode == SpvOpSampledImage) {
2489 struct vtn_sampled_image si = {
2490 .image = vtn_get_image(b, w[3]),
2491 .sampler = vtn_get_sampler(b, w[4]),
2492 };
2493 vtn_push_sampled_image(b, w[2], si);
2494 return;
2495 } else if (opcode == SpvOpImage) {
2496 struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
2497 vtn_push_image(b, w[2], si.image);
2498 return;
2499 }
2500
2501 nir_deref_instr *image = NULL, *sampler = NULL;
2502 struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
2503 if (sampled_val->type->base_type == vtn_base_type_sampled_image) {
2504 struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
2505 image = si.image;
2506 sampler = si.sampler;
2507 } else {
2508 image = vtn_get_image(b, w[3]);
2509 }
2510
2511 const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image->type);
2512 const bool is_array = glsl_sampler_type_is_array(image->type);
2513 nir_alu_type dest_type = nir_type_invalid;
2514
2515 /* Figure out the base texture operation */
2516 nir_texop texop;
2517 switch (opcode) {
2518 case SpvOpImageSampleImplicitLod:
2519 case SpvOpImageSampleDrefImplicitLod:
2520 case SpvOpImageSampleProjImplicitLod:
2521 case SpvOpImageSampleProjDrefImplicitLod:
2522 texop = nir_texop_tex;
2523 break;
2524
2525 case SpvOpImageSampleExplicitLod:
2526 case SpvOpImageSampleDrefExplicitLod:
2527 case SpvOpImageSampleProjExplicitLod:
2528 case SpvOpImageSampleProjDrefExplicitLod:
2529 texop = nir_texop_txl;
2530 break;
2531
2532 case SpvOpImageFetch:
2533 if (sampler_dim == GLSL_SAMPLER_DIM_MS) {
2534 texop = nir_texop_txf_ms;
2535 } else {
2536 texop = nir_texop_txf;
2537 }
2538 break;
2539
2540 case SpvOpImageGather:
2541 case SpvOpImageDrefGather:
2542 texop = nir_texop_tg4;
2543 break;
2544
2545 case SpvOpImageQuerySizeLod:
2546 case SpvOpImageQuerySize:
2547 texop = nir_texop_txs;
2548 dest_type = nir_type_int;
2549 break;
2550
2551 case SpvOpImageQueryLod:
2552 texop = nir_texop_lod;
2553 dest_type = nir_type_float;
2554 break;
2555
2556 case SpvOpImageQueryLevels:
2557 texop = nir_texop_query_levels;
2558 dest_type = nir_type_int;
2559 break;
2560
2561 case SpvOpImageQuerySamples:
2562 texop = nir_texop_texture_samples;
2563 dest_type = nir_type_int;
2564 break;
2565
2566 case SpvOpFragmentFetchAMD:
2567 texop = nir_texop_fragment_fetch;
2568 break;
2569
2570 case SpvOpFragmentMaskFetchAMD:
2571 texop = nir_texop_fragment_mask_fetch;
2572 break;
2573
2574 default:
2575 vtn_fail_with_opcode("Unhandled opcode", opcode);
2576 }
2577
2578 nir_tex_src srcs[10]; /* 10 should be enough */
2579 nir_tex_src *p = srcs;
2580
2581 p->src = nir_src_for_ssa(&image->dest.ssa);
2582 p->src_type = nir_tex_src_texture_deref;
2583 p++;
2584
2585 switch (texop) {
2586 case nir_texop_tex:
2587 case nir_texop_txb:
2588 case nir_texop_txl:
2589 case nir_texop_txd:
2590 case nir_texop_tg4:
2591 case nir_texop_lod:
2592 vtn_fail_if(sampler == NULL,
2593 "%s requires an image of type OpTypeSampledImage",
2594 spirv_op_to_string(opcode));
2595 p->src = nir_src_for_ssa(&sampler->dest.ssa);
2596 p->src_type = nir_tex_src_sampler_deref;
2597 p++;
2598 break;
2599 case nir_texop_txf:
2600 case nir_texop_txf_ms:
2601 case nir_texop_txs:
2602 case nir_texop_query_levels:
2603 case nir_texop_texture_samples:
2604 case nir_texop_samples_identical:
2605 case nir_texop_fragment_fetch:
2606 case nir_texop_fragment_mask_fetch:
2607 /* These don't take a sampler */
2608 break;
2609 case nir_texop_txf_ms_fb:
2610 vtn_fail("unexpected nir_texop_txf_ms_fb");
2611 break;
2612 case nir_texop_txf_ms_mcs:
2613 vtn_fail("unexpected nir_texop_txf_ms_mcs");
2614 case nir_texop_tex_prefetch:
2615 vtn_fail("unexpected nir_texop_tex_prefetch");
2616 }
2617
2618 unsigned idx = 4;
2619
2620 struct nir_ssa_def *coord;
2621 unsigned coord_components;
2622 switch (opcode) {
2623 case SpvOpImageSampleImplicitLod:
2624 case SpvOpImageSampleExplicitLod:
2625 case SpvOpImageSampleDrefImplicitLod:
2626 case SpvOpImageSampleDrefExplicitLod:
2627 case SpvOpImageSampleProjImplicitLod:
2628 case SpvOpImageSampleProjExplicitLod:
2629 case SpvOpImageSampleProjDrefImplicitLod:
2630 case SpvOpImageSampleProjDrefExplicitLod:
2631 case SpvOpImageFetch:
2632 case SpvOpImageGather:
2633 case SpvOpImageDrefGather:
2634 case SpvOpImageQueryLod:
2635 case SpvOpFragmentFetchAMD:
2636 case SpvOpFragmentMaskFetchAMD: {
2637 /* All these types have the coordinate as their first real argument */
2638 coord_components = glsl_get_sampler_dim_coordinate_components(sampler_dim);
2639
2640 if (is_array && texop != nir_texop_lod)
2641 coord_components++;
2642
2643 struct vtn_ssa_value *coord_val = vtn_ssa_value(b, w[idx++]);
2644 coord = coord_val->def;
2645 p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
2646 (1 << coord_components) - 1));
2647
2648 /* OpenCL allows integer sampling coordinates */
2649 if (glsl_type_is_integer(coord_val->type) &&
2650 opcode == SpvOpImageSampleExplicitLod) {
2651 vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
2652 "Unless the Kernel capability is being used, the coordinate parameter "
2653 "OpImageSampleExplicitLod must be floating point.");
2654
2655 p->src = nir_src_for_ssa(nir_i2f32(&b->nb, p->src.ssa));
2656 }
2657
2658 p->src_type = nir_tex_src_coord;
2659 p++;
2660 break;
2661 }
2662
2663 default:
2664 coord = NULL;
2665 coord_components = 0;
2666 break;
2667 }
2668
2669 switch (opcode) {
2670 case SpvOpImageSampleProjImplicitLod:
2671 case SpvOpImageSampleProjExplicitLod:
2672 case SpvOpImageSampleProjDrefImplicitLod:
2673 case SpvOpImageSampleProjDrefExplicitLod:
2674 /* These have the projector as the last coordinate component */
2675 p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
2676 p->src_type = nir_tex_src_projector;
2677 p++;
2678 break;
2679
2680 default:
2681 break;
2682 }
2683
2684 bool is_shadow = false;
2685 unsigned gather_component = 0;
2686 switch (opcode) {
2687 case SpvOpImageSampleDrefImplicitLod:
2688 case SpvOpImageSampleDrefExplicitLod:
2689 case SpvOpImageSampleProjDrefImplicitLod:
2690 case SpvOpImageSampleProjDrefExplicitLod:
2691 case SpvOpImageDrefGather:
2692 /* These all have an explicit depth value as their next source */
2693 is_shadow = true;
2694 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
2695 break;
2696
2697 case SpvOpImageGather:
2698 /* This has a component as its next source */
2699 gather_component = vtn_constant_uint(b, w[idx++]);
2700 break;
2701
2702 default:
2703 break;
2704 }
2705
2706 /* For OpImageQuerySizeLod, we always have an LOD */
2707 if (opcode == SpvOpImageQuerySizeLod)
2708 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
2709
2710 /* For OpFragmentFetchAMD, we always have a multisample index */
2711 if (opcode == SpvOpFragmentFetchAMD)
2712 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
2713
2714 /* Now we need to handle some number of optional arguments */
2715 struct vtn_value *gather_offsets = NULL;
2716 if (idx < count) {
2717 uint32_t operands = w[idx];
2718
2719 if (operands & SpvImageOperandsBiasMask) {
2720 vtn_assert(texop == nir_texop_tex ||
2721 texop == nir_texop_tg4);
2722 if (texop == nir_texop_tex)
2723 texop = nir_texop_txb;
2724 uint32_t arg = image_operand_arg(b, w, count, idx,
2725 SpvImageOperandsBiasMask);
2726 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_bias);
2727 }
2728
2729 if (operands & SpvImageOperandsLodMask) {
2730 vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
2731 texop == nir_texop_txs || texop == nir_texop_tg4);
2732 uint32_t arg = image_operand_arg(b, w, count, idx,
2733 SpvImageOperandsLodMask);
2734 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_lod);
2735 }
2736
2737 if (operands & SpvImageOperandsGradMask) {
2738 vtn_assert(texop == nir_texop_txl);
2739 texop = nir_texop_txd;
2740 uint32_t arg = image_operand_arg(b, w, count, idx,
2741 SpvImageOperandsGradMask);
2742 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ddx);
2743 (*p++) = vtn_tex_src(b, w[arg + 1], nir_tex_src_ddy);
2744 }
2745
2746 vtn_fail_if(util_bitcount(operands & (SpvImageOperandsConstOffsetsMask |
2747 SpvImageOperandsOffsetMask |
2748 SpvImageOperandsConstOffsetMask)) > 1,
2749 "At most one of the ConstOffset, Offset, and ConstOffsets "
2750 "image operands can be used on a given instruction.");
2751
2752 if (operands & SpvImageOperandsOffsetMask) {
2753 uint32_t arg = image_operand_arg(b, w, count, idx,
2754 SpvImageOperandsOffsetMask);
2755 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
2756 }
2757
2758 if (operands & SpvImageOperandsConstOffsetMask) {
2759 uint32_t arg = image_operand_arg(b, w, count, idx,
2760 SpvImageOperandsConstOffsetMask);
2761 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
2762 }
2763
2764 if (operands & SpvImageOperandsConstOffsetsMask) {
2765 vtn_assert(texop == nir_texop_tg4);
2766 uint32_t arg = image_operand_arg(b, w, count, idx,
2767 SpvImageOperandsConstOffsetsMask);
2768 gather_offsets = vtn_value(b, w[arg], vtn_value_type_constant);
2769 }
2770
2771 if (operands & SpvImageOperandsSampleMask) {
2772 vtn_assert(texop == nir_texop_txf_ms);
2773 uint32_t arg = image_operand_arg(b, w, count, idx,
2774 SpvImageOperandsSampleMask);
2775 texop = nir_texop_txf_ms;
2776 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ms_index);
2777 }
2778
2779 if (operands & SpvImageOperandsMinLodMask) {
2780 vtn_assert(texop == nir_texop_tex ||
2781 texop == nir_texop_txb ||
2782 texop == nir_texop_txd);
2783 uint32_t arg = image_operand_arg(b, w, count, idx,
2784 SpvImageOperandsMinLodMask);
2785 (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_min_lod);
2786 }
2787 }
2788
2789 nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
2790 instr->op = texop;
2791
2792 memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
2793
2794 instr->coord_components = coord_components;
2795 instr->sampler_dim = sampler_dim;
2796 instr->is_array = is_array;
2797 instr->is_shadow = is_shadow;
2798 instr->is_new_style_shadow =
2799 is_shadow && glsl_get_components(ret_type->type) == 1;
2800 instr->component = gather_component;
2801
2802 /* The Vulkan spec says:
2803 *
2804 * "If an instruction loads from or stores to a resource (including
2805 * atomics and image instructions) and the resource descriptor being
2806 * accessed is not dynamically uniform, then the operand corresponding
2807 * to that resource (e.g. the pointer or sampled image operand) must be
2808 * decorated with NonUniform."
2809 *
2810 * It's very careful to specify that the exact operand must be decorated
2811 * NonUniform. The SPIR-V parser is not expected to chase through long
2812 * chains to find the NonUniform decoration. It's either right there or we
2813 * can assume it doesn't exist.
2814 */
2815 enum gl_access_qualifier access = 0;
2816 vtn_foreach_decoration(b, sampled_val, non_uniform_decoration_cb, &access);
2817
2818 if (image && (access & ACCESS_NON_UNIFORM))
2819 instr->texture_non_uniform = true;
2820
2821 if (sampler && (access & ACCESS_NON_UNIFORM))
2822 instr->sampler_non_uniform = true;
2823
2824 /* for non-query ops, get dest_type from SPIR-V return type */
2825 if (dest_type == nir_type_invalid) {
2826 /* the return type should match the image type, unless the image type is
2827 * VOID (CL image), in which case the return type dictates the sampler's
2828 * result type */
2829 enum glsl_base_type sampler_base =
2830 glsl_get_sampler_result_type(image->type);
2831 enum glsl_base_type ret_base = glsl_get_base_type(ret_type->type);
2832 vtn_fail_if(sampler_base != ret_base && sampler_base != GLSL_TYPE_VOID,
2833 "SPIR-V return type mismatches image type. This is only valid "
2834 "for untyped images (OpenCL).");
2835 switch (ret_base) {
2836 case GLSL_TYPE_FLOAT: dest_type = nir_type_float; break;
2837 case GLSL_TYPE_INT: dest_type = nir_type_int; break;
2838 case GLSL_TYPE_UINT: dest_type = nir_type_uint; break;
2839 case GLSL_TYPE_BOOL: dest_type = nir_type_bool; break;
2840 default:
2841 vtn_fail("Invalid base type for sampler result");
2842 }
2843 }
2844
2845 instr->dest_type = dest_type;
2846
2847 nir_ssa_dest_init(&instr->instr, &instr->dest,
2848 nir_tex_instr_dest_size(instr), 32, NULL);
2849
2850 vtn_assert(glsl_get_vector_elements(ret_type->type) ==
2851 nir_tex_instr_dest_size(instr));
2852
2853 if (gather_offsets) {
2854 vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array ||
2855 gather_offsets->type->length != 4,
2856 "ConstOffsets must be an array of size four of vectors "
2857 "of two integer components");
2858
2859 struct vtn_type *vec_type = gather_offsets->type->array_element;
2860 vtn_fail_if(vec_type->base_type != vtn_base_type_vector ||
2861 vec_type->length != 2 ||
2862 !glsl_type_is_integer(vec_type->type),
2863 "ConstOffsets must be an array of size four of vectors "
2864 "of two integer components");
2865
2866 unsigned bit_size = glsl_get_bit_size(vec_type->type);
2867 for (uint32_t i = 0; i < 4; i++) {
2868 const nir_const_value *cvec =
2869 gather_offsets->constant->elements[i]->values;
2870 for (uint32_t j = 0; j < 2; j++) {
2871 switch (bit_size) {
2872 case 8: instr->tg4_offsets[i][j] = cvec[j].i8; break;
2873 case 16: instr->tg4_offsets[i][j] = cvec[j].i16; break;
2874 case 32: instr->tg4_offsets[i][j] = cvec[j].i32; break;
2875 case 64: instr->tg4_offsets[i][j] = cvec[j].i64; break;
2876 default:
2877 vtn_fail("Unsupported bit size: %u", bit_size);
2878 }
2879 }
2880 }
2881 }
2882
2883 nir_builder_instr_insert(&b->nb, &instr->instr);
2884
2885 vtn_push_nir_ssa(b, w[2], &instr->dest.ssa);
2886 }
2887
2888 static void
2889 fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
2890 const uint32_t *w, nir_src *src)
2891 {
2892 switch (opcode) {
2893 case SpvOpAtomicIIncrement:
2894 src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
2895 break;
2896
2897 case SpvOpAtomicIDecrement:
2898 src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
2899 break;
2900
2901 case SpvOpAtomicISub:
2902 src[0] =
2903 nir_src_for_ssa(nir_ineg(&b->nb, vtn_get_nir_ssa(b, w[6])));
2904 break;
2905
2906 case SpvOpAtomicCompareExchange:
2907 case SpvOpAtomicCompareExchangeWeak:
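/* SPIR-V orders the operands Value (w[7]) then Comparator (w[8]), while
 * NIR's comp_swap atomics take the compare value first, hence the swap. */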
2908 src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[8]));
2909 src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[7]));
2910 break;
2911
2912 case SpvOpAtomicExchange:
2913 case SpvOpAtomicIAdd:
2914 case SpvOpAtomicSMin:
2915 case SpvOpAtomicUMin:
2916 case SpvOpAtomicSMax:
2917 case SpvOpAtomicUMax:
2918 case SpvOpAtomicAnd:
2919 case SpvOpAtomicOr:
2920 case SpvOpAtomicXor:
2921 case SpvOpAtomicFAddEXT:
2922 src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[6]));
2923 break;
2924
2925 default:
2926 vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
2927 }
2928 }
2929
2930 static nir_ssa_def *
2931 get_image_coord(struct vtn_builder *b, uint32_t value)
2932 {
2933 nir_ssa_def *coord = vtn_get_nir_ssa(b, value);
2934
2935 /* The image_load_store intrinsics assume a 4-dim coordinate */
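/* A 2-component (x, y) coordinate, for example, pads to (x, y, y, y):
 * MIN2 below yields the swizzle {0, 1, 1, 1}. */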
2936 unsigned swizzle[4];
2937 for (unsigned i = 0; i < 4; i++)
2938 swizzle[i] = MIN2(i, coord->num_components - 1);
2939
2940 return nir_swizzle(&b->nb, coord, swizzle, 4);
2941 }
2942
2943 static nir_ssa_def *
2944 expand_to_vec4(nir_builder *b, nir_ssa_def *value)
2945 {
2946 if (value->num_components == 4)
2947 return value;
2948
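/* Pad with component 0 rather than repeating the last component; a vec2
 * value (x, y) expands to (x, y, x, x). */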
2949 unsigned swiz[4];
2950 for (unsigned i = 0; i < 4; i++)
2951 swiz[i] = i < value->num_components ? i : 0;
2952 return nir_swizzle(b, value, swiz, 4);
2953 }
2954
2955 static void
2956 vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
2957 const uint32_t *w, unsigned count)
2958 {
2959 /* Just get this one out of the way */
2960 if (opcode == SpvOpImageTexelPointer) {
2961 struct vtn_value *val =
2962 vtn_push_value(b, w[2], vtn_value_type_image_pointer);
2963 val->image = ralloc(b, struct vtn_image_pointer);
2964
2965 val->image->image = vtn_nir_deref(b, w[3]);
2966 val->image->coord = get_image_coord(b, w[4]);
2967 val->image->sample = vtn_get_nir_ssa(b, w[5]);
2968 val->image->lod = nir_imm_int(&b->nb, 0);
2969 return;
2970 }
2971
2972 struct vtn_image_pointer image;
2973 SpvScope scope = SpvScopeInvocation;
2974 SpvMemorySemanticsMask semantics = 0;
2975
2976 enum gl_access_qualifier access = 0;
2977
2978 struct vtn_value *res_val;
2979 switch (opcode) {
2980 case SpvOpAtomicExchange:
2981 case SpvOpAtomicCompareExchange:
2982 case SpvOpAtomicCompareExchangeWeak:
2983 case SpvOpAtomicIIncrement:
2984 case SpvOpAtomicIDecrement:
2985 case SpvOpAtomicIAdd:
2986 case SpvOpAtomicISub:
2987 case SpvOpAtomicLoad:
2988 case SpvOpAtomicSMin:
2989 case SpvOpAtomicUMin:
2990 case SpvOpAtomicSMax:
2991 case SpvOpAtomicUMax:
2992 case SpvOpAtomicAnd:
2993 case SpvOpAtomicOr:
2994 case SpvOpAtomicXor:
2995 case SpvOpAtomicFAddEXT:
2996 res_val = vtn_value(b, w[3], vtn_value_type_image_pointer);
2997 image = *res_val->image;
2998 scope = vtn_constant_uint(b, w[4]);
2999 semantics = vtn_constant_uint(b, w[5]);
3000 access |= ACCESS_COHERENT;
3001 break;
3002
3003 case SpvOpAtomicStore:
3004 res_val = vtn_value(b, w[1], vtn_value_type_image_pointer);
3005 image = *res_val->image;
3006 scope = vtn_constant_uint(b, w[2]);
3007 semantics = vtn_constant_uint(b, w[3]);
3008 access |= ACCESS_COHERENT;
3009 break;
3010
3011 case SpvOpImageQuerySizeLod:
3012 res_val = vtn_untyped_value(b, w[3]);
3013 image.image = vtn_get_image(b, w[3]);
3014 image.coord = NULL;
3015 image.sample = NULL;
3016 image.lod = vtn_ssa_value(b, w[4])->def;
3017 break;
3018
3019 case SpvOpImageQuerySize:
3020 res_val = vtn_untyped_value(b, w[3]);
3021 image.image = vtn_get_image(b, w[3]);
3022 image.coord = NULL;
3023 image.sample = NULL;
3024 image.lod = NULL;
3025 break;
3026
3027 case SpvOpImageQueryFormat:
3028 case SpvOpImageQueryOrder:
3029 res_val = vtn_untyped_value(b, w[3]);
3030 image.image = vtn_get_image(b, w[3]);
3031 image.coord = NULL;
3032 image.sample = NULL;
3033 image.lod = NULL;
3034 break;
3035
3036 case SpvOpImageRead: {
3037 res_val = vtn_untyped_value(b, w[3]);
3038 image.image = vtn_get_image(b, w[3]);
3039 image.coord = get_image_coord(b, w[4]);
3040
3041 const SpvImageOperandsMask operands =
3042 count > 5 ? w[5] : SpvImageOperandsMaskNone;
3043
3044 if (operands & SpvImageOperandsSampleMask) {
3045 uint32_t arg = image_operand_arg(b, w, count, 5,
3046 SpvImageOperandsSampleMask);
3047 image.sample = vtn_get_nir_ssa(b, w[arg]);
3048 } else {
3049 image.sample = nir_ssa_undef(&b->nb, 1, 32);
3050 }
3051
3052 if (operands & SpvImageOperandsMakeTexelVisibleMask) {
3053 vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
3054 "MakeTexelVisible requires NonPrivateTexel to also be set.");
3055 uint32_t arg = image_operand_arg(b, w, count, 5,
3056 SpvImageOperandsMakeTexelVisibleMask);
3057 semantics = SpvMemorySemanticsMakeVisibleMask;
3058 scope = vtn_constant_uint(b, w[arg]);
3059 }
3060
3061 if (operands & SpvImageOperandsLodMask) {
3062 uint32_t arg = image_operand_arg(b, w, count, 5,
3063 SpvImageOperandsLodMask);
3064 image.lod = vtn_get_nir_ssa(b, w[arg]);
3065 } else {
3066 image.lod = nir_imm_int(&b->nb, 0);
3067 }
3068
3069 if (operands & SpvImageOperandsVolatileTexelMask)
3070 access |= ACCESS_VOLATILE;
3071
3072 break;
3073 }
3074
3075 case SpvOpImageWrite: {
3076 res_val = vtn_untyped_value(b, w[1]);
3077 image.image = vtn_get_image(b, w[1]);
3078 image.coord = get_image_coord(b, w[2]);
3079
3080 /* texel = w[3] */
3081
3082 const SpvImageOperandsMask operands =
3083 count > 4 ? w[4] : SpvImageOperandsMaskNone;
3084
3085 if (operands & SpvImageOperandsSampleMask) {
3086 uint32_t arg = image_operand_arg(b, w, count, 4,
3087 SpvImageOperandsSampleMask);
3088 image.sample = vtn_get_nir_ssa(b, w[arg]);
3089 } else {
3090 image.sample = nir_ssa_undef(&b->nb, 1, 32);
3091 }
3092
3093 if (operands & SpvImageOperandsMakeTexelAvailableMask) {
3094 vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
3095 "MakeTexelAvailable requires NonPrivateTexel to also be set.");
3096 uint32_t arg = image_operand_arg(b, w, count, 4,
3097 SpvImageOperandsMakeTexelAvailableMask);
3098 semantics = SpvMemorySemanticsMakeAvailableMask;
3099 scope = vtn_constant_uint(b, w[arg]);
3100 }
3101
3102 if (operands & SpvImageOperandsLodMask) {