Merge commit mesa-public/master into vulkan
[mesa.git] / src / compiler / nir / spirv / spirv_to_nir.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jason Ekstrand (jason@jlekstrand.net)
25 *
26 */
27
28 #include "vtn_private.h"
29 #include "nir/nir_vla.h"
30 #include "nir/nir_control_flow.h"
31 #include "nir/nir_constant_expressions.h"
32
33 static struct vtn_ssa_value *
34 vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
35 {
36 struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
37 val->type = type;
38
39 if (glsl_type_is_vector_or_scalar(type)) {
40 unsigned num_components = glsl_get_vector_elements(val->type);
41 nir_ssa_undef_instr *undef =
42 nir_ssa_undef_instr_create(b->shader, num_components);
43
44 nir_instr_insert_before_cf_list(&b->impl->body, &undef->instr);
45 val->def = &undef->def;
46 } else {
47 unsigned elems = glsl_get_length(val->type);
48 val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
49 if (glsl_type_is_matrix(type)) {
50 const struct glsl_type *elem_type =
51 glsl_vector_type(glsl_get_base_type(type),
52 glsl_get_vector_elements(type));
53
54 for (unsigned i = 0; i < elems; i++)
55 val->elems[i] = vtn_undef_ssa_value(b, elem_type);
56 } else if (glsl_type_is_array(type)) {
57 const struct glsl_type *elem_type = glsl_get_array_element(type);
58 for (unsigned i = 0; i < elems; i++)
59 val->elems[i] = vtn_undef_ssa_value(b, elem_type);
60 } else {
61 for (unsigned i = 0; i < elems; i++) {
62 const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
63 val->elems[i] = vtn_undef_ssa_value(b, elem_type);
64 }
65 }
66 }
67
68 return val;
69 }
70
71 static struct vtn_ssa_value *
72 vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
73 const struct glsl_type *type)
74 {
75 struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
76
77 if (entry)
78 return entry->data;
79
80 struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
81 val->type = type;
82
83 switch (glsl_get_base_type(type)) {
84 case GLSL_TYPE_INT:
85 case GLSL_TYPE_UINT:
86 case GLSL_TYPE_BOOL:
87 case GLSL_TYPE_FLOAT:
88 case GLSL_TYPE_DOUBLE:
89 if (glsl_type_is_vector_or_scalar(type)) {
90 unsigned num_components = glsl_get_vector_elements(val->type);
91 nir_load_const_instr *load =
92 nir_load_const_instr_create(b->shader, num_components);
93
94 for (unsigned i = 0; i < num_components; i++)
95 load->value.u[i] = constant->value.u[i];
96
97 nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
98 val->def = &load->def;
99 } else {
100 assert(glsl_type_is_matrix(type));
101 unsigned rows = glsl_get_vector_elements(val->type);
102 unsigned columns = glsl_get_matrix_columns(val->type);
103 val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);
104
105 for (unsigned i = 0; i < columns; i++) {
106 struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
107 col_val->type = glsl_get_column_type(val->type);
108 nir_load_const_instr *load =
109 nir_load_const_instr_create(b->shader, rows);
110
111 for (unsigned j = 0; j < rows; j++)
112 load->value.u[j] = constant->value.u[rows * i + j];
113
114 nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
115 col_val->def = &load->def;
116
117 val->elems[i] = col_val;
118 }
119 }
120 break;
121
122 case GLSL_TYPE_ARRAY: {
123 unsigned elems = glsl_get_length(val->type);
124 val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
125 const struct glsl_type *elem_type = glsl_get_array_element(val->type);
126 for (unsigned i = 0; i < elems; i++)
127 val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
128 elem_type);
129 break;
130 }
131
132 case GLSL_TYPE_STRUCT: {
133 unsigned elems = glsl_get_length(val->type);
134 val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
135 for (unsigned i = 0; i < elems; i++) {
136 const struct glsl_type *elem_type =
137 glsl_get_struct_field(val->type, i);
138 val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
139 elem_type);
140 }
141 break;
142 }
143
144 default:
145 unreachable("bad constant type");
146 }
147
148 return val;
149 }
150
151 struct vtn_ssa_value *
152 vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
153 {
154 struct vtn_value *val = vtn_untyped_value(b, value_id);
155 switch (val->value_type) {
156 case vtn_value_type_undef:
157 return vtn_undef_ssa_value(b, val->type->type);
158
159 case vtn_value_type_constant:
160 return vtn_const_ssa_value(b, val->constant, val->const_type);
161
162 case vtn_value_type_ssa:
163 return val->ssa;
164
165 case vtn_value_type_access_chain:
166 /* This is needed for function parameters */
167 return vtn_variable_load(b, val->access_chain);
168
169 default:
170 unreachable("Invalid type for an SSA value");
171 }
172 }
173
174 static char *
175 vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
176 unsigned word_count, unsigned *words_used)
177 {
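/* SPIR-V literal strings are nul-terminated UTF-8 packed into 32-bit words,
* so the bytes can be copied straight out of the word stream.
*/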
178 char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
179 if (words_used) {
180 /* Amount of space taken by the string (including the null) */
181 unsigned len = strlen(dup) + 1;
182 *words_used = DIV_ROUND_UP(len, sizeof(*words));
183 }
184 return dup;
185 }
186
187 const uint32_t *
188 vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
189 const uint32_t *end, vtn_instruction_handler handler)
190 {
191 b->file = NULL;
192 b->line = -1;
193 b->col = -1;
194
195 const uint32_t *w = start;
196 while (w < end) {
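/* The first word of every SPIR-V instruction packs the opcode in the low
* 16 bits and the total word count (including this word) in the high 16
* bits.
*/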
197 SpvOp opcode = w[0] & SpvOpCodeMask;
198 unsigned count = w[0] >> SpvWordCountShift;
199 assert(count >= 1 && w + count <= end);
200
201 switch (opcode) {
202 case SpvOpNop:
203 break; /* Do nothing */
204
205 case SpvOpLine:
206 b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
207 b->line = w[2];
208 b->col = w[3];
209 break;
210
211 case SpvOpNoLine:
212 b->file = NULL;
213 b->line = -1;
214 b->col = -1;
215 break;
216
217 default:
218 if (!handler(b, opcode, w, count))
219 return w;
220 break;
221 }
222
223 w += count;
224 }
225 assert(w == end);
226 return w;
227 }
228
229 static void
230 vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
231 const uint32_t *w, unsigned count)
232 {
233 switch (opcode) {
234 case SpvOpExtInstImport: {
235 struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
236 if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
237 val->ext_handler = vtn_handle_glsl450_instruction;
238 } else {
239 assert(!"Unsupported extension");
240 }
241 break;
242 }
243
244 case SpvOpExtInst: {
245 struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
246 bool handled = val->ext_handler(b, w[4], w, count);
247 (void)handled;
248 assert(handled);
249 break;
250 }
251
252 default:
253 unreachable("Unhandled opcode");
254 }
255 }
256
257 static void
258 _foreach_decoration_helper(struct vtn_builder *b,
259 struct vtn_value *base_value,
260 int parent_member,
261 struct vtn_value *value,
262 vtn_decoration_foreach_cb cb, void *data)
263 {
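/* Each decoration is recorded with a scope: VTN_DEC_DECORATION applies to
* the whole value, VTN_DEC_STRUCT_MEMBER0 + n applies to struct member n,
* and VTN_DEC_EXECUTION_MODE entries are handled separately and skipped here.
*/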
264 for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
265 int member;
266 if (dec->scope == VTN_DEC_DECORATION) {
267 member = parent_member;
268 } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
269 assert(parent_member == -1);
270 member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
271 } else {
272 /* Not a decoration */
273 continue;
274 }
275
276 if (dec->group) {
277 assert(dec->group->value_type == vtn_value_type_decoration_group);
278 _foreach_decoration_helper(b, base_value, member, dec->group,
279 cb, data);
280 } else {
281 cb(b, base_value, member, dec, data);
282 }
283 }
284 }
285
286 /** Iterates (recursively if needed) over all of the decorations on a value
287 *
288 * This function iterates over all of the decorations applied to a given
289 * value. If it encounters a decoration group, it recurses into the group
290 * and iterates over all of those decorations as well.
291 */
292 void
293 vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
294 vtn_decoration_foreach_cb cb, void *data)
295 {
296 _foreach_decoration_helper(b, value, -1, value, cb, data);
297 }
298
299 void
300 vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
301 vtn_execution_mode_foreach_cb cb, void *data)
302 {
303 for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
304 if (dec->scope != VTN_DEC_EXECUTION_MODE)
305 continue;
306
307 assert(dec->group == NULL);
308 cb(b, value, dec, data);
309 }
310 }
311
312 static void
313 vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
314 const uint32_t *w, unsigned count)
315 {
316 const uint32_t *w_end = w + count;
317 const uint32_t target = w[1];
318 w += 2;
319
320 switch (opcode) {
321 case SpvOpDecorationGroup:
322 vtn_push_value(b, target, vtn_value_type_decoration_group);
323 break;
324
325 case SpvOpDecorate:
326 case SpvOpMemberDecorate:
327 case SpvOpExecutionMode: {
328 struct vtn_value *val = &b->values[target];
329
330 struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
331 switch (opcode) {
332 case SpvOpDecorate:
333 dec->scope = VTN_DEC_DECORATION;
334 break;
335 case SpvOpMemberDecorate:
336 dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
337 break;
338 case SpvOpExecutionMode:
339 dec->scope = VTN_DEC_EXECUTION_MODE;
340 break;
341 default:
342 unreachable("Invalid decoration opcode");
343 }
344 dec->decoration = *(w++);
345 dec->literals = w;
346
347 /* Link into the list */
348 dec->next = val->decoration;
349 val->decoration = dec;
350 break;
351 }
352
353 case SpvOpGroupMemberDecorate:
354 case SpvOpGroupDecorate: {
355 struct vtn_value *group =
356 vtn_value(b, target, vtn_value_type_decoration_group);
357
358 for (; w < w_end; w++) {
359 struct vtn_value *val = vtn_untyped_value(b, *w);
360 struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
361
362 dec->group = group;
363 if (opcode == SpvOpGroupDecorate) {
364 dec->scope = VTN_DEC_DECORATION;
365 } else {
366 dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
367 }
368
369 /* Link into the list */
370 dec->next = val->decoration;
371 val->decoration = dec;
372 }
373 break;
374 }
375
376 default:
377 unreachable("Unhandled opcode");
378 }
379 }
380
381 struct member_decoration_ctx {
382 struct glsl_struct_field *fields;
383 struct vtn_type *type;
384 };
385
386 /* does a shallow copy of a vtn_type */
387
388 static struct vtn_type *
389 vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
390 {
391 struct vtn_type *dest = ralloc(b, struct vtn_type);
392 dest->type = src->type;
393 dest->is_builtin = src->is_builtin;
394 if (src->is_builtin)
395 dest->builtin = src->builtin;
396
397 if (!glsl_type_is_scalar(src->type)) {
398 switch (glsl_get_base_type(src->type)) {
399 case GLSL_TYPE_INT:
400 case GLSL_TYPE_UINT:
401 case GLSL_TYPE_BOOL:
402 case GLSL_TYPE_FLOAT:
403 case GLSL_TYPE_DOUBLE:
404 case GLSL_TYPE_ARRAY:
405 dest->row_major = src->row_major;
406 dest->stride = src->stride;
407 dest->array_element = src->array_element;
408 break;
409
410 case GLSL_TYPE_STRUCT: {
411 unsigned elems = glsl_get_length(src->type);
412
413 dest->members = ralloc_array(b, struct vtn_type *, elems);
414 memcpy(dest->members, src->members, elems * sizeof(struct vtn_type *));
415
416 dest->offsets = ralloc_array(b, unsigned, elems);
417 memcpy(dest->offsets, src->offsets, elems * sizeof(unsigned));
418 break;
419 }
420
421 default:
422 unreachable("unhandled type");
423 }
424 }
425
426 return dest;
427 }
428
429 static struct vtn_type *
430 mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
431 {
432 type->members[member] = vtn_type_copy(b, type->members[member]);
433 type = type->members[member];
434
435 /* We may have an array of matrices.... Oh, joy! */
436 while (glsl_type_is_array(type->type)) {
437 type->array_element = vtn_type_copy(b, type->array_element);
438 type = type->array_element;
439 }
440
441 assert(glsl_type_is_matrix(type->type));
442
443 return type;
444 }
445
446 static void
447 struct_member_decoration_cb(struct vtn_builder *b,
448 struct vtn_value *val, int member,
449 const struct vtn_decoration *dec, void *void_ctx)
450 {
451 struct member_decoration_ctx *ctx = void_ctx;
452
453 if (member < 0)
454 return;
455
456 switch (dec->decoration) {
457 case SpvDecorationRelaxedPrecision:
458 break; /* FIXME: Do nothing with this for now. */
459 case SpvDecorationNoPerspective:
460 ctx->fields[member].interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
461 break;
462 case SpvDecorationFlat:
463 ctx->fields[member].interpolation = INTERP_QUALIFIER_FLAT;
464 break;
465 case SpvDecorationCentroid:
466 ctx->fields[member].centroid = true;
467 break;
468 case SpvDecorationSample:
469 ctx->fields[member].sample = true;
470 break;
471 case SpvDecorationLocation:
472 ctx->fields[member].location = dec->literals[0];
473 break;
474 case SpvDecorationBuiltIn:
475 ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
476 ctx->type->members[member]->is_builtin = true;
477 ctx->type->members[member]->builtin = dec->literals[0];
478 ctx->type->builtin_block = true;
479 break;
480 case SpvDecorationOffset:
481 ctx->type->offsets[member] = dec->literals[0];
482 break;
483 case SpvDecorationMatrixStride:
484 mutable_matrix_member(b, ctx->type, member)->stride = dec->literals[0];
485 break;
486 case SpvDecorationColMajor:
487 break; /* Nothing to do here. Column-major is the default. */
488 case SpvDecorationRowMajor:
489 mutable_matrix_member(b, ctx->type, member)->row_major = true;
490 break;
491 default:
492 unreachable("Unhandled member decoration");
493 }
494 }
495
496 static void
497 type_decoration_cb(struct vtn_builder *b,
498 struct vtn_value *val, int member,
499 const struct vtn_decoration *dec, void *ctx)
500 {
501 struct vtn_type *type = val->type;
502
503 if (member != -1)
504 return;
505
506 switch (dec->decoration) {
507 case SpvDecorationArrayStride:
508 type->stride = dec->literals[0];
509 break;
510 case SpvDecorationBlock:
511 type->block = true;
512 break;
513 case SpvDecorationBufferBlock:
514 type->buffer_block = true;
515 break;
516 case SpvDecorationGLSLShared:
517 case SpvDecorationGLSLPacked:
518 /* Ignore these, since we get explicit offsets anyways */
519 break;
520
521 case SpvDecorationStream:
522 assert(dec->literals[0] == 0);
523 break;
524
525 default:
526 unreachable("Unhandled type decoration");
527 }
528 }
529
530 static unsigned
531 translate_image_format(SpvImageFormat format)
532 {
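/* The comments below give the GL internal-format enums; vtn_type stores
* image formats as these GLenum values.
*/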
533 switch (format) {
534 case SpvImageFormatUnknown: return 0; /* GL_NONE */
535 case SpvImageFormatRgba32f: return 0x8814; /* GL_RGBA32F */
536 case SpvImageFormatRgba16f: return 0x881A; /* GL_RGBA16F */
537 case SpvImageFormatR32f: return 0x822E; /* GL_R32F */
538 case SpvImageFormatRgba8: return 0x8058; /* GL_RGBA8 */
539 case SpvImageFormatRgba8Snorm: return 0x8F97; /* GL_RGBA8_SNORM */
540 case SpvImageFormatRg32f: return 0x8230; /* GL_RG32F */
541 case SpvImageFormatRg16f: return 0x822F; /* GL_RG16F */
542 case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
543 case SpvImageFormatR16f: return 0x822D; /* GL_R16F */
544 case SpvImageFormatRgba16: return 0x805B; /* GL_RGBA16 */
545 case SpvImageFormatRgb10A2: return 0x8059; /* GL_RGB10_A2 */
546 case SpvImageFormatRg16: return 0x822C; /* GL_RG16 */
547 case SpvImageFormatRg8: return 0x822B; /* GL_RG8 */
548 case SpvImageFormatR16: return 0x822A; /* GL_R16 */
549 case SpvImageFormatR8: return 0x8229; /* GL_R8 */
550 case SpvImageFormatRgba16Snorm: return 0x8F9B; /* GL_RGBA16_SNORM */
551 case SpvImageFormatRg16Snorm: return 0x8F99; /* GL_RG16_SNORM */
552 case SpvImageFormatRg8Snorm: return 0x8F95; /* GL_RG8_SNORM */
553 case SpvImageFormatR16Snorm: return 0x8F98; /* GL_R16_SNORM */
554 case SpvImageFormatR8Snorm: return 0x8F94; /* GL_R8_SNORM */
555 case SpvImageFormatRgba32i: return 0x8D82; /* GL_RGBA32I */
556 case SpvImageFormatRgba16i: return 0x8D88; /* GL_RGBA16I */
557 case SpvImageFormatRgba8i: return 0x8D8E; /* GL_RGBA8I */
558 case SpvImageFormatR32i: return 0x8235; /* GL_R32I */
559 case SpvImageFormatRg32i: return 0x823B; /* GL_RG32I */
560 case SpvImageFormatRg16i: return 0x8239; /* GL_RG16I */
561 case SpvImageFormatRg8i: return 0x8237; /* GL_RG8I */
562 case SpvImageFormatR16i: return 0x8233; /* GL_R16I */
563 case SpvImageFormatR8i: return 0x8231; /* GL_R8I */
564 case SpvImageFormatRgba32ui: return 0x8D70; /* GL_RGBA32UI */
565 case SpvImageFormatRgba16ui: return 0x8D76; /* GL_RGBA16UI */
566 case SpvImageFormatRgba8ui: return 0x8D7C; /* GL_RGBA8UI */
567 case SpvImageFormatR32ui: return 0x8236; /* GL_R32UI */
568 case SpvImageFormatRgb10a2ui: return 0x906F; /* GL_RGB10_A2UI */
569 case SpvImageFormatRg32ui: return 0x823C; /* GL_RG32UI */
570 case SpvImageFormatRg16ui: return 0x823A; /* GL_RG16UI */
571 case SpvImageFormatRg8ui: return 0x8238; /* GL_RG8UI */
572 case SpvImageFormatR16ui: return 0x8234; /* GL_R16UI */
573 case SpvImageFormatR8ui: return 0x8232; /* GL_R8UI */
574 default:
575 assert(!"Invalid image format");
576 return 0;
577 }
578 }
579
580 static void
581 vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
582 const uint32_t *w, unsigned count)
583 {
584 struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);
585
586 val->type = rzalloc(b, struct vtn_type);
587 val->type->is_builtin = false;
588 val->type->val = val;
589
590 switch (opcode) {
591 case SpvOpTypeVoid:
592 val->type->type = glsl_void_type();
593 break;
594 case SpvOpTypeBool:
595 val->type->type = glsl_bool_type();
596 break;
597 case SpvOpTypeInt:
598 val->type->type = glsl_int_type();
599 break;
600 case SpvOpTypeFloat:
601 val->type->type = glsl_float_type();
602 break;
603
604 case SpvOpTypeVector: {
605 struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
606 unsigned elems = w[3];
607
608 assert(glsl_type_is_scalar(base->type));
609 val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
610
611 /* Vectors implicitly have sizeof(base_type) stride. For now, this
612 * is always 4 bytes. This will have to change if we want to start
613 * supporting doubles or half-floats.
614 */
615 val->type->stride = 4;
616 val->type->array_element = base;
617 break;
618 }
619
620 case SpvOpTypeMatrix: {
621 struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
622 unsigned columns = w[3];
623
624 assert(glsl_type_is_vector(base->type));
625 val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
626 glsl_get_vector_elements(base->type),
627 columns);
628 assert(!glsl_type_is_error(val->type->type));
629 val->type->array_element = base;
630 val->type->row_major = false;
631 val->type->stride = 0;
632 break;
633 }
634
635 case SpvOpTypeRuntimeArray:
636 case SpvOpTypeArray: {
637 struct vtn_type *array_element =
638 vtn_value(b, w[2], vtn_value_type_type)->type;
639
640 unsigned length;
641 if (opcode == SpvOpTypeRuntimeArray) {
642 /* A length of 0 is used to denote unsized arrays */
643 length = 0;
644 } else {
645 length =
646 vtn_value(b, w[3], vtn_value_type_constant)->constant->value.u[0];
647 }
648
649 val->type->type = glsl_array_type(array_element->type, length);
650 val->type->array_element = array_element;
651 val->type->stride = 0;
652 break;
653 }
654
655 case SpvOpTypeStruct: {
656 unsigned num_fields = count - 2;
657 val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
658 val->type->offsets = ralloc_array(b, unsigned, num_fields);
659
660 NIR_VLA(struct glsl_struct_field, fields, count);
661 for (unsigned i = 0; i < num_fields; i++) {
662 val->type->members[i] =
663 vtn_value(b, w[i + 2], vtn_value_type_type)->type;
664 fields[i] = (struct glsl_struct_field) {
665 .type = val->type->members[i]->type,
666 .name = ralloc_asprintf(b, "field%d", i),
667 .location = -1,
668 };
669 }
670
671 struct member_decoration_ctx ctx = {
672 .fields = fields,
673 .type = val->type
674 };
675
676 vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
677
678 const char *name = val->name ? val->name : "struct";
679
680 val->type->type = glsl_struct_type(fields, num_fields, name);
681 break;
682 }
683
684 case SpvOpTypeFunction: {
685 const struct glsl_type *return_type =
686 vtn_value(b, w[2], vtn_value_type_type)->type->type;
687 NIR_VLA(struct glsl_function_param, params, count - 3);
688 for (unsigned i = 0; i < count - 3; i++) {
689 params[i].type = vtn_value(b, w[i + 3], vtn_value_type_type)->type->type;
690
691 /* FIXME: */
692 params[i].in = true;
693 params[i].out = true;
694 }
695 val->type->type = glsl_function_type(return_type, params, count - 3);
696 break;
697 }
698
699 case SpvOpTypePointer:
700 /* FIXME: For now, we'll just do the really lame thing and return
701 * the same type. The validator should ensure that the proper number
702 * of dereferences happen
703 */
704 val->type = vtn_value(b, w[3], vtn_value_type_type)->type;
705 break;
706
707 case SpvOpTypeImage: {
708 const struct glsl_type *sampled_type =
709 vtn_value(b, w[2], vtn_value_type_type)->type->type;
710
711 assert(glsl_type_is_vector_or_scalar(sampled_type));
712
713 enum glsl_sampler_dim dim;
714 switch ((SpvDim)w[3]) {
715 case SpvDim1D: dim = GLSL_SAMPLER_DIM_1D; break;
716 case SpvDim2D: dim = GLSL_SAMPLER_DIM_2D; break;
717 case SpvDim3D: dim = GLSL_SAMPLER_DIM_3D; break;
718 case SpvDimCube: dim = GLSL_SAMPLER_DIM_CUBE; break;
719 case SpvDimRect: dim = GLSL_SAMPLER_DIM_RECT; break;
720 case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF; break;
721 default:
722 unreachable("Invalid SPIR-V Sampler dimension");
723 }
724
725 bool is_shadow = w[4];
726 bool is_array = w[5];
727 bool multisampled = w[6];
728 unsigned sampled = w[7];
729 SpvImageFormat format = w[8];
730
731 if (count > 9)
732 val->type->access_qualifier = w[9];
733 else
734 val->type->access_qualifier = SpvAccessQualifierReadWrite;
735
736 assert(!multisampled && "FIXME: Handle multi-sampled textures");
737
738 val->type->image_format = translate_image_format(format);
739
740 if (sampled == 1) {
741 val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
742 glsl_get_base_type(sampled_type));
743 } else if (sampled == 2) {
744 assert(format);
745 assert(!is_shadow);
746 val->type->type = glsl_image_type(dim, is_array,
747 glsl_get_base_type(sampled_type));
748 } else {
749 assert(!"We need to know if the image will be sampled");
750 }
751 break;
752 }
753
754 case SpvOpTypeSampledImage:
755 val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
756 break;
757
758 case SpvOpTypeSampler:
759 /* The actual sampler type here doesn't really matter. It gets
760 * thrown away the moment you combine it with an image. What really
761 * matters is that it's a sampler type as opposed to an integer type
762 * so the backend knows what to do.
763 *
764 * TODO: Eventually we should consider adding a "bare sampler" type
765 * to glsl_types.
766 */
767 val->type->type = glsl_sampler_type(GLSL_SAMPLER_DIM_2D, false, false,
768 GLSL_TYPE_FLOAT);
769 break;
770
771 case SpvOpTypeOpaque:
772 case SpvOpTypeEvent:
773 case SpvOpTypeDeviceEvent:
774 case SpvOpTypeReserveId:
775 case SpvOpTypeQueue:
776 case SpvOpTypePipe:
777 default:
778 unreachable("Unhandled opcode");
779 }
780
781 vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
782 }
783
784 static nir_constant *
785 vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
786 {
787 nir_constant *c = rzalloc(b, nir_constant);
788
789 switch (glsl_get_base_type(type)) {
790 case GLSL_TYPE_INT:
791 case GLSL_TYPE_UINT:
792 case GLSL_TYPE_BOOL:
793 case GLSL_TYPE_FLOAT:
794 case GLSL_TYPE_DOUBLE:
795 /* Nothing to do here. It's already initialized to zero */
796 break;
797
798 case GLSL_TYPE_ARRAY:
799 assert(glsl_get_length(type) > 0);
800 c->num_elements = glsl_get_length(type);
801 c->elements = ralloc_array(b, nir_constant *, c->num_elements);
802
803 c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
804 for (unsigned i = 1; i < c->num_elements; i++)
805 c->elements[i] = c->elements[0];
806 break;
807
808 case GLSL_TYPE_STRUCT:
809 c->num_elements = glsl_get_length(type);
810 c->elements = ralloc_array(b, nir_constant *, c->num_elements);
811
812 for (unsigned i = 0; i < c->num_elements; i++) {
813 c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
814 }
815 break;
816
817 default:
818 unreachable("Invalid type for null constant");
819 }
820
821 return c;
822 }
823
824 static void
825 spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
826 int member, const struct vtn_decoration *dec,
827 void *data)
828 {
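/* Check whether the client supplied a specialization for this constant's
* SpecId and, if so, substitute the provided value for the default.
*/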
829 assert(member == -1);
830 if (dec->decoration != SpvDecorationSpecId)
831 return;
832
833 uint32_t *const_value = data;
834
835 for (unsigned i = 0; i < b->num_specializations; i++) {
836 if (b->specializations[i].id == dec->literals[0]) {
837 *const_value = b->specializations[i].data;
838 return;
839 }
840 }
841 }
842
843 static uint32_t
844 get_specialization(struct vtn_builder *b, struct vtn_value *val,
845 uint32_t const_value)
846 {
847 vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &const_value);
848 return const_value;
849 }
850
851 static void
852 vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
853 const uint32_t *w, unsigned count)
854 {
855 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
856 val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
857 val->constant = rzalloc(b, nir_constant);
858 switch (opcode) {
859 case SpvOpConstantTrue:
860 assert(val->const_type == glsl_bool_type());
861 val->constant->value.u[0] = NIR_TRUE;
862 break;
863 case SpvOpConstantFalse:
864 assert(val->const_type == glsl_bool_type());
865 val->constant->value.u[0] = NIR_FALSE;
866 break;
867
868 case SpvOpSpecConstantTrue:
869 case SpvOpSpecConstantFalse: {
870 assert(val->const_type == glsl_bool_type());
871 uint32_t int_val =
872 get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
873 val->constant->value.u[0] = int_val ? NIR_TRUE : NIR_FALSE;
874 break;
875 }
876
877 case SpvOpConstant:
878 assert(glsl_type_is_scalar(val->const_type));
879 val->constant->value.u[0] = w[3];
880 break;
881 case SpvOpSpecConstant:
882 assert(glsl_type_is_scalar(val->const_type));
883 val->constant->value.u[0] = get_specialization(b, val, w[3]);
884 break;
885 case SpvOpSpecConstantComposite:
886 case SpvOpConstantComposite: {
887 unsigned elem_count = count - 3;
888 nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
889 for (unsigned i = 0; i < elem_count; i++)
890 elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;
891
892 switch (glsl_get_base_type(val->const_type)) {
893 case GLSL_TYPE_UINT:
894 case GLSL_TYPE_INT:
895 case GLSL_TYPE_FLOAT:
896 case GLSL_TYPE_BOOL:
897 if (glsl_type_is_matrix(val->const_type)) {
898 unsigned rows = glsl_get_vector_elements(val->const_type);
899 assert(glsl_get_matrix_columns(val->const_type) == elem_count);
900 for (unsigned i = 0; i < elem_count; i++)
901 for (unsigned j = 0; j < rows; j++)
902 val->constant->value.u[rows * i + j] = elems[i]->value.u[j];
903 } else {
904 assert(glsl_type_is_vector(val->const_type));
905 assert(glsl_get_vector_elements(val->const_type) == elem_count);
906 for (unsigned i = 0; i < elem_count; i++)
907 val->constant->value.u[i] = elems[i]->value.u[0];
908 }
909 ralloc_free(elems);
910 break;
911
912 case GLSL_TYPE_STRUCT:
913 case GLSL_TYPE_ARRAY:
914 ralloc_steal(val->constant, elems);
915 val->constant->num_elements = elem_count;
916 val->constant->elements = elems;
917 break;
918
919 default:
920 unreachable("Unsupported type for constants");
921 }
922 break;
923 }
924
925 case SpvOpSpecConstantOp: {
926 SpvOp opcode = get_specialization(b, val, w[3]);
927 switch (opcode) {
928 case SpvOpVectorShuffle: {
929 struct vtn_value *v0 = vtn_value(b, w[4], vtn_value_type_constant);
930 struct vtn_value *v1 = vtn_value(b, w[5], vtn_value_type_constant);
931 unsigned len0 = glsl_get_vector_elements(v0->const_type);
932 unsigned len1 = glsl_get_vector_elements(v1->const_type);
933
934 uint32_t u[8];
935 for (unsigned i = 0; i < len0; i++)
936 u[i] = v0->constant->value.u[i];
937 for (unsigned i = 0; i < len1; i++)
938 u[len0 + i] = v1->constant->value.u[i];
939
940 for (unsigned i = 0; i < count - 6; i++) {
941 uint32_t comp = w[i + 6];
942 if (comp == (uint32_t)-1) {
943 val->constant->value.u[i] = 0xdeadbeef;
944 } else {
945 val->constant->value.u[i] = u[comp];
946 }
947 }
948 return;
949 }
950
951 case SpvOpCompositeExtract:
952 case SpvOpCompositeInsert: {
953 struct vtn_value *comp;
954 unsigned deref_start;
955 struct nir_constant **c;
956 if (opcode == SpvOpCompositeExtract) {
957 comp = vtn_value(b, w[4], vtn_value_type_constant);
958 deref_start = 5;
959 c = &comp->constant;
960 } else {
961 comp = vtn_value(b, w[5], vtn_value_type_constant);
962 deref_start = 6;
963 val->constant = nir_constant_clone(comp->constant,
964 (nir_variable *)b);
965 c = &val->constant;
966 }
967
968 int elem = -1;
969 const struct glsl_type *type = comp->const_type;
970 for (unsigned i = deref_start; i < count; i++) {
971 switch (glsl_get_base_type(type)) {
972 case GLSL_TYPE_UINT:
973 case GLSL_TYPE_INT:
974 case GLSL_TYPE_FLOAT:
975 case GLSL_TYPE_BOOL:
976 /* If we hit this granularity, we're picking off an element */
977 if (elem < 0)
978 elem = 0;
979
980 if (glsl_type_is_matrix(type)) {
981 elem += w[i] * glsl_get_vector_elements(type);
982 type = glsl_get_column_type(type);
983 } else {
984 assert(glsl_type_is_vector(type));
985 elem += w[i];
986 type = glsl_scalar_type(glsl_get_base_type(type));
987 }
988 continue;
989
990 case GLSL_TYPE_ARRAY:
991 c = &(*c)->elements[w[i]];
992 type = glsl_get_array_element(type);
993 continue;
994
995 case GLSL_TYPE_STRUCT:
996 c = &(*c)->elements[w[i]];
997 type = glsl_get_struct_field(type, w[i]);
998 continue;
999
1000 default:
1001 unreachable("Invalid constant type");
1002 }
1003 }
1004
1005 if (opcode == SpvOpCompositeExtract) {
1006 if (elem == -1) {
1007 val->constant = *c;
1008 } else {
1009 unsigned num_components = glsl_get_vector_elements(type);
1010 for (unsigned i = 0; i < num_components; i++)
1011 val->constant->value.u[i] = (*c)->value.u[elem + i];
1012 }
1013 } else {
1014 struct vtn_value *insert =
1015 vtn_value(b, w[4], vtn_value_type_constant);
1016 assert(insert->const_type == type);
1017 if (elem == -1) {
1018 *c = insert->constant;
1019 } else {
1020 unsigned num_components = glsl_get_vector_elements(type);
1021 for (unsigned i = 0; i < num_components; i++)
1022 (*c)->value.u[elem + i] = insert->constant->value.u[i];
1023 }
1024 }
1025 return;
1026 }
1027
1028 default: {
1029 bool swap;
1030 nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap);
1031
1032 unsigned num_components = glsl_get_vector_elements(val->const_type);
1033
1034 nir_const_value src[3];
1035 assert(count <= 7);
1036 for (unsigned i = 0; i < count - 4; i++) {
1037 nir_constant *c =
1038 vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;
1039
1040 unsigned j = swap ? 1 - i : i;
1041 for (unsigned k = 0; k < num_components; k++)
1042 src[j].u[k] = c->value.u[k];
1043 }
1044
1045 nir_const_value res = nir_eval_const_opcode(op, num_components, src);
1046
1047 for (unsigned k = 0; k < num_components; k++)
1048 val->constant->value.u[k] = res.u[k];
1049
1050 return;
1051 } /* default */
1052 }
1053 }
1054
1055 case SpvOpConstantNull:
1056 val->constant = vtn_null_constant(b, val->const_type);
1057 break;
1058
1059 case SpvOpConstantSampler:
1060 assert(!"OpConstantSampler requires Kernel Capability");
1061 break;
1062
1063 default:
1064 unreachable("Unhandled opcode");
1065 }
1066 }
1067
1068 static void
1069 vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
1070 const uint32_t *w, unsigned count)
1071 {
1072 struct nir_function *callee =
1073 vtn_value(b, w[3], vtn_value_type_function)->func->impl->function;
1074
1075 nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
1076 for (unsigned i = 0; i < call->num_params; i++) {
1077 unsigned arg_id = w[4 + i];
1078 struct vtn_value *arg = vtn_untyped_value(b, arg_id);
1079 if (arg->value_type == vtn_value_type_access_chain) {
1080 nir_deref_var *d = vtn_access_chain_to_deref(b, arg->access_chain);
1081 call->params[i] = nir_deref_as_var(nir_copy_deref(call, &d->deref));
1082 } else {
1083 struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);
1084
1085 /* Make a temporary to store the argument in */
1086 nir_variable *tmp =
1087 nir_local_variable_create(b->impl, arg_ssa->type, "arg_tmp");
1088 call->params[i] = nir_deref_var_create(call, tmp);
1089
1090 vtn_local_store(b, arg_ssa, call->params[i]);
1091 }
1092 }
1093
1094 nir_variable *out_tmp = NULL;
1095 if (!glsl_type_is_void(callee->return_type)) {
1096 out_tmp = nir_local_variable_create(b->impl, callee->return_type,
1097 "out_tmp");
1098 call->return_deref = nir_deref_var_create(call, out_tmp);
1099 }
1100
1101 nir_builder_instr_insert(&b->nb, &call->instr);
1102
1103 if (glsl_type_is_void(callee->return_type)) {
1104 vtn_push_value(b, w[2], vtn_value_type_undef);
1105 } else {
1106 struct vtn_value *retval = vtn_push_value(b, w[2], vtn_value_type_ssa);
1107 retval->ssa = vtn_local_load(b, call->return_deref);
1108 }
1109 }
1110
1111 struct vtn_ssa_value *
1112 vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
1113 {
1114 struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
1115 val->type = type;
1116
1117 if (!glsl_type_is_vector_or_scalar(type)) {
1118 unsigned elems = glsl_get_length(type);
1119 val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
1120 for (unsigned i = 0; i < elems; i++) {
1121 const struct glsl_type *child_type;
1122
1123 switch (glsl_get_base_type(type)) {
1124 case GLSL_TYPE_INT:
1125 case GLSL_TYPE_UINT:
1126 case GLSL_TYPE_BOOL:
1127 case GLSL_TYPE_FLOAT:
1128 case GLSL_TYPE_DOUBLE:
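/* A non-vector, non-scalar value with a numeric base type must be a
* matrix, so each element is a column.
*/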
1129 child_type = glsl_get_column_type(type);
1130 break;
1131 case GLSL_TYPE_ARRAY:
1132 child_type = glsl_get_array_element(type);
1133 break;
1134 case GLSL_TYPE_STRUCT:
1135 child_type = glsl_get_struct_field(type, i);
1136 break;
1137 default:
1138 unreachable("unkown base type");
1139 }
1140
1141 val->elems[i] = vtn_create_ssa_value(b, child_type);
1142 }
1143 }
1144
1145 return val;
1146 }
1147
1148 static nir_tex_src
1149 vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
1150 {
1151 nir_tex_src src;
1152 src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
1153 src.src_type = type;
1154 return src;
1155 }
1156
1157 static void
1158 vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
1159 const uint32_t *w, unsigned count)
1160 {
1161 if (opcode == SpvOpSampledImage) {
1162 struct vtn_value *val =
1163 vtn_push_value(b, w[2], vtn_value_type_sampled_image);
1164 val->sampled_image = ralloc(b, struct vtn_sampled_image);
1165 val->sampled_image->image =
1166 vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
1167 val->sampled_image->sampler =
1168 vtn_value(b, w[4], vtn_value_type_access_chain)->access_chain;
1169 return;
1170 } else if (opcode == SpvOpImage) {
1171 struct vtn_value *val =
1172 vtn_push_value(b, w[2], vtn_value_type_access_chain);
1173 struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
1174 if (src_val->value_type == vtn_value_type_sampled_image) {
1175 val->access_chain = src_val->sampled_image->image;
1176 } else {
1177 assert(src_val->value_type == vtn_value_type_access_chain);
1178 val->access_chain = src_val->access_chain;
1179 }
1180 return;
1181 }
1182
1183 struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
1184 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
1185
1186 struct vtn_sampled_image sampled;
1187 struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
1188 if (sampled_val->value_type == vtn_value_type_sampled_image) {
1189 sampled = *sampled_val->sampled_image;
1190 } else {
1191 assert(sampled_val->value_type == vtn_value_type_access_chain);
1192 sampled.image = NULL;
1193 sampled.sampler = sampled_val->access_chain;
1194 }
1195
1196 nir_tex_src srcs[8]; /* 8 should be enough */
1197 nir_tex_src *p = srcs;
1198
1199 unsigned idx = 4;
1200
1201 bool has_coord = false;
1202 switch (opcode) {
1203 case SpvOpImageSampleImplicitLod:
1204 case SpvOpImageSampleExplicitLod:
1205 case SpvOpImageSampleDrefImplicitLod:
1206 case SpvOpImageSampleDrefExplicitLod:
1207 case SpvOpImageSampleProjImplicitLod:
1208 case SpvOpImageSampleProjExplicitLod:
1209 case SpvOpImageSampleProjDrefImplicitLod:
1210 case SpvOpImageSampleProjDrefExplicitLod:
1211 case SpvOpImageFetch:
1212 case SpvOpImageGather:
1213 case SpvOpImageDrefGather:
1214 case SpvOpImageQueryLod: {
1215 /* All these types have the coordinate as their first real argument */
1216 struct vtn_ssa_value *coord = vtn_ssa_value(b, w[idx++]);
1217 has_coord = true;
1218 p->src = nir_src_for_ssa(coord->def);
1219 p->src_type = nir_tex_src_coord;
1220 p++;
1221 break;
1222 }
1223
1224 default:
1225 break;
1226 }
1227
1228 /* These all have an explicit depth value as their next source */
1229 switch (opcode) {
1230 case SpvOpImageSampleDrefImplicitLod:
1231 case SpvOpImageSampleDrefExplicitLod:
1232 case SpvOpImageSampleProjDrefImplicitLod:
1233 case SpvOpImageSampleProjDrefExplicitLod:
1234 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparitor);
1235 break;
1236 default:
1237 break;
1238 }
1239
1240 /* For OpImageQuerySizeLod, we always have an LOD */
1241 if (opcode == SpvOpImageQuerySizeLod)
1242 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
1243
1244 /* Figure out the base texture operation */
1245 nir_texop texop;
1246 switch (opcode) {
1247 case SpvOpImageSampleImplicitLod:
1248 case SpvOpImageSampleDrefImplicitLod:
1249 case SpvOpImageSampleProjImplicitLod:
1250 case SpvOpImageSampleProjDrefImplicitLod:
1251 texop = nir_texop_tex;
1252 break;
1253
1254 case SpvOpImageSampleExplicitLod:
1255 case SpvOpImageSampleDrefExplicitLod:
1256 case SpvOpImageSampleProjExplicitLod:
1257 case SpvOpImageSampleProjDrefExplicitLod:
1258 texop = nir_texop_txl;
1259 break;
1260
1261 case SpvOpImageFetch:
1262 texop = nir_texop_txf;
1263 break;
1264
1265 case SpvOpImageGather:
1266 case SpvOpImageDrefGather:
1267 texop = nir_texop_tg4;
1268 break;
1269
1270 case SpvOpImageQuerySizeLod:
1271 case SpvOpImageQuerySize:
1272 texop = nir_texop_txs;
1273 break;
1274
1275 case SpvOpImageQueryLod:
1276 texop = nir_texop_lod;
1277 break;
1278
1279 case SpvOpImageQueryLevels:
1280 texop = nir_texop_query_levels;
1281 break;
1282
1283 case SpvOpImageQuerySamples:
1284 default:
1285 unreachable("Unhandled opcode");
1286 }
1287
1288 /* Now we need to handle some number of optional arguments */
1289 if (idx < count) {
1290 uint32_t operands = w[idx++];
1291
1292 if (operands & SpvImageOperandsBiasMask) {
1293 assert(texop == nir_texop_tex);
1294 texop = nir_texop_txb;
1295 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
1296 }
1297
1298 if (operands & SpvImageOperandsLodMask) {
1299 assert(texop == nir_texop_txl || texop == nir_texop_txf ||
1300 texop == nir_texop_txs);
1301 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
1302 }
1303
1304 if (operands & SpvImageOperandsGradMask) {
1305 assert(texop == nir_texop_tex);
1306 texop = nir_texop_txd;
1307 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
1308 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
1309 }
1310
1311 if (operands & SpvImageOperandsOffsetMask ||
1312 operands & SpvImageOperandsConstOffsetMask)
1313 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);
1314
1315 if (operands & SpvImageOperandsConstOffsetsMask)
1316 assert(!"Constant offsets to texture gather not yet implemented");
1317
1318 if (operands & SpvImageOperandsSampleMask) {
1319 assert(texop == nir_texop_txf);
1320 texop = nir_texop_txf_ms;
1321 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
1322 }
1323 }
1324 /* We should have now consumed exactly all of the arguments */
1325 assert(idx == count);
1326
1327 nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
1328 instr->op = texop;
1329
1330 memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
1331
1332 const struct glsl_type *image_type;
1333 if (sampled.image) {
1334 image_type = sampled.image->var->var->interface_type;
1335 } else {
1336 image_type = sampled.sampler->var->var->interface_type;
1337 }
1338
1339 instr->sampler_dim = glsl_get_sampler_dim(image_type);
1340 instr->is_array = glsl_sampler_type_is_array(image_type);
1341 instr->is_shadow = glsl_sampler_type_is_shadow(image_type);
1342 instr->is_new_style_shadow = instr->is_shadow;
1343
1344 if (has_coord) {
1345 switch (instr->sampler_dim) {
1346 case GLSL_SAMPLER_DIM_1D:
1347 case GLSL_SAMPLER_DIM_BUF:
1348 instr->coord_components = 1;
1349 break;
1350 case GLSL_SAMPLER_DIM_2D:
1351 case GLSL_SAMPLER_DIM_RECT:
1352 instr->coord_components = 2;
1353 break;
1354 case GLSL_SAMPLER_DIM_3D:
1355 case GLSL_SAMPLER_DIM_CUBE:
1356 case GLSL_SAMPLER_DIM_MS:
1357 instr->coord_components = 3;
1358 break;
1359 default:
1360 assert("Invalid sampler type");
1361 }
1362
1363 if (instr->is_array)
1364 instr->coord_components++;
1365 } else {
1366 instr->coord_components = 0;
1367 }
1368
1369 switch (glsl_get_sampler_result_type(image_type)) {
1370 case GLSL_TYPE_FLOAT: instr->dest_type = nir_type_float; break;
1371 case GLSL_TYPE_INT: instr->dest_type = nir_type_int; break;
1372 case GLSL_TYPE_UINT: instr->dest_type = nir_type_uint; break;
1373 case GLSL_TYPE_BOOL: instr->dest_type = nir_type_bool; break;
1374 default:
1375 unreachable("Invalid base type for sampler result");
1376 }
1377
1378 nir_deref_var *sampler = vtn_access_chain_to_deref(b, sampled.sampler);
1379 instr->sampler = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
1380 if (sampled.image) {
1381 nir_deref_var *image = vtn_access_chain_to_deref(b, sampled.image);
1382 instr->texture = nir_deref_as_var(nir_copy_deref(instr, &image->deref));
1383 } else {
1384 instr->texture = NULL;
1385 }
1386
1387 nir_ssa_dest_init(&instr->instr, &instr->dest,
1388 nir_tex_instr_dest_size(instr), NULL);
1389
1390 assert(glsl_get_vector_elements(ret_type->type) ==
1391 nir_tex_instr_dest_size(instr));
1392
1393 val->ssa = vtn_create_ssa_value(b, ret_type->type);
1394 val->ssa->def = &instr->dest.ssa;
1395
1396 nir_builder_instr_insert(&b->nb, &instr->instr);
1397 }
1398
1399 static nir_ssa_def *
1400 get_image_coord(struct vtn_builder *b, uint32_t value)
1401 {
1402 struct vtn_ssa_value *coord = vtn_ssa_value(b, value);
1403
1404 /* The image_load_store intrinsics assume a 4-dim coordinate */
1405 unsigned dim = glsl_get_vector_elements(coord->type);
1406 unsigned swizzle[4];
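/* Pad the swizzle out to four components by repeating the last real
* coordinate component.
*/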
1407 for (unsigned i = 0; i < 4; i++)
1408 swizzle[i] = MIN2(i, dim - 1);
1409
1410 return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
1411 }
1412
1413 static void
1414 vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
1415 const uint32_t *w, unsigned count)
1416 {
1417 /* Just get this one out of the way */
1418 if (opcode == SpvOpImageTexelPointer) {
1419 struct vtn_value *val =
1420 vtn_push_value(b, w[2], vtn_value_type_image_pointer);
1421 val->image = ralloc(b, struct vtn_image_pointer);
1422
1423 val->image->image =
1424 vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
1425 val->image->coord = get_image_coord(b, w[4]);
1426 val->image->sample = vtn_ssa_value(b, w[5])->def;
1427 return;
1428 }
1429
1430 struct vtn_image_pointer image;
1431
1432 switch (opcode) {
1433 case SpvOpAtomicExchange:
1434 case SpvOpAtomicCompareExchange:
1435 case SpvOpAtomicCompareExchangeWeak:
1436 case SpvOpAtomicIIncrement:
1437 case SpvOpAtomicIDecrement:
1438 case SpvOpAtomicIAdd:
1439 case SpvOpAtomicISub:
1440 case SpvOpAtomicSMin:
1441 case SpvOpAtomicUMin:
1442 case SpvOpAtomicSMax:
1443 case SpvOpAtomicUMax:
1444 case SpvOpAtomicAnd:
1445 case SpvOpAtomicOr:
1446 case SpvOpAtomicXor:
1447 image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
1448 break;
1449
1450 case SpvOpImageQuerySize:
1451 image.image =
1452 vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
1453 image.coord = NULL;
1454 image.sample = NULL;
1455 break;
1456
1457 case SpvOpImageRead:
1458 image.image =
1459 vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
1460 image.coord = get_image_coord(b, w[4]);
1461
1462 if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
1463 assert(w[5] == SpvImageOperandsSampleMask);
1464 image.sample = vtn_ssa_value(b, w[6])->def;
1465 } else {
1466 image.sample = nir_ssa_undef(&b->nb, 1);
1467 }
1468 break;
1469
1470 case SpvOpImageWrite:
1471 image.image =
1472 vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
1473 image.coord = get_image_coord(b, w[2]);
1474
1475 /* texel = w[3] */
1476
1477 if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
1478 assert(w[4] == SpvImageOperandsSampleMask);
1479 image.sample = vtn_ssa_value(b, w[5])->def;
1480 } else {
1481 image.sample = nir_ssa_undef(&b->nb, 1);
1482 }
1483 break;
1484
1485 default:
1486 unreachable("Invalid image opcode");
1487 }
1488
1489 nir_intrinsic_op op;
1490 switch (opcode) {
1491 #define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
1492 OP(ImageQuerySize, size)
1493 OP(ImageRead, load)
1494 OP(ImageWrite, store)
1495 OP(AtomicExchange, atomic_exchange)
1496 OP(AtomicCompareExchange, atomic_comp_swap)
1497 OP(AtomicIIncrement, atomic_add)
1498 OP(AtomicIDecrement, atomic_add)
1499 OP(AtomicIAdd, atomic_add)
1500 OP(AtomicISub, atomic_add)
1501 OP(AtomicSMin, atomic_min)
1502 OP(AtomicUMin, atomic_min)
1503 OP(AtomicSMax, atomic_max)
1504 OP(AtomicUMax, atomic_max)
1505 OP(AtomicAnd, atomic_and)
1506 OP(AtomicOr, atomic_or)
1507 OP(AtomicXor, atomic_xor)
1508 #undef OP
1509 default:
1510 unreachable("Invalid image opcode");
1511 }
1512
1513 nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
1514
1515 nir_deref_var *image_deref = vtn_access_chain_to_deref(b, image.image);
1516 intrin->variables[0] =
1517 nir_deref_as_var(nir_copy_deref(&intrin->instr, &image_deref->deref));
1518
1519 /* ImageQuerySize doesn't take any extra parameters */
1520 if (opcode != SpvOpImageQuerySize) {
1521 /* The image coordinate is always 4 components but we may not have that
1522 * many. Swizzle to compensate.
1523 */
1524 unsigned swiz[4];
1525 for (unsigned i = 0; i < 4; i++)
1526 swiz[i] = i < image.coord->num_components ? i : 0;
1527 intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
1528 swiz, 4, false));
1529 intrin->src[1] = nir_src_for_ssa(image.sample);
1530 }
1531
1532 switch (opcode) {
1533 case SpvOpImageQuerySize:
1534 case SpvOpImageRead:
1535 break;
1536 case SpvOpImageWrite:
1537 intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
1538 break;
1539 case SpvOpAtomicIIncrement:
1540 intrin->src[2] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
1541 break;
1542 case SpvOpAtomicIDecrement:
1543 intrin->src[2] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
1544 break;
1545
1546 case SpvOpAtomicExchange:
1547 case SpvOpAtomicIAdd:
1548 case SpvOpAtomicSMin:
1549 case SpvOpAtomicUMin:
1550 case SpvOpAtomicSMax:
1551 case SpvOpAtomicUMax:
1552 case SpvOpAtomicAnd:
1553 case SpvOpAtomicOr:
1554 case SpvOpAtomicXor:
1555 intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
1556 break;
1557
1558 case SpvOpAtomicCompareExchange:
1559 intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
1560 intrin->src[3] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
1561 break;
1562
1563 case SpvOpAtomicISub:
1564 intrin->src[2] = nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
1565 break;
1566
1567 default:
1568 unreachable("Invalid image opcode");
1569 }
1570
1571 if (opcode != SpvOpImageWrite) {
1572 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
1573 struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
1574 nir_ssa_dest_init(&intrin->instr, &intrin->dest, 4, NULL);
1575
1576 nir_builder_instr_insert(&b->nb, &intrin->instr);
1577
1578 /* The image intrinsics always return 4 channels but we may not want
1579 * that many. Emit a mov to trim it down.
1580 */
1581 unsigned swiz[4] = {0, 1, 2, 3};
1582 val->ssa = vtn_create_ssa_value(b, type->type);
1583 val->ssa->def = nir_swizzle(&b->nb, &intrin->dest.ssa, swiz,
1584 glsl_get_vector_elements(type->type), false);
1585 } else {
1586 nir_builder_instr_insert(&b->nb, &intrin->instr);
1587 }
1588 }
1589
1590 static nir_intrinsic_op
1591 get_ssbo_nir_atomic_op(SpvOp opcode)
1592 {
1593 switch (opcode) {
1594 #define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
1595 OP(AtomicExchange, atomic_exchange)
1596 OP(AtomicCompareExchange, atomic_comp_swap)
1597 OP(AtomicIIncrement, atomic_add)
1598 OP(AtomicIDecrement, atomic_add)
1599 OP(AtomicIAdd, atomic_add)
1600 OP(AtomicISub, atomic_add)
1601 OP(AtomicSMin, atomic_imin)
1602 OP(AtomicUMin, atomic_umin)
1603 OP(AtomicSMax, atomic_imax)
1604 OP(AtomicUMax, atomic_umax)
1605 OP(AtomicAnd, atomic_and)
1606 OP(AtomicOr, atomic_or)
1607 OP(AtomicXor, atomic_xor)
1608 #undef OP
1609 default:
1610 unreachable("Invalid SSBO atomic");
1611 }
1612 }
1613
1614 static nir_intrinsic_op
1615 get_shared_nir_atomic_op(SpvOp opcode)
1616 {
1617 switch (opcode) {
1618 #define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
1619 OP(AtomicExchange, atomic_exchange)
1620 OP(AtomicCompareExchange, atomic_comp_swap)
1621 OP(AtomicIIncrement, atomic_add)
1622 OP(AtomicIDecrement, atomic_add)
1623 OP(AtomicIAdd, atomic_add)
1624 OP(AtomicISub, atomic_add)
1625 OP(AtomicSMin, atomic_imin)
1626 OP(AtomicUMin, atomic_umin)
1627 OP(AtomicSMax, atomic_imax)
1628 OP(AtomicUMax, atomic_umax)
1629 OP(AtomicAnd, atomic_and)
1630 OP(AtomicOr, atomic_or)
1631 OP(AtomicXor, atomic_xor)
1632 #undef OP
1633 default:
1634 unreachable("Invalid shared atomic");
1635 }
1636 }
1637
1638 static void
1639 fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
1640 const uint32_t *w, nir_src *src)
1641 {
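/* NIR has no dedicated increment/decrement/subtract atomics, so those
* opcodes are expressed as atomic_add with an immediate 1, an immediate -1,
* or a negated operand.
*/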
1642 switch (opcode) {
1643 case SpvOpAtomicIIncrement:
1644 src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
1645 break;
1646
1647 case SpvOpAtomicIDecrement:
1648 src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
1649 break;
1650
1651 case SpvOpAtomicISub:
1652 src[0] =
1653 nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
1654 break;
1655
1656 case SpvOpAtomicCompareExchange:
1657 src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
1658 src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
1659 break;
1660
1661
1662 case SpvOpAtomicExchange:
1663 case SpvOpAtomicIAdd:
1664 case SpvOpAtomicSMin:
1665 case SpvOpAtomicUMin:
1666 case SpvOpAtomicSMax:
1667 case SpvOpAtomicUMax:
1668 case SpvOpAtomicAnd:
1669 case SpvOpAtomicOr:
1670 case SpvOpAtomicXor:
1671 src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
1672 break;
1673
1674 default:
1675 unreachable("Invalid SPIR-V atomic");
1676 }
1677 }
1678
1679 static void
1680 vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
1681 const uint32_t *w, unsigned count)
1682 {
1683 struct vtn_access_chain *chain =
1684 vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
1685 nir_intrinsic_instr *atomic;
1686
1687 /*
1688 SpvScope scope = w[4];
1689 SpvMemorySemanticsMask semantics = w[5];
1690 */
1691
1692 if (chain->var->mode == vtn_variable_mode_workgroup) {
1693 nir_deref *deref = &vtn_access_chain_to_deref(b, chain)->deref;
1694 nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
1695 atomic = nir_intrinsic_instr_create(b->nb.shader, op);
1696 atomic->variables[0] = nir_deref_as_var(nir_copy_deref(atomic, deref));
1697 fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
1698 } else {
1699 assert(chain->var->mode == vtn_variable_mode_ssbo);
1700 struct vtn_type *type;
1701 nir_ssa_def *offset, *index;
1702 offset = vtn_access_chain_to_offset(b, chain, &index, &type, NULL, false);
1703
1704 nir_intrinsic_op op = get_ssbo_nir_atomic_op(opcode);
1705
1706 atomic = nir_intrinsic_instr_create(b->nb.shader, op);
1707 atomic->src[0] = nir_src_for_ssa(index);
1708 atomic->src[1] = nir_src_for_ssa(offset);
1709 fill_common_atomic_sources(b, opcode, w, &atomic->src[2]);
1710 }
1711
1712 nir_ssa_dest_init(&atomic->instr, &atomic->dest, 1, NULL);
1713
1714 struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
1715 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
1716 val->ssa = rzalloc(b, struct vtn_ssa_value);
1717 val->ssa->def = &atomic->dest.ssa;
1718 val->ssa->type = type->type;
1719
1720 nir_builder_instr_insert(&b->nb, &atomic->instr);
1721 }
1722
1723 static nir_alu_instr *
1724 create_vec(nir_shader *shader, unsigned num_components)
1725 {
1726 nir_op op;
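/* NIR has no vec1 opcode; a single-component "vector" is just a move. */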
1727 switch (num_components) {
1728 case 1: op = nir_op_fmov; break;
1729 case 2: op = nir_op_vec2; break;
1730 case 3: op = nir_op_vec3; break;
1731 case 4: op = nir_op_vec4; break;
1732 default: unreachable("bad vector size");
1733 }
1734
1735 nir_alu_instr *vec = nir_alu_instr_create(shader, op);
1736 nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components, NULL);
1737 vec->dest.write_mask = (1 << num_components) - 1;
1738
1739 return vec;
1740 }
1741
1742 struct vtn_ssa_value *
1743 vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
1744 {
1745 if (src->transposed)
1746 return src->transposed;
1747
1748 struct vtn_ssa_value *dest =
1749 vtn_create_ssa_value(b, glsl_transposed_type(src->type));
1750
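/* Column i of the transpose is built by gathering component i from every
* column of the source.
*/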
1751 for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
1752 nir_alu_instr *vec = create_vec(b->shader,
1753 glsl_get_matrix_columns(src->type));
1754 if (glsl_type_is_vector_or_scalar(src->type)) {
1755 vec->src[0].src = nir_src_for_ssa(src->def);
1756 vec->src[0].swizzle[0] = i;
1757 } else {
1758 for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
1759 vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
1760 vec->src[j].swizzle[0] = i;
1761 }
1762 }
1763 nir_builder_instr_insert(&b->nb, &vec->instr);
1764 dest->elems[i]->def = &vec->dest.dest.ssa;
1765 }
1766
1767 dest->transposed = src;
1768
1769 return dest;
1770 }
1771
1772 nir_ssa_def *
1773 vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
1774 {
1775 unsigned swiz[4] = { index };
1776 return nir_swizzle(&b->nb, src, swiz, 1, true);
1777 }
1778
1779 nir_ssa_def *
1780 vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
1781 unsigned index)
1782 {
1783 nir_alu_instr *vec = create_vec(b->shader, src->num_components);
1784
1785 for (unsigned i = 0; i < src->num_components; i++) {
1786 if (i == index) {
1787 vec->src[i].src = nir_src_for_ssa(insert);
1788 } else {
1789 vec->src[i].src = nir_src_for_ssa(src);
1790 vec->src[i].swizzle[0] = i;
1791 }
1792 }
1793
1794 nir_builder_instr_insert(&b->nb, &vec->instr);
1795
1796 return &vec->dest.dest.ssa;
1797 }
1798
1799 nir_ssa_def *
1800 vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
1801 nir_ssa_def *index)
1802 {
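/* Select the requested component with a chain of bcsel instructions, one
* comparison per possible index.
*/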
1803 nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
1804 for (unsigned i = 1; i < src->num_components; i++)
1805 dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
1806 vtn_vector_extract(b, src, i), dest);
1807
1808 return dest;
1809 }
1810
1811 nir_ssa_def *
1812 vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
1813 nir_ssa_def *insert, nir_ssa_def *index)
1814 {
1815 nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
1816 for (unsigned i = 1; i < src->num_components; i++)
1817 dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
1818 vtn_vector_insert(b, src, insert, i), dest);
1819
1820 return dest;
1821 }
1822
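/* Implements OpVectorShuffle: each destination component is picked from the
 * concatenation of src0 and src1 according to 'indices'.  As an illustrative
 * example, with a vec2 src0 and a vec2 src1, the index list {3, 0} yields a
 * vec2 of (src1.y, src0.x).  An index of 0xffffffff marks the component as
 * undefined, so it is sourced from a fresh ssa_undef.
 */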
1823 static nir_ssa_def *
1824 vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
1825 nir_ssa_def *src0, nir_ssa_def *src1,
1826 const uint32_t *indices)
1827 {
1828 nir_alu_instr *vec = create_vec(b->shader, num_components);
1829
1830 nir_ssa_undef_instr *undef = nir_ssa_undef_instr_create(b->shader, 1);
1831 nir_builder_instr_insert(&b->nb, &undef->instr);
1832
1833 for (unsigned i = 0; i < num_components; i++) {
1834 uint32_t index = indices[i];
1835 if (index == 0xffffffff) {
1836 vec->src[i].src = nir_src_for_ssa(&undef->def);
1837 } else if (index < src0->num_components) {
1838 vec->src[i].src = nir_src_for_ssa(src0);
1839 vec->src[i].swizzle[0] = index;
1840 } else {
1841 vec->src[i].src = nir_src_for_ssa(src1);
1842 vec->src[i].swizzle[0] = index - src0->num_components;
1843 }
1844 }
1845
1846 nir_builder_instr_insert(&b->nb, &vec->instr);
1847
1848 return &vec->dest.dest.ssa;
1849 }
1850

1851 /*
1852  * Concatenates a number of vectors/scalars together to produce a vector
1853 */
1854 static nir_ssa_def *
1855 vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
1856 unsigned num_srcs, nir_ssa_def **srcs)
1857 {
1858 nir_alu_instr *vec = create_vec(b->shader, num_components);
1859
1860 unsigned dest_idx = 0;
1861 for (unsigned i = 0; i < num_srcs; i++) {
1862 nir_ssa_def *src = srcs[i];
1863 for (unsigned j = 0; j < src->num_components; j++) {
1864 vec->src[dest_idx].src = nir_src_for_ssa(src);
1865 vec->src[dest_idx].swizzle[0] = j;
1866 dest_idx++;
1867 }
1868 }
1869
1870 nir_builder_instr_insert(&b->nb, &vec->instr);
1871
1872 return &vec->dest.dest.ssa;
1873 }
1874
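/* Makes a copy of a vtn_ssa_value tree: vectors and scalars share the same
 * nir_ssa_def (SSA values are immutable, so nothing needs to be duplicated),
 * while aggregates get a freshly allocated element array that is copied
 * recursively so the copy can be rewritten without touching the source.
 */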
1875 static struct vtn_ssa_value *
1876 vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
1877 {
1878 struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
1879 dest->type = src->type;
1880
1881 if (glsl_type_is_vector_or_scalar(src->type)) {
1882 dest->def = src->def;
1883 } else {
1884 unsigned elems = glsl_get_length(src->type);
1885
1886 dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
1887 for (unsigned i = 0; i < elems; i++)
1888 dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
1889 }
1890
1891 return dest;
1892 }
1893
1894 static struct vtn_ssa_value *
1895 vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
1896 struct vtn_ssa_value *insert, const uint32_t *indices,
1897 unsigned num_indices)
1898 {
1899 struct vtn_ssa_value *dest = vtn_composite_copy(b, src);
1900
1901 struct vtn_ssa_value *cur = dest;
1902 unsigned i;
1903 for (i = 0; i < num_indices - 1; i++) {
1904 cur = cur->elems[indices[i]];
1905 }
1906
1907 if (glsl_type_is_vector_or_scalar(cur->type)) {
1908 /* According to the SPIR-V spec, OpCompositeInsert may work down to
1909    * the component granularity. In that case, the last index selects the
1910    * component of the vector into which the scalar is inserted.
1911 */
1912
1913 cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
1914 } else {
1915 cur->elems[indices[i]] = insert;
1916 }
1917
1918 return dest;
1919 }
1920
1921 static struct vtn_ssa_value *
1922 vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
1923 const uint32_t *indices, unsigned num_indices)
1924 {
1925 struct vtn_ssa_value *cur = src;
1926 for (unsigned i = 0; i < num_indices; i++) {
1927 if (glsl_type_is_vector_or_scalar(cur->type)) {
1928 assert(i == num_indices - 1);
1929 /* According to the SPIR-V spec, OpCompositeExtract may work down to
1930       * the component granularity. The last index is the index of the
1931       * component to extract from the vector.
1932 */
1933
1934 struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
1935 ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
1936 ret->def = vtn_vector_extract(b, cur->def, indices[i]);
1937 return ret;
1938 } else {
1939 cur = cur->elems[indices[i]];
1940 }
1941 }
1942
1943 return cur;
1944 }
1945
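/* Dispatches the SPIR-V composite instructions (dynamic vector indexing,
 * shuffle, construct, extract, insert and copy) to the helpers above.  The
 * result id is w[2] and the result type id is w[1]; operand ids start at
 * w[3], matching the usual SPIR-V word layout.
 */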
1946 static void
1947 vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
1948 const uint32_t *w, unsigned count)
1949 {
1950 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
1951 const struct glsl_type *type =
1952 vtn_value(b, w[1], vtn_value_type_type)->type->type;
1953 val->ssa = vtn_create_ssa_value(b, type);
1954
1955 switch (opcode) {
1956 case SpvOpVectorExtractDynamic:
1957 val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
1958 vtn_ssa_value(b, w[4])->def);
1959 break;
1960
1961 case SpvOpVectorInsertDynamic:
1962 val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
1963 vtn_ssa_value(b, w[4])->def,
1964 vtn_ssa_value(b, w[5])->def);
1965 break;
1966
1967 case SpvOpVectorShuffle:
1968 val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
1969 vtn_ssa_value(b, w[3])->def,
1970 vtn_ssa_value(b, w[4])->def,
1971 w + 5);
1972 break;
1973
1974 case SpvOpCompositeConstruct: {
1975 unsigned elems = count - 3;
1976 if (glsl_type_is_vector_or_scalar(type)) {
1977 nir_ssa_def *srcs[4];
1978 for (unsigned i = 0; i < elems; i++)
1979 srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
1980 val->ssa->def =
1981 vtn_vector_construct(b, glsl_get_vector_elements(type),
1982 elems, srcs);
1983 } else {
1984 val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
1985 for (unsigned i = 0; i < elems; i++)
1986 val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
1987 }
1988 break;
1989 }
1990 case SpvOpCompositeExtract:
1991 val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
1992 w + 4, count - 4);
1993 break;
1994
1995 case SpvOpCompositeInsert:
1996 val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
1997 vtn_ssa_value(b, w[3]),
1998 w + 5, count - 5);
1999 break;
2000
2001 case SpvOpCopyObject:
2002 val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
2003 break;
2004
2005 default:
2006 unreachable("unknown composite operation");
2007 }
2008 }
2009
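/* Translates the geometry-stream and barrier instructions into the
 * corresponding NIR intrinsics.  For the stream variants, the stream number
 * operand (w[1]) is stored in const_index[0].
 */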
2010 static void
2011 vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
2012 const uint32_t *w, unsigned count)
2013 {
2014 nir_intrinsic_op intrinsic_op;
2015 switch (opcode) {
2016 case SpvOpEmitVertex:
2017 case SpvOpEmitStreamVertex:
2018 intrinsic_op = nir_intrinsic_emit_vertex;
2019 break;
2020 case SpvOpEndPrimitive:
2021 case SpvOpEndStreamPrimitive:
2022 intrinsic_op = nir_intrinsic_end_primitive;
2023 break;
2024 case SpvOpMemoryBarrier:
2025 intrinsic_op = nir_intrinsic_memory_barrier;
2026 break;
2027 case SpvOpControlBarrier:
2028 intrinsic_op = nir_intrinsic_barrier;
2029 break;
2030 default:
2031 unreachable("unknown barrier instruction");
2032 }
2033
2034 nir_intrinsic_instr *intrin =
2035 nir_intrinsic_instr_create(b->shader, intrinsic_op);
2036
2037 if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
2038 intrin->const_index[0] = w[1];
2039
2040 nir_builder_instr_insert(&b->nb, &intrin->instr);
2041 }
2042
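/* Maps a geometry/tessellation execution mode to the GL primitive enum that
 * the rest of the compiler expects in shader_info.  The numeric GL values
 * are written out directly, presumably so this file does not need to pull in
 * the GL headers.
 */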
2043 static unsigned
2044 gl_primitive_from_spv_execution_mode(SpvExecutionMode mode)
2045 {
2046 switch (mode) {
2047 case SpvExecutionModeInputPoints:
2048 case SpvExecutionModeOutputPoints:
2049 return 0; /* GL_POINTS */
2050 case SpvExecutionModeInputLines:
2051 return 1; /* GL_LINES */
2052 case SpvExecutionModeInputLinesAdjacency:
2053       return 0x000A; /* GL_LINES_ADJACENCY_ARB */
2054 case SpvExecutionModeTriangles:
2055 return 4; /* GL_TRIANGLES */
2056 case SpvExecutionModeInputTrianglesAdjacency:
2057 return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
2058 case SpvExecutionModeQuads:
2059 return 7; /* GL_QUADS */
2060 case SpvExecutionModeIsolines:
2061 return 0x8E7A; /* GL_ISOLINES */
2062 case SpvExecutionModeOutputLineStrip:
2063 return 3; /* GL_LINE_STRIP */
2064 case SpvExecutionModeOutputTriangleStrip:
2065 return 5; /* GL_TRIANGLE_STRIP */
2066 default:
2067 assert(!"Invalid primitive type");
2068 return 4;
2069 }
2070 }
2071
2072 static unsigned
2073 vertices_in_from_spv_execution_mode(SpvExecutionMode mode)
2074 {
2075 switch (mode) {
2076 case SpvExecutionModeInputPoints:
2077 return 1;
2078 case SpvExecutionModeInputLines:
2079 return 2;
2080 case SpvExecutionModeInputLinesAdjacency:
2081 return 4;
2082 case SpvExecutionModeTriangles:
2083 return 3;
2084 case SpvExecutionModeInputTrianglesAdjacency:
2085 return 6;
2086 default:
2087 assert(!"Invalid GS input mode");
2088 return 0;
2089 }
2090 }
2091
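/* Maps a SPIR-V execution model onto the corresponding gl_shader_stage.
 * Kernel and other OpenCL-only models are not supported and hit the
 * unreachable below.
 */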
2092 static gl_shader_stage
2093 stage_for_execution_model(SpvExecutionModel model)
2094 {
2095 switch (model) {
2096 case SpvExecutionModelVertex:
2097 return MESA_SHADER_VERTEX;
2098 case SpvExecutionModelTessellationControl:
2099 return MESA_SHADER_TESS_CTRL;
2100 case SpvExecutionModelTessellationEvaluation:
2101 return MESA_SHADER_TESS_EVAL;
2102 case SpvExecutionModelGeometry:
2103 return MESA_SHADER_GEOMETRY;
2104 case SpvExecutionModelFragment:
2105 return MESA_SHADER_FRAGMENT;
2106 case SpvExecutionModelGLCompute:
2107 return MESA_SHADER_COMPUTE;
2108 default:
2109 unreachable("Unsupported execution model");
2110 }
2111 }
2112
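/* Handles the instructions that may appear before the first type or
 * constant: debug/source info, capabilities, extensions, the memory model,
 * entry points, names and decorations.  Returns false on the first opcode
 * that does not belong to the preamble, which stops the instruction walk so
 * the next phase can take over from that point.
 */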
2113 static bool
2114 vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
2115 const uint32_t *w, unsigned count)
2116 {
2117 switch (opcode) {
2118 case SpvOpSource:
2119 case SpvOpSourceExtension:
2120 case SpvOpSourceContinued:
2121 case SpvOpExtension:
2122 /* Unhandled, but these are for debug so that's ok. */
2123 break;
2124
2125 case SpvOpCapability:
2126 switch ((SpvCapability)w[1]) {
2127 case SpvCapabilityMatrix:
2128 case SpvCapabilityShader:
2129 case SpvCapabilityGeometry:
2130 break;
2131 default:
2132 assert(!"Unsupported capability");
2133 }
2134 break;
2135
2136 case SpvOpExtInstImport:
2137 vtn_handle_extension(b, opcode, w, count);
2138 break;
2139
2140 case SpvOpMemoryModel:
2141 assert(w[1] == SpvAddressingModelLogical);
2142 assert(w[2] == SpvMemoryModelGLSL450);
2143 break;
2144
2145 case SpvOpEntryPoint: {
2146 struct vtn_value *entry_point = &b->values[w[2]];
2147       /* Name this value whether or not it turns out to be the entry point we want */
2148 unsigned name_words;
2149 entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);
2150
2151 if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
2152 stage_for_execution_model(w[1]) != b->entry_point_stage)
2153 break;
2154
2155 assert(b->entry_point == NULL);
2156 b->entry_point = entry_point;
2157 break;
2158 }
2159
2160 case SpvOpString:
2161 vtn_push_value(b, w[1], vtn_value_type_string)->str =
2162 vtn_string_literal(b, &w[2], count - 2, NULL);
2163 break;
2164
2165 case SpvOpName:
2166 b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
2167 break;
2168
2169 case SpvOpMemberName:
2170 /* TODO */
2171 break;
2172
2173 case SpvOpExecutionMode:
2174 case SpvOpDecorationGroup:
2175 case SpvOpDecorate:
2176 case SpvOpMemberDecorate:
2177 case SpvOpGroupDecorate:
2178 case SpvOpGroupMemberDecorate:
2179 vtn_handle_decoration(b, opcode, w, count);
2180 break;
2181
2182 default:
2183 return false; /* End of preamble */
2184 }
2185
2186 return true;
2187 }
2188
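/* Decoration callback that applies the execution modes attached to the
 * selected entry point to the nir_shader's info: fragment origin and depth
 * layout, geometry input/output primitives and vertex counts, and the
 * compute local workgroup size.  Tessellation-related modes are still TODO.
 */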
2189 static void
2190 vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
2191 const struct vtn_decoration *mode, void *data)
2192 {
2193 assert(b->entry_point == entry_point);
2194
2195    switch (mode->exec_mode) {
2196 case SpvExecutionModeOriginUpperLeft:
2197 case SpvExecutionModeOriginLowerLeft:
2198 b->origin_upper_left =
2199 (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
2200 break;
2201
2202 case SpvExecutionModeEarlyFragmentTests:
2203 assert(b->shader->stage == MESA_SHADER_FRAGMENT);
2204 b->shader->info.fs.early_fragment_tests = true;
2205 break;
2206
2207 case SpvExecutionModeInvocations:
2208 assert(b->shader->stage == MESA_SHADER_GEOMETRY);
2209 b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
2210 break;
2211
2212 case SpvExecutionModeDepthReplacing:
2213 assert(b->shader->stage == MESA_SHADER_FRAGMENT);
2214 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
2215 break;
2216 case SpvExecutionModeDepthGreater:
2217 assert(b->shader->stage == MESA_SHADER_FRAGMENT);
2218 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
2219 break;
2220 case SpvExecutionModeDepthLess:
2221 assert(b->shader->stage == MESA_SHADER_FRAGMENT);
2222 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
2223 break;
2224 case SpvExecutionModeDepthUnchanged:
2225 assert(b->shader->stage == MESA_SHADER_FRAGMENT);
2226 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
2227 break;
2228
2229 case SpvExecutionModeLocalSize:
2230 assert(b->shader->stage == MESA_SHADER_COMPUTE);
2231 b->shader->info.cs.local_size[0] = mode->literals[0];
2232 b->shader->info.cs.local_size[1] = mode->literals[1];
2233 b->shader->info.cs.local_size[2] = mode->literals[2];
2234 break;
2235 case SpvExecutionModeLocalSizeHint:
2236       break; /* Nothing to do with this */
2237
2238 case SpvExecutionModeOutputVertices:
2239 assert(b->shader->stage == MESA_SHADER_GEOMETRY);
2240 b->shader->info.gs.vertices_out = mode->literals[0];
2241 break;
2242
2243 case SpvExecutionModeInputPoints:
2244 case SpvExecutionModeInputLines:
2245 case SpvExecutionModeInputLinesAdjacency:
2246 case SpvExecutionModeTriangles:
2247 case SpvExecutionModeInputTrianglesAdjacency:
2248 case SpvExecutionModeQuads:
2249 case SpvExecutionModeIsolines:
2250 if (b->shader->stage == MESA_SHADER_GEOMETRY) {
2251 b->shader->info.gs.vertices_in =
2252 vertices_in_from_spv_execution_mode(mode->exec_mode);
2253 } else {
2254 assert(!"Tesselation shaders not yet supported");
2255 }
2256 break;
2257
2258 case SpvExecutionModeOutputPoints:
2259 case SpvExecutionModeOutputLineStrip:
2260 case SpvExecutionModeOutputTriangleStrip:
2261 assert(b->shader->stage == MESA_SHADER_GEOMETRY);
2262 b->shader->info.gs.output_primitive =
2263 gl_primitive_from_spv_execution_mode(mode->exec_mode);
2264 break;
2265
2266 case SpvExecutionModeSpacingEqual:
2267 case SpvExecutionModeSpacingFractionalEven:
2268 case SpvExecutionModeSpacingFractionalOdd:
2269 case SpvExecutionModeVertexOrderCw:
2270 case SpvExecutionModeVertexOrderCcw:
2271 case SpvExecutionModePointMode:
2272 assert(!"TODO: Add tessellation metadata");
2273 break;
2274
2275 case SpvExecutionModePixelCenterInteger:
2276 case SpvExecutionModeXfb:
2277 assert(!"Unhandled execution mode");
2278 break;
2279
2280 case SpvExecutionModeVecTypeHint:
2281 case SpvExecutionModeContractionOff:
2282 break; /* OpenCL */
2283 }
2284 }
2285
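/* Second pass over the module: handles every type, constant and module-level
 * variable declaration.  Preamble opcodes are invalid at this point, and the
 * first opcode belonging to a function body ends the pass by returning false.
 */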
2286 static bool
2287 vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
2288 const uint32_t *w, unsigned count)
2289 {
2290 switch (opcode) {
2291 case SpvOpSource:
2292 case SpvOpSourceContinued:
2293 case SpvOpSourceExtension:
2294 case SpvOpExtension:
2295 case SpvOpCapability:
2296 case SpvOpExtInstImport:
2297 case SpvOpMemoryModel:
2298 case SpvOpEntryPoint:
2299 case SpvOpExecutionMode:
2300 case SpvOpString:
2301 case SpvOpName:
2302 case SpvOpMemberName:
2303 case SpvOpDecorationGroup:
2304 case SpvOpDecorate:
2305 case SpvOpMemberDecorate:
2306 case SpvOpGroupDecorate:
2307 case SpvOpGroupMemberDecorate:
2308 assert(!"Invalid opcode types and variables section");
2309 break;
2310
2311 case SpvOpTypeVoid:
2312 case SpvOpTypeBool:
2313 case SpvOpTypeInt:
2314 case SpvOpTypeFloat:
2315 case SpvOpTypeVector:
2316 case SpvOpTypeMatrix:
2317 case SpvOpTypeImage:
2318 case SpvOpTypeSampler:
2319 case SpvOpTypeSampledImage:
2320 case SpvOpTypeArray:
2321 case SpvOpTypeRuntimeArray:
2322 case SpvOpTypeStruct:
2323 case SpvOpTypeOpaque:
2324 case SpvOpTypePointer:
2325 case SpvOpTypeFunction:
2326 case SpvOpTypeEvent:
2327 case SpvOpTypeDeviceEvent:
2328 case SpvOpTypeReserveId:
2329 case SpvOpTypeQueue:
2330 case SpvOpTypePipe:
2331 vtn_handle_type(b, opcode, w, count);
2332 break;
2333
2334 case SpvOpConstantTrue:
2335 case SpvOpConstantFalse:
2336 case SpvOpConstant:
2337 case SpvOpConstantComposite:
2338 case SpvOpConstantSampler:
2339 case SpvOpConstantNull:
2340 case SpvOpSpecConstantTrue:
2341 case SpvOpSpecConstantFalse:
2342 case SpvOpSpecConstant:
2343 case SpvOpSpecConstantComposite:
2344 case SpvOpSpecConstantOp:
2345 vtn_handle_constant(b, opcode, w, count);
2346 break;
2347
2348 case SpvOpVariable:
2349 vtn_handle_variables(b, opcode, w, count);
2350 break;
2351
2352 default:
2353       return false; /* End of the types and variables section */
2354 }
2355
2356 return true;
2357 }
2358
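/* Per-instruction handler used while emitting function bodies.  Control-flow
 * opcodes are dealt with by the CFG pre-pass, so this only needs to dispatch
 * the straight-line instructions: variables and memory access, function
 * calls, texture and image operations, atomics, ALU ops, composites and
 * barriers.
 */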
2359 static bool
2360 vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
2361 const uint32_t *w, unsigned count)
2362 {
2363 switch (opcode) {
2364 case SpvOpLabel:
2365 break;
2366
2367 case SpvOpLoopMerge:
2368 case SpvOpSelectionMerge:
2369       /* These are handled by the CFG pre-pass and walk_blocks */
2370 break;
2371
2372 case SpvOpUndef: {
2373 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
2374 val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
2375 break;
2376 }
2377
2378 case SpvOpExtInst:
2379 vtn_handle_extension(b, opcode, w, count);
2380 break;
2381
2382 case SpvOpVariable:
2383 case SpvOpLoad:
2384 case SpvOpStore:
2385 case SpvOpCopyMemory:
2386 case SpvOpCopyMemorySized:
2387 case SpvOpAccessChain:
2388 case SpvOpInBoundsAccessChain:
2389 case SpvOpArrayLength:
2390 vtn_handle_variables(b, opcode, w, count);
2391 break;
2392
2393 case SpvOpFunctionCall:
2394 vtn_handle_function_call(b, opcode, w, count);
2395 break;
2396
2397 case SpvOpSampledImage:
2398 case SpvOpImage:
2399 case SpvOpImageSampleImplicitLod:
2400 case SpvOpImageSampleExplicitLod:
2401 case SpvOpImageSampleDrefImplicitLod:
2402 case SpvOpImageSampleDrefExplicitLod:
2403 case SpvOpImageSampleProjImplicitLod:
2404 case SpvOpImageSampleProjExplicitLod:
2405 case SpvOpImageSampleProjDrefImplicitLod:
2406 case SpvOpImageSampleProjDrefExplicitLod:
2407 case SpvOpImageFetch:
2408 case SpvOpImageGather:
2409 case SpvOpImageDrefGather:
2410 case SpvOpImageQuerySizeLod:
2411 case SpvOpImageQueryLod:
2412 case SpvOpImageQueryLevels:
2413 case SpvOpImageQuerySamples:
2414 vtn_handle_texture(b, opcode, w, count);
2415 break;
2416
2417 case SpvOpImageRead:
2418 case SpvOpImageWrite:
2419 case SpvOpImageTexelPointer:
2420 vtn_handle_image(b, opcode, w, count);
2421 break;
2422
2423 case SpvOpImageQuerySize: {
2424 struct vtn_access_chain *image =
2425 vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
2426 if (glsl_type_is_image(image->var->var->interface_type)) {
2427 vtn_handle_image(b, opcode, w, count);
2428 } else {
2429 vtn_handle_texture(b, opcode, w, count);
2430 }
2431 break;
2432 }
2433
2434 case SpvOpAtomicExchange:
2435 case SpvOpAtomicCompareExchange:
2436 case SpvOpAtomicCompareExchangeWeak:
2437 case SpvOpAtomicIIncrement:
2438 case SpvOpAtomicIDecrement:
2439 case SpvOpAtomicIAdd:
2440 case SpvOpAtomicISub:
2441 case SpvOpAtomicSMin:
2442 case SpvOpAtomicUMin:
2443 case SpvOpAtomicSMax:
2444 case SpvOpAtomicUMax:
2445 case SpvOpAtomicAnd:
2446 case SpvOpAtomicOr:
2447 case SpvOpAtomicXor: {
2448 struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
2449 if (pointer->value_type == vtn_value_type_image_pointer) {
2450 vtn_handle_image(b, opcode, w, count);
2451 } else {
2452 assert(pointer->value_type == vtn_value_type_access_chain);
2453 vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
2454 }
2455 break;
2456 }
2457
2458 case SpvOpSNegate:
2459 case SpvOpFNegate:
2460 case SpvOpNot:
2461 case SpvOpAny:
2462 case SpvOpAll:
2463 case SpvOpConvertFToU:
2464 case SpvOpConvertFToS:
2465 case SpvOpConvertSToF:
2466 case SpvOpConvertUToF:
2467 case SpvOpUConvert:
2468 case SpvOpSConvert:
2469 case SpvOpFConvert:
2470 case SpvOpQuantizeToF16:
2471 case SpvOpConvertPtrToU:
2472 case SpvOpConvertUToPtr:
2473 case SpvOpPtrCastToGeneric:
2474 case SpvOpGenericCastToPtr:
2475 case SpvOpBitcast:
2476 case SpvOpIsNan:
2477 case SpvOpIsInf:
2478 case SpvOpIsFinite:
2479 case SpvOpIsNormal:
2480 case SpvOpSignBitSet:
2481 case SpvOpLessOrGreater:
2482 case SpvOpOrdered:
2483 case SpvOpUnordered:
2484 case SpvOpIAdd:
2485 case SpvOpFAdd:
2486 case SpvOpISub:
2487 case SpvOpFSub:
2488 case SpvOpIMul:
2489 case SpvOpFMul:
2490 case SpvOpUDiv:
2491 case SpvOpSDiv:
2492 case SpvOpFDiv:
2493 case SpvOpUMod:
2494 case SpvOpSRem:
2495 case SpvOpSMod:
2496 case SpvOpFRem:
2497 case SpvOpFMod:
2498 case SpvOpVectorTimesScalar:
2499 case SpvOpDot:
2500 case SpvOpIAddCarry:
2501 case SpvOpISubBorrow:
2502 case SpvOpUMulExtended:
2503 case SpvOpSMulExtended:
2504 case SpvOpShiftRightLogical:
2505 case SpvOpShiftRightArithmetic:
2506 case SpvOpShiftLeftLogical:
2507 case SpvOpLogicalEqual:
2508 case SpvOpLogicalNotEqual:
2509 case SpvOpLogicalOr:
2510 case SpvOpLogicalAnd:
2511 case SpvOpLogicalNot:
2512 case SpvOpBitwiseOr:
2513 case SpvOpBitwiseXor:
2514 case SpvOpBitwiseAnd:
2515 case SpvOpSelect:
2516 case SpvOpIEqual:
2517 case SpvOpFOrdEqual:
2518 case SpvOpFUnordEqual:
2519 case SpvOpINotEqual:
2520 case SpvOpFOrdNotEqual:
2521 case SpvOpFUnordNotEqual:
2522 case SpvOpULessThan:
2523 case SpvOpSLessThan:
2524 case SpvOpFOrdLessThan:
2525 case SpvOpFUnordLessThan:
2526 case SpvOpUGreaterThan:
2527 case SpvOpSGreaterThan:
2528 case SpvOpFOrdGreaterThan:
2529 case SpvOpFUnordGreaterThan:
2530 case SpvOpULessThanEqual:
2531 case SpvOpSLessThanEqual:
2532 case SpvOpFOrdLessThanEqual:
2533 case SpvOpFUnordLessThanEqual:
2534 case SpvOpUGreaterThanEqual:
2535 case SpvOpSGreaterThanEqual:
2536 case SpvOpFOrdGreaterThanEqual:
2537 case SpvOpFUnordGreaterThanEqual:
2538 case SpvOpDPdx:
2539 case SpvOpDPdy:
2540 case SpvOpFwidth:
2541 case SpvOpDPdxFine:
2542 case SpvOpDPdyFine:
2543 case SpvOpFwidthFine:
2544 case SpvOpDPdxCoarse:
2545 case SpvOpDPdyCoarse:
2546 case SpvOpFwidthCoarse:
2547 case SpvOpBitFieldInsert:
2548 case SpvOpBitFieldSExtract:
2549 case SpvOpBitFieldUExtract:
2550 case SpvOpBitReverse:
2551 case SpvOpBitCount:
2552 case SpvOpTranspose:
2553 case SpvOpOuterProduct:
2554 case SpvOpMatrixTimesScalar:
2555 case SpvOpVectorTimesMatrix:
2556 case SpvOpMatrixTimesVector:
2557 case SpvOpMatrixTimesMatrix:
2558 vtn_handle_alu(b, opcode, w, count);
2559 break;
2560
2561 case SpvOpVectorExtractDynamic:
2562 case SpvOpVectorInsertDynamic:
2563 case SpvOpVectorShuffle:
2564 case SpvOpCompositeConstruct:
2565 case SpvOpCompositeExtract:
2566 case SpvOpCompositeInsert:
2567 case SpvOpCopyObject:
2568 vtn_handle_composite(b, opcode, w, count);
2569 break;
2570
2571 case SpvOpEmitVertex:
2572 case SpvOpEndPrimitive:
2573 case SpvOpEmitStreamVertex:
2574 case SpvOpEndStreamPrimitive:
2575 case SpvOpControlBarrier:
2576 case SpvOpMemoryBarrier:
2577 vtn_handle_barrier(b, opcode, w, count);
2578 break;
2579
2580 default:
2581 unreachable("Unhandled opcode");
2582 }
2583
2584 return true;
2585 }
2586
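/* Top-level entry point: validates the SPIR-V header, then runs the phases
 * above in order (preamble, execution modes, types/constants/variables, CFG
 * construction, function-body emission) and returns the nir_function for the
 * requested entry point, or NULL if that entry point is not found.
 *
 * Illustrative call from a driver (the local variable names here are made up
 * for the example; real callers live in the drivers):
 *
 *    nir_function *entry =
 *       spirv_to_nir(module_words, module_word_count,
 *                    spec_entries, num_spec_entries,
 *                    MESA_SHADER_FRAGMENT, "main", nir_options);
 *    if (entry == NULL)
 *       // handle the error
 */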
2587 nir_function *
2588 spirv_to_nir(const uint32_t *words, size_t word_count,
2589 struct nir_spirv_specialization *spec, unsigned num_spec,
2590 gl_shader_stage stage, const char *entry_point_name,
2591 const nir_shader_compiler_options *options)
2592 {
2593 const uint32_t *word_end = words + word_count;
2594
2595    /* Handle the SPIR-V header (first 5 dwords) */
2596 assert(word_count > 5);
2597
2598 assert(words[0] == SpvMagicNumber);
2599 assert(words[1] >= 0x10000);
2600 /* words[2] == generator magic */
2601 unsigned value_id_bound = words[3];
2602 assert(words[4] == 0);
2603
2604    words += 5;
2605
2606    /* Initialize the vtn_builder object */
2607 struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
2608 b->value_id_bound = value_id_bound;
2609 b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
2610 exec_list_make_empty(&b->functions);
2611 b->entry_point_stage = stage;
2612 b->entry_point_name = entry_point_name;
2613
2614 /* Handle all the preamble instructions */
2615 words = vtn_foreach_instruction(b, words, word_end,
2616 vtn_handle_preamble_instruction);
2617
2618 if (b->entry_point == NULL) {
2619 assert(!"Entry point not found");
2620 ralloc_free(b);
2621 return NULL;
2622 }
2623
2624 b->shader = nir_shader_create(NULL, stage, options);
2625
2626 /* Parse execution modes */
2627 vtn_foreach_execution_mode(b, b->entry_point,
2628 vtn_handle_execution_mode, NULL);
2629
2630 b->specializations = spec;
2631 b->num_specializations = num_spec;
2632
2633 /* Handle all variable, type, and constant instructions */
2634 words = vtn_foreach_instruction(b, words, word_end,
2635 vtn_handle_variable_or_type_instruction);
2636
2637 vtn_build_cfg(b, words, word_end);
2638
2639 foreach_list_typed(struct vtn_function, func, node, &b->functions) {
2640 b->impl = func->impl;
2641 b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
2642 _mesa_key_pointer_equal);
2643
2644 vtn_function_emit(b, func, vtn_handle_body_instruction);
2645 }
2646
2647 assert(b->entry_point->value_type == vtn_value_type_function);
2648 nir_function *entry_point = b->entry_point->func->impl->function;
2649 assert(entry_point);
2650
2651 ralloc_free(b);
2652
2653 return entry_point;
2654 }