zink: adjust zink_shader struct to contain full streamout info
[mesa.git] / src / gallium / drivers / zink / nir_to_spirv / nir_to_spirv.c
1 /*
2 * Copyright 2018 Collabora Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "nir_to_spirv.h"
25 #include "spirv_builder.h"
26
27 #include "nir.h"
28 #include "pipe/p_state.h"
29 #include "util/u_memory.h"
30 #include "util/hash_table.h"
31
32 /* this consistently maps slots to a zero-indexed value to avoid wasting slots */
33 static unsigned slot_pack_map[] = {
34 /* Position is builtin */
35 [VARYING_SLOT_POS] = UINT_MAX,
36 [VARYING_SLOT_COL0] = 0, /* input/output */
37 [VARYING_SLOT_COL1] = 1, /* input/output */
38 [VARYING_SLOT_FOGC] = 2, /* input/output */
39 /* TEX0-7 are deprecated, so we put them at the end of the range and hope nobody uses them all */
40 [VARYING_SLOT_TEX0] = VARYING_SLOT_VAR0 - 1, /* input/output */
41 [VARYING_SLOT_TEX1] = VARYING_SLOT_VAR0 - 2,
42 [VARYING_SLOT_TEX2] = VARYING_SLOT_VAR0 - 3,
43 [VARYING_SLOT_TEX3] = VARYING_SLOT_VAR0 - 4,
44 [VARYING_SLOT_TEX4] = VARYING_SLOT_VAR0 - 5,
45 [VARYING_SLOT_TEX5] = VARYING_SLOT_VAR0 - 6,
46 [VARYING_SLOT_TEX6] = VARYING_SLOT_VAR0 - 7,
47 [VARYING_SLOT_TEX7] = VARYING_SLOT_VAR0 - 8,
48
49 /* PointSize is builtin */
50 [VARYING_SLOT_PSIZ] = UINT_MAX,
51
52 [VARYING_SLOT_BFC0] = 3, /* output only */
53 [VARYING_SLOT_BFC1] = 4, /* output only */
54 [VARYING_SLOT_EDGE] = 5, /* output only */
55 [VARYING_SLOT_CLIP_VERTEX] = 6, /* output only */
56
57 /* ClipDistance is builtin */
58 [VARYING_SLOT_CLIP_DIST0] = UINT_MAX,
59 [VARYING_SLOT_CLIP_DIST1] = UINT_MAX,
60
61 /* CullDistance is builtin */
62 [VARYING_SLOT_CULL_DIST0] = UINT_MAX, /* input/output */
63 [VARYING_SLOT_CULL_DIST1] = UINT_MAX, /* never actually used */
64
65 /* PrimitiveId is builtin */
66 [VARYING_SLOT_PRIMITIVE_ID] = UINT_MAX,
67
68 /* Layer is builtin */
69 [VARYING_SLOT_LAYER] = UINT_MAX, /* input/output */
70
71 /* ViewportIndex is builtin */
72 [VARYING_SLOT_VIEWPORT] = UINT_MAX, /* input/output */
73
74 /* FrontFacing is builtin */
75 [VARYING_SLOT_FACE] = UINT_MAX,
76
77 /* PointCoord is builtin */
78 [VARYING_SLOT_PNTC] = UINT_MAX, /* input only */
79
80 /* TessLevelOuter is builtin */
81 [VARYING_SLOT_TESS_LEVEL_OUTER] = UINT_MAX,
82 /* TessLevelInner is builtin */
83 [VARYING_SLOT_TESS_LEVEL_INNER] = UINT_MAX,
84
85 [VARYING_SLOT_BOUNDING_BOX0] = 7, /* Only appears as TCS output. */
86 [VARYING_SLOT_BOUNDING_BOX1] = 8, /* Only appears as TCS output. */
87 [VARYING_SLOT_VIEW_INDEX] = 9, /* input/output */
88 [VARYING_SLOT_VIEWPORT_MASK] = 10, /* output only */
89 };
90 #define NTV_MIN_RESERVED_SLOTS 11
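/* In rough terms: entries marked UINT_MAX are emitted as SPIR-V builtins and
 * never consume a location, the remaining pre-VAR0 varyings are packed into
 * locations 0..NTV_MIN_RESERVED_SLOTS-1 (with the deprecated TEX0-7 parked
 * just below VARYING_SLOT_VAR0), and user varyings (VAR0 and up) are assigned
 * locations starting at NTV_MIN_RESERVED_SLOTS -- see handle_slot() below.
 */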
91
92 struct ntv_context {
93 void *mem_ctx;
94
95 struct spirv_builder builder;
96
97 SpvId GLSL_std_450;
98
99 gl_shader_stage stage;
100
101 SpvId ubos[128];
102 size_t num_ubos;
103 SpvId image_types[PIPE_MAX_SAMPLERS];
104 SpvId samplers[PIPE_MAX_SAMPLERS];
105 unsigned samplers_used : PIPE_MAX_SAMPLERS;
106 SpvId entry_ifaces[PIPE_MAX_SHADER_INPUTS * 4 + PIPE_MAX_SHADER_OUTPUTS * 4];
107 size_t num_entry_ifaces;
108
109 SpvId *defs;
110 size_t num_defs;
111
112 SpvId *regs;
113 size_t num_regs;
114
115 struct hash_table *vars; /* nir_variable -> SpvId */
116 struct hash_table *so_outputs; /* pipe_stream_output -> SpvId */
117 unsigned outputs[VARYING_SLOT_MAX];
118 const struct glsl_type *so_output_gl_types[VARYING_SLOT_MAX];
119 SpvId so_output_types[VARYING_SLOT_MAX];
120
121 const SpvId *block_ids;
122 size_t num_blocks;
123 bool block_started;
124 SpvId loop_break, loop_cont;
125
126 SpvId front_face_var, instance_id_var, vertex_id_var;
127 #ifndef NDEBUG
128 bool seen_texcoord[8]; //whether we've seen a VARYING_SLOT_TEX[n] this pass
129 #endif
130 };
131
132 static SpvId
133 get_fvec_constant(struct ntv_context *ctx, unsigned bit_size,
134 unsigned num_components, float value);
135
136 static SpvId
137 get_uvec_constant(struct ntv_context *ctx, unsigned bit_size,
138 unsigned num_components, uint32_t value);
139
140 static SpvId
141 get_ivec_constant(struct ntv_context *ctx, unsigned bit_size,
142 unsigned num_components, int32_t value);
143
144 static SpvId
145 emit_unop(struct ntv_context *ctx, SpvOp op, SpvId type, SpvId src);
146
147 static SpvId
148 emit_binop(struct ntv_context *ctx, SpvOp op, SpvId type,
149 SpvId src0, SpvId src1);
150
151 static SpvId
152 emit_triop(struct ntv_context *ctx, SpvOp op, SpvId type,
153 SpvId src0, SpvId src1, SpvId src2);
154
155 static SpvId
156 get_bvec_type(struct ntv_context *ctx, int num_components)
157 {
158 SpvId bool_type = spirv_builder_type_bool(&ctx->builder);
159 if (num_components > 1)
160 return spirv_builder_type_vector(&ctx->builder, bool_type,
161 num_components);
162
163 assert(num_components == 1);
164 return bool_type;
165 }
166
167 static SpvId
168 block_label(struct ntv_context *ctx, nir_block *block)
169 {
170 assert(block->index < ctx->num_blocks);
171 return ctx->block_ids[block->index];
172 }
173
174 static SpvId
175 emit_float_const(struct ntv_context *ctx, int bit_size, float value)
176 {
177 assert(bit_size == 32);
178 return spirv_builder_const_float(&ctx->builder, bit_size, value);
179 }
180
181 static SpvId
182 emit_uint_const(struct ntv_context *ctx, int bit_size, uint32_t value)
183 {
184 assert(bit_size == 32);
185 return spirv_builder_const_uint(&ctx->builder, bit_size, value);
186 }
187
188 static SpvId
189 emit_int_const(struct ntv_context *ctx, int bit_size, int32_t value)
190 {
191 assert(bit_size == 32);
192 return spirv_builder_const_int(&ctx->builder, bit_size, value);
193 }
194
195 static SpvId
196 get_fvec_type(struct ntv_context *ctx, unsigned bit_size, unsigned num_components)
197 {
198 assert(bit_size == 32); // only 32-bit floats supported so far
199
200 SpvId float_type = spirv_builder_type_float(&ctx->builder, bit_size);
201 if (num_components > 1)
202 return spirv_builder_type_vector(&ctx->builder, float_type,
203 num_components);
204
205 assert(num_components == 1);
206 return float_type;
207 }
208
209 static SpvId
210 get_ivec_type(struct ntv_context *ctx, unsigned bit_size, unsigned num_components)
211 {
212 assert(bit_size == 32); // only 32-bit ints supported so far
213
214 SpvId int_type = spirv_builder_type_int(&ctx->builder, bit_size);
215 if (num_components > 1)
216 return spirv_builder_type_vector(&ctx->builder, int_type,
217 num_components);
218
219 assert(num_components == 1);
220 return int_type;
221 }
222
223 static SpvId
224 get_uvec_type(struct ntv_context *ctx, unsigned bit_size, unsigned num_components)
225 {
226 assert(bit_size == 32); // only 32-bit uints supported so far
227
228 SpvId uint_type = spirv_builder_type_uint(&ctx->builder, bit_size);
229 if (num_components > 1)
230 return spirv_builder_type_vector(&ctx->builder, uint_type,
231 num_components);
232
233 assert(num_components == 1);
234 return uint_type;
235 }
236
237 static SpvId
238 get_dest_uvec_type(struct ntv_context *ctx, nir_dest *dest)
239 {
240 unsigned bit_size = MAX2(nir_dest_bit_size(*dest), 32);
241 return get_uvec_type(ctx, bit_size, nir_dest_num_components(*dest));
242 }
243
244 static SpvId
245 get_glsl_basetype(struct ntv_context *ctx, enum glsl_base_type type)
246 {
247 switch (type) {
248 case GLSL_TYPE_BOOL:
249 return spirv_builder_type_bool(&ctx->builder);
250
251 case GLSL_TYPE_FLOAT:
252 return spirv_builder_type_float(&ctx->builder, 32);
253
254 case GLSL_TYPE_INT:
255 return spirv_builder_type_int(&ctx->builder, 32);
256
257 case GLSL_TYPE_UINT:
258 return spirv_builder_type_uint(&ctx->builder, 32);
259 /* TODO: handle more types */
260
261 default:
262 unreachable("unknown GLSL type");
263 }
264 }
265
266 static SpvId
267 get_glsl_type(struct ntv_context *ctx, const struct glsl_type *type)
268 {
269 assert(type);
270 if (glsl_type_is_scalar(type))
271 return get_glsl_basetype(ctx, glsl_get_base_type(type));
272
273 if (glsl_type_is_vector(type))
274 return spirv_builder_type_vector(&ctx->builder,
275 get_glsl_basetype(ctx, glsl_get_base_type(type)),
276 glsl_get_vector_elements(type));
277
278 if (glsl_type_is_array(type)) {
279 SpvId ret = spirv_builder_type_array(&ctx->builder,
280 get_glsl_type(ctx, glsl_get_array_element(type)),
281 emit_uint_const(ctx, 32, glsl_get_length(type)));
282 uint32_t stride = glsl_get_explicit_stride(type);
283 if (stride)
284 spirv_builder_emit_array_stride(&ctx->builder, ret, stride);
285 return ret;
286 }
287
288
289 unreachable("we shouldn't get here, I think...");
290 }
291
292 static inline unsigned
293 handle_slot(struct ntv_context *ctx, unsigned slot)
294 {
295 unsigned orig = slot;
296 if (slot < VARYING_SLOT_VAR0) {
297 #ifndef NDEBUG
298 if (slot >= VARYING_SLOT_TEX0 && slot <= VARYING_SLOT_TEX7)
299 ctx->seen_texcoord[slot - VARYING_SLOT_TEX0] = true;
300 #endif
301 slot = slot_pack_map[slot];
302 if (slot == UINT_MAX)
303 debug_printf("unhandled varying slot: %s\n", gl_varying_slot_name(orig));
304 } else {
305 slot -= VARYING_SLOT_VAR0 - NTV_MIN_RESERVED_SLOTS;
306 assert(slot <= VARYING_SLOT_VAR0 - 8 ||
307 !ctx->seen_texcoord[VARYING_SLOT_VAR0 - slot - 1]);
308
309 }
310 assert(slot < VARYING_SLOT_VAR0);
311 return slot;
312 }
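/* A few illustrative evaluations of handle_slot() (values follow directly from
 * slot_pack_map and NTV_MIN_RESERVED_SLOTS above):
 *
 *   handle_slot(ctx, VARYING_SLOT_COL0)  ->  0
 *   handle_slot(ctx, VARYING_SLOT_TEX0)  ->  VARYING_SLOT_VAR0 - 1
 *   handle_slot(ctx, VARYING_SLOT_VAR0)  ->  NTV_MIN_RESERVED_SLOTS      (11)
 *   handle_slot(ctx, VARYING_SLOT_VAR5)  ->  NTV_MIN_RESERVED_SLOTS + 5  (16)
 *
 * Builtin slots (UINT_MAX in the map) are expected to be handled before this
 * function is called; if one slips through it trips the debug_printf above.
 */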
313
314 #define HANDLE_EMIT_BUILTIN(SLOT, BUILTIN) \
315 case VARYING_SLOT_##SLOT: \
316 spirv_builder_emit_builtin(&ctx->builder, var_id, SpvBuiltIn##BUILTIN); \
317 break
318
319
320 static void
321 emit_input(struct ntv_context *ctx, struct nir_variable *var)
322 {
323 SpvId var_type = get_glsl_type(ctx, var->type);
324 SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
325 SpvStorageClassInput,
326 var_type);
327 SpvId var_id = spirv_builder_emit_var(&ctx->builder, pointer_type,
328 SpvStorageClassInput);
329
330 if (var->name)
331 spirv_builder_emit_name(&ctx->builder, var_id, var->name);
332
333 if (ctx->stage == MESA_SHADER_FRAGMENT) {
334 unsigned slot = var->data.location;
335 switch (slot) {
336 HANDLE_EMIT_BUILTIN(POS, FragCoord);
337 HANDLE_EMIT_BUILTIN(PNTC, PointCoord);
338 HANDLE_EMIT_BUILTIN(LAYER, Layer);
339 HANDLE_EMIT_BUILTIN(PRIMITIVE_ID, PrimitiveId);
340 HANDLE_EMIT_BUILTIN(CLIP_DIST0, ClipDistance);
341 HANDLE_EMIT_BUILTIN(CULL_DIST0, CullDistance);
342 HANDLE_EMIT_BUILTIN(VIEWPORT, ViewportIndex);
343 HANDLE_EMIT_BUILTIN(FACE, FrontFacing);
344
345 default:
346 slot = handle_slot(ctx, slot);
347 spirv_builder_emit_location(&ctx->builder, var_id, slot);
348 }
349 } else {
350 spirv_builder_emit_location(&ctx->builder, var_id,
351 var->data.driver_location);
352 }
353
354 if (var->data.location_frac)
355 spirv_builder_emit_component(&ctx->builder, var_id,
356 var->data.location_frac);
357
358 if (var->data.interpolation == INTERP_MODE_FLAT)
359 spirv_builder_emit_decoration(&ctx->builder, var_id, SpvDecorationFlat);
360
361 _mesa_hash_table_insert(ctx->vars, var, (void *)(intptr_t)var_id);
362
363 assert(ctx->num_entry_ifaces < ARRAY_SIZE(ctx->entry_ifaces));
364 ctx->entry_ifaces[ctx->num_entry_ifaces++] = var_id;
365 }
366
367 static void
368 emit_output(struct ntv_context *ctx, struct nir_variable *var)
369 {
370 SpvId var_type = get_glsl_type(ctx, var->type);
371 SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
372 SpvStorageClassOutput,
373 var_type);
374 SpvId var_id = spirv_builder_emit_var(&ctx->builder, pointer_type,
375 SpvStorageClassOutput);
376 if (var->name)
377 spirv_builder_emit_name(&ctx->builder, var_id, var->name);
378
379
380 if (ctx->stage == MESA_SHADER_VERTEX) {
381 unsigned slot = var->data.location;
382 switch (slot) {
383 HANDLE_EMIT_BUILTIN(POS, Position);
384 HANDLE_EMIT_BUILTIN(PSIZ, PointSize);
385 HANDLE_EMIT_BUILTIN(LAYER, Layer);
386 HANDLE_EMIT_BUILTIN(PRIMITIVE_ID, PrimitiveId);
387 HANDLE_EMIT_BUILTIN(CULL_DIST0, CullDistance);
388 HANDLE_EMIT_BUILTIN(VIEWPORT, ViewportIndex);
389 HANDLE_EMIT_BUILTIN(TESS_LEVEL_OUTER, TessLevelOuter);
390 HANDLE_EMIT_BUILTIN(TESS_LEVEL_INNER, TessLevelInner);
391
392 case VARYING_SLOT_CLIP_DIST0:
393 assert(glsl_type_is_array(var->type));
394 spirv_builder_emit_builtin(&ctx->builder, var_id, SpvBuiltInClipDistance);
395 /* this can be as large as 2x vec4, which requires 2 slots */
396 ctx->outputs[VARYING_SLOT_CLIP_DIST1] = var_id;
397 ctx->so_output_gl_types[VARYING_SLOT_CLIP_DIST1] = var->type;
398 ctx->so_output_types[VARYING_SLOT_CLIP_DIST1] = var_type;
399 break;
400
401 default:
402 slot = handle_slot(ctx, slot);
403 spirv_builder_emit_location(&ctx->builder, var_id, slot);
404 }
405 ctx->outputs[var->data.location] = var_id;
406 ctx->so_output_gl_types[var->data.location] = var->type;
407 ctx->so_output_types[var->data.location] = var_type;
408 } else if (ctx->stage == MESA_SHADER_FRAGMENT) {
409 if (var->data.location >= FRAG_RESULT_DATA0) {
410 spirv_builder_emit_location(&ctx->builder, var_id,
411 var->data.location - FRAG_RESULT_DATA0);
412 spirv_builder_emit_index(&ctx->builder, var_id, var->data.index);
413 } else {
414 switch (var->data.location) {
415 case FRAG_RESULT_COLOR:
416 unreachable("gl_FragColor should be lowered by now");
417
418 case FRAG_RESULT_DEPTH:
419 spirv_builder_emit_builtin(&ctx->builder, var_id, SpvBuiltInFragDepth);
420 break;
421
422 default:
423 spirv_builder_emit_location(&ctx->builder, var_id,
424 var->data.driver_location);
425 spirv_builder_emit_index(&ctx->builder, var_id, var->data.index);
426 }
427 }
428 }
429
430 if (var->data.location_frac)
431 spirv_builder_emit_component(&ctx->builder, var_id,
432 var->data.location_frac);
433
434 switch (var->data.interpolation) {
435 case INTERP_MODE_NONE:
436 case INTERP_MODE_SMOOTH: /* XXX spirv doesn't seem to have anything for this */
437 break;
438 case INTERP_MODE_FLAT:
439 spirv_builder_emit_decoration(&ctx->builder, var_id, SpvDecorationFlat);
440 break;
441 case INTERP_MODE_EXPLICIT:
442 spirv_builder_emit_decoration(&ctx->builder, var_id, SpvDecorationExplicitInterpAMD);
443 break;
444 case INTERP_MODE_NOPERSPECTIVE:
445 spirv_builder_emit_decoration(&ctx->builder, var_id, SpvDecorationNoPerspective);
446 break;
447 default:
448 unreachable("unknown interpolation value");
449 }
450
451 _mesa_hash_table_insert(ctx->vars, var, (void *)(intptr_t)var_id);
452
453 assert(ctx->num_entry_ifaces < ARRAY_SIZE(ctx->entry_ifaces));
454 ctx->entry_ifaces[ctx->num_entry_ifaces++] = var_id;
455 }
456
457 static SpvDim
458 type_to_dim(enum glsl_sampler_dim gdim, bool *is_ms)
459 {
460 *is_ms = false;
461 switch (gdim) {
462 case GLSL_SAMPLER_DIM_1D:
463 return SpvDim1D;
464 case GLSL_SAMPLER_DIM_2D:
465 return SpvDim2D;
466 case GLSL_SAMPLER_DIM_3D:
467 return SpvDim3D;
468 case GLSL_SAMPLER_DIM_CUBE:
469 return SpvDimCube;
470 case GLSL_SAMPLER_DIM_RECT:
471 return SpvDim2D;
472 case GLSL_SAMPLER_DIM_BUF:
473 return SpvDimBuffer;
474 case GLSL_SAMPLER_DIM_EXTERNAL:
475 return SpvDim2D; /* seems dodgy... */
476 case GLSL_SAMPLER_DIM_MS:
477 *is_ms = true;
478 return SpvDim2D;
479 default:
480 fprintf(stderr, "unknown sampler type %d\n", gdim);
481 break;
482 }
483 return SpvDim2D;
484 }
485
486 uint32_t
487 zink_binding(gl_shader_stage stage, VkDescriptorType type, int index)
488 {
489 if (stage == MESA_SHADER_NONE ||
490 stage >= MESA_SHADER_COMPUTE) {
491 unreachable("not supported");
492 } else {
493 uint32_t stage_offset = (uint32_t)stage * (PIPE_MAX_CONSTANT_BUFFERS +
494 PIPE_MAX_SHADER_SAMPLER_VIEWS);
495
496 switch (type) {
497 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
498 assert(index < PIPE_MAX_CONSTANT_BUFFERS);
499 return stage_offset + index;
500
501 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
502 assert(index < PIPE_MAX_SHADER_SAMPLER_VIEWS);
503 return stage_offset + PIPE_MAX_CONSTANT_BUFFERS + index;
504
505 default:
506 unreachable("unexpected type");
507 }
508 }
509 }
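/* The binding layout this produces, written out symbolically (a sketch; the
 * PIPE_* limits are left as-is rather than expanded to concrete values):
 *
 *   ubo binding     = stage * (PIPE_MAX_CONSTANT_BUFFERS +
 *                              PIPE_MAX_SHADER_SAMPLER_VIEWS) + index
 *   sampler binding = stage * (PIPE_MAX_CONSTANT_BUFFERS +
 *                              PIPE_MAX_SHADER_SAMPLER_VIEWS)
 *                     + PIPE_MAX_CONSTANT_BUFFERS + index
 *
 * so each graphics stage owns one contiguous window of bindings, with UBO 0
 * of the vertex stage landing at binding 0.
 */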
510
511 static void
512 emit_sampler(struct ntv_context *ctx, struct nir_variable *var)
513 {
514 const struct glsl_type *type = glsl_without_array(var->type);
515
516 bool is_ms;
517 SpvDim dimension = type_to_dim(glsl_get_sampler_dim(type), &is_ms);
518
519 SpvId result_type = get_glsl_basetype(ctx, glsl_get_sampler_result_type(type));
520 SpvId image_type = spirv_builder_type_image(&ctx->builder, result_type,
521 dimension, false,
522 glsl_sampler_type_is_array(type),
523 is_ms, 1,
524 SpvImageFormatUnknown);
525
526 SpvId sampled_type = spirv_builder_type_sampled_image(&ctx->builder,
527 image_type);
528 SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
529 SpvStorageClassUniformConstant,
530 sampled_type);
531
532 if (glsl_type_is_array(var->type)) {
533 for (int i = 0; i < glsl_get_length(var->type); ++i) {
534 SpvId var_id = spirv_builder_emit_var(&ctx->builder, pointer_type,
535 SpvStorageClassUniformConstant);
536
537 if (var->name) {
538 char element_name[100];
539 snprintf(element_name, sizeof(element_name), "%s_%d", var->name, i);
540 spirv_builder_emit_name(&ctx->builder, var_id, element_name);
541 }
542
543 int index = var->data.binding + i;
544 assert(!(ctx->samplers_used & (1 << index)));
545 assert(!ctx->image_types[index]);
546 ctx->image_types[index] = image_type;
547 ctx->samplers[index] = var_id;
548 ctx->samplers_used |= 1 << index;
549
550 spirv_builder_emit_descriptor_set(&ctx->builder, var_id,
551 var->data.descriptor_set);
552 int binding = zink_binding(ctx->stage,
553 VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
554 var->data.binding + i);
555 spirv_builder_emit_binding(&ctx->builder, var_id, binding);
556 }
557 } else {
558 SpvId var_id = spirv_builder_emit_var(&ctx->builder, pointer_type,
559 SpvStorageClassUniformConstant);
560
561 if (var->name)
562 spirv_builder_emit_name(&ctx->builder, var_id, var->name);
563
564 int index = var->data.binding;
565 assert(!(ctx->samplers_used & (1 << index)));
566 assert(!ctx->image_types[index]);
567 ctx->image_types[index] = image_type;
568 ctx->samplers[index] = var_id;
569 ctx->samplers_used |= 1 << index;
570
571 spirv_builder_emit_descriptor_set(&ctx->builder, var_id,
572 var->data.descriptor_set);
573 int binding = zink_binding(ctx->stage,
574 VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
575 var->data.binding);
576 spirv_builder_emit_binding(&ctx->builder, var_id, binding);
577 }
578 }
579
580 static void
581 emit_ubo(struct ntv_context *ctx, struct nir_variable *var)
582 {
583 uint32_t size = glsl_count_attribute_slots(var->type, false);
584 SpvId vec4_type = get_uvec_type(ctx, 32, 4);
585 SpvId array_length = emit_uint_const(ctx, 32, size);
586 SpvId array_type = spirv_builder_type_array(&ctx->builder, vec4_type,
587 array_length);
588 spirv_builder_emit_array_stride(&ctx->builder, array_type, 16);
589
590 // wrap UBO-array in a struct
591 SpvId struct_type = spirv_builder_type_struct(&ctx->builder, &array_type, 1);
592 if (var->name) {
593 char struct_name[100];
594 snprintf(struct_name, sizeof(struct_name), "struct_%s", var->name);
595 spirv_builder_emit_name(&ctx->builder, struct_type, struct_name);
596 }
597
598 spirv_builder_emit_decoration(&ctx->builder, struct_type,
599 SpvDecorationBlock);
600 spirv_builder_emit_member_offset(&ctx->builder, struct_type, 0, 0);
601
602
603 SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
604 SpvStorageClassUniform,
605 struct_type);
606
607 SpvId var_id = spirv_builder_emit_var(&ctx->builder, pointer_type,
608 SpvStorageClassUniform);
609 if (var->name)
610 spirv_builder_emit_name(&ctx->builder, var_id, var->name);
611
612 assert(ctx->num_ubos < ARRAY_SIZE(ctx->ubos));
613 ctx->ubos[ctx->num_ubos++] = var_id;
614
615 spirv_builder_emit_descriptor_set(&ctx->builder, var_id,
616 var->data.descriptor_set);
617 int binding = zink_binding(ctx->stage,
618 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
619 var->data.binding);
620 spirv_builder_emit_binding(&ctx->builder, var_id, binding);
621 }
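/* The SPIR-V type built above is roughly this GLSL-style declaration (sketch
 * only; "size" is the attribute-slot count computed from var->type):
 *
 *   uniform struct_<name> {      // Block-decorated struct
 *      uvec4 data[size];         // ArrayStride 16, member Offset 0
 *   };
 *
 * i.e. every UBO is exposed as a flat array of uvec4s; emit_load_ubo() below
 * indexes that array and callers bitcast the loaded words as needed.
 */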
622
623 static void
624 emit_uniform(struct ntv_context *ctx, struct nir_variable *var)
625 {
626 if (var->data.mode == nir_var_mem_ubo)
627 emit_ubo(ctx, var);
628 else {
629 assert(var->data.mode == nir_var_uniform);
630 if (glsl_type_is_sampler(glsl_without_array(var->type)))
631 emit_sampler(ctx, var);
632 }
633 }
634
635 static SpvId
636 get_vec_from_bit_size(struct ntv_context *ctx, uint32_t bit_size, uint32_t num_components)
637 {
638 if (bit_size == 1)
639 return get_bvec_type(ctx, num_components);
640 if (bit_size == 32)
641 return get_uvec_type(ctx, bit_size, num_components);
642 unreachable("unhandled register bit size");
643 return 0;
644 }
645
646 static SpvId
647 get_src_ssa(struct ntv_context *ctx, const nir_ssa_def *ssa)
648 {
649 assert(ssa->index < ctx->num_defs);
650 assert(ctx->defs[ssa->index] != 0);
651 return ctx->defs[ssa->index];
652 }
653
654 static SpvId
655 get_var_from_reg(struct ntv_context *ctx, nir_register *reg)
656 {
657 assert(reg->index < ctx->num_regs);
658 assert(ctx->regs[reg->index] != 0);
659 return ctx->regs[reg->index];
660 }
661
662 static SpvId
663 get_src_reg(struct ntv_context *ctx, const nir_reg_src *reg)
664 {
665 assert(reg->reg);
666 assert(!reg->indirect);
667 assert(!reg->base_offset);
668
669 SpvId var = get_var_from_reg(ctx, reg->reg);
670 SpvId type = get_vec_from_bit_size(ctx, reg->reg->bit_size, reg->reg->num_components);
671 return spirv_builder_emit_load(&ctx->builder, type, var);
672 }
673
674 static SpvId
675 get_src(struct ntv_context *ctx, nir_src *src)
676 {
677 if (src->is_ssa)
678 return get_src_ssa(ctx, src->ssa);
679 else
680 return get_src_reg(ctx, &src->reg);
681 }
682
683 static SpvId
684 get_alu_src_raw(struct ntv_context *ctx, nir_alu_instr *alu, unsigned src)
685 {
686 assert(!alu->src[src].negate);
687 assert(!alu->src[src].abs);
688
689 SpvId def = get_src(ctx, &alu->src[src].src);
690
691 unsigned used_channels = 0;
692 bool need_swizzle = false;
693 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
694 if (!nir_alu_instr_channel_used(alu, src, i))
695 continue;
696
697 used_channels++;
698
699 if (alu->src[src].swizzle[i] != i)
700 need_swizzle = true;
701 }
702 assert(used_channels != 0);
703
704 unsigned live_channels = nir_src_num_components(alu->src[src].src);
705 if (used_channels != live_channels)
706 need_swizzle = true;
707
708 if (!need_swizzle)
709 return def;
710
711 int bit_size = nir_src_bit_size(alu->src[src].src);
712 assert(bit_size == 1 || bit_size == 32);
713
714 SpvId raw_type = bit_size == 1 ? spirv_builder_type_bool(&ctx->builder) :
715 spirv_builder_type_uint(&ctx->builder, bit_size);
716
717 if (used_channels == 1) {
718 uint32_t indices[] = { alu->src[src].swizzle[0] };
719 return spirv_builder_emit_composite_extract(&ctx->builder, raw_type,
720 def, indices,
721 ARRAY_SIZE(indices));
722 } else if (live_channels == 1) {
723 SpvId raw_vec_type = spirv_builder_type_vector(&ctx->builder,
724 raw_type,
725 used_channels);
726
727 SpvId constituents[NIR_MAX_VEC_COMPONENTS] = {0};
728 for (unsigned i = 0; i < used_channels; ++i)
729 constituents[i] = def;
730
731 return spirv_builder_emit_composite_construct(&ctx->builder,
732 raw_vec_type,
733 constituents,
734 used_channels);
735 } else {
736 SpvId raw_vec_type = spirv_builder_type_vector(&ctx->builder,
737 raw_type,
738 used_channels);
739
740 uint32_t components[NIR_MAX_VEC_COMPONENTS] = {0};
741 size_t num_components = 0;
742 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
743 if (!nir_alu_instr_channel_used(alu, src, i))
744 continue;
745
746 components[num_components++] = alu->src[src].swizzle[i];
747 }
748
749 return spirv_builder_emit_vector_shuffle(&ctx->builder, raw_vec_type,
750 def, def, components,
751 num_components);
752 }
753 }
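/* Example of the swizzle handling above: a vec4 source consumed as "v.zx"
 * uses two non-identity channels, so it takes the OpVectorShuffle path with
 * components = { 2, 0 }; a single used channel like "v.w" becomes an
 * OpCompositeExtract of index 3, and a scalar source consumed as a vector is
 * replicated via OpCompositeConstruct.
 */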
754
755 static void
756 store_ssa_def(struct ntv_context *ctx, nir_ssa_def *ssa, SpvId result)
757 {
758 assert(result != 0);
759 assert(ssa->index < ctx->num_defs);
760 ctx->defs[ssa->index] = result;
761 }
762
763 static SpvId
764 emit_select(struct ntv_context *ctx, SpvId type, SpvId cond,
765 SpvId if_true, SpvId if_false)
766 {
767 return emit_triop(ctx, SpvOpSelect, type, cond, if_true, if_false);
768 }
769
770 static SpvId
771 uvec_to_bvec(struct ntv_context *ctx, SpvId value, unsigned num_components)
772 {
773 SpvId type = get_bvec_type(ctx, num_components);
774 SpvId zero = get_uvec_constant(ctx, 32, num_components, 0);
775 return emit_binop(ctx, SpvOpINotEqual, type, value, zero);
776 }
777
778 static SpvId
779 emit_bitcast(struct ntv_context *ctx, SpvId type, SpvId value)
780 {
781 return emit_unop(ctx, SpvOpBitcast, type, value);
782 }
783
784 static SpvId
785 bitcast_to_uvec(struct ntv_context *ctx, SpvId value, unsigned bit_size,
786 unsigned num_components)
787 {
788 SpvId type = get_uvec_type(ctx, bit_size, num_components);
789 return emit_bitcast(ctx, type, value);
790 }
791
792 static SpvId
793 bitcast_to_ivec(struct ntv_context *ctx, SpvId value, unsigned bit_size,
794 unsigned num_components)
795 {
796 SpvId type = get_ivec_type(ctx, bit_size, num_components);
797 return emit_bitcast(ctx, type, value);
798 }
799
800 static SpvId
801 bitcast_to_fvec(struct ntv_context *ctx, SpvId value, unsigned bit_size,
802 unsigned num_components)
803 {
804 SpvId type = get_fvec_type(ctx, bit_size, num_components);
805 return emit_bitcast(ctx, type, value);
806 }
807
808 static void
809 store_reg_def(struct ntv_context *ctx, nir_reg_dest *reg, SpvId result)
810 {
811 SpvId var = get_var_from_reg(ctx, reg->reg);
812 assert(var);
813 spirv_builder_emit_store(&ctx->builder, var, result);
814 }
815
816 static void
817 store_dest_raw(struct ntv_context *ctx, nir_dest *dest, SpvId result)
818 {
819 if (dest->is_ssa)
820 store_ssa_def(ctx, &dest->ssa, result);
821 else
822 store_reg_def(ctx, &dest->reg, result);
823 }
824
825 static SpvId
826 store_dest(struct ntv_context *ctx, nir_dest *dest, SpvId result, nir_alu_type type)
827 {
828 unsigned num_components = nir_dest_num_components(*dest);
829 unsigned bit_size = nir_dest_bit_size(*dest);
830
831 if (bit_size != 1) {
832 switch (nir_alu_type_get_base_type(type)) {
833 case nir_type_bool:
834 assert("bool should have bit-size 1");
835
836 case nir_type_uint:
837 break; /* nothing to do! */
838
839 case nir_type_int:
840 case nir_type_float:
841 result = bitcast_to_uvec(ctx, result, bit_size, num_components);
842 break;
843
844 default:
845 unreachable("unsupported nir_alu_type");
846 }
847 }
848
849 store_dest_raw(ctx, dest, result);
850 return result;
851 }
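/* Convention note (follows from the code above): any value with a bit-size
 * other than 1 is stored in its def slot as a plain uint bit-pattern -- ints
 * and floats get an OpBitcast on the way in -- and consumers bitcast back to
 * the type they want (see get_alu_src() / get_src_float()).  Booleans keep
 * their native OpTypeBool form.
 */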
852
853 static SpvId
854 emit_unop(struct ntv_context *ctx, SpvOp op, SpvId type, SpvId src)
855 {
856 return spirv_builder_emit_unop(&ctx->builder, op, type, src);
857 }
858
859 /* return the intended xfb output vec type based on base type and vector size */
860 static SpvId
861 get_output_type(struct ntv_context *ctx, unsigned register_index, unsigned num_components)
862 {
863 const struct glsl_type *out_type = ctx->so_output_gl_types[register_index];
864 enum glsl_base_type base_type = glsl_get_base_type(out_type);
865 if (base_type == GLSL_TYPE_ARRAY)
866 base_type = glsl_get_base_type(glsl_without_array(out_type));
867
868 switch (base_type) {
869 case GLSL_TYPE_BOOL:
870 return get_bvec_type(ctx, num_components);
871
872 case GLSL_TYPE_FLOAT:
873 return get_fvec_type(ctx, 32, num_components);
874
875 case GLSL_TYPE_INT:
876 return get_ivec_type(ctx, 32, num_components);
877
878 case GLSL_TYPE_UINT:
879 return get_uvec_type(ctx, 32, num_components);
880
881 default:
882 break;
883 }
884 unreachable("unknown type");
885 return 0;
886 }
887
888 /* for streamout, create dedicated new outputs: streamout can capture individual components
889 of a complete output, so we can't reuse the packed outputs created above */
890 static void
891 emit_so_info(struct ntv_context *ctx, unsigned max_output_location,
892 const struct zink_so_info *so_info)
893 {
894 for (unsigned i = 0; i < so_info->so_info.num_outputs; i++) {
895 struct pipe_stream_output so_output = so_info->so_info.output[i];
896 unsigned slot = so_info->so_info_slots[i];
897 SpvId out_type = get_output_type(ctx, slot, so_output.num_components);
898 SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
899 SpvStorageClassOutput,
900 out_type);
901 SpvId var_id = spirv_builder_emit_var(&ctx->builder, pointer_type,
902 SpvStorageClassOutput);
903 char name[10];
904
905 snprintf(name, 10, "xfb%d", i);
906 spirv_builder_emit_name(&ctx->builder, var_id, name);
907 spirv_builder_emit_offset(&ctx->builder, var_id, (so_output.dst_offset * 4));
908 spirv_builder_emit_xfb_buffer(&ctx->builder, var_id, so_output.output_buffer);
909 spirv_builder_emit_xfb_stride(&ctx->builder, var_id, so_info->so_info.stride[so_output.output_buffer] * 4);
910
911 /* output location is incremented by VARYING_SLOT_VAR0 for non-builtins in vtn,
912 * so we need to ensure that the new xfb location slot doesn't conflict with any previously-emitted
913 * outputs.
914 *
915 * if there are no previous outputs that take up user slots (VAR0+), we can start right after the
916 * glsl builtin reserved slots; otherwise we start just after the highest adjusted user output slot
917 */
918 uint32_t location = NTV_MIN_RESERVED_SLOTS + i;
919 if (max_output_location >= VARYING_SLOT_VAR0)
920 location = max_output_location - VARYING_SLOT_VAR0 + 1 + i;
921 assert(location < VARYING_SLOT_VAR0);
922 assert(location <= VARYING_SLOT_VAR0 - 8 ||
923 !ctx->seen_texcoord[VARYING_SLOT_VAR0 - location - 1]);
924 spirv_builder_emit_location(&ctx->builder, var_id, location);
925
926 /* note: gl_ClipDistance[4] can be the 0-indexed member of VARYING_SLOT_CLIP_DIST1 here,
927 * so this is still the 0 component
928 */
929 if (so_output.start_component)
930 spirv_builder_emit_component(&ctx->builder, var_id, so_output.start_component);
931
932 uint32_t *key = ralloc_size(ctx->mem_ctx, sizeof(uint32_t));
933 *key = (uint32_t)so_output.register_index << 2 | so_output.start_component;
934 _mesa_hash_table_insert(ctx->so_outputs, key, (void *)(intptr_t)var_id);
935
936 assert(ctx->num_entry_ifaces < ARRAY_SIZE(ctx->entry_ifaces));
937 ctx->entry_ifaces[ctx->num_entry_ifaces++] = var_id;
938 }
939 }
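/* The hash key used above packs a stream output's register index and start
 * component into one uint32_t:
 *
 *   key = register_index << 2 | start_component;   // start_component < 4
 *
 * e.g. capturing two components starting at component 1 of output register 3
 * gives key (3 << 2) | 1 = 13.  emit_so_outputs() below rebuilds the same key
 * to look up the xfb variable for each output.
 */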
940
941 static void
942 emit_so_outputs(struct ntv_context *ctx,
943 const struct zink_so_info *so_info)
944 {
945 SpvId loaded_outputs[VARYING_SLOT_MAX] = {};
946 for (unsigned i = 0; i < so_info->so_info.num_outputs; i++) {
947 uint32_t components[NIR_MAX_VEC_COMPONENTS];
948 unsigned slot = so_info->so_info_slots[i];
949 struct pipe_stream_output so_output = so_info->so_info.output[i];
950 uint32_t so_key = (uint32_t) so_output.register_index << 2 | so_output.start_component;
951 struct hash_entry *he = _mesa_hash_table_search(ctx->so_outputs, &so_key);
952 assert(he);
953 SpvId so_output_var_id = (SpvId)(intptr_t)he->data;
954
955 SpvId type = get_output_type(ctx, slot, so_output.num_components);
956 SpvId output = ctx->outputs[slot];
957 SpvId output_type = ctx->so_output_types[slot];
958 const struct glsl_type *out_type = ctx->so_output_gl_types[slot];
959
960 if (!loaded_outputs[slot])
961 loaded_outputs[slot] = spirv_builder_emit_load(&ctx->builder, output_type, output);
962 SpvId src = loaded_outputs[slot];
963
964 SpvId result;
965
966 for (unsigned c = 0; c < so_output.num_components; c++) {
967 components[c] = so_output.start_component + c;
968 /* this is the second half of a 2 * vec4 array */
969 if (ctx->stage == MESA_SHADER_VERTEX && slot == VARYING_SLOT_CLIP_DIST1)
970 components[c] += 4;
971 }
972
973 /* if we're emitting a scalar or the type we're emitting matches the output's original type and we're
974 * emitting the same number of components, then we can skip any sort of conversion here
975 */
976 if (glsl_type_is_scalar(out_type) || (type == output_type && glsl_get_length(out_type) == so_output.num_components))
977 result = src;
978 else {
979 /* OpCompositeExtract can only extract scalars for our use here */
980 if (so_output.num_components == 1) {
981 result = spirv_builder_emit_composite_extract(&ctx->builder, type, src, components, so_output.num_components);
982 } else if (glsl_type_is_vector(out_type)) {
983 /* OpVectorShuffle can select vector members into a differently-sized vector */
984 result = spirv_builder_emit_vector_shuffle(&ctx->builder, type,
985 src, src,
986 components, so_output.num_components);
987 result = emit_unop(ctx, SpvOpBitcast, type, result);
988 } else {
989 /* for arrays, we need to manually extract each desired member
990 * and re-pack them into the desired output type
991 */
992 for (unsigned c = 0; c < so_output.num_components; c++) {
993 uint32_t member[] = { so_output.start_component + c };
994 SpvId base_type = get_glsl_type(ctx, glsl_without_array(out_type));
995
996 if (ctx->stage == MESA_SHADER_VERTEX && slot == VARYING_SLOT_CLIP_DIST1)
997 member[0] += 4;
998 components[c] = spirv_builder_emit_composite_extract(&ctx->builder, base_type, src, member, 1);
999 }
1000 result = spirv_builder_emit_composite_construct(&ctx->builder, type, components, so_output.num_components);
1001 }
1002 }
1003
1004 spirv_builder_emit_store(&ctx->builder, so_output_var_id, result);
1005 }
1006 }
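/* Worked example of the conversion above (a sketch): a stream output that
 * captures two components starting at component 1 of a vec4 varying loads the
 * whole vec4 once and then emits an OpVectorShuffle selecting components
 * { 1, 2 } into the dedicated xfb variable; a single-component capture uses
 * OpCompositeExtract instead, and an array output such as gl_ClipDistance is
 * unpacked member by member and re-packed with OpCompositeConstruct.
 */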
1007
1008 static SpvId
1009 emit_binop(struct ntv_context *ctx, SpvOp op, SpvId type,
1010 SpvId src0, SpvId src1)
1011 {
1012 return spirv_builder_emit_binop(&ctx->builder, op, type, src0, src1);
1013 }
1014
1015 static SpvId
1016 emit_triop(struct ntv_context *ctx, SpvOp op, SpvId type,
1017 SpvId src0, SpvId src1, SpvId src2)
1018 {
1019 return spirv_builder_emit_triop(&ctx->builder, op, type, src0, src1, src2);
1020 }
1021
1022 static SpvId
1023 emit_builtin_unop(struct ntv_context *ctx, enum GLSLstd450 op, SpvId type,
1024 SpvId src)
1025 {
1026 SpvId args[] = { src };
1027 return spirv_builder_emit_ext_inst(&ctx->builder, type, ctx->GLSL_std_450,
1028 op, args, ARRAY_SIZE(args));
1029 }
1030
1031 static SpvId
1032 emit_builtin_binop(struct ntv_context *ctx, enum GLSLstd450 op, SpvId type,
1033 SpvId src0, SpvId src1)
1034 {
1035 SpvId args[] = { src0, src1 };
1036 return spirv_builder_emit_ext_inst(&ctx->builder, type, ctx->GLSL_std_450,
1037 op, args, ARRAY_SIZE(args));
1038 }
1039
1040 static SpvId
1041 emit_builtin_triop(struct ntv_context *ctx, enum GLSLstd450 op, SpvId type,
1042 SpvId src0, SpvId src1, SpvId src2)
1043 {
1044 SpvId args[] = { src0, src1, src2 };
1045 return spirv_builder_emit_ext_inst(&ctx->builder, type, ctx->GLSL_std_450,
1046 op, args, ARRAY_SIZE(args));
1047 }
1048
1049 static SpvId
1050 get_fvec_constant(struct ntv_context *ctx, unsigned bit_size,
1051 unsigned num_components, float value)
1052 {
1053 assert(bit_size == 32);
1054
1055 SpvId result = emit_float_const(ctx, bit_size, value);
1056 if (num_components == 1)
1057 return result;
1058
1059 assert(num_components > 1);
1060 SpvId components[num_components];
1061 for (int i = 0; i < num_components; i++)
1062 components[i] = result;
1063
1064 SpvId type = get_fvec_type(ctx, bit_size, num_components);
1065 return spirv_builder_const_composite(&ctx->builder, type, components,
1066 num_components);
1067 }
1068
1069 static SpvId
1070 get_uvec_constant(struct ntv_context *ctx, unsigned bit_size,
1071 unsigned num_components, uint32_t value)
1072 {
1073 assert(bit_size == 32);
1074
1075 SpvId result = emit_uint_const(ctx, bit_size, value);
1076 if (num_components == 1)
1077 return result;
1078
1079 assert(num_components > 1);
1080 SpvId components[num_components];
1081 for (int i = 0; i < num_components; i++)
1082 components[i] = result;
1083
1084 SpvId type = get_uvec_type(ctx, bit_size, num_components);
1085 return spirv_builder_const_composite(&ctx->builder, type, components,
1086 num_components);
1087 }
1088
1089 static SpvId
1090 get_ivec_constant(struct ntv_context *ctx, unsigned bit_size,
1091 unsigned num_components, int32_t value)
1092 {
1093 assert(bit_size == 32);
1094
1095 SpvId result = emit_int_const(ctx, bit_size, value);
1096 if (num_components == 1)
1097 return result;
1098
1099 assert(num_components > 1);
1100 SpvId components[num_components];
1101 for (int i = 0; i < num_components; i++)
1102 components[i] = result;
1103
1104 SpvId type = get_ivec_type(ctx, bit_size, num_components);
1105 return spirv_builder_const_composite(&ctx->builder, type, components,
1106 num_components);
1107 }
1108
1109 static inline unsigned
1110 alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
1111 {
1112 if (nir_op_infos[instr->op].input_sizes[src] > 0)
1113 return nir_op_infos[instr->op].input_sizes[src];
1114
1115 if (instr->dest.dest.is_ssa)
1116 return instr->dest.dest.ssa.num_components;
1117 else
1118 return instr->dest.dest.reg.reg->num_components;
1119 }
1120
1121 static SpvId
1122 get_alu_src(struct ntv_context *ctx, nir_alu_instr *alu, unsigned src)
1123 {
1124 SpvId raw_value = get_alu_src_raw(ctx, alu, src);
1125
1126 unsigned num_components = alu_instr_src_components(alu, src);
1127 unsigned bit_size = nir_src_bit_size(alu->src[src].src);
1128 nir_alu_type type = nir_op_infos[alu->op].input_types[src];
1129
1130 if (bit_size == 1)
1131 return raw_value;
1132 else {
1133 switch (nir_alu_type_get_base_type(type)) {
1134 case nir_type_bool:
1135 unreachable("bool should have bit-size 1");
1136
1137 case nir_type_int:
1138 return bitcast_to_ivec(ctx, raw_value, bit_size, num_components);
1139
1140 case nir_type_uint:
1141 return raw_value;
1142
1143 case nir_type_float:
1144 return bitcast_to_fvec(ctx, raw_value, bit_size, num_components);
1145
1146 default:
1147 unreachable("unknown nir_alu_type");
1148 }
1149 }
1150 }
1151
1152 static SpvId
1153 store_alu_result(struct ntv_context *ctx, nir_alu_instr *alu, SpvId result)
1154 {
1155 assert(!alu->dest.saturate);
1156 return store_dest(ctx, &alu->dest.dest, result,
1157 nir_op_infos[alu->op].output_type);
1158 }
1159
1160 static SpvId
1161 get_dest_type(struct ntv_context *ctx, nir_dest *dest, nir_alu_type type)
1162 {
1163 unsigned num_components = nir_dest_num_components(*dest);
1164 unsigned bit_size = nir_dest_bit_size(*dest);
1165
1166 if (bit_size == 1)
1167 return get_bvec_type(ctx, num_components);
1168
1169 switch (nir_alu_type_get_base_type(type)) {
1170 case nir_type_bool:
1171 unreachable("bool should have bit-size 1");
1172
1173 case nir_type_int:
1174 return get_ivec_type(ctx, bit_size, num_components);
1175
1176 case nir_type_uint:
1177 return get_uvec_type(ctx, bit_size, num_components);
1178
1179 case nir_type_float:
1180 return get_fvec_type(ctx, bit_size, num_components);
1181
1182 default:
1183 unreachable("unsupported nir_alu_type");
1184 }
1185 }
1186
1187 static void
1188 emit_alu(struct ntv_context *ctx, nir_alu_instr *alu)
1189 {
1190 SpvId src[nir_op_infos[alu->op].num_inputs];
1191 unsigned in_bit_sizes[nir_op_infos[alu->op].num_inputs];
1192 for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
1193 src[i] = get_alu_src(ctx, alu, i);
1194 in_bit_sizes[i] = nir_src_bit_size(alu->src[i].src);
1195 }
1196
1197 SpvId dest_type = get_dest_type(ctx, &alu->dest.dest,
1198 nir_op_infos[alu->op].output_type);
1199 unsigned bit_size = nir_dest_bit_size(alu->dest.dest);
1200 unsigned num_components = nir_dest_num_components(alu->dest.dest);
1201
1202 SpvId result = 0;
1203 switch (alu->op) {
1204 case nir_op_mov:
1205 assert(nir_op_infos[alu->op].num_inputs == 1);
1206 result = src[0];
1207 break;
1208
1209 #define UNOP(nir_op, spirv_op) \
1210 case nir_op: \
1211 assert(nir_op_infos[alu->op].num_inputs == 1); \
1212 result = emit_unop(ctx, spirv_op, dest_type, src[0]); \
1213 break;
1214
1215 UNOP(nir_op_ineg, SpvOpSNegate)
1216 UNOP(nir_op_fneg, SpvOpFNegate)
1217 UNOP(nir_op_fddx, SpvOpDPdx)
1218 UNOP(nir_op_fddx_coarse, SpvOpDPdxCoarse)
1219 UNOP(nir_op_fddx_fine, SpvOpDPdxFine)
1220 UNOP(nir_op_fddy, SpvOpDPdy)
1221 UNOP(nir_op_fddy_coarse, SpvOpDPdyCoarse)
1222 UNOP(nir_op_fddy_fine, SpvOpDPdyFine)
1223 UNOP(nir_op_f2i32, SpvOpConvertFToS)
1224 UNOP(nir_op_f2u32, SpvOpConvertFToU)
1225 UNOP(nir_op_i2f32, SpvOpConvertSToF)
1226 UNOP(nir_op_u2f32, SpvOpConvertUToF)
1227 UNOP(nir_op_bitfield_reverse, SpvOpBitReverse)
1228 #undef UNOP
1229
1230 case nir_op_inot:
1231 if (bit_size == 1)
1232 result = emit_unop(ctx, SpvOpLogicalNot, dest_type, src[0]);
1233 else
1234 result = emit_unop(ctx, SpvOpNot, dest_type, src[0]);
1235 break;
1236
1237 case nir_op_b2i32:
1238 assert(nir_op_infos[alu->op].num_inputs == 1);
1239 result = emit_select(ctx, dest_type, src[0],
1240 get_ivec_constant(ctx, 32, num_components, 1),
1241 get_ivec_constant(ctx, 32, num_components, 0));
1242 break;
1243
1244 case nir_op_b2f32:
1245 assert(nir_op_infos[alu->op].num_inputs == 1);
1246 result = emit_select(ctx, dest_type, src[0],
1247 get_fvec_constant(ctx, 32, num_components, 1),
1248 get_fvec_constant(ctx, 32, num_components, 0));
1249 break;
1250
1251 #define BUILTIN_UNOP(nir_op, spirv_op) \
1252 case nir_op: \
1253 assert(nir_op_infos[alu->op].num_inputs == 1); \
1254 result = emit_builtin_unop(ctx, spirv_op, dest_type, src[0]); \
1255 break;
1256
1257 BUILTIN_UNOP(nir_op_iabs, GLSLstd450SAbs)
1258 BUILTIN_UNOP(nir_op_fabs, GLSLstd450FAbs)
1259 BUILTIN_UNOP(nir_op_fsqrt, GLSLstd450Sqrt)
1260 BUILTIN_UNOP(nir_op_frsq, GLSLstd450InverseSqrt)
1261 BUILTIN_UNOP(nir_op_flog2, GLSLstd450Log2)
1262 BUILTIN_UNOP(nir_op_fexp2, GLSLstd450Exp2)
1263 BUILTIN_UNOP(nir_op_ffract, GLSLstd450Fract)
1264 BUILTIN_UNOP(nir_op_ffloor, GLSLstd450Floor)
1265 BUILTIN_UNOP(nir_op_fceil, GLSLstd450Ceil)
1266 BUILTIN_UNOP(nir_op_ftrunc, GLSLstd450Trunc)
1267 BUILTIN_UNOP(nir_op_fround_even, GLSLstd450RoundEven)
1268 BUILTIN_UNOP(nir_op_fsign, GLSLstd450FSign)
1269 BUILTIN_UNOP(nir_op_isign, GLSLstd450SSign)
1270 BUILTIN_UNOP(nir_op_fsin, GLSLstd450Sin)
1271 BUILTIN_UNOP(nir_op_fcos, GLSLstd450Cos)
1272 #undef BUILTIN_UNOP
1273
1274 case nir_op_frcp:
1275 assert(nir_op_infos[alu->op].num_inputs == 1);
1276 result = emit_binop(ctx, SpvOpFDiv, dest_type,
1277 get_fvec_constant(ctx, bit_size, num_components, 1),
1278 src[0]);
1279 break;
1280
1281 case nir_op_f2b1:
1282 assert(nir_op_infos[alu->op].num_inputs == 1);
1283 result = emit_binop(ctx, SpvOpFOrdNotEqual, dest_type, src[0],
1284 get_fvec_constant(ctx,
1285 nir_src_bit_size(alu->src[0].src),
1286 num_components, 0));
1287 break;
1288 case nir_op_i2b1:
1289 assert(nir_op_infos[alu->op].num_inputs == 1);
1290 result = emit_binop(ctx, SpvOpINotEqual, dest_type, src[0],
1291 get_ivec_constant(ctx,
1292 nir_src_bit_size(alu->src[0].src),
1293 num_components, 0));
1294 break;
1295
1296
1297 #define BINOP(nir_op, spirv_op) \
1298 case nir_op: \
1299 assert(nir_op_infos[alu->op].num_inputs == 2); \
1300 result = emit_binop(ctx, spirv_op, dest_type, src[0], src[1]); \
1301 break;
1302
1303 BINOP(nir_op_iadd, SpvOpIAdd)
1304 BINOP(nir_op_isub, SpvOpISub)
1305 BINOP(nir_op_imul, SpvOpIMul)
1306 BINOP(nir_op_idiv, SpvOpSDiv)
1307 BINOP(nir_op_udiv, SpvOpUDiv)
1308 BINOP(nir_op_umod, SpvOpUMod)
1309 BINOP(nir_op_fadd, SpvOpFAdd)
1310 BINOP(nir_op_fsub, SpvOpFSub)
1311 BINOP(nir_op_fmul, SpvOpFMul)
1312 BINOP(nir_op_fdiv, SpvOpFDiv)
1313 BINOP(nir_op_fmod, SpvOpFMod)
1314 BINOP(nir_op_ilt, SpvOpSLessThan)
1315 BINOP(nir_op_ige, SpvOpSGreaterThanEqual)
1316 BINOP(nir_op_ult, SpvOpULessThan)
1317 BINOP(nir_op_uge, SpvOpUGreaterThanEqual)
1318 BINOP(nir_op_flt, SpvOpFOrdLessThan)
1319 BINOP(nir_op_fge, SpvOpFOrdGreaterThanEqual)
1320 BINOP(nir_op_feq, SpvOpFOrdEqual)
1321 BINOP(nir_op_fne, SpvOpFUnordNotEqual)
1322 BINOP(nir_op_ishl, SpvOpShiftLeftLogical)
1323 BINOP(nir_op_ishr, SpvOpShiftRightArithmetic)
1324 BINOP(nir_op_ushr, SpvOpShiftRightLogical)
1325 BINOP(nir_op_ixor, SpvOpBitwiseXor)
1326 #undef BINOP
1327
1328 #define BINOP_LOG(nir_op, spv_op, spv_log_op) \
1329 case nir_op: \
1330 assert(nir_op_infos[alu->op].num_inputs == 2); \
1331 if (nir_src_bit_size(alu->src[0].src) == 1) \
1332 result = emit_binop(ctx, spv_log_op, dest_type, src[0], src[1]); \
1333 else \
1334 result = emit_binop(ctx, spv_op, dest_type, src[0], src[1]); \
1335 break;
1336
1337 BINOP_LOG(nir_op_iand, SpvOpBitwiseAnd, SpvOpLogicalAnd)
1338 BINOP_LOG(nir_op_ior, SpvOpBitwiseOr, SpvOpLogicalOr)
1339 BINOP_LOG(nir_op_ieq, SpvOpIEqual, SpvOpLogicalEqual)
1340 BINOP_LOG(nir_op_ine, SpvOpINotEqual, SpvOpLogicalNotEqual)
1341 #undef BINOP_LOG
1342
1343 #define BUILTIN_BINOP(nir_op, spirv_op) \
1344 case nir_op: \
1345 assert(nir_op_infos[alu->op].num_inputs == 2); \
1346 result = emit_builtin_binop(ctx, spirv_op, dest_type, src[0], src[1]); \
1347 break;
1348
1349 BUILTIN_BINOP(nir_op_fmin, GLSLstd450FMin)
1350 BUILTIN_BINOP(nir_op_fmax, GLSLstd450FMax)
1351 BUILTIN_BINOP(nir_op_imin, GLSLstd450SMin)
1352 BUILTIN_BINOP(nir_op_imax, GLSLstd450SMax)
1353 BUILTIN_BINOP(nir_op_umin, GLSLstd450UMin)
1354 BUILTIN_BINOP(nir_op_umax, GLSLstd450UMax)
1355 #undef BUILTIN_BINOP
1356
1357 case nir_op_fdot2:
1358 case nir_op_fdot3:
1359 case nir_op_fdot4:
1360 assert(nir_op_infos[alu->op].num_inputs == 2);
1361 result = emit_binop(ctx, SpvOpDot, dest_type, src[0], src[1]);
1362 break;
1363
1364 case nir_op_fdph:
1365 unreachable("should already be lowered away");
1366
1367 case nir_op_seq:
1368 case nir_op_sne:
1369 case nir_op_slt:
1370 case nir_op_sge: {
1371 assert(nir_op_infos[alu->op].num_inputs == 2);
1372 int num_components = nir_dest_num_components(alu->dest.dest);
1373 SpvId bool_type = get_bvec_type(ctx, num_components);
1374
1375 SpvId zero = emit_float_const(ctx, bit_size, 0.0f);
1376 SpvId one = emit_float_const(ctx, bit_size, 1.0f);
1377 if (num_components > 1) {
1378 SpvId zero_comps[num_components], one_comps[num_components];
1379 for (int i = 0; i < num_components; i++) {
1380 zero_comps[i] = zero;
1381 one_comps[i] = one;
1382 }
1383
1384 zero = spirv_builder_const_composite(&ctx->builder, dest_type,
1385 zero_comps, num_components);
1386 one = spirv_builder_const_composite(&ctx->builder, dest_type,
1387 one_comps, num_components);
1388 }
1389
1390 SpvOp op;
1391 switch (alu->op) {
1392 case nir_op_seq: op = SpvOpFOrdEqual; break;
1393 case nir_op_sne: op = SpvOpFOrdNotEqual; break;
1394 case nir_op_slt: op = SpvOpFOrdLessThan; break;
1395 case nir_op_sge: op = SpvOpFOrdGreaterThanEqual; break;
1396 default: unreachable("unexpected op");
1397 }
1398
1399 result = emit_binop(ctx, op, bool_type, src[0], src[1]);
1400 result = emit_select(ctx, dest_type, result, one, zero);
1401 }
1402 break;
1403
1404 case nir_op_flrp:
1405 assert(nir_op_infos[alu->op].num_inputs == 3);
1406 result = emit_builtin_triop(ctx, GLSLstd450FMix, dest_type,
1407 src[0], src[1], src[2]);
1408 break;
1409
1410 case nir_op_fcsel:
1411 result = emit_binop(ctx, SpvOpFOrdGreaterThan,
1412 get_bvec_type(ctx, num_components),
1413 src[0],
1414 get_fvec_constant(ctx,
1415 nir_src_bit_size(alu->src[0].src),
1416 num_components, 0));
1417 result = emit_select(ctx, dest_type, result, src[1], src[2]);
1418 break;
1419
1420 case nir_op_bcsel:
1421 assert(nir_op_infos[alu->op].num_inputs == 3);
1422 result = emit_select(ctx, dest_type, src[0], src[1], src[2]);
1423 break;
1424
1425 case nir_op_bany_fnequal2:
1426 case nir_op_bany_fnequal3:
1427 case nir_op_bany_fnequal4: {
1428 assert(nir_op_infos[alu->op].num_inputs == 2);
1429 assert(alu_instr_src_components(alu, 0) ==
1430 alu_instr_src_components(alu, 1));
1431 assert(in_bit_sizes[0] == in_bit_sizes[1]);
1432 /* The type of Operand 1 and Operand 2 must be a scalar or vector of floating-point type. */
1433 SpvOp op = in_bit_sizes[0] == 1 ? SpvOpLogicalNotEqual : SpvOpFOrdNotEqual;
1434 result = emit_binop(ctx, op,
1435 get_bvec_type(ctx, alu_instr_src_components(alu, 0)),
1436 src[0], src[1]);
1437 result = emit_unop(ctx, SpvOpAny, dest_type, result);
1438 break;
1439 }
1440
1441 case nir_op_ball_fequal2:
1442 case nir_op_ball_fequal3:
1443 case nir_op_ball_fequal4: {
1444 assert(nir_op_infos[alu->op].num_inputs == 2);
1445 assert(alu_instr_src_components(alu, 0) ==
1446 alu_instr_src_components(alu, 1));
1447 assert(in_bit_sizes[0] == in_bit_sizes[1]);
1448 /* The type of Operand 1 and Operand 2 must be a scalar or vector of floating-point type. */
1449 SpvOp op = in_bit_sizes[0] == 1 ? SpvOpLogicalEqual : SpvOpFOrdEqual;
1450 result = emit_binop(ctx, op,
1451 get_bvec_type(ctx, alu_instr_src_components(alu, 0)),
1452 src[0], src[1]);
1453 result = emit_unop(ctx, SpvOpAll, dest_type, result);
1454 break;
1455 }
1456
1457 case nir_op_bany_inequal2:
1458 case nir_op_bany_inequal3:
1459 case nir_op_bany_inequal4: {
1460 assert(nir_op_infos[alu->op].num_inputs == 2);
1461 assert(alu_instr_src_components(alu, 0) ==
1462 alu_instr_src_components(alu, 1));
1463 assert(in_bit_sizes[0] == in_bit_sizes[1]);
1464 /* The type of Operand 1 and Operand 2 must be a scalar or vector of integer type. */
1465 SpvOp op = in_bit_sizes[0] == 1 ? SpvOpLogicalNotEqual : SpvOpINotEqual;
1466 result = emit_binop(ctx, op,
1467 get_bvec_type(ctx, alu_instr_src_components(alu, 0)),
1468 src[0], src[1]);
1469 result = emit_unop(ctx, SpvOpAny, dest_type, result);
1470 break;
1471 }
1472
1473 case nir_op_ball_iequal2:
1474 case nir_op_ball_iequal3:
1475 case nir_op_ball_iequal4: {
1476 assert(nir_op_infos[alu->op].num_inputs == 2);
1477 assert(alu_instr_src_components(alu, 0) ==
1478 alu_instr_src_components(alu, 1));
1479 assert(in_bit_sizes[0] == in_bit_sizes[1]);
1480 /* The type of Operand 1 and Operand 2 must be a scalar or vector of integer type. */
1481 SpvOp op = in_bit_sizes[0] == 1 ? SpvOpLogicalEqual : SpvOpIEqual;
1482 result = emit_binop(ctx, op,
1483 get_bvec_type(ctx, alu_instr_src_components(alu, 0)),
1484 src[0], src[1]);
1485 result = emit_unop(ctx, SpvOpAll, dest_type, result);
1486 break;
1487 }
1488
1489 case nir_op_vec2:
1490 case nir_op_vec3:
1491 case nir_op_vec4: {
1492 int num_inputs = nir_op_infos[alu->op].num_inputs;
1493 assert(2 <= num_inputs && num_inputs <= 4);
1494 result = spirv_builder_emit_composite_construct(&ctx->builder, dest_type,
1495 src, num_inputs);
1496 }
1497 break;
1498
1499 default:
1500 fprintf(stderr, "emit_alu: not implemented (%s)\n",
1501 nir_op_infos[alu->op].name);
1502
1503 unreachable("unsupported opcode");
1504 return;
1505 }
1506
1507 store_alu_result(ctx, alu, result);
1508 }
1509
1510 static void
1511 emit_load_const(struct ntv_context *ctx, nir_load_const_instr *load_const)
1512 {
1513 unsigned bit_size = load_const->def.bit_size;
1514 unsigned num_components = load_const->def.num_components;
1515
1516 SpvId constant;
1517 if (num_components > 1) {
1518 SpvId components[num_components];
1519 SpvId type = get_vec_from_bit_size(ctx, bit_size, num_components);
1520 if (bit_size == 1) {
1521 for (int i = 0; i < num_components; i++)
1522 components[i] = spirv_builder_const_bool(&ctx->builder,
1523 load_const->value[i].b);
1524
1525 } else {
1526 for (int i = 0; i < num_components; i++)
1527 components[i] = emit_uint_const(ctx, bit_size,
1528 load_const->value[i].u32);
1529
1530 }
1531 constant = spirv_builder_const_composite(&ctx->builder, type,
1532 components, num_components);
1533 } else {
1534 assert(num_components == 1);
1535 if (bit_size == 1)
1536 constant = spirv_builder_const_bool(&ctx->builder,
1537 load_const->value[0].b);
1538 else
1539 constant = emit_uint_const(ctx, bit_size, load_const->value[0].u32);
1540 }
1541
1542 store_ssa_def(ctx, &load_const->def, constant);
1543 }
1544
1545 static void
1546 emit_load_ubo(struct ntv_context *ctx, nir_intrinsic_instr *intr)
1547 {
1548 nir_const_value *const_block_index = nir_src_as_const_value(intr->src[0]);
1549 assert(const_block_index); // no dynamic indexing for now
1550 assert(const_block_index->u32 == 0); // we only support the default UBO for now
1551
1552 nir_const_value *const_offset = nir_src_as_const_value(intr->src[1]);
1553 if (const_offset) {
1554 SpvId uvec4_type = get_uvec_type(ctx, 32, 4);
1555 SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
1556 SpvStorageClassUniform,
1557 uvec4_type);
1558
1559 unsigned idx = const_offset->u32;
1560 SpvId member = emit_uint_const(ctx, 32, 0);
1561 SpvId offset = emit_uint_const(ctx, 32, idx);
1562 SpvId offsets[] = { member, offset };
1563 SpvId ptr = spirv_builder_emit_access_chain(&ctx->builder, pointer_type,
1564 ctx->ubos[0], offsets,
1565 ARRAY_SIZE(offsets));
1566 SpvId result = spirv_builder_emit_load(&ctx->builder, uvec4_type, ptr);
1567
1568 SpvId type = get_dest_uvec_type(ctx, &intr->dest);
1569 unsigned num_components = nir_dest_num_components(intr->dest);
1570 if (num_components == 1) {
1571 uint32_t components[] = { 0 };
1572 result = spirv_builder_emit_composite_extract(&ctx->builder,
1573 type,
1574 result, components,
1575 1);
1576 } else if (num_components < 4) {
1577 SpvId constituents[num_components];
1578 SpvId uint_type = spirv_builder_type_uint(&ctx->builder, 32);
1579 for (uint32_t i = 0; i < num_components; ++i)
1580 constituents[i] = spirv_builder_emit_composite_extract(&ctx->builder,
1581 uint_type,
1582 result, &i,
1583 1);
1584
1585 result = spirv_builder_emit_composite_construct(&ctx->builder,
1586 type,
1587 constituents,
1588 num_components);
1589 }
1590
1591 if (nir_dest_bit_size(intr->dest) == 1)
1592 result = uvec_to_bvec(ctx, result, num_components);
1593
1594 store_dest(ctx, &intr->dest, result, nir_type_uint);
1595 } else
1596 unreachable("uniform-addressing not yet supported");
1597 }
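/* Load-path sketch for the constant-offset case handled above: the UBO is the
 * { uvec4 data[N]; } struct from emit_ubo(), so a load at constant index idx
 * becomes, roughly,
 *
 *   %ptr = OpAccessChain %_ptr_Uniform_uvec4 %ubo %uint_0 %uint_idx
 *   %val = OpLoad %uvec4 %ptr
 *
 * i.e. each index addresses one 16-byte vec4, and narrower destinations then
 * extract just the components they need.
 */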
1598
1599 static void
1600 emit_discard(struct ntv_context *ctx, nir_intrinsic_instr *intr)
1601 {
1602 assert(ctx->block_started);
1603 spirv_builder_emit_kill(&ctx->builder);
1604 /* discard is weird in NIR, so let's just create an unreachable block after
1605 it and hope that the vulkan driver will DCE any instructions in it. */
1606 spirv_builder_label(&ctx->builder, spirv_builder_new_id(&ctx->builder));
1607 }
1608
1609 static void
1610 emit_load_deref(struct ntv_context *ctx, nir_intrinsic_instr *intr)
1611 {
1612 SpvId ptr = get_src(ctx, intr->src);
1613
1614 SpvId result = spirv_builder_emit_load(&ctx->builder,
1615 get_glsl_type(ctx, nir_src_as_deref(intr->src[0])->type),
1616 ptr);
1617 unsigned num_components = nir_dest_num_components(intr->dest);
1618 unsigned bit_size = nir_dest_bit_size(intr->dest);
1619 result = bitcast_to_uvec(ctx, result, bit_size, num_components);
1620 store_dest(ctx, &intr->dest, result, nir_type_uint);
1621 }
1622
1623 static void
1624 emit_store_deref(struct ntv_context *ctx, nir_intrinsic_instr *intr)
1625 {
1626 SpvId ptr = get_src(ctx, &intr->src[0]);
1627 SpvId src = get_src(ctx, &intr->src[1]);
1628
1629 SpvId type = get_glsl_type(ctx, nir_src_as_deref(intr->src[0])->type);
1630 SpvId result = emit_bitcast(ctx, type, src);
1631 spirv_builder_emit_store(&ctx->builder, ptr, result);
1632 }
1633
1634 static SpvId
1635 create_builtin_var(struct ntv_context *ctx, SpvId var_type,
1636 SpvStorageClass storage_class,
1637 const char *name, SpvBuiltIn builtin)
1638 {
1639 SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
1640 storage_class,
1641 var_type);
1642 SpvId var = spirv_builder_emit_var(&ctx->builder, pointer_type,
1643 storage_class);
1644 spirv_builder_emit_name(&ctx->builder, var, name);
1645 spirv_builder_emit_builtin(&ctx->builder, var, builtin);
1646
1647 assert(ctx->num_entry_ifaces < ARRAY_SIZE(ctx->entry_ifaces));
1648 ctx->entry_ifaces[ctx->num_entry_ifaces++] = var;
1649 return var;
1650 }
1651
1652 static void
1653 emit_load_front_face(struct ntv_context *ctx, nir_intrinsic_instr *intr)
1654 {
1655 SpvId var_type = spirv_builder_type_bool(&ctx->builder);
1656 if (!ctx->front_face_var)
1657 ctx->front_face_var = create_builtin_var(ctx, var_type,
1658 SpvStorageClassInput,
1659 "gl_FrontFacing",
1660 SpvBuiltInFrontFacing);
1661
1662 SpvId result = spirv_builder_emit_load(&ctx->builder, var_type,
1663 ctx->front_face_var);
1664 assert(1 == nir_dest_num_components(intr->dest));
1665 store_dest(ctx, &intr->dest, result, nir_type_bool);
1666 }
1667
1668 static void
1669 emit_load_instance_id(struct ntv_context *ctx, nir_intrinsic_instr *intr)
1670 {
1671 SpvId var_type = spirv_builder_type_uint(&ctx->builder, 32);
1672 if (!ctx->instance_id_var)
1673 ctx->instance_id_var = create_builtin_var(ctx, var_type,
1674 SpvStorageClassInput,
1675 "gl_InstanceId",
1676 SpvBuiltInInstanceIndex);
1677
1678 SpvId result = spirv_builder_emit_load(&ctx->builder, var_type,
1679 ctx->instance_id_var);
1680 assert(1 == nir_dest_num_components(intr->dest));
1681 store_dest(ctx, &intr->dest, result, nir_type_uint);
1682 }
1683
1684 static void
1685 emit_load_vertex_id(struct ntv_context *ctx, nir_intrinsic_instr *intr)
1686 {
1687 SpvId var_type = spirv_builder_type_uint(&ctx->builder, 32);
1688 if (!ctx->vertex_id_var)
1689 ctx->vertex_id_var = create_builtin_var(ctx, var_type,
1690 SpvStorageClassInput,
1691 "gl_VertexID",
1692 SpvBuiltInVertexIndex);
1693
1694 SpvId result = spirv_builder_emit_load(&ctx->builder, var_type,
1695 ctx->vertex_id_var);
1696 assert(1 == nir_dest_num_components(intr->dest));
1697 store_dest(ctx, &intr->dest, result, nir_type_uint);
1698 }
1699
1700 static void
1701 emit_intrinsic(struct ntv_context *ctx, nir_intrinsic_instr *intr)
1702 {
1703 switch (intr->intrinsic) {
1704 case nir_intrinsic_load_ubo:
1705 emit_load_ubo(ctx, intr);
1706 break;
1707
1708 case nir_intrinsic_discard:
1709 emit_discard(ctx, intr);
1710 break;
1711
1712 case nir_intrinsic_load_deref:
1713 emit_load_deref(ctx, intr);
1714 break;
1715
1716 case nir_intrinsic_store_deref:
1717 emit_store_deref(ctx, intr);
1718 break;
1719
1720 case nir_intrinsic_load_front_face:
1721 emit_load_front_face(ctx, intr);
1722 break;
1723
1724 case nir_intrinsic_load_instance_id:
1725 emit_load_instance_id(ctx, intr);
1726 break;
1727
1728 case nir_intrinsic_load_vertex_id:
1729 emit_load_vertex_id(ctx, intr);
1730 break;
1731
1732 default:
1733 fprintf(stderr, "emit_intrinsic: not implemented (%s)\n",
1734 nir_intrinsic_infos[intr->intrinsic].name);
1735 unreachable("unsupported intrinsic");
1736 }
1737 }
1738
1739 static void
1740 emit_undef(struct ntv_context *ctx, nir_ssa_undef_instr *undef)
1741 {
1742 SpvId type = get_uvec_type(ctx, undef->def.bit_size,
1743 undef->def.num_components);
1744
1745 store_ssa_def(ctx, &undef->def,
1746 spirv_builder_emit_undef(&ctx->builder, type));
1747 }
1748
1749 static SpvId
1750 get_src_float(struct ntv_context *ctx, nir_src *src)
1751 {
1752 SpvId def = get_src(ctx, src);
1753 unsigned num_components = nir_src_num_components(*src);
1754 unsigned bit_size = nir_src_bit_size(*src);
1755 return bitcast_to_fvec(ctx, def, bit_size, num_components);
1756 }
1757
1758 static SpvId
1759 get_src_int(struct ntv_context *ctx, nir_src *src)
1760 {
1761 SpvId def = get_src(ctx, src);
1762 unsigned num_components = nir_src_num_components(*src);
1763 unsigned bit_size = nir_src_bit_size(*src);
1764 return bitcast_to_ivec(ctx, def, bit_size, num_components);
1765 }
1766
1767 static inline bool
1768 tex_instr_is_lod_allowed(nir_tex_instr *tex)
1769 {
1770 /* The Lod image operand can only be used with an OpTypeImage that has a Dim
1771 * operand of 1D, 2D, 3D, or Cube (SPIR-V 3.14, "Image Operands").
1772 */
1773
1774 return (tex->sampler_dim == GLSL_SAMPLER_DIM_1D ||
1775 tex->sampler_dim == GLSL_SAMPLER_DIM_2D ||
1776 tex->sampler_dim == GLSL_SAMPLER_DIM_3D ||
1777 tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE);
1778 }
1779
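/* Widens an integer vector by appending zero components, e.g. (purely for
 * illustration) ivec2(x, y) -> ivec3(x, y, 0), so that a following OpIAdd
 * sees two operands of equal length. */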
1780 static SpvId
1781 pad_coord_vector(struct ntv_context *ctx, SpvId orig, unsigned old_size, unsigned new_size)
1782 {
1783 SpvId int_type = spirv_builder_type_int(&ctx->builder, 32);
1784 SpvId type = get_ivec_type(ctx, 32, new_size);
1785 SpvId constituents[NIR_MAX_VEC_COMPONENTS] = {0};
1786 SpvId zero = emit_int_const(ctx, 32, 0);
1787 assert(new_size < NIR_MAX_VEC_COMPONENTS);
1788
1789 if (old_size == 1)
1790 constituents[0] = orig;
1791 else {
1792 for (unsigned i = 0; i < old_size; i++)
1793 constituents[i] = spirv_builder_emit_vector_extract(&ctx->builder, int_type, orig, i);
1794 }
1795
1796 for (unsigned i = old_size; i < new_size; i++)
1797 constituents[i] = zero;
1798
1799 return spirv_builder_emit_composite_construct(&ctx->builder, type,
1800 constituents, new_size);
1801 }
1802
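/* Translates the supported nir_texop_* instructions: txs becomes an image size
 * query, txf/txf_ms become an image fetch (with any texel offset folded into
 * the coordinate), and everything else is emitted as an image sample using
 * whatever projector/bias/lod/dref/gradient/offset operands were gathered from
 * the instruction's sources. */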
1803 static void
1804 emit_tex(struct ntv_context *ctx, nir_tex_instr *tex)
1805 {
1806 assert(tex->op == nir_texop_tex ||
1807 tex->op == nir_texop_txb ||
1808 tex->op == nir_texop_txl ||
1809 tex->op == nir_texop_txd ||
1810 tex->op == nir_texop_txf ||
1811 tex->op == nir_texop_txf_ms ||
1812 tex->op == nir_texop_txs);
1813 assert(tex->texture_index == tex->sampler_index);
1814
1815 SpvId coord = 0, proj = 0, bias = 0, lod = 0, dref = 0, dx = 0, dy = 0,
1816 offset = 0, sample = 0;
1817 unsigned coord_components = 0, coord_bitsize = 0, offset_components = 0;
1818 for (unsigned i = 0; i < tex->num_srcs; i++) {
1819 switch (tex->src[i].src_type) {
1820 case nir_tex_src_coord:
1821 if (tex->op == nir_texop_txf ||
1822 tex->op == nir_texop_txf_ms)
1823 coord = get_src_int(ctx, &tex->src[i].src);
1824 else
1825 coord = get_src_float(ctx, &tex->src[i].src);
1826 coord_components = nir_src_num_components(tex->src[i].src);
1827 coord_bitsize = nir_src_bit_size(tex->src[i].src);
1828 break;
1829
1830 case nir_tex_src_projector:
1831 assert(nir_src_num_components(tex->src[i].src) == 1);
1832 proj = get_src_float(ctx, &tex->src[i].src);
1833 assert(proj != 0);
1834 break;
1835
1836 case nir_tex_src_offset:
1837 offset = get_src_int(ctx, &tex->src[i].src);
1838 offset_components = nir_src_num_components(tex->src[i].src);
1839 break;
1840
1841 case nir_tex_src_bias:
1842 assert(tex->op == nir_texop_txb);
1843 bias = get_src_float(ctx, &tex->src[i].src);
1844 assert(bias != 0);
1845 break;
1846
1847 case nir_tex_src_lod:
1848 assert(nir_src_num_components(tex->src[i].src) == 1);
1849 if (tex->op == nir_texop_txf ||
1850 tex->op == nir_texop_txf_ms ||
1851 tex->op == nir_texop_txs)
1852 lod = get_src_int(ctx, &tex->src[i].src);
1853 else
1854 lod = get_src_float(ctx, &tex->src[i].src);
1855 assert(lod != 0);
1856 break;
1857
1858 case nir_tex_src_ms_index:
1859 assert(nir_src_num_components(tex->src[i].src) == 1);
1860 sample = get_src_int(ctx, &tex->src[i].src);
1861 break;
1862
1863 case nir_tex_src_comparator:
1864 assert(nir_src_num_components(tex->src[i].src) == 1);
1865 dref = get_src_float(ctx, &tex->src[i].src);
1866 assert(dref != 0);
1867 break;
1868
1869 case nir_tex_src_ddx:
1870 dx = get_src_float(ctx, &tex->src[i].src);
1871 assert(dx != 0);
1872 break;
1873
1874 case nir_tex_src_ddy:
1875 dy = get_src_float(ctx, &tex->src[i].src);
1876 assert(dy != 0);
1877 break;
1878
1879 default:
1880 fprintf(stderr, "unknown texture source: %d\n", tex->src[i].src_type);
1881 unreachable("unknown texture source");
1882 }
1883 }
1884
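/* implicit-LOD sampling is only valid in fragment shaders, so default to an
 * explicit LOD of 0 in every other stage */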
1885 if (lod == 0 && ctx->stage != MESA_SHADER_FRAGMENT) {
1886 lod = emit_float_const(ctx, 32, 0.0f);
1887 assert(lod != 0);
1888 }
1889
1890 SpvId image_type = ctx->image_types[tex->texture_index];
1891 SpvId sampled_type = spirv_builder_type_sampled_image(&ctx->builder,
1892 image_type);
1893
1894 assert(ctx->samplers_used & (1u << tex->texture_index));
1895 SpvId load = spirv_builder_emit_load(&ctx->builder, sampled_type,
1896 ctx->samplers[tex->texture_index]);
1897
1898 SpvId dest_type = get_dest_type(ctx, &tex->dest, tex->dest_type);
1899
1900 if (!tex_instr_is_lod_allowed(tex))
1901 lod = 0;
1902 if (tex->op == nir_texop_txs) {
1903 SpvId image = spirv_builder_emit_image(&ctx->builder, image_type, load);
1904 SpvId result = spirv_builder_emit_image_query_size(&ctx->builder,
1905 dest_type, image,
1906 lod);
1907 store_dest(ctx, &tex->dest, result, tex->dest_type);
1908 return;
1909 }
1910
1911 if (proj && coord_components > 0) {
1912 SpvId constituents[coord_components + 1];
1913 if (coord_components == 1)
1914 constituents[0] = coord;
1915 else {
1916 assert(coord_components > 1);
1917 SpvId float_type = spirv_builder_type_float(&ctx->builder, 32);
1918 for (uint32_t i = 0; i < coord_components; ++i)
1919 constituents[i] = spirv_builder_emit_composite_extract(&ctx->builder,
1920 float_type,
1921 coord,
1922 &i, 1);
1923 }
1924
1925 constituents[coord_components++] = proj;
1926
1927 SpvId vec_type = get_fvec_type(ctx, 32, coord_components);
1928 coord = spirv_builder_emit_composite_construct(&ctx->builder,
1929 vec_type,
1930 constituents,
1931 coord_components);
1932 }
1933
1934 SpvId actual_dest_type = dest_type;
1935 if (dref)
1936 actual_dest_type = spirv_builder_type_float(&ctx->builder, 32);
1937
1938 SpvId result;
1939 if (tex->op == nir_texop_txf ||
1940 tex->op == nir_texop_txf_ms) {
1941 SpvId image = spirv_builder_emit_image(&ctx->builder, image_type, load);
1942 if (offset) {
1943 /* SPIR-V requires matching vector lengths for OpIAdd, so if the coord and
1944 * offset vectors differ in size, pad the shorter one with zeroes to mimic
1945 * the implicit widening GLSL performs here
1946 */
1947 if (offset_components > coord_components)
1948 coord = pad_coord_vector(ctx, coord, coord_components, offset_components);
1949 else if (coord_components > offset_components)
1950 offset = pad_coord_vector(ctx, offset, offset_components, coord_components);
1951 coord = emit_binop(ctx, SpvOpIAdd,
1952 get_ivec_type(ctx, coord_bitsize, coord_components),
1953 coord, offset);
1954 }
1955 result = spirv_builder_emit_image_fetch(&ctx->builder, dest_type,
1956 image, coord, lod, sample);
1957 } else {
1958 result = spirv_builder_emit_image_sample(&ctx->builder,
1959 actual_dest_type, load,
1960 coord,
1961 proj != 0,
1962 lod, bias, dref, dx, dy,
1963 offset);
1964 }
1965
1966 spirv_builder_emit_decoration(&ctx->builder, result,
1967 SpvDecorationRelaxedPrecision);
1968
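/* depth-comparison sampling yields a scalar; replicate it across all
 * components when NIR expects a vector destination */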
1969 if (dref && nir_dest_num_components(tex->dest) > 1) {
1970 SpvId components[4] = { result, result, result, result };
1971 result = spirv_builder_emit_composite_construct(&ctx->builder,
1972 dest_type,
1973 components,
1974 4);
1975 }
1976
1977 store_dest(ctx, &tex->dest, result, tex->dest_type);
1978 }
1979
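/* Starts a new SPIR-V block, first terminating the current one (if any) with
 * an unconditional branch so that every block ends in exactly one terminator. */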
1980 static void
1981 start_block(struct ntv_context *ctx, SpvId label)
1982 {
1983 /* terminate previous block if needed */
1984 if (ctx->block_started)
1985 spirv_builder_emit_branch(&ctx->builder, label);
1986
1987 /* start new block */
1988 spirv_builder_label(&ctx->builder, label);
1989 ctx->block_started = true;
1990 }
1991
1992 static void
1993 branch(struct ntv_context *ctx, SpvId label)
1994 {
1995 assert(ctx->block_started);
1996 spirv_builder_emit_branch(&ctx->builder, label);
1997 ctx->block_started = false;
1998 }
1999
2000 static void
2001 branch_conditional(struct ntv_context *ctx, SpvId condition, SpvId then_id,
2002 SpvId else_id)
2003 {
2004 assert(ctx->block_started);
2005 spirv_builder_emit_branch_conditional(&ctx->builder, condition,
2006 then_id, else_id);
2007 ctx->block_started = false;
2008 }
2009
2010 static void
2011 emit_jump(struct ntv_context *ctx, nir_jump_instr *jump)
2012 {
2013 switch (jump->type) {
2014 case nir_jump_break:
2015 assert(ctx->loop_break);
2016 branch(ctx, ctx->loop_break);
2017 break;
2018
2019 case nir_jump_continue:
2020 assert(ctx->loop_cont);
2021 branch(ctx, ctx->loop_cont);
2022 break;
2023
2024 default:
2025 unreachable("Unsupported jump type\n");
2026 }
2027 }
2028
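/* A variable deref simply resolves to the SpvId of the variable itself,
 * looked up from the vars hash table. */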
2029 static void
2030 emit_deref_var(struct ntv_context *ctx, nir_deref_instr *deref)
2031 {
2032 assert(deref->deref_type == nir_deref_type_var);
2033
2034 struct hash_entry *he = _mesa_hash_table_search(ctx->vars, deref->var);
2035 assert(he);
2036 SpvId result = (SpvId)(intptr_t)he->data;
2037 store_dest_raw(ctx, &deref->dest, result);
2038 }
2039
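/* An array deref becomes an access chain off the parent pointer; only shader
 * input/output arrays are expected here. */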
2040 static void
2041 emit_deref_array(struct ntv_context *ctx, nir_deref_instr *deref)
2042 {
2043 assert(deref->deref_type == nir_deref_type_array);
2044 nir_variable *var = nir_deref_instr_get_variable(deref);
2045
2046 SpvStorageClass storage_class;
2047 switch (var->data.mode) {
2048 case nir_var_shader_in:
2049 storage_class = SpvStorageClassInput;
2050 break;
2051
2052 case nir_var_shader_out:
2053 storage_class = SpvStorageClassOutput;
2054 break;
2055
2056 default:
2057 unreachable("Unsupported nir_variable_mode\n");
2058 }
2059
2060 SpvId index = get_src(ctx, &deref->arr.index);
2061
2062 SpvId ptr_type = spirv_builder_type_pointer(&ctx->builder,
2063 storage_class,
2064 get_glsl_type(ctx, deref->type));
2065
2066 SpvId result = spirv_builder_emit_access_chain(&ctx->builder,
2067 ptr_type,
2068 get_src(ctx, &deref->parent),
2069 &index, 1);
2070 /* uint is a bit of a lie here, it's really just an opaque type */
2071 store_dest(ctx, &deref->dest, result, nir_type_uint);
2072 }
2073
2074 static void
2075 emit_deref(struct ntv_context *ctx, nir_deref_instr *deref)
2076 {
2077 switch (deref->deref_type) {
2078 case nir_deref_type_var:
2079 emit_deref_var(ctx, deref);
2080 break;
2081
2082 case nir_deref_type_array:
2083 emit_deref_array(ctx, deref);
2084 break;
2085
2086 default:
2087 unreachable("unexpected deref_type");
2088 }
2089 }
2090
2091 static void
2092 emit_block(struct ntv_context *ctx, struct nir_block *block)
2093 {
2094 start_block(ctx, block_label(ctx, block));
2095 nir_foreach_instr(instr, block) {
2096 switch (instr->type) {
2097 case nir_instr_type_alu:
2098 emit_alu(ctx, nir_instr_as_alu(instr));
2099 break;
2100 case nir_instr_type_intrinsic:
2101 emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
2102 break;
2103 case nir_instr_type_load_const:
2104 emit_load_const(ctx, nir_instr_as_load_const(instr));
2105 break;
2106 case nir_instr_type_ssa_undef:
2107 emit_undef(ctx, nir_instr_as_ssa_undef(instr));
2108 break;
2109 case nir_instr_type_tex:
2110 emit_tex(ctx, nir_instr_as_tex(instr));
2111 break;
2112 case nir_instr_type_phi:
2113 unreachable("nir_instr_type_phi not supported");
2114 break;
2115 case nir_instr_type_jump:
2116 emit_jump(ctx, nir_instr_as_jump(instr));
2117 break;
2118 case nir_instr_type_call:
2119 unreachable("nir_instr_type_call not supported");
2120 break;
2121 case nir_instr_type_parallel_copy:
2122 unreachable("nir_instr_type_parallel_copy not supported");
2123 break;
2124 case nir_instr_type_deref:
2125 emit_deref(ctx, nir_instr_as_deref(instr));
2126 break;
2127 }
2128 }
2129 }
2130
2131 static void
2132 emit_cf_list(struct ntv_context *ctx, struct exec_list *list);
2133
2134 static SpvId
2135 get_src_bool(struct ntv_context *ctx, nir_src *src)
2136 {
2137 assert(nir_src_bit_size(*src) == 1);
2138 return get_src(ctx, src);
2139 }
2140
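/* Emits a structured selection. A rough sketch of the resulting block layout:
 *
 *   header:  OpSelectionMerge %endif
 *            OpBranchConditional %cond %then %else
 *   then:    ...then_list...
 *   else:    ...else_list... (only if present, otherwise %else == %endif)
 *   endif:   execution continues here
 */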
2141 static void
2142 emit_if(struct ntv_context *ctx, nir_if *if_stmt)
2143 {
2144 SpvId condition = get_src_bool(ctx, &if_stmt->condition);
2145
2146 SpvId header_id = spirv_builder_new_id(&ctx->builder);
2147 SpvId then_id = block_label(ctx, nir_if_first_then_block(if_stmt));
2148 SpvId endif_id = spirv_builder_new_id(&ctx->builder);
2149 SpvId else_id = endif_id;
2150
2151 bool has_else = !exec_list_is_empty(&if_stmt->else_list);
2152 if (has_else) {
2153 assert(nir_if_first_else_block(if_stmt)->index < ctx->num_blocks);
2154 else_id = block_label(ctx, nir_if_first_else_block(if_stmt));
2155 }
2156
2157 /* create a header-block */
2158 start_block(ctx, header_id);
2159 spirv_builder_emit_selection_merge(&ctx->builder, endif_id,
2160 SpvSelectionControlMaskNone);
2161 branch_conditional(ctx, condition, then_id, else_id);
2162
2163 emit_cf_list(ctx, &if_stmt->then_list);
2164
2165 if (has_else) {
2166 if (ctx->block_started)
2167 branch(ctx, endif_id);
2168
2169 emit_cf_list(ctx, &if_stmt->else_list);
2170 }
2171
2172 start_block(ctx, endif_id);
2173 }
2174
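/* Emits a structured loop. A rough sketch of the resulting block layout:
 *
 *   header:  OpLoopMerge %break %cont
 *            OpBranch %begin
 *   begin:   ...loop body... (nir_jump_break/continue branch to %break/%cont)
 *   cont:    OpBranch %header
 *   break:   execution continues here
 */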
2175 static void
2176 emit_loop(struct ntv_context *ctx, nir_loop *loop)
2177 {
2178 SpvId header_id = spirv_builder_new_id(&ctx->builder);
2179 SpvId begin_id = block_label(ctx, nir_loop_first_block(loop));
2180 SpvId break_id = spirv_builder_new_id(&ctx->builder);
2181 SpvId cont_id = spirv_builder_new_id(&ctx->builder);
2182
2183 /* create a header-block */
2184 start_block(ctx, header_id);
2185 spirv_builder_loop_merge(&ctx->builder, break_id, cont_id, SpvLoopControlMaskNone);
2186 branch(ctx, begin_id);
2187
2188 SpvId save_break = ctx->loop_break;
2189 SpvId save_cont = ctx->loop_cont;
2190 ctx->loop_break = break_id;
2191 ctx->loop_cont = cont_id;
2192
2193 emit_cf_list(ctx, &loop->body);
2194
2195 ctx->loop_break = save_break;
2196 ctx->loop_cont = save_cont;
2197
2198 /* loop->body may have already ended our block */
2199 if (ctx->block_started)
2200 branch(ctx, cont_id);
2201 start_block(ctx, cont_id);
2202 branch(ctx, header_id);
2203
2204 start_block(ctx, break_id);
2205 }
2206
2207 static void
2208 emit_cf_list(struct ntv_context *ctx, struct exec_list *list)
2209 {
2210 foreach_list_typed(nir_cf_node, node, node, list) {
2211 switch (node->type) {
2212 case nir_cf_node_block:
2213 emit_block(ctx, nir_cf_node_as_block(node));
2214 break;
2215
2216 case nir_cf_node_if:
2217 emit_if(ctx, nir_cf_node_as_if(node));
2218 break;
2219
2220 case nir_cf_node_loop:
2221 emit_loop(ctx, nir_cf_node_as_loop(node));
2222 break;
2223
2224 case nir_cf_node_function:
2225 unreachable("nir_cf_node_function not supported");
2226 break;
2227 }
2228 }
2229 }
2230
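/* Entry point of the translator: emits the required capabilities, memory
 * model, and GLSL.std.450 import for the shader stage, declares all inputs,
 * outputs, streamout info and uniforms, then walks the NIR entrypoint's CFG to
 * build the SPIR-V function body (with a leading block that only holds the
 * OpVariables backing NIR registers), emits the streamout outputs, declares
 * the "main" entry point, and finally serializes the module into a freshly
 * allocated spirv_shader. */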
2231 struct spirv_shader *
2232 nir_to_spirv(struct nir_shader *s, const struct zink_so_info *so_info)
2233 {
2234 struct spirv_shader *ret = NULL;
2235
2236 struct ntv_context ctx = {};
2237 ctx.mem_ctx = ralloc_context(NULL);
2238 ctx.builder.mem_ctx = ctx.mem_ctx;
2239
2240 switch (s->info.stage) {
2241 case MESA_SHADER_VERTEX:
2242 case MESA_SHADER_FRAGMENT:
2243 case MESA_SHADER_COMPUTE:
2244 spirv_builder_emit_cap(&ctx.builder, SpvCapabilityShader);
2245 break;
2246
2247 case MESA_SHADER_TESS_CTRL:
2248 case MESA_SHADER_TESS_EVAL:
2249 spirv_builder_emit_cap(&ctx.builder, SpvCapabilityTessellation);
2250 break;
2251
2252 case MESA_SHADER_GEOMETRY:
2253 spirv_builder_emit_cap(&ctx.builder, SpvCapabilityGeometry);
2254 break;
2255
2256 default:
2257 unreachable("invalid stage");
2258 }
2259
2260 // TODO: only enable when needed
2261 if (s->info.stage == MESA_SHADER_FRAGMENT) {
2262 spirv_builder_emit_cap(&ctx.builder, SpvCapabilitySampled1D);
2263 spirv_builder_emit_cap(&ctx.builder, SpvCapabilityImageQuery);
2264 spirv_builder_emit_cap(&ctx.builder, SpvCapabilityDerivativeControl);
2265 }
2266
2267 ctx.stage = s->info.stage;
2268 ctx.GLSL_std_450 = spirv_builder_import(&ctx.builder, "GLSL.std.450");
2269 spirv_builder_emit_source(&ctx.builder, SpvSourceLanguageGLSL, 450);
2270
2271 spirv_builder_emit_mem_model(&ctx.builder, SpvAddressingModelLogical,
2272 SpvMemoryModelGLSL450);
2273
2274 SpvExecutionModel exec_model;
2275 switch (s->info.stage) {
2276 case MESA_SHADER_VERTEX:
2277 exec_model = SpvExecutionModelVertex;
2278 break;
2279 case MESA_SHADER_TESS_CTRL:
2280 exec_model = SpvExecutionModelTessellationControl;
2281 break;
2282 case MESA_SHADER_TESS_EVAL:
2283 exec_model = SpvExecutionModelTessellationEvaluation;
2284 break;
2285 case MESA_SHADER_GEOMETRY:
2286 exec_model = SpvExecutionModelGeometry;
2287 break;
2288 case MESA_SHADER_FRAGMENT:
2289 exec_model = SpvExecutionModelFragment;
2290 break;
2291 case MESA_SHADER_COMPUTE:
2292 exec_model = SpvExecutionModelGLCompute;
2293 break;
2294 default:
2295 unreachable("invalid stage");
2296 }
2297
2298 SpvId type_void = spirv_builder_type_void(&ctx.builder);
2299 SpvId type_main = spirv_builder_type_function(&ctx.builder, type_void,
2300 NULL, 0);
2301 SpvId entry_point = spirv_builder_new_id(&ctx.builder);
2302 spirv_builder_emit_name(&ctx.builder, entry_point, "main");
2303
2304 ctx.vars = _mesa_hash_table_create(ctx.mem_ctx, _mesa_hash_pointer,
2305 _mesa_key_pointer_equal);
2306
2307 ctx.so_outputs = _mesa_hash_table_create(ctx.mem_ctx, _mesa_hash_u32,
2308 _mesa_key_u32_equal);
2309
2310 nir_foreach_shader_in_variable(var, s)
2311 emit_input(&ctx, var);
2312
2313 nir_foreach_shader_out_variable(var, s)
2314 emit_output(&ctx, var);
2315
2316 if (so_info)
2317 emit_so_info(&ctx, util_last_bit64(s->info.outputs_written), so_info);
2318 nir_foreach_variable_with_modes(var, s, nir_var_uniform |
2319 nir_var_mem_ubo |
2320 nir_var_mem_ssbo)
2321 emit_uniform(&ctx, var);
2322
2323 if (s->info.stage == MESA_SHADER_FRAGMENT) {
2324 spirv_builder_emit_exec_mode(&ctx.builder, entry_point,
2325 SpvExecutionModeOriginUpperLeft);
2326 if (s->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
2327 spirv_builder_emit_exec_mode(&ctx.builder, entry_point,
2328 SpvExecutionModeDepthReplacing);
2329 }
2330
2331 if (so_info && so_info->so_info.num_outputs) {
2332 spirv_builder_emit_cap(&ctx.builder, SpvCapabilityTransformFeedback);
2333 spirv_builder_emit_exec_mode(&ctx.builder, entry_point,
2334 SpvExecutionModeXfb);
2335 }
2336
2337 spirv_builder_function(&ctx.builder, entry_point, type_void,
2338 SpvFunctionControlMaskNone,
2339 type_main);
2340
2341 nir_function_impl *entry = nir_shader_get_entrypoint(s);
2342 nir_metadata_require(entry, nir_metadata_block_index);
2343
2344 ctx.defs = ralloc_array_size(ctx.mem_ctx,
2345 sizeof(SpvId), entry->ssa_alloc);
2346 if (!ctx.defs)
2347 goto fail;
2348 ctx.num_defs = entry->ssa_alloc;
2349
2350 nir_index_local_regs(entry);
2351 ctx.regs = ralloc_array_size(ctx.mem_ctx,
2352 sizeof(SpvId), entry->reg_alloc);
2353 if (!ctx.regs)
2354 goto fail;
2355 ctx.num_regs = entry->reg_alloc;
2356
2357 SpvId *block_ids = ralloc_array_size(ctx.mem_ctx,
2358 sizeof(SpvId), entry->num_blocks);
2359 if (!block_ids)
2360 goto fail;
2361
2362 for (unsigned i = 0; i < entry->num_blocks; ++i)
2363 block_ids[i] = spirv_builder_new_id(&ctx.builder);
2364
2365 ctx.block_ids = block_ids;
2366 ctx.num_blocks = entry->num_blocks;
2367
2368 /* emit a block only for the variable declarations */
2369 start_block(&ctx, spirv_builder_new_id(&ctx.builder));
2370 foreach_list_typed(nir_register, reg, node, &entry->registers) {
2371 SpvId type = get_vec_from_bit_size(&ctx, reg->bit_size, reg->num_components);
2372 SpvId pointer_type = spirv_builder_type_pointer(&ctx.builder,
2373 SpvStorageClassFunction,
2374 type);
2375 SpvId var = spirv_builder_emit_var(&ctx.builder, pointer_type,
2376 SpvStorageClassFunction);
2377
2378 ctx.regs[reg->index] = var;
2379 }
2380
2381 emit_cf_list(&ctx, &entry->body);
2382
2383 if (so_info)
2384 emit_so_outputs(&ctx, so_info);
2385
2386 spirv_builder_return(&ctx.builder); // doesn't really belong here, but emitting the implicit return now keeps things simple
2387 spirv_builder_function_end(&ctx.builder);
2388
2389 spirv_builder_emit_entry_point(&ctx.builder, exec_model, entry_point,
2390 "main", ctx.entry_ifaces,
2391 ctx.num_entry_ifaces);
2392
2393 size_t num_words = spirv_builder_get_num_words(&ctx.builder);
2394
2395 ret = CALLOC_STRUCT(spirv_shader);
2396 if (!ret)
2397 goto fail;
2398
2399 ret->words = MALLOC(sizeof(uint32_t) * num_words);
2400 if (!ret->words)
2401 goto fail;
2402
2403 ret->num_words = spirv_builder_get_words(&ctx.builder, ret->words, num_words);
2404 assert(ret->num_words == num_words);
2405
2406 ralloc_free(ctx.mem_ctx);
2407
2408 return ret;
2409
2410 fail:
2411 ralloc_free(ctx.mem_ctx);
2412
2413 if (ret)
2414 spirv_shader_delete(ret);
2415
2416 return NULL;
2417 }
2418
2419 void
2420 spirv_shader_delete(struct spirv_shader *s)
2421 {
2422 FREE(s->words);
2423 FREE(s);
2424 }