turnip: Gather information for transform feedback
[mesa.git] src/freedreno/vulkan/tu_shader.c
/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "spirv/nir_spirv.h"
#include "util/mesa-sha1.h"
#include "nir/nir_xfb_info.h"

#include "ir3/ir3_nir.h"

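/*
 * This file handles the shader half of pipeline creation for turnip:
 * translating SPIR-V modules to NIR, lowering descriptor and I/O accesses
 * into the flat indices ir3 expects, and compiling the NIR into ir3
 * variants (plus an optional binning-pass variant for vertex shaders).
 *
 * Illustrative call sequence, pieced together only from the entry points
 * below; the real caller is the pipeline-creation code, which is not part
 * of this file:
 *
 *    struct tu_shader *shader =
 *       tu_shader_create(dev, stage, stage_info, layout, alloc);
 *
 *    struct tu_shader_compile_options options;
 *    tu_shader_compile_options_init(&options, pipeline_info);
 *
 *    VkResult result =
 *       tu_shader_compile(dev, shader, next_stage, &options, alloc);
 *
 *    ...
 *    tu_shader_destroy(dev, shader, alloc);
 */
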
static nir_shader *
tu_spirv_to_nir(struct ir3_compiler *compiler,
                const uint32_t *words,
                size_t word_count,
                gl_shader_stage stage,
                const char *entry_point_name,
                const VkSpecializationInfo *spec_info)
{
   /* TODO these are made-up */
   const struct spirv_to_nir_options spirv_options = {
      .frag_coord_is_sysval = true,
      .lower_ubo_ssbo_access_to_offsets = true,
      .caps = {
         .transform_feedback = compiler->gpu_id >= 600,
      },
   };
   const nir_shader_compiler_options *nir_options =
      ir3_get_compiler_options(compiler);

   /* convert VkSpecializationInfo */
   struct nir_spirv_specialization *spec = NULL;
   uint32_t num_spec = 0;
   if (spec_info && spec_info->mapEntryCount) {
      spec = malloc(sizeof(*spec) * spec_info->mapEntryCount);
      if (!spec)
         return NULL;

      for (uint32_t i = 0; i < spec_info->mapEntryCount; i++) {
         const VkSpecializationMapEntry *entry = &spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry->offset;
         assert(data + entry->size <= spec_info->pData + spec_info->dataSize);
         spec[i].id = entry->constantID;
         if (entry->size == 8)
            spec[i].data64 = *(const uint64_t *) data;
         else
            spec[i].data32 = *(const uint32_t *) data;
         spec[i].defined_on_module = false;
      }

      num_spec = spec_info->mapEntryCount;
   }

   nir_shader *nir =
      spirv_to_nir(words, word_count, spec, num_spec, stage, entry_point_name,
                   &spirv_options, nir_options);

   free(spec);

   assert(nir->info.stage == stage);
   nir_validate_shader(nir, "after spirv_to_nir");

   return nir;
}

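/*
 * Return the flat descriptor index for a (set, binding) pair, adding a new
 * entry to the map if it isn't there yet.  The returned index is the sum of
 * the array sizes of all earlier entries, i.e. the first descriptor slot
 * occupied by this binding.
 */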
static unsigned
map_add(struct tu_descriptor_map *map, int set, int binding, int value,
        int array_size)
{
   unsigned index = 0;
   for (unsigned i = 0; i < map->num; i++) {
      if (set == map->set[i] && binding == map->binding[i]) {
         assert(value == map->value[i]);
         assert(array_size == map->array_size[i]);
         return index;
      }
      index += map->array_size[i];
   }

   assert(index == map->num_desc);

   map->set[map->num] = set;
   map->binding[map->num] = binding;
   map->value[map->num] = value;
   map->array_size[map->num] = array_size;
   map->num++;
   map->num_desc += array_size;

   return index;
}

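/*
 * Lower a texture or sampler deref source to a flat descriptor index.
 * Constant array indices are folded into instr->texture_index or
 * instr->sampler_index; any remaining dynamic part becomes a
 * nir_tex_src_texture_offset / nir_tex_src_sampler_offset source.
 */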
static void
lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
                        struct tu_shader *shader,
                        const struct tu_pipeline_layout *layout)
{
   nir_ssa_def *index = NULL;
   unsigned base_index = 0;
   unsigned array_elements = 1;
   nir_tex_src *src = &instr->src[src_idx];
   bool is_sampler = src->src_type == nir_tex_src_sampler_deref;

   /* First we compute the offsets */
   nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
   while (deref->deref_type != nir_deref_type_var) {
      assert(deref->parent.is_ssa);
      nir_deref_instr *parent =
         nir_instr_as_deref(deref->parent.ssa->parent_instr);

      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index) && index == NULL) {
         /* We're still building a direct index */
         base_index += nir_src_as_uint(deref->arr.index) * array_elements;
      } else {
         if (index == NULL) {
            /* We used to be direct but not anymore */
            index = nir_imm_int(b, base_index);
            base_index = 0;
         }

         index = nir_iadd(b, index,
                          nir_imul(b, nir_imm_int(b, array_elements),
                                   nir_ssa_for_src(b, deref->arr.index, 1)));
      }

      array_elements *= glsl_get_length(parent->type);

      deref = parent;
   }

   if (index)
      index = nir_umin(b, index, nir_imm_int(b, array_elements - 1));

   /* Now that we have the offsets, apply them by rewriting the source, or
    * remove the source entirely if it is no longer needed.
    */
   if (index) {
      nir_instr_rewrite_src(&instr->instr, &src->src,
                            nir_src_for_ssa(index));

      src->src_type = is_sampler ?
         nir_tex_src_sampler_offset :
         nir_tex_src_texture_offset;
   } else {
      nir_tex_instr_remove_src(instr, src_idx);
   }

   uint32_t set = deref->var->data.descriptor_set;
   uint32_t binding = deref->var->data.binding;
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];

   int desc_index = map_add(is_sampler ?
                            &shader->sampler_map : &shader->texture_map,
                            deref->var->data.descriptor_set,
                            deref->var->data.binding,
                            deref->var->data.index,
                            binding_layout->array_size) + base_index;
   if (is_sampler)
      instr->sampler_index = desc_index;
   else
      instr->texture_index = desc_index;
}

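/*
 * Lower the texture and sampler deref sources of a tex instruction, if
 * present.  Returns true if anything was lowered.
 */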
static bool
lower_sampler(nir_builder *b, nir_tex_instr *instr, struct tu_shader *shader,
              const struct tu_pipeline_layout *layout)
{
   int texture_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);

   if (texture_idx >= 0)
      lower_tex_src_to_offset(b, instr, texture_idx, shader, layout);

   int sampler_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);

   if (sampler_idx >= 0)
      lower_tex_src_to_offset(b, instr, sampler_idx, shader, layout);

   if (texture_idx < 0 && sampler_idx < 0)
      return false;

   return true;
}

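/*
 * Lower load_push_constant to a load_ubo from UBO index 0, which this driver
 * reserves for push constants (regular UBOs start at index 1, see
 * lower_vulkan_resource_index).
 */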
static void
lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *instr,
                         struct tu_shader *shader)
{
   /* note: ir3 wants load_ubo, not load_uniform */
   assert(nir_intrinsic_base(instr) == 0);

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   load->num_components = instr->num_components;
   load->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
   load->src[1] = instr->src[0];
   nir_ssa_dest_init(&load->instr, &load->dest,
                     load->num_components, instr->dest.ssa.bit_size,
                     instr->dest.ssa.name);
   nir_builder_instr_insert(b, &load->instr);
   nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));

   nir_instr_remove(&instr->instr);
}

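/*
 * Lower vulkan_resource_index to the flat UBO/SSBO index that ir3 expects,
 * registering the (set, binding) pair in the shader's descriptor map along
 * the way.
 */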
static void
lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr,
                            struct tu_shader *shader,
                            const struct tu_pipeline_layout *layout)
{
   nir_const_value *const_val = nir_src_as_const_value(instr->src[0]);

   unsigned set = nir_intrinsic_desc_set(instr);
   unsigned binding = nir_intrinsic_binding(instr);
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];
   unsigned index = 0;

   switch (nir_intrinsic_desc_type(instr)) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      if (!const_val)
         tu_finishme("non-constant vulkan_resource_index array index");
      /* skip index 0 which is used for push constants */
      index = map_add(&shader->ubo_map, set, binding, 0,
                      binding_layout->array_size) + 1;
      index += const_val->u32;
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      if (!const_val)
         tu_finishme("non-constant vulkan_resource_index array index");
      index = map_add(&shader->ssbo_map, set, binding, 0,
                      binding_layout->array_size);
      index += const_val->u32;
      break;
   default:
      tu_finishme("unsupported desc_type for vulkan_resource_index");
      break;
   }

   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(nir_imm_int(b, index)));
   nir_instr_remove(&instr->instr);
}

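/*
 * Lower an image deref access to a flat image index taken from the image
 * map, adding any (possibly dynamic) array index on top.
 */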
static void
lower_image_deref(nir_builder *b,
                  nir_intrinsic_instr *instr, struct tu_shader *shader,
                  const struct tu_pipeline_layout *layout)
{
   nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   uint32_t set = var->data.descriptor_set;
   uint32_t binding = var->data.binding;
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];

   nir_ssa_def *index = nir_imm_int(b,
                                    map_add(&shader->image_map,
                                            set, binding, var->data.index,
                                            binding_layout->array_size));
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);
      index = nir_iadd(b, index, nir_ssa_for_src(b, deref->arr.index, 1));
   }
   nir_rewrite_image_intrinsic(instr, index, false);
}

static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
                struct tu_shader *shader,
                const struct tu_pipeline_layout *layout)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_layer_id:
      /* TODO: remove this when layered rendering is implemented */
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(nir_imm_int(b, 0)));
      nir_instr_remove(&instr->instr);
      return true;

   case nir_intrinsic_load_push_constant:
      lower_load_push_constant(b, instr, shader);
      return true;

   case nir_intrinsic_vulkan_resource_index:
      lower_vulkan_resource_index(b, instr, shader, layout);
      return true;

   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_image_deref_samples:
   case nir_intrinsic_image_deref_load_param_intel:
   case nir_intrinsic_image_deref_load_raw_intel:
   case nir_intrinsic_image_deref_store_raw_intel:
      lower_image_deref(b, instr, shader, layout);
      return true;

   default:
      return false;
   }
}

static bool
lower_impl(nir_function_impl *impl, struct tu_shader *shader,
           const struct tu_pipeline_layout *layout)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         b.cursor = nir_before_instr(instr);
         switch (instr->type) {
         case nir_instr_type_tex:
            progress |= lower_sampler(&b, nir_instr_as_tex(instr), shader, layout);
            break;
         case nir_instr_type_intrinsic:
            progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader, layout);
            break;
         default:
            break;
         }
      }
   }

   return progress;
}

static bool
tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader,
            const struct tu_pipeline_layout *layout)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_impl(function->impl, tu_shader, layout);
   }

   /* spirv_to_nir produces num_ssbos equal to the number of SSBO-containing
    * variables, while ir3 wants the number of descriptors (like the gallium
    * path).
    */
   shader->info.num_ssbos = tu_shader->ssbo_map.num_desc;

   return progress;
}

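/*
 * Translate the transform-feedback info gathered from NIR into the
 * ir3_stream_output_info layout consumed by the ir3 backend: VARYING_SLOT_*
 * locations are remapped to consecutive output indices and per-buffer
 * strides are divided by four (bytes to dwords).
 */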
static void
tu_gather_xfb_info(nir_shader *nir, struct tu_shader *shader)
{
   struct ir3_stream_output_info *info = &shader->ir3_shader.stream_output;
   nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);

   if (!xfb)
      return;

   /* create a map from VARYING_SLOT_* enums to consecutive indices */
   uint8_t num_outputs = 0;
   uint64_t outputs_written = 0;
   for (int i = 0; i < xfb->output_count; i++)
      outputs_written |= BITFIELD64_BIT(xfb->outputs[i].location);

   uint8_t output_map[VARYING_SLOT_TESS_MAX];
   memset(output_map, 0, sizeof(output_map));

   for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      if (outputs_written & BITFIELD64_BIT(attr))
         output_map[attr] = num_outputs++;
   }

   assert(xfb->output_count < IR3_MAX_SO_OUTPUTS);
   info->num_outputs = xfb->output_count;

   for (int i = 0; i < IR3_MAX_SO_BUFFERS; i++)
      info->stride[i] = xfb->buffers[i].stride / 4;

   for (int i = 0; i < xfb->output_count; i++) {
      info->output[i].register_index = output_map[xfb->outputs[i].location];
      info->output[i].start_component = xfb->outputs[i].component_offset;
      info->output[i].num_components =
         util_bitcount(xfb->outputs[i].component_mask);
      info->output[i].output_buffer = xfb->outputs[i].buffer;
      info->output[i].dst_offset = xfb->outputs[i].offset;
      info->output[i].stream = xfb->buffer_to_stream[xfb->outputs[i].buffer];
   }

   ralloc_free(xfb);
}

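/*
 * Create a tu_shader for one pipeline stage: translate the SPIR-V module to
 * NIR and run the lowering passes (inlining, I/O and descriptor lowering,
 * transform-feedback gathering, etc.) needed before handing the NIR to ir3.
 */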
struct tu_shader *
tu_shader_create(struct tu_device *dev,
                 gl_shader_stage stage,
                 const VkPipelineShaderStageCreateInfo *stage_info,
                 struct tu_pipeline_layout *layout,
                 const VkAllocationCallbacks *alloc)
{
   const struct tu_shader_module *module =
      tu_shader_module_from_handle(stage_info->module);
   struct tu_shader *shader;

   const uint32_t max_variant_count = (stage == MESA_SHADER_VERTEX) ? 2 : 1;
   shader = vk_zalloc2(
      &dev->alloc, alloc,
      sizeof(*shader) + sizeof(struct ir3_shader_variant) * max_variant_count,
      8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!shader)
      return NULL;

   /* translate SPIR-V to NIR */
   assert(module->code_size % 4 == 0);
   nir_shader *nir = tu_spirv_to_nir(
      dev->compiler, (const uint32_t *) module->code, module->code_size / 4,
      stage, stage_info->pName, stage_info->pSpecializationInfo);
   if (!nir) {
      vk_free2(&dev->alloc, alloc, shader);
      return NULL;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_NIR)) {
      fprintf(stderr, "translated nir:\n");
      nir_print_shader(nir, stderr);
   }

   /* multi-step inlining procedure */
   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_opt_deref);
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (!func->is_entrypoint)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   NIR_PASS_V(nir, nir_lower_variable_initializers, ~nir_var_function_temp);

   /* Split member structs. We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   /* Gather information for transform feedback.
    * This should be called after nir_split_per_member_structs.
    */
   if (nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_TESS_EVAL ||
       nir->info.stage == MESA_SHADER_GEOMETRY)
      tu_gather_xfb_info(nir, shader);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);

   NIR_PASS_V(nir, nir_propagate_invariant);

   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);

   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   NIR_PASS_V(nir, nir_opt_copy_prop_vars);
   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);

   /* ir3 doesn't support indirect input/output */
   NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out);

   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);

   nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs, stage);
   nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs, stage);

   NIR_PASS_V(nir, nir_lower_system_values);
   NIR_PASS_V(nir, nir_lower_frexp);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, nir_lower_input_attachments, true);

   NIR_PASS_V(nir, tu_lower_io, shader, layout);

   NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);

   if (stage == MESA_SHADER_FRAGMENT) {
      /* NOTE: lower load_barycentric_at_sample first, since it
       * produces load_barycentric_at_offset:
       */
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);

      NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
   }

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* num_uniforms is only used by ir3 for the size of UBO 0 (push constants) */
   nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE / 16;

   shader->ir3_shader.compiler = dev->compiler;
   shader->ir3_shader.type = stage;
   shader->ir3_shader.nir = nir;

   return shader;
}

void
tu_shader_destroy(struct tu_device *dev,
                  struct tu_shader *shader,
                  const VkAllocationCallbacks *alloc)
{
   if (shader->ir3_shader.nir)
      ralloc_free(shader->ir3_shader.nir);

   for (uint32_t i = 0; i < 1 + shader->has_binning_pass; i++) {
      if (shader->variants[i].ir)
         ir3_destroy(shader->variants[i].ir);
   }

   if (shader->ir3_shader.const_state.immediates)
      free(shader->ir3_shader.const_state.immediates);
   if (shader->binary)
      free(shader->binary);
   if (shader->binning_binary)
      free(shader->binning_binary);

   vk_free2(&dev->alloc, alloc, shader);
}

void
tu_shader_compile_options_init(
   struct tu_shader_compile_options *options,
   const VkGraphicsPipelineCreateInfo *pipeline_info)
{
   *options = (struct tu_shader_compile_options) {
      /* TODO ir3_key */

      /* TODO: VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT
       * Some optimizations need to happen, otherwise the shader might not
       * compile.
       */
      .optimize = true,
      .include_binning_pass = true,
   };
}

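/*
 * Compile and assemble a single ir3 variant.  A non-NULL `nonbinning` marks
 * the variant being compiled as the binning-pass counterpart of that
 * variant.  Returns the assembled binary, or NULL on failure.
 */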
static uint32_t *
tu_compile_shader_variant(struct ir3_shader *shader,
                          const struct ir3_shader_key *key,
                          struct ir3_shader_variant *nonbinning,
                          struct ir3_shader_variant *variant)
{
   variant->shader = shader;
   variant->type = shader->type;
   variant->key = *key;
   variant->binning_pass = !!nonbinning;
   variant->nonbinning = nonbinning;

   int ret = ir3_compile_shader_nir(shader->compiler, variant);
   if (ret)
      return NULL;

   /* when assembly fails, we rely on tu_shader_destroy to clean up the
    * variant
    */
   return ir3_shader_assemble(variant, shader->compiler->gpu_id);
}

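/*
 * Optionally optimize the shader's NIR, then compile the main ir3 variant,
 * plus a binning-pass variant for vertex shaders when the options request
 * one.
 */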
VkResult
tu_shader_compile(struct tu_device *dev,
                  struct tu_shader *shader,
                  const struct tu_shader *next_stage,
                  const struct tu_shader_compile_options *options,
                  const VkAllocationCallbacks *alloc)
{
   if (options->optimize) {
      /* ignore the key for the first pass of optimization */
      ir3_optimize_nir(&shader->ir3_shader, shader->ir3_shader.nir, NULL);

      if (unlikely(dev->physical_device->instance->debug_flags &
                   TU_DEBUG_NIR)) {
         fprintf(stderr, "optimized nir:\n");
         nir_print_shader(shader->ir3_shader.nir, stderr);
      }
   }

   shader->binary = tu_compile_shader_variant(
      &shader->ir3_shader, &options->key, NULL, &shader->variants[0]);
   if (!shader->binary)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* compile another variant for the binning pass */
   if (options->include_binning_pass &&
       shader->ir3_shader.type == MESA_SHADER_VERTEX) {
      shader->binning_binary = tu_compile_shader_variant(
         &shader->ir3_shader, &options->key, &shader->variants[0],
         &shader->variants[1]);
      if (!shader->binning_binary)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      shader->has_binning_pass = true;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_IR3)) {
      fprintf(stderr, "disassembled ir3:\n");
      fprintf(stderr, "shader: %s\n",
              gl_shader_stage_name(shader->ir3_shader.type));
      ir3_shader_disasm(&shader->variants[0], shader->binary, stderr);

      if (shader->has_binning_pass) {
         fprintf(stderr, "disassembled ir3:\n");
         fprintf(stderr, "shader: %s (binning)\n",
                 gl_shader_stage_name(shader->ir3_shader.type));
         ir3_shader_disasm(&shader->variants[1], shader->binning_binary,
                           stderr);
      }
   }

   return VK_SUCCESS;
}

VkResult
tu_CreateShaderModule(VkDevice _device,
                      const VkShaderModuleCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkShaderModule *pShaderModule)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);
   assert(pCreateInfo->codeSize % 4 == 0);

   module = vk_alloc2(&device->alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   module->code_size = pCreateInfo->codeSize;
   memcpy(module->code, pCreateInfo->pCode, pCreateInfo->codeSize);

   _mesa_sha1_compute(module->code, module->code_size, module->sha1);

   *pShaderModule = tu_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void
tu_DestroyShaderModule(VkDevice _device,
                       VkShaderModule _module,
                       const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_shader_module, module, _module);

   if (!module)
      return;

   vk_free2(&device->alloc, pAllocator, module);
}