turnip: Refactor the intrinsic lowering.
[mesa.git] src/freedreno/vulkan/tu_shader.c
/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "spirv/nir_spirv.h"
#include "util/mesa-sha1.h"

#include "ir3/ir3_nir.h"

static nir_shader *
tu_spirv_to_nir(struct ir3_compiler *compiler,
                const uint32_t *words,
                size_t word_count,
                gl_shader_stage stage,
                const char *entry_point_name,
                const VkSpecializationInfo *spec_info)
{
   /* TODO these are made-up */
   const struct spirv_to_nir_options spirv_options = {
      .frag_coord_is_sysval = true,
      .lower_ubo_ssbo_access_to_offsets = true,
      .caps = { false },
   };
   const nir_shader_compiler_options *nir_options =
      ir3_get_compiler_options(compiler);

   /* convert VkSpecializationInfo */
   struct nir_spirv_specialization *spec = NULL;
   uint32_t num_spec = 0;
   if (spec_info && spec_info->mapEntryCount) {
      spec = malloc(sizeof(*spec) * spec_info->mapEntryCount);
      if (!spec)
         return NULL;

      for (uint32_t i = 0; i < spec_info->mapEntryCount; i++) {
         const VkSpecializationMapEntry *entry = &spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry->offset;
         assert(data + entry->size <= spec_info->pData + spec_info->dataSize);
         spec[i].id = entry->constantID;
         if (entry->size == 8)
            spec[i].data64 = *(const uint64_t *) data;
         else
            spec[i].data32 = *(const uint32_t *) data;
         spec[i].defined_on_module = false;
      }

      num_spec = spec_info->mapEntryCount;
   }

   nir_shader *nir =
      spirv_to_nir(words, word_count, spec, num_spec, stage, entry_point_name,
                   &spirv_options, nir_options);

   free(spec);

   assert(nir->info.stage == stage);
   nir_validate_shader(nir, "after spirv_to_nir");

   return nir;
}

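/* Insertion-sort the variable list by data.location (ascending).  Used
 * before nir_assign_io_var_locations so driver locations get assigned in
 * location order.
 */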
static void
tu_sort_variables_by_location(struct exec_list *variables)
{
   struct exec_list sorted;
   exec_list_make_empty(&sorted);

   nir_foreach_variable_safe(var, variables)
   {
      exec_node_remove(&var->node);

      /* insert the variable into the sorted list */
      nir_variable *next = NULL;
      nir_foreach_variable(tmp, &sorted)
      {
         if (var->data.location < tmp->data.location) {
            next = tmp;
            break;
         }
      }
      if (next)
         exec_node_insert_node_before(&next->node, &var->node);
      else
         exec_list_push_tail(&sorted, &var->node);
   }

   exec_list_move_nodes_to(&sorted, variables);
}

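/* Add a (set, binding) entry to a descriptor map, or find the existing one.
 * Returns the starting index of the entry in the flattened descriptor
 * space, i.e. the sum of the array sizes of all preceding entries.
 */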
static unsigned
map_add(struct tu_descriptor_map *map, int set, int binding, int value,
        int array_size)
{
   unsigned index = 0;
   for (unsigned i = 0; i < map->num; i++) {
      if (set == map->set[i] && binding == map->binding[i]) {
         assert(value == map->value[i]);
         assert(array_size == map->array_size[i]);
         return index;
      }
      index += map->array_size[i];
   }

   assert(index == map->num_desc);

   map->set[map->num] = set;
   map->binding[map->num] = binding;
   map->value[map->num] = value;
   map->array_size[map->num] = array_size;
   map->num++;
   map->num_desc += array_size;

   return index;
}

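/* Walk the deref chain of a texture/sampler source, splitting the array
 * index into a constant base plus an optional dynamic remainder.  The deref
 * source is rewritten to a texture/sampler_offset source (or removed when
 * the index is fully constant), and the descriptor is registered in the
 * texture or sampler map so the base can be folded into
 * texture_index/sampler_index.
 */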
static void
lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
                        struct tu_shader *shader,
                        const struct tu_pipeline_layout *layout)
{
   nir_ssa_def *index = NULL;
   unsigned base_index = 0;
   unsigned array_elements = 1;
   nir_tex_src *src = &instr->src[src_idx];
   bool is_sampler = src->src_type == nir_tex_src_sampler_deref;

   /* We compute first the offsets */
   nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
   while (deref->deref_type != nir_deref_type_var) {
      assert(deref->parent.is_ssa);
      nir_deref_instr *parent =
         nir_instr_as_deref(deref->parent.ssa->parent_instr);

      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index) && index == NULL) {
         /* We're still building a direct index */
         base_index += nir_src_as_uint(deref->arr.index) * array_elements;
      } else {
         if (index == NULL) {
            /* We used to be direct but not anymore */
            index = nir_imm_int(b, base_index);
            base_index = 0;
         }

         index = nir_iadd(b, index,
                          nir_imul(b, nir_imm_int(b, array_elements),
                                   nir_ssa_for_src(b, deref->arr.index, 1)));
      }

      array_elements *= glsl_get_length(parent->type);

      deref = parent;
   }

   if (index)
      index = nir_umin(b, index, nir_imm_int(b, array_elements - 1));

   /* We have the offsets, we apply them, rewriting the source or removing
    * instr if needed
    */
   if (index) {
      nir_instr_rewrite_src(&instr->instr, &src->src,
                            nir_src_for_ssa(index));

      src->src_type = is_sampler ?
         nir_tex_src_sampler_offset :
         nir_tex_src_texture_offset;

      instr->texture_array_size = array_elements;
   } else {
      nir_tex_instr_remove_src(instr, src_idx);
   }

   uint32_t set = deref->var->data.descriptor_set;
   uint32_t binding = deref->var->data.binding;
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];

   int desc_index = map_add(is_sampler ?
                            &shader->sampler_map : &shader->texture_map,
                            deref->var->data.descriptor_set,
                            deref->var->data.binding,
                            deref->var->data.index,
                            binding_layout->array_size) + base_index;
   if (is_sampler)
      instr->sampler_index = desc_index;
   else
      instr->texture_index = desc_index;
}

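/* Lower the texture and sampler deref sources of a tex instruction, if
 * present.  Returns true if anything was lowered.
 */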
static bool
lower_sampler(nir_builder *b, nir_tex_instr *instr, struct tu_shader *shader,
              const struct tu_pipeline_layout *layout)
{
   int texture_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);

   if (texture_idx >= 0)
      lower_tex_src_to_offset(b, instr, texture_idx, shader, layout);

   int sampler_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);

   if (sampler_idx >= 0)
      lower_tex_src_to_offset(b, instr, sampler_idx, shader, layout);

   if (texture_idx < 0 && sampler_idx < 0)
      return false;

   return true;
}

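/* Turn load_push_constant into a load_ubo from block 0; turnip reserves UBO
 * index 0 for push constants (see lower_vulkan_resource_index below).
 */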
static void
lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *instr,
                         struct tu_shader *shader)
{
   /* note: ir3 wants load_ubo, not load_uniform */
   assert(nir_intrinsic_base(instr) == 0);

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   load->num_components = instr->num_components;
   load->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
   load->src[1] = instr->src[0];
   nir_ssa_dest_init(&load->instr, &load->dest,
                     load->num_components, instr->dest.ssa.bit_size,
                     instr->dest.ssa.name);
   nir_builder_instr_insert(b, &load->instr);
   nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));

   nir_instr_remove(&instr->instr);
}

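/* Replace vulkan_resource_index with a flat UBO/SSBO index taken from the
 * shader's descriptor maps.  UBO indices are offset by one because index 0
 * is reserved for push constants.
 */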
static void
lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr,
                            struct tu_shader *shader,
                            const struct tu_pipeline_layout *layout)
{
   nir_const_value *const_val = nir_src_as_const_value(instr->src[0]);

   unsigned set = nir_intrinsic_desc_set(instr);
   unsigned binding = nir_intrinsic_binding(instr);
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];
   unsigned index = 0;

   switch (nir_intrinsic_desc_type(instr)) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      if (!const_val || const_val->u32 != 0)
         tu_finishme("non-zero vulkan_resource_index array index");
      /* skip index 0 which is used for push constants */
      index = map_add(&shader->ubo_map, set, binding, 0,
                      binding_layout->array_size) + 1;
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      if (!const_val)
         tu_finishme("non-constant vulkan_resource_index array index");
      index = map_add(&shader->ssbo_map, set, binding, 0,
                      binding_layout->array_size);
      index += const_val->u32;
      break;
   default:
      tu_finishme("unsupported desc_type for vulkan_resource_index");
      break;
   }

   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(nir_imm_int(b, index)));
   nir_instr_remove(&instr->instr);
}

static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
                struct tu_shader *shader,
                const struct tu_pipeline_layout *layout)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_layer_id:
      /* TODO: remove this when layered rendering is implemented */
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(nir_imm_int(b, 0)));
      nir_instr_remove(&instr->instr);
      return true;

   case nir_intrinsic_load_push_constant:
      lower_load_push_constant(b, instr, shader);
      return true;

   case nir_intrinsic_vulkan_resource_index:
      lower_vulkan_resource_index(b, instr, shader, layout);
      return true;

   default:
      return false;
   }
}

static bool
lower_impl(nir_function_impl *impl, struct tu_shader *shader,
           const struct tu_pipeline_layout *layout)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         b.cursor = nir_before_instr(instr);
         switch (instr->type) {
         case nir_instr_type_tex:
            progress |= lower_sampler(&b, nir_instr_as_tex(instr), shader, layout);
            break;
         case nir_instr_type_intrinsic:
            progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader, layout);
            break;
         default:
            break;
         }
      }
   }

   return progress;
}

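/* Run the descriptor/intrinsic lowering above over every function impl and
 * fix up the shader info that ir3 expects.
 */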
static bool
tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader,
            const struct tu_pipeline_layout *layout)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_impl(function->impl, tu_shader, layout);
   }

   /* spirv_to_nir produces num_ssbos equal to the number of SSBO-containing
    * variables, while ir3 wants the number of descriptors (like the gallium
    * path).
    */
   shader->info.num_ssbos = tu_shader->ssbo_map.num_desc;

   return progress;
}

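/* Create a tu_shader from a shader module: translate SPIR-V to NIR, run the
 * lowering passes turnip needs before handing the shader to ir3, and
 * initialize the embedded ir3_shader.  The actual compile happens later in
 * tu_shader_compile.
 */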
struct tu_shader *
tu_shader_create(struct tu_device *dev,
                 gl_shader_stage stage,
                 const VkPipelineShaderStageCreateInfo *stage_info,
                 struct tu_pipeline_layout *layout,
                 const VkAllocationCallbacks *alloc)
{
   const struct tu_shader_module *module =
      tu_shader_module_from_handle(stage_info->module);
   struct tu_shader *shader;

   const uint32_t max_variant_count = (stage == MESA_SHADER_VERTEX) ? 2 : 1;
   shader = vk_zalloc2(
      &dev->alloc, alloc,
      sizeof(*shader) + sizeof(struct ir3_shader_variant) * max_variant_count,
      8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!shader)
      return NULL;

   /* translate SPIR-V to NIR */
   assert(module->code_size % 4 == 0);
   nir_shader *nir = tu_spirv_to_nir(
      dev->compiler, (const uint32_t *) module->code, module->code_size / 4,
      stage, stage_info->pName, stage_info->pSpecializationInfo);
   if (!nir) {
      vk_free2(&dev->alloc, alloc, shader);
      return NULL;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_NIR)) {
      fprintf(stderr, "translated nir:\n");
      nir_print_shader(nir, stderr);
   }

   /* multi step inlining procedure */
   NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_opt_deref);
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (!func->is_entrypoint)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   NIR_PASS_V(nir, nir_lower_constant_initializers, ~nir_var_function_temp);

   /* Split member structs. We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);

   NIR_PASS_V(nir, nir_propagate_invariant);

   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);

   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   NIR_PASS_V(nir, nir_opt_copy_prop_vars);
   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);

   /* ir3 doesn't support indirect input/output */
   NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out);

   switch (stage) {
   case MESA_SHADER_VERTEX:
      tu_sort_variables_by_location(&nir->outputs);
      break;
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_GEOMETRY:
      tu_sort_variables_by_location(&nir->inputs);
      tu_sort_variables_by_location(&nir->outputs);
      break;
   case MESA_SHADER_FRAGMENT:
      tu_sort_variables_by_location(&nir->inputs);
      break;
   case MESA_SHADER_COMPUTE:
      break;
   default:
      unreachable("invalid gl_shader_stage");
      break;
   }

   nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs, stage);
   nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs, stage);

   NIR_PASS_V(nir, nir_lower_system_values);
   NIR_PASS_V(nir, nir_lower_frexp);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, nir_lower_input_attachments, true);

   NIR_PASS_V(nir, tu_lower_io, shader, layout);

   NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);

   if (stage == MESA_SHADER_FRAGMENT) {
      /* NOTE: lower load_barycentric_at_sample first, since it
       * produces load_barycentric_at_offset:
       */
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);

      NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
   }

   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* num_uniforms only used by ir3 for size of ubo 0 (push constants) */
   nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE / 16;

   shader->ir3_shader.compiler = dev->compiler;
   shader->ir3_shader.type = stage;
   shader->ir3_shader.nir = nir;

   return shader;
}

void
tu_shader_destroy(struct tu_device *dev,
                  struct tu_shader *shader,
                  const VkAllocationCallbacks *alloc)
{
   if (shader->ir3_shader.nir)
      ralloc_free(shader->ir3_shader.nir);

   for (uint32_t i = 0; i < 1 + shader->has_binning_pass; i++) {
      if (shader->variants[i].ir)
         ir3_destroy(shader->variants[i].ir);
   }

   if (shader->ir3_shader.const_state.immediates)
      free(shader->ir3_shader.const_state.immediates);
   if (shader->binary)
      free(shader->binary);
   if (shader->binning_binary)
      free(shader->binning_binary);

   vk_free2(&dev->alloc, alloc, shader);
}

void
tu_shader_compile_options_init(
   struct tu_shader_compile_options *options,
   const VkGraphicsPipelineCreateInfo *pipeline_info)
{
   *options = (struct tu_shader_compile_options) {
      /* TODO ir3_key */

      /* TODO: VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT
       * some optimizations need to happen otherwise shader might not compile
       */
      .optimize = true,
      .include_binning_pass = true,
   };
}

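/* Fill in and compile a single ir3 variant.  A non-NULL `nonbinning`
 * variant marks this variant as the binning-pass companion of that variant.
 * Returns the assembled binary, or NULL on failure.
 */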
static uint32_t *
tu_compile_shader_variant(struct ir3_shader *shader,
                          const struct ir3_shader_key *key,
                          struct ir3_shader_variant *nonbinning,
                          struct ir3_shader_variant *variant)
{
   variant->shader = shader;
   variant->type = shader->type;
   variant->key = *key;
   variant->binning_pass = !!nonbinning;
   variant->nonbinning = nonbinning;

   int ret = ir3_compile_shader_nir(shader->compiler, variant);
   if (ret)
      return NULL;

   /* when assemble fails, we rely on tu_shader_destroy to clean up the
    * variant
    */
   return ir3_shader_assemble(variant, shader->compiler->gpu_id);
}

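/* Compile the main variant (and, for vertex shaders, a binning-pass
 * variant) of a tu_shader, optionally running the ir3 NIR optimization
 * loop first.
 */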
VkResult
tu_shader_compile(struct tu_device *dev,
                  struct tu_shader *shader,
                  const struct tu_shader *next_stage,
                  const struct tu_shader_compile_options *options,
                  const VkAllocationCallbacks *alloc)
{
   if (options->optimize) {
      /* ignore the key for the first pass of optimization */
      ir3_optimize_nir(&shader->ir3_shader, shader->ir3_shader.nir, NULL);

      if (unlikely(dev->physical_device->instance->debug_flags &
                   TU_DEBUG_NIR)) {
         fprintf(stderr, "optimized nir:\n");
         nir_print_shader(shader->ir3_shader.nir, stderr);
      }
   }

   shader->binary = tu_compile_shader_variant(
      &shader->ir3_shader, &options->key, NULL, &shader->variants[0]);
   if (!shader->binary)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* compile another variant for the binning pass */
   if (options->include_binning_pass &&
       shader->ir3_shader.type == MESA_SHADER_VERTEX) {
      shader->binning_binary = tu_compile_shader_variant(
         &shader->ir3_shader, &options->key, &shader->variants[0],
         &shader->variants[1]);
      if (!shader->binning_binary)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      shader->has_binning_pass = true;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_IR3)) {
      fprintf(stderr, "disassembled ir3:\n");
      fprintf(stderr, "shader: %s\n",
              gl_shader_stage_name(shader->ir3_shader.type));
      ir3_shader_disasm(&shader->variants[0], shader->binary, stderr);

      if (shader->has_binning_pass) {
         fprintf(stderr, "disassembled ir3:\n");
         fprintf(stderr, "shader: %s (binning)\n",
                 gl_shader_stage_name(shader->ir3_shader.type));
         ir3_shader_disasm(&shader->variants[1], shader->binning_binary,
                           stderr);
      }
   }

   return VK_SUCCESS;
}

VkResult
tu_CreateShaderModule(VkDevice _device,
                      const VkShaderModuleCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkShaderModule *pShaderModule)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);
   assert(pCreateInfo->codeSize % 4 == 0);

   module = vk_alloc2(&device->alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   module->code_size = pCreateInfo->codeSize;
   memcpy(module->code, pCreateInfo->pCode, pCreateInfo->codeSize);

   _mesa_sha1_compute(module->code, module->code_size, module->sha1);

   *pShaderModule = tu_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void
tu_DestroyShaderModule(VkDevice _device,
                       VkShaderModule _module,
                       const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_shader_module, module, _module);

   if (!module)
      return;

   vk_free2(&device->alloc, pAllocator, module);
}