/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "spirv/nir_spirv.h"
#include "util/mesa-sha1.h"

#include "ir3/ir3_nir.h"

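/* Translate a SPIR-V module to NIR with the ir3 compiler options, converting
 * VkSpecializationInfo into the form spirv_to_nir expects.  Returns NULL if
 * the specialization array can't be allocated.
 */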
static nir_shader *
tu_spirv_to_nir(struct ir3_compiler *compiler,
                const uint32_t *words,
                size_t word_count,
                gl_shader_stage stage,
                const char *entry_point_name,
                const VkSpecializationInfo *spec_info)
{
   /* TODO these are made-up */
   const struct spirv_to_nir_options spirv_options = {
      .frag_coord_is_sysval = true,
      .lower_ubo_ssbo_access_to_offsets = true,
      .caps = { false },
   };
   const nir_shader_compiler_options *nir_options =
      ir3_get_compiler_options(compiler);

   /* convert VkSpecializationInfo */
   struct nir_spirv_specialization *spec = NULL;
   uint32_t num_spec = 0;
   if (spec_info && spec_info->mapEntryCount) {
      spec = malloc(sizeof(*spec) * spec_info->mapEntryCount);
      if (!spec)
         return NULL;

      for (uint32_t i = 0; i < spec_info->mapEntryCount; i++) {
         const VkSpecializationMapEntry *entry = &spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry->offset;
         assert(data + entry->size <= spec_info->pData + spec_info->dataSize);
         spec[i].id = entry->constantID;
         if (entry->size == 8)
            spec[i].data64 = *(const uint64_t *) data;
         else
            spec[i].data32 = *(const uint32_t *) data;
         spec[i].defined_on_module = false;
      }

      num_spec = spec_info->mapEntryCount;
   }

   nir_shader *nir =
      spirv_to_nir(words, word_count, spec, num_spec, stage, entry_point_name,
                   &spirv_options, nir_options);

   free(spec);

   assert(nir->info.stage == stage);
   nir_validate_shader(nir, "after spirv_to_nir");

   return nir;
}

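/* Add a (set, binding) entry to a descriptor map, or find it if it was
 * already added.  The return value is the index of the first descriptor for
 * this binding, i.e. the running total of the array sizes of the entries
 * added before it.
 */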
static unsigned
map_add(struct tu_descriptor_map *map, int set, int binding, int value,
        int array_size)
{
   unsigned index = 0;
   for (unsigned i = 0; i < map->num; i++) {
      if (set == map->set[i] && binding == map->binding[i]) {
         assert(value == map->value[i]);
         assert(array_size == map->array_size[i]);
         return index;
      }
      index += map->array_size[i];
   }

   assert(index == map->num_desc);

   map->set[map->num] = set;
   map->binding[map->num] = binding;
   map->value[map->num] = value;
   map->array_size[map->num] = array_size;
   map->num++;
   map->num_desc += array_size;

   return index;
}

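/* Rewrite a texture or sampler deref source into a flat descriptor index.
 * Constant array indices are folded into texture_index/sampler_index via the
 * descriptor map; any remaining dynamic part becomes a texture_offset or
 * sampler_offset source.
 */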
static void
lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
                        struct tu_shader *shader,
                        const struct tu_pipeline_layout *layout)
{
   nir_ssa_def *index = NULL;
   unsigned base_index = 0;
   unsigned array_elements = 1;
   nir_tex_src *src = &instr->src[src_idx];
   bool is_sampler = src->src_type == nir_tex_src_sampler_deref;

   /* First, walk the deref chain and compute the offsets */
   nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
   while (deref->deref_type != nir_deref_type_var) {
      assert(deref->parent.is_ssa);
      nir_deref_instr *parent =
         nir_instr_as_deref(deref->parent.ssa->parent_instr);

      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index) && index == NULL) {
         /* We're still building a direct index */
         base_index += nir_src_as_uint(deref->arr.index) * array_elements;
      } else {
         if (index == NULL) {
            /* We used to be direct but not anymore */
            index = nir_imm_int(b, base_index);
            base_index = 0;
         }

         index = nir_iadd(b, index,
                          nir_imul(b, nir_imm_int(b, array_elements),
                                   nir_ssa_for_src(b, deref->arr.index, 1)));
      }

      array_elements *= glsl_get_length(parent->type);

      deref = parent;
   }

   if (index)
      index = nir_umin(b, index, nir_imm_int(b, array_elements - 1));

   /* Now that we have the offsets, apply them: either rewrite the source to
    * carry the dynamic index, or remove the source if it is no longer needed.
    */
   if (index) {
      nir_instr_rewrite_src(&instr->instr, &src->src,
                            nir_src_for_ssa(index));

      src->src_type = is_sampler ?
         nir_tex_src_sampler_offset :
         nir_tex_src_texture_offset;
   } else {
      nir_tex_instr_remove_src(instr, src_idx);
   }

   uint32_t set = deref->var->data.descriptor_set;
   uint32_t binding = deref->var->data.binding;
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];

   int desc_index = map_add(is_sampler ?
                            &shader->sampler_map : &shader->texture_map,
                            deref->var->data.descriptor_set,
                            deref->var->data.binding,
                            deref->var->data.index,
                            binding_layout->array_size) + base_index;
   if (is_sampler)
      instr->sampler_index = desc_index;
   else
      instr->texture_index = desc_index;
}

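/* Lower the texture and sampler deref sources of a tex instruction, if
 * present.  Returns true if anything was lowered.
 */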
static bool
lower_sampler(nir_builder *b, nir_tex_instr *instr, struct tu_shader *shader,
              const struct tu_pipeline_layout *layout)
{
   int texture_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);

   if (texture_idx >= 0)
      lower_tex_src_to_offset(b, instr, texture_idx, shader, layout);

   int sampler_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);

   if (sampler_idx >= 0)
      lower_tex_src_to_offset(b, instr, sampler_idx, shader, layout);

   if (texture_idx < 0 && sampler_idx < 0)
      return false;

   return true;
}

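/* Turn load_push_constant into a load_ubo from UBO index 0, which is where
 * the push constants live (see the "+ 1" in lower_vulkan_resource_index).
 */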
static void
lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *instr,
                         struct tu_shader *shader)
{
   /* note: ir3 wants load_ubo, not load_uniform */
   assert(nir_intrinsic_base(instr) == 0);

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   load->num_components = instr->num_components;
   load->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
   load->src[1] = instr->src[0];
   nir_ssa_dest_init(&load->instr, &load->dest,
                     load->num_components, instr->dest.ssa.bit_size,
                     instr->dest.ssa.name);
   nir_builder_instr_insert(b, &load->instr);
   nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));

   nir_instr_remove(&instr->instr);
}

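/* Replace vulkan_resource_index with a constant UBO/SSBO index taken from
 * the shader's descriptor maps.  UBO indices are shifted up by one because
 * index 0 is reserved for push constants.
 */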
static void
lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr,
                            struct tu_shader *shader,
                            const struct tu_pipeline_layout *layout)
{
   nir_const_value *const_val = nir_src_as_const_value(instr->src[0]);

   unsigned set = nir_intrinsic_desc_set(instr);
   unsigned binding = nir_intrinsic_binding(instr);
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];
   unsigned index = 0;

   switch (nir_intrinsic_desc_type(instr)) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      if (!const_val)
         tu_finishme("non-constant vulkan_resource_index array index");
      /* skip index 0 which is used for push constants */
      index = map_add(&shader->ubo_map, set, binding, 0,
                      binding_layout->array_size) + 1;
      index += const_val->u32;
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      if (!const_val)
         tu_finishme("non-constant vulkan_resource_index array index");
      index = map_add(&shader->ssbo_map, set, binding, 0,
                      binding_layout->array_size);
      index += const_val->u32;
      break;
   default:
      tu_finishme("unsupported desc_type for vulkan_resource_index");
      break;
   }

   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(nir_imm_int(b, index)));
   nir_instr_remove(&instr->instr);
}

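/* Rewrite an image deref intrinsic to use a flat index from the image map,
 * adding any (possibly dynamic) array index on top of the base.
 */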
static void
lower_image_deref(nir_builder *b,
                  nir_intrinsic_instr *instr, struct tu_shader *shader,
                  const struct tu_pipeline_layout *layout)
{
   nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   uint32_t set = var->data.descriptor_set;
   uint32_t binding = var->data.binding;
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];

   nir_ssa_def *index = nir_imm_int(b,
                                    map_add(&shader->image_map,
                                            set, binding, var->data.index,
                                            binding_layout->array_size));
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);
      index = nir_iadd(b, index, nir_ssa_for_src(b, deref->arr.index, 1));
   }
   nir_rewrite_image_intrinsic(instr, index, false);
}

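/* Lower the descriptor-related intrinsics (plus load_layer_id, which is
 * stubbed out to zero for now).  Returns true if the instruction was lowered.
 */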
static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
                struct tu_shader *shader,
                const struct tu_pipeline_layout *layout)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_layer_id:
      /* TODO: remove this when layered rendering is implemented */
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(nir_imm_int(b, 0)));
      nir_instr_remove(&instr->instr);
      return true;

   case nir_intrinsic_load_push_constant:
      lower_load_push_constant(b, instr, shader);
      return true;

   case nir_intrinsic_vulkan_resource_index:
      lower_vulkan_resource_index(b, instr, shader, layout);
      return true;

   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_image_deref_samples:
   case nir_intrinsic_image_deref_load_param_intel:
   case nir_intrinsic_image_deref_load_raw_intel:
   case nir_intrinsic_image_deref_store_raw_intel:
      lower_image_deref(b, instr, shader, layout);
      return true;

   default:
      return false;
   }
}

static bool
lower_impl(nir_function_impl *impl, struct tu_shader *shader,
           const struct tu_pipeline_layout *layout)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         b.cursor = nir_before_instr(instr);
         switch (instr->type) {
         case nir_instr_type_tex:
            progress |= lower_sampler(&b, nir_instr_as_tex(instr), shader, layout);
            break;
         case nir_instr_type_intrinsic:
            progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader, layout);
            break;
         default:
            break;
         }
      }
   }

   return progress;
}

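/* Lower descriptor derefs and related intrinsics in every function to the
 * flat indices that the ir3 backend expects.
 */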
static bool
tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader,
            const struct tu_pipeline_layout *layout)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_impl(function->impl, tu_shader, layout);
   }

   /* spirv_to_nir produces num_ssbos equal to the number of SSBO-containing
    * variables, while ir3 wants the number of descriptors (like the gallium
    * path).
    */
   shader->info.num_ssbos = tu_shader->ssbo_map.num_desc;

   return progress;
}

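/* Create a tu_shader from a shader module: translate SPIR-V to NIR, run the
 * lowering passes turnip needs before handing the shader to ir3, and stash
 * the resulting NIR in the embedded ir3_shader for later compilation.
 */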
struct tu_shader *
tu_shader_create(struct tu_device *dev,
                 gl_shader_stage stage,
                 const VkPipelineShaderStageCreateInfo *stage_info,
                 struct tu_pipeline_layout *layout,
                 const VkAllocationCallbacks *alloc)
{
   const struct tu_shader_module *module =
      tu_shader_module_from_handle(stage_info->module);
   struct tu_shader *shader;

   const uint32_t max_variant_count = (stage == MESA_SHADER_VERTEX) ? 2 : 1;
   shader = vk_zalloc2(
      &dev->alloc, alloc,
      sizeof(*shader) + sizeof(struct ir3_shader_variant) * max_variant_count,
      8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!shader)
      return NULL;

   /* translate SPIR-V to NIR */
   assert(module->code_size % 4 == 0);
   nir_shader *nir = tu_spirv_to_nir(
      dev->compiler, (const uint32_t *) module->code, module->code_size / 4,
      stage, stage_info->pName, stage_info->pSpecializationInfo);
   if (!nir) {
      vk_free2(&dev->alloc, alloc, shader);
      return NULL;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_NIR)) {
      fprintf(stderr, "translated nir:\n");
      nir_print_shader(nir, stderr);
   }

   /* multi-step inlining procedure */
   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_opt_deref);
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (!func->is_entrypoint)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   NIR_PASS_V(nir, nir_lower_variable_initializers, ~nir_var_function_temp);

   /* Split member structs.  We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);

   NIR_PASS_V(nir, nir_propagate_invariant);

   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);

   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   NIR_PASS_V(nir, nir_opt_copy_prop_vars);
   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);

   /* ir3 doesn't support indirect input/output */
   NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out);

   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);

   nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs, stage);
   nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs, stage);

   NIR_PASS_V(nir, nir_lower_system_values);
   NIR_PASS_V(nir, nir_lower_frexp);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, nir_lower_input_attachments, true);

   NIR_PASS_V(nir, tu_lower_io, shader, layout);

   NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);

   if (stage == MESA_SHADER_FRAGMENT) {
      /* NOTE: lower load_barycentric_at_sample first, since it
       * produces load_barycentric_at_offset:
       */
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);

      NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
   }

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* num_uniforms is only used by ir3 for the size of UBO 0 (push constants) */
   nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE / 16;

   shader->ir3_shader.compiler = dev->compiler;
   shader->ir3_shader.type = stage;
   shader->ir3_shader.nir = nir;

   return shader;
}

void
tu_shader_destroy(struct tu_device *dev,
                  struct tu_shader *shader,
                  const VkAllocationCallbacks *alloc)
{
   if (shader->ir3_shader.nir)
      ralloc_free(shader->ir3_shader.nir);

   for (uint32_t i = 0; i < 1 + shader->has_binning_pass; i++) {
      if (shader->variants[i].ir)
         ir3_destroy(shader->variants[i].ir);
   }

   if (shader->ir3_shader.const_state.immediates)
      free(shader->ir3_shader.const_state.immediates);
   if (shader->binary)
      free(shader->binary);
   if (shader->binning_binary)
      free(shader->binning_binary);

   vk_free2(&dev->alloc, alloc, shader);
}

void
tu_shader_compile_options_init(
   struct tu_shader_compile_options *options,
   const VkGraphicsPipelineCreateInfo *pipeline_info)
{
   *options = (struct tu_shader_compile_options) {
      /* TODO ir3_key */

      /* TODO: VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT
       * some optimizations are required, otherwise the shader might not compile
       */
      .optimize = true,
      .include_binning_pass = true,
   };
}

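/* Compile and assemble a single ir3 variant.  A non-NULL "nonbinning"
 * variant marks this one as the binning-pass variant.  Returns the assembled
 * binary, or NULL on failure.
 */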
static uint32_t *
tu_compile_shader_variant(struct ir3_shader *shader,
                          const struct ir3_shader_key *key,
                          struct ir3_shader_variant *nonbinning,
                          struct ir3_shader_variant *variant)
{
   variant->shader = shader;
   variant->type = shader->type;
   variant->key = *key;
   variant->binning_pass = !!nonbinning;
   variant->nonbinning = nonbinning;

   int ret = ir3_compile_shader_nir(shader->compiler, variant);
   if (ret)
      return NULL;

   /* When assembly fails, we rely on tu_shader_destroy to clean up the
    * variant.
    */
   return ir3_shader_assemble(variant, shader->compiler->gpu_id);
}

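/* Compile a tu_shader: optionally optimize the NIR, build the main variant,
 * and, when requested, a binning-pass variant for vertex shaders.
 */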
VkResult
tu_shader_compile(struct tu_device *dev,
                  struct tu_shader *shader,
                  const struct tu_shader *next_stage,
                  const struct tu_shader_compile_options *options,
                  const VkAllocationCallbacks *alloc)
{
   if (options->optimize) {
      /* ignore the key for the first pass of optimization */
      ir3_optimize_nir(&shader->ir3_shader, shader->ir3_shader.nir, NULL);

      if (unlikely(dev->physical_device->instance->debug_flags &
                   TU_DEBUG_NIR)) {
         fprintf(stderr, "optimized nir:\n");
         nir_print_shader(shader->ir3_shader.nir, stderr);
      }
   }

   shader->binary = tu_compile_shader_variant(
      &shader->ir3_shader, &options->key, NULL, &shader->variants[0]);
   if (!shader->binary)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* compile another variant for the binning pass */
   if (options->include_binning_pass &&
       shader->ir3_shader.type == MESA_SHADER_VERTEX) {
      shader->binning_binary = tu_compile_shader_variant(
         &shader->ir3_shader, &options->key, &shader->variants[0],
         &shader->variants[1]);
      if (!shader->binning_binary)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      shader->has_binning_pass = true;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_IR3)) {
      fprintf(stderr, "disassembled ir3:\n");
      fprintf(stderr, "shader: %s\n",
              gl_shader_stage_name(shader->ir3_shader.type));
      ir3_shader_disasm(&shader->variants[0], shader->binary, stderr);

      if (shader->has_binning_pass) {
         fprintf(stderr, "disassembled ir3:\n");
         fprintf(stderr, "shader: %s (binning)\n",
                 gl_shader_stage_name(shader->ir3_shader.type));
         ir3_shader_disasm(&shader->variants[1], shader->binning_binary,
                           stderr);
      }
   }

   return VK_SUCCESS;
}

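/* vkCreateShaderModule: just copy the SPIR-V words and compute their SHA-1;
 * nothing is compiled here.
 */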
VkResult
tu_CreateShaderModule(VkDevice _device,
                      const VkShaderModuleCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkShaderModule *pShaderModule)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);
   assert(pCreateInfo->codeSize % 4 == 0);

   module = vk_alloc2(&device->alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   module->code_size = pCreateInfo->codeSize;
   memcpy(module->code, pCreateInfo->pCode, pCreateInfo->codeSize);

   _mesa_sha1_compute(module->code, module->code_size, module->sha1);

   *pShaderModule = tu_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void
tu_DestroyShaderModule(VkDevice _device,
                       VkShaderModule _module,
                       const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_shader_module, module, _module);

   if (!module)
      return;

   vk_free2(&device->alloc, pAllocator, module);
}