/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "spirv/nir_spirv.h"
#include "util/mesa-sha1.h"

#include "ir3/ir3_nir.h"
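
/* Translate SPIR-V to NIR with the options the ir3 backend expects.
 * Returns NULL on allocation failure.
 */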
static nir_shader *
tu_spirv_to_nir(struct ir3_compiler *compiler,
                const uint32_t *words,
                size_t word_count,
                gl_shader_stage stage,
                const char *entry_point_name,
                const VkSpecializationInfo *spec_info)
{
   /* TODO these are made-up */
   const struct spirv_to_nir_options spirv_options = {
      .frag_coord_is_sysval = true,
      .lower_ubo_ssbo_access_to_offsets = true,
   };
   const nir_shader_compiler_options *nir_options =
      ir3_get_compiler_options(compiler);

   /* convert VkSpecializationInfo */
   struct nir_spirv_specialization *spec = NULL;
   uint32_t num_spec = 0;
   if (spec_info && spec_info->mapEntryCount) {
      spec = malloc(sizeof(*spec) * spec_info->mapEntryCount);
      if (!spec)
         return NULL;

      for (uint32_t i = 0; i < spec_info->mapEntryCount; i++) {
         const VkSpecializationMapEntry *entry = &spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry->offset;
         assert(data + entry->size <= spec_info->pData + spec_info->dataSize);
         spec[i].id = entry->constantID;
         if (entry->size == 8)
            spec[i].data64 = *(const uint64_t *) data;
         else
            spec[i].data32 = *(const uint32_t *) data;
         spec[i].defined_on_module = false;
      }

      num_spec = spec_info->mapEntryCount;
   }

   nir_shader *nir =
      spirv_to_nir(words, word_count, spec, num_spec, stage, entry_point_name,
                   &spirv_options, nir_options);

   free(spec);

   assert(nir->info.stage == stage);
   nir_validate_shader(nir, "after spirv_to_nir");

   return nir;
}
static void
tu_sort_variables_by_location(struct exec_list *variables)
{
   struct exec_list sorted;
   exec_list_make_empty(&sorted);

   nir_foreach_variable_safe(var, variables)
   {
      exec_node_remove(&var->node);

      /* insert the variable into the sorted list */
      nir_variable *next = NULL;
      nir_foreach_variable(tmp, &sorted)
      {
         if (var->data.location < tmp->data.location) {
            next = tmp;
            break;
         }
      }

      if (next)
         exec_node_insert_node_before(&next->node, &var->node);
      else
         exec_list_push_tail(&sorted, &var->node);
   }

   exec_list_move_nodes_to(&sorted, variables);
}
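
/* Record a (set, binding) pair in a descriptor map and return the flat
 * descriptor index it starts at.  A pair that was already added must come
 * back with the same value and array size, and keeps its old index.
 */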
static unsigned
map_add(struct tu_descriptor_map *map, int set, int binding, int value,
        int array_size)
{
   unsigned index = 0;
   for (unsigned i = 0; i < map->num; i++) {
      if (set == map->set[i] && binding == map->binding[i]) {
         assert(value == map->value[i]);
         assert(array_size == map->array_size[i]);
         return index;
      }
      index += map->array_size[i];
   }

   assert(index == map->num_desc);

   map->set[map->num] = set;
   map->binding[map->num] = binding;
   map->value[map->num] = value;
   map->array_size[map->num] = array_size;
   map->num++;
   map->num_desc += array_size;

   return index;
}
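
/* Rewrite a texture/sampler deref source into descriptor indices:
 * constant array indices accumulate into base_index, and any dynamic
 * remainder becomes a texture/sampler_offset source clamped to the
 * array bounds.
 */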
static void
lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
                        struct tu_shader *shader,
                        const struct tu_pipeline_layout *layout)
{
   nir_ssa_def *index = NULL;
   unsigned base_index = 0;
   unsigned array_elements = 1;
   nir_tex_src *src = &instr->src[src_idx];
   bool is_sampler = src->src_type == nir_tex_src_sampler_deref;

   /* We compute first the offsets */
   nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
   while (deref->deref_type != nir_deref_type_var) {
      assert(deref->parent.is_ssa);
      nir_deref_instr *parent =
         nir_instr_as_deref(deref->parent.ssa->parent_instr);

      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index) && index == NULL) {
         /* We're still building a direct index */
         base_index += nir_src_as_uint(deref->arr.index) * array_elements;
      } else {
         if (index == NULL) {
            /* We used to be direct but not anymore */
            index = nir_imm_int(b, base_index);
            base_index = 0;
         }

         index = nir_iadd(b, index,
                          nir_imul(b, nir_imm_int(b, array_elements),
                                   nir_ssa_for_src(b, deref->arr.index, 1)));
      }

      array_elements *= glsl_get_length(parent->type);

      deref = parent;
   }

   if (index)
      index = nir_umin(b, index, nir_imm_int(b, array_elements - 1));

   /* We have the offsets, we apply them, rewriting the source or removing
    * instr if needed
    */
   if (index) {
      nir_instr_rewrite_src(&instr->instr, &src->src,
                            nir_src_for_ssa(index));

      src->src_type = is_sampler ?
         nir_tex_src_sampler_offset :
         nir_tex_src_texture_offset;

      instr->texture_array_size = array_elements;
   } else {
      nir_tex_instr_remove_src(instr, src_idx);
   }

   uint32_t set = deref->var->data.descriptor_set;
   uint32_t binding = deref->var->data.binding;
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];

   int desc_index = map_add(is_sampler ?
                            &shader->sampler_map : &shader->texture_map,
                            deref->var->data.descriptor_set,
                            deref->var->data.binding,
                            deref->var->data.index,
                            binding_layout->array_size) + base_index;

   if (is_sampler)
      instr->sampler_index = desc_index;
   else
      instr->texture_index = desc_index;
}
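
/* Lower both the texture and the sampler deref sources of a tex
 * instruction.  Returns true if either one was present.
 */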
static bool
lower_sampler(nir_builder *b, nir_tex_instr *instr, struct tu_shader *shader,
              const struct tu_pipeline_layout *layout)
{
   int texture_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);

   if (texture_idx >= 0)
      lower_tex_src_to_offset(b, instr, texture_idx, shader, layout);

   int sampler_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);

   if (sampler_idx >= 0)
      lower_tex_src_to_offset(b, instr, sampler_idx, shader, layout);

   if (texture_idx < 0 && sampler_idx < 0)
      return false;

   return true;
}
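
/* Lower Vulkan-specific intrinsics: load_layer_id (stubbed to zero until
 * layered rendering is implemented), load_push_constant (turned into a
 * load from ubo 0) and vulkan_resource_index (resolved through the
 * descriptor maps).
 */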
static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
                struct tu_shader *shader,
                const struct tu_pipeline_layout *layout)
{
   /* TODO: remove this when layered rendering is implemented */
   if (instr->intrinsic == nir_intrinsic_load_layer_id) {
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(nir_imm_int(b, 0)));
      nir_instr_remove(&instr->instr);
      return true;
   }

   if (instr->intrinsic == nir_intrinsic_load_push_constant) {
      /* note: ir3 wants load_ubo, not load_uniform */
      assert(nir_intrinsic_base(instr) == 0);

      nir_intrinsic_instr *load =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
      load->num_components = instr->num_components;
      load->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
      load->src[1] = instr->src[0];
      nir_ssa_dest_init(&load->instr, &load->dest,
                        load->num_components, instr->dest.ssa.bit_size,
                        instr->dest.ssa.name);
      nir_builder_instr_insert(b, &load->instr);
      nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));

      nir_instr_remove(&instr->instr);

      return true;
   }

   if (instr->intrinsic != nir_intrinsic_vulkan_resource_index)
      return false;

   nir_const_value *const_val = nir_src_as_const_value(instr->src[0]);

   unsigned set = nir_intrinsic_desc_set(instr);
   unsigned binding = nir_intrinsic_binding(instr);
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];
   unsigned index = 0;

   switch (nir_intrinsic_desc_type(instr)) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      if (!const_val || const_val->u32 != 0)
         tu_finishme("non-zero vulkan_resource_index array index");
      /* skip index 0 which is used for push constants */
      index = map_add(&shader->ubo_map, set, binding, 0,
                      binding_layout->array_size) + 1;
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      if (!const_val)
         tu_finishme("non-constant vulkan_resource_index array index");
      index = map_add(&shader->ssbo_map, set, binding, 0,
                      binding_layout->array_size);
      break;
   default:
      tu_finishme("unsupported desc_type for vulkan_resource_index");
      break;
   }

   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(nir_imm_int(b, index)));
   nir_instr_remove(&instr->instr);

   return true;
}
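
/* Walk all instructions of the impl and lower tex and intrinsic
 * instructions in place.
 */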
static bool
lower_impl(nir_function_impl *impl, struct tu_shader *shader,
           const struct tu_pipeline_layout *layout)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         b.cursor = nir_before_instr(instr);
         switch (instr->type) {
         case nir_instr_type_tex:
            progress |= lower_sampler(&b, nir_instr_as_tex(instr), shader, layout);
            break;
         case nir_instr_type_intrinsic:
            progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader, layout);
            break;
         default:
            break;
         }
      }
   }

   return progress;
}
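
/* NIR pass entry point for the descriptor lowering above. */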
static bool
tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader,
            const struct tu_pipeline_layout *layout)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_impl(function->impl, tu_shader, layout);
   }

   return progress;
}
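
/* Create a tu_shader for one pipeline stage: translate the module's
 * SPIR-V to NIR and run the lowering passes the ir3 backend expects,
 * leaving compilation to tu_shader_compile().
 */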
struct tu_shader *
tu_shader_create(struct tu_device *dev,
                 gl_shader_stage stage,
                 const VkPipelineShaderStageCreateInfo *stage_info,
                 struct tu_pipeline_layout *layout,
                 const VkAllocationCallbacks *alloc)
{
   const struct tu_shader_module *module =
      tu_shader_module_from_handle(stage_info->module);
   struct tu_shader *shader;

   const uint32_t max_variant_count = (stage == MESA_SHADER_VERTEX) ? 2 : 1;
   shader = vk_zalloc2(
      &dev->alloc, alloc,
      sizeof(*shader) + sizeof(struct ir3_shader_variant) * max_variant_count,
      8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!shader)
      return NULL;

   /* translate SPIR-V to NIR */
   assert(module->code_size % 4 == 0);
   nir_shader *nir = tu_spirv_to_nir(
      dev->compiler, (const uint32_t *) module->code, module->code_size / 4,
      stage, stage_info->pName, stage_info->pSpecializationInfo);
   if (!nir) {
      vk_free2(&dev->alloc, alloc, shader);
      return NULL;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_NIR)) {
      fprintf(stderr, "translated nir:\n");
      nir_print_shader(nir, stderr);
   }

   /* multi step inlining procedure */
   NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_opt_deref);
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (!func->is_entrypoint)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   NIR_PASS_V(nir, nir_lower_constant_initializers, ~nir_var_function_temp);

   /* Split member structs.  We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);

   NIR_PASS_V(nir, nir_propagate_invariant);

   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);

   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   NIR_PASS_V(nir, nir_opt_copy_prop_vars);
   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);

   /* ir3 doesn't support indirect input/output */
   NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out);
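
   /* Sort the IO variable lists by location; nir_assign_io_var_locations()
    * below walks these lists in order when assigning driver locations.
    */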
   switch (stage) {
   case MESA_SHADER_VERTEX:
      tu_sort_variables_by_location(&nir->outputs);
      break;
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_GEOMETRY:
      tu_sort_variables_by_location(&nir->inputs);
      tu_sort_variables_by_location(&nir->outputs);
      break;
   case MESA_SHADER_FRAGMENT:
      tu_sort_variables_by_location(&nir->inputs);
      break;
   case MESA_SHADER_COMPUTE:
      break;
   default:
      unreachable("invalid gl_shader_stage");
      break;
   }

   nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs, stage);
   nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs, stage);

   NIR_PASS_V(nir, nir_lower_system_values);
   NIR_PASS_V(nir, nir_lower_frexp);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, nir_lower_input_attachments, true);

   NIR_PASS_V(nir, tu_lower_io, shader, layout);

   NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);

   if (stage == MESA_SHADER_FRAGMENT) {
      /* NOTE: lower load_barycentric_at_sample first, since it
       * produces load_barycentric_at_offset:
       */
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);

      NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
   }

   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* num_uniforms only used by ir3 for size of ubo 0 (push constants) */
   nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE / 16;

   shader->ir3_shader.compiler = dev->compiler;
   shader->ir3_shader.type = stage;
   shader->ir3_shader.nir = nir;

   return shader;
}
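
/* Free everything tu_shader_create() and tu_shader_compile() allocated:
 * the NIR, each variant's IR, the immediates array and the binaries.
 */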
void
tu_shader_destroy(struct tu_device *dev,
                  struct tu_shader *shader,
                  const VkAllocationCallbacks *alloc)
{
   if (shader->ir3_shader.nir)
      ralloc_free(shader->ir3_shader.nir);

   for (uint32_t i = 0; i < 1 + shader->has_binning_pass; i++) {
      if (shader->variants[i].ir)
         ir3_destroy(shader->variants[i].ir);
   }

   if (shader->ir3_shader.const_state.immediates)
      free(shader->ir3_shader.const_state.immediates);

   free(shader->binary);
   if (shader->binning_binary)
      free(shader->binning_binary);

   vk_free2(&dev->alloc, alloc, shader);
}
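
/* Initialize the default compile options; pipeline_info is not consulted
 * yet (see the TODO below).
 */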
void
tu_shader_compile_options_init(
   struct tu_shader_compile_options *options,
   const VkGraphicsPipelineCreateInfo *pipeline_info)
{
   *options = (struct tu_shader_compile_options) {
      /* TODO: VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT
       * some optimizations need to happen otherwise shader might not compile
       */
      .optimize = true,
      .include_binning_pass = true,
   };
}
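
/* Compile and assemble a single ir3 variant.  Passing a non-NULL
 * nonbinning variant marks this one as its binning-pass counterpart.
 */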
static uint32_t *
tu_compile_shader_variant(struct ir3_shader *shader,
                          const struct ir3_shader_key *key,
                          struct ir3_shader_variant *nonbinning,
                          struct ir3_shader_variant *variant)
{
   variant->shader = shader;
   variant->type = shader->type;
   variant->key = *key;
   variant->binning_pass = !!nonbinning;
   variant->nonbinning = nonbinning;

   int ret = ir3_compile_shader_nir(shader->compiler, variant);
   if (ret)
      return NULL;

   /* when assemble fails, we rely on tu_shader_destroy to clean up the
    * variant
    */
   return ir3_shader_assemble(variant, shader->compiler->gpu_id);
}
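
/* Compile the shader's main ir3 variant and, for vertex shaders, a
 * second variant for the binning pass.
 */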
VkResult
tu_shader_compile(struct tu_device *dev,
                  struct tu_shader *shader,
                  const struct tu_shader *next_stage,
                  const struct tu_shader_compile_options *options,
                  const VkAllocationCallbacks *alloc)
{
   if (options->optimize) {
      /* ignore the key for the first pass of optimization */
      ir3_optimize_nir(&shader->ir3_shader, shader->ir3_shader.nir, NULL);

      if (unlikely(dev->physical_device->instance->debug_flags &
                   TU_DEBUG_NIR)) {
         fprintf(stderr, "optimized nir:\n");
         nir_print_shader(shader->ir3_shader.nir, stderr);
      }
   }

   shader->binary = tu_compile_shader_variant(
      &shader->ir3_shader, &options->key, NULL, &shader->variants[0]);
   if (!shader->binary)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* compile another variant for the binning pass */
   if (options->include_binning_pass &&
       shader->ir3_shader.type == MESA_SHADER_VERTEX) {
      shader->binning_binary = tu_compile_shader_variant(
         &shader->ir3_shader, &options->key, &shader->variants[0],
         &shader->variants[1]);
      if (!shader->binning_binary)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      shader->has_binning_pass = true;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_IR3)) {
      fprintf(stderr, "disassembled ir3:\n");
      fprintf(stderr, "shader: %s\n",
              gl_shader_stage_name(shader->ir3_shader.type));
      ir3_shader_disasm(&shader->variants[0], shader->binary, stderr);

      if (shader->has_binning_pass) {
         fprintf(stderr, "disassembled ir3:\n");
         fprintf(stderr, "shader: %s (binning)\n",
                 gl_shader_stage_name(shader->ir3_shader.type));
         ir3_shader_disasm(&shader->variants[1], shader->binning_binary,
                           stderr);
      }
   }

   return VK_SUCCESS;
}
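
/* vkCreateShaderModule: copy the SPIR-V words into the module and record
 * their SHA-1.
 */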
VkResult
tu_CreateShaderModule(VkDevice _device,
                      const VkShaderModuleCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkShaderModule *pShaderModule)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);
   assert(pCreateInfo->codeSize % 4 == 0);

   module = vk_alloc2(&device->alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   module->code_size = pCreateInfo->codeSize;
   memcpy(module->code, pCreateInfo->pCode, pCreateInfo->codeSize);

   _mesa_sha1_compute(module->code, module->code_size, module->sha1);

   *pShaderModule = tu_shader_module_to_handle(module);

   return VK_SUCCESS;
}
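
/* vkDestroyShaderModule: NULL module handles are ignored, as the spec
 * requires.
 */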
void
tu_DestroyShaderModule(VkDevice _device,
                       VkShaderModule _module,
                       const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_shader_module, module, _module);

   if (!module)
      return;

   vk_free2(&device->alloc, pAllocator, module);
}