/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "spirv/nir_spirv.h"
#include "util/mesa-sha1.h"

#include "ir3/ir3_nir.h"
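
/* Translate a SPIR-V binary to NIR using the ir3 compiler options.  Any
 * VkSpecializationInfo entries are converted to nir_spirv_specialization
 * before handing off to spirv_to_nir.
 */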
static nir_shader *
tu_spirv_to_nir(struct ir3_compiler *compiler,
                const uint32_t *words,
                size_t word_count,
                gl_shader_stage stage,
                const char *entry_point_name,
                const VkSpecializationInfo *spec_info)
{
   /* TODO these are made-up */
   const struct spirv_to_nir_options spirv_options = {
      .frag_coord_is_sysval = true,
      .lower_ubo_ssbo_access_to_offsets = true,
   };
   const nir_shader_compiler_options *nir_options =
      ir3_get_compiler_options(compiler);

   /* convert VkSpecializationInfo */
   struct nir_spirv_specialization *spec = NULL;
   uint32_t num_spec = 0;
   if (spec_info && spec_info->mapEntryCount) {
      spec = malloc(sizeof(*spec) * spec_info->mapEntryCount);
      if (!spec)
         return NULL;

      for (uint32_t i = 0; i < spec_info->mapEntryCount; i++) {
         const VkSpecializationMapEntry *entry = &spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry->offset;
         assert(data + entry->size <= spec_info->pData + spec_info->dataSize);
         spec[i].id = entry->constantID;
         if (entry->size == 8)
            spec[i].data64 = *(const uint64_t *) data;
         else
            spec[i].data32 = *(const uint32_t *) data;
         spec[i].defined_on_module = false;
      }

      num_spec = spec_info->mapEntryCount;
   }

   nir_shader *nir =
      spirv_to_nir(words, word_count, spec, num_spec, stage, entry_point_name,
                   &spirv_options, nir_options);

   free(spec);

   assert(nir->info.stage == stage);
   nir_validate_shader(nir, "after spirv_to_nir");

   return nir;
}
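
/* Insertion-sort a variable list by location so that later passes see the
 * inputs/outputs in location order.
 */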
static void
tu_sort_variables_by_location(struct exec_list *variables)
{
   struct exec_list sorted;
   exec_list_make_empty(&sorted);

   nir_foreach_variable_safe(var, variables)
   {
      exec_node_remove(&var->node);

      /* insert the variable into the sorted list */
      nir_variable *next = NULL;
      nir_foreach_variable(tmp, &sorted)
      {
         if (var->data.location < tmp->data.location) {
            next = tmp;
            break;
         }
      }

      if (next)
         exec_node_insert_node_before(&next->node, &var->node);
      else
         exec_list_push_tail(&sorted, &var->node);
   }

   exec_list_move_nodes_to(&sorted, variables);
}
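
/* Add a (set, binding) pair to a descriptor map, reusing an existing entry
 * if one matches, and return its index in the map.
 */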
static unsigned
map_add(struct tu_descriptor_map *map, int set, int binding, int value)
{
   unsigned index;
   for (index = 0; index < map->num; index++) {
      if (set == map->set[index] && binding == map->binding[index])
         break;
   }

   assert(index < ARRAY_SIZE(map->set));

   map->set[index] = set;
   map->binding[index] = binding;
   map->value[index] = value;
   map->num = MAX2(map->num, index + 1);

   return index;
}
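
/* Rewrite a texture/sampler deref source into a flat index: constant array
 * indices are folded into the instruction's texture/sampler_index, and any
 * remaining dynamic part becomes a texture/sampler_offset source.
 */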
static void
lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
                        struct tu_shader *shader)
{
   nir_ssa_def *index = NULL;
   unsigned base_index = 0;
   unsigned array_elements = 1;
   nir_tex_src *src = &instr->src[src_idx];
   bool is_sampler = src->src_type == nir_tex_src_sampler_deref;

   /* We compute first the offsets */
   nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
   while (deref->deref_type != nir_deref_type_var) {
      assert(deref->parent.is_ssa);
      nir_deref_instr *parent =
         nir_instr_as_deref(deref->parent.ssa->parent_instr);

      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index) && index == NULL) {
         /* We're still building a direct index */
         base_index += nir_src_as_uint(deref->arr.index) * array_elements;
      } else {
         if (index == NULL) {
            /* We used to be direct but not anymore */
            index = nir_imm_int(b, base_index);
            base_index = 0;
         }

         index = nir_iadd(b, index,
                          nir_imul(b, nir_imm_int(b, array_elements),
                                   nir_ssa_for_src(b, deref->arr.index, 1)));
      }

      array_elements *= glsl_get_length(parent->type);

      deref = parent;
   }

   if (index)
      index = nir_umin(b, index, nir_imm_int(b, array_elements - 1));

   /* We have the offsets, we apply them, rewriting the source or removing
    * instr if needed
    */
   if (index) {
      nir_instr_rewrite_src(&instr->instr, &src->src,
                            nir_src_for_ssa(index));

      src->src_type = is_sampler ?
         nir_tex_src_sampler_offset :
         nir_tex_src_texture_offset;

      instr->texture_array_size = array_elements;
   } else {
      nir_tex_instr_remove_src(instr, src_idx);
   }

   if (array_elements > 1)
      tu_finishme("texture/sampler array");

   if (is_sampler) {
      instr->sampler_index = map_add(&shader->sampler_map,
                                     deref->var->data.descriptor_set,
                                     deref->var->data.binding,
                                     0);
      instr->sampler_index += base_index;
   } else {
      instr->texture_index = map_add(&shader->texture_map,
                                     deref->var->data.descriptor_set,
                                     deref->var->data.binding,
                                     deref->var->data.index);
      instr->texture_index += base_index;
      instr->texture_array_size = array_elements;
   }
}
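
/* Lower both the texture and sampler deref sources of a tex instruction,
 * if present.  Returns true if anything was lowered.
 */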
static bool
lower_sampler(nir_builder *b, nir_tex_instr *instr, struct tu_shader *shader)
{
   int texture_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);

   if (texture_idx >= 0)
      lower_tex_src_to_offset(b, instr, texture_idx, shader);

   int sampler_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);

   if (sampler_idx >= 0)
      lower_tex_src_to_offset(b, instr, sampler_idx, shader);

   if (texture_idx < 0 && sampler_idx < 0)
      return false;

   return true;
}
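
/* Lower driver-specific intrinsics: load_layer_id becomes a constant zero,
 * load_push_constant becomes a load_ubo from ubo 0, and
 * vulkan_resource_index is resolved through the shader's descriptor maps.
 */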
static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
                struct tu_shader *shader)
{
   /* TODO: remove this when layered rendering is implemented */
   if (instr->intrinsic == nir_intrinsic_load_layer_id) {
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(nir_imm_int(b, 0)));
      nir_instr_remove(&instr->instr);
      return true;
   }

   if (instr->intrinsic == nir_intrinsic_load_push_constant) {
      /* note: ir3 wants load_ubo, not load_uniform */
      assert(nir_intrinsic_base(instr) == 0);

      nir_intrinsic_instr *load =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
      load->num_components = instr->num_components;
      load->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
      load->src[1] = instr->src[0];
      nir_ssa_dest_init(&load->instr, &load->dest,
                        load->num_components, instr->dest.ssa.bit_size,
                        instr->dest.ssa.name);
      nir_builder_instr_insert(b, &load->instr);
      nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));

      nir_instr_remove(&instr->instr);

      return true;
   }

   if (instr->intrinsic != nir_intrinsic_vulkan_resource_index)
      return false;

   nir_const_value *const_val = nir_src_as_const_value(instr->src[0]);
   if (!const_val || const_val->u32 != 0)
      tu_finishme("non-zero vulkan_resource_index array index");

   unsigned set = nir_intrinsic_desc_set(instr);
   unsigned binding = nir_intrinsic_binding(instr);

   unsigned index = 0;
   switch (nir_intrinsic_desc_type(instr)) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      /* skip index 0 which is used for push constants */
      index = map_add(&shader->ubo_map, set, binding, 0) + 1;
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      index = map_add(&shader->ssbo_map, set, binding, 0);
      break;
   default:
      tu_finishme("unsupported desc_type for vulkan_resource_index");
      break;
   }

   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(nir_imm_int(b, index)));
   nir_instr_remove(&instr->instr);

   return true;
}
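
/* Walk every instruction in the function and apply the tex/intrinsic
 * lowerings above.
 */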
static bool
lower_impl(nir_function_impl *impl, struct tu_shader *shader)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         b.cursor = nir_before_instr(instr);
         switch (instr->type) {
         case nir_instr_type_tex:
            progress |= lower_sampler(&b, nir_instr_as_tex(instr), shader);
            break;
         case nir_instr_type_intrinsic:
            progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader);
            break;
         default:
            break;
         }
      }
   }

   return progress;
}
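
/* Shader-level entry point for the descriptor lowering; run as a NIR pass
 * from tu_shader_create below.
 */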
static bool
tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_impl(function->impl, tu_shader);
   }

   return progress;
}
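
/* Create a tu_shader from a pipeline stage: translate the module's SPIR-V
 * to NIR, then run the stage-independent NIR lowering and optimization
 * passes needed before handing the shader to ir3.
 */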
struct tu_shader *
tu_shader_create(struct tu_device *dev,
                 gl_shader_stage stage,
                 const VkPipelineShaderStageCreateInfo *stage_info,
                 const VkAllocationCallbacks *alloc)
{
   const struct tu_shader_module *module =
      tu_shader_module_from_handle(stage_info->module);
   struct tu_shader *shader;

   const uint32_t max_variant_count = (stage == MESA_SHADER_VERTEX) ? 2 : 1;
   shader = vk_zalloc2(
      &dev->alloc, alloc,
      sizeof(*shader) + sizeof(struct ir3_shader_variant) * max_variant_count,
      8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!shader)
      return NULL;

   /* translate SPIR-V to NIR */
   assert(module->code_size % 4 == 0);
   nir_shader *nir = tu_spirv_to_nir(
      dev->compiler, (const uint32_t *) module->code, module->code_size / 4,
      stage, stage_info->pName, stage_info->pSpecializationInfo);
   if (!nir) {
      vk_free2(&dev->alloc, alloc, shader);
      return NULL;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_NIR)) {
      fprintf(stderr, "translated nir:\n");
      nir_print_shader(nir, stderr);
   }

   /* multi step inlining procedure */
   NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_opt_deref);
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (!func->is_entrypoint)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   NIR_PASS_V(nir, nir_lower_constant_initializers, ~nir_var_function_temp);

   /* Split member structs. We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);

   NIR_PASS_V(nir, nir_propagate_invariant);

   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);

   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   NIR_PASS_V(nir, nir_opt_copy_prop_vars);
   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);

   /* ir3 doesn't support indirect input/output */
   NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out);

   switch (stage) {
   case MESA_SHADER_VERTEX:
      tu_sort_variables_by_location(&nir->outputs);
      break;
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_GEOMETRY:
      tu_sort_variables_by_location(&nir->inputs);
      tu_sort_variables_by_location(&nir->outputs);
      break;
   case MESA_SHADER_FRAGMENT:
      tu_sort_variables_by_location(&nir->inputs);
      break;
   case MESA_SHADER_COMPUTE:
      break;
   default:
      unreachable("invalid gl_shader_stage");
      break;
   }

   nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs, stage);
   nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs, stage);

   NIR_PASS_V(nir, nir_lower_system_values);
   NIR_PASS_V(nir, nir_lower_frexp);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, nir_lower_input_attachments, true);

   NIR_PASS_V(nir, tu_lower_io, shader);

   NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);

   if (stage == MESA_SHADER_FRAGMENT) {
      /* NOTE: lower load_barycentric_at_sample first, since it
       * produces load_barycentric_at_offset:
       */
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);

      NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
   }

   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* num_uniforms only used by ir3 for size of ubo 0 (push constants) */
   nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE / 16;

   shader->ir3_shader.compiler = dev->compiler;
   shader->ir3_shader.type = stage;
   shader->ir3_shader.nir = nir;

   return shader;
}
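
/* Free everything owned by a tu_shader: the NIR, the compiled variants and
 * their binaries, and the shader itself.
 */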
void
tu_shader_destroy(struct tu_device *dev,
                  struct tu_shader *shader,
                  const VkAllocationCallbacks *alloc)
{
   if (shader->ir3_shader.nir)
      ralloc_free(shader->ir3_shader.nir);

   for (uint32_t i = 0; i < 1 + shader->has_binning_pass; i++) {
      if (shader->variants[i].ir)
         ir3_destroy(shader->variants[i].ir);
   }

   if (shader->ir3_shader.const_state.immediates)
      free(shader->ir3_shader.const_state.immediates);

   free(shader->binary);
   if (shader->binning_binary)
      free(shader->binning_binary);

   vk_free2(&dev->alloc, alloc, shader);
}
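
/* Fill in the default compile options; pipeline_info is not consulted yet. */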
void
tu_shader_compile_options_init(
   struct tu_shader_compile_options *options,
   const VkGraphicsPipelineCreateInfo *pipeline_info)
{
   *options = (struct tu_shader_compile_options) {
      /* TODO: VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT
       * some optimizations need to happen otherwise shader might not compile
       */
      .optimize = true,
      .include_binning_pass = true,
   };
}
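
/* Compile and assemble a single ir3 variant.  Returns the assembled binary,
 * or NULL on failure.
 */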
static void *
tu_compile_shader_variant(struct ir3_shader *shader,
                          const struct ir3_shader_key *key,
                          struct ir3_shader_variant *nonbinning,
                          struct ir3_shader_variant *variant)
{
   variant->shader = shader;
   variant->type = shader->type;
   variant->key = *key;
   variant->binning_pass = !!nonbinning;
   variant->nonbinning = nonbinning;

   int ret = ir3_compile_shader_nir(shader->compiler, variant);
   if (ret)
      return NULL;

   /* when assemble fails, we rely on tu_shader_destroy to clean up the
    * variant
    */
   return ir3_shader_assemble(variant, shader->compiler->gpu_id);
}
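
/* Compile all variants of a shader: the main variant, plus a binning-pass
 * variant for vertex shaders when requested by the compile options.
 */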
VkResult
tu_shader_compile(struct tu_device *dev,
                  struct tu_shader *shader,
                  const struct tu_shader *next_stage,
                  const struct tu_shader_compile_options *options,
                  const VkAllocationCallbacks *alloc)
{
   if (options->optimize) {
      /* ignore the key for the first pass of optimization */
      ir3_optimize_nir(&shader->ir3_shader, shader->ir3_shader.nir, NULL);

      if (unlikely(dev->physical_device->instance->debug_flags &
                   TU_DEBUG_NIR)) {
         fprintf(stderr, "optimized nir:\n");
         nir_print_shader(shader->ir3_shader.nir, stderr);
      }
   }

   shader->binary = tu_compile_shader_variant(
      &shader->ir3_shader, &options->key, NULL, &shader->variants[0]);
   if (!shader->binary)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* compile another variant for the binning pass */
   if (options->include_binning_pass &&
       shader->ir3_shader.type == MESA_SHADER_VERTEX) {
      shader->binning_binary = tu_compile_shader_variant(
         &shader->ir3_shader, &options->key, &shader->variants[0],
         &shader->variants[1]);
      if (!shader->binning_binary)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      shader->has_binning_pass = true;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_IR3)) {
      fprintf(stderr, "disassembled ir3:\n");
      fprintf(stderr, "shader: %s\n",
              gl_shader_stage_name(shader->ir3_shader.type));
      ir3_shader_disasm(&shader->variants[0], shader->binary, stderr);

      if (shader->has_binning_pass) {
         fprintf(stderr, "disassembled ir3:\n");
         fprintf(stderr, "shader: %s (binning)\n",
                 gl_shader_stage_name(shader->ir3_shader.type));
         ir3_shader_disasm(&shader->variants[1], shader->binning_binary,
                           stderr);
      }
   }

   return VK_SUCCESS;
}
VkResult
tu_CreateShaderModule(VkDevice _device,
                      const VkShaderModuleCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkShaderModule *pShaderModule)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);
   assert(pCreateInfo->codeSize % 4 == 0);

   module = vk_alloc2(&device->alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   module->code_size = pCreateInfo->codeSize;
   memcpy(module->code, pCreateInfo->pCode, pCreateInfo->codeSize);

   _mesa_sha1_compute(module->code, module->code_size, module->sha1);

   *pShaderModule = tu_shader_module_to_handle(module);

   return VK_SUCCESS;
}
void
tu_DestroyShaderModule(VkDevice _device,
                       VkShaderModule _module,
                       const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_shader_module, module, _module);

   if (!module)
      return;

   vk_free2(&device->alloc, pAllocator, module);
}