/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "spirv/nir_spirv.h"
#include "util/mesa-sha1.h"

#include "ir3/ir3_nir.h"

static nir_shader *
tu_spirv_to_nir(struct ir3_compiler *compiler,
                const uint32_t *words,
                size_t word_count,
                gl_shader_stage stage,
                const char *entry_point_name,
                const VkSpecializationInfo *spec_info)
{
   /* TODO these are made-up */
   const struct spirv_to_nir_options spirv_options = {
      .frag_coord_is_sysval = true,
      .lower_ubo_ssbo_access_to_offsets = true,
   };
   const nir_shader_compiler_options *nir_options =
      ir3_get_compiler_options(compiler);
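
   /* The block below flattens VkSpecializationInfo into the
    * nir_spirv_specialization array that spirv_to_nir() expects.  As an
    * illustrative example (made-up values, not from any particular app):
    * a map entry with constantID = 3, offset = 0, size = 4 pointing at
    * pData containing 0x00000001 becomes spec[0].id = 3, spec[0].data32 = 1.
    */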

   /* convert VkSpecializationInfo */
   struct nir_spirv_specialization *spec = NULL;
   uint32_t num_spec = 0;
   if (spec_info && spec_info->mapEntryCount) {
      spec = malloc(sizeof(*spec) * spec_info->mapEntryCount);
      if (!spec)
         return NULL;

      for (uint32_t i = 0; i < spec_info->mapEntryCount; i++) {
         const VkSpecializationMapEntry *entry = &spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry->offset;
         assert(data + entry->size <= spec_info->pData + spec_info->dataSize);
         spec[i].id = entry->constantID;
         if (entry->size == 8)
            spec[i].data64 = *(const uint64_t *) data;
         else
            spec[i].data32 = *(const uint32_t *) data;
         spec[i].defined_on_module = false;
      }

      num_spec = spec_info->mapEntryCount;
   }

   nir_shader *nir =
      spirv_to_nir(words, word_count, spec, num_spec, stage, entry_point_name,
                   &spirv_options, nir_options);

   free(spec);

   assert(nir->info.stage == stage);
   nir_validate_shader(nir, "after spirv_to_nir");

   return nir;
}
static unsigned
map_add(struct tu_descriptor_map *map, int set, int binding, int value,
        int array_size)
{
   unsigned index = 0;
   for (unsigned i = 0; i < map->num; i++) {
      if (set == map->set[i] && binding == map->binding[i]) {
         assert(value == map->value[i]);
         assert(array_size == map->array_size[i]);
         return index;
      }
      index += map->array_size[i];
   }

   assert(index == map->num_desc);

   map->set[map->num] = set;
   map->binding[map->num] = binding;
   map->value[map->num] = value;
   map->array_size[map->num] = array_size;
   map->num++;
   map->num_desc += array_size;

   return index;
}
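
/* Summary of the lowering below: a purely constant deref index like tex[2]
 * folds into base_index and the deref source is removed, while a dynamic
 * index like tex[i] is turned into a nir_tex_src_texture_offset /
 * nir_tex_src_sampler_offset source clamped to the array size.  The final
 * texture_index/sampler_index is then looked up through map_add().
 */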
static void
lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
                        struct tu_shader *shader,
                        const struct tu_pipeline_layout *layout)
{
   nir_ssa_def *index = NULL;
   unsigned base_index = 0;
   unsigned array_elements = 1;
   nir_tex_src *src = &instr->src[src_idx];
   bool is_sampler = src->src_type == nir_tex_src_sampler_deref;

   /* We compute first the offsets */
   nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
   while (deref->deref_type != nir_deref_type_var) {
      assert(deref->parent.is_ssa);
      nir_deref_instr *parent =
         nir_instr_as_deref(deref->parent.ssa->parent_instr);

      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index) && index == NULL) {
         /* We're still building a direct index */
         base_index += nir_src_as_uint(deref->arr.index) * array_elements;
      } else {
         if (index == NULL) {
            /* We used to be direct but not anymore */
            index = nir_imm_int(b, base_index);
            base_index = 0;
         }

         index = nir_iadd(b, index,
                          nir_imul(b, nir_imm_int(b, array_elements),
                                   nir_ssa_for_src(b, deref->arr.index, 1)));
      }

      array_elements *= glsl_get_length(parent->type);

      deref = parent;
   }

   if (index)
      index = nir_umin(b, index, nir_imm_int(b, array_elements - 1));

   /* We have the offsets, we apply them, rewriting the source or removing
    * instr if needed
    */
   if (index) {
      nir_instr_rewrite_src(&instr->instr, &src->src,
                            nir_src_for_ssa(index));

      src->src_type = is_sampler ?
         nir_tex_src_sampler_offset :
         nir_tex_src_texture_offset;

      instr->texture_array_size = array_elements;
   } else {
      nir_tex_instr_remove_src(instr, src_idx);
   }

   uint32_t set = deref->var->data.descriptor_set;
   uint32_t binding = deref->var->data.binding;
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];

   int desc_index = map_add(is_sampler ?
                            &shader->sampler_map : &shader->texture_map,
                            deref->var->data.descriptor_set,
                            deref->var->data.binding,
                            deref->var->data.index,
                            binding_layout->array_size) + base_index;
   if (is_sampler)
      instr->sampler_index = desc_index;
   else
      instr->texture_index = desc_index;
}

static bool
lower_sampler(nir_builder *b, nir_tex_instr *instr, struct tu_shader *shader,
              const struct tu_pipeline_layout *layout)
{
   int texture_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);

   if (texture_idx >= 0)
      lower_tex_src_to_offset(b, instr, texture_idx, shader, layout);

   int sampler_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);

   if (sampler_idx >= 0)
      lower_tex_src_to_offset(b, instr, sampler_idx, shader, layout);

   if (texture_idx < 0 && sampler_idx < 0)
      return false;

   return true;
}
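
/* Push constants are fed to ir3 through UBO slot 0 (see also the
 * "skip index 0" adjustment in lower_vulkan_resource_index() and the
 * num_uniforms setup in tu_shader_create()).  Roughly, as an illustrative
 * sketch rather than exact NIR syntax:
 *
 *    ssa_1 = intrinsic load_push_constant (ssa_0) (base=0, ...)
 * becomes
 *    ssa_2 = intrinsic load_ubo (const 0, ssa_0) (...)
 */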
static void
lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *instr,
                         struct tu_shader *shader)
{
   /* note: ir3 wants load_ubo, not load_uniform */
   assert(nir_intrinsic_base(instr) == 0);

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   load->num_components = instr->num_components;
   load->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
   load->src[1] = instr->src[0];
   nir_ssa_dest_init(&load->instr, &load->dest,
                     load->num_components, instr->dest.ssa.bit_size,
                     instr->dest.ssa.name);
   nir_builder_instr_insert(b, &load->instr);
   nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));

   nir_instr_remove(&instr->instr);
}
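
/* vulkan_resource_index carries a (descriptor set, binding) pair plus an
 * array index; it is collapsed here into the flat UBO/SSBO slot that ir3
 * addresses.  Illustrative example (made-up layout): the first UBO binding
 * seen maps to slot 1 rather than slot 0, because slot 0 is reserved for
 * push constants above.
 */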
static void
lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr,
                            struct tu_shader *shader,
                            const struct tu_pipeline_layout *layout)
{
   nir_const_value *const_val = nir_src_as_const_value(instr->src[0]);

   unsigned set = nir_intrinsic_desc_set(instr);
   unsigned binding = nir_intrinsic_binding(instr);
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];
   unsigned index = 0;

   switch (nir_intrinsic_desc_type(instr)) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      if (!const_val)
         tu_finishme("non-constant vulkan_resource_index array index");
      /* skip index 0 which is used for push constants */
      index = map_add(&shader->ubo_map, set, binding, 0,
                      binding_layout->array_size) + 1;
      index += const_val->u32;
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      if (!const_val)
         tu_finishme("non-constant vulkan_resource_index array index");
      index = map_add(&shader->ssbo_map, set, binding, 0,
                      binding_layout->array_size);
      index += const_val->u32;
      break;
   default:
      tu_finishme("unsupported desc_type for vulkan_resource_index");
      break;
   }

   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(nir_imm_int(b, index)));
   nir_instr_remove(&instr->instr);
}
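
/* For image intrinsics we only need to record the flat descriptor index;
 * the variable's driver_location is what the ir3 backend reads when it
 * lowers image derefs later on.
 */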
static void
add_image_deref_mapping(nir_intrinsic_instr *instr, struct tu_shader *shader,
                        const struct tu_pipeline_layout *layout)
{
   nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   uint32_t set = var->data.descriptor_set;
   uint32_t binding = var->data.binding;
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];

   var->data.driver_location =
      map_add(&shader->image_map, set, binding, var->data.index,
              binding_layout->array_size);
}

static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
                struct tu_shader *shader,
                const struct tu_pipeline_layout *layout)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_layer_id:
      /* TODO: remove this when layered rendering is implemented */
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(nir_imm_int(b, 0)));
      nir_instr_remove(&instr->instr);
      return true;

   case nir_intrinsic_load_push_constant:
      lower_load_push_constant(b, instr, shader);
      return true;

   case nir_intrinsic_vulkan_resource_index:
      lower_vulkan_resource_index(b, instr, shader, layout);
      return true;

   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_image_deref_samples:
   case nir_intrinsic_image_deref_load_param_intel:
   case nir_intrinsic_image_deref_load_raw_intel:
   case nir_intrinsic_image_deref_store_raw_intel:
      add_image_deref_mapping(instr, shader, layout);
      return true;

   default:
      return false;
   }
}

static bool
lower_impl(nir_function_impl *impl, struct tu_shader *shader,
           const struct tu_pipeline_layout *layout)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         b.cursor = nir_before_instr(instr);
         switch (instr->type) {
         case nir_instr_type_tex:
            progress |= lower_sampler(&b, nir_instr_as_tex(instr), shader, layout);
            break;
         case nir_instr_type_intrinsic:
            progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader, layout);
            break;
         default:
            break;
         }
      }
   }

   return progress;
}

static bool
tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader,
            const struct tu_pipeline_layout *layout)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_impl(function->impl, tu_shader, layout);
   }

   /* spirv_to_nir produces num_ssbos equal to the number of SSBO-containing
    * variables, while ir3 wants the number of descriptors (like the gallium
    * path), so fix it up here.
    */
   shader->info.num_ssbos = tu_shader->ssbo_map.num_desc;

   return progress;
}
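
/* Shader creation flow: translate the module's SPIR-V to NIR, run the
 * generic and turnip-specific lowering passes below, and hand the result to
 * ir3 via shader->ir3_shader.  Compilation to a binary happens later in
 * tu_shader_compile().
 */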
struct tu_shader *
tu_shader_create(struct tu_device *dev,
                 gl_shader_stage stage,
                 const VkPipelineShaderStageCreateInfo *stage_info,
                 struct tu_pipeline_layout *layout,
                 const VkAllocationCallbacks *alloc)
{
   const struct tu_shader_module *module =
      tu_shader_module_from_handle(stage_info->module);
   struct tu_shader *shader;

   const uint32_t max_variant_count = (stage == MESA_SHADER_VERTEX) ? 2 : 1;
   shader = vk_zalloc2(
      &dev->alloc, alloc,
      sizeof(*shader) + sizeof(struct ir3_shader_variant) * max_variant_count,
      8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!shader)
      return NULL;

   /* translate SPIR-V to NIR */
   assert(module->code_size % 4 == 0);
   nir_shader *nir = tu_spirv_to_nir(
      dev->compiler, (const uint32_t *) module->code, module->code_size / 4,
      stage, stage_info->pName, stage_info->pSpecializationInfo);
   if (!nir) {
      vk_free2(&dev->alloc, alloc, shader);
      return NULL;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_NIR)) {
      fprintf(stderr, "translated nir:\n");
      nir_print_shader(nir, stderr);
   }
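
   /* The passes below follow the usual NIR recipe for inlining everything
    * into the entrypoint: lower function-temp initializers and returns
    * first, inline, clean up derefs, then drop every non-entrypoint
    * function so only one nir_function remains.
    */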
   /* multi step inlining procedure */
   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_opt_deref);
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (!func->is_entrypoint)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   NIR_PASS_V(nir, nir_lower_variable_initializers, ~nir_var_function_temp);

   /* Split member structs.  We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);

   NIR_PASS_V(nir, nir_propagate_invariant);

   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);

   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   NIR_PASS_V(nir, nir_opt_copy_prop_vars);
   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);

   /* ir3 doesn't support indirect input/output */
   NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out);

   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);

   nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs, stage);
   nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs, stage);

   NIR_PASS_V(nir, nir_lower_system_values);
   NIR_PASS_V(nir, nir_lower_frexp);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, nir_lower_input_attachments, true);

   NIR_PASS_V(nir, tu_lower_io, shader, layout);

   NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);

   if (stage == MESA_SHADER_FRAGMENT) {
      /* NOTE: lower load_barycentric_at_sample first, since it
       * produces load_barycentric_at_offset:
       */
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);

      NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
   }

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* num_uniforms only used by ir3 for size of ubo 0 (push constants) */
   nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE / 16;

   shader->ir3_shader.compiler = dev->compiler;
   shader->ir3_shader.type = stage;
   shader->ir3_shader.nir = nir;

   return shader;
}

void
tu_shader_destroy(struct tu_device *dev,
                  struct tu_shader *shader,
                  const VkAllocationCallbacks *alloc)
{
   if (shader->ir3_shader.nir)
      ralloc_free(shader->ir3_shader.nir);

   for (uint32_t i = 0; i < 1 + shader->has_binning_pass; i++) {
      if (shader->variants[i].ir)
         ir3_destroy(shader->variants[i].ir);
   }

   if (shader->ir3_shader.const_state.immediates)
      free(shader->ir3_shader.const_state.immediates);

   free(shader->binary);
   if (shader->binning_binary)
      free(shader->binning_binary);

   vk_free2(&dev->alloc, alloc, shader);
}

void
tu_shader_compile_options_init(
   struct tu_shader_compile_options *options,
   const VkGraphicsPipelineCreateInfo *pipeline_info)
{
   *options = (struct tu_shader_compile_options) {
      /* TODO: VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT
       * some optimizations need to happen otherwise shader might not compile
       */
      .optimize = true,
      .include_binning_pass = true,
   };
}

static void *
tu_compile_shader_variant(struct ir3_shader *shader,
                          const struct ir3_shader_key *key,
                          struct ir3_shader_variant *nonbinning,
                          struct ir3_shader_variant *variant)
{
   variant->shader = shader;
   variant->type = shader->type;
   variant->key = *key;
   variant->binning_pass = !!nonbinning;
   variant->nonbinning = nonbinning;

   int ret = ir3_compile_shader_nir(shader->compiler, variant);
   if (ret)
      return NULL;

   /* when assemble fails, we rely on tu_shader_destroy to clean up the
    * variant
    */
   return ir3_shader_assemble(variant, shader->compiler->gpu_id);
}

VkResult
tu_shader_compile(struct tu_device *dev,
                  struct tu_shader *shader,
                  const struct tu_shader *next_stage,
                  const struct tu_shader_compile_options *options,
                  const VkAllocationCallbacks *alloc)
{
   if (options->optimize) {
      /* ignore the key for the first pass of optimization */
      ir3_optimize_nir(&shader->ir3_shader, shader->ir3_shader.nir, NULL);

      if (unlikely(dev->physical_device->instance->debug_flags &
                   TU_DEBUG_NIR)) {
         fprintf(stderr, "optimized nir:\n");
         nir_print_shader(shader->ir3_shader.nir, stderr);
      }
   }

   shader->binary = tu_compile_shader_variant(
      &shader->ir3_shader, &options->key, NULL, &shader->variants[0]);
   if (!shader->binary)
      return VK_ERROR_OUT_OF_HOST_MEMORY;
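
   /* On adreno, the tiler runs a stripped-down binning pass over the
    * geometry before the per-tile rendering pass, so vertex shaders get a
    * second variant linked to the full one through the nonbinning pointer
    * set up in tu_compile_shader_variant().
    */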
   /* compile another variant for the binning pass */
   if (options->include_binning_pass &&
       shader->ir3_shader.type == MESA_SHADER_VERTEX) {
      shader->binning_binary = tu_compile_shader_variant(
         &shader->ir3_shader, &options->key, &shader->variants[0],
         &shader->variants[1]);
      if (!shader->binning_binary)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      shader->has_binning_pass = true;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_IR3)) {
      fprintf(stderr, "disassembled ir3:\n");
      fprintf(stderr, "shader: %s\n",
              gl_shader_stage_name(shader->ir3_shader.type));
      ir3_shader_disasm(&shader->variants[0], shader->binary, stderr);

      if (shader->has_binning_pass) {
         fprintf(stderr, "disassembled ir3:\n");
         fprintf(stderr, "shader: %s (binning)\n",
                 gl_shader_stage_name(shader->ir3_shader.type));
         ir3_shader_disasm(&shader->variants[1], shader->binning_binary,
                           stderr);
      }
   }

   return VK_SUCCESS;
}

VkResult
tu_CreateShaderModule(VkDevice _device,
                      const VkShaderModuleCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkShaderModule *pShaderModule)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);
   assert(pCreateInfo->codeSize % 4 == 0);

   module = vk_alloc2(&device->alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   module->code_size = pCreateInfo->codeSize;
   memcpy(module->code, pCreateInfo->pCode, pCreateInfo->codeSize);

   _mesa_sha1_compute(module->code, module->code_size, module->sha1);

   *pShaderModule = tu_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void
tu_DestroyShaderModule(VkDevice _device,
                       VkShaderModule _module,
                       const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_shader_module, module, _module);

   if (!module)
      return;

   vk_free2(&device->alloc, pAllocator, module);
}