/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "spirv/nir_spirv.h"
#include "util/mesa-sha1.h"
#include "nir/nir_xfb_info.h"
#include "vk_util.h"

#include "ir3/ir3_nir.h"
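
/* Translate a SPIR-V binary into NIR for the ir3 backend. Specialization
 * constants from the pipeline are converted into nir_spirv_specialization
 * entries before the words are handed to spirv_to_nir().
 */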
static nir_shader *
tu_spirv_to_nir(struct ir3_compiler *compiler,
                const uint32_t *words,
                size_t word_count,
                gl_shader_stage stage,
                const char *entry_point_name,
                const VkSpecializationInfo *spec_info)
{
   /* TODO these are made-up */
   const struct spirv_to_nir_options spirv_options = {
      .frag_coord_is_sysval = true,
      .lower_ubo_ssbo_access_to_offsets = true,
      .caps = {
         .transform_feedback = compiler->gpu_id >= 600,
      },
   };
   const nir_shader_compiler_options *nir_options =
      ir3_get_compiler_options(compiler);

   /* convert VkSpecializationInfo */
   struct nir_spirv_specialization *spec = NULL;
   uint32_t num_spec = 0;
   if (spec_info && spec_info->mapEntryCount) {
      spec = malloc(sizeof(*spec) * spec_info->mapEntryCount);
      if (!spec)
         return NULL;

      for (uint32_t i = 0; i < spec_info->mapEntryCount; i++) {
         const VkSpecializationMapEntry *entry = &spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry->offset;
         assert(data + entry->size <= spec_info->pData + spec_info->dataSize);
         spec[i].id = entry->constantID;
         switch (entry->size) {
         case 8:
            spec[i].data64 = *(const uint64_t *)data;
            break;
         case 4:
            spec[i].data32 = *(const uint32_t *)data;
            break;
         case 2:
            spec[i].data32 = *(const uint16_t *)data;
            break;
         case 1:
            spec[i].data32 = *(const uint8_t *)data;
            break;
         default:
            assert(!"Invalid spec constant size");
            break;
         }
         spec[i].defined_on_module = false;
      }

      num_spec = spec_info->mapEntryCount;
   }

   nir_shader *nir =
      spirv_to_nir(words, word_count, spec, num_spec, stage, entry_point_name,
                   &spirv_options, nir_options);

   assert(nir->info.stage == stage);
   nir_validate_shader(nir, "after spirv_to_nir");

   free(spec);

   return nir;
}
static void
lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *instr,
                         struct tu_shader *shader)
{
   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
   load->num_components = instr->num_components;
   uint32_t base = nir_intrinsic_base(instr);
   assert(base % 4 == 0);
   assert(base >= shader->push_consts.lo * 16);
   base -= shader->push_consts.lo * 16;
   nir_intrinsic_set_base(load, base / 4);
   load->src[0] =
      nir_src_for_ssa(nir_ushr(b, instr->src[0].ssa, nir_imm_int(b, 2)));
   nir_ssa_dest_init(&load->instr, &load->dest,
                     load->num_components, instr->dest.ssa.bit_size,
                     instr->dest.ssa.name);
   nir_builder_instr_insert(b, &load->instr);
   nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));

   nir_instr_remove(&instr->instr);
}
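
/* Lower vulkan_resource_index to a bindless_resource_ir3 handle: the constant
 * part is the descriptor's slot within its set (its byte offset divided by the
 * 4 * A6XX_TEX_CONST_DWORDS descriptor size), to which the original dynamic
 * index is added.
 */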
static void
lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr,
                            struct tu_shader *shader,
                            const struct tu_pipeline_layout *layout)
{
   nir_ssa_def *vulkan_idx = instr->src[0].ssa;

   unsigned set = nir_intrinsic_desc_set(instr);
   unsigned binding = nir_intrinsic_binding(instr);
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];
   uint32_t base;

   switch (binding_layout->type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      base = layout->set[set].dynamic_offset_start +
             binding_layout->dynamic_offset_offset +
             layout->input_attachment_count;
      break;
   default:
      base = binding_layout->offset / (4 * A6XX_TEX_CONST_DWORDS);
      break;
   }

   nir_intrinsic_instr *bindless =
      nir_intrinsic_instr_create(b->shader,
                                 nir_intrinsic_bindless_resource_ir3);
   bindless->num_components = 1;
   nir_ssa_dest_init(&bindless->instr, &bindless->dest,
                     1, 32, NULL);
   nir_intrinsic_set_desc_set(bindless, set);
   bindless->src[0] = nir_src_for_ssa(nir_iadd(b, nir_imm_int(b, base), vulkan_idx));
   nir_builder_instr_insert(b, &bindless->instr);

   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(&bindless->dest.ssa));
   nir_instr_remove(&instr->instr);
}
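
/* Build a bindless descriptor handle for an image/sampler deref, handling
 * input attachments, the sampler half of combined image/sampler descriptors
 * and array derefs.
 */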
static nir_ssa_def *
build_bindless(nir_builder *b, nir_deref_instr *deref, bool is_sampler,
               struct tu_shader *shader,
               const struct tu_pipeline_layout *layout)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   const struct tu_descriptor_set_binding_layout *bind_layout =
      &layout->set[set].layout->binding[binding];

   nir_ssa_def *desc_offset;
   unsigned descriptor_stride;
   if (bind_layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
      unsigned offset =
         layout->set[set].input_attachment_start +
         bind_layout->input_attachment_offset;
      desc_offset = nir_imm_int(b, offset);
      descriptor_stride = 1;
   } else {
      unsigned offset = 0;
      /* Samplers come second in combined image/sampler descriptors, see
       * write_combined_image_sampler_descriptor().
       */
      if (is_sampler && bind_layout->type ==
          VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
         offset = 1;
      }
      desc_offset =
         nir_imm_int(b, (bind_layout->offset / (4 * A6XX_TEX_CONST_DWORDS)) +
                     offset);
      descriptor_stride = bind_layout->size / (4 * A6XX_TEX_CONST_DWORDS);
   }

   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
      desc_offset = nir_iadd(b, desc_offset,
                             nir_imul_imm(b, arr_index, descriptor_stride));
   }

   nir_intrinsic_instr *bindless =
      nir_intrinsic_instr_create(b->shader,
                                 nir_intrinsic_bindless_resource_ir3);
   bindless->num_components = 1;
   nir_ssa_dest_init(&bindless->instr, &bindless->dest,
                     1, 32, NULL);
   nir_intrinsic_set_desc_set(bindless, set);
   bindless->src[0] = nir_src_for_ssa(desc_offset);
   nir_builder_instr_insert(b, &bindless->instr);

   return &bindless->dest.ssa;
}
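
/* Rewrite image_deref_* intrinsics to use a bindless handle instead of a
 * variable deref.
 */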
static void
lower_image_deref(nir_builder *b,
                  nir_intrinsic_instr *instr, struct tu_shader *shader,
                  const struct tu_pipeline_layout *layout)
{
   nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
   nir_ssa_def *bindless = build_bindless(b, deref, false, shader, layout);
   nir_rewrite_image_intrinsic(instr, bindless, true);
}
static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
                struct tu_shader *shader,
                const struct tu_pipeline_layout *layout)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_layer_id:
      /* TODO: remove this when layered rendering is implemented */
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(nir_imm_int(b, 0)));
      nir_instr_remove(&instr->instr);
      return true;

   case nir_intrinsic_load_push_constant:
      lower_load_push_constant(b, instr, shader);
      return true;

   case nir_intrinsic_vulkan_resource_index:
      lower_vulkan_resource_index(b, instr, shader, layout);
      return true;

   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_image_deref_samples:
      lower_image_deref(b, instr, shader, layout);
      return true;

   default:
      return false;
   }
}
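
/* Replace the texture and sampler deref sources of a tex instruction with
 * bindless handles.
 */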
static bool
lower_tex(nir_builder *b, nir_tex_instr *tex,
          struct tu_shader *shader, const struct tu_pipeline_layout *layout)
{
   int sampler_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_deref);
   if (sampler_src_idx >= 0) {
      nir_deref_instr *deref = nir_src_as_deref(tex->src[sampler_src_idx].src);
      nir_ssa_def *bindless = build_bindless(b, deref, true, shader, layout);
      nir_instr_rewrite_src(&tex->instr, &tex->src[sampler_src_idx].src,
                            nir_src_for_ssa(bindless));
      tex->src[sampler_src_idx].src_type = nir_tex_src_sampler_handle;
   }

   int tex_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
   if (tex_src_idx >= 0) {
      nir_deref_instr *deref = nir_src_as_deref(tex->src[tex_src_idx].src);
      nir_ssa_def *bindless = build_bindless(b, deref, false, shader, layout);
      nir_instr_rewrite_src(&tex->instr, &tex->src[tex_src_idx].src,
                            nir_src_for_ssa(bindless));
      tex->src[tex_src_idx].src_type = nir_tex_src_texture_handle;
   }

   return true;
}
static bool
lower_impl(nir_function_impl *impl, struct tu_shader *shader,
           const struct tu_pipeline_layout *layout)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         b.cursor = nir_before_instr(instr);
         switch (instr->type) {
         case nir_instr_type_tex:
            progress |= lower_tex(&b, nir_instr_as_tex(instr), shader, layout);
            break;
         case nir_instr_type_intrinsic:
            progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader, layout);
            break;
         default:
            break;
         }
      }
   }

   return progress;
}
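
/* Push constants are uploaded through UBO 0 ("user consts") in units of vec4.
 * Illustrative example: load_push_constant intrinsics covering bytes [20, 72)
 * give min = 20 and max = 72, so push_consts.lo = (20 / 16) / 4 * 4 = 0,
 * push_consts.count = align(72, 16) / 16 - 0 = 5 vec4s, and
 * num_reserved_user_consts = align(5, 4) = 8 vec4s.
 */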
/* Figure out the range of push constants that we're actually going to push to
 * the shader, and tell the backend to reserve this range when pushing UBO
 * constants.
 */
static void
gather_push_constants(nir_shader *shader, struct tu_shader *tu_shader)
{
   uint32_t min = UINT32_MAX, max = 0;
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_push_constant)
               continue;

            uint32_t base = nir_intrinsic_base(intrin);
            uint32_t range = nir_intrinsic_range(intrin);
            min = MIN2(min, base);
            max = MAX2(max, base + range);
         }
      }
   }

   if (min >= max) {
      tu_shader->push_consts.lo = 0;
      tu_shader->push_consts.count = 0;
      tu_shader->ir3_shader.const_state.num_reserved_user_consts = 0;
      return;
   }

   /* CP_LOAD_STATE OFFSET and NUM_UNIT are in units of vec4 (4 dwords),
    * however there's an alignment requirement of 4 on OFFSET. Expand the
    * range and change units accordingly.
    */
   tu_shader->push_consts.lo = (min / 16) / 4 * 4;
   tu_shader->push_consts.count =
      align(max, 16) / 16 - tu_shader->push_consts.lo;
   tu_shader->ir3_shader.const_state.num_reserved_user_consts =
      align(tu_shader->push_consts.count, 4);
}
/* Gather the InputAttachmentIndex for each input attachment from the NIR
 * shader and organize the info in a way so that draw-time patching is easy.
 */
static void
gather_input_attachments(nir_shader *shader, struct tu_shader *tu_shader,
                         const struct tu_pipeline_layout *layout)
{
   nir_foreach_variable(var, &shader->uniforms) {
      const struct glsl_type *glsl_type = glsl_without_array(var->type);

      if (!glsl_type_is_image(glsl_type))
         continue;

      enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_type);

      const uint32_t set = var->data.descriptor_set;
      const uint32_t binding = var->data.binding;
      const struct tu_descriptor_set_binding_layout *bind_layout =
         &layout->set[set].layout->binding[binding];
      const uint32_t array_size = bind_layout->array_size;

      if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
          dim == GLSL_SAMPLER_DIM_SUBPASS_MS) {
         unsigned offset =
            layout->set[set].input_attachment_start +
            bind_layout->input_attachment_offset;
         for (unsigned i = 0; i < array_size; i++)
            tu_shader->attachment_idx[offset + i] = var->data.index + i;
      }
   }
}
static bool
tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader,
            const struct tu_pipeline_layout *layout)
{
   bool progress = false;

   gather_push_constants(shader, tu_shader);
   gather_input_attachments(shader, tu_shader, layout);

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_impl(function->impl, tu_shader, layout);
   }

   return progress;
}
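
/* Convert the NIR transform feedback info into ir3_stream_output_info,
 * remapping VARYING_SLOT_* locations onto the compacted, consecutively
 * numbered outputs that ir3 expects.
 */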
static void
tu_gather_xfb_info(nir_shader *nir, struct tu_shader *shader)
{
   struct ir3_stream_output_info *info = &shader->ir3_shader.stream_output;
   nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);

   if (!xfb)
      return;

   /* creating a map from VARYING_SLOT_* enums to consecutive index */
   uint8_t num_outputs = 0;
   uint64_t outputs_written = 0;
   for (int i = 0; i < xfb->output_count; i++)
      outputs_written |= BITFIELD64_BIT(xfb->outputs[i].location);

   uint8_t output_map[VARYING_SLOT_TESS_MAX];
   memset(output_map, 0, sizeof(output_map));

   for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      if (outputs_written & BITFIELD64_BIT(attr))
         output_map[attr] = num_outputs++;
   }

   assert(xfb->output_count < IR3_MAX_SO_OUTPUTS);
   info->num_outputs = xfb->output_count;

   for (int i = 0; i < IR3_MAX_SO_BUFFERS; i++)
      info->stride[i] = xfb->buffers[i].stride / 4;

   for (int i = 0; i < xfb->output_count; i++) {
      info->output[i].register_index = output_map[xfb->outputs[i].location];
      info->output[i].start_component = xfb->outputs[i].component_offset;
      info->output[i].num_components =
         util_bitcount(xfb->outputs[i].component_mask);
      info->output[i].output_buffer = xfb->outputs[i].buffer;
      info->output[i].dst_offset = xfb->outputs[i].offset / 4;
      info->output[i].stream = xfb->buffer_to_stream[xfb->outputs[i].buffer];
   }

   ralloc_free(xfb);
}
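
/* Create a tu_shader from a shader module: translate SPIR-V to NIR, run the
 * turnip- and ir3-specific lowering passes, and attach the result to the
 * embedded ir3_shader so it can be compiled later by tu_shader_compile().
 */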
struct tu_shader *
tu_shader_create(struct tu_device *dev,
                 gl_shader_stage stage,
                 const VkPipelineShaderStageCreateInfo *stage_info,
                 struct tu_pipeline_layout *layout,
                 const VkAllocationCallbacks *alloc)
{
   const struct tu_shader_module *module =
      tu_shader_module_from_handle(stage_info->module);
   struct tu_shader *shader;

   const uint32_t max_variant_count = (stage == MESA_SHADER_VERTEX) ? 2 : 1;
   shader = vk_zalloc2(
      &dev->alloc, alloc,
      sizeof(*shader) + sizeof(struct ir3_shader_variant) * max_variant_count,
      8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!shader)
      return NULL;

   /* translate SPIR-V to NIR */
   assert(module->code_size % 4 == 0);
   nir_shader *nir = tu_spirv_to_nir(
      dev->compiler, (const uint32_t *) module->code, module->code_size / 4,
      stage, stage_info->pName, stage_info->pSpecializationInfo);
   if (!nir) {
      vk_free2(&dev->alloc, alloc, shader);
      return NULL;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_NIR)) {
      fprintf(stderr, "translated nir:\n");
      nir_print_shader(nir, stderr);
   }

   /* multi step inlining procedure */
   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_opt_deref);
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (!func->is_entrypoint)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   NIR_PASS_V(nir, nir_lower_variable_initializers, ~nir_var_function_temp);

   /* Split member structs. We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);

   /* Gather information for transform feedback.
    * This should be called after nir_split_per_member_structs.
    * Also needs to be called after nir_remove_dead_variables with varyings,
    * so that we could align stream outputs correctly.
    */
   if (nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_TESS_EVAL ||
       nir->info.stage == MESA_SHADER_GEOMETRY)
      tu_gather_xfb_info(nir, shader);

   NIR_PASS_V(nir, nir_propagate_invariant);

   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);

   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   NIR_PASS_V(nir, nir_opt_copy_prop_vars);
   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);

   /* ir3 doesn't support indirect input/output */
   NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out);

   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);

   nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs, stage);
   nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs, stage);

   NIR_PASS_V(nir, nir_lower_system_values);
   NIR_PASS_V(nir, nir_lower_frexp);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, nir_lower_input_attachments, true);

   NIR_PASS_V(nir, tu_lower_io, shader, layout);

   NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);

   if (stage == MESA_SHADER_FRAGMENT) {
      /* NOTE: lower load_barycentric_at_sample first, since it
       * produces load_barycentric_at_offset:
       */
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);

      NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
   }

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* num_uniforms only used by ir3 for size of ubo 0 (push constants) */
   nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE / 16;

   shader->ir3_shader.compiler = dev->compiler;
   shader->ir3_shader.type = stage;
   shader->ir3_shader.nir = nir;

   return shader;
}
void
tu_shader_destroy(struct tu_device *dev,
                  struct tu_shader *shader,
                  const VkAllocationCallbacks *alloc)
{
   if (shader->ir3_shader.nir)
      ralloc_free(shader->ir3_shader.nir);

   for (uint32_t i = 0; i < 1 + shader->has_binning_pass; i++) {
      if (shader->variants[i].ir)
         ir3_destroy(shader->variants[i].ir);
   }

   if (shader->ir3_shader.const_state.immediates)
      free(shader->ir3_shader.const_state.immediates);

   free(shader->binary);
   if (shader->binning_binary)
      free(shader->binning_binary);

   vk_free2(&dev->alloc, alloc, shader);
}
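
/* Derive compile options from the pipeline state: the ir3_shader_key bits
 * that depend on the pipeline (geometry stage present, MSAA) plus whether to
 * optimize and whether to build a binning-pass variant.
 */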
void
tu_shader_compile_options_init(
   struct tu_shader_compile_options *options,
   const VkGraphicsPipelineCreateInfo *pipeline_info)
{
   bool has_gs = false;
   bool msaa = false;
   if (pipeline_info) {
      for (uint32_t i = 0; i < pipeline_info->stageCount; i++) {
         if (pipeline_info->pStages[i].stage == VK_SHADER_STAGE_GEOMETRY_BIT) {
            has_gs = true;
            break;
         }
      }

      const VkPipelineMultisampleStateCreateInfo *msaa_info = pipeline_info->pMultisampleState;
      const struct VkPipelineSampleLocationsStateCreateInfoEXT *sample_locations =
         vk_find_struct_const(msaa_info->pNext, PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
      if (!pipeline_info->pRasterizationState->rasterizerDiscardEnable &&
          (msaa_info->rasterizationSamples > 1 ||
           /* also set msaa key when sample location is not the default
            * since this affects varying interpolation */
           (sample_locations && sample_locations->sampleLocationsEnable))) {
         msaa = true;
      }
   }

   *options = (struct tu_shader_compile_options) {
      /* TODO: Populate the remaining fields of ir3_shader_key. */
      .key = {
         .has_gs = has_gs,
         .msaa = msaa,
      },
      /* TODO: VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT
       * some optimizations need to happen otherwise shader might not compile
       */
      .optimize = true,
      .include_binning_pass = true,
   };
}
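
/* Compile and assemble a single ir3 variant. Returns the assembled binary or
 * NULL on failure; a non-NULL nonbinning variant marks this as the
 * binning-pass variant.
 */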
static void *
tu_compile_shader_variant(struct ir3_shader *shader,
                          const struct ir3_shader_key *key,
                          struct ir3_shader_variant *nonbinning,
                          struct ir3_shader_variant *variant)
{
   variant->shader = shader;
   variant->type = shader->type;
   variant->key = *key;
   variant->binning_pass = !!nonbinning;
   variant->nonbinning = nonbinning;

   int ret = ir3_compile_shader_nir(shader->compiler, variant);
   if (ret)
      return NULL;

   /* when assemble fails, we rely on tu_shader_destroy to clean up the
    * variant
    */
   return ir3_shader_assemble(variant, shader->compiler->gpu_id);
}
VkResult
tu_shader_compile(struct tu_device *dev,
                  struct tu_shader *shader,
                  const struct tu_shader *next_stage,
                  const struct tu_shader_compile_options *options,
                  const VkAllocationCallbacks *alloc)
{
   if (options->optimize) {
      /* ignore the key for the first pass of optimization */
      ir3_optimize_nir(&shader->ir3_shader, shader->ir3_shader.nir, NULL);

      if (unlikely(dev->physical_device->instance->debug_flags &
                   TU_DEBUG_NIR)) {
         fprintf(stderr, "optimized nir:\n");
         nir_print_shader(shader->ir3_shader.nir, stderr);
      }
   }

   shader->binary = tu_compile_shader_variant(
      &shader->ir3_shader, &options->key, NULL, &shader->variants[0]);
   if (!shader->binary)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (shader_debug_enabled(shader->ir3_shader.type)) {
      fprintf(stdout, "Native code for unnamed %s shader %s:\n",
              ir3_shader_stage(&shader->variants[0]), shader->ir3_shader.nir->info.name);
      if (shader->ir3_shader.type == MESA_SHADER_FRAGMENT)
         fprintf(stdout, "SIMD0\n");
      ir3_shader_disasm(&shader->variants[0], shader->binary, stdout);
   }

   /* compile another variant for the binning pass */
   if (options->include_binning_pass &&
       shader->ir3_shader.type == MESA_SHADER_VERTEX) {
      shader->binning_binary = tu_compile_shader_variant(
         &shader->ir3_shader, &options->key, &shader->variants[0],
         &shader->variants[1]);
      if (!shader->binning_binary)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      shader->has_binning_pass = true;

      if (shader_debug_enabled(MESA_SHADER_VERTEX)) {
         fprintf(stdout, "Native code for unnamed binning shader %s:\n",
                 shader->ir3_shader.nir->info.name);
         ir3_shader_disasm(&shader->variants[1], shader->binning_binary, stdout);
      }
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_IR3)) {
      fprintf(stderr, "disassembled ir3:\n");
      fprintf(stderr, "shader: %s\n",
              gl_shader_stage_name(shader->ir3_shader.type));
      ir3_shader_disasm(&shader->variants[0], shader->binary, stderr);

      if (shader->has_binning_pass) {
         fprintf(stderr, "disassembled ir3:\n");
         fprintf(stderr, "shader: %s (binning)\n",
                 gl_shader_stage_name(shader->ir3_shader.type));
         ir3_shader_disasm(&shader->variants[1], shader->binning_binary,
                           stderr);
      }
   }

   return VK_SUCCESS;
}
VkResult
tu_CreateShaderModule(VkDevice _device,
                      const VkShaderModuleCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkShaderModule *pShaderModule)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);
   assert(pCreateInfo->codeSize % 4 == 0);

   module = vk_alloc2(&device->alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   module->code_size = pCreateInfo->codeSize;
   memcpy(module->code, pCreateInfo->pCode, pCreateInfo->codeSize);

   _mesa_sha1_compute(module->code, module->code_size, module->sha1);

   *pShaderModule = tu_shader_module_to_handle(module);

   return VK_SUCCESS;
}
void
tu_DestroyShaderModule(VkDevice _device,
                       VkShaderModule _module,
                       const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_shader_module, module, _module);

   if (!module)
      return;

   vk_free2(&device->alloc, pAllocator, module);
}