/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "spirv/nir_spirv.h"
#include "util/mesa-sha1.h"
#include "nir/nir_xfb_info.h"

#include "ir3/ir3_nir.h"
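
/* Translate a SPIR-V module to NIR with the ir3 compiler options, converting
 * the Vulkan specialization info into the form spirv_to_nir expects.
 */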
static nir_shader *
tu_spirv_to_nir(struct ir3_compiler *compiler,
                const uint32_t *words,
                size_t word_count,
                gl_shader_stage stage,
                const char *entry_point_name,
                const VkSpecializationInfo *spec_info)
{
   /* TODO these are made-up */
   const struct spirv_to_nir_options spirv_options = {
      .frag_coord_is_sysval = true,
      .lower_ubo_ssbo_access_to_offsets = true,
      .caps = {
         .transform_feedback = compiler->gpu_id >= 600,
      },
   };
   const nir_shader_compiler_options *nir_options =
      ir3_get_compiler_options(compiler);

   /* convert VkSpecializationInfo */
   struct nir_spirv_specialization *spec = NULL;
   uint32_t num_spec = 0;
   if (spec_info && spec_info->mapEntryCount) {
      spec = malloc(sizeof(*spec) * spec_info->mapEntryCount);
      if (!spec)
         return NULL;

      for (uint32_t i = 0; i < spec_info->mapEntryCount; i++) {
         const VkSpecializationMapEntry *entry = &spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry->offset;
         assert(data + entry->size <= spec_info->pData + spec_info->dataSize);
         spec[i].id = entry->constantID;

         if (entry->size == 8)
            spec[i].data64 = *(const uint64_t *) data;
         else
            spec[i].data32 = *(const uint32_t *) data;
         spec[i].defined_on_module = false;
      }

      num_spec = spec_info->mapEntryCount;
   }

   nir_shader *nir =
      spirv_to_nir(words, word_count, spec, num_spec, stage, entry_point_name,
                   &spirv_options, nir_options);

   free(spec);

   assert(nir->info.stage == stage);
   nir_validate_shader(nir, "after spirv_to_nir");

   return nir;
}
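
/* Find or append the (set, binding) pair in a tu_descriptor_map and return
 * the flat descriptor index at which its descriptors start.
 */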
static unsigned
map_add(struct tu_descriptor_map *map, int set, int binding, int value,
        int array_size)
{
   unsigned index = 0;
   for (unsigned i = 0; i < map->num; i++) {
      if (set == map->set[i] && binding == map->binding[i]) {
         assert(value == map->value[i]);
         assert(array_size == map->array_size[i]);
         return index;
      }

      index += map->array_size[i];
   }

   assert(index == map->num_desc);

   map->set[map->num] = set;
   map->binding[map->num] = binding;
   map->value[map->num] = value;
   map->array_size[map->num] = array_size;
   map->num++;
   map->num_desc += array_size;

   return index;
}
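
/* Rewrite a texture/sampler deref source into a flat descriptor index: the
 * constant part of the array index becomes the texture/sampler index, and
 * any dynamic part is turned into a texture/sampler offset source.
 */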
static void
lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
                        struct tu_shader *shader,
                        const struct tu_pipeline_layout *layout)
{
   nir_ssa_def *index = NULL;
   unsigned base_index = 0;
   unsigned array_elements = 1;
   nir_tex_src *src = &instr->src[src_idx];
   bool is_sampler = src->src_type == nir_tex_src_sampler_deref;

   /* We compute first the offsets */
   nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
   while (deref->deref_type != nir_deref_type_var) {
      assert(deref->parent.is_ssa);
      nir_deref_instr *parent =
         nir_instr_as_deref(deref->parent.ssa->parent_instr);

      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index) && index == NULL) {
         /* We're still building a direct index */
         base_index += nir_src_as_uint(deref->arr.index) * array_elements;
      } else {
         if (index == NULL) {
            /* We used to be direct but not anymore */
            index = nir_imm_int(b, base_index);
            base_index = 0;
         }

         index = nir_iadd(b, index,
                          nir_imul(b, nir_imm_int(b, array_elements),
                                   nir_ssa_for_src(b, deref->arr.index, 1)));
      }

      array_elements *= glsl_get_length(parent->type);

      deref = parent;
   }

   if (index)
      index = nir_umin(b, index, nir_imm_int(b, array_elements - 1));

   /* We have the offsets, we apply them, rewriting the source or removing
    * instr if needed
    */
   if (index) {
      nir_instr_rewrite_src(&instr->instr, &src->src,
                            nir_src_for_ssa(index));

      src->src_type = is_sampler ?
         nir_tex_src_sampler_offset :
         nir_tex_src_texture_offset;
   } else {
      nir_tex_instr_remove_src(instr, src_idx);
   }

   uint32_t set = deref->var->data.descriptor_set;
   uint32_t binding = deref->var->data.binding;
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];

   int desc_index = map_add(is_sampler ?
                            &shader->sampler_map : &shader->texture_map,
                            deref->var->data.descriptor_set,
                            deref->var->data.binding,
                            deref->var->data.index,
                            binding_layout->array_size) + base_index;
   if (is_sampler)
      instr->sampler_index = desc_index;
   else
      instr->texture_index = desc_index;
}
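
/* Lower both the texture and sampler deref sources of a tex instruction.
 * Returns true if anything was lowered.
 */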
static bool
lower_sampler(nir_builder *b, nir_tex_instr *instr, struct tu_shader *shader,
              const struct tu_pipeline_layout *layout)
{
   int texture_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);

   if (texture_idx >= 0)
      lower_tex_src_to_offset(b, instr, texture_idx, shader, layout);

   int sampler_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);

   if (sampler_idx >= 0)
      lower_tex_src_to_offset(b, instr, sampler_idx, shader, layout);

   if (texture_idx < 0 && sampler_idx < 0)
      return false;

   return true;
}
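
/* Turn load_push_constant into load_uniform from the const registers reserved
 * for push constants, rebasing the offset against push_consts.lo and
 * converting from bytes to dwords.
 */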
static void
lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *instr,
                         struct tu_shader *shader)
{
   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
   load->num_components = instr->num_components;
   uint32_t base = nir_intrinsic_base(instr);
   assert(base % 4 == 0);
   assert(base >= shader->push_consts.lo * 16);
   base -= shader->push_consts.lo * 16;
   nir_intrinsic_set_base(load, base / 4);
   load->src[0] =
      nir_src_for_ssa(nir_ushr(b, instr->src[0].ssa, nir_imm_int(b, 2)));
   nir_ssa_dest_init(&load->instr, &load->dest,
                     load->num_components, instr->dest.ssa.bit_size,
                     instr->dest.ssa.name);
   nir_builder_instr_insert(b, &load->instr);
   nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));

   nir_instr_remove(&instr->instr);
}
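
/* Replace vulkan_resource_index with a flat UBO/SSBO index taken from the
 * shader's descriptor maps; UBO index 0 is reserved for push constants.
 */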
static void
lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr,
                            struct tu_shader *shader,
                            const struct tu_pipeline_layout *layout)
{
   nir_const_value *const_val = nir_src_as_const_value(instr->src[0]);

   unsigned set = nir_intrinsic_desc_set(instr);
   unsigned binding = nir_intrinsic_binding(instr);
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];

   uint32_t index = 0;
   switch (nir_intrinsic_desc_type(instr)) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      if (!const_val)
         tu_finishme("non-constant vulkan_resource_index array index");
      /* skip index 0 which is used for push constants */
      index = map_add(&shader->ubo_map, set, binding, 0,
                      binding_layout->array_size) + 1;
      index += const_val->u32;
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      if (!const_val)
         tu_finishme("non-constant vulkan_resource_index array index");
      index = map_add(&shader->ssbo_map, set, binding, 0,
                      binding_layout->array_size);
      index += const_val->u32;
      break;
   default:
      tu_finishme("unsupported desc_type for vulkan_resource_index");
      break;
   }

   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(nir_imm_int(b, index)));
   nir_instr_remove(&instr->instr);
}
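
/* Rewrite an image deref intrinsic to use a flat image index from the
 * shader's image map, adding the (possibly dynamic) array index on top.
 */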
static void
lower_image_deref(nir_builder *b,
                  nir_intrinsic_instr *instr, struct tu_shader *shader,
                  const struct tu_pipeline_layout *layout)
{
   nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   uint32_t set = var->data.descriptor_set;
   uint32_t binding = var->data.binding;
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];

   nir_ssa_def *index = nir_imm_int(b,
                                    map_add(&shader->image_map,
                                            set, binding, var->data.index,
                                            binding_layout->array_size));
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);
      index = nir_iadd(b, index, nir_ssa_for_src(b, deref->arr.index, 1));
   }

   nir_rewrite_image_intrinsic(instr, index, false);
}
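
/* Dispatch the intrinsics that need driver-specific lowering. Returns true
 * if the instruction was lowered.
 */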
static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
                struct tu_shader *shader,
                const struct tu_pipeline_layout *layout)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_layer_id:
      /* TODO: remove this when layered rendering is implemented */
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(nir_imm_int(b, 0)));
      nir_instr_remove(&instr->instr);
      return true;

   case nir_intrinsic_load_push_constant:
      lower_load_push_constant(b, instr, shader);
      return true;

   case nir_intrinsic_vulkan_resource_index:
      lower_vulkan_resource_index(b, instr, shader, layout);
      return true;

   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_image_deref_samples:
   case nir_intrinsic_image_deref_load_param_intel:
   case nir_intrinsic_image_deref_load_raw_intel:
   case nir_intrinsic_image_deref_store_raw_intel:
      lower_image_deref(b, instr, shader, layout);
      return true;

   default:
      return false;
   }
}

/* Figure out the range of push constants that we're actually going to push to
 * the shader, and tell the backend to reserve this range when pushing UBO
 * constants.
 */
static void
gather_push_constants(nir_shader *shader, struct tu_shader *tu_shader)
{
   uint32_t min = UINT32_MAX, max = 0;
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_push_constant)
               continue;

            uint32_t base = nir_intrinsic_base(intrin);
            uint32_t range = nir_intrinsic_range(intrin);
            min = MIN2(min, base);
            max = MAX2(max, base + range);
         }
      }
   }

   if (min >= max) {
      tu_shader->push_consts.lo = 0;
      tu_shader->push_consts.count = 0;
      tu_shader->ir3_shader.const_state.num_reserved_user_consts = 0;
      return;
   }

   /* CP_LOAD_STATE OFFSET and NUM_UNIT are in units of vec4 (4 dwords),
    * however there's an alignment requirement of 4 on OFFSET. Expand the
    * range and change units accordingly.
    */
   tu_shader->push_consts.lo = (min / 16) / 4 * 4;
   tu_shader->push_consts.count =
      align(max, 16) / 16 - tu_shader->push_consts.lo;
   tu_shader->ir3_shader.const_state.num_reserved_user_consts =
      align(tu_shader->push_consts.count, 4);
}
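
/* Walk one function impl and lower all the tex and intrinsic instructions
 * that reference Vulkan descriptors or push constants.
 */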
static bool
lower_impl(nir_function_impl *impl, struct tu_shader *shader,
           const struct tu_pipeline_layout *layout)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         b.cursor = nir_before_instr(instr);
         switch (instr->type) {
         case nir_instr_type_tex:
            progress |= lower_sampler(&b, nir_instr_as_tex(instr), shader, layout);
            break;
         case nir_instr_type_intrinsic:
            progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader, layout);
            break;
         default:
            break;
         }
      }
   }

   return progress;
}
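
/* Driver-specific IO lowering: gather the push constant range, lower
 * descriptor accesses in every function, and fix up num_ssbos for ir3.
 */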
static bool
tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader,
            const struct tu_pipeline_layout *layout)
{
   bool progress = false;

   gather_push_constants(shader, tu_shader);

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_impl(function->impl, tu_shader, layout);
   }

   /* spirv_to_nir produces num_ssbos equal to the number of SSBO-containing
    * variables, while ir3 wants the number of descriptors (like the gallium
    * path).
    */
   shader->info.num_ssbos = tu_shader->ssbo_map.num_desc;

   return progress;
}
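
/* Convert the NIR transform feedback info into the ir3_stream_output_info
 * layout that the ir3 backend consumes.
 */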
static void
tu_gather_xfb_info(nir_shader *nir, struct tu_shader *shader)
{
   struct ir3_stream_output_info *info = &shader->ir3_shader.stream_output;
   nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);

   if (!xfb)
      return;

   /* creating a map from VARYING_SLOT_* enums to consecutive index */
   uint8_t num_outputs = 0;
   uint64_t outputs_written = 0;
   for (int i = 0; i < xfb->output_count; i++)
      outputs_written |= BITFIELD64_BIT(xfb->outputs[i].location);

   uint8_t output_map[VARYING_SLOT_TESS_MAX];
   memset(output_map, 0, sizeof(output_map));

   for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      if (outputs_written & BITFIELD64_BIT(attr))
         output_map[attr] = num_outputs++;
   }

   assert(xfb->output_count < IR3_MAX_SO_OUTPUTS);
   info->num_outputs = xfb->output_count;

   for (int i = 0; i < IR3_MAX_SO_BUFFERS; i++)
      info->stride[i] = xfb->buffers[i].stride / 4;

   for (int i = 0; i < xfb->output_count; i++) {
      info->output[i].register_index = output_map[xfb->outputs[i].location];
      info->output[i].start_component = xfb->outputs[i].component_offset;
      info->output[i].num_components =
         util_bitcount(xfb->outputs[i].component_mask);
      info->output[i].output_buffer = xfb->outputs[i].buffer;
      info->output[i].dst_offset = xfb->outputs[i].offset / 4;
      info->output[i].stream = xfb->buffer_to_stream[xfb->outputs[i].buffer];
   }

   ralloc_free(xfb);
}
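
/* Create a tu_shader from a pipeline stage: translate the SPIR-V module to
 * NIR and run the lowering passes needed before handing it to ir3.
 */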
struct tu_shader *
tu_shader_create(struct tu_device *dev,
                 gl_shader_stage stage,
                 const VkPipelineShaderStageCreateInfo *stage_info,
                 struct tu_pipeline_layout *layout,
                 const VkAllocationCallbacks *alloc)
{
   const struct tu_shader_module *module =
      tu_shader_module_from_handle(stage_info->module);
   struct tu_shader *shader;

   const uint32_t max_variant_count = (stage == MESA_SHADER_VERTEX) ? 2 : 1;
   shader = vk_zalloc2(
      &dev->alloc, alloc,
      sizeof(*shader) + sizeof(struct ir3_shader_variant) * max_variant_count,
      8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!shader)
      return NULL;

   /* translate SPIR-V to NIR */
   assert(module->code_size % 4 == 0);
   nir_shader *nir = tu_spirv_to_nir(
      dev->compiler, (const uint32_t *) module->code, module->code_size / 4,
      stage, stage_info->pName, stage_info->pSpecializationInfo);
   if (!nir) {
      vk_free2(&dev->alloc, alloc, shader);
      return NULL;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_NIR)) {
      fprintf(stderr, "translated nir:\n");
      nir_print_shader(nir, stderr);
   }

   /* multi step inlining procedure */
   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_opt_deref);
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (!func->is_entrypoint)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   NIR_PASS_V(nir, nir_lower_variable_initializers, ~nir_var_function_temp);

   /* Split member structs. We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);

   /* Gather information for transform feedback.
    * This should be called after nir_split_per_member_structs.
    * Also needs to be called after nir_remove_dead_variables with varyings,
    * so that we could align stream outputs correctly.
    */
   if (nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_TESS_EVAL ||
       nir->info.stage == MESA_SHADER_GEOMETRY)
      tu_gather_xfb_info(nir, shader);

   NIR_PASS_V(nir, nir_propagate_invariant);

   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);

   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   NIR_PASS_V(nir, nir_opt_copy_prop_vars);
   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);

   /* ir3 doesn't support indirect input/output */
   NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out);

   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);

   nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs, stage);
   nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs, stage);

   NIR_PASS_V(nir, nir_lower_system_values);
   NIR_PASS_V(nir, nir_lower_frexp);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, nir_lower_input_attachments, true);

   NIR_PASS_V(nir, tu_lower_io, shader, layout);

   NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);

   if (stage == MESA_SHADER_FRAGMENT) {
      /* NOTE: lower load_barycentric_at_sample first, since it
       * produces load_barycentric_at_offset:
       */
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);

      NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
   }

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* num_uniforms only used by ir3 for size of ubo 0 (push constants) */
   nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE / 16;

   shader->ir3_shader.compiler = dev->compiler;
   shader->ir3_shader.type = stage;
   shader->ir3_shader.nir = nir;

   return shader;
}
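
/* Free everything owned by a tu_shader: the NIR, the compiled variants and
 * their binaries, and finally the shader object itself.
 */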
void
tu_shader_destroy(struct tu_device *dev,
                  struct tu_shader *shader,
                  const VkAllocationCallbacks *alloc)
{
   if (shader->ir3_shader.nir)
      ralloc_free(shader->ir3_shader.nir);

   for (uint32_t i = 0; i < 1 + shader->has_binning_pass; i++) {
      if (shader->variants[i].ir)
         ir3_destroy(shader->variants[i].ir);
   }

   if (shader->ir3_shader.const_state.immediates)
      free(shader->ir3_shader.const_state.immediates);

   free(shader->binary);
   if (shader->binning_binary)
      free(shader->binning_binary);

   vk_free2(&dev->alloc, alloc, shader);
}
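
/* Initialize the shader compile options from the pipeline create info. */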
void
tu_shader_compile_options_init(
   struct tu_shader_compile_options *options,
   const VkGraphicsPipelineCreateInfo *pipeline_info)
{
   bool has_gs = false;
   if (pipeline_info) {
      for (uint32_t i = 0; i < pipeline_info->stageCount; i++) {
         if (pipeline_info->pStages[i].stage == VK_SHADER_STAGE_GEOMETRY_BIT)
            has_gs = true;
      }
   }

   *options = (struct tu_shader_compile_options) {
      /* TODO: Populate the remaining fields of ir3_shader_key. */
      .key = {
         .has_gs = has_gs,
      },
      /* TODO: VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT
       * some optimizations need to happen otherwise shader might not compile
       */
      .optimize = true,

      .include_binning_pass = true,
   };
}
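
/* Compile and assemble one ir3 variant; returns the assembled binary or NULL
 * on failure.
 */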
static void *
tu_compile_shader_variant(struct ir3_shader *shader,
                          const struct ir3_shader_key *key,
                          struct ir3_shader_variant *nonbinning,
                          struct ir3_shader_variant *variant)
{
   variant->shader = shader;
   variant->type = shader->type;
   variant->key = *key;
   variant->binning_pass = !!nonbinning;
   variant->nonbinning = nonbinning;

   int ret = ir3_compile_shader_nir(shader->compiler, variant);
   if (ret)
      return NULL;

   /* when assemble fails, we rely on tu_shader_destroy to clean up the
    * variant
    */
   return ir3_shader_assemble(variant, shader->compiler->gpu_id);
}
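
/* Compile the main variant (and optionally a binning variant for vertex
 * shaders), printing NIR/ir3 disassembly when the matching debug flags are
 * set.
 */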
VkResult
tu_shader_compile(struct tu_device *dev,
                  struct tu_shader *shader,
                  const struct tu_shader *next_stage,
                  const struct tu_shader_compile_options *options,
                  const VkAllocationCallbacks *alloc)
{
   if (options->optimize) {
      /* ignore the key for the first pass of optimization */
      ir3_optimize_nir(&shader->ir3_shader, shader->ir3_shader.nir, NULL);

      if (unlikely(dev->physical_device->instance->debug_flags &
                   TU_DEBUG_NIR)) {
         fprintf(stderr, "optimized nir:\n");
         nir_print_shader(shader->ir3_shader.nir, stderr);
      }
   }

   shader->binary = tu_compile_shader_variant(
      &shader->ir3_shader, &options->key, NULL, &shader->variants[0]);
   if (!shader->binary)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (shader_debug_enabled(shader->ir3_shader.type)) {
      fprintf(stdout, "Native code for unnamed %s shader %s:\n",
              ir3_shader_stage(&shader->variants[0]), shader->ir3_shader.nir->info.name);
      if (shader->ir3_shader.type == MESA_SHADER_FRAGMENT)
         fprintf(stdout, "SIMD0\n");
      ir3_shader_disasm(&shader->variants[0], shader->binary, stdout);
   }

   /* compile another variant for the binning pass */
   if (options->include_binning_pass &&
       shader->ir3_shader.type == MESA_SHADER_VERTEX) {
      shader->binning_binary = tu_compile_shader_variant(
         &shader->ir3_shader, &options->key, &shader->variants[0],
         &shader->variants[1]);
      if (!shader->binning_binary)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      shader->has_binning_pass = true;

      if (shader_debug_enabled(MESA_SHADER_VERTEX)) {
         fprintf(stdout, "Native code for unnamed binning shader %s:\n",
                 shader->ir3_shader.nir->info.name);
         ir3_shader_disasm(&shader->variants[1], shader->binning_binary, stdout);
      }
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_IR3)) {
      fprintf(stderr, "disassembled ir3:\n");
      fprintf(stderr, "shader: %s\n",
              gl_shader_stage_name(shader->ir3_shader.type));
      ir3_shader_disasm(&shader->variants[0], shader->binary, stderr);

      if (shader->has_binning_pass) {
         fprintf(stderr, "disassembled ir3:\n");
         fprintf(stderr, "shader: %s (binning)\n",
                 gl_shader_stage_name(shader->ir3_shader.type));
         ir3_shader_disasm(&shader->variants[1], shader->binning_binary,
                           stderr);
      }
   }

   return VK_SUCCESS;
}

VkResult
tu_CreateShaderModule(VkDevice _device,
                      const VkShaderModuleCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkShaderModule *pShaderModule)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);
   assert(pCreateInfo->codeSize % 4 == 0);

   module = vk_alloc2(&device->alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   module->code_size = pCreateInfo->codeSize;
   memcpy(module->code, pCreateInfo->pCode, pCreateInfo->codeSize);

   _mesa_sha1_compute(module->code, module->code_size, module->sha1);

   *pShaderModule = tu_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void
tu_DestroyShaderModule(VkDevice _device,
                       VkShaderModule _module,
                       const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_shader_module, module, _module);

   if (!module)
      return;

   vk_free2(&device->alloc, pAllocator, module);
}