/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "spirv/nir_spirv.h"
#include "util/mesa-sha1.h"
#include "nir/nir_xfb_info.h"
#include "nir/nir_vulkan.h"

#include "ir3/ir3_nir.h"

static nir_shader *
tu_spirv_to_nir(struct ir3_compiler *compiler,
                const uint32_t *words,
                size_t word_count,
                gl_shader_stage stage,
                const char *entry_point_name,
                const VkSpecializationInfo *spec_info)
{
   /* TODO these are made-up */
   const struct spirv_to_nir_options spirv_options = {
      .frag_coord_is_sysval = true,
      .lower_ubo_ssbo_access_to_offsets = false,

      .ubo_addr_format = nir_address_format_vec2_index_32bit_offset,
      .ssbo_addr_format = nir_address_format_vec2_index_32bit_offset,

      /* Accessed via stg/ldg */
      .phys_ssbo_addr_format = nir_address_format_64bit_global,

      /* Accessed via the const register file */
      .push_const_addr_format = nir_address_format_logical,

      /* Accessed via ldl/stl */
      .shared_addr_format = nir_address_format_32bit_offset,

      /* Accessed via stg/ldg (not used with Vulkan?) */
      .global_addr_format = nir_address_format_64bit_global,

      .caps = {
         .transform_feedback = true,
         .draw_parameters = true,
         .variable_pointers = true,
         .stencil_export = true,
      },
   };

   const nir_shader_compiler_options *nir_options =
      ir3_get_compiler_options(compiler);

   /* convert VkSpecializationInfo */
   struct nir_spirv_specialization *spec = NULL;
   uint32_t num_spec = 0;
   if (spec_info && spec_info->mapEntryCount) {
      spec = calloc(spec_info->mapEntryCount, sizeof(*spec));

      for (uint32_t i = 0; i < spec_info->mapEntryCount; i++) {
         const VkSpecializationMapEntry *entry = &spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry->offset;
         assert(data + entry->size <= spec_info->pData + spec_info->dataSize);
         spec[i].id = entry->constantID;
         switch (entry->size) {
         case 8:
            spec[i].value.u64 = *(const uint64_t *)data;
            break;
         case 4:
            spec[i].value.u32 = *(const uint32_t *)data;
            break;
         case 2:
            spec[i].value.u16 = *(const uint16_t *)data;
            break;
         case 1:
            spec[i].value.u8 = *(const uint8_t *)data;
            break;
         default:
            assert(!"Invalid spec constant size");
            break;
         }
         spec[i].defined_on_module = false;
      }

      num_spec = spec_info->mapEntryCount;
   }

   nir_shader *nir =
      spirv_to_nir(words, word_count, spec, num_spec, stage, entry_point_name,
                   &spirv_options, nir_options);

   free(spec);

   assert(nir->info.stage == stage);
   nir_validate_shader(nir, "after spirv_to_nir");

   return nir;
}
static void
lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *instr,
                         struct tu_shader *shader)
{
   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
   load->num_components = instr->num_components;
   uint32_t base = nir_intrinsic_base(instr);
   assert(base % 4 == 0);
   assert(base >= shader->push_consts.lo * 16);
   base -= shader->push_consts.lo * 16;
   nir_intrinsic_set_base(load, base / 4);
   load->src[0] =
      nir_src_for_ssa(nir_ushr(b, instr->src[0].ssa, nir_imm_int(b, 2)));
   nir_ssa_dest_init(&load->instr, &load->dest,
                     load->num_components, instr->dest.ssa.bit_size,
                     instr->dest.ssa.name);
   nir_builder_instr_insert(b, &load->instr);
   nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));

   nir_instr_remove(&instr->instr);
}
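
/* Lower vulkan_resource_index into the (set, descriptor index) form that the
 * bindless lowering below consumes. Dynamic UBO/SSBO descriptors live in a
 * separate, driver-internal descriptor set after the application sets.
 */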
static void
lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr,
                            struct tu_shader *shader,
                            const struct tu_pipeline_layout *layout)
{
   nir_ssa_def *vulkan_idx = instr->src[0].ssa;

   unsigned set = nir_intrinsic_desc_set(instr);
   unsigned binding = nir_intrinsic_binding(instr);
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];
   uint32_t base;

   shader->active_desc_sets |= 1u << set;

   switch (binding_layout->type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      base = layout->set[set].dynamic_offset_start +
         binding_layout->dynamic_offset_offset;
      set = MAX_SETS;
      break;
   default:
      base = binding_layout->offset / (4 * A6XX_TEX_CONST_DWORDS);
      break;
   }

   nir_ssa_def *def = nir_vec3(b, nir_imm_int(b, set),
                               nir_iadd(b, nir_imm_int(b, base), vulkan_idx),
                               nir_imm_int(b, 0));

   nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(def));
   nir_instr_remove(&instr->instr);
}

static void
lower_load_vulkan_descriptor(nir_intrinsic_instr *intrin)
{
   /* Loading the descriptor happens as part of the load/store instruction so
    * this is a no-op: simply forward the (set, index) source.
    */
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, intrin->src[0]);
   nir_instr_remove(&intrin->instr);
}

static void
lower_ssbo_ubo_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[intrin->intrinsic];

   /* The bindless base is part of the instruction, which means that part of
    * the "pointer" has to be constant. We solve this in the same way the blob
    * does, by generating a bunch of if-statements. In the usual case where
    * the descriptor set is constant this will get optimized out.
    */

   unsigned buffer_src;
   if (intrin->intrinsic == nir_intrinsic_store_ssbo) {
      /* This has the value first */
      buffer_src = 1;
   } else {
      buffer_src = 0;
   }

   nir_ssa_def *base_idx = nir_channel(b, intrin->src[buffer_src].ssa, 0);
   nir_ssa_def *descriptor_idx = nir_channel(b, intrin->src[buffer_src].ssa, 1);

   nir_ssa_def *results[MAX_SETS + 1] = { NULL };

   for (unsigned i = 0; i < MAX_SETS + 1; i++) {
      /* if (base_idx == i) { ... */
      nir_if *nif = nir_push_if(b, nir_ieq(b, base_idx, nir_imm_int(b, i)));

      nir_intrinsic_instr *bindless =
         nir_intrinsic_instr_create(b->shader,
                                    nir_intrinsic_bindless_resource_ir3);
      bindless->num_components = 0;
      nir_ssa_dest_init(&bindless->instr, &bindless->dest,
                        1, 32, NULL);
      nir_intrinsic_set_desc_set(bindless, i);
      bindless->src[0] = nir_src_for_ssa(descriptor_idx);
      nir_builder_instr_insert(b, &bindless->instr);

      nir_intrinsic_instr *copy =
         nir_intrinsic_instr_create(b->shader, intrin->intrinsic);

      copy->num_components = intrin->num_components;

      for (unsigned src = 0; src < info->num_srcs; src++) {
         if (src == buffer_src)
            copy->src[src] = nir_src_for_ssa(&bindless->dest.ssa);
         else
            copy->src[src] = nir_src_for_ssa(intrin->src[src].ssa);
      }

      for (unsigned idx = 0; idx < info->num_indices; idx++) {
         copy->const_index[idx] = intrin->const_index[idx];
      }

      if (info->has_dest) {
         nir_ssa_dest_init(&copy->instr, &copy->dest,
                           intrin->dest.ssa.num_components,
                           intrin->dest.ssa.bit_size,
                           intrin->dest.ssa.name);
         results[i] = &copy->dest.ssa;
      }

      nir_builder_instr_insert(b, &copy->instr);

      /* } else { ... */
      nir_push_else(b, nif);
   }

   nir_ssa_def *result =
      nir_ssa_undef(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
   for (int i = MAX_SETS; i >= 0; i--) {
      nir_pop_if(b, NULL);
      if (info->has_dest)
         result = nir_if_phi(b, results[i], result);
   }

   if (info->has_dest)
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(result));
   nir_instr_remove(&intrin->instr);
}
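
/* Build a bindless_resource_ir3 handle for a texture/sampler/image descriptor
 * referenced through a variable deref. Input attachments are the exception:
 * they use the non-bindless path and return a plain texture index instead.
 */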
static nir_ssa_def *
build_bindless(nir_builder *b, nir_deref_instr *deref, bool is_sampler,
               struct tu_shader *shader,
               const struct tu_pipeline_layout *layout)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   const struct tu_descriptor_set_binding_layout *bind_layout =
      &layout->set[set].layout->binding[binding];

   /* input attachments use non bindless workaround */
   if (bind_layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
      const struct glsl_type *glsl_type = glsl_without_array(var->type);
      uint32_t idx = var->data.index * 2;

      b->shader->info.textures_used |=
         ((1ull << (bind_layout->array_size * 2)) - 1) << (idx * 2);

      /* D24S8 workaround: stencil of D24S8 will be sampled as uint */
      if (glsl_get_sampler_result_type(glsl_type) == GLSL_TYPE_UINT)
         idx += 1;

      if (deref->deref_type == nir_deref_type_var)
         return nir_imm_int(b, idx);

      nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
      return nir_iadd(b, nir_imm_int(b, idx),
                      nir_imul_imm(b, arr_index, 2));
   }

   shader->active_desc_sets |= 1u << set;

   nir_ssa_def *desc_offset;
   unsigned descriptor_stride;
   unsigned offset = 0;

   /* Samplers come second in combined image/sampler descriptors, see
    * write_combined_image_sampler_descriptor().
    */
   if (is_sampler && bind_layout->type ==
       VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
      offset = 1;
   }
   desc_offset =
      nir_imm_int(b, (bind_layout->offset / (4 * A6XX_TEX_CONST_DWORDS)) +
                  offset);
   descriptor_stride = bind_layout->size / (4 * A6XX_TEX_CONST_DWORDS);

   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
      desc_offset = nir_iadd(b, desc_offset,
                             nir_imul_imm(b, arr_index, descriptor_stride));
   }

   nir_intrinsic_instr *bindless =
      nir_intrinsic_instr_create(b->shader,
                                 nir_intrinsic_bindless_resource_ir3);
   bindless->num_components = 0;
   nir_ssa_dest_init(&bindless->instr, &bindless->dest,
                     1, 32, NULL);
   nir_intrinsic_set_desc_set(bindless, set);
   bindless->src[0] = nir_src_for_ssa(desc_offset);
   nir_builder_instr_insert(b, &bindless->instr);

   return &bindless->dest.ssa;
}

static void
lower_image_deref(nir_builder *b,
                  nir_intrinsic_instr *instr, struct tu_shader *shader,
                  const struct tu_pipeline_layout *layout)
{
   nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
   nir_ssa_def *bindless = build_bindless(b, deref, false, shader, layout);
   nir_rewrite_image_intrinsic(instr, bindless, true);
}

static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
                struct tu_shader *shader,
                const struct tu_pipeline_layout *layout)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_layer_id:
      /* TODO: remove this when layered rendering is implemented */
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(nir_imm_int(b, 0)));
      nir_instr_remove(&instr->instr);
      return true;

   case nir_intrinsic_load_push_constant:
      lower_load_push_constant(b, instr, shader);
      return true;

   case nir_intrinsic_load_vulkan_descriptor:
      lower_load_vulkan_descriptor(instr);
      return true;

   case nir_intrinsic_vulkan_resource_index:
      lower_vulkan_resource_index(b, instr, shader, layout);
      return true;

   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_fadd:
   case nir_intrinsic_ssbo_atomic_fmin:
   case nir_intrinsic_ssbo_atomic_fmax:
   case nir_intrinsic_ssbo_atomic_fcomp_swap:
   case nir_intrinsic_get_buffer_size:
      lower_ssbo_ubo_intrinsic(b, instr);
      return true;

   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_image_deref_samples:
      lower_image_deref(b, instr, shader, layout);
      return true;

   default:
      return false;
   }
}

static void
lower_tex_ycbcr(const struct tu_pipeline_layout *layout,
                nir_builder *builder,
                nir_tex_instr *tex)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
   assert(deref_src_idx >= 0);
   nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);

   nir_variable *var = nir_deref_instr_get_variable(deref);
   const struct tu_descriptor_set_layout *set_layout =
      layout->set[var->data.descriptor_set].layout;
   const struct tu_descriptor_set_binding_layout *binding =
      &set_layout->binding[var->data.binding];
   const struct tu_sampler_ycbcr_conversion *ycbcr_samplers =
      tu_immutable_ycbcr_samplers(set_layout, binding);

   if (!ycbcr_samplers)
      return;

   /* For the following instructions, we don't apply any change */
   if (tex->op == nir_texop_txs ||
       tex->op == nir_texop_query_levels ||
       tex->op == nir_texop_lod)
      return;

   assert(tex->texture_index == 0);
   unsigned array_index = 0;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);
      if (!nir_src_is_const(deref->arr.index))
         return;
      array_index = nir_src_as_uint(deref->arr.index);
      array_index = MIN2(array_index, binding->array_size - 1);
   }
   const struct tu_sampler_ycbcr_conversion *ycbcr_sampler = ycbcr_samplers + array_index;

   if (ycbcr_sampler->ycbcr_model == VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY)
      return;

   builder->cursor = nir_after_instr(&tex->instr);

   uint8_t bits = vk_format_get_component_bits(ycbcr_sampler->format,
                                               UTIL_FORMAT_COLORSPACE_RGB,
                                               PIPE_SWIZZLE_X);
   uint32_t bpcs[3] = {bits, bits, bits}; /* TODO: use right bpc for each channel ? */
   nir_ssa_def *result = nir_convert_ycbcr_to_rgb(builder,
                                                  ycbcr_sampler->ycbcr_model,
                                                  ycbcr_sampler->ycbcr_range,
                                                  &tex->dest.ssa,
                                                  bpcs);
   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, nir_src_for_ssa(result),
                                  result->parent_instr);

   builder->cursor = nir_before_instr(&tex->instr);
}
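
/* Rewrite the sampler and texture deref sources of a tex instruction into
 * bindless handles (or a plain texture offset in the input-attachment case),
 * applying YCbCr conversion lowering first where immutable samplers need it.
 */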
static bool
lower_tex(nir_builder *b, nir_tex_instr *tex,
          struct tu_shader *shader, const struct tu_pipeline_layout *layout)
{
   lower_tex_ycbcr(layout, b, tex);

   int sampler_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_deref);
   if (sampler_src_idx >= 0) {
      nir_deref_instr *deref = nir_src_as_deref(tex->src[sampler_src_idx].src);
      nir_ssa_def *bindless = build_bindless(b, deref, true, shader, layout);
      nir_instr_rewrite_src(&tex->instr, &tex->src[sampler_src_idx].src,
                            nir_src_for_ssa(bindless));
      tex->src[sampler_src_idx].src_type = nir_tex_src_sampler_handle;
   }

   int tex_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
   if (tex_src_idx >= 0) {
      nir_deref_instr *deref = nir_src_as_deref(tex->src[tex_src_idx].src);
      nir_ssa_def *bindless = build_bindless(b, deref, false, shader, layout);
      nir_instr_rewrite_src(&tex->instr, &tex->src[tex_src_idx].src,
                            nir_src_for_ssa(bindless));
      tex->src[tex_src_idx].src_type = nir_tex_src_texture_handle;

      /* for the input attachment case: */
      if (bindless->parent_instr->type != nir_instr_type_intrinsic)
         tex->src[tex_src_idx].src_type = nir_tex_src_texture_offset;
   }

   return true;
}

static bool
lower_impl(nir_function_impl *impl, struct tu_shader *shader,
           const struct tu_pipeline_layout *layout)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         b.cursor = nir_before_instr(instr);
         switch (instr->type) {
         case nir_instr_type_tex:
            progress |= lower_tex(&b, nir_instr_as_tex(instr), shader, layout);
            break;
         case nir_instr_type_intrinsic:
            progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader, layout);
            break;
         default:
            break;
         }
      }
   }

   if (progress)
      nir_metadata_preserve(impl, nir_metadata_none);
   else
      nir_metadata_preserve(impl, nir_metadata_all);

   return progress;
}

/* Figure out the range of push constants that we're actually going to push to
 * the shader, and tell the backend to reserve this range when pushing UBO
 * constants.
 */
static void
gather_push_constants(nir_shader *shader, struct tu_shader *tu_shader)
{
   uint32_t min = UINT32_MAX, max = 0;
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_push_constant)
               continue;

            uint32_t base = nir_intrinsic_base(intrin);
            uint32_t range = nir_intrinsic_range(intrin);
            min = MIN2(min, base);
            max = MAX2(max, base + range);
         }
      }
   }

   if (min >= max) {
      tu_shader->push_consts.lo = 0;
      tu_shader->push_consts.count = 0;
      return;
   }

   /* CP_LOAD_STATE OFFSET and NUM_UNIT are in units of vec4 (4 dwords),
    * however there's an alignment requirement of 4 on OFFSET. Expand the
    * range and change units accordingly.
    */
   tu_shader->push_consts.lo = (min / 16) / 4 * 4;
   tu_shader->push_consts.count =
      align(max, 16) / 16 - tu_shader->push_consts.lo;
}

static bool
tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader,
            const struct tu_pipeline_layout *layout)
{
   bool progress = false;

   gather_push_constants(shader, tu_shader);

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_impl(function->impl, tu_shader, layout);
   }

   /* Remove now-unused variables so that when we gather the shader info later
    * they won't be counted.
    */
   progress |=
      nir_remove_dead_variables(shader,
                                nir_var_uniform | nir_var_mem_ubo | nir_var_mem_ssbo,
                                NULL);

   return progress;
}

static void
shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   unsigned comp_size =
      glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size;
}
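
/* Translate NIR's transform feedback info into ir3_stream_output_info,
 * remapping VARYING_SLOT_* locations to the compacted output indices the
 * ir3 backend expects.
 */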
static void
tu_gather_xfb_info(nir_shader *nir, struct ir3_stream_output_info *info)
{
   nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);

   if (!xfb)
      return;

   /* creating a map from VARYING_SLOT_* enums to consecutive index */
   uint8_t num_outputs = 0;
   uint64_t outputs_written = 0;
   for (int i = 0; i < xfb->output_count; i++)
      outputs_written |= BITFIELD64_BIT(xfb->outputs[i].location);

   uint8_t output_map[VARYING_SLOT_TESS_MAX];
   memset(output_map, 0, sizeof(output_map));

   for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      if (outputs_written & BITFIELD64_BIT(attr))
         output_map[attr] = num_outputs++;
   }

   assert(xfb->output_count < IR3_MAX_SO_OUTPUTS);
   info->num_outputs = xfb->output_count;

   for (int i = 0; i < IR3_MAX_SO_BUFFERS; i++)
      info->stride[i] = xfb->buffers[i].stride / 4;

   for (int i = 0; i < xfb->output_count; i++) {
      info->output[i].register_index = output_map[xfb->outputs[i].location];
      info->output[i].start_component = xfb->outputs[i].component_offset;
      info->output[i].num_components =
         util_bitcount(xfb->outputs[i].component_mask);
      info->output[i].output_buffer = xfb->outputs[i].buffer;
      info->output[i].dst_offset = xfb->outputs[i].offset / 4;
      info->output[i].stream = xfb->buffer_to_stream[xfb->outputs[i].buffer];
   }

   ralloc_free(xfb);
}
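
/* Create a tu_shader for one pipeline stage: translate the SPIR-V module to
 * NIR (or build a no-op fragment shader when no stage info is given), run the
 * lowering passes above, then hand the result to ir3.
 */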
struct tu_shader *
tu_shader_create(struct tu_device *dev,
                 gl_shader_stage stage,
                 const VkPipelineShaderStageCreateInfo *stage_info,
                 struct tu_pipeline_layout *layout,
                 const VkAllocationCallbacks *alloc)
{
   struct tu_shader *shader;

   shader = vk_zalloc2(
      &dev->vk.alloc, alloc,
      sizeof(*shader),
      8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!shader)
      return NULL;

   nir_shader *nir;
   if (stage_info) {
      /* translate SPIR-V to NIR */
      const struct tu_shader_module *module =
         tu_shader_module_from_handle(stage_info->module);
      assert(module->code_size % 4 == 0);
      nir = tu_spirv_to_nir(
         dev->compiler, (const uint32_t *) module->code, module->code_size / 4,
         stage, stage_info->pName, stage_info->pSpecializationInfo);
   } else {
      assert(stage == MESA_SHADER_FRAGMENT);

      nir_builder fs_b;
      const nir_shader_compiler_options *nir_options =
         ir3_get_compiler_options(dev->compiler);
      nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, nir_options);
      fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "noop_fs");
      nir = fs_b.shader;
   }

   if (!nir) {
      vk_free2(&dev->vk.alloc, alloc, shader);
      return NULL;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_NIR)) {
      fprintf(stderr, "translated nir:\n");
      nir_print_shader(nir, stderr);
   }

   /* multi step inlining procedure */
   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_copy_prop);
   NIR_PASS_V(nir, nir_opt_deref);
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (!func->is_entrypoint)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   NIR_PASS_V(nir, nir_lower_variable_initializers, ~nir_var_function_temp);

   /* Split member structs. We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared,
              NULL);

   /* Gather information for transform feedback.
    * This should be called after nir_split_per_member_structs.
    * Also needs to be called after nir_remove_dead_variables with varyings,
    * so that we could align stream outputs correctly.
    */
   struct ir3_stream_output_info so_info = {};
   if (nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_TESS_EVAL ||
       nir->info.stage == MESA_SHADER_GEOMETRY)
      tu_gather_xfb_info(nir, &so_info);

   NIR_PASS_V(nir, nir_propagate_invariant);

   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);

   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   NIR_PASS_V(nir, nir_opt_copy_prop_vars);
   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);

   /* ir3 doesn't support indirect input/output */
   /* TODO: We shouldn't perform this lowering pass on gl_TessLevelInner
    * and gl_TessLevelOuter. Since the tess levels are actually stored in
    * a global BO, they can be directly accessed via stg and ldg.
    * nir_lower_indirect_derefs will instead generate a big if-ladder which
    * isn't *incorrect* but is much less efficient. */
   NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out);

   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);

   nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs, stage);
   nir_assign_io_var_locations(nir, nir_var_shader_out, &nir->num_outputs, stage);

   NIR_PASS_V(nir, nir_lower_system_values);
   NIR_PASS_V(nir, nir_lower_frexp);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, nir_lower_input_attachments, true);

   NIR_PASS_V(nir, nir_lower_explicit_io,
              nir_var_mem_ubo | nir_var_mem_ssbo,
              nir_address_format_vec2_index_32bit_offset);

   if (nir->info.stage == MESA_SHADER_COMPUTE) {
      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types,
                 nir_var_mem_shared, shared_type_info);
      NIR_PASS_V(nir, nir_lower_explicit_io,
                 nir_var_mem_shared,
                 nir_address_format_32bit_offset);
   }

   NIR_PASS_V(nir, tu_lower_io, shader, layout);

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   ir3_finalize_nir(dev->compiler, nir);

   shader->ir3_shader =
      ir3_shader_from_nir(dev->compiler, nir,
                          align(shader->push_consts.count, 4),
                          &so_info);

   return shader;
}

void
tu_shader_destroy(struct tu_device *dev,
                  struct tu_shader *shader,
                  const VkAllocationCallbacks *alloc)
{
   ir3_shader_destroy(shader->ir3_shader);

   vk_free2(&dev->vk.alloc, alloc, shader);
}

VkResult
tu_CreateShaderModule(VkDevice _device,
                      const VkShaderModuleCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkShaderModule *pShaderModule)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);
   assert(pCreateInfo->codeSize % 4 == 0);

   module = vk_object_alloc(&device->vk, pAllocator,
                            sizeof(*module) + pCreateInfo->codeSize,
                            VK_OBJECT_TYPE_SHADER_MODULE);
   if (module == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   module->code_size = pCreateInfo->codeSize;
   memcpy(module->code, pCreateInfo->pCode, pCreateInfo->codeSize);

   _mesa_sha1_compute(module->code, module->code_size, module->sha1);

   *pShaderModule = tu_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void
tu_DestroyShaderModule(VkDevice _device,
                       VkShaderModule _module,
                       const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_shader_module, module, _module);

   if (!module)
      return;

   vk_object_free(&device->vk, pAllocator, module);
}