turnip: don't set SP_FS_CTRL_REG0_VARYING if only fragcoord is used
[mesa.git] src/freedreno/vulkan/tu_shader.c
/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "spirv/nir_spirv.h"
#include "util/mesa-sha1.h"

#include "ir3/ir3_nir.h"

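/* Translate a SPIR-V binary into NIR using the ir3 compiler options,
 * first converting any VkSpecializationInfo entries into
 * nir_spirv_specialization records.  Returns NULL if the specialization
 * array cannot be allocated.
 */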
static nir_shader *
tu_spirv_to_nir(struct ir3_compiler *compiler,
                const uint32_t *words,
                size_t word_count,
                gl_shader_stage stage,
                const char *entry_point_name,
                const VkSpecializationInfo *spec_info)
{
   /* TODO these are made-up */
   const struct spirv_to_nir_options spirv_options = {
      .frag_coord_is_sysval = true,
      .lower_ubo_ssbo_access_to_offsets = true,
      .caps = { false },
   };
   const nir_shader_compiler_options *nir_options =
      ir3_get_compiler_options(compiler);

   /* convert VkSpecializationInfo */
   struct nir_spirv_specialization *spec = NULL;
   uint32_t num_spec = 0;
   if (spec_info && spec_info->mapEntryCount) {
      spec = malloc(sizeof(*spec) * spec_info->mapEntryCount);
      if (!spec)
         return NULL;

      for (uint32_t i = 0; i < spec_info->mapEntryCount; i++) {
         const VkSpecializationMapEntry *entry = &spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry->offset;
         assert(data + entry->size <= spec_info->pData + spec_info->dataSize);
         spec[i].id = entry->constantID;
         if (entry->size == 8)
            spec[i].data64 = *(const uint64_t *) data;
         else
            spec[i].data32 = *(const uint32_t *) data;
         spec[i].defined_on_module = false;
      }

      num_spec = spec_info->mapEntryCount;
   }

   nir_shader *nir =
      spirv_to_nir(words, word_count, spec, num_spec, stage, entry_point_name,
                   &spirv_options, nir_options);

   free(spec);

   assert(nir->info.stage == stage);
   nir_validate_shader(nir, "after spirv_to_nir");

   return nir;
}

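/* Insertion-sort a variable list so that later passes see the variables
 * in increasing data.location order.
 */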
static void
tu_sort_variables_by_location(struct exec_list *variables)
{
   struct exec_list sorted;
   exec_list_make_empty(&sorted);

   nir_foreach_variable_safe(var, variables)
   {
      exec_node_remove(&var->node);

      /* insert the variable into the sorted list */
      nir_variable *next = NULL;
      nir_foreach_variable(tmp, &sorted)
      {
         if (var->data.location < tmp->data.location) {
            next = tmp;
            break;
         }
      }
      if (next)
         exec_node_insert_node_before(&next->node, &var->node);
      else
         exec_list_push_tail(&sorted, &var->node);
   }

   exec_list_move_nodes_to(&sorted, variables);
}

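/* Add a (set, binding) entry to a descriptor map, or find it if already
 * present, and return its flattened base index.  Each entry occupies
 * array_size consecutive slots, so the returned index is the sum of the
 * array sizes of all preceding entries.
 *
 * For example (hypothetical values): adding binding A with array_size 3
 * returns 0, then adding binding B with array_size 1 returns 3, and
 * adding A again returns 0.
 */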
static unsigned
map_add(struct tu_descriptor_map *map, int set, int binding, int value,
        int array_size)
{
   unsigned index = 0;
   for (unsigned i = 0; i < map->num; i++) {
      if (set == map->set[i] && binding == map->binding[i]) {
         assert(value == map->value[i]);
         assert(array_size == map->array_size[i]);
         return index;
      }
      index += map->array_size[i];
   }

   assert(index == map->num_desc);

   map->set[map->num] = set;
   map->binding[map->num] = binding;
   map->value[map->num] = value;
   map->array_size[map->num] = array_size;
   map->num++;
   map->num_desc += array_size;

   return index;
}

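/* Turn a texture/sampler deref source into a flat index.  The deref chain
 * is walked to accumulate a constant base_index and, for non-constant
 * array indices, a dynamic offset that becomes a
 * nir_tex_src_{texture,sampler}_offset source.  The variable's descriptor
 * set/binding is then registered in the shader's texture or sampler map,
 * and the base index is added into texture_index/sampler_index.
 */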
static void
lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
                        struct tu_shader *shader,
                        const struct tu_pipeline_layout *layout)
{
   nir_ssa_def *index = NULL;
   unsigned base_index = 0;
   unsigned array_elements = 1;
   nir_tex_src *src = &instr->src[src_idx];
   bool is_sampler = src->src_type == nir_tex_src_sampler_deref;

   /* First compute the offsets */
   nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
   while (deref->deref_type != nir_deref_type_var) {
      assert(deref->parent.is_ssa);
      nir_deref_instr *parent =
         nir_instr_as_deref(deref->parent.ssa->parent_instr);

      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index) && index == NULL) {
         /* We're still building a direct index */
         base_index += nir_src_as_uint(deref->arr.index) * array_elements;
      } else {
         if (index == NULL) {
            /* We used to be direct but not anymore */
            index = nir_imm_int(b, base_index);
            base_index = 0;
         }

         index = nir_iadd(b, index,
                          nir_imul(b, nir_imm_int(b, array_elements),
                                   nir_ssa_for_src(b, deref->arr.index, 1)));
      }

      array_elements *= glsl_get_length(parent->type);

      deref = parent;
   }

   if (index)
      index = nir_umin(b, index, nir_imm_int(b, array_elements - 1));

   /* Now that we have the offsets, apply them by rewriting the source or
    * removing the instruction if needed.
    */
   if (index) {
      nir_instr_rewrite_src(&instr->instr, &src->src,
                            nir_src_for_ssa(index));

      src->src_type = is_sampler ?
         nir_tex_src_sampler_offset :
         nir_tex_src_texture_offset;

      instr->texture_array_size = array_elements;
   } else {
      nir_tex_instr_remove_src(instr, src_idx);
   }

   uint32_t set = deref->var->data.descriptor_set;
   uint32_t binding = deref->var->data.binding;
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];

   int desc_index = map_add(is_sampler ?
                            &shader->sampler_map : &shader->texture_map,
                            deref->var->data.descriptor_set,
                            deref->var->data.binding,
                            deref->var->data.index,
                            binding_layout->array_size) + base_index;
   if (is_sampler)
      instr->sampler_index = desc_index;
   else
      instr->texture_index = desc_index;
}

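/* Lower the texture and sampler deref sources of a tex instruction, if
 * present.  Returns true when at least one source was rewritten.
 */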
static bool
lower_sampler(nir_builder *b, nir_tex_instr *instr, struct tu_shader *shader,
              const struct tu_pipeline_layout *layout)
{
   int texture_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);

   if (texture_idx >= 0)
      lower_tex_src_to_offset(b, instr, texture_idx, shader, layout);

   int sampler_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);

   if (sampler_idx >= 0)
      lower_tex_src_to_offset(b, instr, sampler_idx, shader, layout);

   if (texture_idx < 0 && sampler_idx < 0)
      return false;

   return true;
}

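/* Lower the Vulkan-specific intrinsics that ir3 does not understand:
 * load_layer_id becomes a constant 0 (no layered rendering yet),
 * load_push_constant becomes a load_ubo from UBO index 0, and
 * vulkan_resource_index becomes a flat index from the shader's UBO/SSBO
 * descriptor maps (UBO indices are shifted by one because index 0 is
 * reserved for push constants).
 */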
static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
                struct tu_shader *shader,
                const struct tu_pipeline_layout *layout)
{
   /* TODO: remove this when layered rendering is implemented */
   if (instr->intrinsic == nir_intrinsic_load_layer_id) {
      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                               nir_src_for_ssa(nir_imm_int(b, 0)));
      nir_instr_remove(&instr->instr);
      return true;
   }

   if (instr->intrinsic == nir_intrinsic_load_push_constant) {
      /* note: ir3 wants load_ubo, not load_uniform */
      assert(nir_intrinsic_base(instr) == 0);

      nir_intrinsic_instr *load =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
      load->num_components = instr->num_components;
      load->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
      load->src[1] = instr->src[0];
      nir_ssa_dest_init(&load->instr, &load->dest,
                        load->num_components, instr->dest.ssa.bit_size,
                        instr->dest.ssa.name);
      nir_builder_instr_insert(b, &load->instr);
      nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));

      nir_instr_remove(&instr->instr);

      return true;
   }

   if (instr->intrinsic != nir_intrinsic_vulkan_resource_index)
      return false;

   nir_const_value *const_val = nir_src_as_const_value(instr->src[0]);

   unsigned set = nir_intrinsic_desc_set(instr);
   unsigned binding = nir_intrinsic_binding(instr);
   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
   struct tu_descriptor_set_binding_layout *binding_layout =
      &set_layout->binding[binding];
   unsigned index = 0;

   switch (nir_intrinsic_desc_type(instr)) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      if (!const_val || const_val->u32 != 0)
         tu_finishme("non-zero vulkan_resource_index array index");
      /* skip index 0 which is used for push constants */
      index = map_add(&shader->ubo_map, set, binding, 0,
                      binding_layout->array_size) + 1;
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      if (!const_val)
         tu_finishme("non-constant vulkan_resource_index array index");
      index = map_add(&shader->ssbo_map, set, binding, 0,
                      binding_layout->array_size);
      break;
   default:
      tu_finishme("unsupported desc_type for vulkan_resource_index");
      break;
   }

   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(nir_imm_int(b, index)));
   nir_instr_remove(&instr->instr);

   return true;
}

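/* Run the tex/intrinsic lowering above over every instruction of a
 * function implementation, keeping the builder cursor just before the
 * instruction being rewritten.
 */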
static bool
lower_impl(nir_function_impl *impl, struct tu_shader *shader,
           const struct tu_pipeline_layout *layout)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         b.cursor = nir_before_instr(instr);
         switch (instr->type) {
         case nir_instr_type_tex:
            progress |= lower_sampler(&b, nir_instr_as_tex(instr), shader, layout);
            break;
         case nir_instr_type_intrinsic:
            progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader, layout);
            break;
         default:
            break;
         }
      }
   }

   return progress;
}

static bool
tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader,
            const struct tu_pipeline_layout *layout)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_impl(function->impl, tu_shader, layout);
   }

   return progress;
}

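/* Create a tu_shader from one pipeline stage: allocate the shader (vertex
 * shaders get room for a second, binning-pass variant), translate the
 * module's SPIR-V to NIR, run the NIR lowering passes below, and attach
 * the resulting NIR to the embedded ir3_shader.  Compilation to ir3
 * happens later, in tu_shader_compile().
 */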
struct tu_shader *
tu_shader_create(struct tu_device *dev,
                 gl_shader_stage stage,
                 const VkPipelineShaderStageCreateInfo *stage_info,
                 struct tu_pipeline_layout *layout,
                 const VkAllocationCallbacks *alloc)
{
   const struct tu_shader_module *module =
      tu_shader_module_from_handle(stage_info->module);
   struct tu_shader *shader;

   const uint32_t max_variant_count = (stage == MESA_SHADER_VERTEX) ? 2 : 1;
   shader = vk_zalloc2(
      &dev->alloc, alloc,
      sizeof(*shader) + sizeof(struct ir3_shader_variant) * max_variant_count,
      8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!shader)
      return NULL;

   /* translate SPIR-V to NIR */
   assert(module->code_size % 4 == 0);
   nir_shader *nir = tu_spirv_to_nir(
      dev->compiler, (const uint32_t *) module->code, module->code_size / 4,
      stage, stage_info->pName, stage_info->pSpecializationInfo);
   if (!nir) {
      vk_free2(&dev->alloc, alloc, shader);
      return NULL;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_NIR)) {
      fprintf(stderr, "translated nir:\n");
      nir_print_shader(nir, stderr);
   }

   /* multi-step inlining procedure */
   NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_opt_deref);
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (!func->is_entrypoint)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   NIR_PASS_V(nir, nir_lower_constant_initializers, ~nir_var_function_temp);

   /* Split member structs. We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);

   NIR_PASS_V(nir, nir_propagate_invariant);

   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);

   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   NIR_PASS_V(nir, nir_opt_copy_prop_vars);
   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);

   /* ir3 doesn't support indirect input/output */
   NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out);

   switch (stage) {
   case MESA_SHADER_VERTEX:
      tu_sort_variables_by_location(&nir->outputs);
      break;
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_GEOMETRY:
      tu_sort_variables_by_location(&nir->inputs);
      tu_sort_variables_by_location(&nir->outputs);
      break;
   case MESA_SHADER_FRAGMENT:
      tu_sort_variables_by_location(&nir->inputs);
      break;
   case MESA_SHADER_COMPUTE:
      break;
   default:
      unreachable("invalid gl_shader_stage");
      break;
   }

   nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs, stage);
   nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs, stage);

   NIR_PASS_V(nir, nir_lower_system_values);
   NIR_PASS_V(nir, nir_lower_frexp);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, nir_lower_input_attachments, true);

   NIR_PASS_V(nir, tu_lower_io, shader, layout);

   NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);

   if (stage == MESA_SHADER_FRAGMENT) {
      /* NOTE: lower load_barycentric_at_sample first, since it
       * produces load_barycentric_at_offset.
       */
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);

      NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
   }

   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* num_uniforms is only used by ir3 for the size of UBO 0 (push constants) */
   nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE / 16;

   shader->ir3_shader.compiler = dev->compiler;
   shader->ir3_shader.type = stage;
   shader->ir3_shader.nir = nir;

   return shader;
}

void
tu_shader_destroy(struct tu_device *dev,
                  struct tu_shader *shader,
                  const VkAllocationCallbacks *alloc)
{
   if (shader->ir3_shader.nir)
      ralloc_free(shader->ir3_shader.nir);

   for (uint32_t i = 0; i < 1 + shader->has_binning_pass; i++) {
      if (shader->variants[i].ir)
         ir3_destroy(shader->variants[i].ir);
   }

   if (shader->ir3_shader.const_state.immediates)
      free(shader->ir3_shader.const_state.immediates);
   if (shader->binary)
      free(shader->binary);
   if (shader->binning_binary)
      free(shader->binning_binary);

   vk_free2(&dev->alloc, alloc, shader);
}

void
tu_shader_compile_options_init(
   struct tu_shader_compile_options *options,
   const VkGraphicsPipelineCreateInfo *pipeline_info)
{
   *options = (struct tu_shader_compile_options) {
      /* TODO ir3_key */

      /* TODO: VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT
       * some optimizations need to happen, otherwise the shader might not compile
       */
      .optimize = true,
      .include_binning_pass = true,
   };
}

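/* Fill in and compile a single ir3 variant.  A non-NULL "nonbinning"
 * argument marks this variant as the binning-pass version of that one.
 * Returns the assembled binary, or NULL if ir3 compilation fails; a
 * failed assembly leaves the variant for tu_shader_destroy to clean up.
 */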
static uint32_t *
tu_compile_shader_variant(struct ir3_shader *shader,
                          const struct ir3_shader_key *key,
                          struct ir3_shader_variant *nonbinning,
                          struct ir3_shader_variant *variant)
{
   variant->shader = shader;
   variant->type = shader->type;
   variant->key = *key;
   variant->binning_pass = !!nonbinning;
   variant->nonbinning = nonbinning;

   int ret = ir3_compile_shader_nir(shader->compiler, variant);
   if (ret)
      return NULL;

   /* when assembly fails, we rely on tu_shader_destroy to clean up the
    * variant
    */
   return ir3_shader_assemble(variant, shader->compiler->gpu_id);
}

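/* Compile the shader's NIR into one or two ir3 variants: the main variant
 * always, plus a binning-pass variant for vertex shaders when requested by
 * the compile options.  Debug flags dump the optimized NIR and the
 * disassembled ir3.
 */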
VkResult
tu_shader_compile(struct tu_device *dev,
                  struct tu_shader *shader,
                  const struct tu_shader *next_stage,
                  const struct tu_shader_compile_options *options,
                  const VkAllocationCallbacks *alloc)
{
   if (options->optimize) {
      /* ignore the key for the first pass of optimization */
      ir3_optimize_nir(&shader->ir3_shader, shader->ir3_shader.nir, NULL);

      if (unlikely(dev->physical_device->instance->debug_flags &
                   TU_DEBUG_NIR)) {
         fprintf(stderr, "optimized nir:\n");
         nir_print_shader(shader->ir3_shader.nir, stderr);
      }
   }

   shader->binary = tu_compile_shader_variant(
      &shader->ir3_shader, &options->key, NULL, &shader->variants[0]);
   if (!shader->binary)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* compile another variant for the binning pass */
   if (options->include_binning_pass &&
       shader->ir3_shader.type == MESA_SHADER_VERTEX) {
      shader->binning_binary = tu_compile_shader_variant(
         &shader->ir3_shader, &options->key, &shader->variants[0],
         &shader->variants[1]);
      if (!shader->binning_binary)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      shader->has_binning_pass = true;
   }

   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_IR3)) {
      fprintf(stderr, "disassembled ir3:\n");
      fprintf(stderr, "shader: %s\n",
              gl_shader_stage_name(shader->ir3_shader.type));
      ir3_shader_disasm(&shader->variants[0], shader->binary, stderr);

      if (shader->has_binning_pass) {
         fprintf(stderr, "disassembled ir3:\n");
         fprintf(stderr, "shader: %s (binning)\n",
                 gl_shader_stage_name(shader->ir3_shader.type));
         ir3_shader_disasm(&shader->variants[1], shader->binning_binary,
                           stderr);
      }
   }

   return VK_SUCCESS;
}

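/* vkCreateShaderModule: store a copy of the SPIR-V words and compute a
 * SHA-1 of the code.
 */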
VkResult
tu_CreateShaderModule(VkDevice _device,
                      const VkShaderModuleCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkShaderModule *pShaderModule)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);
   assert(pCreateInfo->codeSize % 4 == 0);

   module = vk_alloc2(&device->alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   module->code_size = pCreateInfo->codeSize;
   memcpy(module->code, pCreateInfo->pCode, pCreateInfo->codeSize);

   _mesa_sha1_compute(module->code, module->code_size, module->sha1);

   *pShaderModule = tu_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void
tu_DestroyShaderModule(VkDevice _device,
                       VkShaderModule _module,
                       const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_shader_module, module, _module);

   if (!module)
      return;

   vk_free2(&device->alloc, pAllocator, module);
}