anv: Report correct SLM size
[mesa.git] src/intel/vulkan/anv_pipeline.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "util/mesa-sha1.h"
31 #include "util/os_time.h"
32 #include "common/gen_l3_config.h"
33 #include "common/gen_disasm.h"
34 #include "anv_private.h"
35 #include "compiler/brw_nir.h"
36 #include "anv_nir.h"
37 #include "nir/nir_xfb_info.h"
38 #include "spirv/nir_spirv.h"
39 #include "vk_util.h"
40
41 /* Needed for SWIZZLE macros */
42 #include "program/prog_instruction.h"
43
44 // Shader functions
45
46 VkResult anv_CreateShaderModule(
47 VkDevice _device,
48 const VkShaderModuleCreateInfo* pCreateInfo,
49 const VkAllocationCallbacks* pAllocator,
50 VkShaderModule* pShaderModule)
51 {
52 ANV_FROM_HANDLE(anv_device, device, _device);
53 struct anv_shader_module *module;
54
55 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
56 assert(pCreateInfo->flags == 0);
57
58 module = vk_alloc2(&device->alloc, pAllocator,
59 sizeof(*module) + pCreateInfo->codeSize, 8,
60 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
61 if (module == NULL)
62 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
63
64 module->size = pCreateInfo->codeSize;
65 memcpy(module->data, pCreateInfo->pCode, module->size);
66
67 _mesa_sha1_compute(module->data, module->size, module->sha1);
68
69 *pShaderModule = anv_shader_module_to_handle(module);
70
71 return VK_SUCCESS;
72 }
73
74 void anv_DestroyShaderModule(
75 VkDevice _device,
76 VkShaderModule _module,
77 const VkAllocationCallbacks* pAllocator)
78 {
79 ANV_FROM_HANDLE(anv_device, device, _device);
80 ANV_FROM_HANDLE(anv_shader_module, module, _module);
81
82 if (!module)
83 return;
84
85 vk_free2(&device->alloc, pAllocator, module);
86 }
87
88 #define SPIR_V_MAGIC_NUMBER 0x07230203
89
90 struct anv_spirv_debug_data {
91 struct anv_device *device;
92 const struct anv_shader_module *module;
93 };
94
95 static void anv_spirv_nir_debug(void *private_data,
96 enum nir_spirv_debug_level level,
97 size_t spirv_offset,
98 const char *message)
99 {
100 struct anv_spirv_debug_data *debug_data = private_data;
101 struct anv_instance *instance = debug_data->device->physical->instance;
102
103 static const VkDebugReportFlagsEXT vk_flags[] = {
104 [NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
105 [NIR_SPIRV_DEBUG_LEVEL_WARNING] = VK_DEBUG_REPORT_WARNING_BIT_EXT,
106 [NIR_SPIRV_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
107 };
108 char buffer[256];
109
110 snprintf(buffer, sizeof(buffer), "SPIR-V offset %lu: %s", (unsigned long) spirv_offset, message);
111
112 vk_debug_report(&instance->debug_report_callbacks,
113 vk_flags[level],
114 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
115 (uint64_t) (uintptr_t) debug_data->module,
116 0, 0, "anv", buffer);
117 }
118
119 /* Eventually, this will become part of anv_CreateShader. Unfortunately,
120 * we can't do that yet because we don't have the ability to copy nir.
121 */
122 static nir_shader *
123 anv_shader_compile_to_nir(struct anv_device *device,
124 void *mem_ctx,
125 const struct anv_shader_module *module,
126 const char *entrypoint_name,
127 gl_shader_stage stage,
128 const VkSpecializationInfo *spec_info)
129 {
130 const struct anv_physical_device *pdevice = device->physical;
131 const struct brw_compiler *compiler = pdevice->compiler;
132 const nir_shader_compiler_options *nir_options =
133 compiler->glsl_compiler_options[stage].NirOptions;
134
135 uint32_t *spirv = (uint32_t *) module->data;
136 assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
137 assert(module->size % 4 == 0);
138
139 uint32_t num_spec_entries = 0;
140 struct nir_spirv_specialization *spec_entries = NULL;
141 if (spec_info && spec_info->mapEntryCount > 0) {
142 num_spec_entries = spec_info->mapEntryCount;
143 spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
144 for (uint32_t i = 0; i < num_spec_entries; i++) {
145 VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
146 const void *data = spec_info->pData + entry.offset;
147 assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
148
149 spec_entries[i].id = spec_info->pMapEntries[i].constantID;
150 if (spec_info->dataSize == 8)
151 spec_entries[i].data64 = *(const uint64_t *)data;
152 else
153 spec_entries[i].data32 = *(const uint32_t *)data;
154 }
155 }
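/* As a rough illustration (hypothetical values, not taken from any real
 * pipeline): a VkSpecializationInfo with mapEntryCount = 1,
 * pMapEntries[0] = { .constantID = 7, .offset = 0, .size = 4 },
 * dataSize = 4 and pData pointing at a single uint32_t yields one
 * spec_entries[0] with id = 7 and data32 set to that value, which
 * spirv_to_nir() below uses to override the constant decorated SpecId 7.
 * Note that the data64 path above is only taken when the whole pData blob
 * is exactly 8 bytes (spec_info->dataSize == 8), not per map entry.
 */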
156
157 struct anv_spirv_debug_data spirv_debug_data = {
158 .device = device,
159 .module = module,
160 };
161 struct spirv_to_nir_options spirv_options = {
162 .frag_coord_is_sysval = true,
163 .caps = {
164 .demote_to_helper_invocation = true,
165 .derivative_group = true,
166 .descriptor_array_dynamic_indexing = true,
167 .descriptor_array_non_uniform_indexing = true,
168 .descriptor_indexing = true,
169 .device_group = true,
170 .draw_parameters = true,
171 .float16 = pdevice->info.gen >= 8,
172 .float64 = pdevice->info.gen >= 8,
173 .fragment_shader_sample_interlock = pdevice->info.gen >= 9,
174 .fragment_shader_pixel_interlock = pdevice->info.gen >= 9,
175 .geometry_streams = true,
176 .image_write_without_format = true,
177 .int8 = pdevice->info.gen >= 8,
178 .int16 = pdevice->info.gen >= 8,
179 .int64 = pdevice->info.gen >= 8,
180 .int64_atomics = pdevice->info.gen >= 9 && pdevice->use_softpin,
181 .integer_functions2 = pdevice->info.gen >= 8,
182 .min_lod = true,
183 .multiview = true,
184 .physical_storage_buffer_address = pdevice->has_a64_buffer_access,
185 .post_depth_coverage = pdevice->info.gen >= 9,
186 .runtime_descriptor_array = true,
187 .float_controls = pdevice->info.gen >= 8,
188 .shader_clock = true,
189 .shader_viewport_index_layer = true,
190 .stencil_export = pdevice->info.gen >= 9,
191 .storage_8bit = pdevice->info.gen >= 8,
192 .storage_16bit = pdevice->info.gen >= 8,
193 .subgroup_arithmetic = true,
194 .subgroup_basic = true,
195 .subgroup_ballot = true,
196 .subgroup_quad = true,
197 .subgroup_shuffle = true,
198 .subgroup_vote = true,
199 .tessellation = true,
200 .transform_feedback = pdevice->info.gen >= 8,
201 .variable_pointers = true,
202 .vk_memory_model = true,
203 .vk_memory_model_device_scope = true,
204 },
205 .ubo_addr_format = nir_address_format_32bit_index_offset,
206 .ssbo_addr_format =
207 anv_nir_ssbo_addr_format(pdevice, device->robust_buffer_access),
208 .phys_ssbo_addr_format = nir_address_format_64bit_global,
209 .push_const_addr_format = nir_address_format_logical,
210
211 /* TODO: Consider changing this to an address format that has the NULL
212 * pointer equal to 0. That might be a better format to play nice
213 * with certain code / code generators.
214 */
215 .shared_addr_format = nir_address_format_32bit_offset,
216 .debug = {
217 .func = anv_spirv_nir_debug,
218 .private_data = &spirv_debug_data,
219 },
220 };
221
222
223 nir_shader *nir =
224 spirv_to_nir(spirv, module->size / 4,
225 spec_entries, num_spec_entries,
226 stage, entrypoint_name, &spirv_options, nir_options);
227 assert(nir->info.stage == stage);
228 nir_validate_shader(nir, "after spirv_to_nir");
229 ralloc_steal(mem_ctx, nir);
230
231 free(spec_entries);
232
233 if (unlikely(INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage))) {
234 fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
235 gl_shader_stage_name(stage));
236 nir_print_shader(nir, stderr);
237 }
238
239 /* We have to lower away local constant initializers right before we
240 * inline functions. That way they get properly initialized at the top
241 * of the function and not at the top of its caller.
242 */
243 NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
244 NIR_PASS_V(nir, nir_lower_returns);
245 NIR_PASS_V(nir, nir_inline_functions);
246 NIR_PASS_V(nir, nir_opt_deref);
247
248 /* Pick off the single entrypoint that we want */
249 foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
250 if (!func->is_entrypoint)
251 exec_node_remove(&func->node);
252 }
253 assert(exec_list_length(&nir->functions) == 1);
254
255 /* Now that we've deleted all but the main function, we can go ahead and
256 * lower the rest of the constant initializers. We do this here so that
257 * nir_remove_dead_variables and split_per_member_structs below see the
258 * corresponding stores.
259 */
260 NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
261
262 /* Split member structs. We do this before lower_io_to_temporaries so that
263 * it doesn't lower system values to temporaries by accident.
264 */
265 NIR_PASS_V(nir, nir_split_var_copies);
266 NIR_PASS_V(nir, nir_split_per_member_structs);
267
268 NIR_PASS_V(nir, nir_remove_dead_variables,
269 nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
270
271 NIR_PASS_V(nir, nir_propagate_invariant);
272 NIR_PASS_V(nir, nir_lower_io_to_temporaries,
273 nir_shader_get_entrypoint(nir), true, false);
274
275 NIR_PASS_V(nir, nir_lower_frexp);
276
277 /* Vulkan uses the separate-shader linking model */
278 nir->info.separate_shader = true;
279
280 brw_preprocess_nir(compiler, nir, NULL);
281
282 return nir;
283 }
284
285 void anv_DestroyPipeline(
286 VkDevice _device,
287 VkPipeline _pipeline,
288 const VkAllocationCallbacks* pAllocator)
289 {
290 ANV_FROM_HANDLE(anv_device, device, _device);
291 ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
292
293 if (!pipeline)
294 return;
295
296 anv_reloc_list_finish(&pipeline->batch_relocs,
297 pAllocator ? pAllocator : &device->alloc);
298
299 ralloc_free(pipeline->mem_ctx);
300
301 switch (pipeline->type) {
302 case ANV_PIPELINE_GRAPHICS: {
303 struct anv_graphics_pipeline *gfx_pipeline =
304 anv_pipeline_to_graphics(pipeline);
305
306 if (gfx_pipeline->blend_state.map)
307 anv_state_pool_free(&device->dynamic_state_pool, gfx_pipeline->blend_state);
308
309 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
310 if (gfx_pipeline->shaders[s])
311 anv_shader_bin_unref(device, gfx_pipeline->shaders[s]);
312 }
313 break;
314 }
315
316 case ANV_PIPELINE_COMPUTE: {
317 struct anv_compute_pipeline *compute_pipeline =
318 anv_pipeline_to_compute(pipeline);
319
320 if (compute_pipeline->cs)
321 anv_shader_bin_unref(device, compute_pipeline->cs);
322
323 break;
324 }
325
326 default:
327 unreachable("invalid pipeline type");
328 }
329
330 vk_free2(&device->alloc, pAllocator, pipeline);
331 }
332
333 static const uint32_t vk_to_gen_primitive_type[] = {
334 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
335 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
336 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
337 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
338 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
339 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
340 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
341 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
342 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
343 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
344 };
345
346 static void
347 populate_sampler_prog_key(const struct gen_device_info *devinfo,
348 struct brw_sampler_prog_key_data *key)
349 {
350 /* Almost all multisampled textures are compressed. The only time when we
351 * don't compress a multisampled texture is for 16x MSAA with a surface
352 * width greater than 8k, which is a bit of an edge case. Since the sampler
353 * just ignores the MCS parameter to ld2ms when MCS is disabled, it's safe
354 * to tell the compiler to always assume compression.
355 */
356 key->compressed_multisample_layout_mask = ~0;
357
358 /* SkyLake added support for 16x MSAA. With this came a new message for
359 * reading from a 16x MSAA surface with compression. The new message was
360 * needed because now the MCS data is 64 bits instead of 32 or lower as is
361 * the case for 8x, 4x, and 2x. The key->msaa_16 bit-field controls which
362 * message we use. Fortunately, the 16x message works for 8x, 4x, and 2x
363 * so we can just use it unconditionally. This may not be quite as
364 * efficient but it saves us from recompiling.
365 */
366 if (devinfo->gen >= 9)
367 key->msaa_16 = ~0;
368
369 /* XXX: Handle texture swizzle on HSW- */
370 for (int i = 0; i < MAX_SAMPLERS; i++) {
371 /* Assume color sampler, no swizzling. (Works for BDW+) */
372 key->swizzles[i] = SWIZZLE_XYZW;
373 }
374 }
375
376 static void
377 populate_base_prog_key(const struct gen_device_info *devinfo,
378 VkPipelineShaderStageCreateFlags flags,
379 struct brw_base_prog_key *key)
380 {
381 if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
382 key->subgroup_size_type = BRW_SUBGROUP_SIZE_VARYING;
383 else
384 key->subgroup_size_type = BRW_SUBGROUP_SIZE_API_CONSTANT;
385
386 populate_sampler_prog_key(devinfo, &key->tex);
387 }
388
389 static void
390 populate_vs_prog_key(const struct gen_device_info *devinfo,
391 VkPipelineShaderStageCreateFlags flags,
392 struct brw_vs_prog_key *key)
393 {
394 memset(key, 0, sizeof(*key));
395
396 populate_base_prog_key(devinfo, flags, &key->base);
397
398 /* XXX: Handle vertex input work-arounds */
399
400 /* XXX: Handle sampler_prog_key */
401 }
402
403 static void
404 populate_tcs_prog_key(const struct gen_device_info *devinfo,
405 VkPipelineShaderStageCreateFlags flags,
406 unsigned input_vertices,
407 struct brw_tcs_prog_key *key)
408 {
409 memset(key, 0, sizeof(*key));
410
411 populate_base_prog_key(devinfo, flags, &key->base);
412
413 key->input_vertices = input_vertices;
414 }
415
416 static void
417 populate_tes_prog_key(const struct gen_device_info *devinfo,
418 VkPipelineShaderStageCreateFlags flags,
419 struct brw_tes_prog_key *key)
420 {
421 memset(key, 0, sizeof(*key));
422
423 populate_base_prog_key(devinfo, flags, &key->base);
424 }
425
426 static void
427 populate_gs_prog_key(const struct gen_device_info *devinfo,
428 VkPipelineShaderStageCreateFlags flags,
429 struct brw_gs_prog_key *key)
430 {
431 memset(key, 0, sizeof(*key));
432
433 populate_base_prog_key(devinfo, flags, &key->base);
434 }
435
436 static void
437 populate_wm_prog_key(const struct gen_device_info *devinfo,
438 VkPipelineShaderStageCreateFlags flags,
439 const struct anv_subpass *subpass,
440 const VkPipelineMultisampleStateCreateInfo *ms_info,
441 struct brw_wm_prog_key *key)
442 {
443 memset(key, 0, sizeof(*key));
444
445 populate_base_prog_key(devinfo, flags, &key->base);
446
447 /* We set this to 0 here and set it to the actual value before we call
448 * brw_compile_fs.
449 */
450 key->input_slots_valid = 0;
451
452 /* Vulkan doesn't specify a default */
453 key->high_quality_derivatives = false;
454
455 /* XXX Vulkan doesn't appear to specify */
456 key->clamp_fragment_color = false;
457
458 assert(subpass->color_count <= MAX_RTS);
459 for (uint32_t i = 0; i < subpass->color_count; i++) {
460 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
461 key->color_outputs_valid |= (1 << i);
462 }
463
464 key->nr_color_regions = subpass->color_count;
465
466 /* To reduce possible shader recompilations we would need to know if
467 * there is a SampleMask output variable, so we could decide whether to
468 * emit code to work around the issue that hardware disables alpha to
469 * coverage when there is a SampleMask output.
470 */
471 key->alpha_to_coverage = ms_info && ms_info->alphaToCoverageEnable;
472
473 /* Vulkan doesn't support fixed-function alpha test */
474 key->alpha_test_replicate_alpha = false;
475
476 if (ms_info) {
477 /* We should probably pull this out of the shader, but it's fairly
478 * harmless to compute it and then let dead-code take care of it.
479 */
480 if (ms_info->rasterizationSamples > 1) {
481 key->persample_interp = ms_info->sampleShadingEnable &&
482 (ms_info->minSampleShading * ms_info->rasterizationSamples) > 1;
483 key->multisample_fbo = true;
484 }
485
486 key->frag_coord_adds_sample_pos = key->persample_interp;
487 }
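/* A worked example of the block above, with hypothetical state:
 * rasterizationSamples = 4, sampleShadingEnable = VK_TRUE and
 * minSampleShading = 0.5 give 0.5 * 4 = 2 > 1, so persample_interp and
 * frag_coord_adds_sample_pos are set and multisample_fbo is true. With
 * minSampleShading = 0.25 the product is exactly 1, so interpolation
 * stays per-pixel while multisample_fbo remains set.
 */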
488 }
489
490 static void
491 populate_cs_prog_key(const struct gen_device_info *devinfo,
492 VkPipelineShaderStageCreateFlags flags,
493 const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *rss_info,
494 struct brw_cs_prog_key *key)
495 {
496 memset(key, 0, sizeof(*key));
497
498 populate_base_prog_key(devinfo, flags, &key->base);
499
500 if (rss_info) {
501 assert(key->base.subgroup_size_type != BRW_SUBGROUP_SIZE_VARYING);
502
503 /* These enum values are expressly chosen to be equal to the subgroup
504 * size that they require.
505 */
506 assert(rss_info->requiredSubgroupSize == 8 ||
507 rss_info->requiredSubgroupSize == 16 ||
508 rss_info->requiredSubgroupSize == 32);
509 key->base.subgroup_size_type = rss_info->requiredSubgroupSize;
510 } else if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT) {
511 /* If the client expressly requests full subgroups and they don't
512 * specify a subgroup size, we need to pick one. If they've requested
513 * varying subgroup sizes, we set it to UNIFORM and let the back-end
514 * compiler pick. Otherwise, we specify the API value of 32.
515 * Performance will likely be terrible in this case but there's nothing
516 * we can do about that. The client should have chosen a size.
517 */
518 if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
519 key->base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM;
520 else
521 key->base.subgroup_size_type = BRW_SUBGROUP_SIZE_REQUIRE_32;
522 }
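/* To make the mapping above concrete (assumed values): a required subgroup
 * size of 16 is stored directly as the subgroup size type, since the
 * BRW_SUBGROUP_SIZE_REQUIRE_* values equal the sizes they name, while a
 * full-subgroups request without a required size and without the varying
 * flag falls through to BRW_SUBGROUP_SIZE_REQUIRE_32.
 */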
523 }
524
525 struct anv_pipeline_stage {
526 gl_shader_stage stage;
527
528 const struct anv_shader_module *module;
529 const char *entrypoint;
530 const VkSpecializationInfo *spec_info;
531
532 unsigned char shader_sha1[20];
533
534 union brw_any_prog_key key;
535
536 struct {
537 gl_shader_stage stage;
538 unsigned char sha1[20];
539 } cache_key;
540
541 nir_shader *nir;
542
543 struct anv_pipeline_binding surface_to_descriptor[256];
544 struct anv_pipeline_binding sampler_to_descriptor[256];
545 struct anv_pipeline_bind_map bind_map;
546
547 union brw_any_prog_data prog_data;
548
549 uint32_t num_stats;
550 struct brw_compile_stats stats[3];
551 char *disasm[3];
552
553 VkPipelineCreationFeedbackEXT feedback;
554
555 const unsigned *code;
556 };
557
558 static void
559 anv_pipeline_hash_shader(const struct anv_shader_module *module,
560 const char *entrypoint,
561 gl_shader_stage stage,
562 const VkSpecializationInfo *spec_info,
563 unsigned char *sha1_out)
564 {
565 struct mesa_sha1 ctx;
566 _mesa_sha1_init(&ctx);
567
568 _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
569 _mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
570 _mesa_sha1_update(&ctx, &stage, sizeof(stage));
571 if (spec_info) {
572 _mesa_sha1_update(&ctx, spec_info->pMapEntries,
573 spec_info->mapEntryCount *
574 sizeof(*spec_info->pMapEntries));
575 _mesa_sha1_update(&ctx, spec_info->pData,
576 spec_info->dataSize);
577 }
578
579 _mesa_sha1_final(&ctx, sha1_out);
580 }
581
582 static void
583 anv_pipeline_hash_graphics(struct anv_graphics_pipeline *pipeline,
584 struct anv_pipeline_layout *layout,
585 struct anv_pipeline_stage *stages,
586 unsigned char *sha1_out)
587 {
588 struct mesa_sha1 ctx;
589 _mesa_sha1_init(&ctx);
590
591 _mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
592 sizeof(pipeline->subpass->view_mask));
593
594 if (layout)
595 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
596
597 const bool rba = pipeline->base.device->robust_buffer_access;
598 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
599
600 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
601 if (stages[s].entrypoint) {
602 _mesa_sha1_update(&ctx, stages[s].shader_sha1,
603 sizeof(stages[s].shader_sha1));
604 _mesa_sha1_update(&ctx, &stages[s].key, brw_prog_key_size(s));
605 }
606 }
607
608 _mesa_sha1_final(&ctx, sha1_out);
609 }
610
611 static void
612 anv_pipeline_hash_compute(struct anv_compute_pipeline *pipeline,
613 struct anv_pipeline_layout *layout,
614 struct anv_pipeline_stage *stage,
615 unsigned char *sha1_out)
616 {
617 struct mesa_sha1 ctx;
618 _mesa_sha1_init(&ctx);
619
620 if (layout)
621 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
622
623 const bool rba = pipeline->base.device->robust_buffer_access;
624 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
625
626 _mesa_sha1_update(&ctx, stage->shader_sha1,
627 sizeof(stage->shader_sha1));
628 _mesa_sha1_update(&ctx, &stage->key.cs, sizeof(stage->key.cs));
629
630 _mesa_sha1_final(&ctx, sha1_out);
631 }
632
633 static nir_shader *
634 anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
635 struct anv_pipeline_cache *cache,
636 void *mem_ctx,
637 struct anv_pipeline_stage *stage)
638 {
639 const struct brw_compiler *compiler =
640 pipeline->device->physical->compiler;
641 const nir_shader_compiler_options *nir_options =
642 compiler->glsl_compiler_options[stage->stage].NirOptions;
643 nir_shader *nir;
644
645 nir = anv_device_search_for_nir(pipeline->device, cache,
646 nir_options,
647 stage->shader_sha1,
648 mem_ctx);
649 if (nir) {
650 assert(nir->info.stage == stage->stage);
651 return nir;
652 }
653
654 nir = anv_shader_compile_to_nir(pipeline->device,
655 mem_ctx,
656 stage->module,
657 stage->entrypoint,
658 stage->stage,
659 stage->spec_info);
660 if (nir) {
661 anv_device_upload_nir(pipeline->device, cache, nir, stage->shader_sha1);
662 return nir;
663 }
664
665 return NULL;
666 }
667
668 static void
669 anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
670 void *mem_ctx,
671 struct anv_pipeline_stage *stage,
672 struct anv_pipeline_layout *layout)
673 {
674 const struct anv_physical_device *pdevice = pipeline->device->physical;
675 const struct brw_compiler *compiler = pdevice->compiler;
676
677 struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
678 nir_shader *nir = stage->nir;
679
680 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
681 NIR_PASS_V(nir, nir_lower_wpos_center,
682 anv_pipeline_to_graphics(pipeline)->sample_shading_enable);
683 NIR_PASS_V(nir, nir_lower_input_attachments, true);
684 }
685
686 NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
687
688 if (pipeline->type == ANV_PIPELINE_GRAPHICS) {
689 NIR_PASS_V(nir, anv_nir_lower_multiview,
690 anv_pipeline_to_graphics(pipeline));
691 }
692
693 nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
694
695 NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo, NULL);
696
697 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
698 nir_address_format_64bit_global);
699
700 /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
701 anv_nir_apply_pipeline_layout(pdevice,
702 pipeline->device->robust_buffer_access,
703 layout, nir, &stage->bind_map);
704
705 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo,
706 nir_address_format_32bit_index_offset);
707 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
708 anv_nir_ssbo_addr_format(pdevice,
709 pipeline->device->robust_buffer_access));
710
711 NIR_PASS_V(nir, nir_opt_constant_folding);
712
713 /* We don't support non-uniform UBOs, and non-uniform SSBO access is
714 * handled naturally by falling back to A64 messages.
715 */
716 NIR_PASS_V(nir, nir_lower_non_uniform_access,
717 nir_lower_non_uniform_texture_access |
718 nir_lower_non_uniform_image_access);
719
720 anv_nir_compute_push_layout(pdevice, pipeline->device->robust_buffer_access,
721 nir, prog_data, &stage->bind_map, mem_ctx);
722
723 stage->nir = nir;
724 }
725
726 static void
727 anv_pipeline_link_vs(const struct brw_compiler *compiler,
728 struct anv_pipeline_stage *vs_stage,
729 struct anv_pipeline_stage *next_stage)
730 {
731 if (next_stage)
732 brw_nir_link_shaders(compiler, vs_stage->nir, next_stage->nir);
733 }
734
735 static void
736 anv_pipeline_compile_vs(const struct brw_compiler *compiler,
737 void *mem_ctx,
738 struct anv_graphics_pipeline *pipeline,
739 struct anv_pipeline_stage *vs_stage)
740 {
741 /* When using Primitive Replication for multiview, each view gets its own
742 * position slot.
743 */
744 uint32_t pos_slots = pipeline->use_primitive_replication ?
745 anv_subpass_view_count(pipeline->subpass) : 1;
746
747 brw_compute_vue_map(compiler->devinfo,
748 &vs_stage->prog_data.vs.base.vue_map,
749 vs_stage->nir->info.outputs_written,
750 vs_stage->nir->info.separate_shader,
751 pos_slots);
752
753 vs_stage->num_stats = 1;
754 vs_stage->code = brw_compile_vs(compiler, pipeline->base.device, mem_ctx,
755 &vs_stage->key.vs,
756 &vs_stage->prog_data.vs,
757 vs_stage->nir, -1,
758 vs_stage->stats, NULL);
759 }
760
761 static void
762 merge_tess_info(struct shader_info *tes_info,
763 const struct shader_info *tcs_info)
764 {
765 /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
766 *
767 * "PointMode. Controls generation of points rather than triangles
768 * or lines. This functionality defaults to disabled, and is
769 * enabled if either shader stage includes the execution mode.
770 *
771 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
772 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
773 * and OutputVertices, it says:
774 *
775 * "One mode must be set in at least one of the tessellation
776 * shader stages."
777 *
778 * So, the fields can be set in either the TCS or TES, but they must
779 * agree if set in both. Our backend looks at TES, so bitwise-or in
780 * the values from the TCS.
781 */
782 assert(tcs_info->tess.tcs_vertices_out == 0 ||
783 tes_info->tess.tcs_vertices_out == 0 ||
784 tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
785 tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
786
787 assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
788 tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
789 tcs_info->tess.spacing == tes_info->tess.spacing);
790 tes_info->tess.spacing |= tcs_info->tess.spacing;
791
792 assert(tcs_info->tess.primitive_mode == 0 ||
793 tes_info->tess.primitive_mode == 0 ||
794 tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
795 tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
796 tes_info->tess.ccw |= tcs_info->tess.ccw;
797 tes_info->tess.point_mode |= tcs_info->tess.point_mode;
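/* For example (hypothetical shaders): a TCS declaring OutputVertices = 3
 * and Triangles merged with a TES declaring only SpacingEqual leaves the
 * TES info with tcs_vertices_out = 3, triangle primitive mode and equal
 * spacing, which is exactly what the asserts above allow: each field is
 * set in at least one stage and never set differently in both.
 */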
798 }
799
800 static void
801 anv_pipeline_link_tcs(const struct brw_compiler *compiler,
802 struct anv_pipeline_stage *tcs_stage,
803 struct anv_pipeline_stage *tes_stage)
804 {
805 assert(tes_stage && tes_stage->stage == MESA_SHADER_TESS_EVAL);
806
807 brw_nir_link_shaders(compiler, tcs_stage->nir, tes_stage->nir);
808
809 nir_lower_patch_vertices(tes_stage->nir,
810 tcs_stage->nir->info.tess.tcs_vertices_out,
811 NULL);
812
813 /* Copy TCS info into the TES info */
814 merge_tess_info(&tes_stage->nir->info, &tcs_stage->nir->info);
815
816 /* Whacking the key after cache lookup is a bit sketchy, but all of
817 * this comes from the SPIR-V, which is part of the hash used for the
818 * pipeline cache. So it should be safe.
819 */
820 tcs_stage->key.tcs.tes_primitive_mode =
821 tes_stage->nir->info.tess.primitive_mode;
822 tcs_stage->key.tcs.quads_workaround =
823 compiler->devinfo->gen < 9 &&
824 tes_stage->nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
825 tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
826 }
827
828 static void
829 anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
830 void *mem_ctx,
831 struct anv_device *device,
832 struct anv_pipeline_stage *tcs_stage,
833 struct anv_pipeline_stage *prev_stage)
834 {
835 tcs_stage->key.tcs.outputs_written =
836 tcs_stage->nir->info.outputs_written;
837 tcs_stage->key.tcs.patch_outputs_written =
838 tcs_stage->nir->info.patch_outputs_written;
839
840 tcs_stage->num_stats = 1;
841 tcs_stage->code = brw_compile_tcs(compiler, device, mem_ctx,
842 &tcs_stage->key.tcs,
843 &tcs_stage->prog_data.tcs,
844 tcs_stage->nir, -1,
845 tcs_stage->stats, NULL);
846 }
847
848 static void
849 anv_pipeline_link_tes(const struct brw_compiler *compiler,
850 struct anv_pipeline_stage *tes_stage,
851 struct anv_pipeline_stage *next_stage)
852 {
853 if (next_stage)
854 brw_nir_link_shaders(compiler, tes_stage->nir, next_stage->nir);
855 }
856
857 static void
858 anv_pipeline_compile_tes(const struct brw_compiler *compiler,
859 void *mem_ctx,
860 struct anv_device *device,
861 struct anv_pipeline_stage *tes_stage,
862 struct anv_pipeline_stage *tcs_stage)
863 {
864 tes_stage->key.tes.inputs_read =
865 tcs_stage->nir->info.outputs_written;
866 tes_stage->key.tes.patch_inputs_read =
867 tcs_stage->nir->info.patch_outputs_written;
868
869 tes_stage->num_stats = 1;
870 tes_stage->code = brw_compile_tes(compiler, device, mem_ctx,
871 &tes_stage->key.tes,
872 &tcs_stage->prog_data.tcs.base.vue_map,
873 &tes_stage->prog_data.tes,
874 tes_stage->nir, -1,
875 tes_stage->stats, NULL);
876 }
877
878 static void
879 anv_pipeline_link_gs(const struct brw_compiler *compiler,
880 struct anv_pipeline_stage *gs_stage,
881 struct anv_pipeline_stage *next_stage)
882 {
883 if (next_stage)
884 brw_nir_link_shaders(compiler, gs_stage->nir, next_stage->nir);
885 }
886
887 static void
888 anv_pipeline_compile_gs(const struct brw_compiler *compiler,
889 void *mem_ctx,
890 struct anv_device *device,
891 struct anv_pipeline_stage *gs_stage,
892 struct anv_pipeline_stage *prev_stage)
893 {
894 brw_compute_vue_map(compiler->devinfo,
895 &gs_stage->prog_data.gs.base.vue_map,
896 gs_stage->nir->info.outputs_written,
897 gs_stage->nir->info.separate_shader, 1);
898
899 gs_stage->num_stats = 1;
900 gs_stage->code = brw_compile_gs(compiler, device, mem_ctx,
901 &gs_stage->key.gs,
902 &gs_stage->prog_data.gs,
903 gs_stage->nir, NULL, -1,
904 gs_stage->stats, NULL);
905 }
906
907 static void
908 anv_pipeline_link_fs(const struct brw_compiler *compiler,
909 struct anv_pipeline_stage *stage)
910 {
911 unsigned num_rt_bindings;
912 struct anv_pipeline_binding rt_bindings[MAX_RTS];
913 if (stage->key.wm.nr_color_regions > 0) {
914 assert(stage->key.wm.nr_color_regions <= MAX_RTS);
915 for (unsigned rt = 0; rt < stage->key.wm.nr_color_regions; rt++) {
916 if (stage->key.wm.color_outputs_valid & BITFIELD_BIT(rt)) {
917 rt_bindings[rt] = (struct anv_pipeline_binding) {
918 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
919 .index = rt,
920 };
921 } else {
922 /* Setup a null render target */
923 rt_bindings[rt] = (struct anv_pipeline_binding) {
924 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
925 .index = UINT32_MAX,
926 };
927 }
928 }
929 num_rt_bindings = stage->key.wm.nr_color_regions;
930 } else {
931 /* Setup a null render target */
932 rt_bindings[0] = (struct anv_pipeline_binding) {
933 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
934 .index = UINT32_MAX,
935 };
936 num_rt_bindings = 1;
937 }
938
939 assert(num_rt_bindings <= MAX_RTS);
940 assert(stage->bind_map.surface_count == 0);
941 typed_memcpy(stage->bind_map.surface_to_descriptor,
942 rt_bindings, num_rt_bindings);
943 stage->bind_map.surface_count += num_rt_bindings;
944
945 /* Now that we've set up the color attachments, we can go through and
946 * eliminate any shader outputs that map to VK_ATTACHMENT_UNUSED in the
947 * hopes that dead code can clean them up in this and any earlier shader
948 * stages.
949 */
950 nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
951 bool deleted_output = false;
952 nir_foreach_variable_safe(var, &stage->nir->outputs) {
953 /* TODO: We don't delete depth/stencil writes. We probably could if the
954 * subpass doesn't have a depth/stencil attachment.
955 */
956 if (var->data.location < FRAG_RESULT_DATA0)
957 continue;
958
959 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
960
961 /* If this is the RT at location 0 and we have alpha to coverage
962 * enabled we still need that write because it will affect the coverage
963 * mask even if it's never written to a color target.
964 */
965 if (rt == 0 && stage->key.wm.alpha_to_coverage)
966 continue;
967
968 const unsigned array_len =
969 glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
970 assert(rt + array_len <= MAX_RTS);
971
972 if (rt >= MAX_RTS || !(stage->key.wm.color_outputs_valid &
973 BITFIELD_RANGE(rt, array_len))) {
974 deleted_output = true;
975 var->data.mode = nir_var_function_temp;
976 exec_node_remove(&var->node);
977 exec_list_push_tail(&impl->locals, &var->node);
978 }
979 }
980
981 if (deleted_output)
982 nir_fixup_deref_modes(stage->nir);
983
984 /* We stored the number of subpass color attachments in nr_color_regions
985 * when calculating the key for caching. Now that we've computed the bind
986 * map, we can reduce this to the actual max before we go into the back-end
987 * compiler.
988 */
989 stage->key.wm.nr_color_regions =
990 util_last_bit(stage->key.wm.color_outputs_valid);
991 }
992
993 static void
994 anv_pipeline_compile_fs(const struct brw_compiler *compiler,
995 void *mem_ctx,
996 struct anv_device *device,
997 struct anv_pipeline_stage *fs_stage,
998 struct anv_pipeline_stage *prev_stage)
999 {
1000 /* TODO: we could set this to 0 based on the information in nir_shader, but
1001 * we need this before we call spirv_to_nir.
1002 */
1003 assert(prev_stage);
1004 fs_stage->key.wm.input_slots_valid =
1005 prev_stage->prog_data.vue.vue_map.slots_valid;
1006
1007 fs_stage->code = brw_compile_fs(compiler, device, mem_ctx,
1008 &fs_stage->key.wm,
1009 &fs_stage->prog_data.wm,
1010 fs_stage->nir, -1, -1, -1,
1011 true, false, NULL,
1012 fs_stage->stats, NULL);
1013
1014 fs_stage->num_stats = (uint32_t)fs_stage->prog_data.wm.dispatch_8 +
1015 (uint32_t)fs_stage->prog_data.wm.dispatch_16 +
1016 (uint32_t)fs_stage->prog_data.wm.dispatch_32;
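/* For example, a fragment shader compiled to SIMD8 and SIMD16 but not
 * SIMD32 ends up with num_stats = 2, matching the per-dispatch-width
 * entries that anv_pipeline_add_executables() walks for fragment shaders.
 */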
1017
1018 if (fs_stage->key.wm.color_outputs_valid == 0 &&
1019 !fs_stage->prog_data.wm.has_side_effects &&
1020 !fs_stage->prog_data.wm.uses_omask &&
1021 !fs_stage->key.wm.alpha_to_coverage &&
1022 !fs_stage->prog_data.wm.uses_kill &&
1023 fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
1024 !fs_stage->prog_data.wm.computed_stencil) {
1025 /* This fragment shader has no outputs and no side effects. Go ahead
1026 * and return the code pointer so we don't accidentally think the
1027 * compile failed, but zero out prog_data, which will set program_size
1028 * to zero and disable the stage.
1029 */
1030 memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
1031 }
1032 }
1033
1034 static void
1035 anv_pipeline_add_executable(struct anv_pipeline *pipeline,
1036 struct anv_pipeline_stage *stage,
1037 struct brw_compile_stats *stats,
1038 uint32_t code_offset)
1039 {
1040 char *nir = NULL;
1041 if (stage->nir &&
1042 (pipeline->flags &
1043 VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
1044 char *stream_data = NULL;
1045 size_t stream_size = 0;
1046 FILE *stream = open_memstream(&stream_data, &stream_size);
1047
1048 nir_print_shader(stage->nir, stream);
1049
1050 fclose(stream);
1051
1052 /* Copy it to a ralloc'd thing */
1053 nir = ralloc_size(pipeline->mem_ctx, stream_size + 1);
1054 memcpy(nir, stream_data, stream_size);
1055 nir[stream_size] = 0;
1056
1057 free(stream_data);
1058 }
1059
1060 char *disasm = NULL;
1061 if (stage->code &&
1062 (pipeline->flags &
1063 VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
1064 char *stream_data = NULL;
1065 size_t stream_size = 0;
1066 FILE *stream = open_memstream(&stream_data, &stream_size);
1067
1068 uint32_t push_size = 0;
1069 for (unsigned i = 0; i < 4; i++)
1070 push_size += stage->bind_map.push_ranges[i].length;
1071 if (push_size > 0) {
1072 fprintf(stream, "Push constant ranges:\n");
1073 for (unsigned i = 0; i < 4; i++) {
1074 if (stage->bind_map.push_ranges[i].length == 0)
1075 continue;
1076
1077 fprintf(stream, " RANGE%d (%dB): ", i,
1078 stage->bind_map.push_ranges[i].length * 32);
1079
1080 switch (stage->bind_map.push_ranges[i].set) {
1081 case ANV_DESCRIPTOR_SET_NULL:
1082 fprintf(stream, "NULL");
1083 break;
1084
1085 case ANV_DESCRIPTOR_SET_PUSH_CONSTANTS:
1086 fprintf(stream, "Vulkan push constants and API params");
1087 break;
1088
1089 case ANV_DESCRIPTOR_SET_DESCRIPTORS:
1090 fprintf(stream, "Descriptor buffer for set %d (start=%dB)",
1091 stage->bind_map.push_ranges[i].index,
1092 stage->bind_map.push_ranges[i].start * 32);
1093 break;
1094
1095 case ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS:
1096 unreachable("gl_NumWorkgroups is never pushed");
1097
1098 case ANV_DESCRIPTOR_SET_SHADER_CONSTANTS:
1099 fprintf(stream, "Inline shader constant data (start=%dB)",
1100 stage->bind_map.push_ranges[i].start * 32);
1101 break;
1102
1103 case ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS:
1104 unreachable("Color attachments can't be pushed");
1105
1106 default:
1107 fprintf(stream, "UBO (set=%d binding=%d start=%dB)",
1108 stage->bind_map.push_ranges[i].set,
1109 stage->bind_map.push_ranges[i].index,
1110 stage->bind_map.push_ranges[i].start * 32);
1111 break;
1112 }
1113 fprintf(stream, "\n");
1114 }
1115 fprintf(stream, "\n");
1116 }
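/* Note that push range starts and lengths are stored in 32-byte units,
 * which is why the printout above multiplies them by 32 to report byte
 * counts.
 */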
1117
1118 /* Creating this is far cheaper than it looks. It's perfectly fine to
1119 * do it for every binary.
1120 */
1121 struct gen_disasm *d = gen_disasm_create(&pipeline->device->info);
1122 gen_disasm_disassemble(d, stage->code, code_offset, stream);
1123 gen_disasm_destroy(d);
1124
1125 fclose(stream);
1126
1127 /* Copy it to a ralloc'd thing */
1128 disasm = ralloc_size(pipeline->mem_ctx, stream_size + 1);
1129 memcpy(disasm, stream_data, stream_size);
1130 disasm[stream_size] = 0;
1131
1132 free(stream_data);
1133 }
1134
1135 const struct anv_pipeline_executable exe = {
1136 .stage = stage->stage,
1137 .stats = *stats,
1138 .nir = nir,
1139 .disasm = disasm,
1140 };
1141 util_dynarray_append(&pipeline->executables,
1142 struct anv_pipeline_executable, exe);
1143 }
1144
1145 static void
1146 anv_pipeline_add_executables(struct anv_pipeline *pipeline,
1147 struct anv_pipeline_stage *stage,
1148 struct anv_shader_bin *bin)
1149 {
1150 if (stage->stage == MESA_SHADER_FRAGMENT) {
1151 /* We pull the prog data and stats out of the anv_shader_bin because
1152 * the anv_pipeline_stage may not be fully populated if we successfully
1153 * looked up the shader in a cache.
1154 */
1155 const struct brw_wm_prog_data *wm_prog_data =
1156 (const struct brw_wm_prog_data *)bin->prog_data;
1157 struct brw_compile_stats *stats = bin->stats;
1158
1159 if (wm_prog_data->dispatch_8) {
1160 anv_pipeline_add_executable(pipeline, stage, stats++, 0);
1161 }
1162
1163 if (wm_prog_data->dispatch_16) {
1164 anv_pipeline_add_executable(pipeline, stage, stats++,
1165 wm_prog_data->prog_offset_16);
1166 }
1167
1168 if (wm_prog_data->dispatch_32) {
1169 anv_pipeline_add_executable(pipeline, stage, stats++,
1170 wm_prog_data->prog_offset_32);
1171 }
1172 } else {
1173 anv_pipeline_add_executable(pipeline, stage, bin->stats, 0);
1174 }
1175 }
1176
1177 static void
1178 anv_pipeline_init_from_cached_graphics(struct anv_graphics_pipeline *pipeline)
1179 {
1180 /* TODO: Cache this pipeline-wide information. */
1181
1182 /* Primitive replication depends on information from all the shaders.
1183 * Recover this bit from the fact that the vertex shader ends up with
1184 * more than one position slot when primitive replication is in use.
1185 */
1186 assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
1187 int pos_slots = 0;
1188 const struct brw_vue_prog_data *vue_prog_data =
1189 (const void *) pipeline->shaders[MESA_SHADER_VERTEX]->prog_data;
1190 const struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
1191 for (int i = 0; i < vue_map->num_slots; i++) {
1192 if (vue_map->slot_to_varying[i] == VARYING_SLOT_POS)
1193 pos_slots++;
1194 }
1195 pipeline->use_primitive_replication = pos_slots > 1;
1196 }
1197
1198 static VkResult
1199 anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
1200 struct anv_pipeline_cache *cache,
1201 const VkGraphicsPipelineCreateInfo *info)
1202 {
1203 VkPipelineCreationFeedbackEXT pipeline_feedback = {
1204 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1205 };
1206 int64_t pipeline_start = os_time_get_nano();
1207
1208 const struct brw_compiler *compiler = pipeline->base.device->physical->compiler;
1209 struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
1210
1211 pipeline->active_stages = 0;
1212
1213 VkResult result;
1214 for (uint32_t i = 0; i < info->stageCount; i++) {
1215 const VkPipelineShaderStageCreateInfo *sinfo = &info->pStages[i];
1216 gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
1217
1218 pipeline->active_stages |= sinfo->stage;
1219
1220 int64_t stage_start = os_time_get_nano();
1221
1222 stages[stage].stage = stage;
1223 stages[stage].module = anv_shader_module_from_handle(sinfo->module);
1224 stages[stage].entrypoint = sinfo->pName;
1225 stages[stage].spec_info = sinfo->pSpecializationInfo;
1226 anv_pipeline_hash_shader(stages[stage].module,
1227 stages[stage].entrypoint,
1228 stage,
1229 stages[stage].spec_info,
1230 stages[stage].shader_sha1);
1231
1232 const struct gen_device_info *devinfo = &pipeline->base.device->info;
1233 switch (stage) {
1234 case MESA_SHADER_VERTEX:
1235 populate_vs_prog_key(devinfo, sinfo->flags, &stages[stage].key.vs);
1236 break;
1237 case MESA_SHADER_TESS_CTRL:
1238 populate_tcs_prog_key(devinfo, sinfo->flags,
1239 info->pTessellationState->patchControlPoints,
1240 &stages[stage].key.tcs);
1241 break;
1242 case MESA_SHADER_TESS_EVAL:
1243 populate_tes_prog_key(devinfo, sinfo->flags, &stages[stage].key.tes);
1244 break;
1245 case MESA_SHADER_GEOMETRY:
1246 populate_gs_prog_key(devinfo, sinfo->flags, &stages[stage].key.gs);
1247 break;
1248 case MESA_SHADER_FRAGMENT: {
1249 const bool raster_enabled =
1250 !info->pRasterizationState->rasterizerDiscardEnable;
1251 populate_wm_prog_key(devinfo, sinfo->flags,
1252 pipeline->subpass,
1253 raster_enabled ? info->pMultisampleState : NULL,
1254 &stages[stage].key.wm);
1255 break;
1256 }
1257 default:
1258 unreachable("Invalid graphics shader stage");
1259 }
1260
1261 stages[stage].feedback.duration += os_time_get_nano() - stage_start;
1262 stages[stage].feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
1263 }
1264
1265 if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
1266 pipeline->active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
1267
1268 assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
1269
1270 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1271
1272 unsigned char sha1[20];
1273 anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
1274
1275 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1276 if (!stages[s].entrypoint)
1277 continue;
1278
1279 stages[s].cache_key.stage = s;
1280 memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
1281 }
1282
1283 const bool skip_cache_lookup =
1284 (pipeline->base.flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
1285
1286 if (!skip_cache_lookup) {
1287 unsigned found = 0;
1288 unsigned cache_hits = 0;
1289 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1290 if (!stages[s].entrypoint)
1291 continue;
1292
1293 int64_t stage_start = os_time_get_nano();
1294
1295 bool cache_hit;
1296 struct anv_shader_bin *bin =
1297 anv_device_search_for_kernel(pipeline->base.device, cache,
1298 &stages[s].cache_key,
1299 sizeof(stages[s].cache_key), &cache_hit);
1300 if (bin) {
1301 found++;
1302 pipeline->shaders[s] = bin;
1303 }
1304
1305 if (cache_hit) {
1306 cache_hits++;
1307 stages[s].feedback.flags |=
1308 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1309 }
1310 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1311 }
1312
1313 if (found == __builtin_popcount(pipeline->active_stages)) {
1314 if (cache_hits == found) {
1315 pipeline_feedback.flags |=
1316 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1317 }
1318 /* We found all our shaders in the cache. We're done. */
1319 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1320 if (!stages[s].entrypoint)
1321 continue;
1322
1323 anv_pipeline_add_executables(&pipeline->base, &stages[s],
1324 pipeline->shaders[s]);
1325 }
1326 anv_pipeline_init_from_cached_graphics(pipeline);
1327 goto done;
1328 } else if (found > 0) {
1329 /* We found some but not all of our shaders. This shouldn't happen
1330 * most of the time but it can if we have a partially populated
1331 * pipeline cache.
1332 */
1333 assert(found < __builtin_popcount(pipeline->active_stages));
1334
1335 vk_debug_report(&pipeline->base.device->physical->instance->debug_report_callbacks,
1336 VK_DEBUG_REPORT_WARNING_BIT_EXT |
1337 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
1338 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
1339 (uint64_t)(uintptr_t)cache,
1340 0, 0, "anv",
1341 "Found a partial pipeline in the cache. This is "
1342 "most likely caused by an incomplete pipeline cache "
1343 "import or export");
1344
1345 /* We're going to have to recompile anyway, so just throw away our
1346 * references to the shaders in the cache. We'll get them out of the
1347 * cache again as part of the compilation process.
1348 */
1349 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1350 stages[s].feedback.flags = 0;
1351 if (pipeline->shaders[s]) {
1352 anv_shader_bin_unref(pipeline->base.device, pipeline->shaders[s]);
1353 pipeline->shaders[s] = NULL;
1354 }
1355 }
1356 }
1357 }
1358
1359 void *pipeline_ctx = ralloc_context(NULL);
1360
1361 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1362 if (!stages[s].entrypoint)
1363 continue;
1364
1365 int64_t stage_start = os_time_get_nano();
1366
1367 assert(stages[s].stage == s);
1368 assert(pipeline->shaders[s] == NULL);
1369
1370 stages[s].bind_map = (struct anv_pipeline_bind_map) {
1371 .surface_to_descriptor = stages[s].surface_to_descriptor,
1372 .sampler_to_descriptor = stages[s].sampler_to_descriptor
1373 };
1374
1375 stages[s].nir = anv_pipeline_stage_get_nir(&pipeline->base, cache,
1376 pipeline_ctx,
1377 &stages[s]);
1378 if (stages[s].nir == NULL) {
1379 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1380 goto fail;
1381 }
1382
1383 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1384 }
1385
1386 /* Walk backwards to link */
1387 struct anv_pipeline_stage *next_stage = NULL;
1388 for (int s = MESA_SHADER_STAGES - 1; s >= 0; s--) {
1389 if (!stages[s].entrypoint)
1390 continue;
1391
1392 switch (s) {
1393 case MESA_SHADER_VERTEX:
1394 anv_pipeline_link_vs(compiler, &stages[s], next_stage);
1395 break;
1396 case MESA_SHADER_TESS_CTRL:
1397 anv_pipeline_link_tcs(compiler, &stages[s], next_stage);
1398 break;
1399 case MESA_SHADER_TESS_EVAL:
1400 anv_pipeline_link_tes(compiler, &stages[s], next_stage);
1401 break;
1402 case MESA_SHADER_GEOMETRY:
1403 anv_pipeline_link_gs(compiler, &stages[s], next_stage);
1404 break;
1405 case MESA_SHADER_FRAGMENT:
1406 anv_pipeline_link_fs(compiler, &stages[s]);
1407 break;
1408 default:
1409 unreachable("Invalid graphics shader stage");
1410 }
1411
1412 next_stage = &stages[s];
1413 }
1414
1415 if (pipeline->base.device->info.gen >= 12 &&
1416 pipeline->subpass->view_mask != 0) {
1417 /* For some pipelines HW Primitive Replication can be used instead of
1418 * instancing to implement Multiview. This depends on how viewIndex is
1419 * used in all the active shaders, so this check can't be done per
1420 * individual shader.
1421 */
1422 nir_shader *shaders[MESA_SHADER_STAGES] = {};
1423 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++)
1424 shaders[s] = stages[s].nir;
1425
1426 pipeline->use_primitive_replication =
1427 anv_check_for_primitive_replication(shaders, pipeline);
1428 } else {
1429 pipeline->use_primitive_replication = false;
1430 }
1431
1432 struct anv_pipeline_stage *prev_stage = NULL;
1433 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1434 if (!stages[s].entrypoint)
1435 continue;
1436
1437 int64_t stage_start = os_time_get_nano();
1438
1439 void *stage_ctx = ralloc_context(NULL);
1440
1441 nir_xfb_info *xfb_info = NULL;
1442 if (s == MESA_SHADER_VERTEX ||
1443 s == MESA_SHADER_TESS_EVAL ||
1444 s == MESA_SHADER_GEOMETRY)
1445 xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);
1446
1447 anv_pipeline_lower_nir(&pipeline->base, stage_ctx, &stages[s], layout);
1448
1449 switch (s) {
1450 case MESA_SHADER_VERTEX:
1451 anv_pipeline_compile_vs(compiler, stage_ctx, pipeline,
1452 &stages[s]);
1453 break;
1454 case MESA_SHADER_TESS_CTRL:
1455 anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->base.device,
1456 &stages[s], prev_stage);
1457 break;
1458 case MESA_SHADER_TESS_EVAL:
1459 anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->base.device,
1460 &stages[s], prev_stage);
1461 break;
1462 case MESA_SHADER_GEOMETRY:
1463 anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->base.device,
1464 &stages[s], prev_stage);
1465 break;
1466 case MESA_SHADER_FRAGMENT:
1467 anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->base.device,
1468 &stages[s], prev_stage);
1469 break;
1470 default:
1471 unreachable("Invalid graphics shader stage");
1472 }
1473 if (stages[s].code == NULL) {
1474 ralloc_free(stage_ctx);
1475 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1476 goto fail;
1477 }
1478
1479 anv_nir_validate_push_layout(&stages[s].prog_data.base,
1480 &stages[s].bind_map);
1481
1482 struct anv_shader_bin *bin =
1483 anv_device_upload_kernel(pipeline->base.device, cache, s,
1484 &stages[s].cache_key,
1485 sizeof(stages[s].cache_key),
1486 stages[s].code,
1487 stages[s].prog_data.base.program_size,
1488 stages[s].nir->constant_data,
1489 stages[s].nir->constant_data_size,
1490 &stages[s].prog_data.base,
1491 brw_prog_data_size(s),
1492 stages[s].stats, stages[s].num_stats,
1493 xfb_info, &stages[s].bind_map);
1494 if (!bin) {
1495 ralloc_free(stage_ctx);
1496 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1497 goto fail;
1498 }
1499
1500 anv_pipeline_add_executables(&pipeline->base, &stages[s], bin);
1501
1502 pipeline->shaders[s] = bin;
1503 ralloc_free(stage_ctx);
1504
1505 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1506
1507 prev_stage = &stages[s];
1508 }
1509
1510 ralloc_free(pipeline_ctx);
1511
1512 done:
1513
1514 if (pipeline->shaders[MESA_SHADER_FRAGMENT] &&
1515 pipeline->shaders[MESA_SHADER_FRAGMENT]->prog_data->program_size == 0) {
1516 /* This can happen if we decided to implicitly disable the fragment
1517 * shader. See anv_pipeline_compile_fs().
1518 */
1519 anv_shader_bin_unref(pipeline->base.device,
1520 pipeline->shaders[MESA_SHADER_FRAGMENT]);
1521 pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
1522 pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
1523 }
1524
1525 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1526
1527 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1528 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1529 if (create_feedback) {
1530 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1531
1532 assert(info->stageCount == create_feedback->pipelineStageCreationFeedbackCount);
1533 for (uint32_t i = 0; i < info->stageCount; i++) {
1534 gl_shader_stage s = vk_to_mesa_shader_stage(info->pStages[i].stage);
1535 create_feedback->pPipelineStageCreationFeedbacks[i] = stages[s].feedback;
1536 }
1537 }
1538
1539 return VK_SUCCESS;
1540
1541 fail:
1542 ralloc_free(pipeline_ctx);
1543
1544 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1545 if (pipeline->shaders[s])
1546 anv_shader_bin_unref(pipeline->base.device, pipeline->shaders[s]);
1547 }
1548
1549 return result;
1550 }
1551
1552 static void
1553 shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
1554 {
1555 assert(glsl_type_is_vector_or_scalar(type));
1556
1557 uint32_t comp_size = glsl_type_is_boolean(type)
1558 ? 4 : glsl_get_bit_size(type) / 8;
1559 unsigned length = glsl_get_vector_elements(type);
1560 *size = comp_size * length,
1561 *align = comp_size * (length == 3 ? 4 : length);
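/* A quick worked example: for a vec3 of 32-bit floats, comp_size is 4 and
 * length is 3, so *size = 12 while *align = 16; three-component vectors
 * get four-component alignment, whereas a scalar, vec2 or vec4 aligns to
 * its own size.
 */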
1562 }
1563
1564 VkResult
1565 anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
1566 struct anv_pipeline_cache *cache,
1567 const VkComputePipelineCreateInfo *info,
1568 const struct anv_shader_module *module,
1569 const char *entrypoint,
1570 const VkSpecializationInfo *spec_info)
1571 {
1572 VkPipelineCreationFeedbackEXT pipeline_feedback = {
1573 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1574 };
1575 int64_t pipeline_start = os_time_get_nano();
1576
1577 const struct brw_compiler *compiler = pipeline->base.device->physical->compiler;
1578
1579 struct anv_pipeline_stage stage = {
1580 .stage = MESA_SHADER_COMPUTE,
1581 .module = module,
1582 .entrypoint = entrypoint,
1583 .spec_info = spec_info,
1584 .cache_key = {
1585 .stage = MESA_SHADER_COMPUTE,
1586 },
1587 .feedback = {
1588 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1589 },
1590 };
1591 anv_pipeline_hash_shader(stage.module,
1592 stage.entrypoint,
1593 MESA_SHADER_COMPUTE,
1594 stage.spec_info,
1595 stage.shader_sha1);
1596
1597 struct anv_shader_bin *bin = NULL;
1598
1599 const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *rss_info =
1600 vk_find_struct_const(info->stage.pNext,
1601 PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
1602
1603 populate_cs_prog_key(&pipeline->base.device->info, info->stage.flags,
1604 rss_info, &stage.key.cs);
1605
1606 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1607
1608 const bool skip_cache_lookup =
1609 (pipeline->base.flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
1610
1611 anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
1612
1613 bool cache_hit = false;
1614 if (!skip_cache_lookup) {
1615 bin = anv_device_search_for_kernel(pipeline->base.device, cache,
1616 &stage.cache_key,
1617 sizeof(stage.cache_key),
1618 &cache_hit);
1619 }
1620
1621 void *mem_ctx = ralloc_context(NULL);
1622 if (bin == NULL) {
1623 int64_t stage_start = os_time_get_nano();
1624
1625 stage.bind_map = (struct anv_pipeline_bind_map) {
1626 .surface_to_descriptor = stage.surface_to_descriptor,
1627 .sampler_to_descriptor = stage.sampler_to_descriptor
1628 };
1629
1630 /* Set up a binding for the gl_NumWorkGroups */
1631 stage.bind_map.surface_count = 1;
1632 stage.bind_map.surface_to_descriptor[0] = (struct anv_pipeline_binding) {
1633 .set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
1634 };
1635
1636 stage.nir = anv_pipeline_stage_get_nir(&pipeline->base, cache, mem_ctx, &stage);
1637 if (stage.nir == NULL) {
1638 ralloc_free(mem_ctx);
1639 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1640 }
1641
1642 NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id);
1643
1644 anv_pipeline_lower_nir(&pipeline->base, mem_ctx, &stage, layout);
1645
1646 NIR_PASS_V(stage.nir, nir_lower_vars_to_explicit_types,
1647 nir_var_mem_shared, shared_type_info);
1648 NIR_PASS_V(stage.nir, nir_lower_explicit_io,
1649 nir_var_mem_shared, nir_address_format_32bit_offset);
1650
1651 stage.num_stats = 1;
1652 stage.code = brw_compile_cs(compiler, pipeline->base.device, mem_ctx,
1653 &stage.key.cs, &stage.prog_data.cs,
1654 stage.nir, -1, stage.stats, NULL);
1655 if (stage.code == NULL) {
1656 ralloc_free(mem_ctx);
1657 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1658 }
1659
1660 anv_nir_validate_push_layout(&stage.prog_data.base, &stage.bind_map);
1661
1662 if (!stage.prog_data.cs.uses_num_work_groups) {
1663 assert(stage.bind_map.surface_to_descriptor[0].set ==
1664 ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS);
1665 stage.bind_map.surface_to_descriptor[0].set = ANV_DESCRIPTOR_SET_NULL;
1666 }
1667
1668 const unsigned code_size = stage.prog_data.base.program_size;
1669 bin = anv_device_upload_kernel(pipeline->base.device, cache,
1670 MESA_SHADER_COMPUTE,
1671 &stage.cache_key, sizeof(stage.cache_key),
1672 stage.code, code_size,
1673 stage.nir->constant_data,
1674 stage.nir->constant_data_size,
1675 &stage.prog_data.base,
1676 sizeof(stage.prog_data.cs),
1677 stage.stats, stage.num_stats,
1678 NULL, &stage.bind_map);
1679 if (!bin) {
1680 ralloc_free(mem_ctx);
1681 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1682 }
1683
1684 stage.feedback.duration = os_time_get_nano() - stage_start;
1685 }
1686
1687 anv_pipeline_add_executables(&pipeline->base, &stage, bin);
1688
1689 ralloc_free(mem_ctx);
1690
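/* If the kernel came from the pipeline cache, flag the hit on both the
 * per-stage and the whole-pipeline VK_EXT_pipeline_creation_feedback
 * results so the application can tell the compile was skipped.
 */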
1691 if (cache_hit) {
1692 stage.feedback.flags |=
1693 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1694 pipeline_feedback.flags |=
1695 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1696 }
1697 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1698
1699 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1700 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1701 if (create_feedback) {
1702 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1703
1704 assert(create_feedback->pipelineStageCreationFeedbackCount == 1);
1705 create_feedback->pPipelineStageCreationFeedbacks[0] = stage.feedback;
1706 }
1707
1708 pipeline->cs = bin;
1709
1710 return VK_SUCCESS;
1711 }
1712
1713 uint32_t
1714 anv_cs_workgroup_size(const struct anv_compute_pipeline *pipeline)
1715 {
1716 const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
1717 return cs_prog_data->local_size[0] *
1718 cs_prog_data->local_size[1] *
1719 cs_prog_data->local_size[2];
1720 }
1721
1722 uint32_t
1723 anv_cs_threads(const struct anv_compute_pipeline *pipeline)
1724 {
1725 const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
1726 return DIV_ROUND_UP(anv_cs_workgroup_size(pipeline),
1727 cs_prog_data->simd_size);
1728 }
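/* Worked example: a local workgroup of 8x8x1 is 64 invocations; compiled at
 * SIMD16 that is DIV_ROUND_UP(64, 16) = 4 hardware threads, while SIMD32
 * would need only 2.
 */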
1729
1730 /**
1731 * Copy pipeline state not marked as dynamic.
1732 * Dynamic state is pipeline state which hasn't been provided at pipeline
1733 * creation time, but is dynamically provided afterwards using various
1734 * vkCmdSet* functions.
1735 *
1736 * The set of state considered "non_dynamic" is determined by the pieces of
1737 * state that have their corresponding VkDynamicState enums omitted from
1738 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
1739 *
1740 * @param[out] pipeline Destination non_dynamic state.
1741 * @param[in] pCreateInfo Source of non_dynamic state to be copied.
1742 */
1743 static void
1744 copy_non_dynamic_state(struct anv_graphics_pipeline *pipeline,
1745 const VkGraphicsPipelineCreateInfo *pCreateInfo)
1746 {
1747 anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
1748 struct anv_subpass *subpass = pipeline->subpass;
1749
1750 pipeline->dynamic_state = default_dynamic_state;
1751
1752 if (pCreateInfo->pDynamicState) {
1753 /* Remove all of the states that are marked as dynamic */
1754 uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
1755 for (uint32_t s = 0; s < count; s++) {
1756 states &= ~anv_cmd_dirty_bit_for_vk_dynamic_state(
1757 pCreateInfo->pDynamicState->pDynamicStates[s]);
1758 }
1759 }
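/* At this point `states` has a bit set for every piece of state that must be
 * baked into the pipeline; everything listed in pDynamicStates has been
 * masked out and is expected to be supplied later via vkCmdSet*().
 */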
1760
1761 struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
1762
1763 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1764 *
1765 * pViewportState is [...] NULL if the pipeline
1766 * has rasterization disabled.
1767 */
1768 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1769 assert(pCreateInfo->pViewportState);
1770
1771 dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
1772 if (states & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) {
1773 typed_memcpy(dynamic->viewport.viewports,
1774 pCreateInfo->pViewportState->pViewports,
1775 pCreateInfo->pViewportState->viewportCount);
1776 }
1777
1778 dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
1779 if (states & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) {
1780 typed_memcpy(dynamic->scissor.scissors,
1781 pCreateInfo->pViewportState->pScissors,
1782 pCreateInfo->pViewportState->scissorCount);
1783 }
1784 }
1785
1786 if (states & ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) {
1787 assert(pCreateInfo->pRasterizationState);
1788 dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
1789 }
1790
1791 if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS) {
1792 assert(pCreateInfo->pRasterizationState);
1793 dynamic->depth_bias.bias =
1794 pCreateInfo->pRasterizationState->depthBiasConstantFactor;
1795 dynamic->depth_bias.clamp =
1796 pCreateInfo->pRasterizationState->depthBiasClamp;
1797 dynamic->depth_bias.slope =
1798 pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
1799 }
1800
1801 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1802 *
1803 * pColorBlendState is [...] NULL if the pipeline has rasterization
1804 * disabled or if the subpass of the render pass the pipeline is
1805 * created against does not use any color attachments.
1806 */
1807 bool uses_color_att = false;
1808 for (unsigned i = 0; i < subpass->color_count; ++i) {
1809 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1810 uses_color_att = true;
1811 break;
1812 }
1813 }
1814
1815 if (uses_color_att &&
1816 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1817 assert(pCreateInfo->pColorBlendState);
1818
1819 if (states & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
1820 typed_memcpy(dynamic->blend_constants,
1821 pCreateInfo->pColorBlendState->blendConstants, 4);
1822 }
1823
1824 /* If there is no depth/stencil attachment, then don't read
1825 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
1826 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
1827 * no need to override the depth/stencil defaults in
1828 * anv_graphics_pipeline::dynamic_state when there is no depth/stencil attachment.
1829 *
1830 * Section 9.2 of the Vulkan 1.0.15 spec says:
1831 *
1832 * pDepthStencilState is [...] NULL if the pipeline has rasterization
1833 * disabled or if the subpass of the render pass the pipeline is created
1834 * against does not use a depth/stencil attachment.
1835 */
1836 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
1837 subpass->depth_stencil_attachment) {
1838 assert(pCreateInfo->pDepthStencilState);
1839
1840 if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS) {
1841 dynamic->depth_bounds.min =
1842 pCreateInfo->pDepthStencilState->minDepthBounds;
1843 dynamic->depth_bounds.max =
1844 pCreateInfo->pDepthStencilState->maxDepthBounds;
1845 }
1846
1847 if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) {
1848 dynamic->stencil_compare_mask.front =
1849 pCreateInfo->pDepthStencilState->front.compareMask;
1850 dynamic->stencil_compare_mask.back =
1851 pCreateInfo->pDepthStencilState->back.compareMask;
1852 }
1853
1854 if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) {
1855 dynamic->stencil_write_mask.front =
1856 pCreateInfo->pDepthStencilState->front.writeMask;
1857 dynamic->stencil_write_mask.back =
1858 pCreateInfo->pDepthStencilState->back.writeMask;
1859 }
1860
1861 if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) {
1862 dynamic->stencil_reference.front =
1863 pCreateInfo->pDepthStencilState->front.reference;
1864 dynamic->stencil_reference.back =
1865 pCreateInfo->pDepthStencilState->back.reference;
1866 }
1867 }
1868
1869 const VkPipelineRasterizationLineStateCreateInfoEXT *line_state =
1870 vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
1871 PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
1872 if (line_state) {
1873 if (states & ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE) {
1874 dynamic->line_stipple.factor = line_state->lineStippleFactor;
1875 dynamic->line_stipple.pattern = line_state->lineStipplePattern;
1876 }
1877 }
1878
1879 pipeline->dynamic_state_mask = states;
1880 }
1881
1882 static void
1883 anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
1884 {
1885 #ifdef DEBUG
1886 struct anv_render_pass *renderpass = NULL;
1887 struct anv_subpass *subpass = NULL;
1888
1889 /* Assert that all required members of VkGraphicsPipelineCreateInfo are
1890 * present. See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
1891 */
1892 assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1893
1894 renderpass = anv_render_pass_from_handle(info->renderPass);
1895 assert(renderpass);
1896
1897 assert(info->subpass < renderpass->subpass_count);
1898 subpass = &renderpass->subpasses[info->subpass];
1899
1900 assert(info->stageCount >= 1);
1901 assert(info->pVertexInputState);
1902 assert(info->pInputAssemblyState);
1903 assert(info->pRasterizationState);
1904 if (!info->pRasterizationState->rasterizerDiscardEnable) {
1905 assert(info->pViewportState);
1906 assert(info->pMultisampleState);
1907
1908 if (subpass && subpass->depth_stencil_attachment)
1909 assert(info->pDepthStencilState);
1910
1911 if (subpass && subpass->color_count > 0) {
1912 bool all_color_unused = true;
1913 for (int i = 0; i < subpass->color_count; i++) {
1914 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
1915 all_color_unused = false;
1916 }
1917 /* pColorBlendState is ignored if the pipeline has rasterization
1918 * disabled or if the subpass of the render pass the pipeline is
1919 * created against does not use any color attachments.
1920 */
1921 assert(info->pColorBlendState || all_color_unused);
1922 }
1923 }
1924
1925 for (uint32_t i = 0; i < info->stageCount; ++i) {
1926 switch (info->pStages[i].stage) {
1927 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
1928 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
1929 assert(info->pTessellationState);
1930 break;
1931 default:
1932 break;
1933 }
1934 }
1935 #endif
1936 }
1937
1938 /**
1939 * Calculate the desired L3 partitioning based on the current state of the
1940 * pipeline. For now this simply returns the conservative defaults calculated
1941 * by gen_get_default_l3_weights(), but we could probably do better by gathering
1942 * more statistics from the pipeline state (e.g. guess of expected URB usage
1943 * and bound surfaces), or by using feedback from performance counters.
1944 */
1945 void
1946 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
1947 {
1948 const struct gen_device_info *devinfo = &pipeline->device->info;
1949
1950 const struct gen_l3_weights w =
1951 gen_get_default_l3_weights(devinfo, true, needs_slm);
1952
1953 pipeline->l3_config = gen_get_l3_config(devinfo, w);
1954 }
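/* Note: the graphics pipeline path below passes needs_slm = false; the
 * compute path is expected to request an SLM partition when the shader uses
 * workgroup shared memory.
 */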
1955
1956 VkResult
1957 anv_pipeline_init(struct anv_graphics_pipeline *pipeline,
1958 struct anv_device *device,
1959 struct anv_pipeline_cache *cache,
1960 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1961 const VkAllocationCallbacks *alloc)
1962 {
1963 VkResult result;
1964
1965 anv_pipeline_validate_create_info(pCreateInfo);
1966
1967 if (alloc == NULL)
1968 alloc = &device->alloc;
1969
1970 pipeline->base.device = device;
1971 pipeline->base.type = ANV_PIPELINE_GRAPHICS;
1972
1973 ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
1974 assert(pCreateInfo->subpass < render_pass->subpass_count);
1975 pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];
1976
1977 result = anv_reloc_list_init(&pipeline->base.batch_relocs, alloc);
1978 if (result != VK_SUCCESS)
1979 return result;
1980
1981 pipeline->base.batch.alloc = alloc;
1982 pipeline->base.batch.next = pipeline->base.batch.start = pipeline->batch_data;
1983 pipeline->base.batch.end = pipeline->base.batch.start + sizeof(pipeline->batch_data);
1984 pipeline->base.batch.relocs = &pipeline->base.batch_relocs;
1985 pipeline->base.batch.status = VK_SUCCESS;
1986
1987 pipeline->base.mem_ctx = ralloc_context(NULL);
1988 pipeline->base.flags = pCreateInfo->flags;
1989
1990 assert(pCreateInfo->pRasterizationState);
1991
1992 copy_non_dynamic_state(pipeline, pCreateInfo);
1993 pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState->depthClampEnable;
1994
1995 /* Previously we enabled depth clipping when !depthClampEnable.
1996 * DepthClipStateCreateInfo now makes depth clipping explicit, so if the
1997 * clipping info is available, use its enable value to determine clipping;
1998 * otherwise fall back to the previous !depthClampEnable logic.
1999 */
2000 const VkPipelineRasterizationDepthClipStateCreateInfoEXT *clip_info =
2001 vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
2002 PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
2003 pipeline->depth_clip_enable = clip_info ? clip_info->depthClipEnable : !pipeline->depth_clamp_enable;
2004
2005 pipeline->sample_shading_enable =
2006 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
2007 pCreateInfo->pMultisampleState &&
2008 pCreateInfo->pMultisampleState->sampleShadingEnable;
2009
2010 /* When we free the pipeline, we detect stages based on the NULL status
2011 * of various prog_data pointers. Make them NULL by default.
2012 */
2013 memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
2014
2015 util_dynarray_init(&pipeline->base.executables, pipeline->base.mem_ctx);
2016
2017 result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
2018 if (result != VK_SUCCESS) {
2019 ralloc_free(pipeline->base.mem_ctx);
2020 anv_reloc_list_finish(&pipeline->base.batch_relocs, alloc);
2021 return result;
2022 }
2023
2024 assert(pipeline->shaders[MESA_SHADER_VERTEX]);
2025
2026 anv_pipeline_setup_l3_config(&pipeline->base, false);
2027
2028 const VkPipelineVertexInputStateCreateInfo *vi_info =
2029 pCreateInfo->pVertexInputState;
2030
2031 const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;
2032
2033 pipeline->vb_used = 0;
2034 for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
2035 const VkVertexInputAttributeDescription *desc =
2036 &vi_info->pVertexAttributeDescriptions[i];
2037
2038 if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
2039 pipeline->vb_used |= 1 << desc->binding;
2040 }
2041
2042 for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
2043 const VkVertexInputBindingDescription *desc =
2044 &vi_info->pVertexBindingDescriptions[i];
2045
2046 pipeline->vb[desc->binding].stride = desc->stride;
2047
2048 /* Step rate is programmed per vertex element (attribute), not
2049 * binding. Set up a map of which bindings step per instance, for
2050 * reference by vertex element setup. */
2051 switch (desc->inputRate) {
2052 default:
2053 case VK_VERTEX_INPUT_RATE_VERTEX:
2054 pipeline->vb[desc->binding].instanced = false;
2055 break;
2056 case VK_VERTEX_INPUT_RATE_INSTANCE:
2057 pipeline->vb[desc->binding].instanced = true;
2058 break;
2059 }
2060
2061 pipeline->vb[desc->binding].instance_divisor = 1;
2062 }
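/* Every binding starts out with an instance divisor of 1; the
 * VK_EXT_vertex_attribute_divisor chain handled next can override it per
 * binding, and the multiview adjustment below may scale it again.
 */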
2063
2064 const VkPipelineVertexInputDivisorStateCreateInfoEXT *vi_div_state =
2065 vk_find_struct_const(vi_info->pNext,
2066 PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
2067 if (vi_div_state) {
2068 for (uint32_t i = 0; i < vi_div_state->vertexBindingDivisorCount; i++) {
2069 const VkVertexInputBindingDivisorDescriptionEXT *desc =
2070 &vi_div_state->pVertexBindingDivisors[i];
2071
2072 pipeline->vb[desc->binding].instance_divisor = desc->divisor;
2073 }
2074 }
2075
2076 /* Our implementation of VK_KHR_multiview uses instancing to draw the
2077 * different views. If the client asks for instancing, we need to multiply
2078 * the instance divisor by the number of views to ensure that we repeat the
2079 * client's per-instance data once for each view.
2080 */
2081 if (pipeline->subpass->view_mask && !pipeline->use_primitive_replication) {
2082 const uint32_t view_count = anv_subpass_view_count(pipeline->subpass);
2083 for (uint32_t vb = 0; vb < MAX_VBS; vb++) {
2084 if (pipeline->vb[vb].instanced)
2085 pipeline->vb[vb].instance_divisor *= view_count;
2086 }
2087 }
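/* Example: with two views and an instanced binding whose divisor is 1, the
 * divisor becomes 2, so every client-visible instance is replayed once for
 * each view before the attribute data advances.
 */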
2088
2089 const VkPipelineInputAssemblyStateCreateInfo *ia_info =
2090 pCreateInfo->pInputAssemblyState;
2091 const VkPipelineTessellationStateCreateInfo *tess_info =
2092 pCreateInfo->pTessellationState;
2093 pipeline->primitive_restart = ia_info->primitiveRestartEnable;
2094
2095 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
2096 pipeline->topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
2097 else
2098 pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
2099
2100 return VK_SUCCESS;
2101 }
2102
2103 #define WRITE_STR(field, ...) ({ \
2104 memset(field, 0, sizeof(field)); \
2105 UNUSED int i = snprintf(field, sizeof(field), __VA_ARGS__); \
2106 assert(i > 0 && i < sizeof(field)); \
2107 })
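/* Example (hypothetical values): WRITE_STR(props->name, "SIMD%d %s", 16,
 * "fragment") zero-fills the field and formats into it, asserting that the
 * result was neither empty nor truncated.
 */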
2108
2109 VkResult anv_GetPipelineExecutablePropertiesKHR(
2110 VkDevice device,
2111 const VkPipelineInfoKHR* pPipelineInfo,
2112 uint32_t* pExecutableCount,
2113 VkPipelineExecutablePropertiesKHR* pProperties)
2114 {
2115 ANV_FROM_HANDLE(anv_pipeline, pipeline, pPipelineInfo->pipeline);
2116 VK_OUTARRAY_MAKE(out, pProperties, pExecutableCount);
2117
2118 util_dynarray_foreach (&pipeline->executables, struct anv_pipeline_executable, exe) {
2119 vk_outarray_append(&out, props) {
2120 gl_shader_stage stage = exe->stage;
2121 props->stages = mesa_to_vk_shader_stage(stage);
2122
2123 unsigned simd_width = exe->stats.dispatch_width;
2124 if (stage == MESA_SHADER_FRAGMENT) {
2125 WRITE_STR(props->name, "%s%d %s",
2126 simd_width ? "SIMD" : "vec",
2127 simd_width ? simd_width : 4,
2128 _mesa_shader_stage_to_string(stage));
2129 } else {
2130 WRITE_STR(props->name, "%s", _mesa_shader_stage_to_string(stage));
2131 }
2132 WRITE_STR(props->description, "%s%d %s shader",
2133 simd_width ? "SIMD" : "vec",
2134 simd_width ? simd_width : 4,
2135 _mesa_shader_stage_to_string(stage));
2136
2137 /* The compiler gives us a dispatch width of 0 for vec4 but Vulkan
2138 * wants a subgroup size of 1.
2139 */
2140 props->subgroupSize = MAX2(simd_width, 1);
2141 }
2142 }
2143
2144 return vk_outarray_status(&out);
2145 }
2146
2147 static const struct anv_pipeline_executable *
2148 anv_pipeline_get_executable(struct anv_pipeline *pipeline, uint32_t index)
2149 {
2150 assert(index < util_dynarray_num_elements(&pipeline->executables,
2151 struct anv_pipeline_executable));
2152 return util_dynarray_element(
2153 &pipeline->executables, struct anv_pipeline_executable, index);
2154 }
2155
2156 VkResult anv_GetPipelineExecutableStatisticsKHR(
2157 VkDevice device,
2158 const VkPipelineExecutableInfoKHR* pExecutableInfo,
2159 uint32_t* pStatisticCount,
2160 VkPipelineExecutableStatisticKHR* pStatistics)
2161 {
2162 ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
2163 VK_OUTARRAY_MAKE(out, pStatistics, pStatisticCount);
2164
2165 const struct anv_pipeline_executable *exe =
2166 anv_pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);
2167
2168 const struct brw_stage_prog_data *prog_data;
2169 switch (pipeline->type) {
2170 case ANV_PIPELINE_GRAPHICS: {
2171 prog_data = anv_pipeline_to_graphics(pipeline)->shaders[exe->stage]->prog_data;
2172 break;
2173 }
2174 case ANV_PIPELINE_COMPUTE: {
2175 prog_data = anv_pipeline_to_compute(pipeline)->cs->prog_data;
2176 break;
2177 }
2178 default:
2179 unreachable("invalid pipeline type");
2180 }
2181
2182 vk_outarray_append(&out, stat) {
2183 WRITE_STR(stat->name, "Instruction Count");
2184 WRITE_STR(stat->description,
2185 "Number of GEN instructions in the final generated "
2186 "shader executable.");
2187 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2188 stat->value.u64 = exe->stats.instructions;
2189 }
2190
2191 vk_outarray_append(&out, stat) {
2192 WRITE_STR(stat->name, "SEND Count");
2193 WRITE_STR(stat->description,
2194 "Number of instructions in the final generated shader "
2195 "executable which access external units such as the "
2196 "constant cache or the sampler.");
2197 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2198 stat->value.u64 = exe->stats.sends;
2199 }
2200
2201 vk_outarray_append(&out, stat) {
2202 WRITE_STR(stat->name, "Loop Count");
2203 WRITE_STR(stat->description,
2204 "Number of loops (not unrolled) in the final generated "
2205 "shader executable.");
2206 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2207 stat->value.u64 = exe->stats.loops;
2208 }
2209
2210 vk_outarray_append(&out, stat) {
2211 WRITE_STR(stat->name, "Cycle Count");
2212 WRITE_STR(stat->description,
2213 "Estimate of the number of EU cycles required to execute "
2214 "the final generated executable. This is an estimate only "
2215 "and may vary greatly from actual run-time performance.");
2216 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2217 stat->value.u64 = exe->stats.cycles;
2218 }
2219
2220 vk_outarray_append(&out, stat) {
2221 WRITE_STR(stat->name, "Spill Count");
2222 WRITE_STR(stat->description,
2223 "Number of scratch spill operations. This gives a rough "
2224 "estimate of the cost incurred due to spilling temporary "
2225 "values to memory. If this is non-zero, you may want to "
2226 "adjust your shader to reduce register pressure.");
2227 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2228 stat->value.u64 = exe->stats.spills;
2229 }
2230
2231 vk_outarray_append(&out, stat) {
2232 WRITE_STR(stat->name, "Fill Count");
2233 WRITE_STR(stat->description,
2234 "Number of scratch fill operations. This gives a rough "
2235 "estimate of the cost incurred due to spilling temporary "
2236 "values to memory. If this is non-zero, you may want to "
2237 "adjust your shader to reduce register pressure.");
2238 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2239 stat->value.u64 = exe->stats.fills;
2240 }
2241
2242 vk_outarray_append(&out, stat) {
2243 WRITE_STR(stat->name, "Scratch Memory Size");
2244 WRITE_STR(stat->description,
2245 "Number of bytes of scratch memory required by the "
2246 "generated shader executable. If this is non-zero, you "
2247 "may want to adjust your shader to reduce register "
2248 "pressure.");
2249 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2250 stat->value.u64 = prog_data->total_scratch;
2251 }
2252
2253 if (exe->stage == MESA_SHADER_COMPUTE) {
2254 vk_outarray_append(&out, stat) {
2255 WRITE_STR(stat->name, "Workgroup Memory Size");
2256 WRITE_STR(stat->description,
2257 "Number of bytes of workgroup shared memory used by this "
2258 "compute shader including any padding.");
2259 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2260 stat->value.u64 = brw_cs_prog_data_const(prog_data)->slm_size;
2261 }
2262 }
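/* slm_size reports the shared-local-memory allocation for the workgroup
 * rather than just the bytes the shader declares, which is why the
 * description above says "including any padding".
 */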
2263
2264 return vk_outarray_status(&out);
2265 }
2266
2267 static bool
2268 write_ir_text(VkPipelineExecutableInternalRepresentationKHR* ir,
2269 const char *data)
2270 {
2271 ir->isText = VK_TRUE;
2272
2273 size_t data_len = strlen(data) + 1;
2274
2275 if (ir->pData == NULL) {
2276 ir->dataSize = data_len;
2277 return true;
2278 }
2279
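/* strncpy() does not NUL-terminate the copy when the source string does not
 * fit, so a truncated representation is reported to the caller by returning
 * false (surfaced to the application as VK_INCOMPLETE).
 */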
2280 strncpy(ir->pData, data, ir->dataSize);
2281 if (ir->dataSize < data_len)
2282 return false;
2283
2284 ir->dataSize = data_len;
2285 return true;
2286 }
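/* write_ir_text() supports the usual Vulkan two-call idiom per
 * representation: with pData == NULL only the required dataSize is written
 * back, and a second call with a buffer copies the text.  Illustrative
 * application-side sketch (variable names are hypothetical):
 *
 *    VkPipelineExecutableInternalRepresentationKHR ir = {
 *       .sType = VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR,
 *    };
 *    uint32_t count = 1;
 *    vkGetPipelineExecutableInternalRepresentationsKHR(device, &exe_info, &count, &ir);
 *    ir.pData = malloc(ir.dataSize);
 *    vkGetPipelineExecutableInternalRepresentationsKHR(device, &exe_info, &count, &ir);
 */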
2287
2288 VkResult anv_GetPipelineExecutableInternalRepresentationsKHR(
2289 VkDevice device,
2290 const VkPipelineExecutableInfoKHR* pExecutableInfo,
2291 uint32_t* pInternalRepresentationCount,
2292 VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
2293 {
2294 ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
2295 VK_OUTARRAY_MAKE(out, pInternalRepresentations,
2296 pInternalRepresentationCount);
2297 bool incomplete_text = false;
2298
2299 const struct anv_pipeline_executable *exe =
2300 anv_pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);
2301
2302 if (exe->nir) {
2303 vk_outarray_append(&out, ir) {
2304 WRITE_STR(ir->name, "Final NIR");
2305 WRITE_STR(ir->description,
2306 "Final NIR before going into the back-end compiler");
2307
2308 if (!write_ir_text(ir, exe->nir))
2309 incomplete_text = true;
2310 }
2311 }
2312
2313 if (exe->disasm) {
2314 vk_outarray_append(&out, ir) {
2315 WRITE_STR(ir->name, "GEN Assembly");
2316 WRITE_STR(ir->description,
2317 "Final GEN assembly for the generated shader binary");
2318
2319 if (!write_ir_text(ir, exe->disasm))
2320 incomplete_text = true;
2321 }
2322 }
2323
2324 return incomplete_text ? VK_INCOMPLETE : vk_outarray_status(&out);
2325 }