anv: Add anv_pipeline_init/finish helpers
mesa.git: src/intel/vulkan/anv_pipeline.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "util/mesa-sha1.h"
31 #include "util/os_time.h"
32 #include "common/gen_l3_config.h"
33 #include "common/gen_disasm.h"
34 #include "anv_private.h"
35 #include "compiler/brw_nir.h"
36 #include "anv_nir.h"
37 #include "nir/nir_xfb_info.h"
38 #include "spirv/nir_spirv.h"
39 #include "vk_util.h"
40
41 /* Needed for SWIZZLE macros */
42 #include "program/prog_instruction.h"
43
44 // Shader functions
45
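/* vkCreateShaderModule: copy the client's SPIR-V into driver-owned storage
 * and record a SHA-1 of the code, which later feeds the pipeline-cache keys.
 */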
46 VkResult anv_CreateShaderModule(
47 VkDevice _device,
48 const VkShaderModuleCreateInfo* pCreateInfo,
49 const VkAllocationCallbacks* pAllocator,
50 VkShaderModule* pShaderModule)
51 {
52 ANV_FROM_HANDLE(anv_device, device, _device);
53 struct anv_shader_module *module;
54
55 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
56 assert(pCreateInfo->flags == 0);
57
58 module = vk_alloc2(&device->vk.alloc, pAllocator,
59 sizeof(*module) + pCreateInfo->codeSize, 8,
60 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
61 if (module == NULL)
62 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
63
64 vk_object_base_init(&device->vk, &module->base,
65 VK_OBJECT_TYPE_SHADER_MODULE);
66 module->size = pCreateInfo->codeSize;
67 memcpy(module->data, pCreateInfo->pCode, module->size);
68
69 _mesa_sha1_compute(module->data, module->size, module->sha1);
70
71 *pShaderModule = anv_shader_module_to_handle(module);
72
73 return VK_SUCCESS;
74 }
75
76 void anv_DestroyShaderModule(
77 VkDevice _device,
78 VkShaderModule _module,
79 const VkAllocationCallbacks* pAllocator)
80 {
81 ANV_FROM_HANDLE(anv_device, device, _device);
82 ANV_FROM_HANDLE(anv_shader_module, module, _module);
83
84 if (!module)
85 return;
86
87 vk_object_base_finish(&module->base);
88 vk_free2(&device->vk.alloc, pAllocator, module);
89 }
90
91 #define SPIR_V_MAGIC_NUMBER 0x07230203
92
93 struct anv_spirv_debug_data {
94 struct anv_device *device;
95 const struct anv_shader_module *module;
96 };
97
98 static void anv_spirv_nir_debug(void *private_data,
99 enum nir_spirv_debug_level level,
100 size_t spirv_offset,
101 const char *message)
102 {
103 struct anv_spirv_debug_data *debug_data = private_data;
104 struct anv_instance *instance = debug_data->device->physical->instance;
105
106 static const VkDebugReportFlagsEXT vk_flags[] = {
107 [NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
108 [NIR_SPIRV_DEBUG_LEVEL_WARNING] = VK_DEBUG_REPORT_WARNING_BIT_EXT,
109 [NIR_SPIRV_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
110 };
111 char buffer[256];
112
113 snprintf(buffer, sizeof(buffer), "SPIR-V offset %lu: %s", (unsigned long) spirv_offset, message);
114
115 vk_debug_report(&instance->debug_report_callbacks,
116 vk_flags[level],
117 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
118 (uint64_t) (uintptr_t) debug_data->module,
119 0, 0, "anv", buffer);
120 }
121
122 /* Eventually, this will become part of anv_CreateShader. Unfortunately,
123 * we can't do that yet because we don't have the ability to copy nir.
124 */
125 static nir_shader *
126 anv_shader_compile_to_nir(struct anv_device *device,
127 void *mem_ctx,
128 const struct anv_shader_module *module,
129 const char *entrypoint_name,
130 gl_shader_stage stage,
131 const VkSpecializationInfo *spec_info)
132 {
133 const struct anv_physical_device *pdevice = device->physical;
134 const struct brw_compiler *compiler = pdevice->compiler;
135 const nir_shader_compiler_options *nir_options =
136 compiler->glsl_compiler_options[stage].NirOptions;
137
138 uint32_t *spirv = (uint32_t *) module->data;
139 assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
140 assert(module->size % 4 == 0);
141
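/* Translate VkSpecializationInfo into the nir_spirv_specialization array
 * consumed by spirv_to_nir; map-entry sizes of 1, 2, 4, or 8 bytes become
 * u8/u16/u32/u64 constant values respectively.
 */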
142 uint32_t num_spec_entries = 0;
143 struct nir_spirv_specialization *spec_entries = NULL;
144 if (spec_info && spec_info->mapEntryCount > 0) {
145 num_spec_entries = spec_info->mapEntryCount;
146 spec_entries = calloc(num_spec_entries, sizeof(*spec_entries));
147 for (uint32_t i = 0; i < num_spec_entries; i++) {
148 VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
149 const void *data = spec_info->pData + entry.offset;
150 assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
151
152 spec_entries[i].id = spec_info->pMapEntries[i].constantID;
153 switch (entry.size) {
154 case 8:
155 spec_entries[i].value.u64 = *(const uint64_t *)data;
156 break;
157 case 4:
158 spec_entries[i].value.u32 = *(const uint32_t *)data;
159 break;
160 case 2:
161 spec_entries[i].value.u16 = *(const uint16_t *)data;
162 break;
163 case 1:
164 spec_entries[i].value.u8 = *(const uint8_t *)data;
165 break;
166 default:
167 assert(!"Invalid spec constant size");
168 break;
169 }
170 }
171 }
172
173 struct anv_spirv_debug_data spirv_debug_data = {
174 .device = device,
175 .module = module,
176 };
177 struct spirv_to_nir_options spirv_options = {
178 .frag_coord_is_sysval = true,
179 .caps = {
180 .demote_to_helper_invocation = true,
181 .derivative_group = true,
182 .descriptor_array_dynamic_indexing = true,
183 .descriptor_array_non_uniform_indexing = true,
184 .descriptor_indexing = true,
185 .device_group = true,
186 .draw_parameters = true,
187 .float16 = pdevice->info.gen >= 8,
188 .float64 = pdevice->info.gen >= 8,
189 .fragment_shader_sample_interlock = pdevice->info.gen >= 9,
190 .fragment_shader_pixel_interlock = pdevice->info.gen >= 9,
191 .geometry_streams = true,
192 .image_write_without_format = true,
193 .int8 = pdevice->info.gen >= 8,
194 .int16 = pdevice->info.gen >= 8,
195 .int64 = pdevice->info.gen >= 8,
196 .int64_atomics = pdevice->info.gen >= 9 && pdevice->use_softpin,
197 .integer_functions2 = pdevice->info.gen >= 8,
198 .min_lod = true,
199 .multiview = true,
200 .physical_storage_buffer_address = pdevice->has_a64_buffer_access,
201 .post_depth_coverage = pdevice->info.gen >= 9,
202 .runtime_descriptor_array = true,
203 .float_controls = pdevice->info.gen >= 8,
204 .shader_clock = true,
205 .shader_viewport_index_layer = true,
206 .stencil_export = pdevice->info.gen >= 9,
207 .storage_8bit = pdevice->info.gen >= 8,
208 .storage_16bit = pdevice->info.gen >= 8,
209 .subgroup_arithmetic = true,
210 .subgroup_basic = true,
211 .subgroup_ballot = true,
212 .subgroup_quad = true,
213 .subgroup_shuffle = true,
214 .subgroup_vote = true,
215 .tessellation = true,
216 .transform_feedback = pdevice->info.gen >= 8,
217 .variable_pointers = true,
218 .vk_memory_model = true,
219 .vk_memory_model_device_scope = true,
220 },
221 .ubo_addr_format = nir_address_format_32bit_index_offset,
222 .ssbo_addr_format =
223 anv_nir_ssbo_addr_format(pdevice, device->robust_buffer_access),
224 .phys_ssbo_addr_format = nir_address_format_64bit_global,
225 .push_const_addr_format = nir_address_format_logical,
226
227 /* TODO: Consider changing this to an address format where the NULL
228 * pointer equals 0. That might be a better format to play nicely
229 * with certain code / code generators.
230 */
231 .shared_addr_format = nir_address_format_32bit_offset,
232 .debug = {
233 .func = anv_spirv_nir_debug,
234 .private_data = &spirv_debug_data,
235 },
236 };
237
238
239 nir_shader *nir =
240 spirv_to_nir(spirv, module->size / 4,
241 spec_entries, num_spec_entries,
242 stage, entrypoint_name, &spirv_options, nir_options);
243 assert(nir->info.stage == stage);
244 nir_validate_shader(nir, "after spirv_to_nir");
245 ralloc_steal(mem_ctx, nir);
246
247 free(spec_entries);
248
249 if (unlikely(INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage))) {
250 fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
251 gl_shader_stage_name(stage));
252 nir_print_shader(nir, stderr);
253 }
254
255 /* We have to lower away local constant initializers right before we
256 * inline functions. That way they get properly initialized at the top
257 * of the function and not at the top of its caller.
258 */
259 NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
260 NIR_PASS_V(nir, nir_lower_returns);
261 NIR_PASS_V(nir, nir_inline_functions);
262 NIR_PASS_V(nir, nir_opt_deref);
263
264 /* Pick off the single entrypoint that we want */
265 foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
266 if (!func->is_entrypoint)
267 exec_node_remove(&func->node);
268 }
269 assert(exec_list_length(&nir->functions) == 1);
270
271 /* Now that we've deleted all but the main function, we can go ahead and
272 * lower the rest of the constant initializers. We do this here so that
273 * nir_remove_dead_variables and split_per_member_structs below see the
274 * corresponding stores.
275 */
276 NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
277
278 /* Split member structs. We do this before lower_io_to_temporaries so that
279 * it doesn't lower system values to temporaries by accident.
280 */
281 NIR_PASS_V(nir, nir_split_var_copies);
282 NIR_PASS_V(nir, nir_split_per_member_structs);
283
284 NIR_PASS_V(nir, nir_remove_dead_variables,
285 nir_var_shader_in | nir_var_shader_out | nir_var_system_value,
286 NULL);
287
288 NIR_PASS_V(nir, nir_propagate_invariant);
289 NIR_PASS_V(nir, nir_lower_io_to_temporaries,
290 nir_shader_get_entrypoint(nir), true, false);
291
292 NIR_PASS_V(nir, nir_lower_frexp);
293
294 /* Vulkan uses the separate-shader linking model */
295 nir->info.separate_shader = true;
296
297 brw_preprocess_nir(compiler, nir, NULL);
298
299 return nir;
300 }
301
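/* Initialize the state shared by all pipeline types: the base VK object,
 * the batch (the caller supplies the actual backing storage), the
 * relocation list, a ralloc memory context, and the executables array.
 */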
302 VkResult
303 anv_pipeline_init(struct anv_pipeline *pipeline,
304 struct anv_device *device,
305 enum anv_pipeline_type type,
306 VkPipelineCreateFlags flags,
307 const VkAllocationCallbacks *pAllocator)
308 {
309 VkResult result;
310
311 memset(pipeline, 0, sizeof(*pipeline));
312
313 vk_object_base_init(&device->vk, &pipeline->base,
314 VK_OBJECT_TYPE_PIPELINE);
315 pipeline->device = device;
316
317 /* It's the job of the child class to provide actual backing storage for
318 * the batch by setting batch.start, batch.next, and batch.end.
319 */
320 pipeline->batch.alloc = pAllocator ? pAllocator : &device->vk.alloc;
321 pipeline->batch.relocs = &pipeline->batch_relocs;
322 pipeline->batch.status = VK_SUCCESS;
323
324 result = anv_reloc_list_init(&pipeline->batch_relocs,
325 pipeline->batch.alloc);
326 if (result != VK_SUCCESS)
327 return result;
328
329 pipeline->mem_ctx = ralloc_context(NULL);
330
331 pipeline->type = type;
332 pipeline->flags = flags;
333
334 util_dynarray_init(&pipeline->executables, pipeline->mem_ctx);
335
336 return VK_SUCCESS;
337 }
338
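/* Tear down what anv_pipeline_init() set up: the relocation list, the
 * ralloc context, and the base VK object. Freeing the pipeline itself is
 * left to the caller (see anv_DestroyPipeline).
 */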
339 void
340 anv_pipeline_finish(struct anv_pipeline *pipeline,
341 struct anv_device *device,
342 const VkAllocationCallbacks *pAllocator)
343 {
344 anv_reloc_list_finish(&pipeline->batch_relocs,
345 pAllocator ? pAllocator : &device->vk.alloc);
346 ralloc_free(pipeline->mem_ctx);
347 vk_object_base_finish(&pipeline->base);
348 }
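
/* Illustrative only: a pipeline subtype is expected to pair these helpers
 * roughly as below (anv_example_pipeline is hypothetical, not part of the
 * driver):
 *
 *    struct anv_example_pipeline *pipeline =
 *       vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
 *                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 *    VkResult result = anv_pipeline_init(&pipeline->base, device,
 *                                        ANV_PIPELINE_GRAPHICS, flags,
 *                                        pAllocator);
 *    if (result != VK_SUCCESS) {
 *       vk_free2(&device->vk.alloc, pAllocator, pipeline);
 *       return result;
 *    }
 *    ...
 *    anv_pipeline_finish(&pipeline->base, device, pAllocator);
 *    vk_free2(&device->vk.alloc, pAllocator, pipeline);
 */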
349
350 void anv_DestroyPipeline(
351 VkDevice _device,
352 VkPipeline _pipeline,
353 const VkAllocationCallbacks* pAllocator)
354 {
355 ANV_FROM_HANDLE(anv_device, device, _device);
356 ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
357
358 if (!pipeline)
359 return;
360
361 switch (pipeline->type) {
362 case ANV_PIPELINE_GRAPHICS: {
363 struct anv_graphics_pipeline *gfx_pipeline =
364 anv_pipeline_to_graphics(pipeline);
365
366 if (gfx_pipeline->blend_state.map)
367 anv_state_pool_free(&device->dynamic_state_pool, gfx_pipeline->blend_state);
368
369 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
370 if (gfx_pipeline->shaders[s])
371 anv_shader_bin_unref(device, gfx_pipeline->shaders[s]);
372 }
373 break;
374 }
375
376 case ANV_PIPELINE_COMPUTE: {
377 struct anv_compute_pipeline *compute_pipeline =
378 anv_pipeline_to_compute(pipeline);
379
380 if (compute_pipeline->cs)
381 anv_shader_bin_unref(device, compute_pipeline->cs);
382
383 break;
384 }
385
386 default:
387 unreachable("invalid pipeline type");
388 }
389
390 anv_pipeline_finish(pipeline, device, pAllocator);
391 vk_free2(&device->vk.alloc, pAllocator, pipeline);
392 }
393
394 static const uint32_t vk_to_gen_primitive_type[] = {
395 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
396 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
397 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
398 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
399 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
400 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
401 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
402 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
403 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
404 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
405 };
406
407 static void
408 populate_sampler_prog_key(const struct gen_device_info *devinfo,
409 struct brw_sampler_prog_key_data *key)
410 {
411 /* Almost all multisampled textures are compressed. The only time when we
412 * don't compress a multisampled texture is for 16x MSAA with a surface
413 * width greater than 8k which is a bit of an edge case. Since the sampler
414 * just ignores the MCS parameter to ld2ms when MCS is disabled, it's safe
415 * to tell the compiler to always assume compression.
416 */
417 key->compressed_multisample_layout_mask = ~0;
418
419 /* SkyLake added support for 16x MSAA. With this came a new message for
420 * reading from a 16x MSAA surface with compression. The new message was
421 * needed because now the MCS data is 64 bits instead of 32 or lower as is
422 * the case for 8x, 4x, and 2x. The key->msaa_16 bit-field controls which
423 * message we use. Fortunately, the 16x message works for 8x, 4x, and 2x
424 * so we can just use it unconditionally. This may not be quite as
425 * efficient but it saves us from recompiling.
426 */
427 if (devinfo->gen >= 9)
428 key->msaa_16 = ~0;
429
430 /* XXX: Handle texture swizzle on HSW- */
431 for (int i = 0; i < MAX_SAMPLERS; i++) {
432 /* Assume color sampler, no swizzling. (Works for BDW+) */
433 key->swizzles[i] = SWIZZLE_XYZW;
434 }
435 }
436
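/* Key fields common to every stage: the subgroup size policy (VARYING when
 * the ALLOW_VARYING flag is set, otherwise the fixed API size) and the
 * sampler key.
 */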
437 static void
438 populate_base_prog_key(const struct gen_device_info *devinfo,
439 VkPipelineShaderStageCreateFlags flags,
440 struct brw_base_prog_key *key)
441 {
442 if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
443 key->subgroup_size_type = BRW_SUBGROUP_SIZE_VARYING;
444 else
445 key->subgroup_size_type = BRW_SUBGROUP_SIZE_API_CONSTANT;
446
447 populate_sampler_prog_key(devinfo, &key->tex);
448 }
449
450 static void
451 populate_vs_prog_key(const struct gen_device_info *devinfo,
452 VkPipelineShaderStageCreateFlags flags,
453 struct brw_vs_prog_key *key)
454 {
455 memset(key, 0, sizeof(*key));
456
457 populate_base_prog_key(devinfo, flags, &key->base);
458
459 /* XXX: Handle vertex input work-arounds */
460
461 /* XXX: Handle sampler_prog_key */
462 }
463
464 static void
465 populate_tcs_prog_key(const struct gen_device_info *devinfo,
466 VkPipelineShaderStageCreateFlags flags,
467 unsigned input_vertices,
468 struct brw_tcs_prog_key *key)
469 {
470 memset(key, 0, sizeof(*key));
471
472 populate_base_prog_key(devinfo, flags, &key->base);
473
474 key->input_vertices = input_vertices;
475 }
476
477 static void
478 populate_tes_prog_key(const struct gen_device_info *devinfo,
479 VkPipelineShaderStageCreateFlags flags,
480 struct brw_tes_prog_key *key)
481 {
482 memset(key, 0, sizeof(*key));
483
484 populate_base_prog_key(devinfo, flags, &key->base);
485 }
486
487 static void
488 populate_gs_prog_key(const struct gen_device_info *devinfo,
489 VkPipelineShaderStageCreateFlags flags,
490 struct brw_gs_prog_key *key)
491 {
492 memset(key, 0, sizeof(*key));
493
494 populate_base_prog_key(devinfo, flags, &key->base);
495 }
496
497 static void
498 populate_wm_prog_key(const struct gen_device_info *devinfo,
499 VkPipelineShaderStageCreateFlags flags,
500 const struct anv_subpass *subpass,
501 const VkPipelineMultisampleStateCreateInfo *ms_info,
502 struct brw_wm_prog_key *key)
503 {
504 memset(key, 0, sizeof(*key));
505
506 populate_base_prog_key(devinfo, flags, &key->base);
507
508 /* We set this to 0 here and set to the actual value before we call
509 * brw_compile_fs.
510 */
511 key->input_slots_valid = 0;
512
513 /* Vulkan doesn't specify a default */
514 key->high_quality_derivatives = false;
515
516 /* XXX Vulkan doesn't appear to specify */
517 key->clamp_fragment_color = false;
518
519 assert(subpass->color_count <= MAX_RTS);
520 for (uint32_t i = 0; i < subpass->color_count; i++) {
521 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
522 key->color_outputs_valid |= (1 << i);
523 }
524
525 key->nr_color_regions = subpass->color_count;
526
527 /* To reduce possible shader recompilations we would need to know up front
528 * whether the shader writes a SampleMask output, so we could decide whether
529 * to emit code that works around hardware disabling alpha to coverage
530 * whenever SampleMask is written.
531 */
532 key->alpha_to_coverage = ms_info && ms_info->alphaToCoverageEnable;
533
534 /* Vulkan doesn't support fixed-function alpha test */
535 key->alpha_test_replicate_alpha = false;
536
537 if (ms_info) {
538 /* We should probably pull this out of the shader, but it's fairly
539 * harmless to compute it and then let dead-code take care of it.
540 */
541 if (ms_info->rasterizationSamples > 1) {
542 key->persample_interp = ms_info->sampleShadingEnable &&
543 (ms_info->minSampleShading * ms_info->rasterizationSamples) > 1;
544 key->multisample_fbo = true;
545 }
546
547 key->frag_coord_adds_sample_pos = key->persample_interp;
548 }
549 }
550
551 static void
552 populate_cs_prog_key(const struct gen_device_info *devinfo,
553 VkPipelineShaderStageCreateFlags flags,
554 const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *rss_info,
555 struct brw_cs_prog_key *key)
556 {
557 memset(key, 0, sizeof(*key));
558
559 populate_base_prog_key(devinfo, flags, &key->base);
560
561 if (rss_info) {
562 assert(key->base.subgroup_size_type != BRW_SUBGROUP_SIZE_VARYING);
563
564 /* These enum values are expressly chosen to be equal to the subgroup
565 * size that they require.
566 */
567 assert(rss_info->requiredSubgroupSize == 8 ||
568 rss_info->requiredSubgroupSize == 16 ||
569 rss_info->requiredSubgroupSize == 32);
570 key->base.subgroup_size_type = rss_info->requiredSubgroupSize;
571 } else if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT) {
572 /* If the client expressly requests full subgroups and they don't
573 * specify a subgroup size, we need to pick one. If they've requested
574 * varying subgroup sizes, we set it to UNIFORM and let the back-end
575 * compiler pick. Otherwise, we specify the API value of 32.
576 * Performance will likely be terrible in this case but there's nothing
577 * we can do about that. The client should have chosen a size.
578 */
579 if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
580 key->base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM;
581 else
582 key->base.subgroup_size_type = BRW_SUBGROUP_SIZE_REQUIRE_32;
583 }
584 }
585
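/* Transient per-stage compilation state: source module, entrypoint and
 * specialization info, the hashes and keys used for cache lookups, the NIR,
 * the bind map, the compiled prog_data/stats/disassembly, and the
 * creation-feedback timings.
 */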
586 struct anv_pipeline_stage {
587 gl_shader_stage stage;
588
589 const struct anv_shader_module *module;
590 const char *entrypoint;
591 const VkSpecializationInfo *spec_info;
592
593 unsigned char shader_sha1[20];
594
595 union brw_any_prog_key key;
596
597 struct {
598 gl_shader_stage stage;
599 unsigned char sha1[20];
600 } cache_key;
601
602 nir_shader *nir;
603
604 struct anv_pipeline_binding surface_to_descriptor[256];
605 struct anv_pipeline_binding sampler_to_descriptor[256];
606 struct anv_pipeline_bind_map bind_map;
607
608 union brw_any_prog_data prog_data;
609
610 uint32_t num_stats;
611 struct brw_compile_stats stats[3];
612 char *disasm[3];
613
614 VkPipelineCreationFeedbackEXT feedback;
615
616 const unsigned *code;
617 };
618
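/* Hash everything that determines a stage's NIR: the module SHA-1, the
 * entrypoint name, the stage, and any specialization constants.
 */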
619 static void
620 anv_pipeline_hash_shader(const struct anv_shader_module *module,
621 const char *entrypoint,
622 gl_shader_stage stage,
623 const VkSpecializationInfo *spec_info,
624 unsigned char *sha1_out)
625 {
626 struct mesa_sha1 ctx;
627 _mesa_sha1_init(&ctx);
628
629 _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
630 _mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
631 _mesa_sha1_update(&ctx, &stage, sizeof(stage));
632 if (spec_info) {
633 _mesa_sha1_update(&ctx, spec_info->pMapEntries,
634 spec_info->mapEntryCount *
635 sizeof(*spec_info->pMapEntries));
636 _mesa_sha1_update(&ctx, spec_info->pData,
637 spec_info->dataSize);
638 }
639
640 _mesa_sha1_final(&ctx, sha1_out);
641 }
642
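/* Compute the pipeline-cache key for a graphics pipeline: subpass view
 * mask, pipeline layout, robust-buffer-access mode, and each active
 * stage's shader SHA-1 and program key.
 */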
643 static void
644 anv_pipeline_hash_graphics(struct anv_graphics_pipeline *pipeline,
645 struct anv_pipeline_layout *layout,
646 struct anv_pipeline_stage *stages,
647 unsigned char *sha1_out)
648 {
649 struct mesa_sha1 ctx;
650 _mesa_sha1_init(&ctx);
651
652 _mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
653 sizeof(pipeline->subpass->view_mask));
654
655 if (layout)
656 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
657
658 const bool rba = pipeline->base.device->robust_buffer_access;
659 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
660
661 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
662 if (stages[s].entrypoint) {
663 _mesa_sha1_update(&ctx, stages[s].shader_sha1,
664 sizeof(stages[s].shader_sha1));
665 _mesa_sha1_update(&ctx, &stages[s].key, brw_prog_key_size(s));
666 }
667 }
668
669 _mesa_sha1_final(&ctx, sha1_out);
670 }
671
672 static void
673 anv_pipeline_hash_compute(struct anv_compute_pipeline *pipeline,
674 struct anv_pipeline_layout *layout,
675 struct anv_pipeline_stage *stage,
676 unsigned char *sha1_out)
677 {
678 struct mesa_sha1 ctx;
679 _mesa_sha1_init(&ctx);
680
681 if (layout)
682 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
683
684 const bool rba = pipeline->base.device->robust_buffer_access;
685 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
686
687 _mesa_sha1_update(&ctx, stage->shader_sha1,
688 sizeof(stage->shader_sha1));
689 _mesa_sha1_update(&ctx, &stage->key.cs, sizeof(stage->key.cs));
690
691 _mesa_sha1_final(&ctx, sha1_out);
692 }
693
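/* Return the stage's NIR, first consulting the device NIR cache by shader
 * SHA-1 and otherwise translating the SPIR-V; a freshly translated shader
 * is uploaded back into the cache.
 */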
694 static nir_shader *
695 anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
696 struct anv_pipeline_cache *cache,
697 void *mem_ctx,
698 struct anv_pipeline_stage *stage)
699 {
700 const struct brw_compiler *compiler =
701 pipeline->device->physical->compiler;
702 const nir_shader_compiler_options *nir_options =
703 compiler->glsl_compiler_options[stage->stage].NirOptions;
704 nir_shader *nir;
705
706 nir = anv_device_search_for_nir(pipeline->device, cache,
707 nir_options,
708 stage->shader_sha1,
709 mem_ctx);
710 if (nir) {
711 assert(nir->info.stage == stage->stage);
712 return nir;
713 }
714
715 nir = anv_shader_compile_to_nir(pipeline->device,
716 mem_ctx,
717 stage->module,
718 stage->entrypoint,
719 stage->stage,
720 stage->spec_info);
721 if (nir) {
722 anv_device_upload_nir(pipeline->device, cache, nir, stage->shader_sha1);
723 return nir;
724 }
725
726 return NULL;
727 }
728
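/* Run the anv-specific lowering passes on a stage's NIR: fragment-shader
 * wpos/input-attachment lowering, YCbCr and multiview lowering, image
 * load/store lowering, applying the pipeline layout to descriptors,
 * explicit I/O lowering for UBO/SSBO/global access, and finally push
 * constant layout computation.
 */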
729 static void
730 anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
731 void *mem_ctx,
732 struct anv_pipeline_stage *stage,
733 struct anv_pipeline_layout *layout)
734 {
735 const struct anv_physical_device *pdevice = pipeline->device->physical;
736 const struct brw_compiler *compiler = pdevice->compiler;
737
738 struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
739 nir_shader *nir = stage->nir;
740
741 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
742 NIR_PASS_V(nir, nir_lower_wpos_center,
743 anv_pipeline_to_graphics(pipeline)->sample_shading_enable);
744 NIR_PASS_V(nir, nir_lower_input_attachments, true);
745 }
746
747 NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
748
749 if (pipeline->type == ANV_PIPELINE_GRAPHICS) {
750 NIR_PASS_V(nir, anv_nir_lower_multiview,
751 anv_pipeline_to_graphics(pipeline));
752 }
753
754 nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
755
756 NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo, NULL);
757
758 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
759 nir_address_format_64bit_global);
760
761 /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
762 anv_nir_apply_pipeline_layout(pdevice,
763 pipeline->device->robust_buffer_access,
764 layout, nir, &stage->bind_map);
765
766 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo,
767 nir_address_format_32bit_index_offset);
768 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
769 anv_nir_ssbo_addr_format(pdevice,
770 pipeline->device->robust_buffer_access));
771
772 NIR_PASS_V(nir, nir_opt_constant_folding);
773
774 /* We don't support non-uniform UBOs, and non-uniform SSBO access is
775 * handled naturally by falling back to A64 messages.
776 */
777 NIR_PASS_V(nir, nir_lower_non_uniform_access,
778 nir_lower_non_uniform_texture_access |
779 nir_lower_non_uniform_image_access);
780
781 anv_nir_compute_push_layout(pdevice, pipeline->device->robust_buffer_access,
782 nir, prog_data, &stage->bind_map, mem_ctx);
783
784 stage->nir = nir;
785 }
786
787 static void
788 anv_pipeline_link_vs(const struct brw_compiler *compiler,
789 struct anv_pipeline_stage *vs_stage,
790 struct anv_pipeline_stage *next_stage)
791 {
792 if (next_stage)
793 brw_nir_link_shaders(compiler, vs_stage->nir, next_stage->nir);
794 }
795
796 static void
797 anv_pipeline_compile_vs(const struct brw_compiler *compiler,
798 void *mem_ctx,
799 struct anv_graphics_pipeline *pipeline,
800 struct anv_pipeline_stage *vs_stage)
801 {
802 /* When using Primitive Replication for multiview, each view gets its own
803 * position slot.
804 */
805 uint32_t pos_slots = pipeline->use_primitive_replication ?
806 anv_subpass_view_count(pipeline->subpass) : 1;
807
808 brw_compute_vue_map(compiler->devinfo,
809 &vs_stage->prog_data.vs.base.vue_map,
810 vs_stage->nir->info.outputs_written,
811 vs_stage->nir->info.separate_shader,
812 pos_slots);
813
814 vs_stage->num_stats = 1;
815 vs_stage->code = brw_compile_vs(compiler, pipeline->base.device, mem_ctx,
816 &vs_stage->key.vs,
817 &vs_stage->prog_data.vs,
818 vs_stage->nir, -1,
819 vs_stage->stats, NULL);
820 }
821
822 static void
823 merge_tess_info(struct shader_info *tes_info,
824 const struct shader_info *tcs_info)
825 {
826 /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
827 *
828 * "PointMode. Controls generation of points rather than triangles
829 * or lines. This functionality defaults to disabled, and is
830 * enabled if either shader stage includes the execution mode."
831 *
832 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
833 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
834 * and OutputVertices, it says:
835 *
836 * "One mode must be set in at least one of the tessellation
837 * shader stages."
838 *
839 * So, the fields can be set in either the TCS or TES, but they must
840 * agree if set in both. Our backend looks at TES, so bitwise-or in
841 * the values from the TCS.
842 */
843 assert(tcs_info->tess.tcs_vertices_out == 0 ||
844 tes_info->tess.tcs_vertices_out == 0 ||
845 tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
846 tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
847
848 assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
849 tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
850 tcs_info->tess.spacing == tes_info->tess.spacing);
851 tes_info->tess.spacing |= tcs_info->tess.spacing;
852
853 assert(tcs_info->tess.primitive_mode == 0 ||
854 tes_info->tess.primitive_mode == 0 ||
855 tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
856 tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
857 tes_info->tess.ccw |= tcs_info->tess.ccw;
858 tes_info->tess.point_mode |= tcs_info->tess.point_mode;
859 }
860
861 static void
862 anv_pipeline_link_tcs(const struct brw_compiler *compiler,
863 struct anv_pipeline_stage *tcs_stage,
864 struct anv_pipeline_stage *tes_stage)
865 {
866 assert(tes_stage && tes_stage->stage == MESA_SHADER_TESS_EVAL);
867
868 brw_nir_link_shaders(compiler, tcs_stage->nir, tes_stage->nir);
869
870 nir_lower_patch_vertices(tes_stage->nir,
871 tcs_stage->nir->info.tess.tcs_vertices_out,
872 NULL);
873
874 /* Copy TCS info into the TES info */
875 merge_tess_info(&tes_stage->nir->info, &tcs_stage->nir->info);
876
877 /* Whacking the key after cache lookup is a bit sketchy, but all of
878 * this comes from the SPIR-V, which is part of the hash used for the
879 * pipeline cache. So it should be safe.
880 */
881 tcs_stage->key.tcs.tes_primitive_mode =
882 tes_stage->nir->info.tess.primitive_mode;
883 tcs_stage->key.tcs.quads_workaround =
884 compiler->devinfo->gen < 9 &&
885 tes_stage->nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
886 tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
887 }
888
889 static void
890 anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
891 void *mem_ctx,
892 struct anv_device *device,
893 struct anv_pipeline_stage *tcs_stage,
894 struct anv_pipeline_stage *prev_stage)
895 {
896 tcs_stage->key.tcs.outputs_written =
897 tcs_stage->nir->info.outputs_written;
898 tcs_stage->key.tcs.patch_outputs_written =
899 tcs_stage->nir->info.patch_outputs_written;
900
901 tcs_stage->num_stats = 1;
902 tcs_stage->code = brw_compile_tcs(compiler, device, mem_ctx,
903 &tcs_stage->key.tcs,
904 &tcs_stage->prog_data.tcs,
905 tcs_stage->nir, -1,
906 tcs_stage->stats, NULL);
907 }
908
909 static void
910 anv_pipeline_link_tes(const struct brw_compiler *compiler,
911 struct anv_pipeline_stage *tes_stage,
912 struct anv_pipeline_stage *next_stage)
913 {
914 if (next_stage)
915 brw_nir_link_shaders(compiler, tes_stage->nir, next_stage->nir);
916 }
917
918 static void
919 anv_pipeline_compile_tes(const struct brw_compiler *compiler,
920 void *mem_ctx,
921 struct anv_device *device,
922 struct anv_pipeline_stage *tes_stage,
923 struct anv_pipeline_stage *tcs_stage)
924 {
925 tes_stage->key.tes.inputs_read =
926 tcs_stage->nir->info.outputs_written;
927 tes_stage->key.tes.patch_inputs_read =
928 tcs_stage->nir->info.patch_outputs_written;
929
930 tes_stage->num_stats = 1;
931 tes_stage->code = brw_compile_tes(compiler, device, mem_ctx,
932 &tes_stage->key.tes,
933 &tcs_stage->prog_data.tcs.base.vue_map,
934 &tes_stage->prog_data.tes,
935 tes_stage->nir, -1,
936 tes_stage->stats, NULL);
937 }
938
939 static void
940 anv_pipeline_link_gs(const struct brw_compiler *compiler,
941 struct anv_pipeline_stage *gs_stage,
942 struct anv_pipeline_stage *next_stage)
943 {
944 if (next_stage)
945 brw_nir_link_shaders(compiler, gs_stage->nir, next_stage->nir);
946 }
947
948 static void
949 anv_pipeline_compile_gs(const struct brw_compiler *compiler,
950 void *mem_ctx,
951 struct anv_device *device,
952 struct anv_pipeline_stage *gs_stage,
953 struct anv_pipeline_stage *prev_stage)
954 {
955 brw_compute_vue_map(compiler->devinfo,
956 &gs_stage->prog_data.gs.base.vue_map,
957 gs_stage->nir->info.outputs_written,
958 gs_stage->nir->info.separate_shader, 1);
959
960 gs_stage->num_stats = 1;
961 gs_stage->code = brw_compile_gs(compiler, device, mem_ctx,
962 &gs_stage->key.gs,
963 &gs_stage->prog_data.gs,
964 gs_stage->nir, NULL, -1,
965 gs_stage->stats, NULL);
966 }
967
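/* Build the render-target portion of the fragment shader's bind map and
 * demote color outputs that map to VK_ATTACHMENT_UNUSED to locals so that
 * dead-code elimination can remove them.
 */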
968 static void
969 anv_pipeline_link_fs(const struct brw_compiler *compiler,
970 struct anv_pipeline_stage *stage)
971 {
972 unsigned num_rt_bindings;
973 struct anv_pipeline_binding rt_bindings[MAX_RTS];
974 if (stage->key.wm.nr_color_regions > 0) {
975 assert(stage->key.wm.nr_color_regions <= MAX_RTS);
976 for (unsigned rt = 0; rt < stage->key.wm.nr_color_regions; rt++) {
977 if (stage->key.wm.color_outputs_valid & BITFIELD_BIT(rt)) {
978 rt_bindings[rt] = (struct anv_pipeline_binding) {
979 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
980 .index = rt,
981 };
982 } else {
983 /* Setup a null render target */
984 rt_bindings[rt] = (struct anv_pipeline_binding) {
985 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
986 .index = UINT32_MAX,
987 };
988 }
989 }
990 num_rt_bindings = stage->key.wm.nr_color_regions;
991 } else {
992 /* Setup a null render target */
993 rt_bindings[0] = (struct anv_pipeline_binding) {
994 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
995 .index = UINT32_MAX,
996 };
997 num_rt_bindings = 1;
998 }
999
1000 assert(num_rt_bindings <= MAX_RTS);
1001 assert(stage->bind_map.surface_count == 0);
1002 typed_memcpy(stage->bind_map.surface_to_descriptor,
1003 rt_bindings, num_rt_bindings);
1004 stage->bind_map.surface_count += num_rt_bindings;
1005
1006 /* Now that we've set up the color attachments, we can go through and
1007 * eliminate any shader outputs that map to VK_ATTACHMENT_UNUSED in the
1008 * hopes that dead code can clean them up in this and any earlier shader
1009 * stages.
1010 */
1011 nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
1012 bool deleted_output = false;
1013 nir_foreach_variable_safe(var, &stage->nir->outputs) {
1014 /* TODO: We don't delete depth/stencil writes. We probably could if the
1015 * subpass doesn't have a depth/stencil attachment.
1016 */
1017 if (var->data.location < FRAG_RESULT_DATA0)
1018 continue;
1019
1020 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
1021
1022 /* If this is the RT at location 0 and we have alpha to coverage
1023 * enabled we still need that write because it will affect the coverage
1024 * mask even if it's never written to a color target.
1025 */
1026 if (rt == 0 && stage->key.wm.alpha_to_coverage)
1027 continue;
1028
1029 const unsigned array_len =
1030 glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
1031 assert(rt + array_len <= MAX_RTS);
1032
1033 if (rt >= MAX_RTS || !(stage->key.wm.color_outputs_valid &
1034 BITFIELD_RANGE(rt, array_len))) {
1035 deleted_output = true;
1036 var->data.mode = nir_var_function_temp;
1037 exec_node_remove(&var->node);
1038 exec_list_push_tail(&impl->locals, &var->node);
1039 }
1040 }
1041
1042 if (deleted_output)
1043 nir_fixup_deref_modes(stage->nir);
1044
1045 /* We stored the number of subpass color attachments in nr_color_regions
1046 * when calculating the key for caching. Now that we've computed the bind
1047 * map, we can reduce this to the actual max before we go into the back-end
1048 * compiler.
1049 */
1050 stage->key.wm.nr_color_regions =
1051 util_last_bit(stage->key.wm.color_outputs_valid);
1052 }
1053
1054 static void
1055 anv_pipeline_compile_fs(const struct brw_compiler *compiler,
1056 void *mem_ctx,
1057 struct anv_device *device,
1058 struct anv_pipeline_stage *fs_stage,
1059 struct anv_pipeline_stage *prev_stage)
1060 {
1061 /* TODO: we could set this to 0 based on the information in nir_shader, but
1062 * we need this before we call spirv_to_nir.
1063 */
1064 assert(prev_stage);
1065 fs_stage->key.wm.input_slots_valid =
1066 prev_stage->prog_data.vue.vue_map.slots_valid;
1067
1068 fs_stage->code = brw_compile_fs(compiler, device, mem_ctx,
1069 &fs_stage->key.wm,
1070 &fs_stage->prog_data.wm,
1071 fs_stage->nir, -1, -1, -1,
1072 true, false, NULL,
1073 fs_stage->stats, NULL);
1074
1075 fs_stage->num_stats = (uint32_t)fs_stage->prog_data.wm.dispatch_8 +
1076 (uint32_t)fs_stage->prog_data.wm.dispatch_16 +
1077 (uint32_t)fs_stage->prog_data.wm.dispatch_32;
1078
1079 if (fs_stage->key.wm.color_outputs_valid == 0 &&
1080 !fs_stage->prog_data.wm.has_side_effects &&
1081 !fs_stage->prog_data.wm.uses_omask &&
1082 !fs_stage->key.wm.alpha_to_coverage &&
1083 !fs_stage->prog_data.wm.uses_kill &&
1084 fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
1085 !fs_stage->prog_data.wm.computed_stencil) {
1086 /* This fragment shader has no outputs and no side effects. Go ahead
1087 * and return the code pointer so we don't accidentally think the
1088 * compile failed, but zero out prog_data, which will set program_size to
1089 * zero and disable the stage.
1090 */
1091 memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
1092 }
1093 }
1094
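/* Record one anv_pipeline_executable (used to implement
 * VK_KHR_pipeline_executable_properties). When the pipeline was created
 * with CAPTURE_INTERNAL_REPRESENTATIONS, also capture the NIR and the
 * disassembly, including a summary of the push constant ranges.
 */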
1095 static void
1096 anv_pipeline_add_executable(struct anv_pipeline *pipeline,
1097 struct anv_pipeline_stage *stage,
1098 struct brw_compile_stats *stats,
1099 uint32_t code_offset)
1100 {
1101 char *nir = NULL;
1102 if (stage->nir &&
1103 (pipeline->flags &
1104 VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
1105 char *stream_data = NULL;
1106 size_t stream_size = 0;
1107 FILE *stream = open_memstream(&stream_data, &stream_size);
1108
1109 nir_print_shader(stage->nir, stream);
1110
1111 fclose(stream);
1112
1113 /* Copy it to a ralloc'd thing */
1114 nir = ralloc_size(pipeline->mem_ctx, stream_size + 1);
1115 memcpy(nir, stream_data, stream_size);
1116 nir[stream_size] = 0;
1117
1118 free(stream_data);
1119 }
1120
1121 char *disasm = NULL;
1122 if (stage->code &&
1123 (pipeline->flags &
1124 VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
1125 char *stream_data = NULL;
1126 size_t stream_size = 0;
1127 FILE *stream = open_memstream(&stream_data, &stream_size);
1128
1129 uint32_t push_size = 0;
1130 for (unsigned i = 0; i < 4; i++)
1131 push_size += stage->bind_map.push_ranges[i].length;
1132 if (push_size > 0) {
1133 fprintf(stream, "Push constant ranges:\n");
1134 for (unsigned i = 0; i < 4; i++) {
1135 if (stage->bind_map.push_ranges[i].length == 0)
1136 continue;
1137
1138 fprintf(stream, " RANGE%d (%dB): ", i,
1139 stage->bind_map.push_ranges[i].length * 32);
1140
1141 switch (stage->bind_map.push_ranges[i].set) {
1142 case ANV_DESCRIPTOR_SET_NULL:
1143 fprintf(stream, "NULL");
1144 break;
1145
1146 case ANV_DESCRIPTOR_SET_PUSH_CONSTANTS:
1147 fprintf(stream, "Vulkan push constants and API params");
1148 break;
1149
1150 case ANV_DESCRIPTOR_SET_DESCRIPTORS:
1151 fprintf(stream, "Descriptor buffer for set %d (start=%dB)",
1152 stage->bind_map.push_ranges[i].index,
1153 stage->bind_map.push_ranges[i].start * 32);
1154 break;
1155
1156 case ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS:
1157 unreachable("gl_NumWorkgroups is never pushed");
1158
1159 case ANV_DESCRIPTOR_SET_SHADER_CONSTANTS:
1160 fprintf(stream, "Inline shader constant data (start=%dB)",
1161 stage->bind_map.push_ranges[i].start * 32);
1162 break;
1163
1164 case ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS:
1165 unreachable("Color attachments can't be pushed");
1166
1167 default:
1168 fprintf(stream, "UBO (set=%d binding=%d start=%dB)",
1169 stage->bind_map.push_ranges[i].set,
1170 stage->bind_map.push_ranges[i].index,
1171 stage->bind_map.push_ranges[i].start * 32);
1172 break;
1173 }
1174 fprintf(stream, "\n");
1175 }
1176 fprintf(stream, "\n");
1177 }
1178
1179 /* Creating this is far cheaper than it looks. It's perfectly fine to
1180 * do it for every binary.
1181 */
1182 struct gen_disasm *d = gen_disasm_create(&pipeline->device->info);
1183 gen_disasm_disassemble(d, stage->code, code_offset, stream);
1184 gen_disasm_destroy(d);
1185
1186 fclose(stream);
1187
1188 /* Copy it to a ralloc'd thing */
1189 disasm = ralloc_size(pipeline->mem_ctx, stream_size + 1);
1190 memcpy(disasm, stream_data, stream_size);
1191 disasm[stream_size] = 0;
1192
1193 free(stream_data);
1194 }
1195
1196 const struct anv_pipeline_executable exe = {
1197 .stage = stage->stage,
1198 .stats = *stats,
1199 .nir = nir,
1200 .disasm = disasm,
1201 };
1202 util_dynarray_append(&pipeline->executables,
1203 struct anv_pipeline_executable, exe);
1204 }
1205
1206 static void
1207 anv_pipeline_add_executables(struct anv_pipeline *pipeline,
1208 struct anv_pipeline_stage *stage,
1209 struct anv_shader_bin *bin)
1210 {
1211 if (stage->stage == MESA_SHADER_FRAGMENT) {
1212 /* We pull the prog data and stats out of the anv_shader_bin because
1213 * the anv_pipeline_stage may not be fully populated if we successfully
1214 * looked up the shader in a cache.
1215 */
1216 const struct brw_wm_prog_data *wm_prog_data =
1217 (const struct brw_wm_prog_data *)bin->prog_data;
1218 struct brw_compile_stats *stats = bin->stats;
1219
1220 if (wm_prog_data->dispatch_8) {
1221 anv_pipeline_add_executable(pipeline, stage, stats++, 0);
1222 }
1223
1224 if (wm_prog_data->dispatch_16) {
1225 anv_pipeline_add_executable(pipeline, stage, stats++,
1226 wm_prog_data->prog_offset_16);
1227 }
1228
1229 if (wm_prog_data->dispatch_32) {
1230 anv_pipeline_add_executable(pipeline, stage, stats++,
1231 wm_prog_data->prog_offset_32);
1232 }
1233 } else {
1234 anv_pipeline_add_executable(pipeline, stage, bin->stats, 0);
1235 }
1236 }
1237
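/* Re-derive pipeline-wide state when every shader came out of the cache.
 * Currently this is only use_primitive_replication, recovered from the
 * number of POS slots in the vertex shader's VUE map.
 */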
1238 static void
1239 anv_pipeline_init_from_cached_graphics(struct anv_graphics_pipeline *pipeline)
1240 {
1241 /* TODO: Cache this pipeline-wide information. */
1242
1243 /* Primitive replication depends on information from all the shaders.
1244 * Recover this bit from the fact that we have more than one position slot
1245 * in the vertex shader when using it.
1246 */
1247 assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
1248 int pos_slots = 0;
1249 const struct brw_vue_prog_data *vue_prog_data =
1250 (const void *) pipeline->shaders[MESA_SHADER_VERTEX]->prog_data;
1251 const struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
1252 for (int i = 0; i < vue_map->num_slots; i++) {
1253 if (vue_map->slot_to_varying[i] == VARYING_SLOT_POS)
1254 pos_slots++;
1255 }
1256 pipeline->use_primitive_replication = pos_slots > 1;
1257 }
1258
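/* Compile (or fetch from the pipeline cache) all shaders of a graphics
 * pipeline: hash each stage, look the binaries up in the cache, and for
 * any misses translate to NIR, link the stages back-to-front, lower,
 * compile with the brw back-end, and upload the result, accumulating
 * VK_EXT_pipeline_creation_feedback timings along the way.
 */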
1259 static VkResult
1260 anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
1261 struct anv_pipeline_cache *cache,
1262 const VkGraphicsPipelineCreateInfo *info)
1263 {
1264 VkPipelineCreationFeedbackEXT pipeline_feedback = {
1265 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1266 };
1267 int64_t pipeline_start = os_time_get_nano();
1268
1269 const struct brw_compiler *compiler = pipeline->base.device->physical->compiler;
1270 struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
1271
1272 pipeline->active_stages = 0;
1273
1274 VkResult result;
1275 for (uint32_t i = 0; i < info->stageCount; i++) {
1276 const VkPipelineShaderStageCreateInfo *sinfo = &info->pStages[i];
1277 gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
1278
1279 pipeline->active_stages |= sinfo->stage;
1280
1281 int64_t stage_start = os_time_get_nano();
1282
1283 stages[stage].stage = stage;
1284 stages[stage].module = anv_shader_module_from_handle(sinfo->module);
1285 stages[stage].entrypoint = sinfo->pName;
1286 stages[stage].spec_info = sinfo->pSpecializationInfo;
1287 anv_pipeline_hash_shader(stages[stage].module,
1288 stages[stage].entrypoint,
1289 stage,
1290 stages[stage].spec_info,
1291 stages[stage].shader_sha1);
1292
1293 const struct gen_device_info *devinfo = &pipeline->base.device->info;
1294 switch (stage) {
1295 case MESA_SHADER_VERTEX:
1296 populate_vs_prog_key(devinfo, sinfo->flags, &stages[stage].key.vs);
1297 break;
1298 case MESA_SHADER_TESS_CTRL:
1299 populate_tcs_prog_key(devinfo, sinfo->flags,
1300 info->pTessellationState->patchControlPoints,
1301 &stages[stage].key.tcs);
1302 break;
1303 case MESA_SHADER_TESS_EVAL:
1304 populate_tes_prog_key(devinfo, sinfo->flags, &stages[stage].key.tes);
1305 break;
1306 case MESA_SHADER_GEOMETRY:
1307 populate_gs_prog_key(devinfo, sinfo->flags, &stages[stage].key.gs);
1308 break;
1309 case MESA_SHADER_FRAGMENT: {
1310 const bool raster_enabled =
1311 !info->pRasterizationState->rasterizerDiscardEnable;
1312 populate_wm_prog_key(devinfo, sinfo->flags,
1313 pipeline->subpass,
1314 raster_enabled ? info->pMultisampleState : NULL,
1315 &stages[stage].key.wm);
1316 break;
1317 }
1318 default:
1319 unreachable("Invalid graphics shader stage");
1320 }
1321
1322 stages[stage].feedback.duration += os_time_get_nano() - stage_start;
1323 stages[stage].feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
1324 }
1325
1326 if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
1327 pipeline->active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
1328
1329 assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
1330
1331 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1332
1333 unsigned char sha1[20];
1334 anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
1335
1336 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1337 if (!stages[s].entrypoint)
1338 continue;
1339
1340 stages[s].cache_key.stage = s;
1341 memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
1342 }
1343
1344 const bool skip_cache_lookup =
1345 (pipeline->base.flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
1346
1347 if (!skip_cache_lookup) {
1348 unsigned found = 0;
1349 unsigned cache_hits = 0;
1350 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1351 if (!stages[s].entrypoint)
1352 continue;
1353
1354 int64_t stage_start = os_time_get_nano();
1355
1356 bool cache_hit;
1357 struct anv_shader_bin *bin =
1358 anv_device_search_for_kernel(pipeline->base.device, cache,
1359 &stages[s].cache_key,
1360 sizeof(stages[s].cache_key), &cache_hit);
1361 if (bin) {
1362 found++;
1363 pipeline->shaders[s] = bin;
1364 }
1365
1366 if (cache_hit) {
1367 cache_hits++;
1368 stages[s].feedback.flags |=
1369 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1370 }
1371 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1372 }
1373
1374 if (found == __builtin_popcount(pipeline->active_stages)) {
1375 if (cache_hits == found) {
1376 pipeline_feedback.flags |=
1377 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1378 }
1379 /* We found all our shaders in the cache. We're done. */
1380 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1381 if (!stages[s].entrypoint)
1382 continue;
1383
1384 anv_pipeline_add_executables(&pipeline->base, &stages[s],
1385 pipeline->shaders[s]);
1386 }
1387 anv_pipeline_init_from_cached_graphics(pipeline);
1388 goto done;
1389 } else if (found > 0) {
1390 /* We found some but not all of our shaders. This shouldn't happen
1391 * most of the time but it can if we have a partially populated
1392 * pipeline cache.
1393 */
1394 assert(found < __builtin_popcount(pipeline->active_stages));
1395
1396 vk_debug_report(&pipeline->base.device->physical->instance->debug_report_callbacks,
1397 VK_DEBUG_REPORT_WARNING_BIT_EXT |
1398 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
1399 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
1400 (uint64_t)(uintptr_t)cache,
1401 0, 0, "anv",
1402 "Found a partial pipeline in the cache. This is "
1403 "most likely caused by an incomplete pipeline cache "
1404 "import or export");
1405
1406 /* We're going to have to recompile anyway, so just throw away our
1407 * references to the shaders in the cache. We'll get them out of the
1408 * cache again as part of the compilation process.
1409 */
1410 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1411 stages[s].feedback.flags = 0;
1412 if (pipeline->shaders[s]) {
1413 anv_shader_bin_unref(pipeline->base.device, pipeline->shaders[s]);
1414 pipeline->shaders[s] = NULL;
1415 }
1416 }
1417 }
1418 }
1419
1420 void *pipeline_ctx = ralloc_context(NULL);
1421
1422 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1423 if (!stages[s].entrypoint)
1424 continue;
1425
1426 int64_t stage_start = os_time_get_nano();
1427
1428 assert(stages[s].stage == s);
1429 assert(pipeline->shaders[s] == NULL);
1430
1431 stages[s].bind_map = (struct anv_pipeline_bind_map) {
1432 .surface_to_descriptor = stages[s].surface_to_descriptor,
1433 .sampler_to_descriptor = stages[s].sampler_to_descriptor
1434 };
1435
1436 stages[s].nir = anv_pipeline_stage_get_nir(&pipeline->base, cache,
1437 pipeline_ctx,
1438 &stages[s]);
1439 if (stages[s].nir == NULL) {
1440 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1441 goto fail;
1442 }
1443
1444 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1445 }
1446
1447 /* Walk backwards to link */
1448 struct anv_pipeline_stage *next_stage = NULL;
1449 for (int s = MESA_SHADER_STAGES - 1; s >= 0; s--) {
1450 if (!stages[s].entrypoint)
1451 continue;
1452
1453 switch (s) {
1454 case MESA_SHADER_VERTEX:
1455 anv_pipeline_link_vs(compiler, &stages[s], next_stage);
1456 break;
1457 case MESA_SHADER_TESS_CTRL:
1458 anv_pipeline_link_tcs(compiler, &stages[s], next_stage);
1459 break;
1460 case MESA_SHADER_TESS_EVAL:
1461 anv_pipeline_link_tes(compiler, &stages[s], next_stage);
1462 break;
1463 case MESA_SHADER_GEOMETRY:
1464 anv_pipeline_link_gs(compiler, &stages[s], next_stage);
1465 break;
1466 case MESA_SHADER_FRAGMENT:
1467 anv_pipeline_link_fs(compiler, &stages[s]);
1468 break;
1469 default:
1470 unreachable("Invalid graphics shader stage");
1471 }
1472
1473 next_stage = &stages[s];
1474 }
1475
1476 if (pipeline->base.device->info.gen >= 12 &&
1477 pipeline->subpass->view_mask != 0) {
1478 /* For some pipelines HW Primitive Replication can be used instead of
1479 * instancing to implement Multiview. This depends on how viewIndex is
1480 * used in all the active shaders, so this check can't be done per
1481 * individual shader.
1482 */
1483 nir_shader *shaders[MESA_SHADER_STAGES] = {};
1484 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++)
1485 shaders[s] = stages[s].nir;
1486
1487 pipeline->use_primitive_replication =
1488 anv_check_for_primitive_replication(shaders, pipeline);
1489 } else {
1490 pipeline->use_primitive_replication = false;
1491 }
1492
1493 struct anv_pipeline_stage *prev_stage = NULL;
1494 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1495 if (!stages[s].entrypoint)
1496 continue;
1497
1498 int64_t stage_start = os_time_get_nano();
1499
1500 void *stage_ctx = ralloc_context(NULL);
1501
1502 nir_xfb_info *xfb_info = NULL;
1503 if (s == MESA_SHADER_VERTEX ||
1504 s == MESA_SHADER_TESS_EVAL ||
1505 s == MESA_SHADER_GEOMETRY)
1506 xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);
1507
1508 anv_pipeline_lower_nir(&pipeline->base, stage_ctx, &stages[s], layout);
1509
1510 switch (s) {
1511 case MESA_SHADER_VERTEX:
1512 anv_pipeline_compile_vs(compiler, stage_ctx, pipeline,
1513 &stages[s]);
1514 break;
1515 case MESA_SHADER_TESS_CTRL:
1516 anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->base.device,
1517 &stages[s], prev_stage);
1518 break;
1519 case MESA_SHADER_TESS_EVAL:
1520 anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->base.device,
1521 &stages[s], prev_stage);
1522 break;
1523 case MESA_SHADER_GEOMETRY:
1524 anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->base.device,
1525 &stages[s], prev_stage);
1526 break;
1527 case MESA_SHADER_FRAGMENT:
1528 anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->base.device,
1529 &stages[s], prev_stage);
1530 break;
1531 default:
1532 unreachable("Invalid graphics shader stage");
1533 }
1534 if (stages[s].code == NULL) {
1535 ralloc_free(stage_ctx);
1536 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1537 goto fail;
1538 }
1539
1540 anv_nir_validate_push_layout(&stages[s].prog_data.base,
1541 &stages[s].bind_map);
1542
1543 struct anv_shader_bin *bin =
1544 anv_device_upload_kernel(pipeline->base.device, cache, s,
1545 &stages[s].cache_key,
1546 sizeof(stages[s].cache_key),
1547 stages[s].code,
1548 stages[s].prog_data.base.program_size,
1549 stages[s].nir->constant_data,
1550 stages[s].nir->constant_data_size,
1551 &stages[s].prog_data.base,
1552 brw_prog_data_size(s),
1553 stages[s].stats, stages[s].num_stats,
1554 xfb_info, &stages[s].bind_map);
1555 if (!bin) {
1556 ralloc_free(stage_ctx);
1557 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1558 goto fail;
1559 }
1560
1561 anv_pipeline_add_executables(&pipeline->base, &stages[s], bin);
1562
1563 pipeline->shaders[s] = bin;
1564 ralloc_free(stage_ctx);
1565
1566 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1567
1568 prev_stage = &stages[s];
1569 }
1570
1571 ralloc_free(pipeline_ctx);
1572
1573 done:
1574
1575 if (pipeline->shaders[MESA_SHADER_FRAGMENT] &&
1576 pipeline->shaders[MESA_SHADER_FRAGMENT]->prog_data->program_size == 0) {
1577 /* This can happen if we decided to implicitly disable the fragment
1578 * shader. See anv_pipeline_compile_fs().
1579 */
1580 anv_shader_bin_unref(pipeline->base.device,
1581 pipeline->shaders[MESA_SHADER_FRAGMENT]);
1582 pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
1583 pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
1584 }
1585
1586 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1587
1588 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1589 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1590 if (create_feedback) {
1591 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1592
1593 assert(info->stageCount == create_feedback->pipelineStageCreationFeedbackCount);
1594 for (uint32_t i = 0; i < info->stageCount; i++) {
1595 gl_shader_stage s = vk_to_mesa_shader_stage(info->pStages[i].stage);
1596 create_feedback->pPipelineStageCreationFeedbacks[i] = stages[s].feedback;
1597 }
1598 }
1599
1600 return VK_SUCCESS;
1601
1602 fail:
1603 ralloc_free(pipeline_ctx);
1604
1605 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1606 if (pipeline->shaders[s])
1607 anv_shader_bin_unref(pipeline->base.device, pipeline->shaders[s]);
1608 }
1609
1610 return result;
1611 }
1612
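/* Size/alignment callback for lowering shared-memory variables to explicit
 * types: booleans take 4 bytes and a vec3 is aligned like a vec4.
 */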
1613 static void
1614 shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
1615 {
1616 assert(glsl_type_is_vector_or_scalar(type));
1617
1618 uint32_t comp_size = glsl_type_is_boolean(type)
1619 ? 4 : glsl_get_bit_size(type) / 8;
1620 unsigned length = glsl_get_vector_elements(type);
1621 *size = comp_size * length;
1622 *align = comp_size * (length == 3 ? 4 : length);
1623 }
1624
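/* Compile (or fetch from the pipeline cache) the compute shader: hash the
 * stage, search the cache, and on a miss translate to NIR, lower (including
 * shared-memory and CS intrinsics lowering plus a binding for
 * gl_NumWorkGroups), compile with brw_compile_cs, and upload the kernel.
 */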
1625 VkResult
1626 anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
1627 struct anv_pipeline_cache *cache,
1628 const VkComputePipelineCreateInfo *info,
1629 const struct anv_shader_module *module,
1630 const char *entrypoint,
1631 const VkSpecializationInfo *spec_info)
1632 {
1633 VkPipelineCreationFeedbackEXT pipeline_feedback = {
1634 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1635 };
1636 int64_t pipeline_start = os_time_get_nano();
1637
1638 const struct brw_compiler *compiler = pipeline->base.device->physical->compiler;
1639
1640 struct anv_pipeline_stage stage = {
1641 .stage = MESA_SHADER_COMPUTE,
1642 .module = module,
1643 .entrypoint = entrypoint,
1644 .spec_info = spec_info,
1645 .cache_key = {
1646 .stage = MESA_SHADER_COMPUTE,
1647 },
1648 .feedback = {
1649 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1650 },
1651 };
1652 anv_pipeline_hash_shader(stage.module,
1653 stage.entrypoint,
1654 MESA_SHADER_COMPUTE,
1655 stage.spec_info,
1656 stage.shader_sha1);
1657
1658 struct anv_shader_bin *bin = NULL;
1659
1660 const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *rss_info =
1661 vk_find_struct_const(info->stage.pNext,
1662 PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
1663
1664 populate_cs_prog_key(&pipeline->base.device->info, info->stage.flags,
1665 rss_info, &stage.key.cs);
1666
1667 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1668
1669 const bool skip_cache_lookup =
1670 (pipeline->base.flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
1671
1672 anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
1673
1674 bool cache_hit = false;
1675 if (!skip_cache_lookup) {
1676 bin = anv_device_search_for_kernel(pipeline->base.device, cache,
1677 &stage.cache_key,
1678 sizeof(stage.cache_key),
1679 &cache_hit);
1680 }
1681
1682 void *mem_ctx = ralloc_context(NULL);
1683 if (bin == NULL) {
1684 int64_t stage_start = os_time_get_nano();
1685
1686 stage.bind_map = (struct anv_pipeline_bind_map) {
1687 .surface_to_descriptor = stage.surface_to_descriptor,
1688 .sampler_to_descriptor = stage.sampler_to_descriptor
1689 };
1690
1691 /* Set up a binding for the gl_NumWorkGroups */
1692 stage.bind_map.surface_count = 1;
1693 stage.bind_map.surface_to_descriptor[0] = (struct anv_pipeline_binding) {
1694 .set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
1695 };
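/* If the compiled shader turns out not to use gl_NumWorkGroups, this
 * binding is switched to ANV_DESCRIPTOR_SET_NULL after compilation below.
 */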
1696
1697 stage.nir = anv_pipeline_stage_get_nir(&pipeline->base, cache, mem_ctx, &stage);
1698 if (stage.nir == NULL) {
1699 ralloc_free(mem_ctx);
1700 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1701 }
1702
1703 NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id);
1704
1705 anv_pipeline_lower_nir(&pipeline->base, mem_ctx, &stage, layout);
1706
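/* Assign explicit offsets to shared (workgroup) variables using the packing
 * rules from shared_type_info(), then lower shared access to 32-bit offset
 * addressing before running the CS intrinsics lowering.
 */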
1707 NIR_PASS_V(stage.nir, nir_lower_vars_to_explicit_types,
1708 nir_var_mem_shared, shared_type_info);
1709 NIR_PASS_V(stage.nir, nir_lower_explicit_io,
1710 nir_var_mem_shared, nir_address_format_32bit_offset);
1711 NIR_PASS_V(stage.nir, brw_nir_lower_cs_intrinsics);
1712
1713 stage.num_stats = 1;
1714 stage.code = brw_compile_cs(compiler, pipeline->base.device, mem_ctx,
1715 &stage.key.cs, &stage.prog_data.cs,
1716 stage.nir, -1, stage.stats, NULL);
1717 if (stage.code == NULL) {
1718 ralloc_free(mem_ctx);
1719 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1720 }
1721
1722 anv_nir_validate_push_layout(&stage.prog_data.base, &stage.bind_map);
1723
1724 if (!stage.prog_data.cs.uses_num_work_groups) {
1725 assert(stage.bind_map.surface_to_descriptor[0].set ==
1726 ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS);
1727 stage.bind_map.surface_to_descriptor[0].set = ANV_DESCRIPTOR_SET_NULL;
1728 }
1729
1730 const unsigned code_size = stage.prog_data.base.program_size;
1731 bin = anv_device_upload_kernel(pipeline->base.device, cache,
1732 MESA_SHADER_COMPUTE,
1733 &stage.cache_key, sizeof(stage.cache_key),
1734 stage.code, code_size,
1735 stage.nir->constant_data,
1736 stage.nir->constant_data_size,
1737 &stage.prog_data.base,
1738 sizeof(stage.prog_data.cs),
1739 stage.stats, stage.num_stats,
1740 NULL, &stage.bind_map);
1741 if (!bin) {
1742 ralloc_free(mem_ctx);
1743 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1744 }
1745
1746 stage.feedback.duration = os_time_get_nano() - stage_start;
1747 }
1748
1749 anv_pipeline_add_executables(&pipeline->base, &stage, bin);
1750
1751 ralloc_free(mem_ctx);
1752
1753 if (cache_hit) {
1754 stage.feedback.flags |=
1755 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1756 pipeline_feedback.flags |=
1757 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1758 }
1759 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1760
1761 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1762 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1763 if (create_feedback) {
1764 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1765
1766 assert(create_feedback->pipelineStageCreationFeedbackCount == 1);
1767 create_feedback->pPipelineStageCreationFeedbacks[0] = stage.feedback;
1768 }
1769
1770 pipeline->cs = bin;
1771
1772 return VK_SUCCESS;
1773 }
1774
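/* Derive the dispatch parameters for a compute pipeline.  For example, with
 * a local size of 8x8x1 the group size is 64 invocations; if the compiler
 * selects a SIMD16 program for that group size, DIV_ROUND_UP(64, 16) gives
 * 4 hardware threads per workgroup.  The SIMD width itself is chosen by
 * brw_cs_simd_size_for_group_size() based on the device and program data.
 */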
1775 struct anv_cs_parameters
1776 anv_cs_parameters(const struct anv_compute_pipeline *pipeline)
1777 {
1778 const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
1779
1780 struct anv_cs_parameters cs_params = {};
1781
1782 cs_params.group_size = cs_prog_data->local_size[0] *
1783 cs_prog_data->local_size[1] *
1784 cs_prog_data->local_size[2];
1785 cs_params.simd_size =
1786 brw_cs_simd_size_for_group_size(&pipeline->base.device->info,
1787 cs_prog_data, cs_params.group_size);
1788 cs_params.threads = DIV_ROUND_UP(cs_params.group_size, cs_params.simd_size);
1789
1790 return cs_params;
1791 }
1792
1793 /**
1794 * Copy pipeline state not marked as dynamic.
1795 * Dynamic state is pipeline state that is not provided at pipeline
1796 * creation time, but is instead provided dynamically afterwards using the
1797 * various vkCmdSet* functions.
1798 *
1799 * The set of state considered "non_dynamic" is determined by the pieces of
1800 * state that have their corresponding VkDynamicState enums omitted from
1801 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
1802 *
1803 * @param[out] pipeline Destination non_dynamic state.
1804 * @param[in] pCreateInfo Source of non_dynamic state to be copied.
1805 */
1806 static void
1807 copy_non_dynamic_state(struct anv_graphics_pipeline *pipeline,
1808 const VkGraphicsPipelineCreateInfo *pCreateInfo)
1809 {
1810 anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
1811 struct anv_subpass *subpass = pipeline->subpass;
1812
1813 pipeline->dynamic_state = default_dynamic_state;
1814
1815 if (pCreateInfo->pDynamicState) {
1816 /* Remove all of the states that are marked as dynamic */
1817 uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
1818 for (uint32_t s = 0; s < count; s++) {
1819 states &= ~anv_cmd_dirty_bit_for_vk_dynamic_state(
1820 pCreateInfo->pDynamicState->pDynamicStates[s]);
1821 }
1822 }
1823
1824 struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
1825
1826 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1827 *
1828 * pViewportState is [...] NULL if the pipeline
1829 * has rasterization disabled.
1830 */
1831 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1832 assert(pCreateInfo->pViewportState);
1833
1834 dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
1835 if (states & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) {
1836 typed_memcpy(dynamic->viewport.viewports,
1837 pCreateInfo->pViewportState->pViewports,
1838 pCreateInfo->pViewportState->viewportCount);
1839 }
1840
1841 dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
1842 if (states & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) {
1843 typed_memcpy(dynamic->scissor.scissors,
1844 pCreateInfo->pViewportState->pScissors,
1845 pCreateInfo->pViewportState->scissorCount);
1846 }
1847 }
1848
1849 if (states & ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) {
1850 assert(pCreateInfo->pRasterizationState);
1851 dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
1852 }
1853
1854 if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS) {
1855 assert(pCreateInfo->pRasterizationState);
1856 dynamic->depth_bias.bias =
1857 pCreateInfo->pRasterizationState->depthBiasConstantFactor;
1858 dynamic->depth_bias.clamp =
1859 pCreateInfo->pRasterizationState->depthBiasClamp;
1860 dynamic->depth_bias.slope =
1861 pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
1862 }
1863
1864 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1865 *
1866 * pColorBlendState is [...] NULL if the pipeline has rasterization
1867 * disabled or if the subpass of the render pass the pipeline is
1868 * created against does not use any color attachments.
1869 */
1870 bool uses_color_att = false;
1871 for (unsigned i = 0; i < subpass->color_count; ++i) {
1872 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1873 uses_color_att = true;
1874 break;
1875 }
1876 }
1877
1878 if (uses_color_att &&
1879 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1880 assert(pCreateInfo->pColorBlendState);
1881
1882 if (states & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
1883 typed_memcpy(dynamic->blend_constants,
1884 pCreateInfo->pColorBlendState->blendConstants, 4);
1885 }
1886
1887 /* If there is no depth/stencil attachment, then don't read
1888 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
1889 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
1890 * no need to override the depth/stencil defaults in
1891 * anv_graphics_pipeline::dynamic_state when there is no such attachment.
1892 *
1893 * Section 9.2 of the Vulkan 1.0.15 spec says:
1894 *
1895 * pDepthStencilState is [...] NULL if the pipeline has rasterization
1896 * disabled or if the subpass of the render pass the pipeline is created
1897 * against does not use a depth/stencil attachment.
1898 */
1899 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
1900 subpass->depth_stencil_attachment) {
1901 assert(pCreateInfo->pDepthStencilState);
1902
1903 if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS) {
1904 dynamic->depth_bounds.min =
1905 pCreateInfo->pDepthStencilState->minDepthBounds;
1906 dynamic->depth_bounds.max =
1907 pCreateInfo->pDepthStencilState->maxDepthBounds;
1908 }
1909
1910 if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) {
1911 dynamic->stencil_compare_mask.front =
1912 pCreateInfo->pDepthStencilState->front.compareMask;
1913 dynamic->stencil_compare_mask.back =
1914 pCreateInfo->pDepthStencilState->back.compareMask;
1915 }
1916
1917 if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) {
1918 dynamic->stencil_write_mask.front =
1919 pCreateInfo->pDepthStencilState->front.writeMask;
1920 dynamic->stencil_write_mask.back =
1921 pCreateInfo->pDepthStencilState->back.writeMask;
1922 }
1923
1924 if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) {
1925 dynamic->stencil_reference.front =
1926 pCreateInfo->pDepthStencilState->front.reference;
1927 dynamic->stencil_reference.back =
1928 pCreateInfo->pDepthStencilState->back.reference;
1929 }
1930 }
1931
1932 const VkPipelineRasterizationLineStateCreateInfoEXT *line_state =
1933 vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
1934 PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
1935 if (line_state) {
1936 if (states & ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE) {
1937 dynamic->line_stipple.factor = line_state->lineStippleFactor;
1938 dynamic->line_stipple.pattern = line_state->lineStipplePattern;
1939 }
1940 }
1941
1942 pipeline->dynamic_state_mask = states;
1943 }
1944
1945 static void
1946 anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
1947 {
1948 #ifdef DEBUG
1949 struct anv_render_pass *renderpass = NULL;
1950 struct anv_subpass *subpass = NULL;
1951
1952 /* Assert that all required members of VkGraphicsPipelineCreateInfo are
1953 * present. See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
1954 */
1955 assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1956
1957 renderpass = anv_render_pass_from_handle(info->renderPass);
1958 assert(renderpass);
1959
1960 assert(info->subpass < renderpass->subpass_count);
1961 subpass = &renderpass->subpasses[info->subpass];
1962
1963 assert(info->stageCount >= 1);
1964 assert(info->pVertexInputState);
1965 assert(info->pInputAssemblyState);
1966 assert(info->pRasterizationState);
1967 if (!info->pRasterizationState->rasterizerDiscardEnable) {
1968 assert(info->pViewportState);
1969 assert(info->pMultisampleState);
1970
1971 if (subpass && subpass->depth_stencil_attachment)
1972 assert(info->pDepthStencilState);
1973
1974 if (subpass && subpass->color_count > 0) {
1975 bool all_color_unused = true;
1976 for (int i = 0; i < subpass->color_count; i++) {
1977 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
1978 all_color_unused = false;
1979 }
1980 /* pColorBlendState is ignored if the pipeline has rasterization
1981 * disabled or if the subpass of the render pass the pipeline is
1982 * created against does not use any color attachments.
1983 */
1984 assert(info->pColorBlendState || all_color_unused);
1985 }
1986 }
1987
1988 for (uint32_t i = 0; i < info->stageCount; ++i) {
1989 switch (info->pStages[i].stage) {
1990 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
1991 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
1992 assert(info->pTessellationState);
1993 break;
1994 default:
1995 break;
1996 }
1997 }
1998 #endif
1999 }
2000
2001 /**
2002 * Calculate the desired L3 partitioning based on the current state of the
2003 * pipeline. For now this simply returns the conservative defaults calculated
2004 * by gen_get_default_l3_weights(), but we could probably do better by gathering
2005 * more statistics from the pipeline state (e.g. a guess of expected URB usage
2006 * and bound surfaces), or by using feedback from performance counters.
2007 */
2008 void
2009 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
2010 {
2011 const struct gen_device_info *devinfo = &pipeline->device->info;
2012
2013 const struct gen_l3_weights w =
2014 gen_get_default_l3_weights(devinfo, true, needs_slm);
2015
2016 pipeline->l3_config = gen_get_l3_config(devinfo, w);
2017 }
2018
2019 VkResult
2020 anv_graphics_pipeline_init(struct anv_graphics_pipeline *pipeline,
2021 struct anv_device *device,
2022 struct anv_pipeline_cache *cache,
2023 const VkGraphicsPipelineCreateInfo *pCreateInfo,
2024 const VkAllocationCallbacks *alloc)
2025 {
2026 VkResult result;
2027
2028 anv_pipeline_validate_create_info(pCreateInfo);
2029
2030 result = anv_pipeline_init(&pipeline->base, device,
2031 ANV_PIPELINE_GRAPHICS, pCreateInfo->flags,
2032 alloc);
2033 if (result != VK_SUCCESS)
2034 return result;
2035
2036 anv_batch_set_storage(&pipeline->base.batch, ANV_NULL_ADDRESS,
2037 pipeline->batch_data, sizeof(pipeline->batch_data));
2038
2039 ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
2040 assert(pCreateInfo->subpass < render_pass->subpass_count);
2041 pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];
2042
2043 assert(pCreateInfo->pRasterizationState);
2044
2045 copy_non_dynamic_state(pipeline, pCreateInfo);
2046 pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState->depthClampEnable;
2047
2048 /* Previously we enabled depth clipping when !depthClampEnable.
2049 * DepthClipStateCreateInfo now makes depth clipping explicit, so if the
2050 * clipping info is available, use its enable value to determine clipping;
2051 * otherwise fall back to the previous !depthClampEnable logic.
2052 */
2053 const VkPipelineRasterizationDepthClipStateCreateInfoEXT *clip_info =
2054 vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
2055 PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
2056 pipeline->depth_clip_enable = clip_info ? clip_info->depthClipEnable : !pipeline->depth_clamp_enable;
2057
2058 pipeline->sample_shading_enable =
2059 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
2060 pCreateInfo->pMultisampleState &&
2061 pCreateInfo->pMultisampleState->sampleShadingEnable;
2062
2063 /* When we free the pipeline, we detect stages based on the NULL status
2064 * of various prog_data pointers. Make them NULL by default.
2065 */
2066 memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
2067
2068 result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
2069 if (result != VK_SUCCESS) {
2070 anv_pipeline_finish(&pipeline->base, device, alloc);
2071 return result;
2072 }
2073
2074 assert(pipeline->shaders[MESA_SHADER_VERTEX]);
2075
2076 anv_pipeline_setup_l3_config(&pipeline->base, false);
2077
2078 const VkPipelineVertexInputStateCreateInfo *vi_info =
2079 pCreateInfo->pVertexInputState;
2080
2081 const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;
2082
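/* Only mark vertex buffer bindings that feed attributes the vertex shader
 * actually reads; inputs_read is a bitmask indexed from VERT_ATTRIB_GENERIC0.
 */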
2083 pipeline->vb_used = 0;
2084 for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
2085 const VkVertexInputAttributeDescription *desc =
2086 &vi_info->pVertexAttributeDescriptions[i];
2087
2088 if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
2089 pipeline->vb_used |= 1 << desc->binding;
2090 }
2091
2092 for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
2093 const VkVertexInputBindingDescription *desc =
2094 &vi_info->pVertexBindingDescriptions[i];
2095
2096 pipeline->vb[desc->binding].stride = desc->stride;
2097
2098 /* Step rate is programmed per vertex element (attribute), not
2099 * binding. Set up a map of which bindings step per instance, for
2100 * reference by vertex element setup. */
2101 switch (desc->inputRate) {
2102 default:
2103 case VK_VERTEX_INPUT_RATE_VERTEX:
2104 pipeline->vb[desc->binding].instanced = false;
2105 break;
2106 case VK_VERTEX_INPUT_RATE_INSTANCE:
2107 pipeline->vb[desc->binding].instanced = true;
2108 break;
2109 }
2110
2111 pipeline->vb[desc->binding].instance_divisor = 1;
2112 }
2113
2114 const VkPipelineVertexInputDivisorStateCreateInfoEXT *vi_div_state =
2115 vk_find_struct_const(vi_info->pNext,
2116 PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
2117 if (vi_div_state) {
2118 for (uint32_t i = 0; i < vi_div_state->vertexBindingDivisorCount; i++) {
2119 const VkVertexInputBindingDivisorDescriptionEXT *desc =
2120 &vi_div_state->pVertexBindingDivisors[i];
2121
2122 pipeline->vb[desc->binding].instance_divisor = desc->divisor;
2123 }
2124 }
2125
2126 /* Our implementation of VK_KHR_multiview uses instancing to draw the
2127 * different views. If the client asks for instancing, we need to multiply
2128 * the instance divisor by the number of views to ensure that we repeat the
2129 * client's per-instance data once for each view.
2130 */
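/* For example, with a view mask covering two views, a client-specified
 * divisor of 3 is programmed as 6, so the same per-instance data is fetched
 * for both views of each client instance.
 */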
2131 if (pipeline->subpass->view_mask && !pipeline->use_primitive_replication) {
2132 const uint32_t view_count = anv_subpass_view_count(pipeline->subpass);
2133 for (uint32_t vb = 0; vb < MAX_VBS; vb++) {
2134 if (pipeline->vb[vb].instanced)
2135 pipeline->vb[vb].instance_divisor *= view_count;
2136 }
2137 }
2138
2139 const VkPipelineInputAssemblyStateCreateInfo *ia_info =
2140 pCreateInfo->pInputAssemblyState;
2141 const VkPipelineTessellationStateCreateInfo *tess_info =
2142 pCreateInfo->pTessellationState;
2143 pipeline->primitive_restart = ia_info->primitiveRestartEnable;
2144
2145 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
2146 pipeline->topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
2147 else
2148 pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
2149
2150 return VK_SUCCESS;
2151 }
2152
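/* WRITE_STR fills the fixed-size name/description arrays of the KHR
 * pipeline-executable structures and asserts that nothing was truncated,
 * e.g. WRITE_STR(props->name, "SIMD%d %s", 16, "fragment").
 */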
2153 #define WRITE_STR(field, ...) ({ \
2154 memset(field, 0, sizeof(field)); \
2155 UNUSED int i = snprintf(field, sizeof(field), __VA_ARGS__); \
2156 assert(i > 0 && i < sizeof(field)); \
2157 })
2158
2159 VkResult anv_GetPipelineExecutablePropertiesKHR(
2160 VkDevice device,
2161 const VkPipelineInfoKHR* pPipelineInfo,
2162 uint32_t* pExecutableCount,
2163 VkPipelineExecutablePropertiesKHR* pProperties)
2164 {
2165 ANV_FROM_HANDLE(anv_pipeline, pipeline, pPipelineInfo->pipeline);
2166 VK_OUTARRAY_MAKE(out, pProperties, pExecutableCount);
2167
2168 util_dynarray_foreach (&pipeline->executables, struct anv_pipeline_executable, exe) {
2169 vk_outarray_append(&out, props) {
2170 gl_shader_stage stage = exe->stage;
2171 props->stages = mesa_to_vk_shader_stage(stage);
2172
2173 unsigned simd_width = exe->stats.dispatch_width;
2174 if (stage == MESA_SHADER_FRAGMENT) {
2175 WRITE_STR(props->name, "%s%d %s",
2176 simd_width ? "SIMD" : "vec",
2177 simd_width ? simd_width : 4,
2178 _mesa_shader_stage_to_string(stage));
2179 } else {
2180 WRITE_STR(props->name, "%s", _mesa_shader_stage_to_string(stage));
2181 }
2182 WRITE_STR(props->description, "%s%d %s shader",
2183 simd_width ? "SIMD" : "vec",
2184 simd_width ? simd_width : 4,
2185 _mesa_shader_stage_to_string(stage));
2186
2187 /* The compiler gives us a dispatch width of 0 for vec4 but Vulkan
2188 * wants a subgroup size of 1.
2189 */
2190 props->subgroupSize = MAX2(simd_width, 1);
2191 }
2192 }
2193
2194 return vk_outarray_status(&out);
2195 }
2196
2197 static const struct anv_pipeline_executable *
2198 anv_pipeline_get_executable(struct anv_pipeline *pipeline, uint32_t index)
2199 {
2200 assert(index < util_dynarray_num_elements(&pipeline->executables,
2201 struct anv_pipeline_executable));
2202 return util_dynarray_element(
2203 &pipeline->executables, struct anv_pipeline_executable, index);
2204 }
2205
2206 VkResult anv_GetPipelineExecutableStatisticsKHR(
2207 VkDevice device,
2208 const VkPipelineExecutableInfoKHR* pExecutableInfo,
2209 uint32_t* pStatisticCount,
2210 VkPipelineExecutableStatisticKHR* pStatistics)
2211 {
2212 ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
2213 VK_OUTARRAY_MAKE(out, pStatistics, pStatisticCount);
2214
2215 const struct anv_pipeline_executable *exe =
2216 anv_pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);
2217
2218 const struct brw_stage_prog_data *prog_data;
2219 switch (pipeline->type) {
2220 case ANV_PIPELINE_GRAPHICS: {
2221 prog_data = anv_pipeline_to_graphics(pipeline)->shaders[exe->stage]->prog_data;
2222 break;
2223 }
2224 case ANV_PIPELINE_COMPUTE: {
2225 prog_data = anv_pipeline_to_compute(pipeline)->cs->prog_data;
2226 break;
2227 }
2228 default:
2229 unreachable("invalid pipeline type");
2230 }
2231
2232 vk_outarray_append(&out, stat) {
2233 WRITE_STR(stat->name, "Instruction Count");
2234 WRITE_STR(stat->description,
2235 "Number of GEN instructions in the final generated "
2236 "shader executable.");
2237 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2238 stat->value.u64 = exe->stats.instructions;
2239 }
2240
2241 vk_outarray_append(&out, stat) {
2242 WRITE_STR(stat->name, "SEND Count");
2243 WRITE_STR(stat->description,
2244 "Number of instructions in the final generated shader "
2245 "executable which access external units such as the "
2246 "constant cache or the sampler.");
2247 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2248 stat->value.u64 = exe->stats.sends;
2249 }
2250
2251 vk_outarray_append(&out, stat) {
2252 WRITE_STR(stat->name, "Loop Count");
2253 WRITE_STR(stat->description,
2254 "Number of loops (not unrolled) in the final generated "
2255 "shader executable.");
2256 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2257 stat->value.u64 = exe->stats.loops;
2258 }
2259
2260 vk_outarray_append(&out, stat) {
2261 WRITE_STR(stat->name, "Cycle Count");
2262 WRITE_STR(stat->description,
2263 "Estimate of the number of EU cycles required to execute "
2264 "the final generated executable. This is an estimate only "
2265 "and may vary greatly from actual run-time performance.");
2266 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2267 stat->value.u64 = exe->stats.cycles;
2268 }
2269
2270 vk_outarray_append(&out, stat) {
2271 WRITE_STR(stat->name, "Spill Count");
2272 WRITE_STR(stat->description,
2273 "Number of scratch spill operations. This gives a rough "
2274 "estimate of the cost incurred due to spilling temporary "
2275 "values to memory. If this is non-zero, you may want to "
2276 "adjust your shader to reduce register pressure.");
2277 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2278 stat->value.u64 = exe->stats.spills;
2279 }
2280
2281 vk_outarray_append(&out, stat) {
2282 WRITE_STR(stat->name, "Fill Count");
2283 WRITE_STR(stat->description,
2284 "Number of scratch fill operations. This gives a rough "
2285 "estimate of the cost incurred due to spilling temporary "
2286 "values to memory. If this is non-zero, you may want to "
2287 "adjust your shader to reduce register pressure.");
2288 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2289 stat->value.u64 = exe->stats.fills;
2290 }
2291
2292 vk_outarray_append(&out, stat) {
2293 WRITE_STR(stat->name, "Scratch Memory Size");
2294 WRITE_STR(stat->description,
2295 "Number of bytes of scratch memory required by the "
2296 "generated shader executable. If this is non-zero, you "
2297 "may want to adjust your shader to reduce register "
2298 "pressure.");
2299 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2300 stat->value.u64 = prog_data->total_scratch;
2301 }
2302
2303 if (exe->stage == MESA_SHADER_COMPUTE) {
2304 vk_outarray_append(&out, stat) {
2305 WRITE_STR(stat->name, "Workgroup Memory Size");
2306 WRITE_STR(stat->description,
2307 "Number of bytes of workgroup shared memory used by this "
2308 "compute shader including any padding.");
2309 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2310 stat->value.u64 = brw_cs_prog_data_const(prog_data)->slm_size;
2311 }
2312 }
2313
2314 return vk_outarray_status(&out);
2315 }
2316
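/* Copy an IR string into a VkPipelineExecutableInternalRepresentationKHR
 * following the usual Vulkan two-call idiom: with pData == NULL only the
 * required size is reported; otherwise up to dataSize bytes are copied and
 * false is returned on truncation, which the callers turn into VK_INCOMPLETE.
 */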
2317 static bool
2318 write_ir_text(VkPipelineExecutableInternalRepresentationKHR* ir,
2319 const char *data)
2320 {
2321 ir->isText = VK_TRUE;
2322
2323 size_t data_len = strlen(data) + 1;
2324
2325 if (ir->pData == NULL) {
2326 ir->dataSize = data_len;
2327 return true;
2328 }
2329
2330 strncpy(ir->pData, data, ir->dataSize);
2331 if (ir->dataSize < data_len)
2332 return false;
2333
2334 ir->dataSize = data_len;
2335 return true;
2336 }
2337
2338 VkResult anv_GetPipelineExecutableInternalRepresentationsKHR(
2339 VkDevice device,
2340 const VkPipelineExecutableInfoKHR* pExecutableInfo,
2341 uint32_t* pInternalRepresentationCount,
2342 VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
2343 {
2344 ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
2345 VK_OUTARRAY_MAKE(out, pInternalRepresentations,
2346 pInternalRepresentationCount);
2347 bool incomplete_text = false;
2348
2349 const struct anv_pipeline_executable *exe =
2350 anv_pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);
2351
2352 if (exe->nir) {
2353 vk_outarray_append(&out, ir) {
2354 WRITE_STR(ir->name, "Final NIR");
2355 WRITE_STR(ir->description,
2356 "Final NIR before going into the back-end compiler");
2357
2358 if (!write_ir_text(ir, exe->nir))
2359 incomplete_text = true;
2360 }
2361 }
2362
2363 if (exe->disasm) {
2364 vk_outarray_append(&out, ir) {
2365 WRITE_STR(ir->name, "GEN Assembly");
2366 WRITE_STR(ir->description,
2367 "Final GEN assembly for the generated shader binary");
2368
2369 if (!write_ir_text(ir, exe->disasm))
2370 incomplete_text = true;
2371 }
2372 }
2373
2374 return incomplete_text ? VK_INCOMPLETE : vk_outarray_status(&out);
2375 }