src/intel/vulkan/anv_pipeline.c (mesa.git)
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "util/mesa-sha1.h"
31 #include "util/os_time.h"
32 #include "common/gen_l3_config.h"
33 #include "common/gen_disasm.h"
34 #include "anv_private.h"
35 #include "compiler/brw_nir.h"
36 #include "anv_nir.h"
37 #include "nir/nir_xfb_info.h"
38 #include "spirv/nir_spirv.h"
39 #include "vk_util.h"
40
41 /* Needed for SWIZZLE macros */
42 #include "program/prog_instruction.h"
43
44 // Shader functions
45
46 VkResult anv_CreateShaderModule(
47 VkDevice _device,
48 const VkShaderModuleCreateInfo* pCreateInfo,
49 const VkAllocationCallbacks* pAllocator,
50 VkShaderModule* pShaderModule)
51 {
52 ANV_FROM_HANDLE(anv_device, device, _device);
53 struct anv_shader_module *module;
54
55 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
56 assert(pCreateInfo->flags == 0);
57
58 module = vk_alloc2(&device->alloc, pAllocator,
59 sizeof(*module) + pCreateInfo->codeSize, 8,
60 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
61 if (module == NULL)
62 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
63
64 module->size = pCreateInfo->codeSize;
65 memcpy(module->data, pCreateInfo->pCode, module->size);
66
67 _mesa_sha1_compute(module->data, module->size, module->sha1);
68
69 *pShaderModule = anv_shader_module_to_handle(module);
70
71 return VK_SUCCESS;
72 }
73
74 void anv_DestroyShaderModule(
75 VkDevice _device,
76 VkShaderModule _module,
77 const VkAllocationCallbacks* pAllocator)
78 {
79 ANV_FROM_HANDLE(anv_device, device, _device);
80 ANV_FROM_HANDLE(anv_shader_module, module, _module);
81
82 if (!module)
83 return;
84
85 vk_free2(&device->alloc, pAllocator, module);
86 }
87
88 #define SPIR_V_MAGIC_NUMBER 0x07230203
89
90 static const uint64_t stage_to_debug[] = {
91 [MESA_SHADER_VERTEX] = DEBUG_VS,
92 [MESA_SHADER_TESS_CTRL] = DEBUG_TCS,
93 [MESA_SHADER_TESS_EVAL] = DEBUG_TES,
94 [MESA_SHADER_GEOMETRY] = DEBUG_GS,
95 [MESA_SHADER_FRAGMENT] = DEBUG_WM,
96 [MESA_SHADER_COMPUTE] = DEBUG_CS,
97 };
98
99 struct anv_spirv_debug_data {
100 struct anv_device *device;
101 const struct anv_shader_module *module;
102 };
103
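/* Debug callback handed to spirv_to_nir: forwards SPIR-V front-end
 * messages to the VK_EXT_debug_report callbacks, tagging them with the
 * shader module handle and the offending SPIR-V byte offset.
 */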
104 static void anv_spirv_nir_debug(void *private_data,
105 enum nir_spirv_debug_level level,
106 size_t spirv_offset,
107 const char *message)
108 {
109 struct anv_spirv_debug_data *debug_data = private_data;
110 static const VkDebugReportFlagsEXT vk_flags[] = {
111 [NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
112 [NIR_SPIRV_DEBUG_LEVEL_WARNING] = VK_DEBUG_REPORT_WARNING_BIT_EXT,
113 [NIR_SPIRV_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
114 };
115 char buffer[256];
116
117 snprintf(buffer, sizeof(buffer), "SPIR-V offset %lu: %s", (unsigned long) spirv_offset, message);
118
119 vk_debug_report(&debug_data->device->instance->debug_report_callbacks,
120 vk_flags[level],
121 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
122 (uint64_t) (uintptr_t) debug_data->module,
123 0, 0, "anv", buffer);
124 }
125
126 /* Eventually, this will become part of anv_CreateShader. Unfortunately,
127 * we can't do that yet because we don't have the ability to copy nir.
128 */
129 static nir_shader *
130 anv_shader_compile_to_nir(struct anv_device *device,
131 void *mem_ctx,
132 const struct anv_shader_module *module,
133 const char *entrypoint_name,
134 gl_shader_stage stage,
135 const VkSpecializationInfo *spec_info)
136 {
137 const struct anv_physical_device *pdevice = device->physical;
138 const struct brw_compiler *compiler = pdevice->compiler;
139 const nir_shader_compiler_options *nir_options =
140 compiler->glsl_compiler_options[stage].NirOptions;
141
142 uint32_t *spirv = (uint32_t *) module->data;
143 assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
144 assert(module->size % 4 == 0);
145
146 uint32_t num_spec_entries = 0;
147 struct nir_spirv_specialization *spec_entries = NULL;
148 if (spec_info && spec_info->mapEntryCount > 0) {
149 num_spec_entries = spec_info->mapEntryCount;
150 spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
151 for (uint32_t i = 0; i < num_spec_entries; i++) {
152 VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
153 const void *data = spec_info->pData + entry.offset;
154 assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
155
156 spec_entries[i].id = spec_info->pMapEntries[i].constantID;
157 if (entry.size == 8)
158 spec_entries[i].data64 = *(const uint64_t *)data;
159 else
160 spec_entries[i].data32 = *(const uint32_t *)data;
161 }
162 }
163
164 struct anv_spirv_debug_data spirv_debug_data = {
165 .device = device,
166 .module = module,
167 };
168 struct spirv_to_nir_options spirv_options = {
169 .frag_coord_is_sysval = true,
170 .use_scoped_memory_barrier = true,
171 .caps = {
172 .demote_to_helper_invocation = true,
173 .derivative_group = true,
174 .descriptor_array_dynamic_indexing = true,
175 .descriptor_array_non_uniform_indexing = true,
176 .descriptor_indexing = true,
177 .device_group = true,
178 .draw_parameters = true,
179 .float16 = pdevice->info.gen >= 8,
180 .float64 = pdevice->info.gen >= 8,
181 .fragment_shader_sample_interlock = pdevice->info.gen >= 9,
182 .fragment_shader_pixel_interlock = pdevice->info.gen >= 9,
183 .geometry_streams = true,
184 .image_write_without_format = true,
185 .int8 = pdevice->info.gen >= 8,
186 .int16 = pdevice->info.gen >= 8,
187 .int64 = pdevice->info.gen >= 8,
188 .int64_atomics = pdevice->info.gen >= 9 && pdevice->use_softpin,
189 .min_lod = true,
190 .multiview = true,
191 .physical_storage_buffer_address = pdevice->has_a64_buffer_access,
192 .post_depth_coverage = pdevice->info.gen >= 9,
193 .runtime_descriptor_array = true,
194 .float_controls = pdevice->info.gen >= 8,
195 .shader_clock = true,
196 .shader_viewport_index_layer = true,
197 .stencil_export = pdevice->info.gen >= 9,
198 .storage_8bit = pdevice->info.gen >= 8,
199 .storage_16bit = pdevice->info.gen >= 8,
200 .subgroup_arithmetic = true,
201 .subgroup_basic = true,
202 .subgroup_ballot = true,
203 .subgroup_quad = true,
204 .subgroup_shuffle = true,
205 .subgroup_vote = true,
206 .tessellation = true,
207 .transform_feedback = pdevice->info.gen >= 8,
208 .variable_pointers = true,
209 .vk_memory_model = true,
210 .vk_memory_model_device_scope = true,
211 },
212 .ubo_addr_format = nir_address_format_32bit_index_offset,
213 .ssbo_addr_format =
214 anv_nir_ssbo_addr_format(pdevice, device->robust_buffer_access),
215 .phys_ssbo_addr_format = nir_address_format_64bit_global,
216 .push_const_addr_format = nir_address_format_logical,
217
218 /* TODO: Consider changing this to an address format where the NULL
219 * pointer equals 0. That might be a better format to play nice
220 * with certain code / code generators.
221 */
222 .shared_addr_format = nir_address_format_32bit_offset,
223 .debug = {
224 .func = anv_spirv_nir_debug,
225 .private_data = &spirv_debug_data,
226 },
227 };
228
229
230 nir_shader *nir =
231 spirv_to_nir(spirv, module->size / 4,
232 spec_entries, num_spec_entries,
233 stage, entrypoint_name, &spirv_options, nir_options);
234 assert(nir->info.stage == stage);
235 nir_validate_shader(nir, "after spirv_to_nir");
236 ralloc_steal(mem_ctx, nir);
237
238 free(spec_entries);
239
240 if (unlikely(INTEL_DEBUG & stage_to_debug[stage])) {
241 fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
242 gl_shader_stage_name(stage));
243 nir_print_shader(nir, stderr);
244 }
245
246 /* We have to lower away local constant initializers right before we
247 * inline functions. That way they get properly initialized at the top
248 * of the function and not at the top of its caller.
249 */
250 NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
251 NIR_PASS_V(nir, nir_lower_returns);
252 NIR_PASS_V(nir, nir_inline_functions);
253 NIR_PASS_V(nir, nir_opt_deref);
254
255 /* Pick off the single entrypoint that we want */
256 foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
257 if (!func->is_entrypoint)
258 exec_node_remove(&func->node);
259 }
260 assert(exec_list_length(&nir->functions) == 1);
261
262 /* Now that we've deleted all but the main function, we can go ahead and
263 * lower the rest of the constant initializers. We do this here so that
264 * nir_remove_dead_variables and split_per_member_structs below see the
265 * corresponding stores.
266 */
267 NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
268
269 /* Split member structs. We do this before lower_io_to_temporaries so that
270 * it doesn't lower system values to temporaries by accident.
271 */
272 NIR_PASS_V(nir, nir_split_var_copies);
273 NIR_PASS_V(nir, nir_split_per_member_structs);
274
275 NIR_PASS_V(nir, nir_remove_dead_variables,
276 nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
277
278 NIR_PASS_V(nir, nir_propagate_invariant);
279 NIR_PASS_V(nir, nir_lower_io_to_temporaries,
280 nir_shader_get_entrypoint(nir), true, false);
281
282 NIR_PASS_V(nir, nir_lower_frexp);
283
284 /* Vulkan uses the separate-shader linking model */
285 nir->info.separate_shader = true;
286
287 brw_preprocess_nir(compiler, nir, NULL);
288
289 return nir;
290 }
291
292 void anv_DestroyPipeline(
293 VkDevice _device,
294 VkPipeline _pipeline,
295 const VkAllocationCallbacks* pAllocator)
296 {
297 ANV_FROM_HANDLE(anv_device, device, _device);
298 ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
299
300 if (!pipeline)
301 return;
302
303 anv_reloc_list_finish(&pipeline->batch_relocs,
304 pAllocator ? pAllocator : &device->alloc);
305
306 ralloc_free(pipeline->mem_ctx);
307
308 if (pipeline->blend_state.map)
309 anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
310
311 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
312 if (pipeline->shaders[s])
313 anv_shader_bin_unref(device, pipeline->shaders[s]);
314 }
315
316 vk_free2(&device->alloc, pAllocator, pipeline);
317 }
318
319 static const uint32_t vk_to_gen_primitive_type[] = {
320 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
321 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
322 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
323 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
324 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
325 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
326 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
327 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
328 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
329 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
330 };
331
332 static void
333 populate_sampler_prog_key(const struct gen_device_info *devinfo,
334 struct brw_sampler_prog_key_data *key)
335 {
336 /* Almost all multisampled textures are compressed. The only time when we
337 * don't compress a multisampled texture is for 16x MSAA with a surface
338 * width greater than 8k, which is a bit of an edge case. Since the sampler
339 * just ignores the MCS parameter to ld2ms when MCS is disabled, it's safe
340 * to tell the compiler to always assume compression.
341 */
342 key->compressed_multisample_layout_mask = ~0;
343
344 /* SkyLake added support for 16x MSAA. With this came a new message for
345 * reading from a 16x MSAA surface with compression. The new message was
346 * needed because now the MCS data is 64 bits instead of 32 or lower as is
347 * the case for 8x, 4x, and 2x. The key->msaa_16 bit-field controls which
348 * message we use. Fortunately, the 16x message works for 8x, 4x, and 2x
349 * so we can just use it unconditionally. This may not be quite as
350 * efficient but it saves us from recompiling.
351 */
352 if (devinfo->gen >= 9)
353 key->msaa_16 = ~0;
354
355 /* XXX: Handle texture swizzle on HSW- */
356 for (int i = 0; i < MAX_SAMPLERS; i++) {
357 /* Assume color sampler, no swizzling. (Works for BDW+) */
358 key->swizzles[i] = SWIZZLE_XYZW;
359 }
360 }
361
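/* Fill out the key fields common to every shader stage: the subgroup size
 * handling requested via the stage create flags and the sampler key.
 */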
362 static void
363 populate_base_prog_key(const struct gen_device_info *devinfo,
364 VkPipelineShaderStageCreateFlags flags,
365 struct brw_base_prog_key *key)
366 {
367 if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
368 key->subgroup_size_type = BRW_SUBGROUP_SIZE_VARYING;
369 else
370 key->subgroup_size_type = BRW_SUBGROUP_SIZE_API_CONSTANT;
371
372 populate_sampler_prog_key(devinfo, &key->tex);
373 }
374
375 static void
376 populate_vs_prog_key(const struct gen_device_info *devinfo,
377 VkPipelineShaderStageCreateFlags flags,
378 struct brw_vs_prog_key *key)
379 {
380 memset(key, 0, sizeof(*key));
381
382 populate_base_prog_key(devinfo, flags, &key->base);
383
384 /* XXX: Handle vertex input work-arounds */
385
386 /* XXX: Handle sampler_prog_key */
387 }
388
389 static void
390 populate_tcs_prog_key(const struct gen_device_info *devinfo,
391 VkPipelineShaderStageCreateFlags flags,
392 unsigned input_vertices,
393 struct brw_tcs_prog_key *key)
394 {
395 memset(key, 0, sizeof(*key));
396
397 populate_base_prog_key(devinfo, flags, &key->base);
398
399 key->input_vertices = input_vertices;
400 }
401
402 static void
403 populate_tes_prog_key(const struct gen_device_info *devinfo,
404 VkPipelineShaderStageCreateFlags flags,
405 struct brw_tes_prog_key *key)
406 {
407 memset(key, 0, sizeof(*key));
408
409 populate_base_prog_key(devinfo, flags, &key->base);
410 }
411
412 static void
413 populate_gs_prog_key(const struct gen_device_info *devinfo,
414 VkPipelineShaderStageCreateFlags flags,
415 struct brw_gs_prog_key *key)
416 {
417 memset(key, 0, sizeof(*key));
418
419 populate_base_prog_key(devinfo, flags, &key->base);
420 }
421
422 static void
423 populate_wm_prog_key(const struct gen_device_info *devinfo,
424 VkPipelineShaderStageCreateFlags flags,
425 const struct anv_subpass *subpass,
426 const VkPipelineMultisampleStateCreateInfo *ms_info,
427 struct brw_wm_prog_key *key)
428 {
429 memset(key, 0, sizeof(*key));
430
431 populate_base_prog_key(devinfo, flags, &key->base);
432
433 /* We set this to 0 here and set it to the actual value before we call
434 * brw_compile_fs.
435 */
436 key->input_slots_valid = 0;
437
438 /* Vulkan doesn't specify a default */
439 key->high_quality_derivatives = false;
440
441 /* XXX Vulkan doesn't appear to specify */
442 key->clamp_fragment_color = false;
443
444 assert(subpass->color_count <= MAX_RTS);
445 for (uint32_t i = 0; i < subpass->color_count; i++) {
446 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
447 key->color_outputs_valid |= (1 << i);
448 }
449
450 key->nr_color_regions = subpass->color_count;
451
452 /* To reduce the number of possible shader recompilations, we would need
453 * to know whether the shader writes a SampleMask output. That would let
454 * us decide whether to emit the workaround for hardware disabling alpha
455 * to coverage when a SampleMask output is present.
456 */
457 key->alpha_to_coverage = ms_info && ms_info->alphaToCoverageEnable;
458
459 /* Vulkan doesn't support fixed-function alpha test */
460 key->alpha_test_replicate_alpha = false;
461
462 if (ms_info) {
463 /* We should probably pull this out of the shader, but it's fairly
464 * harmless to compute it and then let dead-code take care of it.
465 */
466 if (ms_info->rasterizationSamples > 1) {
467 key->persample_interp = ms_info->sampleShadingEnable &&
468 (ms_info->minSampleShading * ms_info->rasterizationSamples) > 1;
469 key->multisample_fbo = true;
470 }
471
472 key->frag_coord_adds_sample_pos = key->persample_interp;
473 }
474 }
475
476 static void
477 populate_cs_prog_key(const struct gen_device_info *devinfo,
478 VkPipelineShaderStageCreateFlags flags,
479 const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *rss_info,
480 struct brw_cs_prog_key *key)
481 {
482 memset(key, 0, sizeof(*key));
483
484 populate_base_prog_key(devinfo, flags, &key->base);
485
486 if (rss_info) {
487 assert(key->base.subgroup_size_type != BRW_SUBGROUP_SIZE_VARYING);
488
489 /* These enum values are expressly chosen to be equal to the subgroup
490 * size that they require.
491 */
492 assert(rss_info->requiredSubgroupSize == 8 ||
493 rss_info->requiredSubgroupSize == 16 ||
494 rss_info->requiredSubgroupSize == 32);
495 key->base.subgroup_size_type = rss_info->requiredSubgroupSize;
496 } else if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT) {
497 /* If the client expressly requests full subgroups and they don't
498 * specify a subgroup size, we need to pick one. If they've requested
499 * varying subgroup sizes, we set it to UNIFORM and let the back-end
500 * compiler pick. Otherwise, we specify the API value of 32.
501 * Performance will likely be terrible in this case but there's nothing
502 * we can do about that. The client should have chosen a size.
503 */
504 if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
505 key->base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM;
506 else
507 key->base.subgroup_size_type = BRW_SUBGROUP_SIZE_REQUIRE_32;
508 }
509 }
510
511 struct anv_pipeline_stage {
512 gl_shader_stage stage;
513
514 const struct anv_shader_module *module;
515 const char *entrypoint;
516 const VkSpecializationInfo *spec_info;
517
518 unsigned char shader_sha1[20];
519
520 union brw_any_prog_key key;
521
522 struct {
523 gl_shader_stage stage;
524 unsigned char sha1[20];
525 } cache_key;
526
527 nir_shader *nir;
528
529 struct anv_pipeline_binding surface_to_descriptor[256];
530 struct anv_pipeline_binding sampler_to_descriptor[256];
531 struct anv_pipeline_bind_map bind_map;
532
533 union brw_any_prog_data prog_data;
534
535 uint32_t num_stats;
536 struct brw_compile_stats stats[3];
537 char *disasm[3];
538
539 VkPipelineCreationFeedbackEXT feedback;
540
541 const unsigned *code;
542 };
543
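/* Hash everything that uniquely identifies a single shader stage: the
 * module's SHA-1, the entrypoint name, the stage, and any specialization
 * constants.
 */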
544 static void
545 anv_pipeline_hash_shader(const struct anv_shader_module *module,
546 const char *entrypoint,
547 gl_shader_stage stage,
548 const VkSpecializationInfo *spec_info,
549 unsigned char *sha1_out)
550 {
551 struct mesa_sha1 ctx;
552 _mesa_sha1_init(&ctx);
553
554 _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
555 _mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
556 _mesa_sha1_update(&ctx, &stage, sizeof(stage));
557 if (spec_info) {
558 _mesa_sha1_update(&ctx, spec_info->pMapEntries,
559 spec_info->mapEntryCount *
560 sizeof(*spec_info->pMapEntries));
561 _mesa_sha1_update(&ctx, spec_info->pData,
562 spec_info->dataSize);
563 }
564
565 _mesa_sha1_final(&ctx, sha1_out);
566 }
567
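/* Compute the cache key for a graphics pipeline: subpass view mask,
 * pipeline layout, robust-buffer-access setting, and each active stage's
 * shader hash and program key.
 */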
568 static void
569 anv_pipeline_hash_graphics(struct anv_pipeline *pipeline,
570 struct anv_pipeline_layout *layout,
571 struct anv_pipeline_stage *stages,
572 unsigned char *sha1_out)
573 {
574 struct mesa_sha1 ctx;
575 _mesa_sha1_init(&ctx);
576
577 _mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
578 sizeof(pipeline->subpass->view_mask));
579
580 if (layout)
581 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
582
583 const bool rba = pipeline->device->robust_buffer_access;
584 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
585
586 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
587 if (stages[s].entrypoint) {
588 _mesa_sha1_update(&ctx, stages[s].shader_sha1,
589 sizeof(stages[s].shader_sha1));
590 _mesa_sha1_update(&ctx, &stages[s].key, brw_prog_key_size(s));
591 }
592 }
593
594 _mesa_sha1_final(&ctx, sha1_out);
595 }
596
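/* Compute the cache key for a compute pipeline: pipeline layout,
 * robust-buffer-access setting, shader hash, and the CS program key.
 */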
597 static void
598 anv_pipeline_hash_compute(struct anv_pipeline *pipeline,
599 struct anv_pipeline_layout *layout,
600 struct anv_pipeline_stage *stage,
601 unsigned char *sha1_out)
602 {
603 struct mesa_sha1 ctx;
604 _mesa_sha1_init(&ctx);
605
606 if (layout)
607 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
608
609 const bool rba = pipeline->device->robust_buffer_access;
610 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
611
612 _mesa_sha1_update(&ctx, stage->shader_sha1,
613 sizeof(stage->shader_sha1));
614 _mesa_sha1_update(&ctx, &stage->key.cs, sizeof(stage->key.cs));
615
616 _mesa_sha1_final(&ctx, sha1_out);
617 }
618
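/* Get the NIR for a stage, first searching the pipeline cache by shader
 * SHA-1 and, on a miss, compiling the SPIR-V and uploading the resulting
 * NIR to the cache.
 */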
619 static nir_shader *
620 anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
621 struct anv_pipeline_cache *cache,
622 void *mem_ctx,
623 struct anv_pipeline_stage *stage)
624 {
625 const struct brw_compiler *compiler =
626 pipeline->device->physical->compiler;
627 const nir_shader_compiler_options *nir_options =
628 compiler->glsl_compiler_options[stage->stage].NirOptions;
629 nir_shader *nir;
630
631 nir = anv_device_search_for_nir(pipeline->device, cache,
632 nir_options,
633 stage->shader_sha1,
634 mem_ctx);
635 if (nir) {
636 assert(nir->info.stage == stage->stage);
637 return nir;
638 }
639
640 nir = anv_shader_compile_to_nir(pipeline->device,
641 mem_ctx,
642 stage->module,
643 stage->entrypoint,
644 stage->stage,
645 stage->spec_info);
646 if (nir) {
647 anv_device_upload_nir(pipeline->device, cache, nir, stage->shader_sha1);
648 return nir;
649 }
650
651 return NULL;
652 }
653
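/* Apply the lowering passes that depend on the pipeline layout and device:
 * Y'CbCr conversions, multiview, image load/store, descriptor and binding
 * table layout, explicit UBO/SSBO/global address formats, and the push
 * constant layout.
 */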
654 static void
655 anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
656 void *mem_ctx,
657 struct anv_pipeline_stage *stage,
658 struct anv_pipeline_layout *layout)
659 {
660 const struct anv_physical_device *pdevice = pipeline->device->physical;
661 const struct brw_compiler *compiler = pdevice->compiler;
662
663 struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
664 nir_shader *nir = stage->nir;
665
666 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
667 NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);
668 NIR_PASS_V(nir, nir_lower_input_attachments, true);
669 }
670
671 NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
672
673 if (nir->info.stage != MESA_SHADER_COMPUTE)
674 NIR_PASS_V(nir, anv_nir_lower_multiview, pipeline->subpass->view_mask);
675
676 nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
677
678 if (nir->info.num_ssbos > 0 || nir->info.num_images > 0)
679 pipeline->needs_data_cache = true;
680
681 NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo);
682
683 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
684 nir_address_format_64bit_global);
685
686 /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
687 anv_nir_apply_pipeline_layout(pdevice,
688 pipeline->device->robust_buffer_access,
689 layout, nir, &stage->bind_map);
690
691 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo,
692 nir_address_format_32bit_index_offset);
693 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
694 anv_nir_ssbo_addr_format(pdevice,
695 pipeline->device->robust_buffer_access));
696
697 NIR_PASS_V(nir, nir_opt_constant_folding);
698
699 /* We don't support non-uniform UBOs, and non-uniform SSBO access is
700 * handled naturally by falling back to A64 messages.
701 */
702 NIR_PASS_V(nir, nir_lower_non_uniform_access,
703 nir_lower_non_uniform_texture_access |
704 nir_lower_non_uniform_image_access);
705
706 anv_nir_compute_push_layout(pdevice, nir, prog_data,
707 &stage->bind_map, mem_ctx);
708
709 stage->nir = nir;
710 }
711
712 static void
713 anv_pipeline_link_vs(const struct brw_compiler *compiler,
714 struct anv_pipeline_stage *vs_stage,
715 struct anv_pipeline_stage *next_stage)
716 {
717 if (next_stage)
718 brw_nir_link_shaders(compiler, vs_stage->nir, next_stage->nir);
719 }
720
721 static void
722 anv_pipeline_compile_vs(const struct brw_compiler *compiler,
723 void *mem_ctx,
724 struct anv_device *device,
725 struct anv_pipeline_stage *vs_stage)
726 {
727 brw_compute_vue_map(compiler->devinfo,
728 &vs_stage->prog_data.vs.base.vue_map,
729 vs_stage->nir->info.outputs_written,
730 vs_stage->nir->info.separate_shader);
731
732 vs_stage->num_stats = 1;
733 vs_stage->code = brw_compile_vs(compiler, device, mem_ctx,
734 &vs_stage->key.vs,
735 &vs_stage->prog_data.vs,
736 vs_stage->nir, -1,
737 vs_stage->stats, NULL);
738 }
739
740 static void
741 merge_tess_info(struct shader_info *tes_info,
742 const struct shader_info *tcs_info)
743 {
744 /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
745 *
746 * "PointMode. Controls generation of points rather than triangles
747 * or lines. This functionality defaults to disabled, and is
748 * enabled if either shader stage includes the execution mode."
749 *
750 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
751 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
752 * and OutputVertices, it says:
753 *
754 * "One mode must be set in at least one of the tessellation
755 * shader stages."
756 *
757 * So, the fields can be set in either the TCS or TES, but they must
758 * agree if set in both. Our backend looks at TES, so bitwise-or in
759 * the values from the TCS.
760 */
761 assert(tcs_info->tess.tcs_vertices_out == 0 ||
762 tes_info->tess.tcs_vertices_out == 0 ||
763 tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
764 tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
765
766 assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
767 tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
768 tcs_info->tess.spacing == tes_info->tess.spacing);
769 tes_info->tess.spacing |= tcs_info->tess.spacing;
770
771 assert(tcs_info->tess.primitive_mode == 0 ||
772 tes_info->tess.primitive_mode == 0 ||
773 tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
774 tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
775 tes_info->tess.ccw |= tcs_info->tess.ccw;
776 tes_info->tess.point_mode |= tcs_info->tess.point_mode;
777 }
778
779 static void
780 anv_pipeline_link_tcs(const struct brw_compiler *compiler,
781 struct anv_pipeline_stage *tcs_stage,
782 struct anv_pipeline_stage *tes_stage)
783 {
784 assert(tes_stage && tes_stage->stage == MESA_SHADER_TESS_EVAL);
785
786 brw_nir_link_shaders(compiler, tcs_stage->nir, tes_stage->nir);
787
788 nir_lower_patch_vertices(tes_stage->nir,
789 tcs_stage->nir->info.tess.tcs_vertices_out,
790 NULL);
791
792 /* Copy TCS info into the TES info */
793 merge_tess_info(&tes_stage->nir->info, &tcs_stage->nir->info);
794
795 /* Whacking the key after cache lookup is a bit sketchy, but all of
796 * this comes from the SPIR-V, which is part of the hash used for the
797 * pipeline cache. So it should be safe.
798 */
799 tcs_stage->key.tcs.tes_primitive_mode =
800 tes_stage->nir->info.tess.primitive_mode;
801 tcs_stage->key.tcs.quads_workaround =
802 compiler->devinfo->gen < 9 &&
803 tes_stage->nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
804 tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
805 }
806
807 static void
808 anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
809 void *mem_ctx,
810 struct anv_device *device,
811 struct anv_pipeline_stage *tcs_stage,
812 struct anv_pipeline_stage *prev_stage)
813 {
814 tcs_stage->key.tcs.outputs_written =
815 tcs_stage->nir->info.outputs_written;
816 tcs_stage->key.tcs.patch_outputs_written =
817 tcs_stage->nir->info.patch_outputs_written;
818
819 tcs_stage->num_stats = 1;
820 tcs_stage->code = brw_compile_tcs(compiler, device, mem_ctx,
821 &tcs_stage->key.tcs,
822 &tcs_stage->prog_data.tcs,
823 tcs_stage->nir, -1,
824 tcs_stage->stats, NULL);
825 }
826
827 static void
828 anv_pipeline_link_tes(const struct brw_compiler *compiler,
829 struct anv_pipeline_stage *tes_stage,
830 struct anv_pipeline_stage *next_stage)
831 {
832 if (next_stage)
833 brw_nir_link_shaders(compiler, tes_stage->nir, next_stage->nir);
834 }
835
836 static void
837 anv_pipeline_compile_tes(const struct brw_compiler *compiler,
838 void *mem_ctx,
839 struct anv_device *device,
840 struct anv_pipeline_stage *tes_stage,
841 struct anv_pipeline_stage *tcs_stage)
842 {
843 tes_stage->key.tes.inputs_read =
844 tcs_stage->nir->info.outputs_written;
845 tes_stage->key.tes.patch_inputs_read =
846 tcs_stage->nir->info.patch_outputs_written;
847
848 tes_stage->num_stats = 1;
849 tes_stage->code = brw_compile_tes(compiler, device, mem_ctx,
850 &tes_stage->key.tes,
851 &tcs_stage->prog_data.tcs.base.vue_map,
852 &tes_stage->prog_data.tes,
853 tes_stage->nir, -1,
854 tes_stage->stats, NULL);
855 }
856
857 static void
858 anv_pipeline_link_gs(const struct brw_compiler *compiler,
859 struct anv_pipeline_stage *gs_stage,
860 struct anv_pipeline_stage *next_stage)
861 {
862 if (next_stage)
863 brw_nir_link_shaders(compiler, gs_stage->nir, next_stage->nir);
864 }
865
866 static void
867 anv_pipeline_compile_gs(const struct brw_compiler *compiler,
868 void *mem_ctx,
869 struct anv_device *device,
870 struct anv_pipeline_stage *gs_stage,
871 struct anv_pipeline_stage *prev_stage)
872 {
873 brw_compute_vue_map(compiler->devinfo,
874 &gs_stage->prog_data.gs.base.vue_map,
875 gs_stage->nir->info.outputs_written,
876 gs_stage->nir->info.separate_shader);
877
878 gs_stage->num_stats = 1;
879 gs_stage->code = brw_compile_gs(compiler, device, mem_ctx,
880 &gs_stage->key.gs,
881 &gs_stage->prog_data.gs,
882 gs_stage->nir, NULL, -1,
883 gs_stage->stats, NULL);
884 }
885
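/* Set up the render target bindings in the fragment shader's bind map and
 * delete any color outputs the subpass does not use, so dead code
 * elimination can clean them up in this and earlier stages.
 */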
886 static void
887 anv_pipeline_link_fs(const struct brw_compiler *compiler,
888 struct anv_pipeline_stage *stage)
889 {
890 unsigned num_rt_bindings;
891 struct anv_pipeline_binding rt_bindings[MAX_RTS];
892 if (stage->key.wm.nr_color_regions > 0) {
893 assert(stage->key.wm.nr_color_regions <= MAX_RTS);
894 for (unsigned rt = 0; rt < stage->key.wm.nr_color_regions; rt++) {
895 if (stage->key.wm.color_outputs_valid & BITFIELD_BIT(rt)) {
896 rt_bindings[rt] = (struct anv_pipeline_binding) {
897 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
898 .index = rt,
899 };
900 } else {
901 /* Setup a null render target */
902 rt_bindings[rt] = (struct anv_pipeline_binding) {
903 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
904 .index = UINT32_MAX,
905 };
906 }
907 }
908 num_rt_bindings = stage->key.wm.nr_color_regions;
909 } else {
910 /* Setup a null render target */
911 rt_bindings[0] = (struct anv_pipeline_binding) {
912 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
913 .index = UINT32_MAX,
914 };
915 num_rt_bindings = 1;
916 }
917
918 assert(num_rt_bindings <= MAX_RTS);
919 assert(stage->bind_map.surface_count == 0);
920 typed_memcpy(stage->bind_map.surface_to_descriptor,
921 rt_bindings, num_rt_bindings);
922 stage->bind_map.surface_count += num_rt_bindings;
923
924 /* Now that we've set up the color attachments, we can go through and
925 * eliminate any shader outputs that map to VK_ATTACHMENT_UNUSED in the
926 * hopes that dead code can clean them up in this and any earlier shader
927 * stages.
928 */
929 nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
930 bool deleted_output = false;
931 nir_foreach_variable_safe(var, &stage->nir->outputs) {
932 /* TODO: We don't delete depth/stencil writes. We probably could if the
933 * subpass doesn't have a depth/stencil attachment.
934 */
935 if (var->data.location < FRAG_RESULT_DATA0)
936 continue;
937
938 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
939
940 /* If this is the RT at location 0 and we have alpha to coverage
941 * enabled we still need that write because it will affect the coverage
942 * mask even if it's never written to a color target.
943 */
944 if (rt == 0 && stage->key.wm.alpha_to_coverage)
945 continue;
946
947 const unsigned array_len =
948 glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
949 assert(rt + array_len <= MAX_RTS);
950
951 if (rt >= MAX_RTS || !(stage->key.wm.color_outputs_valid &
952 BITFIELD_RANGE(rt, array_len))) {
953 deleted_output = true;
954 var->data.mode = nir_var_function_temp;
955 exec_node_remove(&var->node);
956 exec_list_push_tail(&impl->locals, &var->node);
957 }
958 }
959
960 if (deleted_output)
961 nir_fixup_deref_modes(stage->nir);
962
963 /* We stored the number of subpass color attachments in nr_color_regions
964 * when calculating the key for caching. Now that we've computed the bind
965 * map, we can reduce this to the actual max before we go into the back-end
966 * compiler.
967 */
968 stage->key.wm.nr_color_regions =
969 util_last_bit(stage->key.wm.color_outputs_valid);
970 }
971
972 static void
973 anv_pipeline_compile_fs(const struct brw_compiler *compiler,
974 void *mem_ctx,
975 struct anv_device *device,
976 struct anv_pipeline_stage *fs_stage,
977 struct anv_pipeline_stage *prev_stage)
978 {
979 /* TODO: we could set this to 0 based on the information in nir_shader, but
980 * we need this before we call spirv_to_nir.
981 */
982 assert(prev_stage);
983 fs_stage->key.wm.input_slots_valid =
984 prev_stage->prog_data.vue.vue_map.slots_valid;
985
986 fs_stage->code = brw_compile_fs(compiler, device, mem_ctx,
987 &fs_stage->key.wm,
988 &fs_stage->prog_data.wm,
989 fs_stage->nir, -1, -1, -1,
990 true, false, NULL,
991 fs_stage->stats, NULL);
992
993 fs_stage->num_stats = (uint32_t)fs_stage->prog_data.wm.dispatch_8 +
994 (uint32_t)fs_stage->prog_data.wm.dispatch_16 +
995 (uint32_t)fs_stage->prog_data.wm.dispatch_32;
996
997 if (fs_stage->key.wm.color_outputs_valid == 0 &&
998 !fs_stage->prog_data.wm.has_side_effects &&
999 !fs_stage->prog_data.wm.uses_omask &&
1000 !fs_stage->key.wm.alpha_to_coverage &&
1001 !fs_stage->prog_data.wm.uses_kill &&
1002 fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
1003 !fs_stage->prog_data.wm.computed_stencil) {
1004 /* This fragment shader has no outputs and no side effects. Go ahead
1005 * and return the code pointer so we don't accidentally think the
1006 * compile failed, but zero out prog_data, which will set program_size to
1007 * zero and disable the stage.
1008 */
1009 memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
1010 }
1011 }
1012
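/* Record a single pipeline executable for the
 * VK_KHR_pipeline_executable_properties queries, capturing the NIR and the
 * disassembly when the pipeline was created with
 * VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR.
 */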
1013 static void
1014 anv_pipeline_add_executable(struct anv_pipeline *pipeline,
1015 struct anv_pipeline_stage *stage,
1016 struct brw_compile_stats *stats,
1017 uint32_t code_offset)
1018 {
1019 char *nir = NULL;
1020 if (stage->nir &&
1021 (pipeline->flags &
1022 VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
1023 char *stream_data = NULL;
1024 size_t stream_size = 0;
1025 FILE *stream = open_memstream(&stream_data, &stream_size);
1026
1027 nir_print_shader(stage->nir, stream);
1028
1029 fclose(stream);
1030
1031 /* Copy it to a ralloc'd thing */
1032 nir = ralloc_size(pipeline->mem_ctx, stream_size + 1);
1033 memcpy(nir, stream_data, stream_size);
1034 nir[stream_size] = 0;
1035
1036 free(stream_data);
1037 }
1038
1039 char *disasm = NULL;
1040 if (stage->code &&
1041 (pipeline->flags &
1042 VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
1043 char *stream_data = NULL;
1044 size_t stream_size = 0;
1045 FILE *stream = open_memstream(&stream_data, &stream_size);
1046
1047 /* Creating this is far cheaper than it looks. It's perfectly fine to
1048 * do it for every binary.
1049 */
1050 struct gen_disasm *d = gen_disasm_create(&pipeline->device->info);
1051 gen_disasm_disassemble(d, stage->code, code_offset, stream);
1052 gen_disasm_destroy(d);
1053
1054 fclose(stream);
1055
1056 /* Copy it to a ralloc'd thing */
1057 disasm = ralloc_size(pipeline->mem_ctx, stream_size + 1);
1058 memcpy(disasm, stream_data, stream_size);
1059 disasm[stream_size] = 0;
1060
1061 free(stream_data);
1062 }
1063
1064 pipeline->executables[pipeline->num_executables++] =
1065 (struct anv_pipeline_executable) {
1066 .stage = stage->stage,
1067 .stats = *stats,
1068 .nir = nir,
1069 .disasm = disasm,
1070 };
1071 }
1072
1073 static void
1074 anv_pipeline_add_executables(struct anv_pipeline *pipeline,
1075 struct anv_pipeline_stage *stage,
1076 struct anv_shader_bin *bin)
1077 {
1078 if (stage->stage == MESA_SHADER_FRAGMENT) {
1079 /* We pull the prog data and stats out of the anv_shader_bin because
1080 * the anv_pipeline_stage may not be fully populated if we successfully
1081 * looked up the shader in a cache.
1082 */
1083 const struct brw_wm_prog_data *wm_prog_data =
1084 (const struct brw_wm_prog_data *)bin->prog_data;
1085 struct brw_compile_stats *stats = bin->stats;
1086
1087 if (wm_prog_data->dispatch_8) {
1088 anv_pipeline_add_executable(pipeline, stage, stats++, 0);
1089 }
1090
1091 if (wm_prog_data->dispatch_16) {
1092 anv_pipeline_add_executable(pipeline, stage, stats++,
1093 wm_prog_data->prog_offset_16);
1094 }
1095
1096 if (wm_prog_data->dispatch_32) {
1097 anv_pipeline_add_executable(pipeline, stage, stats++,
1098 wm_prog_data->prog_offset_32);
1099 }
1100 } else {
1101 anv_pipeline_add_executable(pipeline, stage, bin->stats, 0);
1102 }
1103 }
1104
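/* Compile all shader stages of a graphics pipeline: hash the stages, look
 * them up in the pipeline cache, and on a miss compile SPIR-V to NIR, link
 * the stages, and run the back-end compilers, recording creation feedback
 * along the way.
 */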
1105 static VkResult
1106 anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
1107 struct anv_pipeline_cache *cache,
1108 const VkGraphicsPipelineCreateInfo *info)
1109 {
1110 VkPipelineCreationFeedbackEXT pipeline_feedback = {
1111 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1112 };
1113 int64_t pipeline_start = os_time_get_nano();
1114
1115 const struct brw_compiler *compiler = pipeline->device->physical->compiler;
1116 struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
1117
1118 pipeline->active_stages = 0;
1119
1120 VkResult result;
1121 for (uint32_t i = 0; i < info->stageCount; i++) {
1122 const VkPipelineShaderStageCreateInfo *sinfo = &info->pStages[i];
1123 gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
1124
1125 pipeline->active_stages |= sinfo->stage;
1126
1127 int64_t stage_start = os_time_get_nano();
1128
1129 stages[stage].stage = stage;
1130 stages[stage].module = anv_shader_module_from_handle(sinfo->module);
1131 stages[stage].entrypoint = sinfo->pName;
1132 stages[stage].spec_info = sinfo->pSpecializationInfo;
1133 anv_pipeline_hash_shader(stages[stage].module,
1134 stages[stage].entrypoint,
1135 stage,
1136 stages[stage].spec_info,
1137 stages[stage].shader_sha1);
1138
1139 const struct gen_device_info *devinfo = &pipeline->device->info;
1140 switch (stage) {
1141 case MESA_SHADER_VERTEX:
1142 populate_vs_prog_key(devinfo, sinfo->flags, &stages[stage].key.vs);
1143 break;
1144 case MESA_SHADER_TESS_CTRL:
1145 populate_tcs_prog_key(devinfo, sinfo->flags,
1146 info->pTessellationState->patchControlPoints,
1147 &stages[stage].key.tcs);
1148 break;
1149 case MESA_SHADER_TESS_EVAL:
1150 populate_tes_prog_key(devinfo, sinfo->flags, &stages[stage].key.tes);
1151 break;
1152 case MESA_SHADER_GEOMETRY:
1153 populate_gs_prog_key(devinfo, sinfo->flags, &stages[stage].key.gs);
1154 break;
1155 case MESA_SHADER_FRAGMENT: {
1156 const bool raster_enabled =
1157 !info->pRasterizationState->rasterizerDiscardEnable;
1158 populate_wm_prog_key(devinfo, sinfo->flags,
1159 pipeline->subpass,
1160 raster_enabled ? info->pMultisampleState : NULL,
1161 &stages[stage].key.wm);
1162 break;
1163 }
1164 default:
1165 unreachable("Invalid graphics shader stage");
1166 }
1167
1168 stages[stage].feedback.duration += os_time_get_nano() - stage_start;
1169 stages[stage].feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
1170 }
1171
1172 if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
1173 pipeline->active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
1174
1175 assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
1176
1177 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1178
1179 unsigned char sha1[20];
1180 anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
1181
1182 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1183 if (!stages[s].entrypoint)
1184 continue;
1185
1186 stages[s].cache_key.stage = s;
1187 memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
1188 }
1189
1190 const bool skip_cache_lookup =
1191 (pipeline->flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
1192
1193 if (!skip_cache_lookup) {
1194 unsigned found = 0;
1195 unsigned cache_hits = 0;
1196 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1197 if (!stages[s].entrypoint)
1198 continue;
1199
1200 int64_t stage_start = os_time_get_nano();
1201
1202 bool cache_hit;
1203 struct anv_shader_bin *bin =
1204 anv_device_search_for_kernel(pipeline->device, cache,
1205 &stages[s].cache_key,
1206 sizeof(stages[s].cache_key), &cache_hit);
1207 if (bin) {
1208 found++;
1209 pipeline->shaders[s] = bin;
1210 }
1211
1212 if (cache_hit) {
1213 cache_hits++;
1214 stages[s].feedback.flags |=
1215 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1216 }
1217 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1218 }
1219
1220 if (found == __builtin_popcount(pipeline->active_stages)) {
1221 if (cache_hits == found) {
1222 pipeline_feedback.flags |=
1223 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1224 }
1225 /* We found all our shaders in the cache. We're done. */
1226 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1227 if (!stages[s].entrypoint)
1228 continue;
1229
1230 anv_pipeline_add_executables(pipeline, &stages[s],
1231 pipeline->shaders[s]);
1232 }
1233 goto done;
1234 } else if (found > 0) {
1235 /* We found some but not all of our shaders. This shouldn't happen
1236 * most of the time but it can if we have a partially populated
1237 * pipeline cache.
1238 */
1239 assert(found < __builtin_popcount(pipeline->active_stages));
1240
1241 vk_debug_report(&pipeline->device->instance->debug_report_callbacks,
1242 VK_DEBUG_REPORT_WARNING_BIT_EXT |
1243 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
1244 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
1245 (uint64_t)(uintptr_t)cache,
1246 0, 0, "anv",
1247 "Found a partial pipeline in the cache. This is "
1248 "most likely caused by an incomplete pipeline cache "
1249 "import or export");
1250
1251 /* We're going to have to recompile anyway, so just throw away our
1252 * references to the shaders in the cache. We'll get them out of the
1253 * cache again as part of the compilation process.
1254 */
1255 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1256 stages[s].feedback.flags = 0;
1257 if (pipeline->shaders[s]) {
1258 anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
1259 pipeline->shaders[s] = NULL;
1260 }
1261 }
1262 }
1263 }
1264
1265 void *pipeline_ctx = ralloc_context(NULL);
1266
1267 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1268 if (!stages[s].entrypoint)
1269 continue;
1270
1271 int64_t stage_start = os_time_get_nano();
1272
1273 assert(stages[s].stage == s);
1274 assert(pipeline->shaders[s] == NULL);
1275
1276 stages[s].bind_map = (struct anv_pipeline_bind_map) {
1277 .surface_to_descriptor = stages[s].surface_to_descriptor,
1278 .sampler_to_descriptor = stages[s].sampler_to_descriptor
1279 };
1280
1281 stages[s].nir = anv_pipeline_stage_get_nir(pipeline, cache,
1282 pipeline_ctx,
1283 &stages[s]);
1284 if (stages[s].nir == NULL) {
1285 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1286 goto fail;
1287 }
1288
1289 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1290 }
1291
1292 /* Walk backwards to link */
1293 struct anv_pipeline_stage *next_stage = NULL;
1294 for (int s = MESA_SHADER_STAGES - 1; s >= 0; s--) {
1295 if (!stages[s].entrypoint)
1296 continue;
1297
1298 switch (s) {
1299 case MESA_SHADER_VERTEX:
1300 anv_pipeline_link_vs(compiler, &stages[s], next_stage);
1301 break;
1302 case MESA_SHADER_TESS_CTRL:
1303 anv_pipeline_link_tcs(compiler, &stages[s], next_stage);
1304 break;
1305 case MESA_SHADER_TESS_EVAL:
1306 anv_pipeline_link_tes(compiler, &stages[s], next_stage);
1307 break;
1308 case MESA_SHADER_GEOMETRY:
1309 anv_pipeline_link_gs(compiler, &stages[s], next_stage);
1310 break;
1311 case MESA_SHADER_FRAGMENT:
1312 anv_pipeline_link_fs(compiler, &stages[s]);
1313 break;
1314 default:
1315 unreachable("Invalid graphics shader stage");
1316 }
1317
1318 next_stage = &stages[s];
1319 }
1320
1321 struct anv_pipeline_stage *prev_stage = NULL;
1322 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1323 if (!stages[s].entrypoint)
1324 continue;
1325
1326 int64_t stage_start = os_time_get_nano();
1327
1328 void *stage_ctx = ralloc_context(NULL);
1329
1330 nir_xfb_info *xfb_info = NULL;
1331 if (s == MESA_SHADER_VERTEX ||
1332 s == MESA_SHADER_TESS_EVAL ||
1333 s == MESA_SHADER_GEOMETRY)
1334 xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);
1335
1336 anv_pipeline_lower_nir(pipeline, stage_ctx, &stages[s], layout);
1337
1338 switch (s) {
1339 case MESA_SHADER_VERTEX:
1340 anv_pipeline_compile_vs(compiler, stage_ctx, pipeline->device,
1341 &stages[s]);
1342 break;
1343 case MESA_SHADER_TESS_CTRL:
1344 anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->device,
1345 &stages[s], prev_stage);
1346 break;
1347 case MESA_SHADER_TESS_EVAL:
1348 anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->device,
1349 &stages[s], prev_stage);
1350 break;
1351 case MESA_SHADER_GEOMETRY:
1352 anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->device,
1353 &stages[s], prev_stage);
1354 break;
1355 case MESA_SHADER_FRAGMENT:
1356 anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->device,
1357 &stages[s], prev_stage);
1358 break;
1359 default:
1360 unreachable("Invalid graphics shader stage");
1361 }
1362 if (stages[s].code == NULL) {
1363 ralloc_free(stage_ctx);
1364 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1365 goto fail;
1366 }
1367
1368 anv_nir_validate_push_layout(&stages[s].prog_data.base,
1369 &stages[s].bind_map);
1370
1371 struct anv_shader_bin *bin =
1372 anv_device_upload_kernel(pipeline->device, cache,
1373 &stages[s].cache_key,
1374 sizeof(stages[s].cache_key),
1375 stages[s].code,
1376 stages[s].prog_data.base.program_size,
1377 stages[s].nir->constant_data,
1378 stages[s].nir->constant_data_size,
1379 &stages[s].prog_data.base,
1380 brw_prog_data_size(s),
1381 stages[s].stats, stages[s].num_stats,
1382 xfb_info, &stages[s].bind_map);
1383 if (!bin) {
1384 ralloc_free(stage_ctx);
1385 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1386 goto fail;
1387 }
1388
1389 anv_pipeline_add_executables(pipeline, &stages[s], bin);
1390
1391 pipeline->shaders[s] = bin;
1392 ralloc_free(stage_ctx);
1393
1394 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1395
1396 prev_stage = &stages[s];
1397 }
1398
1399 ralloc_free(pipeline_ctx);
1400
1401 done:
1402
1403 if (pipeline->shaders[MESA_SHADER_FRAGMENT] &&
1404 pipeline->shaders[MESA_SHADER_FRAGMENT]->prog_data->program_size == 0) {
1405 /* This can happen if we decided to implicitly disable the fragment
1406 * shader. See anv_pipeline_compile_fs().
1407 */
1408 anv_shader_bin_unref(pipeline->device,
1409 pipeline->shaders[MESA_SHADER_FRAGMENT]);
1410 pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
1411 pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
1412 }
1413
1414 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1415
1416 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1417 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1418 if (create_feedback) {
1419 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1420
1421 assert(info->stageCount == create_feedback->pipelineStageCreationFeedbackCount);
1422 for (uint32_t i = 0; i < info->stageCount; i++) {
1423 gl_shader_stage s = vk_to_mesa_shader_stage(info->pStages[i].stage);
1424 create_feedback->pPipelineStageCreationFeedbacks[i] = stages[s].feedback;
1425 }
1426 }
1427
1428 return VK_SUCCESS;
1429
1430 fail:
1431 ralloc_free(pipeline_ctx);
1432
1433 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1434 if (pipeline->shaders[s])
1435 anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
1436 }
1437
1438 return result;
1439 }
1440
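/* Size/alignment callback used with nir_lower_vars_to_explicit_types for
 * shared memory: booleans take four bytes and a vec3 is aligned like a
 * vec4.
 */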
1441 static void
1442 shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
1443 {
1444 assert(glsl_type_is_vector_or_scalar(type));
1445
1446 uint32_t comp_size = glsl_type_is_boolean(type)
1447 ? 4 : glsl_get_bit_size(type) / 8;
1448 unsigned length = glsl_get_vector_elements(type);
1449 *size = comp_size * length;
1450 *align = comp_size * (length == 3 ? 4 : length);
1451 }
1452
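/* Compute-pipeline counterpart of anv_pipeline_compile_graphics: hash the
 * single compute stage, consult the pipeline cache, and compile the shader
 * on a cache miss.
 */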
1453 VkResult
1454 anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
1455 struct anv_pipeline_cache *cache,
1456 const VkComputePipelineCreateInfo *info,
1457 const struct anv_shader_module *module,
1458 const char *entrypoint,
1459 const VkSpecializationInfo *spec_info)
1460 {
1461 VkPipelineCreationFeedbackEXT pipeline_feedback = {
1462 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1463 };
1464 int64_t pipeline_start = os_time_get_nano();
1465
1466 const struct brw_compiler *compiler = pipeline->device->physical->compiler;
1467
1468 struct anv_pipeline_stage stage = {
1469 .stage = MESA_SHADER_COMPUTE,
1470 .module = module,
1471 .entrypoint = entrypoint,
1472 .spec_info = spec_info,
1473 .cache_key = {
1474 .stage = MESA_SHADER_COMPUTE,
1475 },
1476 .feedback = {
1477 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1478 },
1479 };
1480 anv_pipeline_hash_shader(stage.module,
1481 stage.entrypoint,
1482 MESA_SHADER_COMPUTE,
1483 stage.spec_info,
1484 stage.shader_sha1);
1485
1486 struct anv_shader_bin *bin = NULL;
1487
1488 const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *rss_info =
1489 vk_find_struct_const(info->stage.pNext,
1490 PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
1491
1492 populate_cs_prog_key(&pipeline->device->info, info->stage.flags,
1493 rss_info, &stage.key.cs);
1494
1495 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1496
1497 const bool skip_cache_lookup =
1498 (pipeline->flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
1499
1500 anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
1501
1502 bool cache_hit = false;
1503 if (!skip_cache_lookup) {
1504 bin = anv_device_search_for_kernel(pipeline->device, cache,
1505 &stage.cache_key,
1506 sizeof(stage.cache_key),
1507 &cache_hit);
1508 }
1509
1510 void *mem_ctx = ralloc_context(NULL);
1511 if (bin == NULL) {
1512 int64_t stage_start = os_time_get_nano();
1513
1514 stage.bind_map = (struct anv_pipeline_bind_map) {
1515 .surface_to_descriptor = stage.surface_to_descriptor,
1516 .sampler_to_descriptor = stage.sampler_to_descriptor
1517 };
1518
1519 /* Set up a binding for the gl_NumWorkGroups */
1520 stage.bind_map.surface_count = 1;
1521 stage.bind_map.surface_to_descriptor[0] = (struct anv_pipeline_binding) {
1522 .set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
1523 };
1524
1525 stage.nir = anv_pipeline_stage_get_nir(pipeline, cache, mem_ctx, &stage);
1526 if (stage.nir == NULL) {
1527 ralloc_free(mem_ctx);
1528 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1529 }
1530
1531 NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id);
1532
1533 anv_pipeline_lower_nir(pipeline, mem_ctx, &stage, layout);
1534
1535 NIR_PASS_V(stage.nir, nir_lower_vars_to_explicit_types,
1536 nir_var_mem_shared, shared_type_info);
1537 NIR_PASS_V(stage.nir, nir_lower_explicit_io,
1538 nir_var_mem_shared, nir_address_format_32bit_offset);
1539
1540 stage.num_stats = 1;
1541 stage.code = brw_compile_cs(compiler, pipeline->device, mem_ctx,
1542 &stage.key.cs, &stage.prog_data.cs,
1543 stage.nir, -1, stage.stats, NULL);
1544 if (stage.code == NULL) {
1545 ralloc_free(mem_ctx);
1546 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1547 }
1548
1549 anv_nir_validate_push_layout(&stage.prog_data.base, &stage.bind_map);
1550
1551 if (!stage.prog_data.cs.uses_num_work_groups) {
1552 assert(stage.bind_map.surface_to_descriptor[0].set ==
1553 ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS);
1554 stage.bind_map.surface_to_descriptor[0].set = ANV_DESCRIPTOR_SET_NULL;
1555 }
1556
1557 const unsigned code_size = stage.prog_data.base.program_size;
1558 bin = anv_device_upload_kernel(pipeline->device, cache,
1559 &stage.cache_key, sizeof(stage.cache_key),
1560 stage.code, code_size,
1561 stage.nir->constant_data,
1562 stage.nir->constant_data_size,
1563 &stage.prog_data.base,
1564 sizeof(stage.prog_data.cs),
1565 stage.stats, stage.num_stats,
1566 NULL, &stage.bind_map);
1567 if (!bin) {
1568 ralloc_free(mem_ctx);
1569 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1570 }
1571
1572 stage.feedback.duration = os_time_get_nano() - stage_start;
1573 }
1574
1575 anv_pipeline_add_executables(pipeline, &stage, bin);
1576
1577 ralloc_free(mem_ctx);
1578
1579 if (cache_hit) {
1580 stage.feedback.flags |=
1581 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1582 pipeline_feedback.flags |=
1583 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1584 }
1585 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1586
1587 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1588 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1589 if (create_feedback) {
1590 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1591
1592 assert(create_feedback->pipelineStageCreationFeedbackCount == 1);
1593 create_feedback->pPipelineStageCreationFeedbacks[0] = stage.feedback;
1594 }
1595
1596 pipeline->active_stages = VK_SHADER_STAGE_COMPUTE_BIT;
1597 pipeline->shaders[MESA_SHADER_COMPUTE] = bin;
1598
1599 return VK_SUCCESS;
1600 }
1601
1602 /**
1603 * Copy pipeline state not marked as dynamic.
1604 * Dynamic state is pipeline state which hasn't been provided at pipeline
1605 * creation time, but is dynamically provided afterwards using various
1606 * vkCmdSet* functions.
1607 *
1608 * The set of state considered "non_dynamic" is determined by the pieces of
1609 * state that have their corresponding VkDynamicState enums omitted from
1610 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
1611 *
1612 * @param[out] pipeline Destination non_dynamic state.
1613 * @param[in] pCreateInfo Source of non_dynamic state to be copied.
1614 */
1615 static void
1616 copy_non_dynamic_state(struct anv_pipeline *pipeline,
1617 const VkGraphicsPipelineCreateInfo *pCreateInfo)
1618 {
1619 anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
1620 struct anv_subpass *subpass = pipeline->subpass;
1621
1622 pipeline->dynamic_state = default_dynamic_state;
1623
1624 if (pCreateInfo->pDynamicState) {
1625 /* Remove all of the states that are marked as dynamic */
1626 uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
1627 for (uint32_t s = 0; s < count; s++) {
1628 states &= ~anv_cmd_dirty_bit_for_vk_dynamic_state(
1629 pCreateInfo->pDynamicState->pDynamicStates[s]);
1630 }
1631 }
1632
1633 struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
1634
1635 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1636 *
1637 * pViewportState is [...] NULL if the pipeline
1638 * has rasterization disabled.
1639 */
1640 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1641 assert(pCreateInfo->pViewportState);
1642
1643 dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
1644 if (states & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) {
1645 typed_memcpy(dynamic->viewport.viewports,
1646 pCreateInfo->pViewportState->pViewports,
1647 pCreateInfo->pViewportState->viewportCount);
1648 }
1649
1650 dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
1651 if (states & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) {
1652 typed_memcpy(dynamic->scissor.scissors,
1653 pCreateInfo->pViewportState->pScissors,
1654 pCreateInfo->pViewportState->scissorCount);
1655 }
1656 }
1657
1658 if (states & ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) {
1659 assert(pCreateInfo->pRasterizationState);
1660 dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
1661 }
1662
1663 if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS) {
1664 assert(pCreateInfo->pRasterizationState);
1665 dynamic->depth_bias.bias =
1666 pCreateInfo->pRasterizationState->depthBiasConstantFactor;
1667 dynamic->depth_bias.clamp =
1668 pCreateInfo->pRasterizationState->depthBiasClamp;
1669 dynamic->depth_bias.slope =
1670 pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
1671 }
1672
1673 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1674 *
1675 * pColorBlendState is [...] NULL if the pipeline has rasterization
1676 * disabled or if the subpass of the render pass the pipeline is
1677 * created against does not use any color attachments.
1678 */
1679 bool uses_color_att = false;
1680 for (unsigned i = 0; i < subpass->color_count; ++i) {
1681 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1682 uses_color_att = true;
1683 break;
1684 }
1685 }
1686
1687 if (uses_color_att &&
1688 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1689 assert(pCreateInfo->pColorBlendState);
1690
1691 if (states & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
1692 typed_memcpy(dynamic->blend_constants,
1693 pCreateInfo->pColorBlendState->blendConstants, 4);
1694 }
1695
1696 /* If there is no depthstencil attachment, then don't read
1697 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
1698 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
1699 * no need to override the depthstencil defaults in
1700 * anv_pipeline::dynamic_state when there is no depthstencil attachment.
1701 *
1702 * Section 9.2 of the Vulkan 1.0.15 spec says:
1703 *
1704 * pDepthStencilState is [...] NULL if the pipeline has rasterization
1705 * disabled or if the subpass of the render pass the pipeline is created
1706 * against does not use a depth/stencil attachment.
1707 */
1708 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
1709 subpass->depth_stencil_attachment) {
1710 assert(pCreateInfo->pDepthStencilState);
1711
1712 if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS) {
1713 dynamic->depth_bounds.min =
1714 pCreateInfo->pDepthStencilState->minDepthBounds;
1715 dynamic->depth_bounds.max =
1716 pCreateInfo->pDepthStencilState->maxDepthBounds;
1717 }
1718
1719 if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) {
1720 dynamic->stencil_compare_mask.front =
1721 pCreateInfo->pDepthStencilState->front.compareMask;
1722 dynamic->stencil_compare_mask.back =
1723 pCreateInfo->pDepthStencilState->back.compareMask;
1724 }
1725
1726 if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) {
1727 dynamic->stencil_write_mask.front =
1728 pCreateInfo->pDepthStencilState->front.writeMask;
1729 dynamic->stencil_write_mask.back =
1730 pCreateInfo->pDepthStencilState->back.writeMask;
1731 }
1732
1733 if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) {
1734 dynamic->stencil_reference.front =
1735 pCreateInfo->pDepthStencilState->front.reference;
1736 dynamic->stencil_reference.back =
1737 pCreateInfo->pDepthStencilState->back.reference;
1738 }
1739 }
1740
1741 const VkPipelineRasterizationLineStateCreateInfoEXT *line_state =
1742 vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
1743 PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
1744 if (line_state) {
1745 if (states & ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE) {
1746 dynamic->line_stipple.factor = line_state->lineStippleFactor;
1747 dynamic->line_stipple.pattern = line_state->lineStipplePattern;
1748 }
1749 }
1750
1751 pipeline->dynamic_state_mask = states;
1752 }
1753
1754 static void
1755 anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
1756 {
1757 #ifdef DEBUG
1758 struct anv_render_pass *renderpass = NULL;
1759 struct anv_subpass *subpass = NULL;
1760
1761 /* Assert that all required members of VkGraphicsPipelineCreateInfo are
1762 * present. See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
1763 */
1764 assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1765
1766 renderpass = anv_render_pass_from_handle(info->renderPass);
1767 assert(renderpass);
1768
1769 assert(info->subpass < renderpass->subpass_count);
1770 subpass = &renderpass->subpasses[info->subpass];
1771
1772 assert(info->stageCount >= 1);
1773 assert(info->pVertexInputState);
1774 assert(info->pInputAssemblyState);
1775 assert(info->pRasterizationState);
1776 if (!info->pRasterizationState->rasterizerDiscardEnable) {
1777 assert(info->pViewportState);
1778 assert(info->pMultisampleState);
1779
1780 if (subpass && subpass->depth_stencil_attachment)
1781 assert(info->pDepthStencilState);
1782
1783 if (subpass && subpass->color_count > 0) {
1784 bool all_color_unused = true;
1785 for (int i = 0; i < subpass->color_count; i++) {
1786 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
1787 all_color_unused = false;
1788 }
1789 /* pColorBlendState is ignored if the pipeline has rasterization
1790 * disabled or if the subpass of the render pass the pipeline is
1791 * created against does not use any color attachments.
1792 */
1793 assert(info->pColorBlendState || all_color_unused);
1794 }
1795 }
1796
1797 for (uint32_t i = 0; i < info->stageCount; ++i) {
1798 switch (info->pStages[i].stage) {
1799 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
1800 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
1801 assert(info->pTessellationState);
1802 break;
1803 default:
1804 break;
1805 }
1806 }
1807 #endif
1808 }
1809
1810 /**
1811 * Calculate the desired L3 partitioning based on the current state of the
1812 * pipeline. For now this simply returns the conservative defaults calculated
1813 * by get_default_l3_weights(), but we could probably do better by gathering
1814 * more statistics from the pipeline state (e.g. guess of expected URB usage
1815 * and bound surfaces), or by using feedback from performance counters.
1816 */
1817 void
1818 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
1819 {
1820 const struct gen_device_info *devinfo = &pipeline->device->info;
1821
1822 const struct gen_l3_weights w =
1823 gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);
1824
1825 pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
1826 pipeline->urb.total_size =
1827 gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
1828 }
1829
1830 VkResult
1831 anv_pipeline_init(struct anv_pipeline *pipeline,
1832 struct anv_device *device,
1833 struct anv_pipeline_cache *cache,
1834 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1835 const VkAllocationCallbacks *alloc)
1836 {
1837 VkResult result;
1838
1839 anv_pipeline_validate_create_info(pCreateInfo);
1840
1841 if (alloc == NULL)
1842 alloc = &device->alloc;
1843
1844 pipeline->device = device;
1845
1846 ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
1847 assert(pCreateInfo->subpass < render_pass->subpass_count);
1848 pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];
1849
1850 result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
1851 if (result != VK_SUCCESS)
1852 return result;
1853
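/* Pipeline batch commands are emitted into the fixed-size batch_data array
 * embedded in the pipeline; relocations go through the reloc list
 * initialized above.
 */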
1854 pipeline->batch.alloc = alloc;
1855 pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
1856 pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
1857 pipeline->batch.relocs = &pipeline->batch_relocs;
1858 pipeline->batch.status = VK_SUCCESS;
1859
1860 pipeline->mem_ctx = ralloc_context(NULL);
1861 pipeline->flags = pCreateInfo->flags;
1862
1863 assert(pCreateInfo->pRasterizationState);
1864
1865 copy_non_dynamic_state(pipeline, pCreateInfo);
1866 pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState->depthClampEnable;
1867
1868 /* Previously we enabled depth clipping when !depthClampEnable.
1869 * DepthClipStateCreateInfo now makes depth clipping explicit, so if the
1870 * clipping info is available, use its enable value to determine clipping;
1871 * otherwise fall back to the previous !depthClampEnable logic.
1872 */
1873 const VkPipelineRasterizationDepthClipStateCreateInfoEXT *clip_info =
1874 vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
1875 PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
1876 pipeline->depth_clip_enable = clip_info ? clip_info->depthClipEnable : !pipeline->depth_clamp_enable;
1877
1878 pipeline->sample_shading_enable =
1879 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
1880 pCreateInfo->pMultisampleState &&
1881 pCreateInfo->pMultisampleState->sampleShadingEnable;
1882
1883 pipeline->needs_data_cache = false;
1884
1885 /* When we free the pipeline, we detect stages based on the NULL status
1886 * of various prog_data pointers. Make them NULL by default.
1887 */
1888 memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
1889 pipeline->num_executables = 0;
1890
1891 result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
1892 if (result != VK_SUCCESS) {
1893 ralloc_free(pipeline->mem_ctx);
1894 anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
1895 return result;
1896 }
1897
1898 assert(pipeline->shaders[MESA_SHADER_VERTEX]);
1899
1900 anv_pipeline_setup_l3_config(pipeline, false);
1901
1902 const VkPipelineVertexInputStateCreateInfo *vi_info =
1903 pCreateInfo->pVertexInputState;
1904
1905 const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;
1906
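/* Compute the mask of vertex buffer bindings that are actually read, i.e.
 * those referenced by an attribute the vertex shader consumes.
 */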
1907 pipeline->vb_used = 0;
1908 for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
1909 const VkVertexInputAttributeDescription *desc =
1910 &vi_info->pVertexAttributeDescriptions[i];
1911
1912 if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
1913 pipeline->vb_used |= 1 << desc->binding;
1914 }
1915
1916 for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
1917 const VkVertexInputBindingDescription *desc =
1918 &vi_info->pVertexBindingDescriptions[i];
1919
1920 pipeline->vb[desc->binding].stride = desc->stride;
1921
1922 /* Step rate is programmed per vertex element (attribute), not
1923 * binding. Set up a map of which bindings step per instance, for
1924 * reference by vertex element setup. */
1925 switch (desc->inputRate) {
1926 default:
1927 case VK_VERTEX_INPUT_RATE_VERTEX:
1928 pipeline->vb[desc->binding].instanced = false;
1929 break;
1930 case VK_VERTEX_INPUT_RATE_INSTANCE:
1931 pipeline->vb[desc->binding].instanced = true;
1932 break;
1933 }
1934
1935 pipeline->vb[desc->binding].instance_divisor = 1;
1936 }
1937
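/* VK_EXT_vertex_attribute_divisor: override the default divisor of 1 for
 * any bindings listed in the divisor state, if present.
 */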
1938 const VkPipelineVertexInputDivisorStateCreateInfoEXT *vi_div_state =
1939 vk_find_struct_const(vi_info->pNext,
1940 PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
1941 if (vi_div_state) {
1942 for (uint32_t i = 0; i < vi_div_state->vertexBindingDivisorCount; i++) {
1943 const VkVertexInputBindingDivisorDescriptionEXT *desc =
1944 &vi_div_state->pVertexBindingDivisors[i];
1945
1946 pipeline->vb[desc->binding].instance_divisor = desc->divisor;
1947 }
1948 }
1949
1950 /* Our implementation of VK_KHR_multiview uses instancing to draw the
1951 * different views. If the client asks for instancing, we need to multiply
1952 * the instance divisor by the number of views to ensure that we repeat the
1953 * client's per-instance data once for each view.
1954 */
1955 if (pipeline->subpass->view_mask) {
1956 const uint32_t view_count = anv_subpass_view_count(pipeline->subpass);
1957 for (uint32_t vb = 0; vb < MAX_VBS; vb++) {
1958 if (pipeline->vb[vb].instanced)
1959 pipeline->vb[vb].instance_divisor *= view_count;
1960 }
1961 }
1962
1963 const VkPipelineInputAssemblyStateCreateInfo *ia_info =
1964 pCreateInfo->pInputAssemblyState;
1965 const VkPipelineTessellationStateCreateInfo *tess_info =
1966 pCreateInfo->pTessellationState;
1967 pipeline->primitive_restart = ia_info->primitiveRestartEnable;
1968
1969 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
1970 pipeline->topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
1971 else
1972 pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
1973
1974 return VK_SUCCESS;
1975 }
1976
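/* Fill a fixed-size string field: zero it first, then snprintf into it and
 * assert that the formatted string was not truncated.
 */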
1977 #define WRITE_STR(field, ...) ({ \
1978 memset(field, 0, sizeof(field)); \
1979 UNUSED int i = snprintf(field, sizeof(field), __VA_ARGS__); \
1980 assert(i > 0 && i < sizeof(field)); \
1981 })
1982
1983 VkResult anv_GetPipelineExecutablePropertiesKHR(
1984 VkDevice device,
1985 const VkPipelineInfoKHR* pPipelineInfo,
1986 uint32_t* pExecutableCount,
1987 VkPipelineExecutablePropertiesKHR* pProperties)
1988 {
1989 ANV_FROM_HANDLE(anv_pipeline, pipeline, pPipelineInfo->pipeline);
1990 VK_OUTARRAY_MAKE(out, pProperties, pExecutableCount);
1991
1992 for (uint32_t i = 0; i < pipeline->num_executables; i++) {
1993 vk_outarray_append(&out, props) {
1994 gl_shader_stage stage = pipeline->executables[i].stage;
1995 props->stages = mesa_to_vk_shader_stage(stage);
1996
1997 unsigned simd_width = pipeline->executables[i].stats.dispatch_width;
1998 if (stage == MESA_SHADER_FRAGMENT) {
1999 WRITE_STR(props->name, "%s%d %s",
2000 simd_width ? "SIMD" : "vec",
2001 simd_width ? simd_width : 4,
2002 _mesa_shader_stage_to_string(stage));
2003 } else {
2004 WRITE_STR(props->name, "%s", _mesa_shader_stage_to_string(stage));
2005 }
2006 WRITE_STR(props->description, "%s%d %s shader",
2007 simd_width ? "SIMD" : "vec",
2008 simd_width ? simd_width : 4,
2009 _mesa_shader_stage_to_string(stage));
2010
2011 /* The compiler gives us a dispatch width of 0 for vec4 but Vulkan
2012 * wants a subgroup size of 1.
2013 */
2014 props->subgroupSize = MAX2(simd_width, 1);
2015 }
2016 }
2017
2018 return vk_outarray_status(&out);
2019 }
2020
2021 VkResult anv_GetPipelineExecutableStatisticsKHR(
2022 VkDevice device,
2023 const VkPipelineExecutableInfoKHR* pExecutableInfo,
2024 uint32_t* pStatisticCount,
2025 VkPipelineExecutableStatisticKHR* pStatistics)
2026 {
2027 ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
2028 VK_OUTARRAY_MAKE(out, pStatistics, pStatisticCount);
2029
2030 assert(pExecutableInfo->executableIndex < pipeline->num_executables);
2031 const struct anv_pipeline_executable *exe =
2032 &pipeline->executables[pExecutableInfo->executableIndex];
2033 const struct brw_stage_prog_data *prog_data =
2034 pipeline->shaders[exe->stage]->prog_data;
2035
2036 vk_outarray_append(&out, stat) {
2037 WRITE_STR(stat->name, "Instruction Count");
2038 WRITE_STR(stat->description,
2039 "Number of GEN instructions in the final generated "
2040 "shader executable.");
2041 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2042 stat->value.u64 = exe->stats.instructions;
2043 }
2044
2045 vk_outarray_append(&out, stat) {
2046 WRITE_STR(stat->name, "Loop Count");
2047 WRITE_STR(stat->description,
2048 "Number of loops (not unrolled) in the final generated "
2049 "shader executable.");
2050 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2051 stat->value.u64 = exe->stats.loops;
2052 }
2053
2054 vk_outarray_append(&out, stat) {
2055 WRITE_STR(stat->name, "Cycle Count");
2056 WRITE_STR(stat->description,
2057 "Estimate of the number of EU cycles required to execute "
2058 "the final generated executable. This is an estimate only "
2059 "and may vary greatly from actual run-time performance.");
2060 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2061 stat->value.u64 = exe->stats.cycles;
2062 }
2063
2064 vk_outarray_append(&out, stat) {
2065 WRITE_STR(stat->name, "Spill Count");
2066 WRITE_STR(stat->description,
2067 "Number of scratch spill operations. This gives a rough "
2068 "estimate of the cost incurred due to spilling temporary "
2069 "values to memory. If this is non-zero, you may want to "
2070 "adjust your shader to reduce register pressure.");
2071 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2072 stat->value.u64 = exe->stats.spills;
2073 }
2074
2075 vk_outarray_append(&out, stat) {
2076 WRITE_STR(stat->name, "Fill Count");
2077 WRITE_STR(stat->description,
2078 "Number of scratch fill operations. This gives a rough "
2079 "estimate of the cost incurred due to spilling temporary "
2080 "values to memory. If this is non-zero, you may want to "
2081 "adjust your shader to reduce register pressure.");
2082 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2083 stat->value.u64 = exe->stats.fills;
2084 }
2085
2086 vk_outarray_append(&out, stat) {
2087 WRITE_STR(stat->name, "Scratch Memory Size");
2088 WRITE_STR(stat->description,
2089 "Number of bytes of scratch memory required by the "
2090 "generated shader executable. If this is non-zero, you "
2091 "may want to adjust your shader to reduce register "
2092 "pressure.");
2093 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2094 stat->value.u64 = prog_data->total_scratch;
2095 }
2096
2097 if (exe->stage == MESA_SHADER_COMPUTE) {
2098 vk_outarray_append(&out, stat) {
2099 WRITE_STR(stat->name, "Workgroup Memory Size");
2100 WRITE_STR(stat->description,
2101 "Number of bytes of workgroup shared memory used by this "
2102 "compute shader including any padding.");
2103 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2104 stat->value.u64 = prog_data->total_shared;
2105 }
2106 }
2107
2108 return vk_outarray_status(&out);
2109 }
2110
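/* Copy a NUL-terminated IR string into the application-provided buffer using
 * the usual Vulkan two-call idiom: when pData is NULL only the required size
 * is written to dataSize; otherwise the string is copied and false is
 * returned if dataSize was too small to hold all of it.
 */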
2111 static bool
2112 write_ir_text(VkPipelineExecutableInternalRepresentationKHR* ir,
2113 const char *data)
2114 {
2115 ir->isText = VK_TRUE;
2116
2117 size_t data_len = strlen(data) + 1;
2118
2119 if (ir->pData == NULL) {
2120 ir->dataSize = data_len;
2121 return true;
2122 }
2123
2124 strncpy(ir->pData, data, ir->dataSize);
2125 if (ir->dataSize < data_len)
2126 return false;
2127
2128 ir->dataSize = data_len;
2129 return true;
2130 }
2131
2132 VkResult anv_GetPipelineExecutableInternalRepresentationsKHR(
2133 VkDevice device,
2134 const VkPipelineExecutableInfoKHR* pExecutableInfo,
2135 uint32_t* pInternalRepresentationCount,
2136 VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
2137 {
2138 ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
2139 VK_OUTARRAY_MAKE(out, pInternalRepresentations,
2140 pInternalRepresentationCount);
2141 bool incomplete_text = false;
2142
2143 assert(pExecutableInfo->executableIndex < pipeline->num_executables);
2144 const struct anv_pipeline_executable *exe =
2145 &pipeline->executables[pExecutableInfo->executableIndex];
2146
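/* Internal representations are only available if the corresponding text was
 * recorded when the pipeline executable was compiled.
 */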
2147 if (exe->nir) {
2148 vk_outarray_append(&out, ir) {
2149 WRITE_STR(ir->name, "Final NIR");
2150 WRITE_STR(ir->description,
2151 "Final NIR before going into the back-end compiler");
2152
2153 if (!write_ir_text(ir, exe->nir))
2154 incomplete_text = true;
2155 }
2156 }
2157
2158 if (exe->disasm) {
2159 vk_outarray_append(&out, ir) {
2160 WRITE_STR(ir->name, "GEN Assembly");
2161 WRITE_STR(ir->description,
2162 "Final GEN assembly for the generated shader binary");
2163
2164 if (!write_ir_text(ir, exe->disasm))
2165 incomplete_text = true;
2166 }
2167 }
2168
2169 return incomplete_text ? VK_INCOMPLETE : vk_outarray_status(&out);
2170 }