intel/fs: Allow multiple slots for position
src/intel/vulkan/anv_pipeline.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "util/mesa-sha1.h"
31 #include "util/os_time.h"
32 #include "common/gen_l3_config.h"
33 #include "common/gen_disasm.h"
34 #include "anv_private.h"
35 #include "compiler/brw_nir.h"
36 #include "anv_nir.h"
37 #include "nir/nir_xfb_info.h"
38 #include "spirv/nir_spirv.h"
39 #include "vk_util.h"
40
41 /* Needed for SWIZZLE macros */
42 #include "program/prog_instruction.h"
43
44 // Shader functions
45
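/* An anv_shader_module is just the application's SPIR-V words plus a SHA-1
 * of that code; the hash computed here is what later feeds the NIR- and
 * pipeline-cache keys.
 */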
46 VkResult anv_CreateShaderModule(
47 VkDevice _device,
48 const VkShaderModuleCreateInfo* pCreateInfo,
49 const VkAllocationCallbacks* pAllocator,
50 VkShaderModule* pShaderModule)
51 {
52 ANV_FROM_HANDLE(anv_device, device, _device);
53 struct anv_shader_module *module;
54
55 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
56 assert(pCreateInfo->flags == 0);
57
58 module = vk_alloc2(&device->alloc, pAllocator,
59 sizeof(*module) + pCreateInfo->codeSize, 8,
60 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
61 if (module == NULL)
62 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
63
64 module->size = pCreateInfo->codeSize;
65 memcpy(module->data, pCreateInfo->pCode, module->size);
66
67 _mesa_sha1_compute(module->data, module->size, module->sha1);
68
69 *pShaderModule = anv_shader_module_to_handle(module);
70
71 return VK_SUCCESS;
72 }
73
74 void anv_DestroyShaderModule(
75 VkDevice _device,
76 VkShaderModule _module,
77 const VkAllocationCallbacks* pAllocator)
78 {
79 ANV_FROM_HANDLE(anv_device, device, _device);
80 ANV_FROM_HANDLE(anv_shader_module, module, _module);
81
82 if (!module)
83 return;
84
85 vk_free2(&device->alloc, pAllocator, module);
86 }
87
88 #define SPIR_V_MAGIC_NUMBER 0x07230203
89
90 struct anv_spirv_debug_data {
91 struct anv_device *device;
92 const struct anv_shader_module *module;
93 };
94
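/* Debug callback handed to spirv_to_nir (via spirv_options.debug below).  It
 * forwards SPIR-V parser diagnostics to the VK_EXT_debug_report machinery,
 * tagged with the offending shader module.
 */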
95 static void anv_spirv_nir_debug(void *private_data,
96 enum nir_spirv_debug_level level,
97 size_t spirv_offset,
98 const char *message)
99 {
100 struct anv_spirv_debug_data *debug_data = private_data;
101 struct anv_instance *instance = debug_data->device->physical->instance;
102
103 static const VkDebugReportFlagsEXT vk_flags[] = {
104 [NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
105 [NIR_SPIRV_DEBUG_LEVEL_WARNING] = VK_DEBUG_REPORT_WARNING_BIT_EXT,
106 [NIR_SPIRV_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
107 };
108 char buffer[256];
109
110 snprintf(buffer, sizeof(buffer), "SPIR-V offset %lu: %s", (unsigned long) spirv_offset, message);
111
112 vk_debug_report(&instance->debug_report_callbacks,
113 vk_flags[level],
114 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
115 (uint64_t) (uintptr_t) debug_data->module,
116 0, 0, "anv", buffer);
117 }
118
119 /* Eventually, this will become part of anv_CreateShader. Unfortunately,
120 * we can't do that yet because we don't have the ability to copy nir.
121 */
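/* Translate the module's SPIR-V into NIR for one stage: run spirv_to_nir
 * with the device's capability bits, inline everything into the single
 * entrypoint, split per-member structs, and finish with brw_preprocess_nir
 * so the result is ready for the rest of the pipeline.
 */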
122 static nir_shader *
123 anv_shader_compile_to_nir(struct anv_device *device,
124 void *mem_ctx,
125 const struct anv_shader_module *module,
126 const char *entrypoint_name,
127 gl_shader_stage stage,
128 const VkSpecializationInfo *spec_info)
129 {
130 const struct anv_physical_device *pdevice = device->physical;
131 const struct brw_compiler *compiler = pdevice->compiler;
132 const nir_shader_compiler_options *nir_options =
133 compiler->glsl_compiler_options[stage].NirOptions;
134
135 uint32_t *spirv = (uint32_t *) module->data;
136 assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
137 assert(module->size % 4 == 0);
138
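   /* Gather the specialization constants.  Each VkSpecializationMapEntry
    * selects entry.size bytes at entry.offset inside pData and binds them to
    * the SPIR-V constant with the matching constantID.  A hypothetical
    * application-side setup for a single 32-bit constant would look like:
    *
    *    const uint32_t value = 4;
    *    const VkSpecializationMapEntry entry = { .constantID = 0, .offset = 0, .size = 4 };
    *    const VkSpecializationInfo si = { 1, &entry, sizeof(value), &value };
    */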
139 uint32_t num_spec_entries = 0;
140 struct nir_spirv_specialization *spec_entries = NULL;
141 if (spec_info && spec_info->mapEntryCount > 0) {
142 num_spec_entries = spec_info->mapEntryCount;
143 spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
144 for (uint32_t i = 0; i < num_spec_entries; i++) {
145 VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
146 const void *data = spec_info->pData + entry.offset;
147 assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
148
149 spec_entries[i].id = spec_info->pMapEntries[i].constantID;
150          if (entry.size == 8)
151 spec_entries[i].data64 = *(const uint64_t *)data;
152 else
153 spec_entries[i].data32 = *(const uint32_t *)data;
154 }
155 }
156
157 struct anv_spirv_debug_data spirv_debug_data = {
158 .device = device,
159 .module = module,
160 };
161 struct spirv_to_nir_options spirv_options = {
162 .frag_coord_is_sysval = true,
163 .caps = {
164 .demote_to_helper_invocation = true,
165 .derivative_group = true,
166 .descriptor_array_dynamic_indexing = true,
167 .descriptor_array_non_uniform_indexing = true,
168 .descriptor_indexing = true,
169 .device_group = true,
170 .draw_parameters = true,
171 .float16 = pdevice->info.gen >= 8,
172 .float64 = pdevice->info.gen >= 8,
173 .fragment_shader_sample_interlock = pdevice->info.gen >= 9,
174 .fragment_shader_pixel_interlock = pdevice->info.gen >= 9,
175 .geometry_streams = true,
176 .image_write_without_format = true,
177 .int8 = pdevice->info.gen >= 8,
178 .int16 = pdevice->info.gen >= 8,
179 .int64 = pdevice->info.gen >= 8,
180 .int64_atomics = pdevice->info.gen >= 9 && pdevice->use_softpin,
181 .integer_functions2 = pdevice->info.gen >= 8,
182 .min_lod = true,
183 .multiview = true,
184 .physical_storage_buffer_address = pdevice->has_a64_buffer_access,
185 .post_depth_coverage = pdevice->info.gen >= 9,
186 .runtime_descriptor_array = true,
187 .float_controls = pdevice->info.gen >= 8,
188 .shader_clock = true,
189 .shader_viewport_index_layer = true,
190 .stencil_export = pdevice->info.gen >= 9,
191 .storage_8bit = pdevice->info.gen >= 8,
192 .storage_16bit = pdevice->info.gen >= 8,
193 .subgroup_arithmetic = true,
194 .subgroup_basic = true,
195 .subgroup_ballot = true,
196 .subgroup_quad = true,
197 .subgroup_shuffle = true,
198 .subgroup_vote = true,
199 .tessellation = true,
200 .transform_feedback = pdevice->info.gen >= 8,
201 .variable_pointers = true,
202 .vk_memory_model = true,
203 .vk_memory_model_device_scope = true,
204 },
205 .ubo_addr_format = nir_address_format_32bit_index_offset,
206 .ssbo_addr_format =
207 anv_nir_ssbo_addr_format(pdevice, device->robust_buffer_access),
208 .phys_ssbo_addr_format = nir_address_format_64bit_global,
209 .push_const_addr_format = nir_address_format_logical,
210
211    /* TODO: Consider changing this to an address format where the NULL
212     * pointer equals 0.  That might be a better format for playing nice
213     * with certain code / code generators.
214     */
215 .shared_addr_format = nir_address_format_32bit_offset,
216 .debug = {
217 .func = anv_spirv_nir_debug,
218 .private_data = &spirv_debug_data,
219 },
220 };
221
222
223 nir_shader *nir =
224 spirv_to_nir(spirv, module->size / 4,
225 spec_entries, num_spec_entries,
226 stage, entrypoint_name, &spirv_options, nir_options);
227 assert(nir->info.stage == stage);
228 nir_validate_shader(nir, "after spirv_to_nir");
229 ralloc_steal(mem_ctx, nir);
230
231 free(spec_entries);
232
233 if (unlikely(INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage))) {
234 fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
235 gl_shader_stage_name(stage));
236 nir_print_shader(nir, stderr);
237 }
238
239 /* We have to lower away local constant initializers right before we
240 * inline functions. That way they get properly initialized at the top
241 * of the function and not at the top of its caller.
242 */
243 NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
244 NIR_PASS_V(nir, nir_lower_returns);
245 NIR_PASS_V(nir, nir_inline_functions);
246 NIR_PASS_V(nir, nir_opt_deref);
247
248 /* Pick off the single entrypoint that we want */
249 foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
250 if (!func->is_entrypoint)
251 exec_node_remove(&func->node);
252 }
253 assert(exec_list_length(&nir->functions) == 1);
254
255 /* Now that we've deleted all but the main function, we can go ahead and
256 * lower the rest of the constant initializers. We do this here so that
257 * nir_remove_dead_variables and split_per_member_structs below see the
258 * corresponding stores.
259 */
260 NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
261
262 /* Split member structs. We do this before lower_io_to_temporaries so that
263 * it doesn't lower system values to temporaries by accident.
264 */
265 NIR_PASS_V(nir, nir_split_var_copies);
266 NIR_PASS_V(nir, nir_split_per_member_structs);
267
268 NIR_PASS_V(nir, nir_remove_dead_variables,
269 nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
270
271 NIR_PASS_V(nir, nir_propagate_invariant);
272 NIR_PASS_V(nir, nir_lower_io_to_temporaries,
273 nir_shader_get_entrypoint(nir), true, false);
274
275 NIR_PASS_V(nir, nir_lower_frexp);
276
277 /* Vulkan uses the separate-shader linking model */
278 nir->info.separate_shader = true;
279
280 brw_preprocess_nir(compiler, nir, NULL);
281
282 return nir;
283 }
284
285 void anv_DestroyPipeline(
286 VkDevice _device,
287 VkPipeline _pipeline,
288 const VkAllocationCallbacks* pAllocator)
289 {
290 ANV_FROM_HANDLE(anv_device, device, _device);
291 ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
292
293 if (!pipeline)
294 return;
295
296 anv_reloc_list_finish(&pipeline->batch_relocs,
297 pAllocator ? pAllocator : &device->alloc);
298
299 ralloc_free(pipeline->mem_ctx);
300
301 switch (pipeline->type) {
302 case ANV_PIPELINE_GRAPHICS: {
303 struct anv_graphics_pipeline *gfx_pipeline =
304 anv_pipeline_to_graphics(pipeline);
305
306 if (gfx_pipeline->blend_state.map)
307 anv_state_pool_free(&device->dynamic_state_pool, gfx_pipeline->blend_state);
308
309 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
310 if (gfx_pipeline->shaders[s])
311 anv_shader_bin_unref(device, gfx_pipeline->shaders[s]);
312 }
313 break;
314 }
315
316 case ANV_PIPELINE_COMPUTE: {
317 struct anv_compute_pipeline *compute_pipeline =
318 anv_pipeline_to_compute(pipeline);
319
320 if (compute_pipeline->cs)
321 anv_shader_bin_unref(device, compute_pipeline->cs);
322
323 break;
324 }
325
326 default:
327 unreachable("invalid pipeline type");
328 }
329
330 vk_free2(&device->alloc, pAllocator, pipeline);
331 }
332
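/* Translation table from VkPrimitiveTopology to the hardware 3DPRIM_*
 * topology values.
 */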
333 static const uint32_t vk_to_gen_primitive_type[] = {
334 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
335 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
336 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
337 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
338 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
339 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
340 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
341 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
342 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
343 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
344 };
345
346 static void
347 populate_sampler_prog_key(const struct gen_device_info *devinfo,
348 struct brw_sampler_prog_key_data *key)
349 {
350    /* Almost all multisampled textures are compressed.  The only time we don't
351     * compress a multisampled texture is for 16x MSAA with a surface width
352     * greater than 8k, which is a bit of an edge case.  Since the sampler
353 * just ignores the MCS parameter to ld2ms when MCS is disabled, it's safe
354 * to tell the compiler to always assume compression.
355 */
356 key->compressed_multisample_layout_mask = ~0;
357
358 /* SkyLake added support for 16x MSAA. With this came a new message for
359 * reading from a 16x MSAA surface with compression. The new message was
360 * needed because now the MCS data is 64 bits instead of 32 or lower as is
361 * the case for 8x, 4x, and 2x. The key->msaa_16 bit-field controls which
362 * message we use. Fortunately, the 16x message works for 8x, 4x, and 2x
363 * so we can just use it unconditionally. This may not be quite as
364 * efficient but it saves us from recompiling.
365 */
366 if (devinfo->gen >= 9)
367 key->msaa_16 = ~0;
368
369 /* XXX: Handle texture swizzle on HSW- */
370 for (int i = 0; i < MAX_SAMPLERS; i++) {
371 /* Assume color sampler, no swizzling. (Works for BDW+) */
372 key->swizzles[i] = SWIZZLE_XYZW;
373 }
374 }
375
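/* Key fields common to every stage: how the subgroup size may be chosen and
 * which sampler swizzles the compiler should assume.
 */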
376 static void
377 populate_base_prog_key(const struct gen_device_info *devinfo,
378 VkPipelineShaderStageCreateFlags flags,
379 struct brw_base_prog_key *key)
380 {
381 if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
382 key->subgroup_size_type = BRW_SUBGROUP_SIZE_VARYING;
383 else
384 key->subgroup_size_type = BRW_SUBGROUP_SIZE_API_CONSTANT;
385
386 populate_sampler_prog_key(devinfo, &key->tex);
387 }
388
389 static void
390 populate_vs_prog_key(const struct gen_device_info *devinfo,
391 VkPipelineShaderStageCreateFlags flags,
392 struct brw_vs_prog_key *key)
393 {
394 memset(key, 0, sizeof(*key));
395
396 populate_base_prog_key(devinfo, flags, &key->base);
397
398 /* XXX: Handle vertex input work-arounds */
399
400 /* XXX: Handle sampler_prog_key */
401 }
402
403 static void
404 populate_tcs_prog_key(const struct gen_device_info *devinfo,
405 VkPipelineShaderStageCreateFlags flags,
406 unsigned input_vertices,
407 struct brw_tcs_prog_key *key)
408 {
409 memset(key, 0, sizeof(*key));
410
411 populate_base_prog_key(devinfo, flags, &key->base);
412
413 key->input_vertices = input_vertices;
414 }
415
416 static void
417 populate_tes_prog_key(const struct gen_device_info *devinfo,
418 VkPipelineShaderStageCreateFlags flags,
419 struct brw_tes_prog_key *key)
420 {
421 memset(key, 0, sizeof(*key));
422
423 populate_base_prog_key(devinfo, flags, &key->base);
424 }
425
426 static void
427 populate_gs_prog_key(const struct gen_device_info *devinfo,
428 VkPipelineShaderStageCreateFlags flags,
429 struct brw_gs_prog_key *key)
430 {
431 memset(key, 0, sizeof(*key));
432
433 populate_base_prog_key(devinfo, flags, &key->base);
434 }
435
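/* The fragment-shader key is derived from the subpass (which color
 * attachments are actually used) and the multisample state (sample shading,
 * alpha to coverage).  input_slots_valid is left at 0 here and filled in
 * once the previous stage's VUE map is known; see anv_pipeline_compile_fs().
 */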
436 static void
437 populate_wm_prog_key(const struct gen_device_info *devinfo,
438 VkPipelineShaderStageCreateFlags flags,
439 const struct anv_subpass *subpass,
440 const VkPipelineMultisampleStateCreateInfo *ms_info,
441 struct brw_wm_prog_key *key)
442 {
443 memset(key, 0, sizeof(*key));
444
445 populate_base_prog_key(devinfo, flags, &key->base);
446
447    /* We set this to 0 here and set it to the actual value before we call
448 * brw_compile_fs.
449 */
450 key->input_slots_valid = 0;
451
452 /* Vulkan doesn't specify a default */
453 key->high_quality_derivatives = false;
454
455 /* XXX Vulkan doesn't appear to specify */
456 key->clamp_fragment_color = false;
457
458 assert(subpass->color_count <= MAX_RTS);
459 for (uint32_t i = 0; i < subpass->color_count; i++) {
460 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
461 key->color_outputs_valid |= (1 << i);
462 }
463
464 key->nr_color_regions = subpass->color_count;
465
466    /* To reduce possible shader recompilations we would need to know whether
467     * there is a SampleMask output variable, so we could decide whether to
468     * emit the code that works around the hardware disabling alpha to
469     * coverage whenever there is a SampleMask output.
470 */
471 key->alpha_to_coverage = ms_info && ms_info->alphaToCoverageEnable;
472
473 /* Vulkan doesn't support fixed-function alpha test */
474 key->alpha_test_replicate_alpha = false;
475
476 if (ms_info) {
477       /* We should probably pull this out of the shader, but it's fairly
478        * harmless to compute it and then let dead-code elimination handle it.
479 */
480 if (ms_info->rasterizationSamples > 1) {
481 key->persample_interp = ms_info->sampleShadingEnable &&
482 (ms_info->minSampleShading * ms_info->rasterizationSamples) > 1;
483 key->multisample_fbo = true;
484 }
485
486 key->frag_coord_adds_sample_pos = key->persample_interp;
487 }
488 }
489
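/* For compute, the only additional key input is the required or allowed
 * subgroup size coming from VK_EXT_subgroup_size_control.
 */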
490 static void
491 populate_cs_prog_key(const struct gen_device_info *devinfo,
492 VkPipelineShaderStageCreateFlags flags,
493 const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *rss_info,
494 struct brw_cs_prog_key *key)
495 {
496 memset(key, 0, sizeof(*key));
497
498 populate_base_prog_key(devinfo, flags, &key->base);
499
500 if (rss_info) {
501 assert(key->base.subgroup_size_type != BRW_SUBGROUP_SIZE_VARYING);
502
503 /* These enum values are expressly chosen to be equal to the subgroup
504 * size that they require.
505 */
506 assert(rss_info->requiredSubgroupSize == 8 ||
507 rss_info->requiredSubgroupSize == 16 ||
508 rss_info->requiredSubgroupSize == 32);
509 key->base.subgroup_size_type = rss_info->requiredSubgroupSize;
510 } else if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT) {
511 /* If the client expressly requests full subgroups and they don't
512        * specify a subgroup size, we need to pick one.  If they've requested
513 * varying subgroup sizes, we set it to UNIFORM and let the back-end
514 * compiler pick. Otherwise, we specify the API value of 32.
515 * Performance will likely be terrible in this case but there's nothing
516 * we can do about that. The client should have chosen a size.
517 */
518 if (flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
519 key->base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM;
520 else
521 key->base.subgroup_size_type = BRW_SUBGROUP_SIZE_REQUIRE_32;
522 }
523 }
524
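/* Scratch state carried through the compilation of a single shader stage:
 * the source module and its hashes, the program key, the NIR, the bind map,
 * the compiled prog_data and statistics, and the creation-feedback
 * bookkeeping.
 */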
525 struct anv_pipeline_stage {
526 gl_shader_stage stage;
527
528 const struct anv_shader_module *module;
529 const char *entrypoint;
530 const VkSpecializationInfo *spec_info;
531
532 unsigned char shader_sha1[20];
533
534 union brw_any_prog_key key;
535
536 struct {
537 gl_shader_stage stage;
538 unsigned char sha1[20];
539 } cache_key;
540
541 nir_shader *nir;
542
543 struct anv_pipeline_binding surface_to_descriptor[256];
544 struct anv_pipeline_binding sampler_to_descriptor[256];
545 struct anv_pipeline_bind_map bind_map;
546
547 union brw_any_prog_data prog_data;
548
549 uint32_t num_stats;
550 struct brw_compile_stats stats[3];
551 char *disasm[3];
552
553 VkPipelineCreationFeedbackEXT feedback;
554
555 const unsigned *code;
556 };
557
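/* The per-stage shader_sha1 covers everything that affects the generated
 * NIR: the module's SPIR-V hash, the entrypoint name, the stage, and any
 * specialization data.  It also serves as the key for the NIR cache.
 */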
558 static void
559 anv_pipeline_hash_shader(const struct anv_shader_module *module,
560 const char *entrypoint,
561 gl_shader_stage stage,
562 const VkSpecializationInfo *spec_info,
563 unsigned char *sha1_out)
564 {
565 struct mesa_sha1 ctx;
566 _mesa_sha1_init(&ctx);
567
568 _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
569 _mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
570 _mesa_sha1_update(&ctx, &stage, sizeof(stage));
571 if (spec_info) {
572 _mesa_sha1_update(&ctx, spec_info->pMapEntries,
573 spec_info->mapEntryCount *
574 sizeof(*spec_info->pMapEntries));
575 _mesa_sha1_update(&ctx, spec_info->pData,
576 spec_info->dataSize);
577 }
578
579 _mesa_sha1_final(&ctx, sha1_out);
580 }
581
582 static void
583 anv_pipeline_hash_graphics(struct anv_graphics_pipeline *pipeline,
584 struct anv_pipeline_layout *layout,
585 struct anv_pipeline_stage *stages,
586 unsigned char *sha1_out)
587 {
588 struct mesa_sha1 ctx;
589 _mesa_sha1_init(&ctx);
590
591 _mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
592 sizeof(pipeline->subpass->view_mask));
593
594 if (layout)
595 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
596
597 const bool rba = pipeline->base.device->robust_buffer_access;
598 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
599
600 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
601 if (stages[s].entrypoint) {
602 _mesa_sha1_update(&ctx, stages[s].shader_sha1,
603 sizeof(stages[s].shader_sha1));
604 _mesa_sha1_update(&ctx, &stages[s].key, brw_prog_key_size(s));
605 }
606 }
607
608 _mesa_sha1_final(&ctx, sha1_out);
609 }
610
611 static void
612 anv_pipeline_hash_compute(struct anv_compute_pipeline *pipeline,
613 struct anv_pipeline_layout *layout,
614 struct anv_pipeline_stage *stage,
615 unsigned char *sha1_out)
616 {
617 struct mesa_sha1 ctx;
618 _mesa_sha1_init(&ctx);
619
620 if (layout)
621 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
622
623 const bool rba = pipeline->base.device->robust_buffer_access;
624 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
625
626 _mesa_sha1_update(&ctx, stage->shader_sha1,
627 sizeof(stage->shader_sha1));
628 _mesa_sha1_update(&ctx, &stage->key.cs, sizeof(stage->key.cs));
629
630 _mesa_sha1_final(&ctx, sha1_out);
631 }
632
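/* Return the stage's NIR, either from the device's NIR cache (keyed on
 * shader_sha1) or by compiling the SPIR-V and uploading the result to the
 * cache.
 */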
633 static nir_shader *
634 anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
635 struct anv_pipeline_cache *cache,
636 void *mem_ctx,
637 struct anv_pipeline_stage *stage)
638 {
639 const struct brw_compiler *compiler =
640 pipeline->device->physical->compiler;
641 const nir_shader_compiler_options *nir_options =
642 compiler->glsl_compiler_options[stage->stage].NirOptions;
643 nir_shader *nir;
644
645 nir = anv_device_search_for_nir(pipeline->device, cache,
646 nir_options,
647 stage->shader_sha1,
648 mem_ctx);
649 if (nir) {
650 assert(nir->info.stage == stage->stage);
651 return nir;
652 }
653
654 nir = anv_shader_compile_to_nir(pipeline->device,
655 mem_ctx,
656 stage->module,
657 stage->entrypoint,
658 stage->stage,
659 stage->spec_info);
660 if (nir) {
661 anv_device_upload_nir(pipeline->device, cache, nir, stage->shader_sha1);
662 return nir;
663 }
664
665 return NULL;
666 }
667
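/* Driver-specific NIR lowering applied to every stage right before back-end
 * compilation: multiview and input attachments, applying the pipeline layout
 * to descriptors, choosing explicit-I/O address formats for UBO/SSBO/global
 * access, and computing the push-constant layout.
 */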
668 static void
669 anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
670 void *mem_ctx,
671 struct anv_pipeline_stage *stage,
672 struct anv_pipeline_layout *layout)
673 {
674 const struct anv_physical_device *pdevice = pipeline->device->physical;
675 const struct brw_compiler *compiler = pdevice->compiler;
676
677 struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
678 nir_shader *nir = stage->nir;
679
680 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
681 NIR_PASS_V(nir, nir_lower_wpos_center,
682 anv_pipeline_to_graphics(pipeline)->sample_shading_enable);
683 NIR_PASS_V(nir, nir_lower_input_attachments, true);
684 }
685
686 NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
687
688 if (pipeline->type == ANV_PIPELINE_GRAPHICS) {
689 NIR_PASS_V(nir, anv_nir_lower_multiview,
690 anv_pipeline_to_graphics(pipeline)->subpass->view_mask);
691 }
692
693 nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
694
695 NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo, NULL);
696
697 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
698 nir_address_format_64bit_global);
699
700 /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
701 anv_nir_apply_pipeline_layout(pdevice,
702 pipeline->device->robust_buffer_access,
703 layout, nir, &stage->bind_map);
704
705 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo,
706 nir_address_format_32bit_index_offset);
707 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
708 anv_nir_ssbo_addr_format(pdevice,
709 pipeline->device->robust_buffer_access));
710
711 NIR_PASS_V(nir, nir_opt_constant_folding);
712
713    /* We don't support non-uniform UBOs, and non-uniform SSBO access is
714 * handled naturally by falling back to A64 messages.
715 */
716 NIR_PASS_V(nir, nir_lower_non_uniform_access,
717 nir_lower_non_uniform_texture_access |
718 nir_lower_non_uniform_image_access);
719
720 anv_nir_compute_push_layout(pdevice, pipeline->device->robust_buffer_access,
721 nir, prog_data, &stage->bind_map, mem_ctx);
722
723 stage->nir = nir;
724 }
725
726 static void
727 anv_pipeline_link_vs(const struct brw_compiler *compiler,
728 struct anv_pipeline_stage *vs_stage,
729 struct anv_pipeline_stage *next_stage)
730 {
731 if (next_stage)
732 brw_nir_link_shaders(compiler, vs_stage->nir, next_stage->nir);
733 }
734
735 static void
736 anv_pipeline_compile_vs(const struct brw_compiler *compiler,
737 void *mem_ctx,
738 struct anv_device *device,
739 struct anv_pipeline_stage *vs_stage)
740 {
741 brw_compute_vue_map(compiler->devinfo,
742 &vs_stage->prog_data.vs.base.vue_map,
743 vs_stage->nir->info.outputs_written,
744 vs_stage->nir->info.separate_shader, 1);
745
746 vs_stage->num_stats = 1;
747 vs_stage->code = brw_compile_vs(compiler, device, mem_ctx,
748 &vs_stage->key.vs,
749 &vs_stage->prog_data.vs,
750 vs_stage->nir, -1,
751 vs_stage->stats, NULL);
752 }
753
754 static void
755 merge_tess_info(struct shader_info *tes_info,
756 const struct shader_info *tcs_info)
757 {
758 /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
759 *
760 * "PointMode. Controls generation of points rather than triangles
761 * or lines. This functionality defaults to disabled, and is
762 * enabled if either shader stage includes the execution mode.
763     *  enabled if either shader stage includes the execution mode."
764 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
765 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
766 * and OutputVertices, it says:
767 *
768 * "One mode must be set in at least one of the tessellation
769 * shader stages."
770 *
771 * So, the fields can be set in either the TCS or TES, but they must
772 * agree if set in both. Our backend looks at TES, so bitwise-or in
773 * the values from the TCS.
774 */
775 assert(tcs_info->tess.tcs_vertices_out == 0 ||
776 tes_info->tess.tcs_vertices_out == 0 ||
777 tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
778 tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
779
780 assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
781 tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
782 tcs_info->tess.spacing == tes_info->tess.spacing);
783 tes_info->tess.spacing |= tcs_info->tess.spacing;
784
785 assert(tcs_info->tess.primitive_mode == 0 ||
786 tes_info->tess.primitive_mode == 0 ||
787 tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
788 tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
789 tes_info->tess.ccw |= tcs_info->tess.ccw;
790 tes_info->tess.point_mode |= tcs_info->tess.point_mode;
791 }
792
793 static void
794 anv_pipeline_link_tcs(const struct brw_compiler *compiler,
795 struct anv_pipeline_stage *tcs_stage,
796 struct anv_pipeline_stage *tes_stage)
797 {
798 assert(tes_stage && tes_stage->stage == MESA_SHADER_TESS_EVAL);
799
800 brw_nir_link_shaders(compiler, tcs_stage->nir, tes_stage->nir);
801
802 nir_lower_patch_vertices(tes_stage->nir,
803 tcs_stage->nir->info.tess.tcs_vertices_out,
804 NULL);
805
806 /* Copy TCS info into the TES info */
807 merge_tess_info(&tes_stage->nir->info, &tcs_stage->nir->info);
808
809 /* Whacking the key after cache lookup is a bit sketchy, but all of
810 * this comes from the SPIR-V, which is part of the hash used for the
811 * pipeline cache. So it should be safe.
812 */
813 tcs_stage->key.tcs.tes_primitive_mode =
814 tes_stage->nir->info.tess.primitive_mode;
815 tcs_stage->key.tcs.quads_workaround =
816 compiler->devinfo->gen < 9 &&
817 tes_stage->nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
818 tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
819 }
820
821 static void
822 anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
823 void *mem_ctx,
824 struct anv_device *device,
825 struct anv_pipeline_stage *tcs_stage,
826 struct anv_pipeline_stage *prev_stage)
827 {
828 tcs_stage->key.tcs.outputs_written =
829 tcs_stage->nir->info.outputs_written;
830 tcs_stage->key.tcs.patch_outputs_written =
831 tcs_stage->nir->info.patch_outputs_written;
832
833 tcs_stage->num_stats = 1;
834 tcs_stage->code = brw_compile_tcs(compiler, device, mem_ctx,
835 &tcs_stage->key.tcs,
836 &tcs_stage->prog_data.tcs,
837 tcs_stage->nir, -1,
838 tcs_stage->stats, NULL);
839 }
840
841 static void
842 anv_pipeline_link_tes(const struct brw_compiler *compiler,
843 struct anv_pipeline_stage *tes_stage,
844 struct anv_pipeline_stage *next_stage)
845 {
846 if (next_stage)
847 brw_nir_link_shaders(compiler, tes_stage->nir, next_stage->nir);
848 }
849
850 static void
851 anv_pipeline_compile_tes(const struct brw_compiler *compiler,
852 void *mem_ctx,
853 struct anv_device *device,
854 struct anv_pipeline_stage *tes_stage,
855 struct anv_pipeline_stage *tcs_stage)
856 {
857 tes_stage->key.tes.inputs_read =
858 tcs_stage->nir->info.outputs_written;
859 tes_stage->key.tes.patch_inputs_read =
860 tcs_stage->nir->info.patch_outputs_written;
861
862 tes_stage->num_stats = 1;
863 tes_stage->code = brw_compile_tes(compiler, device, mem_ctx,
864 &tes_stage->key.tes,
865 &tcs_stage->prog_data.tcs.base.vue_map,
866 &tes_stage->prog_data.tes,
867 tes_stage->nir, -1,
868 tes_stage->stats, NULL);
869 }
870
871 static void
872 anv_pipeline_link_gs(const struct brw_compiler *compiler,
873 struct anv_pipeline_stage *gs_stage,
874 struct anv_pipeline_stage *next_stage)
875 {
876 if (next_stage)
877 brw_nir_link_shaders(compiler, gs_stage->nir, next_stage->nir);
878 }
879
880 static void
881 anv_pipeline_compile_gs(const struct brw_compiler *compiler,
882 void *mem_ctx,
883 struct anv_device *device,
884 struct anv_pipeline_stage *gs_stage,
885 struct anv_pipeline_stage *prev_stage)
886 {
887 brw_compute_vue_map(compiler->devinfo,
888 &gs_stage->prog_data.gs.base.vue_map,
889 gs_stage->nir->info.outputs_written,
890 gs_stage->nir->info.separate_shader, 1);
891
892 gs_stage->num_stats = 1;
893 gs_stage->code = brw_compile_gs(compiler, device, mem_ctx,
894 &gs_stage->key.gs,
895 &gs_stage->prog_data.gs,
896 gs_stage->nir, NULL, -1,
897 gs_stage->stats, NULL);
898 }
899
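/* "Linking" the fragment shader mostly means wiring up its render-target
 * bindings: one surface per valid color output (or a single null render
 * target if there are none) and discarding writes to attachments the subpass
 * doesn't use.
 */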
900 static void
901 anv_pipeline_link_fs(const struct brw_compiler *compiler,
902 struct anv_pipeline_stage *stage)
903 {
904 unsigned num_rt_bindings;
905 struct anv_pipeline_binding rt_bindings[MAX_RTS];
906 if (stage->key.wm.nr_color_regions > 0) {
907 assert(stage->key.wm.nr_color_regions <= MAX_RTS);
908 for (unsigned rt = 0; rt < stage->key.wm.nr_color_regions; rt++) {
909 if (stage->key.wm.color_outputs_valid & BITFIELD_BIT(rt)) {
910 rt_bindings[rt] = (struct anv_pipeline_binding) {
911 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
912 .index = rt,
913 };
914 } else {
915 /* Setup a null render target */
916 rt_bindings[rt] = (struct anv_pipeline_binding) {
917 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
918 .index = UINT32_MAX,
919 };
920 }
921 }
922 num_rt_bindings = stage->key.wm.nr_color_regions;
923 } else {
924 /* Setup a null render target */
925 rt_bindings[0] = (struct anv_pipeline_binding) {
926 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
927 .index = UINT32_MAX,
928 };
929 num_rt_bindings = 1;
930 }
931
932 assert(num_rt_bindings <= MAX_RTS);
933 assert(stage->bind_map.surface_count == 0);
934 typed_memcpy(stage->bind_map.surface_to_descriptor,
935 rt_bindings, num_rt_bindings);
936 stage->bind_map.surface_count += num_rt_bindings;
937
938 /* Now that we've set up the color attachments, we can go through and
939 * eliminate any shader outputs that map to VK_ATTACHMENT_UNUSED in the
940 * hopes that dead code can clean them up in this and any earlier shader
941 * stages.
942 */
943 nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
944 bool deleted_output = false;
945 nir_foreach_variable_safe(var, &stage->nir->outputs) {
946 /* TODO: We don't delete depth/stencil writes. We probably could if the
947 * subpass doesn't have a depth/stencil attachment.
948 */
949 if (var->data.location < FRAG_RESULT_DATA0)
950 continue;
951
952 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
953
954 /* If this is the RT at location 0 and we have alpha to coverage
955 * enabled we still need that write because it will affect the coverage
956 * mask even if it's never written to a color target.
957 */
958 if (rt == 0 && stage->key.wm.alpha_to_coverage)
959 continue;
960
961 const unsigned array_len =
962 glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
963 assert(rt + array_len <= MAX_RTS);
964
965 if (rt >= MAX_RTS || !(stage->key.wm.color_outputs_valid &
966 BITFIELD_RANGE(rt, array_len))) {
967 deleted_output = true;
968 var->data.mode = nir_var_function_temp;
969 exec_node_remove(&var->node);
970 exec_list_push_tail(&impl->locals, &var->node);
971 }
972 }
973
974 if (deleted_output)
975 nir_fixup_deref_modes(stage->nir);
976
977 /* We stored the number of subpass color attachments in nr_color_regions
978 * when calculating the key for caching. Now that we've computed the bind
979 * map, we can reduce this to the actual max before we go into the back-end
980 * compiler.
981 */
982 stage->key.wm.nr_color_regions =
983 util_last_bit(stage->key.wm.color_outputs_valid);
984 }
985
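/* Unlike the other stages, a fragment shader may be compiled for several
 * SIMD widths at once, so num_stats ends up being one per enabled dispatch
 * width (SIMD8/16/32).
 */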
986 static void
987 anv_pipeline_compile_fs(const struct brw_compiler *compiler,
988 void *mem_ctx,
989 struct anv_device *device,
990 struct anv_pipeline_stage *fs_stage,
991 struct anv_pipeline_stage *prev_stage)
992 {
993 /* TODO: we could set this to 0 based on the information in nir_shader, but
994 * we need this before we call spirv_to_nir.
995 */
996 assert(prev_stage);
997 fs_stage->key.wm.input_slots_valid =
998 prev_stage->prog_data.vue.vue_map.slots_valid;
999
1000 fs_stage->code = brw_compile_fs(compiler, device, mem_ctx,
1001 &fs_stage->key.wm,
1002 &fs_stage->prog_data.wm,
1003 fs_stage->nir, -1, -1, -1,
1004 true, false, NULL,
1005 fs_stage->stats, NULL);
1006
1007 fs_stage->num_stats = (uint32_t)fs_stage->prog_data.wm.dispatch_8 +
1008 (uint32_t)fs_stage->prog_data.wm.dispatch_16 +
1009 (uint32_t)fs_stage->prog_data.wm.dispatch_32;
1010
1011 if (fs_stage->key.wm.color_outputs_valid == 0 &&
1012 !fs_stage->prog_data.wm.has_side_effects &&
1013 !fs_stage->prog_data.wm.uses_omask &&
1014 !fs_stage->key.wm.alpha_to_coverage &&
1015 !fs_stage->prog_data.wm.uses_kill &&
1016 fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
1017 !fs_stage->prog_data.wm.computed_stencil) {
1018 /* This fragment shader has no outputs and no side effects. Go ahead
1019 * and return the code pointer so we don't accidentally think the
1020        * compile failed, but zero out prog_data, which will set program_size
1021        * to zero and disable the stage.
1022 */
1023 memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
1024 }
1025 }
1026
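/* Record one executable (one dispatch width of one stage) for
 * VK_KHR_pipeline_executable_properties.  NIR and assembly are only captured
 * when the pipeline was created with
 * VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR.
 */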
1027 static void
1028 anv_pipeline_add_executable(struct anv_pipeline *pipeline,
1029 struct anv_pipeline_stage *stage,
1030 struct brw_compile_stats *stats,
1031 uint32_t code_offset)
1032 {
1033 char *nir = NULL;
1034 if (stage->nir &&
1035 (pipeline->flags &
1036 VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
1037 char *stream_data = NULL;
1038 size_t stream_size = 0;
1039 FILE *stream = open_memstream(&stream_data, &stream_size);
1040
1041 nir_print_shader(stage->nir, stream);
1042
1043 fclose(stream);
1044
1045 /* Copy it to a ralloc'd thing */
1046 nir = ralloc_size(pipeline->mem_ctx, stream_size + 1);
1047 memcpy(nir, stream_data, stream_size);
1048 nir[stream_size] = 0;
1049
1050 free(stream_data);
1051 }
1052
1053 char *disasm = NULL;
1054 if (stage->code &&
1055 (pipeline->flags &
1056 VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
1057 char *stream_data = NULL;
1058 size_t stream_size = 0;
1059 FILE *stream = open_memstream(&stream_data, &stream_size);
1060
1061 uint32_t push_size = 0;
1062 for (unsigned i = 0; i < 4; i++)
1063 push_size += stage->bind_map.push_ranges[i].length;
1064 if (push_size > 0) {
1065 fprintf(stream, "Push constant ranges:\n");
1066 for (unsigned i = 0; i < 4; i++) {
1067 if (stage->bind_map.push_ranges[i].length == 0)
1068 continue;
1069
1070 fprintf(stream, " RANGE%d (%dB): ", i,
1071 stage->bind_map.push_ranges[i].length * 32);
1072
1073 switch (stage->bind_map.push_ranges[i].set) {
1074 case ANV_DESCRIPTOR_SET_NULL:
1075 fprintf(stream, "NULL");
1076 break;
1077
1078 case ANV_DESCRIPTOR_SET_PUSH_CONSTANTS:
1079 fprintf(stream, "Vulkan push constants and API params");
1080 break;
1081
1082 case ANV_DESCRIPTOR_SET_DESCRIPTORS:
1083 fprintf(stream, "Descriptor buffer for set %d (start=%dB)",
1084 stage->bind_map.push_ranges[i].index,
1085 stage->bind_map.push_ranges[i].start * 32);
1086 break;
1087
1088 case ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS:
1089 unreachable("gl_NumWorkgroups is never pushed");
1090
1091 case ANV_DESCRIPTOR_SET_SHADER_CONSTANTS:
1092 fprintf(stream, "Inline shader constant data (start=%dB)",
1093 stage->bind_map.push_ranges[i].start * 32);
1094 break;
1095
1096 case ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS:
1097 unreachable("Color attachments can't be pushed");
1098
1099 default:
1100 fprintf(stream, "UBO (set=%d binding=%d start=%dB)",
1101 stage->bind_map.push_ranges[i].set,
1102 stage->bind_map.push_ranges[i].index,
1103 stage->bind_map.push_ranges[i].start * 32);
1104 break;
1105 }
1106 fprintf(stream, "\n");
1107 }
1108 fprintf(stream, "\n");
1109 }
1110
1111 /* Creating this is far cheaper than it looks. It's perfectly fine to
1112 * do it for every binary.
1113 */
1114 struct gen_disasm *d = gen_disasm_create(&pipeline->device->info);
1115 gen_disasm_disassemble(d, stage->code, code_offset, stream);
1116 gen_disasm_destroy(d);
1117
1118 fclose(stream);
1119
1120 /* Copy it to a ralloc'd thing */
1121 disasm = ralloc_size(pipeline->mem_ctx, stream_size + 1);
1122 memcpy(disasm, stream_data, stream_size);
1123 disasm[stream_size] = 0;
1124
1125 free(stream_data);
1126 }
1127
1128 const struct anv_pipeline_executable exe = {
1129 .stage = stage->stage,
1130 .stats = *stats,
1131 .nir = nir,
1132 .disasm = disasm,
1133 };
1134 util_dynarray_append(&pipeline->executables,
1135 struct anv_pipeline_executable, exe);
1136 }
1137
1138 static void
1139 anv_pipeline_add_executables(struct anv_pipeline *pipeline,
1140 struct anv_pipeline_stage *stage,
1141 struct anv_shader_bin *bin)
1142 {
1143 if (stage->stage == MESA_SHADER_FRAGMENT) {
1144 /* We pull the prog data and stats out of the anv_shader_bin because
1145 * the anv_pipeline_stage may not be fully populated if we successfully
1146 * looked up the shader in a cache.
1147 */
1148 const struct brw_wm_prog_data *wm_prog_data =
1149 (const struct brw_wm_prog_data *)bin->prog_data;
1150 struct brw_compile_stats *stats = bin->stats;
1151
1152 if (wm_prog_data->dispatch_8) {
1153 anv_pipeline_add_executable(pipeline, stage, stats++, 0);
1154 }
1155
1156 if (wm_prog_data->dispatch_16) {
1157 anv_pipeline_add_executable(pipeline, stage, stats++,
1158 wm_prog_data->prog_offset_16);
1159 }
1160
1161 if (wm_prog_data->dispatch_32) {
1162 anv_pipeline_add_executable(pipeline, stage, stats++,
1163 wm_prog_data->prog_offset_32);
1164 }
1165 } else {
1166 anv_pipeline_add_executable(pipeline, stage, bin->stats, 0);
1167 }
1168 }
1169
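/* Top-level graphics compile path: hash each stage, try the pipeline cache,
 * and only if something is missing fall back to fetching NIR, linking the
 * stages back-to-front, compiling them front-to-back, and uploading the
 * resulting kernels.  Creation feedback is accumulated along the way for
 * VK_EXT_pipeline_creation_feedback.
 */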
1170 static VkResult
1171 anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
1172 struct anv_pipeline_cache *cache,
1173 const VkGraphicsPipelineCreateInfo *info)
1174 {
1175 VkPipelineCreationFeedbackEXT pipeline_feedback = {
1176 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1177 };
1178 int64_t pipeline_start = os_time_get_nano();
1179
1180 const struct brw_compiler *compiler = pipeline->base.device->physical->compiler;
1181 struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
1182
1183 pipeline->active_stages = 0;
1184
1185 VkResult result;
1186 for (uint32_t i = 0; i < info->stageCount; i++) {
1187 const VkPipelineShaderStageCreateInfo *sinfo = &info->pStages[i];
1188 gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
1189
1190 pipeline->active_stages |= sinfo->stage;
1191
1192 int64_t stage_start = os_time_get_nano();
1193
1194 stages[stage].stage = stage;
1195 stages[stage].module = anv_shader_module_from_handle(sinfo->module);
1196 stages[stage].entrypoint = sinfo->pName;
1197 stages[stage].spec_info = sinfo->pSpecializationInfo;
1198 anv_pipeline_hash_shader(stages[stage].module,
1199 stages[stage].entrypoint,
1200 stage,
1201 stages[stage].spec_info,
1202 stages[stage].shader_sha1);
1203
1204 const struct gen_device_info *devinfo = &pipeline->base.device->info;
1205 switch (stage) {
1206 case MESA_SHADER_VERTEX:
1207 populate_vs_prog_key(devinfo, sinfo->flags, &stages[stage].key.vs);
1208 break;
1209 case MESA_SHADER_TESS_CTRL:
1210 populate_tcs_prog_key(devinfo, sinfo->flags,
1211 info->pTessellationState->patchControlPoints,
1212 &stages[stage].key.tcs);
1213 break;
1214 case MESA_SHADER_TESS_EVAL:
1215 populate_tes_prog_key(devinfo, sinfo->flags, &stages[stage].key.tes);
1216 break;
1217 case MESA_SHADER_GEOMETRY:
1218 populate_gs_prog_key(devinfo, sinfo->flags, &stages[stage].key.gs);
1219 break;
1220 case MESA_SHADER_FRAGMENT: {
1221 const bool raster_enabled =
1222 !info->pRasterizationState->rasterizerDiscardEnable;
1223 populate_wm_prog_key(devinfo, sinfo->flags,
1224 pipeline->subpass,
1225 raster_enabled ? info->pMultisampleState : NULL,
1226 &stages[stage].key.wm);
1227 break;
1228 }
1229 default:
1230 unreachable("Invalid graphics shader stage");
1231 }
1232
1233 stages[stage].feedback.duration += os_time_get_nano() - stage_start;
1234 stages[stage].feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
1235 }
1236
1237 if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
1238 pipeline->active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
1239
1240 assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
1241
1242 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1243
1244 unsigned char sha1[20];
1245 anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
1246
1247 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1248 if (!stages[s].entrypoint)
1249 continue;
1250
1251 stages[s].cache_key.stage = s;
1252 memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
1253 }
1254
1255 const bool skip_cache_lookup =
1256 (pipeline->base.flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
1257
1258 if (!skip_cache_lookup) {
1259 unsigned found = 0;
1260 unsigned cache_hits = 0;
1261 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1262 if (!stages[s].entrypoint)
1263 continue;
1264
1265 int64_t stage_start = os_time_get_nano();
1266
1267 bool cache_hit;
1268 struct anv_shader_bin *bin =
1269 anv_device_search_for_kernel(pipeline->base.device, cache,
1270 &stages[s].cache_key,
1271 sizeof(stages[s].cache_key), &cache_hit);
1272 if (bin) {
1273 found++;
1274 pipeline->shaders[s] = bin;
1275 }
1276
1277 if (cache_hit) {
1278 cache_hits++;
1279 stages[s].feedback.flags |=
1280 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1281 }
1282 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1283 }
1284
1285 if (found == __builtin_popcount(pipeline->active_stages)) {
1286 if (cache_hits == found) {
1287 pipeline_feedback.flags |=
1288 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1289 }
1290 /* We found all our shaders in the cache. We're done. */
1291 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1292 if (!stages[s].entrypoint)
1293 continue;
1294
1295 anv_pipeline_add_executables(&pipeline->base, &stages[s],
1296 pipeline->shaders[s]);
1297 }
1298 goto done;
1299 } else if (found > 0) {
1300 /* We found some but not all of our shaders. This shouldn't happen
1301 * most of the time but it can if we have a partially populated
1302 * pipeline cache.
1303 */
1304 assert(found < __builtin_popcount(pipeline->active_stages));
1305
1306 vk_debug_report(&pipeline->base.device->physical->instance->debug_report_callbacks,
1307 VK_DEBUG_REPORT_WARNING_BIT_EXT |
1308 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
1309 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
1310 (uint64_t)(uintptr_t)cache,
1311 0, 0, "anv",
1312 "Found a partial pipeline in the cache. This is "
1313 "most likely caused by an incomplete pipeline cache "
1314 "import or export");
1315
1316 /* We're going to have to recompile anyway, so just throw away our
1317 * references to the shaders in the cache. We'll get them out of the
1318 * cache again as part of the compilation process.
1319 */
1320 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1321 stages[s].feedback.flags = 0;
1322 if (pipeline->shaders[s]) {
1323 anv_shader_bin_unref(pipeline->base.device, pipeline->shaders[s]);
1324 pipeline->shaders[s] = NULL;
1325 }
1326 }
1327 }
1328 }
1329
1330 void *pipeline_ctx = ralloc_context(NULL);
1331
1332 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1333 if (!stages[s].entrypoint)
1334 continue;
1335
1336 int64_t stage_start = os_time_get_nano();
1337
1338 assert(stages[s].stage == s);
1339 assert(pipeline->shaders[s] == NULL);
1340
1341 stages[s].bind_map = (struct anv_pipeline_bind_map) {
1342 .surface_to_descriptor = stages[s].surface_to_descriptor,
1343 .sampler_to_descriptor = stages[s].sampler_to_descriptor
1344 };
1345
1346 stages[s].nir = anv_pipeline_stage_get_nir(&pipeline->base, cache,
1347 pipeline_ctx,
1348 &stages[s]);
1349 if (stages[s].nir == NULL) {
1350 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1351 goto fail;
1352 }
1353
1354 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1355 }
1356
1357 /* Walk backwards to link */
1358 struct anv_pipeline_stage *next_stage = NULL;
1359 for (int s = MESA_SHADER_STAGES - 1; s >= 0; s--) {
1360 if (!stages[s].entrypoint)
1361 continue;
1362
1363 switch (s) {
1364 case MESA_SHADER_VERTEX:
1365 anv_pipeline_link_vs(compiler, &stages[s], next_stage);
1366 break;
1367 case MESA_SHADER_TESS_CTRL:
1368 anv_pipeline_link_tcs(compiler, &stages[s], next_stage);
1369 break;
1370 case MESA_SHADER_TESS_EVAL:
1371 anv_pipeline_link_tes(compiler, &stages[s], next_stage);
1372 break;
1373 case MESA_SHADER_GEOMETRY:
1374 anv_pipeline_link_gs(compiler, &stages[s], next_stage);
1375 break;
1376 case MESA_SHADER_FRAGMENT:
1377 anv_pipeline_link_fs(compiler, &stages[s]);
1378 break;
1379 default:
1380 unreachable("Invalid graphics shader stage");
1381 }
1382
1383 next_stage = &stages[s];
1384 }
1385
1386 struct anv_pipeline_stage *prev_stage = NULL;
1387 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1388 if (!stages[s].entrypoint)
1389 continue;
1390
1391 int64_t stage_start = os_time_get_nano();
1392
1393 void *stage_ctx = ralloc_context(NULL);
1394
1395 nir_xfb_info *xfb_info = NULL;
1396 if (s == MESA_SHADER_VERTEX ||
1397 s == MESA_SHADER_TESS_EVAL ||
1398 s == MESA_SHADER_GEOMETRY)
1399 xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);
1400
1401 anv_pipeline_lower_nir(&pipeline->base, stage_ctx, &stages[s], layout);
1402
1403 switch (s) {
1404 case MESA_SHADER_VERTEX:
1405 anv_pipeline_compile_vs(compiler, stage_ctx, pipeline->base.device,
1406 &stages[s]);
1407 break;
1408 case MESA_SHADER_TESS_CTRL:
1409 anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->base.device,
1410 &stages[s], prev_stage);
1411 break;
1412 case MESA_SHADER_TESS_EVAL:
1413 anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->base.device,
1414 &stages[s], prev_stage);
1415 break;
1416 case MESA_SHADER_GEOMETRY:
1417 anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->base.device,
1418 &stages[s], prev_stage);
1419 break;
1420 case MESA_SHADER_FRAGMENT:
1421 anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->base.device,
1422 &stages[s], prev_stage);
1423 break;
1424 default:
1425 unreachable("Invalid graphics shader stage");
1426 }
1427 if (stages[s].code == NULL) {
1428 ralloc_free(stage_ctx);
1429 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1430 goto fail;
1431 }
1432
1433 anv_nir_validate_push_layout(&stages[s].prog_data.base,
1434 &stages[s].bind_map);
1435
1436 struct anv_shader_bin *bin =
1437 anv_device_upload_kernel(pipeline->base.device, cache, s,
1438 &stages[s].cache_key,
1439 sizeof(stages[s].cache_key),
1440 stages[s].code,
1441 stages[s].prog_data.base.program_size,
1442 stages[s].nir->constant_data,
1443 stages[s].nir->constant_data_size,
1444 &stages[s].prog_data.base,
1445 brw_prog_data_size(s),
1446 stages[s].stats, stages[s].num_stats,
1447 xfb_info, &stages[s].bind_map);
1448 if (!bin) {
1449 ralloc_free(stage_ctx);
1450 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1451 goto fail;
1452 }
1453
1454 anv_pipeline_add_executables(&pipeline->base, &stages[s], bin);
1455
1456 pipeline->shaders[s] = bin;
1457 ralloc_free(stage_ctx);
1458
1459 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1460
1461 prev_stage = &stages[s];
1462 }
1463
1464 ralloc_free(pipeline_ctx);
1465
1466 done:
1467
1468 if (pipeline->shaders[MESA_SHADER_FRAGMENT] &&
1469 pipeline->shaders[MESA_SHADER_FRAGMENT]->prog_data->program_size == 0) {
1470 /* This can happen if we decided to implicitly disable the fragment
1471 * shader. See anv_pipeline_compile_fs().
1472 */
1473 anv_shader_bin_unref(pipeline->base.device,
1474 pipeline->shaders[MESA_SHADER_FRAGMENT]);
1475 pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
1476 pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
1477 }
1478
1479 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1480
1481 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1482 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1483 if (create_feedback) {
1484 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1485
1486 assert(info->stageCount == create_feedback->pipelineStageCreationFeedbackCount);
1487 for (uint32_t i = 0; i < info->stageCount; i++) {
1488 gl_shader_stage s = vk_to_mesa_shader_stage(info->pStages[i].stage);
1489 create_feedback->pPipelineStageCreationFeedbacks[i] = stages[s].feedback;
1490 }
1491 }
1492
1493 return VK_SUCCESS;
1494
1495 fail:
1496 ralloc_free(pipeline_ctx);
1497
1498 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1499 if (pipeline->shaders[s])
1500 anv_shader_bin_unref(pipeline->base.device, pipeline->shaders[s]);
1501 }
1502
1503 return result;
1504 }
1505
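/* Size/alignment callback used when lowering shared variables to explicit
 * offsets: scalars and vectors are packed by component size, except that
 * 3-component vectors are aligned like 4-component ones.
 */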
1506 static void
1507 shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
1508 {
1509 assert(glsl_type_is_vector_or_scalar(type));
1510
1511 uint32_t comp_size = glsl_type_is_boolean(type)
1512 ? 4 : glsl_get_bit_size(type) / 8;
1513 unsigned length = glsl_get_vector_elements(type);
1514    *size = comp_size * length;
1515 *align = comp_size * (length == 3 ? 4 : length);
1516 }
1517
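/* Compute pipelines have exactly one stage, so this is a simpler version of
 * the graphics path above: hash, check the cache, and on a miss get the NIR,
 * lower shared memory to explicit offsets, compile with brw_compile_cs, and
 * upload the kernel.  A binding-table slot is always reserved for
 * gl_NumWorkGroups and nulled out afterwards if the shader doesn't use it.
 */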
1518 VkResult
1519 anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
1520 struct anv_pipeline_cache *cache,
1521 const VkComputePipelineCreateInfo *info,
1522 const struct anv_shader_module *module,
1523 const char *entrypoint,
1524 const VkSpecializationInfo *spec_info)
1525 {
1526 VkPipelineCreationFeedbackEXT pipeline_feedback = {
1527 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1528 };
1529 int64_t pipeline_start = os_time_get_nano();
1530
1531 const struct brw_compiler *compiler = pipeline->base.device->physical->compiler;
1532
1533 struct anv_pipeline_stage stage = {
1534 .stage = MESA_SHADER_COMPUTE,
1535 .module = module,
1536 .entrypoint = entrypoint,
1537 .spec_info = spec_info,
1538 .cache_key = {
1539 .stage = MESA_SHADER_COMPUTE,
1540 },
1541 .feedback = {
1542 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1543 },
1544 };
1545 anv_pipeline_hash_shader(stage.module,
1546 stage.entrypoint,
1547 MESA_SHADER_COMPUTE,
1548 stage.spec_info,
1549 stage.shader_sha1);
1550
1551 struct anv_shader_bin *bin = NULL;
1552
1553 const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *rss_info =
1554 vk_find_struct_const(info->stage.pNext,
1555 PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
1556
1557 populate_cs_prog_key(&pipeline->base.device->info, info->stage.flags,
1558 rss_info, &stage.key.cs);
1559
1560 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1561
1562 const bool skip_cache_lookup =
1563 (pipeline->base.flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
1564
1565 anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
1566
1567 bool cache_hit = false;
1568 if (!skip_cache_lookup) {
1569 bin = anv_device_search_for_kernel(pipeline->base.device, cache,
1570 &stage.cache_key,
1571 sizeof(stage.cache_key),
1572 &cache_hit);
1573 }
1574
1575 void *mem_ctx = ralloc_context(NULL);
1576 if (bin == NULL) {
1577 int64_t stage_start = os_time_get_nano();
1578
1579 stage.bind_map = (struct anv_pipeline_bind_map) {
1580 .surface_to_descriptor = stage.surface_to_descriptor,
1581 .sampler_to_descriptor = stage.sampler_to_descriptor
1582 };
1583
1584 /* Set up a binding for the gl_NumWorkGroups */
1585 stage.bind_map.surface_count = 1;
1586 stage.bind_map.surface_to_descriptor[0] = (struct anv_pipeline_binding) {
1587 .set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
1588 };
1589
1590 stage.nir = anv_pipeline_stage_get_nir(&pipeline->base, cache, mem_ctx, &stage);
1591 if (stage.nir == NULL) {
1592 ralloc_free(mem_ctx);
1593 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1594 }
1595
1596 NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id);
1597
1598 anv_pipeline_lower_nir(&pipeline->base, mem_ctx, &stage, layout);
1599
1600 NIR_PASS_V(stage.nir, nir_lower_vars_to_explicit_types,
1601 nir_var_mem_shared, shared_type_info);
1602 NIR_PASS_V(stage.nir, nir_lower_explicit_io,
1603 nir_var_mem_shared, nir_address_format_32bit_offset);
1604
1605 stage.num_stats = 1;
1606 stage.code = brw_compile_cs(compiler, pipeline->base.device, mem_ctx,
1607 &stage.key.cs, &stage.prog_data.cs,
1608 stage.nir, -1, stage.stats, NULL);
1609 if (stage.code == NULL) {
1610 ralloc_free(mem_ctx);
1611 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1612 }
1613
1614 anv_nir_validate_push_layout(&stage.prog_data.base, &stage.bind_map);
1615
1616 if (!stage.prog_data.cs.uses_num_work_groups) {
1617 assert(stage.bind_map.surface_to_descriptor[0].set ==
1618 ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS);
1619 stage.bind_map.surface_to_descriptor[0].set = ANV_DESCRIPTOR_SET_NULL;
1620 }
1621
1622 const unsigned code_size = stage.prog_data.base.program_size;
1623 bin = anv_device_upload_kernel(pipeline->base.device, cache,
1624 MESA_SHADER_COMPUTE,
1625 &stage.cache_key, sizeof(stage.cache_key),
1626 stage.code, code_size,
1627 stage.nir->constant_data,
1628 stage.nir->constant_data_size,
1629 &stage.prog_data.base,
1630 sizeof(stage.prog_data.cs),
1631 stage.stats, stage.num_stats,
1632 NULL, &stage.bind_map);
1633 if (!bin) {
1634 ralloc_free(mem_ctx);
1635 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1636 }
1637
1638 stage.feedback.duration = os_time_get_nano() - stage_start;
1639 }
1640
1641 anv_pipeline_add_executables(&pipeline->base, &stage, bin);
1642
1643 ralloc_free(mem_ctx);
1644
1645 if (cache_hit) {
1646 stage.feedback.flags |=
1647 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1648 pipeline_feedback.flags |=
1649 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1650 }
1651 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1652
1653 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1654 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1655 if (create_feedback) {
1656 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1657
1658 assert(create_feedback->pipelineStageCreationFeedbackCount == 1);
1659 create_feedback->pPipelineStageCreationFeedbacks[0] = stage.feedback;
1660 }
1661
1662 pipeline->cs = bin;
1663
1664 return VK_SUCCESS;
1665 }
1666
1667 /**
1668 * Copy pipeline state not marked as dynamic.
1669 * Dynamic state is pipeline state which hasn't been provided at pipeline
1670 * creation time, but is dynamically provided afterwards using various
1671 * vkCmdSet* functions.
1672 *
1673 * The set of state considered "non_dynamic" is determined by the pieces of
1674 * state that have their corresponding VkDynamicState enums omitted from
1675 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
1676 *
1677 * @param[out] pipeline Destination non_dynamic state.
1678 * @param[in] pCreateInfo Source of non_dynamic state to be copied.
1679 */
1680 static void
1681 copy_non_dynamic_state(struct anv_graphics_pipeline *pipeline,
1682 const VkGraphicsPipelineCreateInfo *pCreateInfo)
1683 {
1684 anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
1685 struct anv_subpass *subpass = pipeline->subpass;
1686
1687 pipeline->dynamic_state = default_dynamic_state;
1688
1689 if (pCreateInfo->pDynamicState) {
1690 /* Remove all of the states that are marked as dynamic */
1691 uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
1692 for (uint32_t s = 0; s < count; s++) {
1693 states &= ~anv_cmd_dirty_bit_for_vk_dynamic_state(
1694 pCreateInfo->pDynamicState->pDynamicStates[s]);
1695 }
1696 }
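   /* For illustration (application-side usage, not driver code): a client
    * that wants to set the viewport at record time rather than bake it into
    * the pipeline would chain something like
    *
    *    const VkDynamicState dyn_states[] = { VK_DYNAMIC_STATE_VIEWPORT };
    *    const VkPipelineDynamicStateCreateInfo dyn = {
    *       .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
    *       .dynamicStateCount = 1,
    *       .pDynamicStates = dyn_states,
    *    };
    *
    * into VkGraphicsPipelineCreateInfo::pDynamicState. The loop above then
    * clears ANV_CMD_DIRTY_DYNAMIC_VIEWPORT from `states`, the viewport copy
    * below is skipped, and the value used at draw time comes from
    * vkCmdSetViewport() instead.
    */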
1697
1698 struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
1699
1700 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1701 *
1702 * pViewportState is [...] NULL if the pipeline
1703 * has rasterization disabled.
1704 */
1705 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1706 assert(pCreateInfo->pViewportState);
1707
1708 dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
1709 if (states & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) {
1710 typed_memcpy(dynamic->viewport.viewports,
1711 pCreateInfo->pViewportState->pViewports,
1712 pCreateInfo->pViewportState->viewportCount);
1713 }
1714
1715 dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
1716 if (states & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) {
1717 typed_memcpy(dynamic->scissor.scissors,
1718 pCreateInfo->pViewportState->pScissors,
1719 pCreateInfo->pViewportState->scissorCount);
1720 }
1721 }
1722
1723 if (states & ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) {
1724 assert(pCreateInfo->pRasterizationState);
1725 dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
1726 }
1727
1728 if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS) {
1729 assert(pCreateInfo->pRasterizationState);
1730 dynamic->depth_bias.bias =
1731 pCreateInfo->pRasterizationState->depthBiasConstantFactor;
1732 dynamic->depth_bias.clamp =
1733 pCreateInfo->pRasterizationState->depthBiasClamp;
1734 dynamic->depth_bias.slope =
1735 pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
1736 }
1737
1738 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1739 *
1740 * pColorBlendState is [...] NULL if the pipeline has rasterization
1741 * disabled or if the subpass of the render pass the pipeline is
1742 * created against does not use any color attachments.
1743 */
1744 bool uses_color_att = false;
1745 for (unsigned i = 0; i < subpass->color_count; ++i) {
1746 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1747 uses_color_att = true;
1748 break;
1749 }
1750 }
1751
1752 if (uses_color_att &&
1753 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1754 assert(pCreateInfo->pColorBlendState);
1755
1756 if (states & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
1757 typed_memcpy(dynamic->blend_constants,
1758 pCreateInfo->pColorBlendState->blendConstants, 4);
1759 }
1760
1761 /* If there is no depthstencil attachment, then don't read
1762 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
1763 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
1764 * no need to override the depthstencil defaults in
1765 * anv_pipeline::dynamic_state when there is no depthstencil attachment.
1766 *
1767 * Section 9.2 of the Vulkan 1.0.15 spec says:
1768 *
1769 * pDepthStencilState is [...] NULL if the pipeline has rasterization
1770 * disabled or if the subpass of the render pass the pipeline is created
1771 * against does not use a depth/stencil attachment.
1772 */
1773 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
1774 subpass->depth_stencil_attachment) {
1775 assert(pCreateInfo->pDepthStencilState);
1776
1777 if (states & ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS) {
1778 dynamic->depth_bounds.min =
1779 pCreateInfo->pDepthStencilState->minDepthBounds;
1780 dynamic->depth_bounds.max =
1781 pCreateInfo->pDepthStencilState->maxDepthBounds;
1782 }
1783
1784 if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) {
1785 dynamic->stencil_compare_mask.front =
1786 pCreateInfo->pDepthStencilState->front.compareMask;
1787 dynamic->stencil_compare_mask.back =
1788 pCreateInfo->pDepthStencilState->back.compareMask;
1789 }
1790
1791 if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) {
1792 dynamic->stencil_write_mask.front =
1793 pCreateInfo->pDepthStencilState->front.writeMask;
1794 dynamic->stencil_write_mask.back =
1795 pCreateInfo->pDepthStencilState->back.writeMask;
1796 }
1797
1798 if (states & ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) {
1799 dynamic->stencil_reference.front =
1800 pCreateInfo->pDepthStencilState->front.reference;
1801 dynamic->stencil_reference.back =
1802 pCreateInfo->pDepthStencilState->back.reference;
1803 }
1804 }
1805
1806 const VkPipelineRasterizationLineStateCreateInfoEXT *line_state =
1807 vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
1808 PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
1809 if (line_state) {
1810 if (states & ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE) {
1811 dynamic->line_stipple.factor = line_state->lineStippleFactor;
1812 dynamic->line_stipple.pattern = line_state->lineStipplePattern;
1813 }
1814 }
1815
1816 pipeline->dynamic_state_mask = states;
1817 }
1818
1819 static void
1820 anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
1821 {
1822 #ifdef DEBUG
1823 struct anv_render_pass *renderpass = NULL;
1824 struct anv_subpass *subpass = NULL;
1825
1826 /* Assert that all required members of VkGraphicsPipelineCreateInfo are
1827 * present. See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
1828 */
1829 assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1830
1831 renderpass = anv_render_pass_from_handle(info->renderPass);
1832 assert(renderpass);
1833
1834 assert(info->subpass < renderpass->subpass_count);
1835 subpass = &renderpass->subpasses[info->subpass];
1836
1837 assert(info->stageCount >= 1);
1838 assert(info->pVertexInputState);
1839 assert(info->pInputAssemblyState);
1840 assert(info->pRasterizationState);
1841 if (!info->pRasterizationState->rasterizerDiscardEnable) {
1842 assert(info->pViewportState);
1843 assert(info->pMultisampleState);
1844
1845 if (subpass && subpass->depth_stencil_attachment)
1846 assert(info->pDepthStencilState);
1847
1848 if (subpass && subpass->color_count > 0) {
1849 bool all_color_unused = true;
1850 for (int i = 0; i < subpass->color_count; i++) {
1851 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
1852 all_color_unused = false;
1853 }
1854 /* pColorBlendState is ignored if the pipeline has rasterization
1855 * disabled or if the subpass of the render pass the pipeline is
1856 * created against does not use any color attachments.
1857 */
1858 assert(info->pColorBlendState || all_color_unused);
1859 }
1860 }
1861
1862 for (uint32_t i = 0; i < info->stageCount; ++i) {
1863 switch (info->pStages[i].stage) {
1864 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
1865 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
1866 assert(info->pTessellationState);
1867 break;
1868 default:
1869 break;
1870 }
1871 }
1872 #endif
1873 }
1874
1875 /**
1876 * Calculate the desired L3 partitioning based on the current state of the
1877 * pipeline. For now this simply returns the conservative defaults calculated
1878 * by get_default_l3_weights(), but we could probably do better by gathering
1879 * more statistics from the pipeline state (e.g. guess of expected URB usage
1880  * and bound surfaces), or by using feedback from performance counters.
1881 */
1882 void
1883 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
1884 {
1885 const struct gen_device_info *devinfo = &pipeline->device->info;
1886
1887 const struct gen_l3_weights w =
1888 gen_get_default_l3_weights(devinfo, true, needs_slm);
1889
1890 pipeline->l3_config = gen_get_l3_config(devinfo, w);
1891 }
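/* A minimal usage sketch for anv_pipeline_setup_l3_config() (hypothetical
 * caller, for illustration only): a compute pipeline that declares shared
 * local memory would request an SLM-friendly partitioning with something
 * like
 *
 *    anv_pipeline_setup_l3_config(&pipeline->base,
 *                                 cs_prog_data->base.total_shared > 0);
 *
 * where cs_prog_data is the compiled compute shader's prog_data.
 */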
1892
1893 VkResult
1894 anv_pipeline_init(struct anv_graphics_pipeline *pipeline,
1895 struct anv_device *device,
1896 struct anv_pipeline_cache *cache,
1897 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1898 const VkAllocationCallbacks *alloc)
1899 {
1900 VkResult result;
1901
1902 anv_pipeline_validate_create_info(pCreateInfo);
1903
1904 if (alloc == NULL)
1905 alloc = &device->alloc;
1906
1907 pipeline->base.device = device;
1908 pipeline->base.type = ANV_PIPELINE_GRAPHICS;
1909
1910 ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
1911 assert(pCreateInfo->subpass < render_pass->subpass_count);
1912 pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];
1913
1914 result = anv_reloc_list_init(&pipeline->base.batch_relocs, alloc);
1915 if (result != VK_SUCCESS)
1916 return result;
1917
1918 pipeline->base.batch.alloc = alloc;
1919 pipeline->base.batch.next = pipeline->base.batch.start = pipeline->batch_data;
1920 pipeline->base.batch.end = pipeline->base.batch.start + sizeof(pipeline->batch_data);
1921 pipeline->base.batch.relocs = &pipeline->base.batch_relocs;
1922 pipeline->base.batch.status = VK_SUCCESS;
1923
1924 pipeline->base.mem_ctx = ralloc_context(NULL);
1925 pipeline->base.flags = pCreateInfo->flags;
1926
1927 assert(pCreateInfo->pRasterizationState);
1928
1929 copy_non_dynamic_state(pipeline, pCreateInfo);
1930 pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState->depthClampEnable;
1931
1932 /* Previously we enabled depth clipping when !depthClampEnable.
1933 * DepthClipStateCreateInfo now makes depth clipping explicit so if the
1934 * clipping info is available, use its enable value to determine clipping,
1935     * otherwise fall back to the previous !depthClampEnable logic.
1936 */
1937 const VkPipelineRasterizationDepthClipStateCreateInfoEXT *clip_info =
1938 vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
1939 PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
1940 pipeline->depth_clip_enable = clip_info ? clip_info->depthClipEnable : !pipeline->depth_clamp_enable;
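   /* With VK_EXT_depth_clip_enable this allows combinations the old
    * heuristic could not express, e.g. depthClampEnable = VK_TRUE together
    * with depthClipEnable = VK_TRUE.
    */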
1941
1942 pipeline->sample_shading_enable =
1943 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
1944 pCreateInfo->pMultisampleState &&
1945 pCreateInfo->pMultisampleState->sampleShadingEnable;
1946
1947 /* When we free the pipeline, we detect stages based on the NULL status
1948 * of various prog_data pointers. Make them NULL by default.
1949 */
1950 memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
1951
1952 util_dynarray_init(&pipeline->base.executables, pipeline->base.mem_ctx);
1953
1954 result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
1955 if (result != VK_SUCCESS) {
1956 ralloc_free(pipeline->base.mem_ctx);
1957 anv_reloc_list_finish(&pipeline->base.batch_relocs, alloc);
1958 return result;
1959 }
1960
1961 assert(pipeline->shaders[MESA_SHADER_VERTEX]);
1962
1963 anv_pipeline_setup_l3_config(&pipeline->base, false);
1964
1965 const VkPipelineVertexInputStateCreateInfo *vi_info =
1966 pCreateInfo->pVertexInputState;
1967
1968 const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;
1969
1970 pipeline->vb_used = 0;
1971 for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
1972 const VkVertexInputAttributeDescription *desc =
1973 &vi_info->pVertexAttributeDescriptions[i];
1974
1975 if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
1976 pipeline->vb_used |= 1 << desc->binding;
1977 }
1978
1979 for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
1980 const VkVertexInputBindingDescription *desc =
1981 &vi_info->pVertexBindingDescriptions[i];
1982
1983 pipeline->vb[desc->binding].stride = desc->stride;
1984
1985 /* Step rate is programmed per vertex element (attribute), not
1986 * binding. Set up a map of which bindings step per instance, for
1987 * reference by vertex element setup. */
1988 switch (desc->inputRate) {
1989 default:
1990 case VK_VERTEX_INPUT_RATE_VERTEX:
1991 pipeline->vb[desc->binding].instanced = false;
1992 break;
1993 case VK_VERTEX_INPUT_RATE_INSTANCE:
1994 pipeline->vb[desc->binding].instanced = true;
1995 break;
1996 }
1997
1998 pipeline->vb[desc->binding].instance_divisor = 1;
1999 }
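   /* For illustration (application-side, with a hypothetical
    * per_instance_data type): a per-instance binding such as
    *
    *    (VkVertexInputBindingDescription) {
    *       .binding = 1,
    *       .stride = sizeof(struct per_instance_data),
    *       .inputRate = VK_VERTEX_INPUT_RATE_INSTANCE,
    *    }
    *
    * ends up here as pipeline->vb[1].instanced = true with an instance
    * divisor of 1, unless VK_EXT_vertex_attribute_divisor overrides the
    * divisor below.
    */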
2000
2001 const VkPipelineVertexInputDivisorStateCreateInfoEXT *vi_div_state =
2002 vk_find_struct_const(vi_info->pNext,
2003 PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
2004 if (vi_div_state) {
2005 for (uint32_t i = 0; i < vi_div_state->vertexBindingDivisorCount; i++) {
2006 const VkVertexInputBindingDivisorDescriptionEXT *desc =
2007 &vi_div_state->pVertexBindingDivisors[i];
2008
2009 pipeline->vb[desc->binding].instance_divisor = desc->divisor;
2010 }
2011 }
2012
2013 /* Our implementation of VK_KHR_multiview uses instancing to draw the
2014 * different views. If the client asks for instancing, we need to multiply
2015  * the instance divisor by the number of views to ensure that we repeat the
2016 * client's per-instance data once for each view.
2017 */
2018 if (pipeline->subpass->view_mask) {
2019 const uint32_t view_count = anv_subpass_view_count(pipeline->subpass);
2020 for (uint32_t vb = 0; vb < MAX_VBS; vb++) {
2021 if (pipeline->vb[vb].instanced)
2022 pipeline->vb[vb].instance_divisor *= view_count;
2023 }
2024 }
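   /* Worked example: with view_mask = 0x3 (two views), a binding the client
    * created with divisor 3 becomes divisor 6, so the per-instance data
    * still advances once per three *client* instances even though each
    * client instance is drawn twice (once per view).
    */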
2025
2026 const VkPipelineInputAssemblyStateCreateInfo *ia_info =
2027 pCreateInfo->pInputAssemblyState;
2028 const VkPipelineTessellationStateCreateInfo *tess_info =
2029 pCreateInfo->pTessellationState;
2030 pipeline->primitive_restart = ia_info->primitiveRestartEnable;
2031
2032 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
2033 pipeline->topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
2034 else
2035 pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
2036
2037 return VK_SUCCESS;
2038 }
2039
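/* Formats a string into a fixed-size char-array field of the various
 * VkPipelineExecutable* structures, zero-filling the field first. The
 * assert catches truncation in debug builds; snprintf always leaves the
 * result NUL-terminated.
 */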
2040 #define WRITE_STR(field, ...) ({ \
2041 memset(field, 0, sizeof(field)); \
2042 UNUSED int i = snprintf(field, sizeof(field), __VA_ARGS__); \
2043 assert(i > 0 && i < sizeof(field)); \
2044 })
2045
2046 VkResult anv_GetPipelineExecutablePropertiesKHR(
2047 VkDevice device,
2048 const VkPipelineInfoKHR* pPipelineInfo,
2049 uint32_t* pExecutableCount,
2050 VkPipelineExecutablePropertiesKHR* pProperties)
2051 {
2052 ANV_FROM_HANDLE(anv_pipeline, pipeline, pPipelineInfo->pipeline);
2053 VK_OUTARRAY_MAKE(out, pProperties, pExecutableCount);
2054
2055 util_dynarray_foreach (&pipeline->executables, struct anv_pipeline_executable, exe) {
2056 vk_outarray_append(&out, props) {
2057 gl_shader_stage stage = exe->stage;
2058 props->stages = mesa_to_vk_shader_stage(stage);
2059
2060 unsigned simd_width = exe->stats.dispatch_width;
2061 if (stage == MESA_SHADER_FRAGMENT) {
2062 WRITE_STR(props->name, "%s%d %s",
2063 simd_width ? "SIMD" : "vec",
2064 simd_width ? simd_width : 4,
2065 _mesa_shader_stage_to_string(stage));
2066 } else {
2067 WRITE_STR(props->name, "%s", _mesa_shader_stage_to_string(stage));
2068 }
2069 WRITE_STR(props->description, "%s%d %s shader",
2070 simd_width ? "SIMD" : "vec",
2071 simd_width ? simd_width : 4,
2072 _mesa_shader_stage_to_string(stage));
2073
2074 /* The compiler gives us a dispatch width of 0 for vec4 but Vulkan
2075 * wants a subgroup size of 1.
2076 */
2077 props->subgroupSize = MAX2(simd_width, 1);
2078 }
2079 }
2080
2081 return vk_outarray_status(&out);
2082 }
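/* For reference (application-side usage, not driver code),
 * anv_GetPipelineExecutablePropertiesKHR() above is normally consumed with
 * the standard Vulkan two-call pattern:
 *
 *    uint32_t count = 0;
 *    vkGetPipelineExecutablePropertiesKHR(device, &info, &count, NULL);
 *    VkPipelineExecutablePropertiesKHR *props = calloc(count, sizeof(*props));
 *    for (uint32_t i = 0; i < count; i++)
 *       props[i].sType = VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR;
 *    vkGetPipelineExecutablePropertiesKHR(device, &info, &count, props);
 *
 * where `info` is a VkPipelineInfoKHR naming the pipeline. The VK_OUTARRAY_*
 * helpers implement the count-query and overflow (VK_INCOMPLETE) behaviour
 * that this pattern relies on.
 */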
2083
2084 static const struct anv_pipeline_executable *
2085 anv_pipeline_get_executable(struct anv_pipeline *pipeline, uint32_t index)
2086 {
2087 assert(index < util_dynarray_num_elements(&pipeline->executables,
2088 struct anv_pipeline_executable));
2089 return util_dynarray_element(
2090 &pipeline->executables, struct anv_pipeline_executable, index);
2091 }
2092
2093 VkResult anv_GetPipelineExecutableStatisticsKHR(
2094 VkDevice device,
2095 const VkPipelineExecutableInfoKHR* pExecutableInfo,
2096 uint32_t* pStatisticCount,
2097 VkPipelineExecutableStatisticKHR* pStatistics)
2098 {
2099 ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
2100 VK_OUTARRAY_MAKE(out, pStatistics, pStatisticCount);
2101
2102 const struct anv_pipeline_executable *exe =
2103 anv_pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);
2104
2105 const struct brw_stage_prog_data *prog_data;
2106 switch (pipeline->type) {
2107 case ANV_PIPELINE_GRAPHICS: {
2108 prog_data = anv_pipeline_to_graphics(pipeline)->shaders[exe->stage]->prog_data;
2109 break;
2110 }
2111 case ANV_PIPELINE_COMPUTE: {
2112 prog_data = anv_pipeline_to_compute(pipeline)->cs->prog_data;
2113 break;
2114 }
2115 default:
2116 unreachable("invalid pipeline type");
2117 }
2118
2119 vk_outarray_append(&out, stat) {
2120 WRITE_STR(stat->name, "Instruction Count");
2121 WRITE_STR(stat->description,
2122 "Number of GEN instructions in the final generated "
2123 "shader executable.");
2124 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2125 stat->value.u64 = exe->stats.instructions;
2126 }
2127
2128 vk_outarray_append(&out, stat) {
2129 WRITE_STR(stat->name, "Loop Count");
2130 WRITE_STR(stat->description,
2131 "Number of loops (not unrolled) in the final generated "
2132 "shader executable.");
2133 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2134 stat->value.u64 = exe->stats.loops;
2135 }
2136
2137 vk_outarray_append(&out, stat) {
2138 WRITE_STR(stat->name, "Cycle Count");
2139 WRITE_STR(stat->description,
2140 "Estimate of the number of EU cycles required to execute "
2141 "the final generated executable. This is an estimate only "
2142 "and may vary greatly from actual run-time performance.");
2143 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2144 stat->value.u64 = exe->stats.cycles;
2145 }
2146
2147 vk_outarray_append(&out, stat) {
2148 WRITE_STR(stat->name, "Spill Count");
2149 WRITE_STR(stat->description,
2150 "Number of scratch spill operations. This gives a rough "
2151 "estimate of the cost incurred due to spilling temporary "
2152 "values to memory. If this is non-zero, you may want to "
2153 "adjust your shader to reduce register pressure.");
2154 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2155 stat->value.u64 = exe->stats.spills;
2156 }
2157
2158 vk_outarray_append(&out, stat) {
2159 WRITE_STR(stat->name, "Fill Count");
2160 WRITE_STR(stat->description,
2161 "Number of scratch fill operations. This gives a rough "
2162 "estimate of the cost incurred due to spilling temporary "
2163                   "estimate of the cost of reading spilled temporary "
2164                   "values back from memory. If this is non-zero, you may "
2165                   "want to adjust your shader to reduce register pressure.");
2166 stat->value.u64 = exe->stats.fills;
2167 }
2168
2169 vk_outarray_append(&out, stat) {
2170 WRITE_STR(stat->name, "Scratch Memory Size");
2171 WRITE_STR(stat->description,
2172 "Number of bytes of scratch memory required by the "
2173 "generated shader executable. If this is non-zero, you "
2174 "may want to adjust your shader to reduce register "
2175 "pressure.");
2176 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2177 stat->value.u64 = prog_data->total_scratch;
2178 }
2179
2180 if (exe->stage == MESA_SHADER_COMPUTE) {
2181 vk_outarray_append(&out, stat) {
2182 WRITE_STR(stat->name, "Workgroup Memory Size");
2183 WRITE_STR(stat->description,
2184 "Number of bytes of workgroup shared memory used by this "
2185 "compute shader including any padding.");
2186 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
2187          stat->value.u64 = prog_data->total_shared;
2188 }
2189 }
2190
2191 return vk_outarray_status(&out);
2192 }
2193
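/* Helper implementing the size-query protocol for
 * VkPipelineExecutableInternalRepresentationKHR::pData: when pData is NULL,
 * only the required dataSize is reported; otherwise the text is copied and
 * false is returned if the caller's buffer was too small, which the caller
 * below turns into VK_INCOMPLETE.
 */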
2194 static bool
2195 write_ir_text(VkPipelineExecutableInternalRepresentationKHR* ir,
2196 const char *data)
2197 {
2198 ir->isText = VK_TRUE;
2199
2200 size_t data_len = strlen(data) + 1;
2201
2202 if (ir->pData == NULL) {
2203 ir->dataSize = data_len;
2204 return true;
2205 }
2206
2207 strncpy(ir->pData, data, ir->dataSize);
2208 if (ir->dataSize < data_len)
2209 return false;
2210
2211 ir->dataSize = data_len;
2212 return true;
2213 }
2214
2215 VkResult anv_GetPipelineExecutableInternalRepresentationsKHR(
2216 VkDevice device,
2217 const VkPipelineExecutableInfoKHR* pExecutableInfo,
2218 uint32_t* pInternalRepresentationCount,
2219 VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
2220 {
2221 ANV_FROM_HANDLE(anv_pipeline, pipeline, pExecutableInfo->pipeline);
2222 VK_OUTARRAY_MAKE(out, pInternalRepresentations,
2223 pInternalRepresentationCount);
2224 bool incomplete_text = false;
2225
2226 const struct anv_pipeline_executable *exe =
2227 anv_pipeline_get_executable(pipeline, pExecutableInfo->executableIndex);
2228
2229 if (exe->nir) {
2230 vk_outarray_append(&out, ir) {
2231 WRITE_STR(ir->name, "Final NIR");
2232 WRITE_STR(ir->description,
2233 "Final NIR before going into the back-end compiler");
2234
2235 if (!write_ir_text(ir, exe->nir))
2236 incomplete_text = true;
2237 }
2238 }
2239
2240 if (exe->disasm) {
2241 vk_outarray_append(&out, ir) {
2242 WRITE_STR(ir->name, "GEN Assembly");
2243 WRITE_STR(ir->description,
2244 "Final GEN assembly for the generated shader binary");
2245
2246 if (!write_ir_text(ir, exe->disasm))
2247 incomplete_text = true;
2248 }
2249 }
2250
2251 return incomplete_text ? VK_INCOMPLETE : vk_outarray_status(&out);
2252 }