anv: implement VK_EXT_pipeline_creation_feedback
src/intel/vulkan/anv_pipeline.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "util/mesa-sha1.h"
31 #include "util/os_time.h"
32 #include "common/gen_l3_config.h"
33 #include "anv_private.h"
34 #include "compiler/brw_nir.h"
35 #include "anv_nir.h"
36 #include "nir/nir_xfb_info.h"
37 #include "spirv/nir_spirv.h"
38 #include "vk_util.h"
39
40 /* Needed for SWIZZLE macros */
41 #include "program/prog_instruction.h"
42
43 // Shader functions
44
45 VkResult anv_CreateShaderModule(
46 VkDevice _device,
47 const VkShaderModuleCreateInfo* pCreateInfo,
48 const VkAllocationCallbacks* pAllocator,
49 VkShaderModule* pShaderModule)
50 {
51 ANV_FROM_HANDLE(anv_device, device, _device);
52 struct anv_shader_module *module;
53
54 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
55 assert(pCreateInfo->flags == 0);
56
57 module = vk_alloc2(&device->alloc, pAllocator,
58 sizeof(*module) + pCreateInfo->codeSize, 8,
59 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
60 if (module == NULL)
61 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
62
63 module->size = pCreateInfo->codeSize;
64 memcpy(module->data, pCreateInfo->pCode, module->size);
65
66 _mesa_sha1_compute(module->data, module->size, module->sha1);
67
68 *pShaderModule = anv_shader_module_to_handle(module);
69
70 return VK_SUCCESS;
71 }
72
73 void anv_DestroyShaderModule(
74 VkDevice _device,
75 VkShaderModule _module,
76 const VkAllocationCallbacks* pAllocator)
77 {
78 ANV_FROM_HANDLE(anv_device, device, _device);
79 ANV_FROM_HANDLE(anv_shader_module, module, _module);
80
81 if (!module)
82 return;
83
84 vk_free2(&device->alloc, pAllocator, module);
85 }
86
87 #define SPIR_V_MAGIC_NUMBER 0x07230203
88
89 static const uint64_t stage_to_debug[] = {
90 [MESA_SHADER_VERTEX] = DEBUG_VS,
91 [MESA_SHADER_TESS_CTRL] = DEBUG_TCS,
92 [MESA_SHADER_TESS_EVAL] = DEBUG_TES,
93 [MESA_SHADER_GEOMETRY] = DEBUG_GS,
94 [MESA_SHADER_FRAGMENT] = DEBUG_WM,
95 [MESA_SHADER_COMPUTE] = DEBUG_CS,
96 };
97
98 /* Eventually, this will become part of anv_CreateShaderModule. Unfortunately,
99 * we can't do that yet because we don't have the ability to copy nir.
100 */
101 static nir_shader *
102 anv_shader_compile_to_nir(struct anv_device *device,
103 void *mem_ctx,
104 const struct anv_shader_module *module,
105 const char *entrypoint_name,
106 gl_shader_stage stage,
107 const VkSpecializationInfo *spec_info)
108 {
109 const struct anv_physical_device *pdevice =
110 &device->instance->physicalDevice;
111 const struct brw_compiler *compiler = pdevice->compiler;
112 const nir_shader_compiler_options *nir_options =
113 compiler->glsl_compiler_options[stage].NirOptions;
114
115 uint32_t *spirv = (uint32_t *) module->data;
116 assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
117 assert(module->size % 4 == 0);
118
119 uint32_t num_spec_entries = 0;
120 struct nir_spirv_specialization *spec_entries = NULL;
121 if (spec_info && spec_info->mapEntryCount > 0) {
122 num_spec_entries = spec_info->mapEntryCount;
123 spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
124 for (uint32_t i = 0; i < num_spec_entries; i++) {
125 VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
126 const void *data = spec_info->pData + entry.offset;
127 assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
128
129 spec_entries[i].id = spec_info->pMapEntries[i].constantID;
130 if (entry.size == 8)
131 spec_entries[i].data64 = *(const uint64_t *)data;
132 else
133 spec_entries[i].data32 = *(const uint32_t *)data;
134 }
135 }
136
137 struct spirv_to_nir_options spirv_options = {
138 .lower_workgroup_access_to_offsets = true,
139 .caps = {
140 .device_group = true,
141 .draw_parameters = true,
142 .float64 = pdevice->info.gen >= 8,
143 .geometry_streams = true,
144 .image_write_without_format = true,
145 .int16 = pdevice->info.gen >= 8,
146 .int64 = pdevice->info.gen >= 8,
147 .min_lod = true,
148 .multiview = true,
149 .physical_storage_buffer_address = pdevice->info.gen >= 8 &&
150 pdevice->use_softpin,
151 .post_depth_coverage = pdevice->info.gen >= 9,
152 .shader_viewport_index_layer = true,
153 .stencil_export = pdevice->info.gen >= 9,
154 .storage_8bit = pdevice->info.gen >= 8,
155 .storage_16bit = pdevice->info.gen >= 8,
156 .subgroup_arithmetic = true,
157 .subgroup_basic = true,
158 .subgroup_ballot = true,
159 .subgroup_quad = true,
160 .subgroup_shuffle = true,
161 .subgroup_vote = true,
162 .tessellation = true,
163 .transform_feedback = pdevice->info.gen >= 8,
164 .variable_pointers = true,
165 },
166 .ubo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
167 .ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
168 .phys_ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT64, 1),
169 .push_const_ptr_type = glsl_uint_type(),
170 .shared_ptr_type = glsl_uint_type(),
171 };
172
173 nir_function *entry_point =
174 spirv_to_nir(spirv, module->size / 4,
175 spec_entries, num_spec_entries,
176 stage, entrypoint_name, &spirv_options, nir_options);
177 nir_shader *nir = entry_point->shader;
178 assert(nir->info.stage == stage);
179 nir_validate_shader(nir, "after spirv_to_nir");
180 ralloc_steal(mem_ctx, nir);
181
182 free(spec_entries);
183
184 if (unlikely(INTEL_DEBUG & stage_to_debug[stage])) {
185 fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
186 gl_shader_stage_name(stage));
187 nir_print_shader(nir, stderr);
188 }
189
190 /* We have to lower away local constant initializers right before we
191 * inline functions. That way they get properly initialized at the top
192 * of the function and not at the top of its caller.
193 */
194 NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
195 NIR_PASS_V(nir, nir_lower_returns);
196 NIR_PASS_V(nir, nir_inline_functions);
197 NIR_PASS_V(nir, nir_opt_deref);
198
199 /* Pick off the single entrypoint that we want */
200 foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
201 if (func != entry_point)
202 exec_node_remove(&func->node);
203 }
204 assert(exec_list_length(&nir->functions) == 1);
205
206 /* Now that we've deleted all but the main function, we can go ahead and
207 * lower the rest of the constant initializers. We do this here so that
208 * nir_remove_dead_variables and split_per_member_structs below see the
209 * corresponding stores.
210 */
211 NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
212
213 /* Split member structs. We do this before lower_io_to_temporaries so that
214 * it doesn't lower system values to temporaries by accident.
215 */
216 NIR_PASS_V(nir, nir_split_var_copies);
217 NIR_PASS_V(nir, nir_split_per_member_structs);
218
219 NIR_PASS_V(nir, nir_remove_dead_variables,
220 nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
221
222 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
223 nir_address_format_64bit_global);
224
225 NIR_PASS_V(nir, nir_propagate_invariant);
226 NIR_PASS_V(nir, nir_lower_io_to_temporaries,
227 entry_point->impl, true, false);
228
229 /* Vulkan uses the separate-shader linking model */
230 nir->info.separate_shader = true;
231
232 nir = brw_preprocess_nir(compiler, nir, NULL);
233
234 return nir;
235 }
236
237 void anv_DestroyPipeline(
238 VkDevice _device,
239 VkPipeline _pipeline,
240 const VkAllocationCallbacks* pAllocator)
241 {
242 ANV_FROM_HANDLE(anv_device, device, _device);
243 ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
244
245 if (!pipeline)
246 return;
247
248 anv_reloc_list_finish(&pipeline->batch_relocs,
249 pAllocator ? pAllocator : &device->alloc);
250 if (pipeline->blend_state.map)
251 anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
252
253 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
254 if (pipeline->shaders[s])
255 anv_shader_bin_unref(device, pipeline->shaders[s]);
256 }
257
258 vk_free2(&device->alloc, pAllocator, pipeline);
259 }
260
261 static const uint32_t vk_to_gen_primitive_type[] = {
262 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
263 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
264 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
265 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
266 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
267 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
268 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
269 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
270 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
271 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
272 };
273
274 static void
275 populate_sampler_prog_key(const struct gen_device_info *devinfo,
276 struct brw_sampler_prog_key_data *key)
277 {
278 /* Almost all multisampled textures are compressed. The only time when we
279 * don't compress a multisampled texture is for 16x MSAA with a surface
280 * width greater than 8k which is a bit of an edge case. Since the sampler
281 * just ignores the MCS parameter to ld2ms when MCS is disabled, it's safe
282 * to tell the compiler to always assume compression.
283 */
284 key->compressed_multisample_layout_mask = ~0;
285
286 /* SkyLake added support for 16x MSAA. With this came a new message for
287 * reading from a 16x MSAA surface with compression. The new message was
288 * needed because now the MCS data is 64 bits instead of 32 or lower as is
289 * the case for 8x, 4x, and 2x. The key->msaa_16 bit-field controls which
290 * message we use. Fortunately, the 16x message works for 8x, 4x, and 2x
291 * so we can just use it unconditionally. This may not be quite as
292 * efficient but it saves us from recompiling.
293 */
294 if (devinfo->gen >= 9)
295 key->msaa_16 = ~0;
296
297 /* XXX: Handle texture swizzle on HSW- */
298 for (int i = 0; i < MAX_SAMPLERS; i++) {
299 /* Assume color sampler, no swizzling. (Works for BDW+) */
300 key->swizzles[i] = SWIZZLE_XYZW;
301 }
302 }
303
304 static void
305 populate_vs_prog_key(const struct gen_device_info *devinfo,
306 struct brw_vs_prog_key *key)
307 {
308 memset(key, 0, sizeof(*key));
309
310 populate_sampler_prog_key(devinfo, &key->tex);
311
312 /* XXX: Handle vertex input work-arounds */
313
314 /* XXX: Handle sampler_prog_key */
315 }
316
317 static void
318 populate_tcs_prog_key(const struct gen_device_info *devinfo,
319 unsigned input_vertices,
320 struct brw_tcs_prog_key *key)
321 {
322 memset(key, 0, sizeof(*key));
323
324 populate_sampler_prog_key(devinfo, &key->tex);
325
326 key->input_vertices = input_vertices;
327 }
328
329 static void
330 populate_tes_prog_key(const struct gen_device_info *devinfo,
331 struct brw_tes_prog_key *key)
332 {
333 memset(key, 0, sizeof(*key));
334
335 populate_sampler_prog_key(devinfo, &key->tex);
336 }
337
338 static void
339 populate_gs_prog_key(const struct gen_device_info *devinfo,
340 struct brw_gs_prog_key *key)
341 {
342 memset(key, 0, sizeof(*key));
343
344 populate_sampler_prog_key(devinfo, &key->tex);
345 }
346
347 static void
348 populate_wm_prog_key(const struct gen_device_info *devinfo,
349 const struct anv_subpass *subpass,
350 const VkPipelineMultisampleStateCreateInfo *ms_info,
351 struct brw_wm_prog_key *key)
352 {
353 memset(key, 0, sizeof(*key));
354
355 populate_sampler_prog_key(devinfo, &key->tex);
356
357 /* We set this to 0 here and set it to the actual value before we call
358 * brw_compile_fs.
359 */
360 key->input_slots_valid = 0;
361
362 /* Vulkan doesn't specify a default */
363 key->high_quality_derivatives = false;
364
365 /* XXX Vulkan doesn't appear to specify */
366 key->clamp_fragment_color = false;
367
368 assert(subpass->color_count <= MAX_RTS);
369 for (uint32_t i = 0; i < subpass->color_count; i++) {
370 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
371 key->color_outputs_valid |= (1 << i);
372 }
373
374 key->nr_color_regions = util_bitcount(key->color_outputs_valid);
375
376 key->replicate_alpha = key->nr_color_regions > 1 &&
377 ms_info && ms_info->alphaToCoverageEnable;
378
379 if (ms_info) {
380 /* We should probably pull this out of the shader, but it's fairly
381 * harmless to compute it and then let dead-code take care of it.
382 */
383 if (ms_info->rasterizationSamples > 1) {
384 key->persample_interp =
385 (ms_info->minSampleShading * ms_info->rasterizationSamples) > 1;
386 key->multisample_fbo = true;
387 }
388
389 key->frag_coord_adds_sample_pos = ms_info->sampleShadingEnable;
390 }
391 }
392
393 static void
394 populate_cs_prog_key(const struct gen_device_info *devinfo,
395 struct brw_cs_prog_key *key)
396 {
397 memset(key, 0, sizeof(*key));
398
399 populate_sampler_prog_key(devinfo, &key->tex);
400 }
401
402 struct anv_pipeline_stage {
403 gl_shader_stage stage;
404
405 const struct anv_shader_module *module;
406 const char *entrypoint;
407 const VkSpecializationInfo *spec_info;
408
409 unsigned char shader_sha1[20];
410
411 union brw_any_prog_key key;
412
413 struct {
414 gl_shader_stage stage;
415 unsigned char sha1[20];
416 } cache_key;
417
418 nir_shader *nir;
419
420 struct anv_pipeline_binding surface_to_descriptor[256];
421 struct anv_pipeline_binding sampler_to_descriptor[256];
422 struct anv_pipeline_bind_map bind_map;
423
424 union brw_any_prog_data prog_data;
425
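/* Creation feedback for this stage (VK_EXT_pipeline_creation_feedback).
 * The duration accumulates time spent hashing, generating NIR, and
 * compiling; the application-cache-hit flag is set when the shader bin
 * comes out of the pipeline cache.
 */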
426 VkPipelineCreationFeedbackEXT feedback;
427 };
428
429 static void
430 anv_pipeline_hash_shader(const struct anv_shader_module *module,
431 const char *entrypoint,
432 gl_shader_stage stage,
433 const VkSpecializationInfo *spec_info,
434 unsigned char *sha1_out)
435 {
436 struct mesa_sha1 ctx;
437 _mesa_sha1_init(&ctx);
438
439 _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
440 _mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
441 _mesa_sha1_update(&ctx, &stage, sizeof(stage));
442 if (spec_info) {
443 _mesa_sha1_update(&ctx, spec_info->pMapEntries,
444 spec_info->mapEntryCount *
445 sizeof(*spec_info->pMapEntries));
446 _mesa_sha1_update(&ctx, spec_info->pData,
447 spec_info->dataSize);
448 }
449
450 _mesa_sha1_final(&ctx, sha1_out);
451 }
452
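/* Hash everything that affects the compiled graphics pipeline: the subpass
 * view mask, the pipeline layout, the robust-buffer-access setting, and each
 * active stage's SPIR-V hash and prog key.  The result keys the
 * pipeline-cache lookups below.
 */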
453 static void
454 anv_pipeline_hash_graphics(struct anv_pipeline *pipeline,
455 struct anv_pipeline_layout *layout,
456 struct anv_pipeline_stage *stages,
457 unsigned char *sha1_out)
458 {
459 struct mesa_sha1 ctx;
460 _mesa_sha1_init(&ctx);
461
462 _mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
463 sizeof(pipeline->subpass->view_mask));
464
465 if (layout)
466 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
467
468 const bool rba = pipeline->device->robust_buffer_access;
469 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
470
471 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
472 if (stages[s].entrypoint) {
473 _mesa_sha1_update(&ctx, stages[s].shader_sha1,
474 sizeof(stages[s].shader_sha1));
475 _mesa_sha1_update(&ctx, &stages[s].key, brw_prog_key_size(s));
476 }
477 }
478
479 _mesa_sha1_final(&ctx, sha1_out);
480 }
481
482 static void
483 anv_pipeline_hash_compute(struct anv_pipeline *pipeline,
484 struct anv_pipeline_layout *layout,
485 struct anv_pipeline_stage *stage,
486 unsigned char *sha1_out)
487 {
488 struct mesa_sha1 ctx;
489 _mesa_sha1_init(&ctx);
490
491 if (layout)
492 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
493
494 const bool rba = pipeline->device->robust_buffer_access;
495 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
496
497 _mesa_sha1_update(&ctx, stage->shader_sha1,
498 sizeof(stage->shader_sha1));
499 _mesa_sha1_update(&ctx, &stage->key.cs, sizeof(stage->key.cs));
500
501 _mesa_sha1_final(&ctx, sha1_out);
502 }
503
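/* Look the stage's NIR up in the cache by its SPIR-V hash; on a miss, run
 * spirv_to_nir and upload the result so later pipeline compiles can reuse it.
 */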
504 static nir_shader *
505 anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
506 struct anv_pipeline_cache *cache,
507 void *mem_ctx,
508 struct anv_pipeline_stage *stage)
509 {
510 const struct brw_compiler *compiler =
511 pipeline->device->instance->physicalDevice.compiler;
512 const nir_shader_compiler_options *nir_options =
513 compiler->glsl_compiler_options[stage->stage].NirOptions;
514 nir_shader *nir;
515
516 nir = anv_device_search_for_nir(pipeline->device, cache,
517 nir_options,
518 stage->shader_sha1,
519 mem_ctx);
520 if (nir) {
521 assert(nir->info.stage == stage->stage);
522 return nir;
523 }
524
525 nir = anv_shader_compile_to_nir(pipeline->device,
526 mem_ctx,
527 stage->module,
528 stage->entrypoint,
529 stage->stage,
530 stage->spec_info);
531 if (nir) {
532 anv_device_upload_nir(pipeline->device, cache, nir, stage->shader_sha1);
533 return nir;
534 }
535
536 return NULL;
537 }
538
539 static void
540 anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
541 void *mem_ctx,
542 struct anv_pipeline_stage *stage,
543 struct anv_pipeline_layout *layout)
544 {
545 const struct brw_compiler *compiler =
546 pipeline->device->instance->physicalDevice.compiler;
547
548 struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
549 nir_shader *nir = stage->nir;
550
551 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
552 NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);
553 NIR_PASS_V(nir, anv_nir_lower_input_attachments);
554 }
555
556 NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
557
558 NIR_PASS_V(nir, anv_nir_lower_push_constants);
559
560 if (nir->info.stage != MESA_SHADER_COMPUTE)
561 NIR_PASS_V(nir, anv_nir_lower_multiview, pipeline->subpass->view_mask);
562
563 if (nir->info.stage == MESA_SHADER_COMPUTE)
564 prog_data->total_shared = nir->num_shared;
565
566 nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
567
568 if (nir->num_uniforms > 0) {
569 assert(prog_data->nr_params == 0);
570
571 /* If the shader uses any push constants at all, we'll just give
572 * them the maximum possible number
573 */
574 assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
575 nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE;
576 prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
577 prog_data->param = ralloc_array(mem_ctx, uint32_t, prog_data->nr_params);
578
579 /* We now set the param values to be offsets into an
580 * anv_push_constants structure. Since the compiler doesn't
581 * actually dereference any of the gl_constant_value pointers in the
582 * params array, it doesn't really matter what we put here.
583 */
584 struct anv_push_constants *null_data = NULL;
585 /* Fill out the push constants section of the param array */
586 for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++) {
587 prog_data->param[i] = ANV_PARAM_PUSH(
588 (uintptr_t)&null_data->client_data[i * sizeof(float)]);
589 }
590 }
591
592 if (nir->info.num_ssbos > 0 || nir->info.num_images > 0)
593 pipeline->needs_data_cache = true;
594
595 NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo);
596
597 /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
598 if (layout) {
599 anv_nir_apply_pipeline_layout(&pipeline->device->instance->physicalDevice,
600 pipeline->device->robust_buffer_access,
601 layout, nir, prog_data,
602 &stage->bind_map);
603
604 NIR_PASS_V(nir, nir_lower_explicit_io,
605 nir_var_mem_ubo | nir_var_mem_ssbo,
606 nir_address_format_32bit_index_offset);
607
608 NIR_PASS_V(nir, nir_opt_constant_folding);
609 }
610
611 if (nir->info.stage != MESA_SHADER_COMPUTE)
612 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
613
614 assert(nir->num_uniforms == prog_data->nr_params * 4);
615
616 stage->nir = nir;
617 }
618
619 static void
620 anv_pipeline_link_vs(const struct brw_compiler *compiler,
621 struct anv_pipeline_stage *vs_stage,
622 struct anv_pipeline_stage *next_stage)
623 {
624 if (next_stage)
625 brw_nir_link_shaders(compiler, &vs_stage->nir, &next_stage->nir);
626 }
627
628 static const unsigned *
629 anv_pipeline_compile_vs(const struct brw_compiler *compiler,
630 void *mem_ctx,
631 struct anv_device *device,
632 struct anv_pipeline_stage *vs_stage)
633 {
634 brw_compute_vue_map(compiler->devinfo,
635 &vs_stage->prog_data.vs.base.vue_map,
636 vs_stage->nir->info.outputs_written,
637 vs_stage->nir->info.separate_shader);
638
639 return brw_compile_vs(compiler, device, mem_ctx, &vs_stage->key.vs,
640 &vs_stage->prog_data.vs, vs_stage->nir, -1, NULL);
641 }
642
643 static void
644 merge_tess_info(struct shader_info *tes_info,
645 const struct shader_info *tcs_info)
646 {
647 /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
648 *
649 * "PointMode. Controls generation of points rather than triangles
650 * or lines. This functionality defaults to disabled, and is
651 * enabled if either shader stage includes the execution mode."
652 *
653 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
654 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
655 * and OutputVertices, it says:
656 *
657 * "One mode must be set in at least one of the tessellation
658 * shader stages."
659 *
660 * So, the fields can be set in either the TCS or TES, but they must
661 * agree if set in both. Our backend looks at TES, so bitwise-or in
662 * the values from the TCS.
663 */
664 assert(tcs_info->tess.tcs_vertices_out == 0 ||
665 tes_info->tess.tcs_vertices_out == 0 ||
666 tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
667 tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
668
669 assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
670 tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
671 tcs_info->tess.spacing == tes_info->tess.spacing);
672 tes_info->tess.spacing |= tcs_info->tess.spacing;
673
674 assert(tcs_info->tess.primitive_mode == 0 ||
675 tes_info->tess.primitive_mode == 0 ||
676 tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
677 tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
678 tes_info->tess.ccw |= tcs_info->tess.ccw;
679 tes_info->tess.point_mode |= tcs_info->tess.point_mode;
680 }
681
682 static void
683 anv_pipeline_link_tcs(const struct brw_compiler *compiler,
684 struct anv_pipeline_stage *tcs_stage,
685 struct anv_pipeline_stage *tes_stage)
686 {
687 assert(tes_stage && tes_stage->stage == MESA_SHADER_TESS_EVAL);
688
689 brw_nir_link_shaders(compiler, &tcs_stage->nir, &tes_stage->nir);
690
691 nir_lower_patch_vertices(tes_stage->nir,
692 tcs_stage->nir->info.tess.tcs_vertices_out,
693 NULL);
694
695 /* Copy TCS info into the TES info */
696 merge_tess_info(&tes_stage->nir->info, &tcs_stage->nir->info);
697
698 /* Whacking the key after cache lookup is a bit sketchy, but all of
699 * this comes from the SPIR-V, which is part of the hash used for the
700 * pipeline cache. So it should be safe.
701 */
702 tcs_stage->key.tcs.tes_primitive_mode =
703 tes_stage->nir->info.tess.primitive_mode;
704 tcs_stage->key.tcs.quads_workaround =
705 compiler->devinfo->gen < 9 &&
706 tes_stage->nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
707 tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
708 }
709
710 static const unsigned *
711 anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
712 void *mem_ctx,
713 struct anv_device *device,
714 struct anv_pipeline_stage *tcs_stage,
715 struct anv_pipeline_stage *prev_stage)
716 {
717 tcs_stage->key.tcs.outputs_written =
718 tcs_stage->nir->info.outputs_written;
719 tcs_stage->key.tcs.patch_outputs_written =
720 tcs_stage->nir->info.patch_outputs_written;
721
722 return brw_compile_tcs(compiler, device, mem_ctx, &tcs_stage->key.tcs,
723 &tcs_stage->prog_data.tcs, tcs_stage->nir,
724 -1, NULL);
725 }
726
727 static void
728 anv_pipeline_link_tes(const struct brw_compiler *compiler,
729 struct anv_pipeline_stage *tes_stage,
730 struct anv_pipeline_stage *next_stage)
731 {
732 if (next_stage)
733 brw_nir_link_shaders(compiler, &tes_stage->nir, &next_stage->nir);
734 }
735
736 static const unsigned *
737 anv_pipeline_compile_tes(const struct brw_compiler *compiler,
738 void *mem_ctx,
739 struct anv_device *device,
740 struct anv_pipeline_stage *tes_stage,
741 struct anv_pipeline_stage *tcs_stage)
742 {
743 tes_stage->key.tes.inputs_read =
744 tcs_stage->nir->info.outputs_written;
745 tes_stage->key.tes.patch_inputs_read =
746 tcs_stage->nir->info.patch_outputs_written;
747
748 return brw_compile_tes(compiler, device, mem_ctx, &tes_stage->key.tes,
749 &tcs_stage->prog_data.tcs.base.vue_map,
750 &tes_stage->prog_data.tes, tes_stage->nir,
751 NULL, -1, NULL);
752 }
753
754 static void
755 anv_pipeline_link_gs(const struct brw_compiler *compiler,
756 struct anv_pipeline_stage *gs_stage,
757 struct anv_pipeline_stage *next_stage)
758 {
759 if (next_stage)
760 brw_nir_link_shaders(compiler, &gs_stage->nir, &next_stage->nir);
761 }
762
763 static const unsigned *
764 anv_pipeline_compile_gs(const struct brw_compiler *compiler,
765 void *mem_ctx,
766 struct anv_device *device,
767 struct anv_pipeline_stage *gs_stage,
768 struct anv_pipeline_stage *prev_stage)
769 {
770 brw_compute_vue_map(compiler->devinfo,
771 &gs_stage->prog_data.gs.base.vue_map,
772 gs_stage->nir->info.outputs_written,
773 gs_stage->nir->info.separate_shader);
774
775 return brw_compile_gs(compiler, device, mem_ctx, &gs_stage->key.gs,
776 &gs_stage->prog_data.gs, gs_stage->nir,
777 NULL, -1, NULL);
778 }
779
780 static void
781 anv_pipeline_link_fs(const struct brw_compiler *compiler,
782 struct anv_pipeline_stage *stage)
783 {
784 unsigned num_rts = 0;
785 const int max_rt = FRAG_RESULT_DATA7 - FRAG_RESULT_DATA0 + 1;
786 struct anv_pipeline_binding rt_bindings[max_rt];
787 nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
788 int rt_to_bindings[max_rt];
789 memset(rt_to_bindings, -1, sizeof(rt_to_bindings));
790 bool rt_used[max_rt];
791 memset(rt_used, 0, sizeof(rt_used));
792
793 /* Flag used render targets */
794 nir_foreach_variable_safe(var, &stage->nir->outputs) {
795 if (var->data.location < FRAG_RESULT_DATA0)
796 continue;
797
798 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
799 /* Unused or out-of-bounds */
800 if (rt >= MAX_RTS || !(stage->key.wm.color_outputs_valid & (1 << rt)))
801 continue;
802
803 const unsigned array_len =
804 glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
805 assert(rt + array_len <= max_rt);
806
807 for (unsigned i = 0; i < array_len; i++)
808 rt_used[rt + i] = true;
809 }
810
811 /* Set new, compacted, location */
812 for (unsigned i = 0; i < max_rt; i++) {
813 if (!rt_used[i])
814 continue;
815
816 rt_to_bindings[i] = num_rts;
817 rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
818 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
819 .binding = 0,
820 .index = i,
821 };
822 num_rts++;
823 }
824
825 bool deleted_output = false;
826 nir_foreach_variable_safe(var, &stage->nir->outputs) {
827 if (var->data.location < FRAG_RESULT_DATA0)
828 continue;
829
830 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
831 if (rt >= MAX_RTS ||
832 !(stage->key.wm.color_outputs_valid & (1 << rt))) {
833 /* Unused or out-of-bounds, throw it away */
834 deleted_output = true;
835 var->data.mode = nir_var_function_temp;
836 exec_node_remove(&var->node);
837 exec_list_push_tail(&impl->locals, &var->node);
838 continue;
839 }
840
841 /* Give it the new location */
842 assert(rt_to_bindings[rt] != -1);
843 var->data.location = rt_to_bindings[rt] + FRAG_RESULT_DATA0;
844 }
845
846 if (deleted_output)
847 nir_fixup_deref_modes(stage->nir);
848
849 if (num_rts == 0) {
850 /* If we have no render targets, we need a null render target */
851 rt_bindings[0] = (struct anv_pipeline_binding) {
852 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
853 .binding = 0,
854 .index = UINT32_MAX,
855 };
856 num_rts = 1;
857 }
858
859 /* Now that we've determined the actual number of render targets, adjust
860 * the key accordingly.
861 */
862 stage->key.wm.nr_color_regions = num_rts;
863 stage->key.wm.color_outputs_valid = (1 << num_rts) - 1;
864
865 assert(num_rts <= max_rt);
866 assert(stage->bind_map.surface_count == 0);
867 typed_memcpy(stage->bind_map.surface_to_descriptor,
868 rt_bindings, num_rts);
869 stage->bind_map.surface_count += num_rts;
870 }
871
872 static const unsigned *
873 anv_pipeline_compile_fs(const struct brw_compiler *compiler,
874 void *mem_ctx,
875 struct anv_device *device,
876 struct anv_pipeline_stage *fs_stage,
877 struct anv_pipeline_stage *prev_stage)
878 {
879 /* TODO: we could set this to 0 based on the information in nir_shader, but
880 * we need this before we call spirv_to_nir.
881 */
882 assert(prev_stage);
883 fs_stage->key.wm.input_slots_valid =
884 prev_stage->prog_data.vue.vue_map.slots_valid;
885
886 const unsigned *code =
887 brw_compile_fs(compiler, device, mem_ctx, &fs_stage->key.wm,
888 &fs_stage->prog_data.wm, fs_stage->nir,
889 NULL, -1, -1, -1, true, false, NULL, NULL);
890
891 if (fs_stage->key.wm.nr_color_regions == 0 &&
892 !fs_stage->prog_data.wm.has_side_effects &&
893 !fs_stage->prog_data.wm.uses_kill &&
894 fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
895 !fs_stage->prog_data.wm.computed_stencil) {
896 /* This fragment shader has no outputs and no side effects. Go ahead
897 * and return the code pointer so we don't accidentally think the
898 * compile failed, but zero out prog_data, which will set program_size to
899 * zero and disable the stage.
900 */
901 memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
902 }
903
904 return code;
905 }
906
907 static VkResult
908 anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
909 struct anv_pipeline_cache *cache,
910 const VkGraphicsPipelineCreateInfo *info)
911 {
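/* Whole-pipeline creation feedback.  It starts out marked valid and times
 * the entire compile; the application-cache-hit bit is added below only if
 * every stage was satisfied from the pipeline cache.
 */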
912 VkPipelineCreationFeedbackEXT pipeline_feedback = {
913 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
914 };
915 int64_t pipeline_start = os_time_get_nano();
916
917 const struct brw_compiler *compiler =
918 pipeline->device->instance->physicalDevice.compiler;
919 struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
920
921 pipeline->active_stages = 0;
922
923 VkResult result;
924 for (uint32_t i = 0; i < info->stageCount; i++) {
925 const VkPipelineShaderStageCreateInfo *sinfo = &info->pStages[i];
926 gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
927
928 pipeline->active_stages |= sinfo->stage;
929
930 int64_t stage_start = os_time_get_nano();
931
932 stages[stage].stage = stage;
933 stages[stage].module = anv_shader_module_from_handle(sinfo->module);
934 stages[stage].entrypoint = sinfo->pName;
935 stages[stage].spec_info = sinfo->pSpecializationInfo;
936 anv_pipeline_hash_shader(stages[stage].module,
937 stages[stage].entrypoint,
938 stage,
939 stages[stage].spec_info,
940 stages[stage].shader_sha1);
941
942 const struct gen_device_info *devinfo = &pipeline->device->info;
943 switch (stage) {
944 case MESA_SHADER_VERTEX:
945 populate_vs_prog_key(devinfo, &stages[stage].key.vs);
946 break;
947 case MESA_SHADER_TESS_CTRL:
948 populate_tcs_prog_key(devinfo,
949 info->pTessellationState->patchControlPoints,
950 &stages[stage].key.tcs);
951 break;
952 case MESA_SHADER_TESS_EVAL:
953 populate_tes_prog_key(devinfo, &stages[stage].key.tes);
954 break;
955 case MESA_SHADER_GEOMETRY:
956 populate_gs_prog_key(devinfo, &stages[stage].key.gs);
957 break;
958 case MESA_SHADER_FRAGMENT:
959 populate_wm_prog_key(devinfo, pipeline->subpass,
960 info->pMultisampleState,
961 &stages[stage].key.wm);
962 break;
963 default:
964 unreachable("Invalid graphics shader stage");
965 }
966
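/* Per-stage feedback: count the key setup and hashing time and mark the
 * feedback valid; more time is accumulated in the passes below.
 */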
967 stages[stage].feedback.duration += os_time_get_nano() - stage_start;
968 stages[stage].feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
969 }
970
971 if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
972 pipeline->active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
973
974 assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
975
976 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
977
978 unsigned char sha1[20];
979 anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
980
981 unsigned found = 0;
982 unsigned cache_hits = 0;
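/* First pass: look every active stage up in the pipeline cache.  Time spent
 * here still counts toward each stage's creation feedback duration.
 */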
983 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
984 if (!stages[s].entrypoint)
985 continue;
986
987 int64_t stage_start = os_time_get_nano();
988
989 stages[s].cache_key.stage = s;
990 memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
991
992 bool cache_hit;
993 struct anv_shader_bin *bin =
994 anv_device_search_for_kernel(pipeline->device, cache,
995 &stages[s].cache_key,
996 sizeof(stages[s].cache_key), &cache_hit);
997 if (bin) {
998 found++;
999 pipeline->shaders[s] = bin;
1000 }
1001
1002 if (cache_hit) {
1003 cache_hits++;
1004 stages[s].feedback.flags |=
1005 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1006 }
1007 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1008 }
1009
1010 if (found == __builtin_popcount(pipeline->active_stages)) {
1011 if (cache_hits == found) {
1012 pipeline_feedback.flags |=
1013 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1014 }
1015 /* We found all our shaders in the cache. We're done. */
1016 goto done;
1017 } else if (found > 0) {
1018 /* We found some but not all of our shaders. This shouldn't happen
1019 * most of the time but it can if we have a partially populated
1020 * pipeline cache.
1021 */
1022 assert(found < __builtin_popcount(pipeline->active_stages));
1023
1024 vk_debug_report(&pipeline->device->instance->debug_report_callbacks,
1025 VK_DEBUG_REPORT_WARNING_BIT_EXT |
1026 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
1027 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
1028 (uint64_t)(uintptr_t)cache,
1029 0, 0, "anv",
1030 "Found a partial pipeline in the cache. This is "
1031 "most likely caused by an incomplete pipeline cache "
1032 "import or export");
1033
1034 /* We're going to have to recompile anyway, so just throw away our
1035 * references to the shaders in the cache. We'll get them out of the
1036 * cache again as part of the compilation process.
1037 */
1038 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
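/* Also drop the feedback flags gathered during the cache pass; these
 * stages are going to be recompiled below.
 */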
1039 stages[s].feedback.flags = 0;
1040 if (pipeline->shaders[s]) {
1041 anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
1042 pipeline->shaders[s] = NULL;
1043 }
1044 }
1045 }
1046
1047 void *pipeline_ctx = ralloc_context(NULL);
1048
1049 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1050 if (!stages[s].entrypoint)
1051 continue;
1052
1053 int64_t stage_start = os_time_get_nano();
1054
1055 assert(stages[s].stage == s);
1056 assert(pipeline->shaders[s] == NULL);
1057
1058 stages[s].bind_map = (struct anv_pipeline_bind_map) {
1059 .surface_to_descriptor = stages[s].surface_to_descriptor,
1060 .sampler_to_descriptor = stages[s].sampler_to_descriptor
1061 };
1062
1063 stages[s].nir = anv_pipeline_stage_get_nir(pipeline, cache,
1064 pipeline_ctx,
1065 &stages[s]);
1066 if (stages[s].nir == NULL) {
1067 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1068 goto fail;
1069 }
1070
1071 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1072 }
1073
1074 /* Walk backwards to link */
1075 struct anv_pipeline_stage *next_stage = NULL;
1076 for (int s = MESA_SHADER_STAGES - 1; s >= 0; s--) {
1077 if (!stages[s].entrypoint)
1078 continue;
1079
1080 switch (s) {
1081 case MESA_SHADER_VERTEX:
1082 anv_pipeline_link_vs(compiler, &stages[s], next_stage);
1083 break;
1084 case MESA_SHADER_TESS_CTRL:
1085 anv_pipeline_link_tcs(compiler, &stages[s], next_stage);
1086 break;
1087 case MESA_SHADER_TESS_EVAL:
1088 anv_pipeline_link_tes(compiler, &stages[s], next_stage);
1089 break;
1090 case MESA_SHADER_GEOMETRY:
1091 anv_pipeline_link_gs(compiler, &stages[s], next_stage);
1092 break;
1093 case MESA_SHADER_FRAGMENT:
1094 anv_pipeline_link_fs(compiler, &stages[s]);
1095 break;
1096 default:
1097 unreachable("Invalid graphics shader stage");
1098 }
1099
1100 next_stage = &stages[s];
1101 }
1102
1103 struct anv_pipeline_stage *prev_stage = NULL;
1104 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1105 if (!stages[s].entrypoint)
1106 continue;
1107
1108 int64_t stage_start = os_time_get_nano();
1109
1110 void *stage_ctx = ralloc_context(NULL);
1111
1112 nir_xfb_info *xfb_info = NULL;
1113 if (s == MESA_SHADER_VERTEX ||
1114 s == MESA_SHADER_TESS_EVAL ||
1115 s == MESA_SHADER_GEOMETRY)
1116 xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);
1117
1118 anv_pipeline_lower_nir(pipeline, stage_ctx, &stages[s], layout);
1119
1120 const unsigned *code;
1121 switch (s) {
1122 case MESA_SHADER_VERTEX:
1123 code = anv_pipeline_compile_vs(compiler, stage_ctx, pipeline->device,
1124 &stages[s]);
1125 break;
1126 case MESA_SHADER_TESS_CTRL:
1127 code = anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->device,
1128 &stages[s], prev_stage);
1129 break;
1130 case MESA_SHADER_TESS_EVAL:
1131 code = anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->device,
1132 &stages[s], prev_stage);
1133 break;
1134 case MESA_SHADER_GEOMETRY:
1135 code = anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->device,
1136 &stages[s], prev_stage);
1137 break;
1138 case MESA_SHADER_FRAGMENT:
1139 code = anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->device,
1140 &stages[s], prev_stage);
1141 break;
1142 default:
1143 unreachable("Invalid graphics shader stage");
1144 }
1145 if (code == NULL) {
1146 ralloc_free(stage_ctx);
1147 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1148 goto fail;
1149 }
1150
1151 struct anv_shader_bin *bin =
1152 anv_device_upload_kernel(pipeline->device, cache,
1153 &stages[s].cache_key,
1154 sizeof(stages[s].cache_key),
1155 code, stages[s].prog_data.base.program_size,
1156 stages[s].nir->constant_data,
1157 stages[s].nir->constant_data_size,
1158 &stages[s].prog_data.base,
1159 brw_prog_data_size(s),
1160 xfb_info, &stages[s].bind_map);
1161 if (!bin) {
1162 ralloc_free(stage_ctx);
1163 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1164 goto fail;
1165 }
1166
1167 pipeline->shaders[s] = bin;
1168 ralloc_free(stage_ctx);
1169
1170 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1171
1172 prev_stage = &stages[s];
1173 }
1174
1175 ralloc_free(pipeline_ctx);
1176
1177 done:
1178
1179 if (pipeline->shaders[MESA_SHADER_FRAGMENT] &&
1180 pipeline->shaders[MESA_SHADER_FRAGMENT]->prog_data->program_size == 0) {
1181 /* This can happen if we decided to implicitly disable the fragment
1182 * shader. See anv_pipeline_compile_fs().
1183 */
1184 anv_shader_bin_unref(pipeline->device,
1185 pipeline->shaders[MESA_SHADER_FRAGMENT]);
1186 pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
1187 pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
1188 }
1189
1190 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1191
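/* Write the results back to the application if it chained a
 * VkPipelineCreationFeedbackCreateInfoEXT into pNext.  Illustrative usage
 * from the application side (a sketch only; names like graphics_info and
 * log_duration_ns are hypothetical):
 *
 *    VkPipelineCreationFeedbackEXT pipe_fb = { 0 };
 *    VkPipelineCreationFeedbackEXT stage_fb[2] = { { 0 } };
 *    VkPipelineCreationFeedbackCreateInfoEXT fb_info = {
 *       .sType = VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT,
 *       .pPipelineCreationFeedback = &pipe_fb,
 *       .pipelineStageCreationFeedbackCount = 2,  // must equal stageCount
 *       .pPipelineStageCreationFeedbacks = stage_fb,
 *    };
 *    graphics_info.pNext = &fb_info;
 *    vkCreateGraphicsPipelines(device, cache, 1, &graphics_info, NULL, &pipe);
 *    if (pipe_fb.flags & VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT)
 *       log_duration_ns(pipe_fb.duration);
 */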
1192 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1193 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1194 if (create_feedback) {
1195 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1196
1197 assert(info->stageCount == create_feedback->pipelineStageCreationFeedbackCount);
1198 for (uint32_t i = 0; i < info->stageCount; i++) {
1199 gl_shader_stage s = vk_to_mesa_shader_stage(info->pStages[i].stage);
1200 create_feedback->pPipelineStageCreationFeedbacks[i] = stages[s].feedback;
1201 }
1202 }
1203
1204 return VK_SUCCESS;
1205
1206 fail:
1207 ralloc_free(pipeline_ctx);
1208
1209 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1210 if (pipeline->shaders[s])
1211 anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
1212 }
1213
1214 return result;
1215 }
1216
1217 VkResult
1218 anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
1219 struct anv_pipeline_cache *cache,
1220 const VkComputePipelineCreateInfo *info,
1221 const struct anv_shader_module *module,
1222 const char *entrypoint,
1223 const VkSpecializationInfo *spec_info)
1224 {
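/* Same creation-feedback bookkeeping as the graphics path, but for the
 * single compute stage.
 */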
1225 VkPipelineCreationFeedbackEXT pipeline_feedback = {
1226 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1227 };
1228 int64_t pipeline_start = os_time_get_nano();
1229
1230 const struct brw_compiler *compiler =
1231 pipeline->device->instance->physicalDevice.compiler;
1232
1233 struct anv_pipeline_stage stage = {
1234 .stage = MESA_SHADER_COMPUTE,
1235 .module = module,
1236 .entrypoint = entrypoint,
1237 .spec_info = spec_info,
1238 .cache_key = {
1239 .stage = MESA_SHADER_COMPUTE,
1240 },
1241 .feedback = {
1242 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1243 },
1244 };
1245 anv_pipeline_hash_shader(stage.module,
1246 stage.entrypoint,
1247 MESA_SHADER_COMPUTE,
1248 stage.spec_info,
1249 stage.shader_sha1);
1250
1251 struct anv_shader_bin *bin = NULL;
1252
1253 populate_cs_prog_key(&pipeline->device->info, &stage.key.cs);
1254
1255 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1256
1257 anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
1258 bool cache_hit;
1259 bin = anv_device_search_for_kernel(pipeline->device, cache, &stage.cache_key,
1260 sizeof(stage.cache_key), &cache_hit);
1261
1262 if (bin == NULL) {
1263 int64_t stage_start = os_time_get_nano();
1264
1265 stage.bind_map = (struct anv_pipeline_bind_map) {
1266 .surface_to_descriptor = stage.surface_to_descriptor,
1267 .sampler_to_descriptor = stage.sampler_to_descriptor
1268 };
1269
1270 /* Set up a binding for the gl_NumWorkGroups */
1271 stage.bind_map.surface_count = 1;
1272 stage.bind_map.surface_to_descriptor[0] = (struct anv_pipeline_binding) {
1273 .set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
1274 };
1275
1276 void *mem_ctx = ralloc_context(NULL);
1277
1278 stage.nir = anv_pipeline_stage_get_nir(pipeline, cache, mem_ctx, &stage);
1279 if (stage.nir == NULL) {
1280 ralloc_free(mem_ctx);
1281 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1282 }
1283
1284 anv_pipeline_lower_nir(pipeline, mem_ctx, &stage, layout);
1285
1286 NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id,
1287 &stage.prog_data.cs);
1288
1289 const unsigned *shader_code =
1290 brw_compile_cs(compiler, pipeline->device, mem_ctx, &stage.key.cs,
1291 &stage.prog_data.cs, stage.nir, -1, NULL);
1292 if (shader_code == NULL) {
1293 ralloc_free(mem_ctx);
1294 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1295 }
1296
1297 const unsigned code_size = stage.prog_data.base.program_size;
1298 bin = anv_device_upload_kernel(pipeline->device, cache,
1299 &stage.cache_key, sizeof(stage.cache_key),
1300 shader_code, code_size,
1301 stage.nir->constant_data,
1302 stage.nir->constant_data_size,
1303 &stage.prog_data.base,
1304 sizeof(stage.prog_data.cs),
1305 NULL, &stage.bind_map);
1306 if (!bin) {
1307 ralloc_free(mem_ctx);
1308 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1309 }
1310
1311 ralloc_free(mem_ctx);
1312
1313 stage.feedback.duration = os_time_get_nano() - stage_start;
1314 }
1315
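/* A compute pipeline has exactly one stage, so a cache hit marks both the
 * stage feedback and the whole-pipeline feedback as application cache hits.
 */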
1316 if (cache_hit) {
1317 stage.feedback.flags |=
1318 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1319 pipeline_feedback.flags |=
1320 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1321 }
1322 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1323
1324 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1325 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1326 if (create_feedback) {
1327 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1328
1329 assert(create_feedback->pipelineStageCreationFeedbackCount == 1);
1330 create_feedback->pPipelineStageCreationFeedbacks[0] = stage.feedback;
1331 }
1332
1333 pipeline->active_stages = VK_SHADER_STAGE_COMPUTE_BIT;
1334 pipeline->shaders[MESA_SHADER_COMPUTE] = bin;
1335
1336 return VK_SUCCESS;
1337 }
1338
1339 /**
1340 * Copy pipeline state not marked as dynamic.
1341 * Dynamic state is pipeline state which hasn't been provided at pipeline
1342 * creation time, but is dynamically provided afterwards using various
1343 * vkCmdSet* functions.
1344 *
1345 * The set of state considered "non_dynamic" is determined by the pieces of
1346 * state that have their corresponding VkDynamicState enums omitted from
1347 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
1348 *
1349 * @param[out] pipeline Destination non_dynamic state.
1350 * @param[in] pCreateInfo Source of non_dynamic state to be copied.
1351 */
1352 static void
1353 copy_non_dynamic_state(struct anv_pipeline *pipeline,
1354 const VkGraphicsPipelineCreateInfo *pCreateInfo)
1355 {
1356 anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
1357 struct anv_subpass *subpass = pipeline->subpass;
1358
1359 pipeline->dynamic_state = default_dynamic_state;
1360
1361 if (pCreateInfo->pDynamicState) {
1362 /* Remove all of the states that are marked as dynamic */
1363 uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
1364 for (uint32_t s = 0; s < count; s++)
1365 states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
1366 }
1367
1368 struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
1369
1370 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1371 *
1372 * pViewportState is [...] NULL if the pipeline
1373 * has rasterization disabled.
1374 */
1375 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1376 assert(pCreateInfo->pViewportState);
1377
1378 dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
1379 if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
1380 typed_memcpy(dynamic->viewport.viewports,
1381 pCreateInfo->pViewportState->pViewports,
1382 pCreateInfo->pViewportState->viewportCount);
1383 }
1384
1385 dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
1386 if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
1387 typed_memcpy(dynamic->scissor.scissors,
1388 pCreateInfo->pViewportState->pScissors,
1389 pCreateInfo->pViewportState->scissorCount);
1390 }
1391 }
1392
1393 if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
1394 assert(pCreateInfo->pRasterizationState);
1395 dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
1396 }
1397
1398 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
1399 assert(pCreateInfo->pRasterizationState);
1400 dynamic->depth_bias.bias =
1401 pCreateInfo->pRasterizationState->depthBiasConstantFactor;
1402 dynamic->depth_bias.clamp =
1403 pCreateInfo->pRasterizationState->depthBiasClamp;
1404 dynamic->depth_bias.slope =
1405 pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
1406 }
1407
1408 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1409 *
1410 * pColorBlendState is [...] NULL if the pipeline has rasterization
1411 * disabled or if the subpass of the render pass the pipeline is
1412 * created against does not use any color attachments.
1413 */
1414 bool uses_color_att = false;
1415 for (unsigned i = 0; i < subpass->color_count; ++i) {
1416 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1417 uses_color_att = true;
1418 break;
1419 }
1420 }
1421
1422 if (uses_color_att &&
1423 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1424 assert(pCreateInfo->pColorBlendState);
1425
1426 if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
1427 typed_memcpy(dynamic->blend_constants,
1428 pCreateInfo->pColorBlendState->blendConstants, 4);
1429 }
1430
1431 /* If there is no depthstencil attachment, then don't read
1432 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
1433 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
1434 * no need to override the depthstencil defaults in
1435 * anv_pipeline::dynamic_state when there is no depthstencil attachment.
1436 *
1437 * Section 9.2 of the Vulkan 1.0.15 spec says:
1438 *
1439 * pDepthStencilState is [...] NULL if the pipeline has rasterization
1440 * disabled or if the subpass of the render pass the pipeline is created
1441 * against does not use a depth/stencil attachment.
1442 */
1443 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
1444 subpass->depth_stencil_attachment) {
1445 assert(pCreateInfo->pDepthStencilState);
1446
1447 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
1448 dynamic->depth_bounds.min =
1449 pCreateInfo->pDepthStencilState->minDepthBounds;
1450 dynamic->depth_bounds.max =
1451 pCreateInfo->pDepthStencilState->maxDepthBounds;
1452 }
1453
1454 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
1455 dynamic->stencil_compare_mask.front =
1456 pCreateInfo->pDepthStencilState->front.compareMask;
1457 dynamic->stencil_compare_mask.back =
1458 pCreateInfo->pDepthStencilState->back.compareMask;
1459 }
1460
1461 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
1462 dynamic->stencil_write_mask.front =
1463 pCreateInfo->pDepthStencilState->front.writeMask;
1464 dynamic->stencil_write_mask.back =
1465 pCreateInfo->pDepthStencilState->back.writeMask;
1466 }
1467
1468 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
1469 dynamic->stencil_reference.front =
1470 pCreateInfo->pDepthStencilState->front.reference;
1471 dynamic->stencil_reference.back =
1472 pCreateInfo->pDepthStencilState->back.reference;
1473 }
1474 }
1475
1476 pipeline->dynamic_state_mask = states;
1477 }
1478
1479 static void
1480 anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
1481 {
1482 #ifdef DEBUG
1483 struct anv_render_pass *renderpass = NULL;
1484 struct anv_subpass *subpass = NULL;
1485
1486 /* Assert that all required members of VkGraphicsPipelineCreateInfo are
1487 * present. See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
1488 */
1489 assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1490
1491 renderpass = anv_render_pass_from_handle(info->renderPass);
1492 assert(renderpass);
1493
1494 assert(info->subpass < renderpass->subpass_count);
1495 subpass = &renderpass->subpasses[info->subpass];
1496
1497 assert(info->stageCount >= 1);
1498 assert(info->pVertexInputState);
1499 assert(info->pInputAssemblyState);
1500 assert(info->pRasterizationState);
1501 if (!info->pRasterizationState->rasterizerDiscardEnable) {
1502 assert(info->pViewportState);
1503 assert(info->pMultisampleState);
1504
1505 if (subpass && subpass->depth_stencil_attachment)
1506 assert(info->pDepthStencilState);
1507
1508 if (subpass && subpass->color_count > 0) {
1509 bool all_color_unused = true;
1510 for (int i = 0; i < subpass->color_count; i++) {
1511 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
1512 all_color_unused = false;
1513 }
1514 /* pColorBlendState is ignored if the pipeline has rasterization
1515 * disabled or if the subpass of the render pass the pipeline is
1516 * created against does not use any color attachments.
1517 */
1518 assert(info->pColorBlendState || all_color_unused);
1519 }
1520 }
1521
1522 for (uint32_t i = 0; i < info->stageCount; ++i) {
1523 switch (info->pStages[i].stage) {
1524 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
1525 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
1526 assert(info->pTessellationState);
1527 break;
1528 default:
1529 break;
1530 }
1531 }
1532 #endif
1533 }
1534
1535 /**
1536 * Calculate the desired L3 partitioning based on the current state of the
1537 * pipeline. For now this simply returns the conservative defaults calculated
1538 * by get_default_l3_weights(), but we could probably do better by gathering
1539 * more statistics from the pipeline state (e.g. guess of expected URB usage
1540 * and bound surfaces), or by using feedback from performance counters.
1541 */
1542 void
1543 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
1544 {
1545 const struct gen_device_info *devinfo = &pipeline->device->info;
1546
1547 const struct gen_l3_weights w =
1548 gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);
1549
1550 pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
1551 pipeline->urb.total_size =
1552 gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
1553 }
1554
1555 VkResult
1556 anv_pipeline_init(struct anv_pipeline *pipeline,
1557 struct anv_device *device,
1558 struct anv_pipeline_cache *cache,
1559 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1560 const VkAllocationCallbacks *alloc)
1561 {
1562 VkResult result;
1563
1564 anv_pipeline_validate_create_info(pCreateInfo);
1565
1566 if (alloc == NULL)
1567 alloc = &device->alloc;
1568
1569 pipeline->device = device;
1570
1571 ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
1572 assert(pCreateInfo->subpass < render_pass->subpass_count);
1573 pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];
1574
1575 result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
1576 if (result != VK_SUCCESS)
1577 return result;
1578
1579 pipeline->batch.alloc = alloc;
1580 pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
1581 pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
1582 pipeline->batch.relocs = &pipeline->batch_relocs;
1583 pipeline->batch.status = VK_SUCCESS;
1584
1585 copy_non_dynamic_state(pipeline, pCreateInfo);
1586 pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
1587 pCreateInfo->pRasterizationState->depthClampEnable;
1588
1589 /* Previously we enabled depth clipping when !depthClampEnable.
1590 * DepthClipStateCreateInfo now makes depth clipping explicit so if the
1591 * clipping info is available, use its enable value to determine clipping,
1592 * otherwise fallback to the previous !depthClampEnable logic.
1593 */
1594 const VkPipelineRasterizationDepthClipStateCreateInfoEXT *clip_info =
1595 vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
1596 PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
1597 pipeline->depth_clip_enable = clip_info ? clip_info->depthClipEnable : !pipeline->depth_clamp_enable;
1598
1599 pipeline->sample_shading_enable = pCreateInfo->pMultisampleState &&
1600 pCreateInfo->pMultisampleState->sampleShadingEnable;
1601
1602 pipeline->needs_data_cache = false;
1603
1604 /* When we free the pipeline, we detect stages based on the NULL status
1605 * of various prog_data pointers. Make them NULL by default.
1606 */
1607 memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
1608
1609 result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
1610 if (result != VK_SUCCESS) {
1611 anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
1612 return result;
1613 }
1614
1615 assert(pipeline->shaders[MESA_SHADER_VERTEX]);
1616
1617 anv_pipeline_setup_l3_config(pipeline, false);
1618
1619 const VkPipelineVertexInputStateCreateInfo *vi_info =
1620 pCreateInfo->pVertexInputState;
1621
1622 const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;
1623
1624 pipeline->vb_used = 0;
1625 for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
1626 const VkVertexInputAttributeDescription *desc =
1627 &vi_info->pVertexAttributeDescriptions[i];
1628
1629 if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
1630 pipeline->vb_used |= 1 << desc->binding;
1631 }
1632
1633 for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
1634 const VkVertexInputBindingDescription *desc =
1635 &vi_info->pVertexBindingDescriptions[i];
1636
1637 pipeline->vb[desc->binding].stride = desc->stride;
1638
1639 /* Step rate is programmed per vertex element (attribute), not
1640 * binding. Set up a map of which bindings step per instance, for
1641 * reference by vertex element setup. */
1642 switch (desc->inputRate) {
1643 default:
1644 case VK_VERTEX_INPUT_RATE_VERTEX:
1645 pipeline->vb[desc->binding].instanced = false;
1646 break;
1647 case VK_VERTEX_INPUT_RATE_INSTANCE:
1648 pipeline->vb[desc->binding].instanced = true;
1649 break;
1650 }
1651
1652 pipeline->vb[desc->binding].instance_divisor = 1;
1653 }
1654
1655 const VkPipelineVertexInputDivisorStateCreateInfoEXT *vi_div_state =
1656 vk_find_struct_const(vi_info->pNext,
1657 PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
1658 if (vi_div_state) {
1659 for (uint32_t i = 0; i < vi_div_state->vertexBindingDivisorCount; i++) {
1660 const VkVertexInputBindingDivisorDescriptionEXT *desc =
1661 &vi_div_state->pVertexBindingDivisors[i];
1662
1663 pipeline->vb[desc->binding].instance_divisor = desc->divisor;
1664 }
1665 }
1666
1667 /* Our implementation of VK_KHR_multiview uses instancing to draw the
1668 * different views. If the client asks for instancing, we need to multiply
1669 * the instance divisor by the number of views to ensure that we repeat the
1670 * client's per-instance data once for each view.
1671 */
1672 if (pipeline->subpass->view_mask) {
1673 const uint32_t view_count = anv_subpass_view_count(pipeline->subpass);
1674 for (uint32_t vb = 0; vb < MAX_VBS; vb++) {
1675 if (pipeline->vb[vb].instanced)
1676 pipeline->vb[vb].instance_divisor *= view_count;
1677 }
1678 }
1679
1680 const VkPipelineInputAssemblyStateCreateInfo *ia_info =
1681 pCreateInfo->pInputAssemblyState;
1682 const VkPipelineTessellationStateCreateInfo *tess_info =
1683 pCreateInfo->pTessellationState;
1684 pipeline->primitive_restart = ia_info->primitiveRestartEnable;
1685
1686 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
1687 pipeline->topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
1688 else
1689 pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
1690
1691 return VK_SUCCESS;
1692 }