anv/pipeline: Plumb pipeline shader stage create flags
[mesa.git] / src / intel / vulkan / anv_pipeline.c
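This revision threads the per-stage create flags (VkPipelineShaderStageCreateInfo::flags) from graphics and compute pipeline creation down into the populate_*_prog_key() helpers below. The flags are only plumbed, not acted upon yet; presumably they are groundwork for flag-dependent program-key fields such as the subgroup size controls of VK_EXT_subgroup_size_control (an assumption, not something this file states). A minimal, hypothetical application-side sketch of the value being forwarded, assuming Vulkan headers that provide VK_EXT_subgroup_size_control:

#include <vulkan/vulkan.h>

/* Hypothetical helper: the .flags value set here is what ends up in
 * sinfo->flags / info->stage.flags in the code below.  The specific flag is
 * only an illustrative example from VK_EXT_subgroup_size_control. */
static VkPipelineShaderStageCreateInfo
example_stage_info(VkShaderModule module)
{
   return (VkPipelineShaderStageCreateInfo) {
      .sType  = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .flags  = VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT,
      .stage  = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = module,
      .pName  = "main",
   };
}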
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "util/mesa-sha1.h"
31 #include "util/os_time.h"
32 #include "common/gen_l3_config.h"
33 #include "anv_private.h"
34 #include "compiler/brw_nir.h"
35 #include "anv_nir.h"
36 #include "nir/nir_xfb_info.h"
37 #include "spirv/nir_spirv.h"
38 #include "vk_util.h"
39
40 /* Needed for SWIZZLE macros */
41 #include "program/prog_instruction.h"
42
43 // Shader functions
44
45 VkResult anv_CreateShaderModule(
46 VkDevice _device,
47 const VkShaderModuleCreateInfo* pCreateInfo,
48 const VkAllocationCallbacks* pAllocator,
49 VkShaderModule* pShaderModule)
50 {
51 ANV_FROM_HANDLE(anv_device, device, _device);
52 struct anv_shader_module *module;
53
54 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
55 assert(pCreateInfo->flags == 0);
56
57 module = vk_alloc2(&device->alloc, pAllocator,
58 sizeof(*module) + pCreateInfo->codeSize, 8,
59 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
60 if (module == NULL)
61 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
62
63 module->size = pCreateInfo->codeSize;
64 memcpy(module->data, pCreateInfo->pCode, module->size);
65
66 _mesa_sha1_compute(module->data, module->size, module->sha1);
67
68 *pShaderModule = anv_shader_module_to_handle(module);
69
70 return VK_SUCCESS;
71 }
72
73 void anv_DestroyShaderModule(
74 VkDevice _device,
75 VkShaderModule _module,
76 const VkAllocationCallbacks* pAllocator)
77 {
78 ANV_FROM_HANDLE(anv_device, device, _device);
79 ANV_FROM_HANDLE(anv_shader_module, module, _module);
80
81 if (!module)
82 return;
83
84 vk_free2(&device->alloc, pAllocator, module);
85 }
86
87 #define SPIR_V_MAGIC_NUMBER 0x07230203
88
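/* Map each shader stage to the INTEL_DEBUG flag that enables dumping of its
 * NIR; used below when printing the NIR produced from SPIR-V.
 */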
89 static const uint64_t stage_to_debug[] = {
90 [MESA_SHADER_VERTEX] = DEBUG_VS,
91 [MESA_SHADER_TESS_CTRL] = DEBUG_TCS,
92 [MESA_SHADER_TESS_EVAL] = DEBUG_TES,
93 [MESA_SHADER_GEOMETRY] = DEBUG_GS,
94 [MESA_SHADER_FRAGMENT] = DEBUG_WM,
95 [MESA_SHADER_COMPUTE] = DEBUG_CS,
96 };
97
98 struct anv_spirv_debug_data {
99 struct anv_device *device;
100 const struct anv_shader_module *module;
101 };
102
103 static void anv_spirv_nir_debug(void *private_data,
104 enum nir_spirv_debug_level level,
105 size_t spirv_offset,
106 const char *message)
107 {
108 struct anv_spirv_debug_data *debug_data = private_data;
109 static const VkDebugReportFlagsEXT vk_flags[] = {
110 [NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
111 [NIR_SPIRV_DEBUG_LEVEL_WARNING] = VK_DEBUG_REPORT_WARNING_BIT_EXT,
112 [NIR_SPIRV_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
113 };
114 char buffer[256];
115
116 snprintf(buffer, sizeof(buffer), "SPIR-V offset %lu: %s", (unsigned long) spirv_offset, message);
117
118 vk_debug_report(&debug_data->device->instance->debug_report_callbacks,
119 vk_flags[level],
120 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
121 (uint64_t) (uintptr_t) debug_data->module,
122 0, 0, "anv", buffer);
123 }
124
125 /* Eventually, this will become part of anv_CreateShader. Unfortunately,
126 * we can't do that yet because we don't have the ability to copy nir.
127 */
128 static nir_shader *
129 anv_shader_compile_to_nir(struct anv_device *device,
130 void *mem_ctx,
131 const struct anv_shader_module *module,
132 const char *entrypoint_name,
133 gl_shader_stage stage,
134 const VkSpecializationInfo *spec_info)
135 {
136 const struct anv_physical_device *pdevice =
137 &device->instance->physicalDevice;
138 const struct brw_compiler *compiler = pdevice->compiler;
139 const nir_shader_compiler_options *nir_options =
140 compiler->glsl_compiler_options[stage].NirOptions;
141
142 uint32_t *spirv = (uint32_t *) module->data;
143 assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
144 assert(module->size % 4 == 0);
145
146 uint32_t num_spec_entries = 0;
147 struct nir_spirv_specialization *spec_entries = NULL;
148 if (spec_info && spec_info->mapEntryCount > 0) {
149 num_spec_entries = spec_info->mapEntryCount;
150 spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
151 for (uint32_t i = 0; i < num_spec_entries; i++) {
152 VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
153 const void *data = spec_info->pData + entry.offset;
154 assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
155
156 spec_entries[i].id = spec_info->pMapEntries[i].constantID;
157          if (entry.size == 8)
158 spec_entries[i].data64 = *(const uint64_t *)data;
159 else
160 spec_entries[i].data32 = *(const uint32_t *)data;
161 }
162 }
163
164 struct anv_spirv_debug_data spirv_debug_data = {
165 .device = device,
166 .module = module,
167 };
168 struct spirv_to_nir_options spirv_options = {
169 .lower_workgroup_access_to_offsets = true,
170 .caps = {
171 .demote_to_helper_invocation = true,
172 .derivative_group = true,
173 .descriptor_array_dynamic_indexing = true,
174 .descriptor_array_non_uniform_indexing = true,
175 .descriptor_indexing = true,
176 .device_group = true,
177 .draw_parameters = true,
178 .float16 = pdevice->info.gen >= 8,
179 .float64 = pdevice->info.gen >= 8,
180 .fragment_shader_sample_interlock = pdevice->info.gen >= 9,
181 .fragment_shader_pixel_interlock = pdevice->info.gen >= 9,
182 .geometry_streams = true,
183 .image_write_without_format = true,
184 .int8 = pdevice->info.gen >= 8,
185 .int16 = pdevice->info.gen >= 8,
186 .int64 = pdevice->info.gen >= 8,
187 .int64_atomics = pdevice->info.gen >= 9 && pdevice->use_softpin,
188 .min_lod = true,
189 .multiview = true,
190 .physical_storage_buffer_address = pdevice->has_a64_buffer_access,
191 .post_depth_coverage = pdevice->info.gen >= 9,
192 .runtime_descriptor_array = true,
193 .shader_viewport_index_layer = true,
194 .stencil_export = pdevice->info.gen >= 9,
195 .storage_8bit = pdevice->info.gen >= 8,
196 .storage_16bit = pdevice->info.gen >= 8,
197 .subgroup_arithmetic = true,
198 .subgroup_basic = true,
199 .subgroup_ballot = true,
200 .subgroup_quad = true,
201 .subgroup_shuffle = true,
202 .subgroup_vote = true,
203 .tessellation = true,
204 .transform_feedback = pdevice->info.gen >= 8,
205 .variable_pointers = true,
206 },
207 .ubo_addr_format = nir_address_format_32bit_index_offset,
208 .ssbo_addr_format =
209 anv_nir_ssbo_addr_format(pdevice, device->robust_buffer_access),
210 .phys_ssbo_addr_format = nir_address_format_64bit_global,
211 .push_const_addr_format = nir_address_format_logical,
212
213       /* TODO: Consider changing this to an address format in which the NULL
214        * pointer equals 0.  That might be a better format to play nicely
215        * with certain code / code generators.
216 */
217 .shared_addr_format = nir_address_format_32bit_offset,
218 .debug = {
219 .func = anv_spirv_nir_debug,
220 .private_data = &spirv_debug_data,
221 },
222 };
223
224
225 nir_shader *nir =
226 spirv_to_nir(spirv, module->size / 4,
227 spec_entries, num_spec_entries,
228 stage, entrypoint_name, &spirv_options, nir_options);
229 assert(nir->info.stage == stage);
230 nir_validate_shader(nir, "after spirv_to_nir");
231 ralloc_steal(mem_ctx, nir);
232
233 free(spec_entries);
234
235 if (unlikely(INTEL_DEBUG & stage_to_debug[stage])) {
236 fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
237 gl_shader_stage_name(stage));
238 nir_print_shader(nir, stderr);
239 }
240
241 /* We have to lower away local constant initializers right before we
242 * inline functions. That way they get properly initialized at the top
243 * of the function and not at the top of its caller.
244 */
245 NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
246 NIR_PASS_V(nir, nir_lower_returns);
247 NIR_PASS_V(nir, nir_inline_functions);
248 NIR_PASS_V(nir, nir_opt_deref);
249
250 /* Pick off the single entrypoint that we want */
251 foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
252 if (!func->is_entrypoint)
253 exec_node_remove(&func->node);
254 }
255 assert(exec_list_length(&nir->functions) == 1);
256
257 /* Now that we've deleted all but the main function, we can go ahead and
258 * lower the rest of the constant initializers. We do this here so that
259 * nir_remove_dead_variables and split_per_member_structs below see the
260 * corresponding stores.
261 */
262 NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
263
264 /* Split member structs. We do this before lower_io_to_temporaries so that
265 * it doesn't lower system values to temporaries by accident.
266 */
267 NIR_PASS_V(nir, nir_split_var_copies);
268 NIR_PASS_V(nir, nir_split_per_member_structs);
269
270 NIR_PASS_V(nir, nir_remove_dead_variables,
271 nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
272
273 NIR_PASS_V(nir, nir_propagate_invariant);
274 NIR_PASS_V(nir, nir_lower_io_to_temporaries,
275 nir_shader_get_entrypoint(nir), true, false);
276
277 NIR_PASS_V(nir, nir_lower_frexp);
278
279 /* Vulkan uses the separate-shader linking model */
280 nir->info.separate_shader = true;
281
282 brw_preprocess_nir(compiler, nir, NULL);
283
284 return nir;
285 }
286
287 void anv_DestroyPipeline(
288 VkDevice _device,
289 VkPipeline _pipeline,
290 const VkAllocationCallbacks* pAllocator)
291 {
292 ANV_FROM_HANDLE(anv_device, device, _device);
293 ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
294
295 if (!pipeline)
296 return;
297
298 anv_reloc_list_finish(&pipeline->batch_relocs,
299 pAllocator ? pAllocator : &device->alloc);
300 if (pipeline->blend_state.map)
301 anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
302
303 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
304 if (pipeline->shaders[s])
305 anv_shader_bin_unref(device, pipeline->shaders[s]);
306 }
307
308 vk_free2(&device->alloc, pAllocator, pipeline);
309 }
310
311 static const uint32_t vk_to_gen_primitive_type[] = {
312 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
313 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
314 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
315 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
316 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
317 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
318 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
319 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
320 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
321 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
322 };
323
324 static void
325 populate_sampler_prog_key(const struct gen_device_info *devinfo,
326 struct brw_sampler_prog_key_data *key)
327 {
328 /* Almost all multisampled textures are compressed. The only time when we
329 * don't compress a multisampled texture is for 16x MSAA with a surface
330     * width greater than 8k, which is a bit of an edge case.  Since the sampler
331 * just ignores the MCS parameter to ld2ms when MCS is disabled, it's safe
332 * to tell the compiler to always assume compression.
333 */
334 key->compressed_multisample_layout_mask = ~0;
335
336 /* SkyLake added support for 16x MSAA. With this came a new message for
337 * reading from a 16x MSAA surface with compression. The new message was
338 * needed because now the MCS data is 64 bits instead of 32 or lower as is
339 * the case for 8x, 4x, and 2x. The key->msaa_16 bit-field controls which
340 * message we use. Fortunately, the 16x message works for 8x, 4x, and 2x
341 * so we can just use it unconditionally. This may not be quite as
342 * efficient but it saves us from recompiling.
343 */
344 if (devinfo->gen >= 9)
345 key->msaa_16 = ~0;
346
347 /* XXX: Handle texture swizzle on HSW- */
348 for (int i = 0; i < MAX_SAMPLERS; i++) {
349 /* Assume color sampler, no swizzling. (Works for BDW+) */
350 key->swizzles[i] = SWIZZLE_XYZW;
351 }
352 }
353
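/* Fill in the key fields shared by every shader stage.  The
 * VkPipelineShaderStageCreateFlags parameter is plumbed through from pipeline
 * creation but is not consumed yet; presumably it is groundwork for
 * flag-dependent key fields such as subgroup size controls (an assumption,
 * not something established by this file).
 */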
354 static void
355 populate_base_prog_key(const struct gen_device_info *devinfo,
356 VkPipelineShaderStageCreateFlags flags,
357 struct brw_base_prog_key *key)
358 {
359 key->subgroup_size_type = BRW_SUBGROUP_SIZE_API_CONSTANT;
360
361 populate_sampler_prog_key(devinfo, &key->tex);
362 }
363
364 static void
365 populate_vs_prog_key(const struct gen_device_info *devinfo,
366 VkPipelineShaderStageCreateFlags flags,
367 struct brw_vs_prog_key *key)
368 {
369 memset(key, 0, sizeof(*key));
370
371 populate_base_prog_key(devinfo, flags, &key->base);
372
373 /* XXX: Handle vertex input work-arounds */
374
375 /* XXX: Handle sampler_prog_key */
376 }
377
378 static void
379 populate_tcs_prog_key(const struct gen_device_info *devinfo,
380 VkPipelineShaderStageCreateFlags flags,
381 unsigned input_vertices,
382 struct brw_tcs_prog_key *key)
383 {
384 memset(key, 0, sizeof(*key));
385
386 populate_base_prog_key(devinfo, flags, &key->base);
387
388 key->input_vertices = input_vertices;
389 }
390
391 static void
392 populate_tes_prog_key(const struct gen_device_info *devinfo,
393 VkPipelineShaderStageCreateFlags flags,
394 struct brw_tes_prog_key *key)
395 {
396 memset(key, 0, sizeof(*key));
397
398 populate_base_prog_key(devinfo, flags, &key->base);
399 }
400
401 static void
402 populate_gs_prog_key(const struct gen_device_info *devinfo,
403 VkPipelineShaderStageCreateFlags flags,
404 struct brw_gs_prog_key *key)
405 {
406 memset(key, 0, sizeof(*key));
407
408 populate_base_prog_key(devinfo, flags, &key->base);
409 }
410
411 static void
412 populate_wm_prog_key(const struct gen_device_info *devinfo,
413 VkPipelineShaderStageCreateFlags flags,
414 const struct anv_subpass *subpass,
415 const VkPipelineMultisampleStateCreateInfo *ms_info,
416 struct brw_wm_prog_key *key)
417 {
418 memset(key, 0, sizeof(*key));
419
420 populate_base_prog_key(devinfo, flags, &key->base);
421
422    /* We set this to 0 here and set it to the actual value before we call
423 * brw_compile_fs.
424 */
425 key->input_slots_valid = 0;
426
427 /* Vulkan doesn't specify a default */
428 key->high_quality_derivatives = false;
429
430 /* XXX Vulkan doesn't appear to specify */
431 key->clamp_fragment_color = false;
432
433 assert(subpass->color_count <= MAX_RTS);
434 for (uint32_t i = 0; i < subpass->color_count; i++) {
435 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
436 key->color_outputs_valid |= (1 << i);
437 }
438
439 key->nr_color_regions = util_bitcount(key->color_outputs_valid);
440
441    /* To reduce possible shader recompilations we would need to know whether
442     * the shader writes a SampleMask output, so we could decide whether to
443     * emit code to work around the hardware disabling alpha-to-coverage
444     * whenever SampleMask is written.
445 */
446 key->alpha_to_coverage = ms_info && ms_info->alphaToCoverageEnable;
447
448 /* Vulkan doesn't support fixed-function alpha test */
449 key->alpha_test_replicate_alpha = false;
450
451 if (ms_info) {
452 /* We should probably pull this out of the shader, but it's fairly
453 * harmless to compute it and then let dead-code take care of it.
454 */
455 if (ms_info->rasterizationSamples > 1) {
456 key->persample_interp = ms_info->sampleShadingEnable &&
457 (ms_info->minSampleShading * ms_info->rasterizationSamples) > 1;
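         /* For example, sampleShadingEnable with minSampleShading = 0.5 at
          * 4x MSAA gives 0.5 * 4 = 2 > 1, so we interpolate per sample.
          */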
458 key->multisample_fbo = true;
459 }
460
461 key->frag_coord_adds_sample_pos = key->persample_interp;
462 }
463 }
464
465 static void
466 populate_cs_prog_key(const struct gen_device_info *devinfo,
467 VkPipelineShaderStageCreateFlags flags,
468 struct brw_cs_prog_key *key)
469 {
470 memset(key, 0, sizeof(*key));
471
472 populate_base_prog_key(devinfo, flags, &key->base);
473 }
474
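/* Transient per-stage state gathered while compiling a pipeline: the source
 * module and entrypoint, the program key and cache key, the lowered NIR, the
 * binding maps, the resulting prog_data, and the creation feedback reported
 * through VK_EXT_pipeline_creation_feedback.
 */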
475 struct anv_pipeline_stage {
476 gl_shader_stage stage;
477
478 const struct anv_shader_module *module;
479 const char *entrypoint;
480 const VkSpecializationInfo *spec_info;
481
482 unsigned char shader_sha1[20];
483
484 union brw_any_prog_key key;
485
486 struct {
487 gl_shader_stage stage;
488 unsigned char sha1[20];
489 } cache_key;
490
491 nir_shader *nir;
492
493 struct anv_pipeline_binding surface_to_descriptor[256];
494 struct anv_pipeline_binding sampler_to_descriptor[256];
495 struct anv_pipeline_bind_map bind_map;
496
497 union brw_any_prog_data prog_data;
498
499 VkPipelineCreationFeedbackEXT feedback;
500 };
501
502 static void
503 anv_pipeline_hash_shader(const struct anv_shader_module *module,
504 const char *entrypoint,
505 gl_shader_stage stage,
506 const VkSpecializationInfo *spec_info,
507 unsigned char *sha1_out)
508 {
509 struct mesa_sha1 ctx;
510 _mesa_sha1_init(&ctx);
511
512 _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
513 _mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
514 _mesa_sha1_update(&ctx, &stage, sizeof(stage));
515 if (spec_info) {
516 _mesa_sha1_update(&ctx, spec_info->pMapEntries,
517 spec_info->mapEntryCount *
518 sizeof(*spec_info->pMapEntries));
519 _mesa_sha1_update(&ctx, spec_info->pData,
520 spec_info->dataSize);
521 }
522
523 _mesa_sha1_final(&ctx, sha1_out);
524 }
525
526 static void
527 anv_pipeline_hash_graphics(struct anv_pipeline *pipeline,
528 struct anv_pipeline_layout *layout,
529 struct anv_pipeline_stage *stages,
530 unsigned char *sha1_out)
531 {
532 struct mesa_sha1 ctx;
533 _mesa_sha1_init(&ctx);
534
535 _mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
536 sizeof(pipeline->subpass->view_mask));
537
538 if (layout)
539 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
540
541 const bool rba = pipeline->device->robust_buffer_access;
542 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
543
544 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
545 if (stages[s].entrypoint) {
546 _mesa_sha1_update(&ctx, stages[s].shader_sha1,
547 sizeof(stages[s].shader_sha1));
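         /* The whole prog key is hashed, so any key fields that end up being
          * derived from the stage create flags are automatically reflected in
          * the pipeline cache key.
          */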
548 _mesa_sha1_update(&ctx, &stages[s].key, brw_prog_key_size(s));
549 }
550 }
551
552 _mesa_sha1_final(&ctx, sha1_out);
553 }
554
555 static void
556 anv_pipeline_hash_compute(struct anv_pipeline *pipeline,
557 struct anv_pipeline_layout *layout,
558 struct anv_pipeline_stage *stage,
559 unsigned char *sha1_out)
560 {
561 struct mesa_sha1 ctx;
562 _mesa_sha1_init(&ctx);
563
564 if (layout)
565 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
566
567 const bool rba = pipeline->device->robust_buffer_access;
568 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
569
570 _mesa_sha1_update(&ctx, stage->shader_sha1,
571 sizeof(stage->shader_sha1));
572 _mesa_sha1_update(&ctx, &stage->key.cs, sizeof(stage->key.cs));
573
574 _mesa_sha1_final(&ctx, sha1_out);
575 }
576
577 static nir_shader *
578 anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
579 struct anv_pipeline_cache *cache,
580 void *mem_ctx,
581 struct anv_pipeline_stage *stage)
582 {
583 const struct brw_compiler *compiler =
584 pipeline->device->instance->physicalDevice.compiler;
585 const nir_shader_compiler_options *nir_options =
586 compiler->glsl_compiler_options[stage->stage].NirOptions;
587 nir_shader *nir;
588
589 nir = anv_device_search_for_nir(pipeline->device, cache,
590 nir_options,
591 stage->shader_sha1,
592 mem_ctx);
593 if (nir) {
594 assert(nir->info.stage == stage->stage);
595 return nir;
596 }
597
598 nir = anv_shader_compile_to_nir(pipeline->device,
599 mem_ctx,
600 stage->module,
601 stage->entrypoint,
602 stage->stage,
603 stage->spec_info);
604 if (nir) {
605 anv_device_upload_nir(pipeline->device, cache, nir, stage->shader_sha1);
606 return nir;
607 }
608
609 return NULL;
610 }
611
612 static void
613 anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
614 void *mem_ctx,
615 struct anv_pipeline_stage *stage,
616 struct anv_pipeline_layout *layout)
617 {
618 const struct anv_physical_device *pdevice =
619 &pipeline->device->instance->physicalDevice;
620 const struct brw_compiler *compiler = pdevice->compiler;
621
622 struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
623 nir_shader *nir = stage->nir;
624
625 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
626 NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);
627 NIR_PASS_V(nir, nir_lower_input_attachments, false);
628 }
629
630 NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
631
632 NIR_PASS_V(nir, anv_nir_lower_push_constants);
633
634 if (nir->info.stage != MESA_SHADER_COMPUTE)
635 NIR_PASS_V(nir, anv_nir_lower_multiview, pipeline->subpass->view_mask);
636
637 if (nir->info.stage == MESA_SHADER_COMPUTE)
638 prog_data->total_shared = nir->num_shared;
639
640 nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
641
642 if (nir->num_uniforms > 0) {
643 assert(prog_data->nr_params == 0);
644
645 /* If the shader uses any push constants at all, we'll just give
646        * them the maximum possible number.
647 */
648 assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
649 nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE;
650 prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
651 prog_data->param = ralloc_array(mem_ctx, uint32_t, prog_data->nr_params);
652
653       /* We now set the param values to be offsets into an
654        * anv_push_constants structure.  Since the compiler doesn't
655 * actually dereference any of the gl_constant_value pointers in the
656 * params array, it doesn't really matter what we put here.
657 */
658 struct anv_push_constants *null_data = NULL;
659 /* Fill out the push constants section of the param array */
660 for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++) {
661 prog_data->param[i] = ANV_PARAM_PUSH(
662 (uintptr_t)&null_data->client_data[i * sizeof(float)]);
663 }
664 }
665
666 if (nir->info.num_ssbos > 0 || nir->info.num_images > 0)
667 pipeline->needs_data_cache = true;
668
669 NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo);
670
671 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
672 nir_address_format_64bit_global);
673
674 /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
675 if (layout) {
676 anv_nir_apply_pipeline_layout(pdevice,
677 pipeline->device->robust_buffer_access,
678 layout, nir, prog_data,
679 &stage->bind_map);
680
681 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo,
682 nir_address_format_32bit_index_offset);
683 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
684 anv_nir_ssbo_addr_format(pdevice,
685 pipeline->device->robust_buffer_access));
686
687 NIR_PASS_V(nir, nir_opt_constant_folding);
688
689 /* We don't support non-uniform UBOs and non-uniform SSBO access is
690 * handled naturally by falling back to A64 messages.
691 */
692 NIR_PASS_V(nir, nir_lower_non_uniform_access,
693 nir_lower_non_uniform_texture_access |
694 nir_lower_non_uniform_image_access);
695 }
696
697 if (nir->info.stage != MESA_SHADER_COMPUTE)
698 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
699
700 assert(nir->num_uniforms == prog_data->nr_params * 4);
701
702 stage->nir = nir;
703 }
704
705 static void
706 anv_pipeline_link_vs(const struct brw_compiler *compiler,
707 struct anv_pipeline_stage *vs_stage,
708 struct anv_pipeline_stage *next_stage)
709 {
710 if (next_stage)
711 brw_nir_link_shaders(compiler, vs_stage->nir, next_stage->nir);
712 }
713
714 static const unsigned *
715 anv_pipeline_compile_vs(const struct brw_compiler *compiler,
716 void *mem_ctx,
717 struct anv_device *device,
718 struct anv_pipeline_stage *vs_stage)
719 {
720 brw_compute_vue_map(compiler->devinfo,
721 &vs_stage->prog_data.vs.base.vue_map,
722 vs_stage->nir->info.outputs_written,
723 vs_stage->nir->info.separate_shader);
724
725 return brw_compile_vs(compiler, device, mem_ctx, &vs_stage->key.vs,
726 &vs_stage->prog_data.vs, vs_stage->nir, -1, NULL);
727 }
728
729 static void
730 merge_tess_info(struct shader_info *tes_info,
731 const struct shader_info *tcs_info)
732 {
733 /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
734 *
735 * "PointMode. Controls generation of points rather than triangles
736 * or lines. This functionality defaults to disabled, and is
737      *     enabled if either shader stage includes the execution mode."
738 *
739 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
740 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
741 * and OutputVertices, it says:
742 *
743 * "One mode must be set in at least one of the tessellation
744 * shader stages."
745 *
746 * So, the fields can be set in either the TCS or TES, but they must
747 * agree if set in both. Our backend looks at TES, so bitwise-or in
748 * the values from the TCS.
749 */
750 assert(tcs_info->tess.tcs_vertices_out == 0 ||
751 tes_info->tess.tcs_vertices_out == 0 ||
752 tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
753 tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
754
755 assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
756 tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
757 tcs_info->tess.spacing == tes_info->tess.spacing);
758 tes_info->tess.spacing |= tcs_info->tess.spacing;
759
760 assert(tcs_info->tess.primitive_mode == 0 ||
761 tes_info->tess.primitive_mode == 0 ||
762 tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
763 tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
764 tes_info->tess.ccw |= tcs_info->tess.ccw;
765 tes_info->tess.point_mode |= tcs_info->tess.point_mode;
766 }
767
768 static void
769 anv_pipeline_link_tcs(const struct brw_compiler *compiler,
770 struct anv_pipeline_stage *tcs_stage,
771 struct anv_pipeline_stage *tes_stage)
772 {
773 assert(tes_stage && tes_stage->stage == MESA_SHADER_TESS_EVAL);
774
775 brw_nir_link_shaders(compiler, tcs_stage->nir, tes_stage->nir);
776
777 nir_lower_patch_vertices(tes_stage->nir,
778 tcs_stage->nir->info.tess.tcs_vertices_out,
779 NULL);
780
781 /* Copy TCS info into the TES info */
782 merge_tess_info(&tes_stage->nir->info, &tcs_stage->nir->info);
783
784 /* Whacking the key after cache lookup is a bit sketchy, but all of
785 * this comes from the SPIR-V, which is part of the hash used for the
786 * pipeline cache. So it should be safe.
787 */
788 tcs_stage->key.tcs.tes_primitive_mode =
789 tes_stage->nir->info.tess.primitive_mode;
790 tcs_stage->key.tcs.quads_workaround =
791 compiler->devinfo->gen < 9 &&
792 tes_stage->nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
793 tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
794 }
795
796 static const unsigned *
797 anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
798 void *mem_ctx,
799 struct anv_device *device,
800 struct anv_pipeline_stage *tcs_stage,
801 struct anv_pipeline_stage *prev_stage)
802 {
803 tcs_stage->key.tcs.outputs_written =
804 tcs_stage->nir->info.outputs_written;
805 tcs_stage->key.tcs.patch_outputs_written =
806 tcs_stage->nir->info.patch_outputs_written;
807
808 return brw_compile_tcs(compiler, device, mem_ctx, &tcs_stage->key.tcs,
809 &tcs_stage->prog_data.tcs, tcs_stage->nir,
810 -1, NULL);
811 }
812
813 static void
814 anv_pipeline_link_tes(const struct brw_compiler *compiler,
815 struct anv_pipeline_stage *tes_stage,
816 struct anv_pipeline_stage *next_stage)
817 {
818 if (next_stage)
819 brw_nir_link_shaders(compiler, tes_stage->nir, next_stage->nir);
820 }
821
822 static const unsigned *
823 anv_pipeline_compile_tes(const struct brw_compiler *compiler,
824 void *mem_ctx,
825 struct anv_device *device,
826 struct anv_pipeline_stage *tes_stage,
827 struct anv_pipeline_stage *tcs_stage)
828 {
829 tes_stage->key.tes.inputs_read =
830 tcs_stage->nir->info.outputs_written;
831 tes_stage->key.tes.patch_inputs_read =
832 tcs_stage->nir->info.patch_outputs_written;
833
834 return brw_compile_tes(compiler, device, mem_ctx, &tes_stage->key.tes,
835 &tcs_stage->prog_data.tcs.base.vue_map,
836 &tes_stage->prog_data.tes, tes_stage->nir,
837 NULL, -1, NULL);
838 }
839
840 static void
841 anv_pipeline_link_gs(const struct brw_compiler *compiler,
842 struct anv_pipeline_stage *gs_stage,
843 struct anv_pipeline_stage *next_stage)
844 {
845 if (next_stage)
846 brw_nir_link_shaders(compiler, gs_stage->nir, next_stage->nir);
847 }
848
849 static const unsigned *
850 anv_pipeline_compile_gs(const struct brw_compiler *compiler,
851 void *mem_ctx,
852 struct anv_device *device,
853 struct anv_pipeline_stage *gs_stage,
854 struct anv_pipeline_stage *prev_stage)
855 {
856 brw_compute_vue_map(compiler->devinfo,
857 &gs_stage->prog_data.gs.base.vue_map,
858 gs_stage->nir->info.outputs_written,
859 gs_stage->nir->info.separate_shader);
860
861 return brw_compile_gs(compiler, device, mem_ctx, &gs_stage->key.gs,
862 &gs_stage->prog_data.gs, gs_stage->nir,
863 NULL, -1, NULL);
864 }
865
866 static void
867 anv_pipeline_link_fs(const struct brw_compiler *compiler,
868 struct anv_pipeline_stage *stage)
869 {
870 unsigned num_rts = 0;
871 const int max_rt = FRAG_RESULT_DATA7 - FRAG_RESULT_DATA0 + 1;
872 struct anv_pipeline_binding rt_bindings[max_rt];
873 nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
874 int rt_to_bindings[max_rt];
875 memset(rt_to_bindings, -1, sizeof(rt_to_bindings));
876 bool rt_used[max_rt];
877 memset(rt_used, 0, sizeof(rt_used));
878
879 /* Flag used render targets */
880 nir_foreach_variable_safe(var, &stage->nir->outputs) {
881 if (var->data.location < FRAG_RESULT_DATA0)
882 continue;
883
884 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
885 /* Out-of-bounds */
886 if (rt >= MAX_RTS)
887 continue;
888
889 const unsigned array_len =
890 glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
891 assert(rt + array_len <= max_rt);
892
893 /* Unused */
894 if (!(stage->key.wm.color_outputs_valid & BITFIELD_RANGE(rt, array_len))) {
895 /* If this is the RT at location 0 and we have alpha to coverage
896 * enabled we will have to create a null RT for it, so mark it as
897 * used.
898 */
899 if (rt > 0 || !stage->key.wm.alpha_to_coverage)
900 continue;
901 }
902
903 for (unsigned i = 0; i < array_len; i++)
904 rt_used[rt + i] = true;
905 }
906
907 /* Set new, compacted, location */
908 for (unsigned i = 0; i < max_rt; i++) {
909 if (!rt_used[i])
910 continue;
911
912 rt_to_bindings[i] = num_rts;
913
914 if (stage->key.wm.color_outputs_valid & (1 << i)) {
915 rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
916 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
917 .binding = 0,
918 .index = i,
919 };
920 } else {
921 /* Setup a null render target */
922 rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
923 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
924 .binding = 0,
925 .index = UINT32_MAX,
926 };
927 }
928
929 num_rts++;
930 }
931
932 bool deleted_output = false;
933 nir_foreach_variable_safe(var, &stage->nir->outputs) {
934 if (var->data.location < FRAG_RESULT_DATA0)
935 continue;
936
937 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
938
939 if (rt >= MAX_RTS || !rt_used[rt]) {
940 /* Unused or out-of-bounds, throw it away, unless it is the first
941 * RT and we have alpha to coverage enabled.
942 */
943 deleted_output = true;
944 var->data.mode = nir_var_function_temp;
945 exec_node_remove(&var->node);
946 exec_list_push_tail(&impl->locals, &var->node);
947 continue;
948 }
949
950 /* Give it the new location */
951 assert(rt_to_bindings[rt] != -1);
952 var->data.location = rt_to_bindings[rt] + FRAG_RESULT_DATA0;
953 }
954
955 if (deleted_output)
956 nir_fixup_deref_modes(stage->nir);
957
958 if (num_rts == 0) {
959 /* If we have no render targets, we need a null render target */
960 rt_bindings[0] = (struct anv_pipeline_binding) {
961 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
962 .binding = 0,
963 .index = UINT32_MAX,
964 };
965 num_rts = 1;
966 }
967
968 /* Now that we've determined the actual number of render targets, adjust
969 * the key accordingly.
970 */
971 stage->key.wm.nr_color_regions = num_rts;
972 stage->key.wm.color_outputs_valid = (1 << num_rts) - 1;
973
974 assert(num_rts <= max_rt);
975 assert(stage->bind_map.surface_count == 0);
976 typed_memcpy(stage->bind_map.surface_to_descriptor,
977 rt_bindings, num_rts);
978 stage->bind_map.surface_count += num_rts;
979 }
980
981 static const unsigned *
982 anv_pipeline_compile_fs(const struct brw_compiler *compiler,
983 void *mem_ctx,
984 struct anv_device *device,
985 struct anv_pipeline_stage *fs_stage,
986 struct anv_pipeline_stage *prev_stage)
987 {
988 /* TODO: we could set this to 0 based on the information in nir_shader, but
989 * we need this before we call spirv_to_nir.
990 */
991 assert(prev_stage);
992 fs_stage->key.wm.input_slots_valid =
993 prev_stage->prog_data.vue.vue_map.slots_valid;
994
995 const unsigned *code =
996 brw_compile_fs(compiler, device, mem_ctx, &fs_stage->key.wm,
997 &fs_stage->prog_data.wm, fs_stage->nir,
998 NULL, -1, -1, -1, true, false, NULL, NULL);
999
1000 if (fs_stage->key.wm.nr_color_regions == 0 &&
1001 !fs_stage->prog_data.wm.has_side_effects &&
1002 !fs_stage->prog_data.wm.uses_kill &&
1003 fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
1004 !fs_stage->prog_data.wm.computed_stencil) {
1005 /* This fragment shader has no outputs and no side effects. Go ahead
1006 * and return the code pointer so we don't accidentally think the
1007     * compile failed, but zero out prog_data, which will set program_size to
1008 * zero and disable the stage.
1009 */
1010 memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
1011 }
1012
1013 return code;
1014 }
1015
1016 static VkResult
1017 anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
1018 struct anv_pipeline_cache *cache,
1019 const VkGraphicsPipelineCreateInfo *info)
1020 {
1021 VkPipelineCreationFeedbackEXT pipeline_feedback = {
1022 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1023 };
1024 int64_t pipeline_start = os_time_get_nano();
1025
1026 const struct brw_compiler *compiler =
1027 pipeline->device->instance->physicalDevice.compiler;
1028 struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
1029
1030 pipeline->active_stages = 0;
1031
1032 VkResult result;
1033 for (uint32_t i = 0; i < info->stageCount; i++) {
1034 const VkPipelineShaderStageCreateInfo *sinfo = &info->pStages[i];
1035 gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
1036
1037 pipeline->active_stages |= sinfo->stage;
1038
1039 int64_t stage_start = os_time_get_nano();
1040
1041 stages[stage].stage = stage;
1042 stages[stage].module = anv_shader_module_from_handle(sinfo->module);
1043 stages[stage].entrypoint = sinfo->pName;
1044 stages[stage].spec_info = sinfo->pSpecializationInfo;
1045 anv_pipeline_hash_shader(stages[stage].module,
1046 stages[stage].entrypoint,
1047 stage,
1048 stages[stage].spec_info,
1049 stages[stage].shader_sha1);
1050
1051 const struct gen_device_info *devinfo = &pipeline->device->info;
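      /* Plumb the per-stage create flags (VkPipelineShaderStageCreateInfo::
       * flags) into key population so that any flag-dependent key fields can
       * be filled in there.
       */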
1052 switch (stage) {
1053 case MESA_SHADER_VERTEX:
1054 populate_vs_prog_key(devinfo, sinfo->flags, &stages[stage].key.vs);
1055 break;
1056 case MESA_SHADER_TESS_CTRL:
1057 populate_tcs_prog_key(devinfo, sinfo->flags,
1058 info->pTessellationState->patchControlPoints,
1059 &stages[stage].key.tcs);
1060 break;
1061 case MESA_SHADER_TESS_EVAL:
1062 populate_tes_prog_key(devinfo, sinfo->flags, &stages[stage].key.tes);
1063 break;
1064 case MESA_SHADER_GEOMETRY:
1065 populate_gs_prog_key(devinfo, sinfo->flags, &stages[stage].key.gs);
1066 break;
1067 case MESA_SHADER_FRAGMENT:
1068 populate_wm_prog_key(devinfo, sinfo->flags,
1069 pipeline->subpass,
1070 info->pMultisampleState,
1071 &stages[stage].key.wm);
1072 break;
1073 default:
1074 unreachable("Invalid graphics shader stage");
1075 }
1076
1077 stages[stage].feedback.duration += os_time_get_nano() - stage_start;
1078 stages[stage].feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
1079 }
1080
1081 if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
1082 pipeline->active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
1083
1084 assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
1085
1086 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1087
1088 unsigned char sha1[20];
1089 anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
1090
1091 unsigned found = 0;
1092 unsigned cache_hits = 0;
1093 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1094 if (!stages[s].entrypoint)
1095 continue;
1096
1097 int64_t stage_start = os_time_get_nano();
1098
1099 stages[s].cache_key.stage = s;
1100 memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
1101
1102 bool cache_hit;
1103 struct anv_shader_bin *bin =
1104 anv_device_search_for_kernel(pipeline->device, cache,
1105 &stages[s].cache_key,
1106 sizeof(stages[s].cache_key), &cache_hit);
1107 if (bin) {
1108 found++;
1109 pipeline->shaders[s] = bin;
1110 }
1111
1112 if (cache_hit) {
1113 cache_hits++;
1114 stages[s].feedback.flags |=
1115 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1116 }
1117 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1118 }
1119
1120 if (found == __builtin_popcount(pipeline->active_stages)) {
1121 if (cache_hits == found) {
1122 pipeline_feedback.flags |=
1123 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1124 }
1125 /* We found all our shaders in the cache. We're done. */
1126 goto done;
1127 } else if (found > 0) {
1128 /* We found some but not all of our shaders. This shouldn't happen
1129 * most of the time but it can if we have a partially populated
1130 * pipeline cache.
1131 */
1132 assert(found < __builtin_popcount(pipeline->active_stages));
1133
1134 vk_debug_report(&pipeline->device->instance->debug_report_callbacks,
1135 VK_DEBUG_REPORT_WARNING_BIT_EXT |
1136 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
1137 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
1138 (uint64_t)(uintptr_t)cache,
1139 0, 0, "anv",
1140 "Found a partial pipeline in the cache. This is "
1141 "most likely caused by an incomplete pipeline cache "
1142 "import or export");
1143
1144 /* We're going to have to recompile anyway, so just throw away our
1145 * references to the shaders in the cache. We'll get them out of the
1146 * cache again as part of the compilation process.
1147 */
1148 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1149 stages[s].feedback.flags = 0;
1150 if (pipeline->shaders[s]) {
1151 anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
1152 pipeline->shaders[s] = NULL;
1153 }
1154 }
1155 }
1156
1157 void *pipeline_ctx = ralloc_context(NULL);
1158
1159 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1160 if (!stages[s].entrypoint)
1161 continue;
1162
1163 int64_t stage_start = os_time_get_nano();
1164
1165 assert(stages[s].stage == s);
1166 assert(pipeline->shaders[s] == NULL);
1167
1168 stages[s].bind_map = (struct anv_pipeline_bind_map) {
1169 .surface_to_descriptor = stages[s].surface_to_descriptor,
1170 .sampler_to_descriptor = stages[s].sampler_to_descriptor
1171 };
1172
1173 stages[s].nir = anv_pipeline_stage_get_nir(pipeline, cache,
1174 pipeline_ctx,
1175 &stages[s]);
1176 if (stages[s].nir == NULL) {
1177 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1178 goto fail;
1179 }
1180
1181 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1182 }
1183
1184 /* Walk backwards to link */
1185 struct anv_pipeline_stage *next_stage = NULL;
1186 for (int s = MESA_SHADER_STAGES - 1; s >= 0; s--) {
1187 if (!stages[s].entrypoint)
1188 continue;
1189
1190 switch (s) {
1191 case MESA_SHADER_VERTEX:
1192 anv_pipeline_link_vs(compiler, &stages[s], next_stage);
1193 break;
1194 case MESA_SHADER_TESS_CTRL:
1195 anv_pipeline_link_tcs(compiler, &stages[s], next_stage);
1196 break;
1197 case MESA_SHADER_TESS_EVAL:
1198 anv_pipeline_link_tes(compiler, &stages[s], next_stage);
1199 break;
1200 case MESA_SHADER_GEOMETRY:
1201 anv_pipeline_link_gs(compiler, &stages[s], next_stage);
1202 break;
1203 case MESA_SHADER_FRAGMENT:
1204 anv_pipeline_link_fs(compiler, &stages[s]);
1205 break;
1206 default:
1207 unreachable("Invalid graphics shader stage");
1208 }
1209
1210 next_stage = &stages[s];
1211 }
1212
1213 struct anv_pipeline_stage *prev_stage = NULL;
1214 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1215 if (!stages[s].entrypoint)
1216 continue;
1217
1218 int64_t stage_start = os_time_get_nano();
1219
1220 void *stage_ctx = ralloc_context(NULL);
1221
1222 nir_xfb_info *xfb_info = NULL;
1223 if (s == MESA_SHADER_VERTEX ||
1224 s == MESA_SHADER_TESS_EVAL ||
1225 s == MESA_SHADER_GEOMETRY)
1226 xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);
1227
1228 anv_pipeline_lower_nir(pipeline, stage_ctx, &stages[s], layout);
1229
1230 const unsigned *code;
1231 switch (s) {
1232 case MESA_SHADER_VERTEX:
1233 code = anv_pipeline_compile_vs(compiler, stage_ctx, pipeline->device,
1234 &stages[s]);
1235 break;
1236 case MESA_SHADER_TESS_CTRL:
1237 code = anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->device,
1238 &stages[s], prev_stage);
1239 break;
1240 case MESA_SHADER_TESS_EVAL:
1241 code = anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->device,
1242 &stages[s], prev_stage);
1243 break;
1244 case MESA_SHADER_GEOMETRY:
1245 code = anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->device,
1246 &stages[s], prev_stage);
1247 break;
1248 case MESA_SHADER_FRAGMENT:
1249 code = anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->device,
1250 &stages[s], prev_stage);
1251 break;
1252 default:
1253 unreachable("Invalid graphics shader stage");
1254 }
1255 if (code == NULL) {
1256 ralloc_free(stage_ctx);
1257 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1258 goto fail;
1259 }
1260
1261 struct anv_shader_bin *bin =
1262 anv_device_upload_kernel(pipeline->device, cache,
1263 &stages[s].cache_key,
1264 sizeof(stages[s].cache_key),
1265 code, stages[s].prog_data.base.program_size,
1266 stages[s].nir->constant_data,
1267 stages[s].nir->constant_data_size,
1268 &stages[s].prog_data.base,
1269 brw_prog_data_size(s),
1270 xfb_info, &stages[s].bind_map);
1271 if (!bin) {
1272 ralloc_free(stage_ctx);
1273 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1274 goto fail;
1275 }
1276
1277 pipeline->shaders[s] = bin;
1278 ralloc_free(stage_ctx);
1279
1280 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1281
1282 prev_stage = &stages[s];
1283 }
1284
1285 ralloc_free(pipeline_ctx);
1286
1287 done:
1288
1289 if (pipeline->shaders[MESA_SHADER_FRAGMENT] &&
1290 pipeline->shaders[MESA_SHADER_FRAGMENT]->prog_data->program_size == 0) {
1291 /* This can happen if we decided to implicitly disable the fragment
1292 * shader. See anv_pipeline_compile_fs().
1293 */
1294 anv_shader_bin_unref(pipeline->device,
1295 pipeline->shaders[MESA_SHADER_FRAGMENT]);
1296 pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
1297 pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
1298 }
1299
1300 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1301
1302 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1303 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1304 if (create_feedback) {
1305 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1306
1307 assert(info->stageCount == create_feedback->pipelineStageCreationFeedbackCount);
1308 for (uint32_t i = 0; i < info->stageCount; i++) {
1309 gl_shader_stage s = vk_to_mesa_shader_stage(info->pStages[i].stage);
1310 create_feedback->pPipelineStageCreationFeedbacks[i] = stages[s].feedback;
1311 }
1312 }
1313
1314 return VK_SUCCESS;
1315
1316 fail:
1317 ralloc_free(pipeline_ctx);
1318
1319 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1320 if (pipeline->shaders[s])
1321 anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
1322 }
1323
1324 return result;
1325 }
1326
1327 VkResult
1328 anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
1329 struct anv_pipeline_cache *cache,
1330 const VkComputePipelineCreateInfo *info,
1331 const struct anv_shader_module *module,
1332 const char *entrypoint,
1333 const VkSpecializationInfo *spec_info)
1334 {
1335 VkPipelineCreationFeedbackEXT pipeline_feedback = {
1336 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1337 };
1338 int64_t pipeline_start = os_time_get_nano();
1339
1340 const struct brw_compiler *compiler =
1341 pipeline->device->instance->physicalDevice.compiler;
1342
1343 struct anv_pipeline_stage stage = {
1344 .stage = MESA_SHADER_COMPUTE,
1345 .module = module,
1346 .entrypoint = entrypoint,
1347 .spec_info = spec_info,
1348 .cache_key = {
1349 .stage = MESA_SHADER_COMPUTE,
1350 },
1351 .feedback = {
1352 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1353 },
1354 };
1355 anv_pipeline_hash_shader(stage.module,
1356 stage.entrypoint,
1357 MESA_SHADER_COMPUTE,
1358 stage.spec_info,
1359 stage.shader_sha1);
1360
1361 struct anv_shader_bin *bin = NULL;
1362
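   /* As on the graphics path, forward the stage create flags from
    * VkComputePipelineCreateInfo::stage into key population.
    */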
1363 populate_cs_prog_key(&pipeline->device->info, info->stage.flags,
1364 &stage.key.cs);
1365
1366 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1367
1368 anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
1369 bool cache_hit;
1370 bin = anv_device_search_for_kernel(pipeline->device, cache, &stage.cache_key,
1371 sizeof(stage.cache_key), &cache_hit);
1372
1373 if (bin == NULL) {
1374 int64_t stage_start = os_time_get_nano();
1375
1376 stage.bind_map = (struct anv_pipeline_bind_map) {
1377 .surface_to_descriptor = stage.surface_to_descriptor,
1378 .sampler_to_descriptor = stage.sampler_to_descriptor
1379 };
1380
1381 /* Set up a binding for the gl_NumWorkGroups */
1382 stage.bind_map.surface_count = 1;
1383 stage.bind_map.surface_to_descriptor[0] = (struct anv_pipeline_binding) {
1384 .set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
1385 };
1386
1387 void *mem_ctx = ralloc_context(NULL);
1388
1389 stage.nir = anv_pipeline_stage_get_nir(pipeline, cache, mem_ctx, &stage);
1390 if (stage.nir == NULL) {
1391 ralloc_free(mem_ctx);
1392 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1393 }
1394
1395 anv_pipeline_lower_nir(pipeline, mem_ctx, &stage, layout);
1396
1397 NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id,
1398 &stage.prog_data.cs);
1399
1400 const unsigned *shader_code =
1401 brw_compile_cs(compiler, pipeline->device, mem_ctx, &stage.key.cs,
1402 &stage.prog_data.cs, stage.nir, -1, NULL);
1403 if (shader_code == NULL) {
1404 ralloc_free(mem_ctx);
1405 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1406 }
1407
1408 const unsigned code_size = stage.prog_data.base.program_size;
1409 bin = anv_device_upload_kernel(pipeline->device, cache,
1410 &stage.cache_key, sizeof(stage.cache_key),
1411 shader_code, code_size,
1412 stage.nir->constant_data,
1413 stage.nir->constant_data_size,
1414 &stage.prog_data.base,
1415 sizeof(stage.prog_data.cs),
1416 NULL, &stage.bind_map);
1417 if (!bin) {
1418 ralloc_free(mem_ctx);
1419 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1420 }
1421
1422 ralloc_free(mem_ctx);
1423
1424 stage.feedback.duration = os_time_get_nano() - stage_start;
1425 }
1426
1427 if (cache_hit) {
1428 stage.feedback.flags |=
1429 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1430 pipeline_feedback.flags |=
1431 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1432 }
1433 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1434
1435 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1436 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1437 if (create_feedback) {
1438 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1439
1440 assert(create_feedback->pipelineStageCreationFeedbackCount == 1);
1441 create_feedback->pPipelineStageCreationFeedbacks[0] = stage.feedback;
1442 }
1443
1444 pipeline->active_stages = VK_SHADER_STAGE_COMPUTE_BIT;
1445 pipeline->shaders[MESA_SHADER_COMPUTE] = bin;
1446
1447 return VK_SUCCESS;
1448 }
1449
1450 /**
1451 * Copy pipeline state not marked as dynamic.
1452 * Dynamic state is pipeline state which hasn't been provided at pipeline
1453 * creation time, but is dynamically provided afterwards using various
1454 * vkCmdSet* functions.
1455 *
1456 * The set of state considered "non_dynamic" is determined by the pieces of
1457 * state that have their corresponding VkDynamicState enums omitted from
1458 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
1459 *
1460 * @param[out] pipeline Destination non_dynamic state.
1461 * @param[in] pCreateInfo Source of non_dynamic state to be copied.
1462 */
1463 static void
1464 copy_non_dynamic_state(struct anv_pipeline *pipeline,
1465 const VkGraphicsPipelineCreateInfo *pCreateInfo)
1466 {
1467 anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
1468 struct anv_subpass *subpass = pipeline->subpass;
1469
1470 pipeline->dynamic_state = default_dynamic_state;
1471
1472 if (pCreateInfo->pDynamicState) {
1473 /* Remove all of the states that are marked as dynamic */
1474 uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
1475 for (uint32_t s = 0; s < count; s++)
1476 states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
1477 }
1478
1479 struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
1480
1481 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1482 *
1483 * pViewportState is [...] NULL if the pipeline
1484 * has rasterization disabled.
1485 */
1486 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1487 assert(pCreateInfo->pViewportState);
1488
1489 dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
1490 if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
1491 typed_memcpy(dynamic->viewport.viewports,
1492 pCreateInfo->pViewportState->pViewports,
1493 pCreateInfo->pViewportState->viewportCount);
1494 }
1495
1496 dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
1497 if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
1498 typed_memcpy(dynamic->scissor.scissors,
1499 pCreateInfo->pViewportState->pScissors,
1500 pCreateInfo->pViewportState->scissorCount);
1501 }
1502 }
1503
1504 if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
1505 assert(pCreateInfo->pRasterizationState);
1506 dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
1507 }
1508
1509 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
1510 assert(pCreateInfo->pRasterizationState);
1511 dynamic->depth_bias.bias =
1512 pCreateInfo->pRasterizationState->depthBiasConstantFactor;
1513 dynamic->depth_bias.clamp =
1514 pCreateInfo->pRasterizationState->depthBiasClamp;
1515 dynamic->depth_bias.slope =
1516 pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
1517 }
1518
1519 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1520 *
1521 * pColorBlendState is [...] NULL if the pipeline has rasterization
1522 * disabled or if the subpass of the render pass the pipeline is
1523 * created against does not use any color attachments.
1524 */
1525 bool uses_color_att = false;
1526 for (unsigned i = 0; i < subpass->color_count; ++i) {
1527 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1528 uses_color_att = true;
1529 break;
1530 }
1531 }
1532
1533 if (uses_color_att &&
1534 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1535 assert(pCreateInfo->pColorBlendState);
1536
1537 if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
1538 typed_memcpy(dynamic->blend_constants,
1539 pCreateInfo->pColorBlendState->blendConstants, 4);
1540 }
1541
1542    /* If there is no depth/stencil attachment, then don't read
1543     * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
1544     * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
1545     * no need to override the depth/stencil defaults in
1546     * anv_pipeline::dynamic_state when there is no depth/stencil attachment.
1547 *
1548 * Section 9.2 of the Vulkan 1.0.15 spec says:
1549 *
1550 * pDepthStencilState is [...] NULL if the pipeline has rasterization
1551 * disabled or if the subpass of the render pass the pipeline is created
1552 * against does not use a depth/stencil attachment.
1553 */
1554 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
1555 subpass->depth_stencil_attachment) {
1556 assert(pCreateInfo->pDepthStencilState);
1557
1558 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
1559 dynamic->depth_bounds.min =
1560 pCreateInfo->pDepthStencilState->minDepthBounds;
1561 dynamic->depth_bounds.max =
1562 pCreateInfo->pDepthStencilState->maxDepthBounds;
1563 }
1564
1565 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
1566 dynamic->stencil_compare_mask.front =
1567 pCreateInfo->pDepthStencilState->front.compareMask;
1568 dynamic->stencil_compare_mask.back =
1569 pCreateInfo->pDepthStencilState->back.compareMask;
1570 }
1571
1572 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
1573 dynamic->stencil_write_mask.front =
1574 pCreateInfo->pDepthStencilState->front.writeMask;
1575 dynamic->stencil_write_mask.back =
1576 pCreateInfo->pDepthStencilState->back.writeMask;
1577 }
1578
1579 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
1580 dynamic->stencil_reference.front =
1581 pCreateInfo->pDepthStencilState->front.reference;
1582 dynamic->stencil_reference.back =
1583 pCreateInfo->pDepthStencilState->back.reference;
1584 }
1585 }
1586
1587 pipeline->dynamic_state_mask = states;
1588 }
1589
1590 static void
1591 anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
1592 {
1593 #ifdef DEBUG
1594 struct anv_render_pass *renderpass = NULL;
1595 struct anv_subpass *subpass = NULL;
1596
1597 /* Assert that all required members of VkGraphicsPipelineCreateInfo are
1598 * present. See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
1599 */
1600 assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1601
1602 renderpass = anv_render_pass_from_handle(info->renderPass);
1603 assert(renderpass);
1604
1605 assert(info->subpass < renderpass->subpass_count);
1606 subpass = &renderpass->subpasses[info->subpass];
1607
1608 assert(info->stageCount >= 1);
1609 assert(info->pVertexInputState);
1610 assert(info->pInputAssemblyState);
1611 assert(info->pRasterizationState);
1612 if (!info->pRasterizationState->rasterizerDiscardEnable) {
1613 assert(info->pViewportState);
1614 assert(info->pMultisampleState);
1615
1616 if (subpass && subpass->depth_stencil_attachment)
1617 assert(info->pDepthStencilState);
1618
1619 if (subpass && subpass->color_count > 0) {
1620 bool all_color_unused = true;
1621 for (int i = 0; i < subpass->color_count; i++) {
1622 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
1623 all_color_unused = false;
1624 }
1625 /* pColorBlendState is ignored if the pipeline has rasterization
1626 * disabled or if the subpass of the render pass the pipeline is
1627 * created against does not use any color attachments.
1628 */
1629 assert(info->pColorBlendState || all_color_unused);
1630 }
1631 }
1632
1633 for (uint32_t i = 0; i < info->stageCount; ++i) {
1634 switch (info->pStages[i].stage) {
1635 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
1636 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
1637 assert(info->pTessellationState);
1638 break;
1639 default:
1640 break;
1641 }
1642 }
1643 #endif
1644 }
1645
1646 /**
1647 * Calculate the desired L3 partitioning based on the current state of the
1648 * pipeline. For now this simply returns the conservative defaults calculated
1649 * by get_default_l3_weights(), but we could probably do better by gathering
1650 * more statistics from the pipeline state (e.g. guess of expected URB usage
1651 * and bound surfaces), or by using feed-back from performance counters.
1652 */
1653 void
1654 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
1655 {
1656 const struct gen_device_info *devinfo = &pipeline->device->info;
1657
1658 const struct gen_l3_weights w =
1659 gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);
1660
1661 pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
1662 pipeline->urb.total_size =
1663 gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
1664 }
1665
1666 VkResult
1667 anv_pipeline_init(struct anv_pipeline *pipeline,
1668 struct anv_device *device,
1669 struct anv_pipeline_cache *cache,
1670 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1671 const VkAllocationCallbacks *alloc)
1672 {
1673 VkResult result;
1674
1675 anv_pipeline_validate_create_info(pCreateInfo);
1676
1677 if (alloc == NULL)
1678 alloc = &device->alloc;
1679
1680 pipeline->device = device;
1681
1682 ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
1683 assert(pCreateInfo->subpass < render_pass->subpass_count);
1684 pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];
1685
1686 result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
1687 if (result != VK_SUCCESS)
1688 return result;
1689
1690 pipeline->batch.alloc = alloc;
1691 pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
1692 pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
1693 pipeline->batch.relocs = &pipeline->batch_relocs;
1694 pipeline->batch.status = VK_SUCCESS;
1695
1696 copy_non_dynamic_state(pipeline, pCreateInfo);
1697 pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
1698 pCreateInfo->pRasterizationState->depthClampEnable;
1699
1700 /* Previously we enabled depth clipping when !depthClampEnable.
1701     * DepthClipStateCreateInfo now makes depth clipping explicit, so if the
1702     * clipping info is available, use its enable value to determine clipping;
1703     * otherwise fall back to the previous !depthClampEnable logic.
1704 */
1705 const VkPipelineRasterizationDepthClipStateCreateInfoEXT *clip_info =
1706 vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
1707 PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
1708 pipeline->depth_clip_enable = clip_info ? clip_info->depthClipEnable : !pipeline->depth_clamp_enable;
1709
1710 pipeline->sample_shading_enable = pCreateInfo->pMultisampleState &&
1711 pCreateInfo->pMultisampleState->sampleShadingEnable;
1712
1713 pipeline->needs_data_cache = false;
1714
1715 /* When we free the pipeline, we detect stages based on the NULL status
1716 * of various prog_data pointers. Make them NULL by default.
1717 */
1718 memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
1719
1720 result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
1721 if (result != VK_SUCCESS) {
1722 anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
1723 return result;
1724 }
1725
1726 assert(pipeline->shaders[MESA_SHADER_VERTEX]);
1727
1728 anv_pipeline_setup_l3_config(pipeline, false);
1729
1730 const VkPipelineVertexInputStateCreateInfo *vi_info =
1731 pCreateInfo->pVertexInputState;
1732
1733 const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;
1734
1735 pipeline->vb_used = 0;
1736 for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
1737 const VkVertexInputAttributeDescription *desc =
1738 &vi_info->pVertexAttributeDescriptions[i];
1739
1740 if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
1741 pipeline->vb_used |= 1 << desc->binding;
1742 }
1743
1744 for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
1745 const VkVertexInputBindingDescription *desc =
1746 &vi_info->pVertexBindingDescriptions[i];
1747
1748 pipeline->vb[desc->binding].stride = desc->stride;
1749
1750 /* Step rate is programmed per vertex element (attribute), not
1751 * binding. Set up a map of which bindings step per instance, for
1752 * reference by vertex element setup. */
1753 switch (desc->inputRate) {
1754 default:
1755 case VK_VERTEX_INPUT_RATE_VERTEX:
1756 pipeline->vb[desc->binding].instanced = false;
1757 break;
1758 case VK_VERTEX_INPUT_RATE_INSTANCE:
1759 pipeline->vb[desc->binding].instanced = true;
1760 break;
1761 }
1762
1763 pipeline->vb[desc->binding].instance_divisor = 1;
1764 }
1765
1766 const VkPipelineVertexInputDivisorStateCreateInfoEXT *vi_div_state =
1767 vk_find_struct_const(vi_info->pNext,
1768 PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
1769 if (vi_div_state) {
1770 for (uint32_t i = 0; i < vi_div_state->vertexBindingDivisorCount; i++) {
1771 const VkVertexInputBindingDivisorDescriptionEXT *desc =
1772 &vi_div_state->pVertexBindingDivisors[i];
1773
1774 pipeline->vb[desc->binding].instance_divisor = desc->divisor;
1775 }
1776 }
1777
1778 /* Our implementation of VK_KHR_multiview uses instancing to draw the
1779 * different views. If the client asks for instancing, we need to multiply
1780     * the instance divisor by the number of views to ensure that we repeat the
1781 * client's per-instance data once for each view.
1782 */
1783 if (pipeline->subpass->view_mask) {
1784 const uint32_t view_count = anv_subpass_view_count(pipeline->subpass);
1785 for (uint32_t vb = 0; vb < MAX_VBS; vb++) {
1786 if (pipeline->vb[vb].instanced)
1787 pipeline->vb[vb].instance_divisor *= view_count;
1788 }
1789 }
1790
1791 const VkPipelineInputAssemblyStateCreateInfo *ia_info =
1792 pCreateInfo->pInputAssemblyState;
1793 const VkPipelineTessellationStateCreateInfo *tess_info =
1794 pCreateInfo->pTessellationState;
1795 pipeline->primitive_restart = ia_info->primitiveRestartEnable;
1796
1797 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
1798 pipeline->topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
1799 else
1800 pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
1801
1802 return VK_SUCCESS;
1803 }