anv: enable descriptor indexing capabilities
[mesa.git] src/intel/vulkan/anv_pipeline.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "util/mesa-sha1.h"
31 #include "util/os_time.h"
32 #include "common/gen_l3_config.h"
33 #include "anv_private.h"
34 #include "compiler/brw_nir.h"
35 #include "anv_nir.h"
36 #include "nir/nir_xfb_info.h"
37 #include "spirv/nir_spirv.h"
38 #include "vk_util.h"
39
40 /* Needed for SWIZZLE macros */
41 #include "program/prog_instruction.h"
42
43 // Shader functions
44
45 VkResult anv_CreateShaderModule(
46 VkDevice _device,
47 const VkShaderModuleCreateInfo* pCreateInfo,
48 const VkAllocationCallbacks* pAllocator,
49 VkShaderModule* pShaderModule)
50 {
51 ANV_FROM_HANDLE(anv_device, device, _device);
52 struct anv_shader_module *module;
53
54 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
55 assert(pCreateInfo->flags == 0);
56
57 module = vk_alloc2(&device->alloc, pAllocator,
58 sizeof(*module) + pCreateInfo->codeSize, 8,
59 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
60 if (module == NULL)
61 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
62
63 module->size = pCreateInfo->codeSize;
64 memcpy(module->data, pCreateInfo->pCode, module->size);
65
66 _mesa_sha1_compute(module->data, module->size, module->sha1);
67
68 *pShaderModule = anv_shader_module_to_handle(module);
69
70 return VK_SUCCESS;
71 }
72
73 void anv_DestroyShaderModule(
74 VkDevice _device,
75 VkShaderModule _module,
76 const VkAllocationCallbacks* pAllocator)
77 {
78 ANV_FROM_HANDLE(anv_device, device, _device);
79 ANV_FROM_HANDLE(anv_shader_module, module, _module);
80
81 if (!module)
82 return;
83
84 vk_free2(&device->alloc, pAllocator, module);
85 }
86
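/* 0x07230203 is the first word of every SPIR-V binary; the assert in
 * anv_shader_compile_to_nir() below uses it to sanity-check the module.
 */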
87 #define SPIR_V_MAGIC_NUMBER 0x07230203
88
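/* Maps each shader stage to the INTEL_DEBUG flag that enables dumping its
 * NIR (see the INTEL_DEBUG check in anv_shader_compile_to_nir()).
 */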
89 static const uint64_t stage_to_debug[] = {
90 [MESA_SHADER_VERTEX] = DEBUG_VS,
91 [MESA_SHADER_TESS_CTRL] = DEBUG_TCS,
92 [MESA_SHADER_TESS_EVAL] = DEBUG_TES,
93 [MESA_SHADER_GEOMETRY] = DEBUG_GS,
94 [MESA_SHADER_FRAGMENT] = DEBUG_WM,
95 [MESA_SHADER_COMPUTE] = DEBUG_CS,
96 };
97
98 /* Eventually, this will become part of anv_CreateShader. Unfortunately,
99 * we can't do that yet because we don't have the ability to copy nir.
100 */
101 static nir_shader *
102 anv_shader_compile_to_nir(struct anv_device *device,
103 void *mem_ctx,
104 const struct anv_shader_module *module,
105 const char *entrypoint_name,
106 gl_shader_stage stage,
107 const VkSpecializationInfo *spec_info)
108 {
109 const struct anv_physical_device *pdevice =
110 &device->instance->physicalDevice;
111 const struct brw_compiler *compiler = pdevice->compiler;
112 const nir_shader_compiler_options *nir_options =
113 compiler->glsl_compiler_options[stage].NirOptions;
114
115 uint32_t *spirv = (uint32_t *) module->data;
116 assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
117 assert(module->size % 4 == 0);
118
119 uint32_t num_spec_entries = 0;
120 struct nir_spirv_specialization *spec_entries = NULL;
121 if (spec_info && spec_info->mapEntryCount > 0) {
122 num_spec_entries = spec_info->mapEntryCount;
123 spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
124 for (uint32_t i = 0; i < num_spec_entries; i++) {
125 VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
126 const void *data = spec_info->pData + entry.offset;
127 assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
128
129 spec_entries[i].id = spec_info->pMapEntries[i].constantID;
130 if (entry.size == 8)
131 spec_entries[i].data64 = *(const uint64_t *)data;
132 else
133 spec_entries[i].data32 = *(const uint32_t *)data;
134 }
135 }
136
137 nir_address_format ssbo_addr_format =
138 anv_nir_ssbo_addr_format(pdevice, device->robust_buffer_access);
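/* The caps below tell spirv_to_nir which SPIR-V capabilities this device
 * accepts. In particular, descriptor_array_dynamic_indexing,
 * descriptor_array_non_uniform_indexing, descriptor_indexing and
 * runtime_descriptor_array allow shaders built against
 * VK_EXT_descriptor_indexing; non-uniform resource access is then lowered
 * in anv_pipeline_lower_nir() via nir_lower_non_uniform_access.
 */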
139 struct spirv_to_nir_options spirv_options = {
140 .lower_workgroup_access_to_offsets = true,
141 .caps = {
142 .derivative_group = true,
143 .descriptor_array_dynamic_indexing = true,
144 .descriptor_array_non_uniform_indexing = true,
145 .descriptor_indexing = true,
146 .device_group = true,
147 .draw_parameters = true,
148 .float16 = pdevice->info.gen >= 8,
149 .float64 = pdevice->info.gen >= 8,
150 .geometry_streams = true,
151 .image_write_without_format = true,
152 .int8 = pdevice->info.gen >= 8,
153 .int16 = pdevice->info.gen >= 8,
154 .int64 = pdevice->info.gen >= 8,
155 .int64_atomics = pdevice->info.gen >= 9 && pdevice->use_softpin,
156 .min_lod = true,
157 .multiview = true,
158 .physical_storage_buffer_address = pdevice->has_a64_buffer_access,
159 .post_depth_coverage = pdevice->info.gen >= 9,
160 .runtime_descriptor_array = true,
161 .shader_viewport_index_layer = true,
162 .stencil_export = pdevice->info.gen >= 9,
163 .storage_8bit = pdevice->info.gen >= 8,
164 .storage_16bit = pdevice->info.gen >= 8,
165 .subgroup_arithmetic = true,
166 .subgroup_basic = true,
167 .subgroup_ballot = true,
168 .subgroup_quad = true,
169 .subgroup_shuffle = true,
170 .subgroup_vote = true,
171 .tessellation = true,
172 .transform_feedback = pdevice->info.gen >= 8,
173 .variable_pointers = true,
174 },
175 .ubo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
176 .ssbo_ptr_type = nir_address_format_to_glsl_type(ssbo_addr_format),
177 .phys_ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT64, 1),
178 .push_const_ptr_type = glsl_uint_type(),
179 .shared_ptr_type = glsl_uint_type(),
180 };
181
182
183 nir_function *entry_point =
184 spirv_to_nir(spirv, module->size / 4,
185 spec_entries, num_spec_entries,
186 stage, entrypoint_name, &spirv_options, nir_options);
187 nir_shader *nir = entry_point->shader;
188 assert(nir->info.stage == stage);
189 nir_validate_shader(nir, "after spirv_to_nir");
190 ralloc_steal(mem_ctx, nir);
191
192 free(spec_entries);
193
194 if (unlikely(INTEL_DEBUG & stage_to_debug[stage])) {
195 fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
196 gl_shader_stage_name(stage));
197 nir_print_shader(nir, stderr);
198 }
199
200 /* We have to lower away local constant initializers right before we
201 * inline functions. That way they get properly initialized at the top
202 * of the function and not at the top of its caller.
203 */
204 NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
205 NIR_PASS_V(nir, nir_lower_returns);
206 NIR_PASS_V(nir, nir_inline_functions);
207 NIR_PASS_V(nir, nir_opt_deref);
208
209 /* Pick off the single entrypoint that we want */
210 foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
211 if (func != entry_point)
212 exec_node_remove(&func->node);
213 }
214 assert(exec_list_length(&nir->functions) == 1);
215
216 /* Now that we've deleted all but the main function, we can go ahead and
217 * lower the rest of the constant initializers. We do this here so that
218 * nir_remove_dead_variables and split_per_member_structs below see the
219 * corresponding stores.
220 */
221 NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
222
223 /* Split member structs. We do this before lower_io_to_temporaries so that
224 * it doesn't lower system values to temporaries by accident.
225 */
226 NIR_PASS_V(nir, nir_split_var_copies);
227 NIR_PASS_V(nir, nir_split_per_member_structs);
228
229 NIR_PASS_V(nir, nir_remove_dead_variables,
230 nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
231
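/* Physical storage buffer (global) pointers are lowered to plain 64-bit
 * addresses here.
 */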
232 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
233 nir_address_format_64bit_global);
234
235 NIR_PASS_V(nir, nir_propagate_invariant);
236 NIR_PASS_V(nir, nir_lower_io_to_temporaries,
237 entry_point->impl, true, false);
238
239 NIR_PASS_V(nir, nir_lower_frexp);
240
241 /* Vulkan uses the separate-shader linking model */
242 nir->info.separate_shader = true;
243
244 nir = brw_preprocess_nir(compiler, nir, NULL);
245
246 return nir;
247 }
248
249 void anv_DestroyPipeline(
250 VkDevice _device,
251 VkPipeline _pipeline,
252 const VkAllocationCallbacks* pAllocator)
253 {
254 ANV_FROM_HANDLE(anv_device, device, _device);
255 ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
256
257 if (!pipeline)
258 return;
259
260 anv_reloc_list_finish(&pipeline->batch_relocs,
261 pAllocator ? pAllocator : &device->alloc);
262 if (pipeline->blend_state.map)
263 anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
264
265 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
266 if (pipeline->shaders[s])
267 anv_shader_bin_unref(device, pipeline->shaders[s]);
268 }
269
270 vk_free2(&device->alloc, pAllocator, pipeline);
271 }
272
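/* Translation from VkPrimitiveTopology to the hardware 3DPRIM_* values.
 * Patch lists are not in this table; anv_pipeline_init() programs
 * _3DPRIM_PATCHLIST(n) directly when tessellation is used.
 */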
273 static const uint32_t vk_to_gen_primitive_type[] = {
274 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
275 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
276 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
277 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
278 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
279 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
280 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
281 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
282 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
283 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
284 };
285
286 static void
287 populate_sampler_prog_key(const struct gen_device_info *devinfo,
288 struct brw_sampler_prog_key_data *key)
289 {
290 /* Almost all multisampled textures are compressed. The only time when we
291 * don't compress a multisampled texture is for 16x MSAA with a surface
292 * width greater than 8k which is a bit of an edge case. Since the sampler
293 * just ignores the MCS parameter to ld2ms when MCS is disabled, it's safe
294 * to tell the compiler to always assume compression.
295 */
296 key->compressed_multisample_layout_mask = ~0;
297
298 /* SkyLake added support for 16x MSAA. With this came a new message for
299 * reading from a 16x MSAA surface with compression. The new message was
300 * needed because now the MCS data is 64 bits instead of 32 or lower as is
301 * the case for 8x, 4x, and 2x. The key->msaa_16 bit-field controls which
302 * message we use. Fortunately, the 16x message works for 8x, 4x, and 2x
303 * so we can just use it unconditionally. This may not be quite as
304 * efficient but it saves us from recompiling.
305 */
306 if (devinfo->gen >= 9)
307 key->msaa_16 = ~0;
308
309 /* XXX: Handle texture swizzle on HSW- */
310 for (int i = 0; i < MAX_SAMPLERS; i++) {
311 /* Assume color sampler, no swizzling. (Works for BDW+) */
312 key->swizzles[i] = SWIZZLE_XYZW;
313 }
314 }
315
316 static void
317 populate_vs_prog_key(const struct gen_device_info *devinfo,
318 struct brw_vs_prog_key *key)
319 {
320 memset(key, 0, sizeof(*key));
321
322 populate_sampler_prog_key(devinfo, &key->tex);
323
324 /* XXX: Handle vertex input work-arounds */
325
326 /* XXX: Handle sampler_prog_key */
327 }
328
329 static void
330 populate_tcs_prog_key(const struct gen_device_info *devinfo,
331 unsigned input_vertices,
332 struct brw_tcs_prog_key *key)
333 {
334 memset(key, 0, sizeof(*key));
335
336 populate_sampler_prog_key(devinfo, &key->tex);
337
338 key->input_vertices = input_vertices;
339 }
340
341 static void
342 populate_tes_prog_key(const struct gen_device_info *devinfo,
343 struct brw_tes_prog_key *key)
344 {
345 memset(key, 0, sizeof(*key));
346
347 populate_sampler_prog_key(devinfo, &key->tex);
348 }
349
350 static void
351 populate_gs_prog_key(const struct gen_device_info *devinfo,
352 struct brw_gs_prog_key *key)
353 {
354 memset(key, 0, sizeof(*key));
355
356 populate_sampler_prog_key(devinfo, &key->tex);
357 }
358
359 static void
360 populate_wm_prog_key(const struct gen_device_info *devinfo,
361 const struct anv_subpass *subpass,
362 const VkPipelineMultisampleStateCreateInfo *ms_info,
363 struct brw_wm_prog_key *key)
364 {
365 memset(key, 0, sizeof(*key));
366
367 populate_sampler_prog_key(devinfo, &key->tex);
368
369 /* We set this to 0 here and set it to the actual value before we call
370 * brw_compile_fs.
371 */
372 key->input_slots_valid = 0;
373
374 /* Vulkan doesn't specify a default */
375 key->high_quality_derivatives = false;
376
377 /* XXX Vulkan doesn't appear to specify */
378 key->clamp_fragment_color = false;
379
380 assert(subpass->color_count <= MAX_RTS);
381 for (uint32_t i = 0; i < subpass->color_count; i++) {
382 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
383 key->color_outputs_valid |= (1 << i);
384 }
385
386 key->nr_color_regions = util_bitcount(key->color_outputs_valid);
387
388 /* The hardware disables alpha-to-coverage whenever the shader writes
389 * SampleMask, which has to be worked around in the compiled code. To
390 * avoid recompiles here we would need to know whether the shader has a
391 * SampleMask output; we don't, so key this off the enable bit alone.
392 */
393 key->alpha_to_coverage = ms_info && ms_info->alphaToCoverageEnable;
394
395 /* Vulkan doesn't support fixed-function alpha test */
396 key->alpha_test_replicate_alpha = false;
397
398 if (ms_info) {
399 /* We should probably pull this out of the shader, but it's fairly
400 * harmless to compute it and then let dead-code take care of it.
401 */
402 if (ms_info->rasterizationSamples > 1) {
403 key->persample_interp =
404 (ms_info->minSampleShading * ms_info->rasterizationSamples) > 1;
405 key->multisample_fbo = true;
406 }
407
408 key->frag_coord_adds_sample_pos = ms_info->sampleShadingEnable;
409 }
410 }
411
412 static void
413 populate_cs_prog_key(const struct gen_device_info *devinfo,
414 struct brw_cs_prog_key *key)
415 {
416 memset(key, 0, sizeof(*key));
417
418 populate_sampler_prog_key(devinfo, &key->tex);
419 }
420
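/* Everything needed to compile a single shader stage: the module and
 * entrypoint with their hashes, the program key, the NIR once it has been
 * generated, the binding table map, the resulting prog_data, and the
 * VK_EXT_pipeline_creation_feedback data.
 */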
421 struct anv_pipeline_stage {
422 gl_shader_stage stage;
423
424 const struct anv_shader_module *module;
425 const char *entrypoint;
426 const VkSpecializationInfo *spec_info;
427
428 unsigned char shader_sha1[20];
429
430 union brw_any_prog_key key;
431
432 struct {
433 gl_shader_stage stage;
434 unsigned char sha1[20];
435 } cache_key;
436
437 nir_shader *nir;
438
439 struct anv_pipeline_binding surface_to_descriptor[256];
440 struct anv_pipeline_binding sampler_to_descriptor[256];
441 struct anv_pipeline_bind_map bind_map;
442
443 union brw_any_prog_data prog_data;
444
445 VkPipelineCreationFeedbackEXT feedback;
446 };
447
448 static void
449 anv_pipeline_hash_shader(const struct anv_shader_module *module,
450 const char *entrypoint,
451 gl_shader_stage stage,
452 const VkSpecializationInfo *spec_info,
453 unsigned char *sha1_out)
454 {
455 struct mesa_sha1 ctx;
456 _mesa_sha1_init(&ctx);
457
458 _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
459 _mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
460 _mesa_sha1_update(&ctx, &stage, sizeof(stage));
461 if (spec_info) {
462 _mesa_sha1_update(&ctx, spec_info->pMapEntries,
463 spec_info->mapEntryCount *
464 sizeof(*spec_info->pMapEntries));
465 _mesa_sha1_update(&ctx, spec_info->pData,
466 spec_info->dataSize);
467 }
468
469 _mesa_sha1_final(&ctx, sha1_out);
470 }
471
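/* Hash everything that can affect the generated code: the subpass view
 * mask, the pipeline layout, robustBufferAccess, and each active stage's
 * shader SHA-1 and program key. The result seeds the per-stage pipeline
 * cache keys.
 */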
472 static void
473 anv_pipeline_hash_graphics(struct anv_pipeline *pipeline,
474 struct anv_pipeline_layout *layout,
475 struct anv_pipeline_stage *stages,
476 unsigned char *sha1_out)
477 {
478 struct mesa_sha1 ctx;
479 _mesa_sha1_init(&ctx);
480
481 _mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
482 sizeof(pipeline->subpass->view_mask));
483
484 if (layout)
485 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
486
487 const bool rba = pipeline->device->robust_buffer_access;
488 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
489
490 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
491 if (stages[s].entrypoint) {
492 _mesa_sha1_update(&ctx, stages[s].shader_sha1,
493 sizeof(stages[s].shader_sha1));
494 _mesa_sha1_update(&ctx, &stages[s].key, brw_prog_key_size(s));
495 }
496 }
497
498 _mesa_sha1_final(&ctx, sha1_out);
499 }
500
501 static void
502 anv_pipeline_hash_compute(struct anv_pipeline *pipeline,
503 struct anv_pipeline_layout *layout,
504 struct anv_pipeline_stage *stage,
505 unsigned char *sha1_out)
506 {
507 struct mesa_sha1 ctx;
508 _mesa_sha1_init(&ctx);
509
510 if (layout)
511 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
512
513 const bool rba = pipeline->device->robust_buffer_access;
514 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
515
516 _mesa_sha1_update(&ctx, stage->shader_sha1,
517 sizeof(stage->shader_sha1));
518 _mesa_sha1_update(&ctx, &stage->key.cs, sizeof(stage->key.cs));
519
520 _mesa_sha1_final(&ctx, sha1_out);
521 }
522
523 static nir_shader *
524 anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
525 struct anv_pipeline_cache *cache,
526 void *mem_ctx,
527 struct anv_pipeline_stage *stage)
528 {
529 const struct brw_compiler *compiler =
530 pipeline->device->instance->physicalDevice.compiler;
531 const nir_shader_compiler_options *nir_options =
532 compiler->glsl_compiler_options[stage->stage].NirOptions;
533 nir_shader *nir;
534
535 nir = anv_device_search_for_nir(pipeline->device, cache,
536 nir_options,
537 stage->shader_sha1,
538 mem_ctx);
539 if (nir) {
540 assert(nir->info.stage == stage->stage);
541 return nir;
542 }
543
544 nir = anv_shader_compile_to_nir(pipeline->device,
545 mem_ctx,
546 stage->module,
547 stage->entrypoint,
548 stage->stage,
549 stage->spec_info);
550 if (nir) {
551 anv_device_upload_nir(pipeline->device, cache, nir, stage->shader_sha1);
552 return nir;
553 }
554
555 return NULL;
556 }
557
558 static void
559 anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
560 void *mem_ctx,
561 struct anv_pipeline_stage *stage,
562 struct anv_pipeline_layout *layout)
563 {
564 const struct anv_physical_device *pdevice =
565 &pipeline->device->instance->physicalDevice;
566 const struct brw_compiler *compiler = pdevice->compiler;
567
568 struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
569 nir_shader *nir = stage->nir;
570
571 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
572 NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);
573 NIR_PASS_V(nir, anv_nir_lower_input_attachments);
574 }
575
576 NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
577
578 NIR_PASS_V(nir, anv_nir_lower_push_constants);
579
580 if (nir->info.stage != MESA_SHADER_COMPUTE)
581 NIR_PASS_V(nir, anv_nir_lower_multiview, pipeline->subpass->view_mask);
582
583 if (nir->info.stage == MESA_SHADER_COMPUTE)
584 prog_data->total_shared = nir->num_shared;
585
586 nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
587
588 if (nir->num_uniforms > 0) {
589 assert(prog_data->nr_params == 0);
590
591 /* If the shader uses any push constants at all, we'll just give
592 * them the maximum possible number
593 */
594 assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
595 nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE;
596 prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
597 prog_data->param = ralloc_array(mem_ctx, uint32_t, prog_data->nr_params);
598
599 /* We now set the param values to be offsets into an
600 * anv_push_constants structure. Since the compiler doesn't
601 * actually dereference any of the gl_constant_value pointers in the
602 * params array, it doesn't really matter what we put here.
603 */
604 struct anv_push_constants *null_data = NULL;
605 /* Fill out the push constants section of the param array */
606 for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++) {
607 prog_data->param[i] = ANV_PARAM_PUSH(
608 (uintptr_t)&null_data->client_data[i * sizeof(float)]);
609 }
610 }
611
612 if (nir->info.num_ssbos > 0 || nir->info.num_images > 0)
613 pipeline->needs_data_cache = true;
614
615 NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo);
616
617 /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
618 if (layout) {
619 anv_nir_apply_pipeline_layout(pdevice,
620 pipeline->device->robust_buffer_access,
621 layout, nir, prog_data,
622 &stage->bind_map);
623
624 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo,
625 nir_address_format_32bit_index_offset);
626 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
627 anv_nir_ssbo_addr_format(pdevice,
628 pipeline->device->robust_buffer_access));
629
630 NIR_PASS_V(nir, nir_opt_constant_folding);
631
632 /* We don't support non-uniform UBOs, and non-uniform SSBO access is
633 * handled naturally by falling back to A64 messages.
634 */
635 NIR_PASS_V(nir, nir_lower_non_uniform_access,
636 nir_lower_non_uniform_texture_access |
637 nir_lower_non_uniform_image_access);
638 }
639
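/* Let the backend pick UBO ranges that can be promoted to push constants;
 * this is not done for compute shaders.
 */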
640 if (nir->info.stage != MESA_SHADER_COMPUTE)
641 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
642
643 assert(nir->num_uniforms == prog_data->nr_params * 4);
644
645 stage->nir = nir;
646 }
647
648 static void
649 anv_pipeline_link_vs(const struct brw_compiler *compiler,
650 struct anv_pipeline_stage *vs_stage,
651 struct anv_pipeline_stage *next_stage)
652 {
653 if (next_stage)
654 brw_nir_link_shaders(compiler, &vs_stage->nir, &next_stage->nir);
655 }
656
657 static const unsigned *
658 anv_pipeline_compile_vs(const struct brw_compiler *compiler,
659 void *mem_ctx,
660 struct anv_device *device,
661 struct anv_pipeline_stage *vs_stage)
662 {
663 brw_compute_vue_map(compiler->devinfo,
664 &vs_stage->prog_data.vs.base.vue_map,
665 vs_stage->nir->info.outputs_written,
666 vs_stage->nir->info.separate_shader);
667
668 return brw_compile_vs(compiler, device, mem_ctx, &vs_stage->key.vs,
669 &vs_stage->prog_data.vs, vs_stage->nir, -1, NULL);
670 }
671
672 static void
673 merge_tess_info(struct shader_info *tes_info,
674 const struct shader_info *tcs_info)
675 {
676 /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
677 *
678 * "PointMode. Controls generation of points rather than triangles
679 * or lines. This functionality defaults to disabled, and is
680 * enabled if either shader stage includes the execution mode."
681 *
682 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
683 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
684 * and OutputVertices, it says:
685 *
686 * "One mode must be set in at least one of the tessellation
687 * shader stages."
688 *
689 * So, the fields can be set in either the TCS or TES, but they must
690 * agree if set in both. Our backend looks at TES, so bitwise-or in
691 * the values from the TCS.
692 */
693 assert(tcs_info->tess.tcs_vertices_out == 0 ||
694 tes_info->tess.tcs_vertices_out == 0 ||
695 tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
696 tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
697
698 assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
699 tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
700 tcs_info->tess.spacing == tes_info->tess.spacing);
701 tes_info->tess.spacing |= tcs_info->tess.spacing;
702
703 assert(tcs_info->tess.primitive_mode == 0 ||
704 tes_info->tess.primitive_mode == 0 ||
705 tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
706 tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
707 tes_info->tess.ccw |= tcs_info->tess.ccw;
708 tes_info->tess.point_mode |= tcs_info->tess.point_mode;
709 }
710
711 static void
712 anv_pipeline_link_tcs(const struct brw_compiler *compiler,
713 struct anv_pipeline_stage *tcs_stage,
714 struct anv_pipeline_stage *tes_stage)
715 {
716 assert(tes_stage && tes_stage->stage == MESA_SHADER_TESS_EVAL);
717
718 brw_nir_link_shaders(compiler, &tcs_stage->nir, &tes_stage->nir);
719
720 nir_lower_patch_vertices(tes_stage->nir,
721 tcs_stage->nir->info.tess.tcs_vertices_out,
722 NULL);
723
724 /* Copy TCS info into the TES info */
725 merge_tess_info(&tes_stage->nir->info, &tcs_stage->nir->info);
726
727 /* Whacking the key after cache lookup is a bit sketchy, but all of
728 * this comes from the SPIR-V, which is part of the hash used for the
729 * pipeline cache. So it should be safe.
730 */
731 tcs_stage->key.tcs.tes_primitive_mode =
732 tes_stage->nir->info.tess.primitive_mode;
733 tcs_stage->key.tcs.quads_workaround =
734 compiler->devinfo->gen < 9 &&
735 tes_stage->nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
736 tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
737 }
738
739 static const unsigned *
740 anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
741 void *mem_ctx,
742 struct anv_device *device,
743 struct anv_pipeline_stage *tcs_stage,
744 struct anv_pipeline_stage *prev_stage)
745 {
746 tcs_stage->key.tcs.outputs_written =
747 tcs_stage->nir->info.outputs_written;
748 tcs_stage->key.tcs.patch_outputs_written =
749 tcs_stage->nir->info.patch_outputs_written;
750
751 return brw_compile_tcs(compiler, device, mem_ctx, &tcs_stage->key.tcs,
752 &tcs_stage->prog_data.tcs, tcs_stage->nir,
753 -1, NULL);
754 }
755
756 static void
757 anv_pipeline_link_tes(const struct brw_compiler *compiler,
758 struct anv_pipeline_stage *tes_stage,
759 struct anv_pipeline_stage *next_stage)
760 {
761 if (next_stage)
762 brw_nir_link_shaders(compiler, &tes_stage->nir, &next_stage->nir);
763 }
764
765 static const unsigned *
766 anv_pipeline_compile_tes(const struct brw_compiler *compiler,
767 void *mem_ctx,
768 struct anv_device *device,
769 struct anv_pipeline_stage *tes_stage,
770 struct anv_pipeline_stage *tcs_stage)
771 {
772 tes_stage->key.tes.inputs_read =
773 tcs_stage->nir->info.outputs_written;
774 tes_stage->key.tes.patch_inputs_read =
775 tcs_stage->nir->info.patch_outputs_written;
776
777 return brw_compile_tes(compiler, device, mem_ctx, &tes_stage->key.tes,
778 &tcs_stage->prog_data.tcs.base.vue_map,
779 &tes_stage->prog_data.tes, tes_stage->nir,
780 NULL, -1, NULL);
781 }
782
783 static void
784 anv_pipeline_link_gs(const struct brw_compiler *compiler,
785 struct anv_pipeline_stage *gs_stage,
786 struct anv_pipeline_stage *next_stage)
787 {
788 if (next_stage)
789 brw_nir_link_shaders(compiler, &gs_stage->nir, &next_stage->nir);
790 }
791
792 static const unsigned *
793 anv_pipeline_compile_gs(const struct brw_compiler *compiler,
794 void *mem_ctx,
795 struct anv_device *device,
796 struct anv_pipeline_stage *gs_stage,
797 struct anv_pipeline_stage *prev_stage)
798 {
799 brw_compute_vue_map(compiler->devinfo,
800 &gs_stage->prog_data.gs.base.vue_map,
801 gs_stage->nir->info.outputs_written,
802 gs_stage->nir->info.separate_shader);
803
804 return brw_compile_gs(compiler, device, mem_ctx, &gs_stage->key.gs,
805 &gs_stage->prog_data.gs, gs_stage->nir,
806 NULL, -1, NULL);
807 }
808
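/* Compact the fragment shader's color outputs: build one surface binding
 * per render target that is actually written and valid in the key, rewrite
 * the output locations to the compacted slots, and fall back to a single
 * null render target when nothing is written at all.
 */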
809 static void
810 anv_pipeline_link_fs(const struct brw_compiler *compiler,
811 struct anv_pipeline_stage *stage)
812 {
813 unsigned num_rts = 0;
814 const int max_rt = FRAG_RESULT_DATA7 - FRAG_RESULT_DATA0 + 1;
815 struct anv_pipeline_binding rt_bindings[max_rt];
816 nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
817 int rt_to_bindings[max_rt];
818 memset(rt_to_bindings, -1, sizeof(rt_to_bindings));
819 bool rt_used[max_rt];
820 memset(rt_used, 0, sizeof(rt_used));
821
822 /* Flag used render targets */
823 nir_foreach_variable_safe(var, &stage->nir->outputs) {
824 if (var->data.location < FRAG_RESULT_DATA0)
825 continue;
826
827 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
828 /* Unused or out-of-bounds */
829 if (rt >= MAX_RTS || !(stage->key.wm.color_outputs_valid & (1 << rt)))
830 continue;
831
832 const unsigned array_len =
833 glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
834 assert(rt + array_len <= max_rt);
835
836 for (unsigned i = 0; i < array_len; i++)
837 rt_used[rt + i] = true;
838 }
839
840 /* Set new, compacted, location */
841 for (unsigned i = 0; i < max_rt; i++) {
842 if (!rt_used[i])
843 continue;
844
845 rt_to_bindings[i] = num_rts;
846 rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
847 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
848 .binding = 0,
849 .index = i,
850 };
851 num_rts++;
852 }
853
854 bool deleted_output = false;
855 nir_foreach_variable_safe(var, &stage->nir->outputs) {
856 if (var->data.location < FRAG_RESULT_DATA0)
857 continue;
858
859 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
860 if (rt >= MAX_RTS ||
861 !(stage->key.wm.color_outputs_valid & (1 << rt))) {
862 /* Unused or out-of-bounds, throw it away */
863 deleted_output = true;
864 var->data.mode = nir_var_function_temp;
865 exec_node_remove(&var->node);
866 exec_list_push_tail(&impl->locals, &var->node);
867 continue;
868 }
869
870 /* Give it the new location */
871 assert(rt_to_bindings[rt] != -1);
872 var->data.location = rt_to_bindings[rt] + FRAG_RESULT_DATA0;
873 }
874
875 if (deleted_output)
876 nir_fixup_deref_modes(stage->nir);
877
878 if (num_rts == 0) {
879 /* If we have no render targets, we need a null render target */
880 rt_bindings[0] = (struct anv_pipeline_binding) {
881 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
882 .binding = 0,
883 .index = UINT32_MAX,
884 };
885 num_rts = 1;
886 }
887
888 /* Now that we've determined the actual number of render targets, adjust
889 * the key accordingly.
890 */
891 stage->key.wm.nr_color_regions = num_rts;
892 stage->key.wm.color_outputs_valid = (1 << num_rts) - 1;
893
894 assert(num_rts <= max_rt);
895 assert(stage->bind_map.surface_count == 0);
896 typed_memcpy(stage->bind_map.surface_to_descriptor,
897 rt_bindings, num_rts);
898 stage->bind_map.surface_count += num_rts;
899 }
900
901 static const unsigned *
902 anv_pipeline_compile_fs(const struct brw_compiler *compiler,
903 void *mem_ctx,
904 struct anv_device *device,
905 struct anv_pipeline_stage *fs_stage,
906 struct anv_pipeline_stage *prev_stage)
907 {
908 /* TODO: we could set this to 0 based on the information in nir_shader, but
909 * we need this before we call spirv_to_nir.
910 */
911 assert(prev_stage);
912 fs_stage->key.wm.input_slots_valid =
913 prev_stage->prog_data.vue.vue_map.slots_valid;
914
915 const unsigned *code =
916 brw_compile_fs(compiler, device, mem_ctx, &fs_stage->key.wm,
917 &fs_stage->prog_data.wm, fs_stage->nir,
918 NULL, -1, -1, -1, true, false, NULL, NULL);
919
920 if (fs_stage->key.wm.nr_color_regions == 0 &&
921 !fs_stage->prog_data.wm.has_side_effects &&
922 !fs_stage->prog_data.wm.uses_kill &&
923 fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
924 !fs_stage->prog_data.wm.computed_stencil) {
925 /* This fragment shader has no outputs and no side effects. Still
926 * return the code pointer so the caller doesn't think the compile
927 * failed, but zero out prog_data; program_size then becomes zero,
928 * which disables the stage.
929 */
930 memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
931 }
932
933 return code;
934 }
935
936 static VkResult
937 anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
938 struct anv_pipeline_cache *cache,
939 const VkGraphicsPipelineCreateInfo *info)
940 {
941 VkPipelineCreationFeedbackEXT pipeline_feedback = {
942 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
943 };
944 int64_t pipeline_start = os_time_get_nano();
945
946 const struct brw_compiler *compiler =
947 pipeline->device->instance->physicalDevice.compiler;
948 struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
949
950 pipeline->active_stages = 0;
951
952 VkResult result;
953 for (uint32_t i = 0; i < info->stageCount; i++) {
954 const VkPipelineShaderStageCreateInfo *sinfo = &info->pStages[i];
955 gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
956
957 pipeline->active_stages |= sinfo->stage;
958
959 int64_t stage_start = os_time_get_nano();
960
961 stages[stage].stage = stage;
962 stages[stage].module = anv_shader_module_from_handle(sinfo->module);
963 stages[stage].entrypoint = sinfo->pName;
964 stages[stage].spec_info = sinfo->pSpecializationInfo;
965 anv_pipeline_hash_shader(stages[stage].module,
966 stages[stage].entrypoint,
967 stage,
968 stages[stage].spec_info,
969 stages[stage].shader_sha1);
970
971 const struct gen_device_info *devinfo = &pipeline->device->info;
972 switch (stage) {
973 case MESA_SHADER_VERTEX:
974 populate_vs_prog_key(devinfo, &stages[stage].key.vs);
975 break;
976 case MESA_SHADER_TESS_CTRL:
977 populate_tcs_prog_key(devinfo,
978 info->pTessellationState->patchControlPoints,
979 &stages[stage].key.tcs);
980 break;
981 case MESA_SHADER_TESS_EVAL:
982 populate_tes_prog_key(devinfo, &stages[stage].key.tes);
983 break;
984 case MESA_SHADER_GEOMETRY:
985 populate_gs_prog_key(devinfo, &stages[stage].key.gs);
986 break;
987 case MESA_SHADER_FRAGMENT:
988 populate_wm_prog_key(devinfo, pipeline->subpass,
989 info->pMultisampleState,
990 &stages[stage].key.wm);
991 break;
992 default:
993 unreachable("Invalid graphics shader stage");
994 }
995
996 stages[stage].feedback.duration += os_time_get_nano() - stage_start;
997 stages[stage].feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
998 }
999
1000 if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
1001 pipeline->active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
1002
1003 assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
1004
1005 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1006
1007 unsigned char sha1[20];
1008 anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
1009
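/* Look every stage up in the pipeline cache first; only the stages that
 * miss will be compiled below.
 */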
1010 unsigned found = 0;
1011 unsigned cache_hits = 0;
1012 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1013 if (!stages[s].entrypoint)
1014 continue;
1015
1016 int64_t stage_start = os_time_get_nano();
1017
1018 stages[s].cache_key.stage = s;
1019 memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
1020
1021 bool cache_hit;
1022 struct anv_shader_bin *bin =
1023 anv_device_search_for_kernel(pipeline->device, cache,
1024 &stages[s].cache_key,
1025 sizeof(stages[s].cache_key), &cache_hit);
1026 if (bin) {
1027 found++;
1028 pipeline->shaders[s] = bin;
1029 }
1030
1031 if (cache_hit) {
1032 cache_hits++;
1033 stages[s].feedback.flags |=
1034 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1035 }
1036 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1037 }
1038
1039 if (found == __builtin_popcount(pipeline->active_stages)) {
1040 if (cache_hits == found) {
1041 pipeline_feedback.flags |=
1042 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1043 }
1044 /* We found all our shaders in the cache. We're done. */
1045 goto done;
1046 } else if (found > 0) {
1047 /* We found some but not all of our shaders. This shouldn't happen
1048 * most of the time but it can if we have a partially populated
1049 * pipeline cache.
1050 */
1051 assert(found < __builtin_popcount(pipeline->active_stages));
1052
1053 vk_debug_report(&pipeline->device->instance->debug_report_callbacks,
1054 VK_DEBUG_REPORT_WARNING_BIT_EXT |
1055 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
1056 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
1057 (uint64_t)(uintptr_t)cache,
1058 0, 0, "anv",
1059 "Found a partial pipeline in the cache. This is "
1060 "most likely caused by an incomplete pipeline cache "
1061 "import or export");
1062
1063 /* We're going to have to recompile anyway, so just throw away our
1064 * references to the shaders in the cache. We'll get them out of the
1065 * cache again as part of the compilation process.
1066 */
1067 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1068 stages[s].feedback.flags = 0;
1069 if (pipeline->shaders[s]) {
1070 anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
1071 pipeline->shaders[s] = NULL;
1072 }
1073 }
1074 }
1075
1076 void *pipeline_ctx = ralloc_context(NULL);
1077
1078 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1079 if (!stages[s].entrypoint)
1080 continue;
1081
1082 int64_t stage_start = os_time_get_nano();
1083
1084 assert(stages[s].stage == s);
1085 assert(pipeline->shaders[s] == NULL);
1086
1087 stages[s].bind_map = (struct anv_pipeline_bind_map) {
1088 .surface_to_descriptor = stages[s].surface_to_descriptor,
1089 .sampler_to_descriptor = stages[s].sampler_to_descriptor
1090 };
1091
1092 stages[s].nir = anv_pipeline_stage_get_nir(pipeline, cache,
1093 pipeline_ctx,
1094 &stages[s]);
1095 if (stages[s].nir == NULL) {
1096 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1097 goto fail;
1098 }
1099
1100 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1101 }
1102
1103 /* Walk backwards to link */
1104 struct anv_pipeline_stage *next_stage = NULL;
1105 for (int s = MESA_SHADER_STAGES - 1; s >= 0; s--) {
1106 if (!stages[s].entrypoint)
1107 continue;
1108
1109 switch (s) {
1110 case MESA_SHADER_VERTEX:
1111 anv_pipeline_link_vs(compiler, &stages[s], next_stage);
1112 break;
1113 case MESA_SHADER_TESS_CTRL:
1114 anv_pipeline_link_tcs(compiler, &stages[s], next_stage);
1115 break;
1116 case MESA_SHADER_TESS_EVAL:
1117 anv_pipeline_link_tes(compiler, &stages[s], next_stage);
1118 break;
1119 case MESA_SHADER_GEOMETRY:
1120 anv_pipeline_link_gs(compiler, &stages[s], next_stage);
1121 break;
1122 case MESA_SHADER_FRAGMENT:
1123 anv_pipeline_link_fs(compiler, &stages[s]);
1124 break;
1125 default:
1126 unreachable("Invalid graphics shader stage");
1127 }
1128
1129 next_stage = &stages[s];
1130 }
1131
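/* Compile in pipeline order so each stage can consume information from the
 * previous one (e.g. the fragment key's input_slots_valid comes from the
 * last pre-rasterization stage's VUE map).
 */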
1132 struct anv_pipeline_stage *prev_stage = NULL;
1133 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1134 if (!stages[s].entrypoint)
1135 continue;
1136
1137 int64_t stage_start = os_time_get_nano();
1138
1139 void *stage_ctx = ralloc_context(NULL);
1140
1141 nir_xfb_info *xfb_info = NULL;
1142 if (s == MESA_SHADER_VERTEX ||
1143 s == MESA_SHADER_TESS_EVAL ||
1144 s == MESA_SHADER_GEOMETRY)
1145 xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);
1146
1147 anv_pipeline_lower_nir(pipeline, stage_ctx, &stages[s], layout);
1148
1149 const unsigned *code;
1150 switch (s) {
1151 case MESA_SHADER_VERTEX:
1152 code = anv_pipeline_compile_vs(compiler, stage_ctx, pipeline->device,
1153 &stages[s]);
1154 break;
1155 case MESA_SHADER_TESS_CTRL:
1156 code = anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->device,
1157 &stages[s], prev_stage);
1158 break;
1159 case MESA_SHADER_TESS_EVAL:
1160 code = anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->device,
1161 &stages[s], prev_stage);
1162 break;
1163 case MESA_SHADER_GEOMETRY:
1164 code = anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->device,
1165 &stages[s], prev_stage);
1166 break;
1167 case MESA_SHADER_FRAGMENT:
1168 code = anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->device,
1169 &stages[s], prev_stage);
1170 break;
1171 default:
1172 unreachable("Invalid graphics shader stage");
1173 }
1174 if (code == NULL) {
1175 ralloc_free(stage_ctx);
1176 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1177 goto fail;
1178 }
1179
1180 struct anv_shader_bin *bin =
1181 anv_device_upload_kernel(pipeline->device, cache,
1182 &stages[s].cache_key,
1183 sizeof(stages[s].cache_key),
1184 code, stages[s].prog_data.base.program_size,
1185 stages[s].nir->constant_data,
1186 stages[s].nir->constant_data_size,
1187 &stages[s].prog_data.base,
1188 brw_prog_data_size(s),
1189 xfb_info, &stages[s].bind_map);
1190 if (!bin) {
1191 ralloc_free(stage_ctx);
1192 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1193 goto fail;
1194 }
1195
1196 pipeline->shaders[s] = bin;
1197 ralloc_free(stage_ctx);
1198
1199 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1200
1201 prev_stage = &stages[s];
1202 }
1203
1204 ralloc_free(pipeline_ctx);
1205
1206 done:
1207
1208 if (pipeline->shaders[MESA_SHADER_FRAGMENT] &&
1209 pipeline->shaders[MESA_SHADER_FRAGMENT]->prog_data->program_size == 0) {
1210 /* This can happen if we decided to implicitly disable the fragment
1211 * shader. See anv_pipeline_compile_fs().
1212 */
1213 anv_shader_bin_unref(pipeline->device,
1214 pipeline->shaders[MESA_SHADER_FRAGMENT]);
1215 pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
1216 pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
1217 }
1218
1219 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1220
1221 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1222 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1223 if (create_feedback) {
1224 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1225
1226 assert(info->stageCount == create_feedback->pipelineStageCreationFeedbackCount);
1227 for (uint32_t i = 0; i < info->stageCount; i++) {
1228 gl_shader_stage s = vk_to_mesa_shader_stage(info->pStages[i].stage);
1229 create_feedback->pPipelineStageCreationFeedbacks[i] = stages[s].feedback;
1230 }
1231 }
1232
1233 return VK_SUCCESS;
1234
1235 fail:
1236 ralloc_free(pipeline_ctx);
1237
1238 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1239 if (pipeline->shaders[s])
1240 anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
1241 }
1242
1243 return result;
1244 }
1245
1246 VkResult
1247 anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
1248 struct anv_pipeline_cache *cache,
1249 const VkComputePipelineCreateInfo *info,
1250 const struct anv_shader_module *module,
1251 const char *entrypoint,
1252 const VkSpecializationInfo *spec_info)
1253 {
1254 VkPipelineCreationFeedbackEXT pipeline_feedback = {
1255 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1256 };
1257 int64_t pipeline_start = os_time_get_nano();
1258
1259 const struct brw_compiler *compiler =
1260 pipeline->device->instance->physicalDevice.compiler;
1261
1262 struct anv_pipeline_stage stage = {
1263 .stage = MESA_SHADER_COMPUTE,
1264 .module = module,
1265 .entrypoint = entrypoint,
1266 .spec_info = spec_info,
1267 .cache_key = {
1268 .stage = MESA_SHADER_COMPUTE,
1269 },
1270 .feedback = {
1271 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1272 },
1273 };
1274 anv_pipeline_hash_shader(stage.module,
1275 stage.entrypoint,
1276 MESA_SHADER_COMPUTE,
1277 stage.spec_info,
1278 stage.shader_sha1);
1279
1280 struct anv_shader_bin *bin = NULL;
1281
1282 populate_cs_prog_key(&pipeline->device->info, &stage.key.cs);
1283
1284 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1285
1286 anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
1287 bool cache_hit;
1288 bin = anv_device_search_for_kernel(pipeline->device, cache, &stage.cache_key,
1289 sizeof(stage.cache_key), &cache_hit);
1290
1291 if (bin == NULL) {
1292 int64_t stage_start = os_time_get_nano();
1293
1294 stage.bind_map = (struct anv_pipeline_bind_map) {
1295 .surface_to_descriptor = stage.surface_to_descriptor,
1296 .sampler_to_descriptor = stage.sampler_to_descriptor
1297 };
1298
1299 /* Set up a binding for the gl_NumWorkGroups */
1300 stage.bind_map.surface_count = 1;
1301 stage.bind_map.surface_to_descriptor[0] = (struct anv_pipeline_binding) {
1302 .set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
1303 };
1304
1305 void *mem_ctx = ralloc_context(NULL);
1306
1307 stage.nir = anv_pipeline_stage_get_nir(pipeline, cache, mem_ctx, &stage);
1308 if (stage.nir == NULL) {
1309 ralloc_free(mem_ctx);
1310 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1311 }
1312
1313 anv_pipeline_lower_nir(pipeline, mem_ctx, &stage, layout);
1314
1315 NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id,
1316 &stage.prog_data.cs);
1317
1318 const unsigned *shader_code =
1319 brw_compile_cs(compiler, pipeline->device, mem_ctx, &stage.key.cs,
1320 &stage.prog_data.cs, stage.nir, -1, NULL);
1321 if (shader_code == NULL) {
1322 ralloc_free(mem_ctx);
1323 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1324 }
1325
1326 const unsigned code_size = stage.prog_data.base.program_size;
1327 bin = anv_device_upload_kernel(pipeline->device, cache,
1328 &stage.cache_key, sizeof(stage.cache_key),
1329 shader_code, code_size,
1330 stage.nir->constant_data,
1331 stage.nir->constant_data_size,
1332 &stage.prog_data.base,
1333 sizeof(stage.prog_data.cs),
1334 NULL, &stage.bind_map);
1335 if (!bin) {
1336 ralloc_free(mem_ctx);
1337 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1338 }
1339
1340 ralloc_free(mem_ctx);
1341
1342 stage.feedback.duration = os_time_get_nano() - stage_start;
1343 }
1344
1345 if (cache_hit) {
1346 stage.feedback.flags |=
1347 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1348 pipeline_feedback.flags |=
1349 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1350 }
1351 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1352
1353 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1354 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1355 if (create_feedback) {
1356 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1357
1358 assert(create_feedback->pipelineStageCreationFeedbackCount == 1);
1359 create_feedback->pPipelineStageCreationFeedbacks[0] = stage.feedback;
1360 }
1361
1362 pipeline->active_stages = VK_SHADER_STAGE_COMPUTE_BIT;
1363 pipeline->shaders[MESA_SHADER_COMPUTE] = bin;
1364
1365 return VK_SUCCESS;
1366 }
1367
1368 /**
1369 * Copy pipeline state not marked as dynamic.
1370 * Dynamic state is pipeline state which hasn't been provided at pipeline
1371 * creation time, but is dynamically provided afterwards using various
1372 * vkCmdSet* functions.
1373 *
1374 * The set of state considered "non_dynamic" is determined by the pieces of
1375 * state that have their corresponding VkDynamicState enums omitted from
1376 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
1377 *
1378 * @param[out] pipeline Destination non_dynamic state.
1379 * @param[in] pCreateInfo Source of non_dynamic state to be copied.
1380 */
1381 static void
1382 copy_non_dynamic_state(struct anv_pipeline *pipeline,
1383 const VkGraphicsPipelineCreateInfo *pCreateInfo)
1384 {
1385 anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
1386 struct anv_subpass *subpass = pipeline->subpass;
1387
1388 pipeline->dynamic_state = default_dynamic_state;
1389
1390 if (pCreateInfo->pDynamicState) {
1391 /* Remove all of the states that are marked as dynamic */
1392 uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
1393 for (uint32_t s = 0; s < count; s++)
1394 states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
1395 }
1396
1397 struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
1398
1399 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1400 *
1401 * pViewportState is [...] NULL if the pipeline
1402 * has rasterization disabled.
1403 */
1404 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1405 assert(pCreateInfo->pViewportState);
1406
1407 dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
1408 if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
1409 typed_memcpy(dynamic->viewport.viewports,
1410 pCreateInfo->pViewportState->pViewports,
1411 pCreateInfo->pViewportState->viewportCount);
1412 }
1413
1414 dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
1415 if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
1416 typed_memcpy(dynamic->scissor.scissors,
1417 pCreateInfo->pViewportState->pScissors,
1418 pCreateInfo->pViewportState->scissorCount);
1419 }
1420 }
1421
1422 if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
1423 assert(pCreateInfo->pRasterizationState);
1424 dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
1425 }
1426
1427 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
1428 assert(pCreateInfo->pRasterizationState);
1429 dynamic->depth_bias.bias =
1430 pCreateInfo->pRasterizationState->depthBiasConstantFactor;
1431 dynamic->depth_bias.clamp =
1432 pCreateInfo->pRasterizationState->depthBiasClamp;
1433 dynamic->depth_bias.slope =
1434 pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
1435 }
1436
1437 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1438 *
1439 * pColorBlendState is [...] NULL if the pipeline has rasterization
1440 * disabled or if the subpass of the render pass the pipeline is
1441 * created against does not use any color attachments.
1442 */
1443 bool uses_color_att = false;
1444 for (unsigned i = 0; i < subpass->color_count; ++i) {
1445 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1446 uses_color_att = true;
1447 break;
1448 }
1449 }
1450
1451 if (uses_color_att &&
1452 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1453 assert(pCreateInfo->pColorBlendState);
1454
1455 if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
1456 typed_memcpy(dynamic->blend_constants,
1457 pCreateInfo->pColorBlendState->blendConstants, 4);
1458 }
1459
1460 /* If there is no depthstencil attachment, then don't read
1461 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
1462 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
1463 * no need to override the depthstencil defaults in
1464 * anv_pipeline::dynamic_state when there is no depthstencil attachment.
1465 *
1466 * Section 9.2 of the Vulkan 1.0.15 spec says:
1467 *
1468 * pDepthStencilState is [...] NULL if the pipeline has rasterization
1469 * disabled or if the subpass of the render pass the pipeline is created
1470 * against does not use a depth/stencil attachment.
1471 */
1472 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
1473 subpass->depth_stencil_attachment) {
1474 assert(pCreateInfo->pDepthStencilState);
1475
1476 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
1477 dynamic->depth_bounds.min =
1478 pCreateInfo->pDepthStencilState->minDepthBounds;
1479 dynamic->depth_bounds.max =
1480 pCreateInfo->pDepthStencilState->maxDepthBounds;
1481 }
1482
1483 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
1484 dynamic->stencil_compare_mask.front =
1485 pCreateInfo->pDepthStencilState->front.compareMask;
1486 dynamic->stencil_compare_mask.back =
1487 pCreateInfo->pDepthStencilState->back.compareMask;
1488 }
1489
1490 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
1491 dynamic->stencil_write_mask.front =
1492 pCreateInfo->pDepthStencilState->front.writeMask;
1493 dynamic->stencil_write_mask.back =
1494 pCreateInfo->pDepthStencilState->back.writeMask;
1495 }
1496
1497 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
1498 dynamic->stencil_reference.front =
1499 pCreateInfo->pDepthStencilState->front.reference;
1500 dynamic->stencil_reference.back =
1501 pCreateInfo->pDepthStencilState->back.reference;
1502 }
1503 }
1504
1505 pipeline->dynamic_state_mask = states;
1506 }
1507
1508 static void
1509 anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
1510 {
1511 #ifdef DEBUG
1512 struct anv_render_pass *renderpass = NULL;
1513 struct anv_subpass *subpass = NULL;
1514
1515 /* Assert that all required members of VkGraphicsPipelineCreateInfo are
1516 * present. See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
1517 */
1518 assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1519
1520 renderpass = anv_render_pass_from_handle(info->renderPass);
1521 assert(renderpass);
1522
1523 assert(info->subpass < renderpass->subpass_count);
1524 subpass = &renderpass->subpasses[info->subpass];
1525
1526 assert(info->stageCount >= 1);
1527 assert(info->pVertexInputState);
1528 assert(info->pInputAssemblyState);
1529 assert(info->pRasterizationState);
1530 if (!info->pRasterizationState->rasterizerDiscardEnable) {
1531 assert(info->pViewportState);
1532 assert(info->pMultisampleState);
1533
1534 if (subpass && subpass->depth_stencil_attachment)
1535 assert(info->pDepthStencilState);
1536
1537 if (subpass && subpass->color_count > 0) {
1538 bool all_color_unused = true;
1539 for (int i = 0; i < subpass->color_count; i++) {
1540 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
1541 all_color_unused = false;
1542 }
1543 /* pColorBlendState is ignored if the pipeline has rasterization
1544 * disabled or if the subpass of the render pass the pipeline is
1545 * created against does not use any color attachments.
1546 */
1547 assert(info->pColorBlendState || all_color_unused);
1548 }
1549 }
1550
1551 for (uint32_t i = 0; i < info->stageCount; ++i) {
1552 switch (info->pStages[i].stage) {
1553 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
1554 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
1555 assert(info->pTessellationState);
1556 break;
1557 default:
1558 break;
1559 }
1560 }
1561 #endif
1562 }
1563
1564 /**
1565 * Calculate the desired L3 partitioning based on the current state of the
1566 * pipeline. For now this simply returns the conservative defaults calculated
1567 * by get_default_l3_weights(), but we could probably do better by gathering
1568 * more statistics from the pipeline state (e.g. guess of expected URB usage
1569 * and bound surfaces), or by using feedback from performance counters.
1570 */
1571 void
1572 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
1573 {
1574 const struct gen_device_info *devinfo = &pipeline->device->info;
1575
1576 const struct gen_l3_weights w =
1577 gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);
1578
1579 pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
1580 pipeline->urb.total_size =
1581 gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
1582 }
1583
1584 VkResult
1585 anv_pipeline_init(struct anv_pipeline *pipeline,
1586 struct anv_device *device,
1587 struct anv_pipeline_cache *cache,
1588 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1589 const VkAllocationCallbacks *alloc)
1590 {
1591 VkResult result;
1592
1593 anv_pipeline_validate_create_info(pCreateInfo);
1594
1595 if (alloc == NULL)
1596 alloc = &device->alloc;
1597
1598 pipeline->device = device;
1599
1600 ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
1601 assert(pCreateInfo->subpass < render_pass->subpass_count);
1602 pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];
1603
1604 result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
1605 if (result != VK_SUCCESS)
1606 return result;
1607
1608 pipeline->batch.alloc = alloc;
1609 pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
1610 pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
1611 pipeline->batch.relocs = &pipeline->batch_relocs;
1612 pipeline->batch.status = VK_SUCCESS;
1613
1614 copy_non_dynamic_state(pipeline, pCreateInfo);
1615 pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
1616 pCreateInfo->pRasterizationState->depthClampEnable;
1617
1618 /* Previously we enabled depth clipping when !depthClampEnable.
1619 * DepthClipStateCreateInfo now makes depth clipping explicit so if the
1620 * clipping info is available, use its enable value to determine clipping,
1621 * otherwise fall back to the previous !depthClampEnable logic.
1622 */
1623 const VkPipelineRasterizationDepthClipStateCreateInfoEXT *clip_info =
1624 vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
1625 PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
1626 pipeline->depth_clip_enable = clip_info ? clip_info->depthClipEnable : !pipeline->depth_clamp_enable;
1627
1628 pipeline->sample_shading_enable = pCreateInfo->pMultisampleState &&
1629 pCreateInfo->pMultisampleState->sampleShadingEnable;
1630
1631 pipeline->needs_data_cache = false;
1632
1633 /* When we free the pipeline, we detect stages based on the NULL status
1634 * of various prog_data pointers. Make them NULL by default.
1635 */
1636 memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
1637
1638 result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
1639 if (result != VK_SUCCESS) {
1640 anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
1641 return result;
1642 }
1643
1644 assert(pipeline->shaders[MESA_SHADER_VERTEX]);
1645
1646 anv_pipeline_setup_l3_config(pipeline, false);
1647
1648 const VkPipelineVertexInputStateCreateInfo *vi_info =
1649 pCreateInfo->pVertexInputState;
1650
1651 const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;
1652
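/* A vertex buffer binding only counts as used if the vertex shader
 * actually reads one of the attributes sourced from it.
 */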
1653 pipeline->vb_used = 0;
1654 for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
1655 const VkVertexInputAttributeDescription *desc =
1656 &vi_info->pVertexAttributeDescriptions[i];
1657
1658 if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
1659 pipeline->vb_used |= 1 << desc->binding;
1660 }
1661
1662 for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
1663 const VkVertexInputBindingDescription *desc =
1664 &vi_info->pVertexBindingDescriptions[i];
1665
1666 pipeline->vb[desc->binding].stride = desc->stride;
1667
1668 /* Step rate is programmed per vertex element (attribute), not
1669 * binding. Set up a map of which bindings step per instance, for
1670 * reference by vertex element setup. */
1671 switch (desc->inputRate) {
1672 default:
1673 case VK_VERTEX_INPUT_RATE_VERTEX:
1674 pipeline->vb[desc->binding].instanced = false;
1675 break;
1676 case VK_VERTEX_INPUT_RATE_INSTANCE:
1677 pipeline->vb[desc->binding].instanced = true;
1678 break;
1679 }
1680
1681 pipeline->vb[desc->binding].instance_divisor = 1;
1682 }
1683
1684 const VkPipelineVertexInputDivisorStateCreateInfoEXT *vi_div_state =
1685 vk_find_struct_const(vi_info->pNext,
1686 PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
1687 if (vi_div_state) {
1688 for (uint32_t i = 0; i < vi_div_state->vertexBindingDivisorCount; i++) {
1689 const VkVertexInputBindingDivisorDescriptionEXT *desc =
1690 &vi_div_state->pVertexBindingDivisors[i];
1691
1692 pipeline->vb[desc->binding].instance_divisor = desc->divisor;
1693 }
1694 }
1695
1696 /* Our implementation of VK_KHR_multiview uses instancing to draw the
1697 * different views. If the client asks for instancing, we need to multiply
1698 * the instance divisor by the number of views to ensure that we repeat the
1699 * client's per-instance data once for each view.
1700 */
1701 if (pipeline->subpass->view_mask) {
1702 const uint32_t view_count = anv_subpass_view_count(pipeline->subpass);
1703 for (uint32_t vb = 0; vb < MAX_VBS; vb++) {
1704 if (pipeline->vb[vb].instanced)
1705 pipeline->vb[vb].instance_divisor *= view_count;
1706 }
1707 }
1708
1709 const VkPipelineInputAssemblyStateCreateInfo *ia_info =
1710 pCreateInfo->pInputAssemblyState;
1711 const VkPipelineTessellationStateCreateInfo *tess_info =
1712 pCreateInfo->pTessellationState;
1713 pipeline->primitive_restart = ia_info->primitiveRestartEnable;
1714
1715 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
1716 pipeline->topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
1717 else
1718 pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
1719
1720 return VK_SUCCESS;
1721 }