vk/0.210.0: Delete three no longer existent entrypoints
[mesa.git] src/vulkan/anv_pipeline.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "brw_nir.h"
#include "anv_nir.h"
#include "glsl/nir/nir_spirv.h"

/* Needed for SWIZZLE macros */
#include "program/prog_instruction.h"

// Shader functions

VkResult anv_CreateShaderModule(
    VkDevice                                    _device,
    const VkShaderModuleCreateInfo*             pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkShaderModule*                             pShaderModule)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   module = anv_alloc2(&device->alloc, pAllocator,
                       sizeof(*module) + pCreateInfo->codeSize, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

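   /* The SPIR-V code is stored inline, immediately after the module struct,
    * in the same allocation (note the sizeof(*module) + codeSize above), so
    * destroying the module is a single free.
    */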
   module->nir = NULL;
   module->size = pCreateInfo->codeSize;
   memcpy(module->data, pCreateInfo->pCode, module->size);

   *pShaderModule = anv_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void anv_DestroyShaderModule(
    VkDevice                                    _device,
    VkShaderModule                              _module,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader_module, module, _module);

   anv_free2(&device->alloc, pAllocator, module);
}

VkResult anv_CreateShader(
    VkDevice                                    _device,
    const VkShaderCreateInfo*                   pCreateInfo,
    VkShader*                                   pShader)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->module);
   struct anv_shader *shader;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   const char *name = pCreateInfo->pName ? pCreateInfo->pName : "main";
   size_t name_len = strlen(name);

   shader = anv_alloc(&device->alloc, sizeof(*shader) + name_len + 1, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (shader == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   shader->module = module;
   memcpy(shader->entrypoint, name, name_len + 1);

   *pShader = anv_shader_to_handle(shader);

   return VK_SUCCESS;
}

void anv_DestroyShader(
    VkDevice                                    _device,
    VkShader                                    _shader)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader, shader, _shader);

   anv_free(&device->alloc, shader);
}

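/* First word of a SPIR-V binary. A valid module starts with a five-word
 * header (magic number, version, generator ID, value-ID bound, reserved
 * schema word), so checking this word is a cheap sanity test that the
 * client actually handed us SPIR-V.
 */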
#define SPIR_V_MAGIC_NUMBER 0x07230203

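/* Tessellation is not yet supported anywhere in the driver, so the tess
 * stages deliberately map to -1 here and must never be looked up.
 */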
static const gl_shader_stage vk_shader_stage_to_mesa_stage[] = {
   [VK_SHADER_STAGE_VERTEX] = MESA_SHADER_VERTEX,
   [VK_SHADER_STAGE_TESS_CONTROL] = -1,
   [VK_SHADER_STAGE_TESS_EVALUATION] = -1,
   [VK_SHADER_STAGE_GEOMETRY] = MESA_SHADER_GEOMETRY,
   [VK_SHADER_STAGE_FRAGMENT] = MESA_SHADER_FRAGMENT,
   [VK_SHADER_STAGE_COMPUTE] = MESA_SHADER_COMPUTE,
};

bool
anv_is_scalar_shader_stage(const struct brw_compiler *compiler,
                           VkShaderStage stage)
{
   return compiler->scalar_stage[vk_shader_stage_to_mesa_stage[stage]];
}

/* Eventually, this will become part of anv_CreateShader. Unfortunately,
 * we can't do that yet because we don't have the ability to copy nir.
 */
static nir_shader *
anv_shader_compile_to_nir(struct anv_device *device,
                          struct anv_shader *shader, VkShaderStage vk_stage)
{
   if (strcmp(shader->entrypoint, "main") != 0) {
      anv_finishme("Multiple shaders per module not really supported");
   }

   gl_shader_stage stage = vk_shader_stage_to_mesa_stage[vk_stage];
   const struct brw_compiler *compiler =
      device->instance->physicalDevice.compiler;
   const nir_shader_compiler_options *nir_options =
      compiler->glsl_compiler_options[stage].NirOptions;

   nir_shader *nir;
   if (shader->module->nir) {
      /* Some things such as our meta clear/blit code will give us a NIR
       * shader directly. In that case, we ignore the SPIR-V entirely and
       * use the NIR shader as-is.
       */
      nir = shader->module->nir;
      nir->options = nir_options;
   } else {
      uint32_t *spirv = (uint32_t *) shader->module->data;
      assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
      assert(shader->module->size % 4 == 0);

      nir = spirv_to_nir(spirv, shader->module->size / 4, stage, nir_options);
   }
   nir_validate_shader(nir);

   /* Vulkan uses the separate-shader linking model */
   nir->info.separate_shader = true;

   /* Make sure the provided shader has exactly one entrypoint and that the
    * name matches the name that came in from the VkShader.
    */
   nir_function_impl *entrypoint = NULL;
   nir_foreach_overload(nir, overload) {
      if (strcmp(shader->entrypoint, overload->function->name) == 0 &&
          overload->impl) {
         assert(entrypoint == NULL);
         entrypoint = overload->impl;
      }
   }
   assert(entrypoint != NULL);

   nir = brw_preprocess_nir(nir, compiler->scalar_stage[stage]);

   nir_shader_gather_info(nir, entrypoint);

   return nir;
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   *pPipelineCache = (VkPipelineCache)1;

   stub_return(VK_SUCCESS);
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
}

VkResult anv_GetPipelineCacheData(
    VkDevice                                    device,
    VkPipelineCache                             pipelineCache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_MergePipelineCaches(
    VkDevice                                    device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   stub_return(VK_UNSUPPORTED);
}

void anv_DestroyPipeline(
    VkDevice                                    _device,
    VkPipeline                                  _pipeline,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   anv_reloc_list_finish(&pipeline->batch_relocs,
                         pAllocator ? pAllocator : &device->alloc);
   anv_state_stream_finish(&pipeline->program_stream);
   if (pipeline->blend_state.map)
      anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
   anv_free2(&device->alloc, pAllocator, pipeline);
}

static const uint32_t vk_to_gen_primitive_type[] = {
   [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
   /* [VK_PRIMITIVE_TOPOLOGY_PATCH_LIST] = _3DPRIM_PATCHLIST_1 */
};

static void
populate_sampler_prog_key(const struct brw_device_info *devinfo,
                          struct brw_sampler_prog_key_data *key)
{
   /* XXX: Handle texture swizzle on HSW- */
   for (int i = 0; i < MAX_SAMPLERS; i++) {
      /* Assume color sampler, no swizzling. (Works for BDW+) */
      key->swizzles[i] = SWIZZLE_XYZW;
   }
}

static void
populate_vs_prog_key(const struct brw_device_info *devinfo,
                     struct brw_vs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* XXX: Handle vertex input work-arounds */

   /* XXX: Handle sampler_prog_key */
}

static void
populate_gs_prog_key(const struct brw_device_info *devinfo,
                     struct brw_gs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

static void
populate_wm_prog_key(const struct brw_device_info *devinfo,
                     const VkGraphicsPipelineCreateInfo *info,
                     struct brw_wm_prog_key *key)
{
   ANV_FROM_HANDLE(anv_render_pass, render_pass, info->renderPass);

   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* TODO: Fill out key->input_slots_valid */

   /* Vulkan doesn't specify a default */
   key->high_quality_derivatives = false;

   /* XXX Vulkan doesn't appear to specify */
   key->clamp_fragment_color = false;

   /* Vulkan always specifies upper-left coordinates */
   key->drawable_height = 0;
   key->render_to_fbo = false;

   key->nr_color_regions = render_pass->subpasses[info->subpass].color_count;

   key->replicate_alpha = key->nr_color_regions > 1 &&
                          info->pMultisampleState &&
                          info->pMultisampleState->alphaToCoverageEnable;

   if (info->pMultisampleState &&
       info->pMultisampleState->rasterizationSamples > 1) {
      /* We should probably pull this out of the shader, but it's fairly
       * harmless to compute it and then let dead-code take care of it.
       */
      key->persample_shading = info->pMultisampleState->sampleShadingEnable;
      if (key->persample_shading)
         key->persample_2x = info->pMultisampleState->rasterizationSamples == 2;

      key->compute_pos_offset = info->pMultisampleState->sampleShadingEnable;
      key->compute_sample_id = info->pMultisampleState->sampleShadingEnable;
   }
}

static void
populate_cs_prog_key(const struct brw_device_info *devinfo,
                     struct brw_cs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

static nir_shader *
anv_pipeline_compile(struct anv_pipeline *pipeline,
                     struct anv_shader *shader,
                     VkShaderStage stage,
                     struct brw_stage_prog_data *prog_data)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;

   nir_shader *nir = anv_shader_compile_to_nir(pipeline->device, shader, stage);
   if (nir == NULL)
      return NULL;

   anv_nir_lower_push_constants(nir,
                                anv_is_scalar_shader_stage(compiler, stage));

   /* Figure out the number of parameters */
   prog_data->nr_params = 0;

   if (nir->num_uniforms > 0) {
      /* If the shader uses any push constants at all, we'll just give
       * them the maximum possible number
       */
      prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
   }

   if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets)
      prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2;

   if (prog_data->nr_params > 0) {
      /* XXX: I think we're leaking this */
      prog_data->param = (const gl_constant_value **)
         malloc(prog_data->nr_params * sizeof(gl_constant_value *));

      /* We now set the param values to be offsets into an
       * anv_push_constants structure. Since the compiler doesn't
       * actually dereference any of the gl_constant_value pointers in the
       * params array, it doesn't really matter what we put here.
       */
      struct anv_push_constants *null_data = NULL;
      if (nir->num_uniforms > 0) {
         /* Fill out the push constants section of the param array */
         for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++)
            prog_data->param[i] = (const gl_constant_value *)
               &null_data->client_data[i * sizeof(float)];
      }
   }

   /* Set up dynamic offsets */
   anv_nir_apply_dynamic_offsets(pipeline, nir, prog_data);

   /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
   anv_nir_apply_pipeline_layout(nir, pipeline->layout);

   /* All binding table offsets provided by apply_pipeline_layout() are
    * relative to the start of the binding table (plus MAX_RTS for the
    * fragment stage, whose first entries are reserved for render targets).
    */
   unsigned bias = stage == VK_SHADER_STAGE_FRAGMENT ? MAX_RTS : 0;
   prog_data->binding_table.size_bytes = 0;
   prog_data->binding_table.texture_start = bias;
   prog_data->binding_table.ubo_start = bias;
   prog_data->binding_table.ssbo_start = bias;
   prog_data->binding_table.image_start = bias;

   /* Finish the optimization and compilation process */
   nir = brw_lower_nir(nir, &pipeline->device->info, NULL,
                       anv_is_scalar_shader_stage(compiler, stage));

   /* nir_lower_io will only handle the push constants; we need to set this
    * to the full number of possible uniforms.
    */
   nir->num_uniforms = prog_data->nr_params;

   return nir;
}

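/* Copy a compiled kernel into the pipeline's program stream and return its
 * offset within the stream. The 64-byte alignment presumably matches the
 * granularity the hardware expects for kernel start pointers.
 */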
static uint32_t
anv_pipeline_upload_kernel(struct anv_pipeline *pipeline,
                           const void *data, size_t size)
{
   assert(size < pipeline->program_stream.block_pool->block_size);

   struct anv_state state =
      anv_state_stream_alloc(&pipeline->program_stream, size, 64);

   memcpy(state.map, data, size);

   return state.offset;
}

static void
anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
                                VkShaderStage stage,
                                struct brw_stage_prog_data *prog_data)
{
   struct brw_device_info *devinfo = &pipeline->device->info;
   uint32_t max_threads[] = {
      [VK_SHADER_STAGE_VERTEX] = devinfo->max_vs_threads,
      [VK_SHADER_STAGE_TESS_CONTROL] = 0,
      [VK_SHADER_STAGE_TESS_EVALUATION] = 0,
      [VK_SHADER_STAGE_GEOMETRY] = devinfo->max_gs_threads,
      [VK_SHADER_STAGE_FRAGMENT] = devinfo->max_wm_threads,
      [VK_SHADER_STAGE_COMPUTE] = devinfo->max_cs_threads,
   };

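   /* Scratch space is allocated per hardware thread, so each stage reserves
    * its per-thread scratch size times the maximum number of threads that
    * can be in flight for that stage, packed back-to-back at 1 KB alignment.
    */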
   pipeline->prog_data[stage] = prog_data;
   pipeline->active_stages |= 1 << stage;
   pipeline->scratch_start[stage] = pipeline->total_scratch;
   pipeline->total_scratch =
      align_u32(pipeline->total_scratch, 1024) +
      prog_data->total_scratch * max_threads[stage];
}

static VkResult
anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader *shader)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct brw_vs_prog_data *prog_data = &pipeline->vs_prog_data;
   struct brw_vs_prog_key key;

   populate_vs_prog_key(&pipeline->device->info, &key);

   /* TODO: Look up shader in cache */

   memset(prog_data, 0, sizeof(*prog_data));

   nir_shader *nir = anv_pipeline_compile(pipeline, shader,
                                          VK_SHADER_STAGE_VERTEX,
                                          &prog_data->base.base);
   if (nir == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   void *mem_ctx = ralloc_context(NULL);

   if (shader->module->nir == NULL)
      ralloc_steal(mem_ctx, nir);

   prog_data->inputs_read = nir->info.inputs_read;
   pipeline->writes_point_size =
      (nir->info.outputs_written & (1ull << VARYING_SLOT_PSIZ)) != 0;

   brw_compute_vue_map(&pipeline->device->info,
                       &prog_data->base.vue_map,
                       nir->info.outputs_written,
                       nir->info.separate_shader);

   unsigned code_size;
   const unsigned *shader_code =
      brw_compile_vs(compiler, NULL, mem_ctx, &key, prog_data, nir,
                     NULL, false, -1, &code_size, NULL);
   if (shader_code == NULL) {
      ralloc_free(mem_ctx);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   const uint32_t offset =
      anv_pipeline_upload_kernel(pipeline, shader_code, code_size);
   if (prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8) {
      pipeline->vs_simd8 = offset;
      pipeline->vs_vec4 = NO_KERNEL;
   } else {
      pipeline->vs_simd8 = NO_KERNEL;
      pipeline->vs_vec4 = offset;
   }

   ralloc_free(mem_ctx);

   anv_pipeline_add_compiled_stage(pipeline, VK_SHADER_STAGE_VERTEX,
                                   &prog_data->base.base);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader *shader)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct brw_gs_prog_data *prog_data = &pipeline->gs_prog_data;
   struct brw_gs_prog_key key;

   populate_gs_prog_key(&pipeline->device->info, &key);

   /* TODO: Look up shader in cache */

   memset(prog_data, 0, sizeof(*prog_data));

   nir_shader *nir = anv_pipeline_compile(pipeline, shader,
                                          VK_SHADER_STAGE_GEOMETRY,
                                          &prog_data->base.base);
   if (nir == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   void *mem_ctx = ralloc_context(NULL);

   if (shader->module->nir == NULL)
      ralloc_steal(mem_ctx, nir);

   brw_compute_vue_map(&pipeline->device->info,
                       &prog_data->base.vue_map,
                       nir->info.outputs_written,
                       nir->info.separate_shader);

   unsigned code_size;
   const unsigned *shader_code =
      brw_compile_gs(compiler, NULL, mem_ctx, &key, prog_data, nir,
                     NULL, -1, &code_size, NULL);
   if (shader_code == NULL) {
      ralloc_free(mem_ctx);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   /* TODO: SIMD8 GS */
   pipeline->gs_vec4 =
      anv_pipeline_upload_kernel(pipeline, shader_code, code_size);
   pipeline->gs_vertex_count = nir->info.gs.vertices_in;

   ralloc_free(mem_ctx);

   anv_pipeline_add_compiled_stage(pipeline, VK_SHADER_STAGE_GEOMETRY,
                                   &prog_data->base.base);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader *shader)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct brw_wm_prog_data *prog_data = &pipeline->wm_prog_data;
   struct brw_wm_prog_key key;

   populate_wm_prog_key(&pipeline->device->info, info, &key);

   if (pipeline->use_repclear)
      key.nr_color_regions = 1;

   /* TODO: Look up shader in cache */

   memset(prog_data, 0, sizeof(*prog_data));

   prog_data->binding_table.render_target_start = 0;

   nir_shader *nir = anv_pipeline_compile(pipeline, shader,
                                          VK_SHADER_STAGE_FRAGMENT,
                                          &prog_data->base);
   if (nir == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   void *mem_ctx = ralloc_context(NULL);

   if (shader->module->nir == NULL)
      ralloc_steal(mem_ctx, nir);

   unsigned code_size;
   const unsigned *shader_code =
      brw_compile_fs(compiler, NULL, mem_ctx, &key, prog_data, nir,
                     NULL, -1, -1, pipeline->use_repclear, &code_size, NULL);
   if (shader_code == NULL) {
      ralloc_free(mem_ctx);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   uint32_t offset = anv_pipeline_upload_kernel(pipeline,
                                                shader_code, code_size);
   if (prog_data->no_8)
      pipeline->ps_simd8 = NO_KERNEL;
   else
      pipeline->ps_simd8 = offset;

   if (prog_data->no_8 || prog_data->prog_offset_16) {
      pipeline->ps_simd16 = offset + prog_data->prog_offset_16;
   } else {
      pipeline->ps_simd16 = NO_KERNEL;
   }

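   /* The PS dispatch state can reference more than one kernel. Route
    * whichever of the SIMD8/SIMD16 variants the compiler actually produced
    * into the KSP0/KSP2 slots along with their GRF start registers.
    */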
   pipeline->ps_ksp2 = 0;
   pipeline->ps_grf_start2 = 0;
   if (pipeline->ps_simd8 != NO_KERNEL) {
      pipeline->ps_ksp0 = pipeline->ps_simd8;
      pipeline->ps_grf_start0 = prog_data->base.dispatch_grf_start_reg;
      if (pipeline->ps_simd16 != NO_KERNEL) {
         pipeline->ps_ksp2 = pipeline->ps_simd16;
         pipeline->ps_grf_start2 = prog_data->dispatch_grf_start_reg_16;
      }
   } else if (pipeline->ps_simd16 != NO_KERNEL) {
      pipeline->ps_ksp0 = pipeline->ps_simd16;
      pipeline->ps_grf_start0 = prog_data->dispatch_grf_start_reg_16;
   }

   ralloc_free(mem_ctx);

   anv_pipeline_add_compiled_stage(pipeline, VK_SHADER_STAGE_FRAGMENT,
                                   &prog_data->base);

   return VK_SUCCESS;
}

VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader *shader)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
   struct brw_cs_prog_key key;

   populate_cs_prog_key(&pipeline->device->info, &key);

   /* TODO: Look up shader in cache */

   memset(prog_data, 0, sizeof(*prog_data));

   nir_shader *nir = anv_pipeline_compile(pipeline, shader,
                                          VK_SHADER_STAGE_COMPUTE,
                                          &prog_data->base);
   if (nir == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   void *mem_ctx = ralloc_context(NULL);

   if (shader->module->nir == NULL)
      ralloc_steal(mem_ctx, nir);

   unsigned code_size;
   const unsigned *shader_code =
      brw_compile_cs(compiler, NULL, mem_ctx, &key, prog_data, nir,
                     -1, &code_size, NULL);
   if (shader_code == NULL) {
      ralloc_free(mem_ctx);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   pipeline->cs_simd = anv_pipeline_upload_kernel(pipeline,
                                                  shader_code, code_size);
   ralloc_free(mem_ctx);

   anv_pipeline_add_compiled_stage(pipeline, VK_SHADER_STAGE_COMPUTE,
                                   &prog_data->base);

   return VK_SUCCESS;
}

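/* Bytes reserved at the front of the URB for push constants; see the URB
 * layout at the bottom of gen7_compute_urb_partition() below.
 */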
static const int gen8_push_size = 32 * 1024;

static void
gen7_compute_urb_partition(struct anv_pipeline *pipeline)
{
   const struct brw_device_info *devinfo = &pipeline->device->info;
   bool vs_present = pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT;
   unsigned vs_size =
      vs_present ? pipeline->vs_prog_data.base.urb_entry_size : 1;
   unsigned vs_entry_size_bytes = vs_size * 64;
   bool gs_present = pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT;
   unsigned gs_size =
      gs_present ? pipeline->gs_prog_data.base.urb_entry_size : 1;
   unsigned gs_entry_size_bytes = gs_size * 64;

   /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
    *
    *     VS Number of URB Entries must be divisible by 8 if the VS URB Entry
    *     Allocation Size is less than 9 512-bit URB entries.
    *
    * Similar text exists for GS.
    */
   unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
   unsigned gs_granularity = (gs_size < 9) ? 8 : 1;

   /* URB allocations must be done in 8k chunks. */
   unsigned chunk_size_bytes = 8192;

   /* Determine the size of the URB in chunks. */
   unsigned urb_chunks = devinfo->urb.size * 1024 / chunk_size_bytes;

   /* Reserve space for push constants */
   unsigned push_constant_bytes = gen8_push_size;
   unsigned push_constant_chunks =
      push_constant_bytes / chunk_size_bytes;
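   /* For example, a hypothetical part with a 128 KB URB would have
    * 128 KB / 8 KB = 16 chunks, of which the 32 KB push constant
    * reservation consumes 4, leaving 12 chunks to divide between VS and GS.
    */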

   /* Initially, assign each stage the minimum amount of URB space it needs,
    * and make a note of how much additional space it "wants" (the amount of
    * additional space it could actually make use of).
    */

   /* VS has a lower limit on the number of URB entries */
   unsigned vs_chunks =
      ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes,
            chunk_size_bytes) / chunk_size_bytes;
   unsigned vs_wants =
      ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes,
            chunk_size_bytes) / chunk_size_bytes - vs_chunks;

   unsigned gs_chunks = 0;
   unsigned gs_wants = 0;
   if (gs_present) {
      /* There are two constraints on the minimum amount of URB space we can
       * allocate:
       *
       * (1) We need room for at least 2 URB entries, since we always operate
       * the GS in DUAL_OBJECT mode.
       *
       * (2) We can't allocate less than gs_granularity entries.
       */
      gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
                        chunk_size_bytes) / chunk_size_bytes;
      gs_wants =
         ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes,
               chunk_size_bytes) / chunk_size_bytes - gs_chunks;
   }

   /* There should always be enough URB space to satisfy the minimum
    * requirements of each stage.
    */
   unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
   assert(total_needs <= urb_chunks);

   /* Mete out remaining space (if any) in proportion to "wants". */
   unsigned total_wants = vs_wants + gs_wants;
   unsigned remaining_space = urb_chunks - total_needs;
   if (remaining_space > total_wants)
      remaining_space = total_wants;
   if (remaining_space > 0) {
      unsigned vs_additional = (unsigned)
         round(vs_wants * (((double) remaining_space) / total_wants));
      vs_chunks += vs_additional;
      remaining_space -= vs_additional;
      gs_chunks += remaining_space;
   }

   /* Sanity check that we haven't over-allocated. */
   assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);

   /* Finally, compute the number of entries that can fit in the space
    * allocated to each stage.
    */
   unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
   unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;

   /* Since we rounded up when computing *_wants, this may be slightly more
    * than the maximum allowed amount, so correct for that.
    */
   nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries);
   nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries);

   /* Ensure that we program a multiple of the granularity. */
   nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
   nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);

   /* Finally, sanity check to make sure we have at least the minimum number
    * of entries needed for each stage.
    */
   assert(nr_vs_entries >= devinfo->urb.min_vs_entries);
   if (gs_present)
      assert(nr_gs_entries >= 2);

   /* Lay out the URB in the following order:
    * - push constants
    * - VS
    * - GS
    */
   pipeline->urb.vs_start = push_constant_chunks;
   pipeline->urb.vs_size = vs_size;
   pipeline->urb.nr_vs_entries = nr_vs_entries;

   pipeline->urb.gs_start = push_constant_chunks + vs_chunks;
   pipeline->urb.gs_size = gs_size;
   pipeline->urb.nr_gs_entries = nr_gs_entries;
}

static void
anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline,
                                const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
   anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
   ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
   struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];

   pipeline->dynamic_state = default_dynamic_state;

   if (pCreateInfo->pDynamicState) {
      /* Remove all of the states that are marked as dynamic */
      uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
      for (uint32_t s = 0; s < count; s++)
         states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
   }

   struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;

   dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
   if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      typed_memcpy(dynamic->viewport.viewports,
                   pCreateInfo->pViewportState->pViewports,
                   pCreateInfo->pViewportState->viewportCount);
   }

   dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
   if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      typed_memcpy(dynamic->scissor.scissors,
                   pCreateInfo->pViewportState->pScissors,
                   pCreateInfo->pViewportState->scissorCount);
   }

   if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
   }

   if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->depth_bias.bias =
         pCreateInfo->pRasterizationState->depthBiasConstantFactor;
      dynamic->depth_bias.clamp =
         pCreateInfo->pRasterizationState->depthBiasClamp;
      dynamic->depth_bias.slope =
         pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
   }

   if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) {
      assert(pCreateInfo->pColorBlendState);
      typed_memcpy(dynamic->blend_constants,
                   pCreateInfo->pColorBlendState->blendConstants, 4);
   }

   /* If there is no depthstencil attachment, then don't read
    * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
    * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
    * no need to override the depthstencil defaults in
    * anv_pipeline::dynamic_state when there is no depthstencil attachment.
    *
    * From the Vulkan spec (20 Oct 2015, git-aa308cb):
    *
    *    pDepthStencilState [...] may only be NULL if renderPass and subpass
    *    specify a subpass that has no depth/stencil attachment.
    */
   if (subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
      if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
         assert(pCreateInfo->pDepthStencilState);
         dynamic->depth_bounds.min =
            pCreateInfo->pDepthStencilState->minDepthBounds;
         dynamic->depth_bounds.max =
            pCreateInfo->pDepthStencilState->maxDepthBounds;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
         assert(pCreateInfo->pDepthStencilState);
         dynamic->stencil_compare_mask.front =
            pCreateInfo->pDepthStencilState->front.compareMask;
         dynamic->stencil_compare_mask.back =
            pCreateInfo->pDepthStencilState->back.compareMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
         assert(pCreateInfo->pDepthStencilState);
         dynamic->stencil_write_mask.front =
            pCreateInfo->pDepthStencilState->front.writeMask;
         dynamic->stencil_write_mask.back =
            pCreateInfo->pDepthStencilState->back.writeMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
         assert(pCreateInfo->pDepthStencilState);
         dynamic->stencil_reference.front =
            pCreateInfo->pDepthStencilState->front.reference;
         dynamic->stencil_reference.back =
            pCreateInfo->pDepthStencilState->back.reference;
      }
   }

   pipeline->dynamic_state_mask = states;
}

static void
anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
{
   struct anv_render_pass *renderpass = NULL;
   struct anv_subpass *subpass = NULL;

   /* Assert that all required members of VkGraphicsPipelineCreateInfo are
    * present, as explained by the Vulkan spec (20 Oct 2015, git-aa308cb),
    * Section 4.2 Graphics Pipeline.
    */
   assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   renderpass = anv_render_pass_from_handle(info->renderPass);
   assert(renderpass);

   if (renderpass != &anv_meta_dummy_renderpass) {
      assert(info->subpass < renderpass->subpass_count);
      subpass = &renderpass->subpasses[info->subpass];
   }

   assert(info->stageCount >= 1);
   assert(info->pVertexInputState);
   assert(info->pInputAssemblyState);
   assert(info->pViewportState);
   assert(info->pRasterizationState);
   assert(info->pMultisampleState);

   if (subpass && subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED)
      assert(info->pDepthStencilState);

   if (subpass && subpass->color_count > 0)
      assert(info->pColorBlendState);

   for (uint32_t i = 0; i < info->stageCount; ++i) {
      switch (info->pStages[i].stage) {
      case VK_SHADER_STAGE_TESS_CONTROL:
      case VK_SHADER_STAGE_TESS_EVALUATION:
         assert(info->pTessellationState);
         break;
      default:
         break;
      }
   }
}

VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const struct anv_graphics_pipeline_create_info *extra,
                  const VkAllocationCallbacks *alloc)
{
   anv_validate {
      anv_pipeline_validate_create_info(pCreateInfo);
   }

   if (alloc == NULL)
      alloc = &device->alloc;

   pipeline->device = device;
   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);

   anv_reloc_list_init(&pipeline->batch_relocs, alloc);
   /* TODO: Handle allocation fail */

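   /* The pipeline embeds a small fixed-size batch (batch_data) into which
    * the gen-specific setup code pre-bakes the pipeline's state packets,
    * so that binding the pipeline later can simply copy this batch into the
    * command buffer.
    */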
   pipeline->batch.alloc = alloc;
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
   pipeline->batch.relocs = &pipeline->batch_relocs;

   anv_state_stream_init(&pipeline->program_stream,
                         &device->instruction_block_pool);

   anv_pipeline_init_dynamic_state(pipeline, pCreateInfo);

   if (pCreateInfo->pTessellationState)
      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO");
   if (pCreateInfo->pMultisampleState &&
       pCreateInfo->pMultisampleState->rasterizationSamples > 1)
      anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO");

   pipeline->use_repclear = extra && extra->use_repclear;
   pipeline->writes_point_size = false;

   /* When we free the pipeline, we detect stages based on the NULL status
    * of various prog_data pointers. Make them NULL by default.
    */
   memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data));
   memset(pipeline->scratch_start, 0, sizeof(pipeline->scratch_start));

   pipeline->vs_simd8 = NO_KERNEL;
   pipeline->vs_vec4 = NO_KERNEL;
   pipeline->gs_vec4 = NO_KERNEL;

   pipeline->active_stages = 0;
   pipeline->total_scratch = 0;

   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      ANV_FROM_HANDLE(anv_shader, shader, pCreateInfo->pStages[i].shader);

      switch (pCreateInfo->pStages[i].stage) {
      case VK_SHADER_STAGE_VERTEX:
         anv_pipeline_compile_vs(pipeline, pCreateInfo, shader);
         break;
      case VK_SHADER_STAGE_GEOMETRY:
         anv_pipeline_compile_gs(pipeline, pCreateInfo, shader);
         break;
      case VK_SHADER_STAGE_FRAGMENT:
         anv_pipeline_compile_fs(pipeline, pCreateInfo, shader);
         break;
      default:
         anv_finishme("Unsupported shader stage");
      }
   }

   if (!(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT)) {
      /* The vertex stage is only optional if disable_vs is set */
      assert(extra && extra->disable_vs);
      memset(&pipeline->vs_prog_data, 0, sizeof(pipeline->vs_prog_data));
   }

   gen7_compute_urb_partition(pipeline);

   const VkPipelineVertexInputStateCreateInfo *vi_info =
      pCreateInfo->pVertexInputState;
   pipeline->vb_used = 0;
   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
      const VkVertexInputBindingDescription *desc =
         &vi_info->pVertexBindingDescriptions[i];

      pipeline->vb_used |= 1 << desc->binding;
      pipeline->binding_stride[desc->binding] = desc->stride;

      /* Step rate is programmed per vertex element (attribute), not
       * binding. Set up a map of which bindings step per instance, for
       * reference by vertex element setup.
       */
      switch (desc->inputRate) {
      default:
      case VK_VERTEX_INPUT_RATE_VERTEX:
         pipeline->instancing_enable[desc->binding] = false;
         break;
      case VK_VERTEX_INPUT_RATE_INSTANCE:
         pipeline->instancing_enable[desc->binding] = true;
         break;
      }
   }

   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
      pCreateInfo->pInputAssemblyState;
   pipeline->primitive_restart = ia_info->primitiveRestartEnable;
   pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];

   if (extra && extra->use_rectlist)
      pipeline->topology = _3DPRIM_RECTLIST;

   return VK_SUCCESS;
}

VkResult
anv_graphics_pipeline_create(
   VkDevice _device,
   const VkGraphicsPipelineCreateInfo *pCreateInfo,
   const struct anv_graphics_pipeline_create_info *extra,
   const VkAllocationCallbacks *pAllocator,
   VkPipeline *pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell)
         return gen75_graphics_pipeline_create(_device, pCreateInfo, extra,
                                               pAllocator, pPipeline);
      else
         return gen7_graphics_pipeline_create(_device, pCreateInfo, extra,
                                              pAllocator, pPipeline);
   case 8:
      return gen8_graphics_pipeline_create(_device, pCreateInfo, extra,
                                           pAllocator, pPipeline);
   case 9:
      return gen9_graphics_pipeline_create(_device, pCreateInfo, extra,
                                           pAllocator, pPipeline);
   default:
      unreachable("unsupported gen");
   }
}

VkResult anv_CreateGraphicsPipelines(
    VkDevice                                    _device,
    VkPipelineCache                             pipelineCache,
    uint32_t                                    count,
    const VkGraphicsPipelineCreateInfo*         pCreateInfos,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipelines)
{
   VkResult result = VK_SUCCESS;

   for (unsigned i = 0; i < count; i++) {
      result = anv_graphics_pipeline_create(_device, &pCreateInfos[i],
                                            NULL, pAllocator, &pPipelines[i]);
      if (result != VK_SUCCESS) {
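         /* On the first failure, unwind: destroy every pipeline created so
          * far and return the error for the whole call.
          */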
         for (unsigned j = 0; j < i; j++) {
            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
         }

         return result;
      }
   }

   return VK_SUCCESS;
}

static VkResult anv_compute_pipeline_create(
    VkDevice                                    _device,
    const VkComputePipelineCreateInfo*          pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell)
         return gen75_compute_pipeline_create(_device, pCreateInfo,
                                              pAllocator, pPipeline);
      else
         return gen7_compute_pipeline_create(_device, pCreateInfo,
                                             pAllocator, pPipeline);
   case 8:
      return gen8_compute_pipeline_create(_device, pCreateInfo,
                                          pAllocator, pPipeline);
   case 9:
      return gen9_compute_pipeline_create(_device, pCreateInfo,
                                          pAllocator, pPipeline);
   default:
      unreachable("unsupported gen");
   }
}

VkResult anv_CreateComputePipelines(
    VkDevice                                    _device,
    VkPipelineCache                             pipelineCache,
    uint32_t                                    count,
    const VkComputePipelineCreateInfo*          pCreateInfos,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipelines)
{
   VkResult result = VK_SUCCESS;

   for (unsigned i = 0; i < count; i++) {
      result = anv_compute_pipeline_create(_device, &pCreateInfos[i],
                                           pAllocator, &pPipelines[i]);
      if (result != VK_SUCCESS) {
         for (unsigned j = 0; j < i; j++) {
            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
         }

         return result;
      }
   }

   return VK_SUCCESS;
}