1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "util/mesa-sha1.h"
31 #include "common/gen_l3_config.h"
32 #include "anv_private.h"
33 #include "brw_nir.h"
34 #include "anv_nir.h"
35 #include "spirv/nir_spirv.h"
36
37 /* Needed for SWIZZLE macros */
38 #include "program/prog_instruction.h"
39
40 /* Shader functions */
41
42 VkResult anv_CreateShaderModule(
43 VkDevice _device,
44 const VkShaderModuleCreateInfo* pCreateInfo,
45 const VkAllocationCallbacks* pAllocator,
46 VkShaderModule* pShaderModule)
47 {
48 ANV_FROM_HANDLE(anv_device, device, _device);
49 struct anv_shader_module *module;
50
51 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
52 assert(pCreateInfo->flags == 0);
53
54 module = anv_alloc2(&device->alloc, pAllocator,
55 sizeof(*module) + pCreateInfo->codeSize, 8,
56 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
57 if (module == NULL)
58 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
59
60 module->nir = NULL;
61 module->size = pCreateInfo->codeSize;
62 memcpy(module->data, pCreateInfo->pCode, module->size);
63
64 _mesa_sha1_compute(module->data, module->size, module->sha1);
65
66 *pShaderModule = anv_shader_module_to_handle(module);
67
68 return VK_SUCCESS;
69 }
70
71 void anv_DestroyShaderModule(
72 VkDevice _device,
73 VkShaderModule _module,
74 const VkAllocationCallbacks* pAllocator)
75 {
76 ANV_FROM_HANDLE(anv_device, device, _device);
77 ANV_FROM_HANDLE(anv_shader_module, module, _module);
78
79 anv_free2(&device->alloc, pAllocator, module);
80 }
81
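/* The first word of every valid SPIR-V binary; used below to sanity-check
 * incoming shader modules.
 */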
82 #define SPIR_V_MAGIC_NUMBER 0x07230203
83
84 /* Eventually, this will become part of anv_CreateShaderModule. Unfortunately,
85 * we can't do that yet because we don't have the ability to copy nir.
86 */
87 static nir_shader *
88 anv_shader_compile_to_nir(struct anv_device *device,
89 struct anv_shader_module *module,
90 const char *entrypoint_name,
91 gl_shader_stage stage,
92 const VkSpecializationInfo *spec_info)
93 {
94 if (strcmp(entrypoint_name, "main") != 0) {
95 anv_finishme("Multiple shaders per module not really supported");
96 }
97
98 const struct brw_compiler *compiler =
99 device->instance->physicalDevice.compiler;
100 const nir_shader_compiler_options *nir_options =
101 compiler->glsl_compiler_options[stage].NirOptions;
102
103 nir_shader *nir;
104 nir_function *entry_point;
105 if (module->nir) {
106 /* Some things such as our meta clear/blit code will give us a NIR
107 * shader directly. In that case, we ignore the SPIR-V entirely and
108 * use the NIR shader as-is. */
109 nir = module->nir;
110 nir->options = nir_options;
111 nir_validate_shader(nir);
112
113 assert(exec_list_length(&nir->functions) == 1);
114 struct exec_node *node = exec_list_get_head(&nir->functions);
115 entry_point = exec_node_data(nir_function, node, node);
116 } else {
117 uint32_t *spirv = (uint32_t *) module->data;
118 assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
119 assert(module->size % 4 == 0);
120
121 uint32_t num_spec_entries = 0;
122 struct nir_spirv_specialization *spec_entries = NULL;
123 if (spec_info && spec_info->mapEntryCount > 0) {
124 num_spec_entries = spec_info->mapEntryCount;
125 spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
126 for (uint32_t i = 0; i < num_spec_entries; i++) {
127 VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
128 const void *data = spec_info->pData + entry.offset;
129 assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
130
131 spec_entries[i].id = spec_info->pMapEntries[i].constantID;
132 spec_entries[i].data = *(const uint32_t *)data;
133 }
134 }
135
136 entry_point = spirv_to_nir(spirv, module->size / 4,
137 spec_entries, num_spec_entries,
138 stage, entrypoint_name, nir_options);
139 nir = entry_point->shader;
140 assert(nir->stage == stage);
141 nir_validate_shader(nir);
142
143 free(spec_entries);
144
145 if (stage == MESA_SHADER_FRAGMENT) {
146 nir_lower_wpos_center(nir);
147 nir_validate_shader(nir);
148 }
149
150 nir_lower_returns(nir);
151 nir_validate_shader(nir);
152
153 nir_inline_functions(nir);
154 nir_validate_shader(nir);
155
156 /* Pick off the single entrypoint that we want */
157 foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
158 if (func != entry_point)
159 exec_node_remove(&func->node);
160 }
161 assert(exec_list_length(&nir->functions) == 1);
162 entry_point->name = ralloc_strdup(entry_point, "main");
163
164 nir_remove_dead_variables(nir, nir_var_shader_in);
165 nir_remove_dead_variables(nir, nir_var_shader_out);
166 nir_remove_dead_variables(nir, nir_var_system_value);
167 nir_validate_shader(nir);
168
169 nir_propagate_invariant(nir);
170 nir_validate_shader(nir);
171
172 nir_lower_io_to_temporaries(entry_point->shader, entry_point->impl,
173 true, false);
174
175 nir_lower_system_values(nir);
176 nir_validate_shader(nir);
177 }
178
179 /* Vulkan uses the separate-shader linking model */
180 nir->info.separate_shader = true;
181
182 nir = brw_preprocess_nir(compiler, nir);
183
184 nir_shader_gather_info(nir, entry_point->impl);
185
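/* Lower indirect addressing of inputs and temporaries to conditional direct
 * accesses where the backend cannot index them directly.
 */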
186 nir_variable_mode indirect_mask = 0;
187 if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
188 indirect_mask |= nir_var_shader_in;
189 if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
190 indirect_mask |= nir_var_local;
191
192 nir_lower_indirect_derefs(nir, indirect_mask);
193
194 return nir;
195 }
196
197 void anv_DestroyPipeline(
198 VkDevice _device,
199 VkPipeline _pipeline,
200 const VkAllocationCallbacks* pAllocator)
201 {
202 ANV_FROM_HANDLE(anv_device, device, _device);
203 ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
204
205 anv_reloc_list_finish(&pipeline->batch_relocs,
206 pAllocator ? pAllocator : &device->alloc);
207 if (pipeline->blend_state.map)
208 anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
209
210 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
211 if (pipeline->shaders[s])
212 anv_shader_bin_unref(device, pipeline->shaders[s]);
213 }
214
215 anv_free2(&device->alloc, pAllocator, pipeline);
216 }
217
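/* Map Vulkan primitive topologies to the hardware 3DPRIM_* topology values. */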
218 static const uint32_t vk_to_gen_primitive_type[] = {
219 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
220 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
221 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
222 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
223 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
224 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
225 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
226 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
227 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
228 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
229 /* [VK_PRIMITIVE_TOPOLOGY_PATCH_LIST] = _3DPRIM_PATCHLIST_1 */
230 };
231
232 static void
233 populate_sampler_prog_key(const struct gen_device_info *devinfo,
234 struct brw_sampler_prog_key_data *key)
235 {
236 /* XXX: Handle texture swizzle on HSW- */
237 for (int i = 0; i < MAX_SAMPLERS; i++) {
238 /* Assume color sampler, no swizzling. (Works for BDW+) */
239 key->swizzles[i] = SWIZZLE_XYZW;
240 }
241 }
242
243 static void
244 populate_vs_prog_key(const struct gen_device_info *devinfo,
245 struct brw_vs_prog_key *key)
246 {
247 memset(key, 0, sizeof(*key));
248
249 populate_sampler_prog_key(devinfo, &key->tex);
250
251 /* XXX: Handle vertex input work-arounds */
252
253 /* XXX: Handle sampler_prog_key */
254 }
255
256 static void
257 populate_gs_prog_key(const struct gen_device_info *devinfo,
258 struct brw_gs_prog_key *key)
259 {
260 memset(key, 0, sizeof(*key));
261
262 populate_sampler_prog_key(devinfo, &key->tex);
263 }
264
265 static void
266 populate_wm_prog_key(const struct gen_device_info *devinfo,
267 const VkGraphicsPipelineCreateInfo *info,
268 const struct anv_graphics_pipeline_create_info *extra,
269 struct brw_wm_prog_key *key)
270 {
271 ANV_FROM_HANDLE(anv_render_pass, render_pass, info->renderPass);
272
273 memset(key, 0, sizeof(*key));
274
275 populate_sampler_prog_key(devinfo, &key->tex);
276
277 /* TODO: Fill out key->input_slots_valid */
278
279 /* Vulkan doesn't specify a default */
280 key->high_quality_derivatives = false;
281
282 /* XXX Vulkan doesn't appear to specify */
283 key->clamp_fragment_color = false;
284
285 if (extra && extra->color_attachment_count >= 0) {
286 key->nr_color_regions = extra->color_attachment_count;
287 } else {
288 key->nr_color_regions =
289 render_pass->subpasses[info->subpass].color_count;
290 }
291
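/* With alpha-to-coverage enabled and more than one render target bound, the
 * alpha value of the first color output has to be replicated to the other
 * render target writes.
 */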
292 key->replicate_alpha = key->nr_color_regions > 1 &&
293 info->pMultisampleState &&
294 info->pMultisampleState->alphaToCoverageEnable;
295
296 if (info->pMultisampleState && info->pMultisampleState->rasterizationSamples > 1) {
297 /* We should probably pull this out of the shader, but it's fairly
298 * harmless to compute it and then let dead-code take care of it.
299 */
300 key->persample_interp =
301 (info->pMultisampleState->minSampleShading *
302 info->pMultisampleState->rasterizationSamples) > 1;
303 key->multisample_fbo = true;
304 }
305 }
306
307 static void
308 populate_cs_prog_key(const struct gen_device_info *devinfo,
309 struct brw_cs_prog_key *key)
310 {
311 memset(key, 0, sizeof(*key));
312
313 populate_sampler_prog_key(devinfo, &key->tex);
314 }
315
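/* Compile the module to NIR and run the anv-specific lowering passes on it
 * (push constants, dynamic buffer offsets, pipeline layout), sizing the
 * prog_data param array for the backend compiler along the way.
 */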
316 static nir_shader *
317 anv_pipeline_compile(struct anv_pipeline *pipeline,
318 struct anv_shader_module *module,
319 const char *entrypoint,
320 gl_shader_stage stage,
321 const VkSpecializationInfo *spec_info,
322 struct brw_stage_prog_data *prog_data,
323 struct anv_pipeline_bind_map *map)
324 {
325 nir_shader *nir = anv_shader_compile_to_nir(pipeline->device,
326 module, entrypoint, stage,
327 spec_info);
328 if (nir == NULL)
329 return NULL;
330
331 anv_nir_lower_push_constants(nir);
332
333 /* Figure out the number of parameters */
334 prog_data->nr_params = 0;
335
336 if (nir->num_uniforms > 0) {
337 /* If the shader uses any push constants at all, we'll just give
338 * them the maximum possible number
339 */
340 assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
341 prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
342 }
343
344 if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets)
345 prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2;
346
347 if (nir->info.num_images > 0) {
348 prog_data->nr_params += nir->info.num_images * BRW_IMAGE_PARAM_SIZE;
349 pipeline->needs_data_cache = true;
350 }
351
352 if (stage == MESA_SHADER_COMPUTE)
353 ((struct brw_cs_prog_data *)prog_data)->thread_local_id_index =
354 prog_data->nr_params++; /* The CS Thread ID uniform */
355
356 if (nir->info.num_ssbos > 0)
357 pipeline->needs_data_cache = true;
358
359 if (prog_data->nr_params > 0) {
360 /* XXX: I think we're leaking this */
361 prog_data->param = (const union gl_constant_value **)
362 malloc(prog_data->nr_params * sizeof(union gl_constant_value *));
363
364 * We now set the param values to be offsets into an
365 * anv_push_constants structure. Since the compiler doesn't
366 * actually dereference any of the gl_constant_value pointers in the
367 * params array, it doesn't really matter what we put here.
368 */
369 struct anv_push_constants *null_data = NULL;
370 if (nir->num_uniforms > 0) {
371 /* Fill out the push constants section of the param array */
372 for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++)
373 prog_data->param[i] = (const union gl_constant_value *)
374 &null_data->client_data[i * sizeof(float)];
375 }
376 }
377
378 /* Set up dynamic offsets */
379 anv_nir_apply_dynamic_offsets(pipeline, nir, prog_data);
380
381 /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
382 if (pipeline->layout)
383 anv_nir_apply_pipeline_layout(pipeline, nir, prog_data, map);
384
385 /* nir_lower_io will only handle the push constants; we need to set this
386 * to the full number of possible uniforms.
387 */
388 nir->num_uniforms = prog_data->nr_params * 4;
389
390 return nir;
391 }
392
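/* Lay out an initially-empty binding table. `bias` reserves the first N
 * surface slots for fixed entries (e.g. the fragment shader's render
 * targets) ahead of any descriptor-set surfaces.
 */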
393 static void
394 anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
395 {
396 prog_data->binding_table.size_bytes = 0;
397 prog_data->binding_table.texture_start = bias;
398 prog_data->binding_table.gather_texture_start = bias;
399 prog_data->binding_table.ubo_start = bias;
400 prog_data->binding_table.ssbo_start = bias;
401 prog_data->binding_table.image_start = bias;
402 }
403
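/* Hand the compiled kernel to the pipeline cache when one is available;
 * otherwise create a standalone anv_shader_bin owned by this pipeline.
 */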
404 static struct anv_shader_bin *
405 anv_pipeline_upload_kernel(struct anv_pipeline *pipeline,
406 struct anv_pipeline_cache *cache,
407 const void *key_data, uint32_t key_size,
408 const void *kernel_data, uint32_t kernel_size,
409 const void *prog_data, uint32_t prog_data_size,
410 const struct anv_pipeline_bind_map *bind_map)
411 {
412 if (cache) {
413 return anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
414 kernel_data, kernel_size,
415 prog_data, prog_data_size,
416 bind_map);
417 } else {
418 return anv_shader_bin_create(pipeline->device, key_data, key_size,
419 kernel_data, kernel_size,
420 prog_data, prog_data_size, bind_map);
421 }
422 }
423
424
425 static void
426 anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
427 gl_shader_stage stage,
428 struct anv_shader_bin *shader)
429 {
430 pipeline->shaders[stage] = shader;
431 pipeline->active_stages |= mesa_to_vk_shader_stage(stage);
432 }
433
434 static VkResult
435 anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
436 struct anv_pipeline_cache *cache,
437 const VkGraphicsPipelineCreateInfo *info,
438 struct anv_shader_module *module,
439 const char *entrypoint,
440 const VkSpecializationInfo *spec_info)
441 {
442 const struct brw_compiler *compiler =
443 pipeline->device->instance->physicalDevice.compiler;
444 struct anv_pipeline_bind_map map;
445 struct brw_vs_prog_key key;
446 struct anv_shader_bin *bin = NULL;
447 unsigned char sha1[20];
448
449 populate_vs_prog_key(&pipeline->device->info, &key);
450
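/* See if an identical compile already exists in the pipeline cache. The
 * hash covers the program key, shader module, entrypoint, pipeline layout
 * and specialization constants.
 */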
451 if (cache) {
452 anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
453 pipeline->layout, spec_info);
454 bin = anv_pipeline_cache_search(cache, sha1, 20);
455 }
456
457 if (bin == NULL) {
458 struct brw_vs_prog_data prog_data = { 0, };
459 struct anv_pipeline_binding surface_to_descriptor[256];
460 struct anv_pipeline_binding sampler_to_descriptor[256];
461
462 map = (struct anv_pipeline_bind_map) {
463 .surface_to_descriptor = surface_to_descriptor,
464 .sampler_to_descriptor = sampler_to_descriptor
465 };
466
467 nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
468 MESA_SHADER_VERTEX, spec_info,
469 &prog_data.base.base, &map);
470 if (nir == NULL)
471 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
472
473 anv_fill_binding_table(&prog_data.base.base, 0);
474
475 void *mem_ctx = ralloc_context(NULL);
476
477 if (module->nir == NULL)
478 ralloc_steal(mem_ctx, nir);
479
480 prog_data.inputs_read = nir->info.inputs_read;
481
482 brw_compute_vue_map(&pipeline->device->info,
483 &prog_data.base.vue_map,
484 nir->info.outputs_written,
485 nir->info.separate_shader);
486
487 unsigned code_size;
488 const unsigned *shader_code =
489 brw_compile_vs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
490 NULL, false, -1, &code_size, NULL);
491 if (shader_code == NULL) {
492 ralloc_free(mem_ctx);
493 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
494 }
495
496 bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
497 shader_code, code_size,
498 &prog_data, sizeof(prog_data), &map);
499 if (!bin) {
500 ralloc_free(mem_ctx);
501 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
502 }
503
504 ralloc_free(mem_ctx);
505 }
506
507 const struct brw_vs_prog_data *vs_prog_data =
508 (const struct brw_vs_prog_data *)anv_shader_bin_get_prog_data(bin);
509
510 if (vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8) {
511 pipeline->vs_simd8 = bin->kernel.offset;
512 pipeline->vs_vec4 = NO_KERNEL;
513 } else {
514 pipeline->vs_simd8 = NO_KERNEL;
515 pipeline->vs_vec4 = bin->kernel.offset;
516 }
517
518 anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX, bin);
519
520 return VK_SUCCESS;
521 }
522
523 static VkResult
524 anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
525 struct anv_pipeline_cache *cache,
526 const VkGraphicsPipelineCreateInfo *info,
527 struct anv_shader_module *module,
528 const char *entrypoint,
529 const VkSpecializationInfo *spec_info)
530 {
531 const struct brw_compiler *compiler =
532 pipeline->device->instance->physicalDevice.compiler;
533 struct anv_pipeline_bind_map map;
534 struct brw_gs_prog_key key;
535 struct anv_shader_bin *bin = NULL;
536 unsigned char sha1[20];
537
538 populate_gs_prog_key(&pipeline->device->info, &key);
539
540 if (cache) {
541 anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
542 pipeline->layout, spec_info);
543 bin = anv_pipeline_cache_search(cache, sha1, 20);
544 }
545
546 if (bin == NULL) {
547 struct brw_gs_prog_data prog_data = { 0, };
548 struct anv_pipeline_binding surface_to_descriptor[256];
549 struct anv_pipeline_binding sampler_to_descriptor[256];
550
551 map = (struct anv_pipeline_bind_map) {
552 .surface_to_descriptor = surface_to_descriptor,
553 .sampler_to_descriptor = sampler_to_descriptor
554 };
555
556 nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
557 MESA_SHADER_GEOMETRY, spec_info,
558 &prog_data.base.base, &map);
559 if (nir == NULL)
560 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
561
562 anv_fill_binding_table(&prog_data.base.base, 0);
563
564 void *mem_ctx = ralloc_context(NULL);
565
566 if (module->nir == NULL)
567 ralloc_steal(mem_ctx, nir);
568
569 brw_compute_vue_map(&pipeline->device->info,
570 &prog_data.base.vue_map,
571 nir->info.outputs_written,
572 nir->info.separate_shader);
573
574 unsigned code_size;
575 const unsigned *shader_code =
576 brw_compile_gs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
577 NULL, -1, &code_size, NULL);
578 if (shader_code == NULL) {
579 ralloc_free(mem_ctx);
580 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
581 }
582
583 /* TODO: SIMD8 GS */
584 bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
585 shader_code, code_size,
586 &prog_data, sizeof(prog_data), &map);
587 if (!bin) {
588 ralloc_free(mem_ctx);
589 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
590 }
591
592 ralloc_free(mem_ctx);
593 }
594
595 pipeline->gs_kernel = bin->kernel.offset;
596
597 anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY, bin);
598
599 return VK_SUCCESS;
600 }
601
602 static VkResult
603 anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
604 struct anv_pipeline_cache *cache,
605 const VkGraphicsPipelineCreateInfo *info,
606 const struct anv_graphics_pipeline_create_info *extra,
607 struct anv_shader_module *module,
608 const char *entrypoint,
609 const VkSpecializationInfo *spec_info)
610 {
611 const struct brw_compiler *compiler =
612 pipeline->device->instance->physicalDevice.compiler;
613 struct anv_pipeline_bind_map map;
614 struct brw_wm_prog_key key;
615 struct anv_shader_bin *bin = NULL;
616 unsigned char sha1[20];
617
618 populate_wm_prog_key(&pipeline->device->info, info, extra, &key);
619
620 if (cache) {
621 anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
622 pipeline->layout, spec_info);
623 bin = anv_pipeline_cache_search(cache, sha1, 20);
624 }
625
626 if (bin == NULL) {
627 struct brw_wm_prog_data prog_data = { 0, };
628 struct anv_pipeline_binding surface_to_descriptor[256];
629 struct anv_pipeline_binding sampler_to_descriptor[256];
630
631 map = (struct anv_pipeline_bind_map) {
632 .surface_to_descriptor = surface_to_descriptor + 8,
633 .sampler_to_descriptor = sampler_to_descriptor
634 };
635
636 nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
637 MESA_SHADER_FRAGMENT, spec_info,
638 &prog_data.base, &map);
639 if (nir == NULL)
640 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
641
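/* Walk the fragment shader's color outputs: drop writes to render targets
 * the subpass doesn't provide, and compact the rest so they map to
 * contiguous binding table entries.
 */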
642 unsigned num_rts = 0;
643 struct anv_pipeline_binding rt_bindings[8];
644 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
645 nir_foreach_variable_safe(var, &nir->outputs) {
646 if (var->data.location < FRAG_RESULT_DATA0)
647 continue;
648
649 unsigned rt = var->data.location - FRAG_RESULT_DATA0;
650 if (rt >= key.nr_color_regions) {
651 /* Out-of-bounds, throw it away */
652 var->data.mode = nir_var_local;
653 exec_node_remove(&var->node);
654 exec_list_push_tail(&impl->locals, &var->node);
655 continue;
656 }
657
658 /* Give it a new, compacted, location */
659 var->data.location = FRAG_RESULT_DATA0 + num_rts;
660
661 unsigned array_len =
662 glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
663 assert(num_rts + array_len <= 8);
664
665 for (unsigned i = 0; i < array_len; i++) {
666 rt_bindings[num_rts + i] = (struct anv_pipeline_binding) {
667 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
668 .binding = 0,
669 .index = rt + i,
670 };
671 }
672
673 num_rts += array_len;
674 }
675
676 if (pipeline->use_repclear) {
677 assert(num_rts == 1);
678 key.nr_color_regions = 1;
679 }
680
681 if (num_rts == 0) {
682 /* If we have no render targets, we need a null render target */
683 rt_bindings[0] = (struct anv_pipeline_binding) {
684 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
685 .binding = 0,
686 .index = UINT8_MAX,
687 };
688 num_rts = 1;
689 }
690
691 assert(num_rts <= 8);
692 map.surface_to_descriptor -= num_rts;
693 map.surface_count += num_rts;
694 assert(map.surface_count <= 256);
695 memcpy(map.surface_to_descriptor, rt_bindings,
696 num_rts * sizeof(*rt_bindings));
697
698 anv_fill_binding_table(&prog_data.base, num_rts);
699
700 void *mem_ctx = ralloc_context(NULL);
701
702 if (module->nir == NULL)
703 ralloc_steal(mem_ctx, nir);
704
705 unsigned code_size;
706 const unsigned *shader_code =
707 brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
708 NULL, -1, -1, true, pipeline->use_repclear,
709 &code_size, NULL);
710 if (shader_code == NULL) {
711 ralloc_free(mem_ctx);
712 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
713 }
714
715 bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
716 shader_code, code_size,
717 &prog_data, sizeof(prog_data), &map);
718 if (!bin) {
719 ralloc_free(mem_ctx);
720 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
721 }
722
723 ralloc_free(mem_ctx);
724 }
725
726 pipeline->ps_ksp0 = bin->kernel.offset;
727
728 anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT, bin);
729
730 return VK_SUCCESS;
731 }
732
733 VkResult
734 anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
735 struct anv_pipeline_cache *cache,
736 const VkComputePipelineCreateInfo *info,
737 struct anv_shader_module *module,
738 const char *entrypoint,
739 const VkSpecializationInfo *spec_info)
740 {
741 const struct brw_compiler *compiler =
742 pipeline->device->instance->physicalDevice.compiler;
743 struct anv_pipeline_bind_map map;
744 struct brw_cs_prog_key key;
745 struct anv_shader_bin *bin = NULL;
746 unsigned char sha1[20];
747
748 populate_cs_prog_key(&pipeline->device->info, &key);
749
750 if (cache) {
751 anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
752 pipeline->layout, spec_info);
753 bin = anv_pipeline_cache_search(cache, sha1, 20);
754 }
755
756 if (bin == NULL) {
757 struct brw_cs_prog_data prog_data = { 0, };
758 struct anv_pipeline_binding surface_to_descriptor[256];
759 struct anv_pipeline_binding sampler_to_descriptor[256];
760
761 map = (struct anv_pipeline_bind_map) {
762 .surface_to_descriptor = surface_to_descriptor,
763 .sampler_to_descriptor = sampler_to_descriptor
764 };
765
766 nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
767 MESA_SHADER_COMPUTE, spec_info,
768 &prog_data.base, &map);
769 if (nir == NULL)
770 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
771
772 anv_fill_binding_table(&prog_data.base, 1);
773
774 void *mem_ctx = ralloc_context(NULL);
775
776 if (module->nir == NULL)
777 ralloc_steal(mem_ctx, nir);
778
779 unsigned code_size;
780 const unsigned *shader_code =
781 brw_compile_cs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
782 -1, &code_size, NULL);
783 if (shader_code == NULL) {
784 ralloc_free(mem_ctx);
785 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
786 }
787
788 bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
789 shader_code, code_size,
790 &prog_data, sizeof(prog_data), &map);
791 if (!bin) {
792 ralloc_free(mem_ctx);
793 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
794 }
795
796 ralloc_free(mem_ctx);
797 }
798
799 pipeline->cs_simd = bin->kernel.offset;
800
801 anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE, bin);
802
803 return VK_SUCCESS;
804 }
805
806 void
807 anv_compute_urb_partition(struct anv_pipeline *pipeline)
808 {
809 const struct gen_device_info *devinfo = &pipeline->device->info;
810
811 bool vs_present = pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT;
812 unsigned vs_size = vs_present ?
813 get_vs_prog_data(pipeline)->base.urb_entry_size : 1;
814 unsigned vs_entry_size_bytes = vs_size * 64;
815 bool gs_present = pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT;
816 unsigned gs_size = gs_present ?
817 get_gs_prog_data(pipeline)->base.urb_entry_size : 1;
818 unsigned gs_entry_size_bytes = gs_size * 64;
819
820 /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
821 *
822 * VS Number of URB Entries must be divisible by 8 if the VS URB Entry
823 * Allocation Size is less than 9 512-bit URB entries.
824 *
825 * Similar text exists for GS.
826 */
827 unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
828 unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
829
830 /* URB allocations must be done in 8k chunks. */
831 unsigned chunk_size_bytes = 8192;
832
833 /* Determine the size of the URB in chunks. */
834 unsigned urb_chunks = pipeline->urb.total_size * 1024 / chunk_size_bytes;
835
836 /* Reserve space for push constants */
837 unsigned push_constant_kb;
838 if (pipeline->device->info.gen >= 8)
839 push_constant_kb = 32;
840 else if (pipeline->device->info.is_haswell)
841 push_constant_kb = pipeline->device->info.gt == 3 ? 32 : 16;
842 else
843 push_constant_kb = 16;
844
845 unsigned push_constant_bytes = push_constant_kb * 1024;
846 unsigned push_constant_chunks =
847 push_constant_bytes / chunk_size_bytes;
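/* Illustrative numbers only (not taken from any particular part): with a
 * 128 KB URB and 32 KB of push constant space, urb_chunks = 16 and
 * push_constant_chunks = 4, leaving 12 chunks to cover the VS/GS minimums
 * and their "wants" below.
 */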
848
849 /* Initially, assign each stage the minimum amount of URB space it needs,
850 * and make a note of how much additional space it "wants" (the amount of
851 * additional space it could actually make use of).
852 */
853
854 /* VS has a lower limit on the number of URB entries */
855 unsigned vs_chunks =
856 ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes,
857 chunk_size_bytes) / chunk_size_bytes;
858 unsigned vs_wants =
859 ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes,
860 chunk_size_bytes) / chunk_size_bytes - vs_chunks;
861
862 unsigned gs_chunks = 0;
863 unsigned gs_wants = 0;
864 if (gs_present) {
865 /* There are two constraints on the minimum amount of URB space we can
866 * allocate:
867 *
868 * (1) We need room for at least 2 URB entries, since we always operate
869 * the GS in DUAL_OBJECT mode.
870 *
871 * (2) We can't allocate fewer entries than gs_granularity.
872 */
873 gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
874 chunk_size_bytes) / chunk_size_bytes;
875 gs_wants =
876 ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes,
877 chunk_size_bytes) / chunk_size_bytes - gs_chunks;
878 }
879
880 /* There should always be enough URB space to satisfy the minimum
881 * requirements of each stage.
882 */
883 unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
884 assert(total_needs <= urb_chunks);
885
886 /* Mete out remaining space (if any) in proportion to "wants". */
887 unsigned total_wants = vs_wants + gs_wants;
888 unsigned remaining_space = urb_chunks - total_needs;
889 if (remaining_space > total_wants)
890 remaining_space = total_wants;
891 if (remaining_space > 0) {
892 unsigned vs_additional = (unsigned)
893 round(vs_wants * (((double) remaining_space) / total_wants));
894 vs_chunks += vs_additional;
895 remaining_space -= vs_additional;
896 gs_chunks += remaining_space;
897 }
898
899 /* Sanity check that we haven't over-allocated. */
900 assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
901
902 /* Finally, compute the number of entries that can fit in the space
903 * allocated to each stage.
904 */
905 unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
906 unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
907
908 /* Since we rounded up when computing *_wants, this may be slightly more
909 * than the maximum allowed amount, so correct for that.
910 */
911 nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries);
912 nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries);
913
914 /* Ensure that we program a multiple of the granularity. */
915 nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
916 nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
917
918 /* Finally, sanity check to make sure we have at least the minimum number
919 * of entries needed for each stage.
920 */
921 assert(nr_vs_entries >= devinfo->urb.min_vs_entries);
922 if (gs_present)
923 assert(nr_gs_entries >= 2);
924
925 /* Lay out the URB in the following order:
926 * - push constants
927 * - VS
928 * - GS
929 */
930 pipeline->urb.start[MESA_SHADER_VERTEX] = push_constant_chunks;
931 pipeline->urb.size[MESA_SHADER_VERTEX] = vs_size;
932 pipeline->urb.entries[MESA_SHADER_VERTEX] = nr_vs_entries;
933
934 pipeline->urb.start[MESA_SHADER_GEOMETRY] = push_constant_chunks + vs_chunks;
935 pipeline->urb.size[MESA_SHADER_GEOMETRY] = gs_size;
936 pipeline->urb.entries[MESA_SHADER_GEOMETRY] = nr_gs_entries;
937
938 pipeline->urb.start[MESA_SHADER_TESS_CTRL] = push_constant_chunks;
939 pipeline->urb.size[MESA_SHADER_TESS_CTRL] = 1;
940 pipeline->urb.entries[MESA_SHADER_TESS_CTRL] = 0;
941
942 pipeline->urb.start[MESA_SHADER_TESS_EVAL] = push_constant_chunks;
943 pipeline->urb.size[MESA_SHADER_TESS_EVAL] = 1;
944 pipeline->urb.entries[MESA_SHADER_TESS_EVAL] = 0;
945 }
946
947 /**
948 * Copy pipeline state not marked as dynamic.
949 * Dynamic state is pipeline state which hasn't been provided at pipeline
950 * creation time, but is dynamically provided afterwards using various
951 * vkCmdSet* functions.
952 *
953 * The set of state considered "non_dynamic" is determined by the pieces of
954 * state that have their corresponding VkDynamicState enums omitted from
955 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
956 *
957 * @param[out] pipeline Destination non_dynamic state.
958 * @param[in] pCreateInfo Source of non_dynamic state to be copied.
959 */
960 static void
961 copy_non_dynamic_state(struct anv_pipeline *pipeline,
962 const VkGraphicsPipelineCreateInfo *pCreateInfo)
963 {
964 anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
965 ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
966 struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
967
968 pipeline->dynamic_state = default_dynamic_state;
969
970 if (pCreateInfo->pDynamicState) {
971 /* Remove all of the states that are marked as dynamic */
972 uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
973 for (uint32_t s = 0; s < count; s++)
974 states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
975 }
976
977 struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
978
979 /* Section 9.2 of the Vulkan 1.0.15 spec says:
980 *
981 * pViewportState is [...] NULL if the pipeline
982 * has rasterization disabled.
983 */
984 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
985 assert(pCreateInfo->pViewportState);
986
987 dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
988 if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
989 typed_memcpy(dynamic->viewport.viewports,
990 pCreateInfo->pViewportState->pViewports,
991 pCreateInfo->pViewportState->viewportCount);
992 }
993
994 dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
995 if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
996 typed_memcpy(dynamic->scissor.scissors,
997 pCreateInfo->pViewportState->pScissors,
998 pCreateInfo->pViewportState->scissorCount);
999 }
1000 }
1001
1002 if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
1003 assert(pCreateInfo->pRasterizationState);
1004 dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
1005 }
1006
1007 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
1008 assert(pCreateInfo->pRasterizationState);
1009 dynamic->depth_bias.bias =
1010 pCreateInfo->pRasterizationState->depthBiasConstantFactor;
1011 dynamic->depth_bias.clamp =
1012 pCreateInfo->pRasterizationState->depthBiasClamp;
1013 dynamic->depth_bias.slope =
1014 pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
1015 }
1016
1017 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1018 *
1019 * pColorBlendState is [...] NULL if the pipeline has rasterization
1020 * disabled or if the subpass of the render pass the pipeline is
1021 * created against does not use any color attachments.
1022 */
1023 bool uses_color_att = false;
1024 for (unsigned i = 0; i < subpass->color_count; ++i) {
1025 if (subpass->color_attachments[i] != VK_ATTACHMENT_UNUSED) {
1026 uses_color_att = true;
1027 break;
1028 }
1029 }
1030
1031 if (uses_color_att &&
1032 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1033 assert(pCreateInfo->pColorBlendState);
1034
1035 if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
1036 typed_memcpy(dynamic->blend_constants,
1037 pCreateInfo->pColorBlendState->blendConstants, 4);
1038 }
1039
1040 /* If there is no depthstencil attachment, then don't read
1041 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
1042 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
1043 * no need to override the depthstencil defaults in
1044 * anv_pipeline::dynamic_state when there is no depthstencil attachment.
1045 *
1046 * Section 9.2 of the Vulkan 1.0.15 spec says:
1047 *
1048 * pDepthStencilState is [...] NULL if the pipeline has rasterization
1049 * disabled or if the subpass of the render pass the pipeline is created
1050 * against does not use a depth/stencil attachment.
1051 */
1052 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
1053 subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
1054 assert(pCreateInfo->pDepthStencilState);
1055
1056 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
1057 dynamic->depth_bounds.min =
1058 pCreateInfo->pDepthStencilState->minDepthBounds;
1059 dynamic->depth_bounds.max =
1060 pCreateInfo->pDepthStencilState->maxDepthBounds;
1061 }
1062
1063 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
1064 dynamic->stencil_compare_mask.front =
1065 pCreateInfo->pDepthStencilState->front.compareMask;
1066 dynamic->stencil_compare_mask.back =
1067 pCreateInfo->pDepthStencilState->back.compareMask;
1068 }
1069
1070 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
1071 dynamic->stencil_write_mask.front =
1072 pCreateInfo->pDepthStencilState->front.writeMask;
1073 dynamic->stencil_write_mask.back =
1074 pCreateInfo->pDepthStencilState->back.writeMask;
1075 }
1076
1077 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
1078 dynamic->stencil_reference.front =
1079 pCreateInfo->pDepthStencilState->front.reference;
1080 dynamic->stencil_reference.back =
1081 pCreateInfo->pDepthStencilState->back.reference;
1082 }
1083 }
1084
1085 pipeline->dynamic_state_mask = states;
1086 }
1087
1088 static void
1089 anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
1090 {
1091 struct anv_render_pass *renderpass = NULL;
1092 struct anv_subpass *subpass = NULL;
1093
1094 /* Assert that all required members of VkGraphicsPipelineCreateInfo are
1095 * present, as explained by the Vulkan spec (20 Oct 2015, git-aa308cb), Section
1096 * 4.2 Graphics Pipeline.
1097 */
1098 assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1099
1100 renderpass = anv_render_pass_from_handle(info->renderPass);
1101 assert(renderpass);
1102
1103 if (renderpass != &anv_meta_dummy_renderpass) {
1104 assert(info->subpass < renderpass->subpass_count);
1105 subpass = &renderpass->subpasses[info->subpass];
1106 }
1107
1108 assert(info->stageCount >= 1);
1109 assert(info->pVertexInputState);
1110 assert(info->pInputAssemblyState);
1111 assert(info->pViewportState);
1112 assert(info->pRasterizationState);
1113
1114 if (subpass && subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED)
1115 assert(info->pDepthStencilState);
1116
1117 if (subpass && subpass->color_count > 0)
1118 assert(info->pColorBlendState);
1119
1120 for (uint32_t i = 0; i < info->stageCount; ++i) {
1121 switch (info->pStages[i].stage) {
1122 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
1123 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
1124 assert(info->pTessellationState);
1125 break;
1126 default:
1127 break;
1128 }
1129 }
1130 }
1131
1132 /**
1133 * Calculate the desired L3 partitioning based on the current state of the
1134 * pipeline. For now this simply returns the conservative defaults calculated
1135 * by gen_get_default_l3_weights(), but we could probably do better by gathering
1136 * more statistics from the pipeline state (e.g. guess of expected URB usage
1137 * and bound surfaces), or by using feed-back from performance counters.
1138 */
1139 void
1140 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
1141 {
1142 const struct gen_device_info *devinfo = &pipeline->device->info;
1143
1144 const struct gen_l3_weights w =
1145 gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);
1146
1147 pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
1148 pipeline->urb.total_size =
1149 gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
1150 }
1151
1152 VkResult
1153 anv_pipeline_init(struct anv_pipeline *pipeline,
1154 struct anv_device *device,
1155 struct anv_pipeline_cache *cache,
1156 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1157 const struct anv_graphics_pipeline_create_info *extra,
1158 const VkAllocationCallbacks *alloc)
1159 {
1160 VkResult result;
1161
1162 anv_validate {
1163 anv_pipeline_validate_create_info(pCreateInfo);
1164 }
1165
1166 if (alloc == NULL)
1167 alloc = &device->alloc;
1168
1169 pipeline->device = device;
1170 pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
1171
1172 result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
1173 if (result != VK_SUCCESS)
1174 return result;
1175
1176 pipeline->batch.alloc = alloc;
1177 pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
1178 pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
1179 pipeline->batch.relocs = &pipeline->batch_relocs;
1180
1181 copy_non_dynamic_state(pipeline, pCreateInfo);
1182 pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
1183 pCreateInfo->pRasterizationState->depthClampEnable;
1184
1185 pipeline->use_repclear = extra && extra->use_repclear;
1186
1187 pipeline->needs_data_cache = false;
1188
1189 /* When we free the pipeline, we detect stages based on the NULL status
1190 * of the entries in the shaders array. Make them NULL by default.
1191 */
1192 memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
1193
1194 pipeline->vs_simd8 = NO_KERNEL;
1195 pipeline->vs_vec4 = NO_KERNEL;
1196 pipeline->gs_kernel = NO_KERNEL;
1197 pipeline->ps_ksp0 = NO_KERNEL;
1198
1199 pipeline->active_stages = 0;
1200
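/* Re-index the user-provided stages by gl_shader_stage so each stage can be
 * looked up directly below.
 */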
1201 const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
1202 struct anv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
1203 for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
1204 gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
1205 pStages[stage] = &pCreateInfo->pStages[i];
1206 modules[stage] = anv_shader_module_from_handle(pStages[stage]->module);
1207 }
1208
1209 if (modules[MESA_SHADER_VERTEX]) {
1210 result = anv_pipeline_compile_vs(pipeline, cache, pCreateInfo,
1211 modules[MESA_SHADER_VERTEX],
1212 pStages[MESA_SHADER_VERTEX]->pName,
1213 pStages[MESA_SHADER_VERTEX]->pSpecializationInfo);
1214 if (result != VK_SUCCESS)
1215 goto compile_fail;
1216 }
1217
1218 if (modules[MESA_SHADER_TESS_CTRL] || modules[MESA_SHADER_TESS_EVAL])
1219 anv_finishme("no tessellation support");
1220
1221 if (modules[MESA_SHADER_GEOMETRY]) {
1222 result = anv_pipeline_compile_gs(pipeline, cache, pCreateInfo,
1223 modules[MESA_SHADER_GEOMETRY],
1224 pStages[MESA_SHADER_GEOMETRY]->pName,
1225 pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo);
1226 if (result != VK_SUCCESS)
1227 goto compile_fail;
1228 }
1229
1230 if (modules[MESA_SHADER_FRAGMENT]) {
1231 result = anv_pipeline_compile_fs(pipeline, cache, pCreateInfo, extra,
1232 modules[MESA_SHADER_FRAGMENT],
1233 pStages[MESA_SHADER_FRAGMENT]->pName,
1234 pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
1235 if (result != VK_SUCCESS)
1236 goto compile_fail;
1237 }
1238
1239 if (!(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT)) {
1240 /* Vertex is only optional if disable_vs is set */
1241 assert(extra->disable_vs);
1242 }
1243
1244 anv_pipeline_setup_l3_config(pipeline, false);
1245 anv_compute_urb_partition(pipeline);
1246
1247 const VkPipelineVertexInputStateCreateInfo *vi_info =
1248 pCreateInfo->pVertexInputState;
1249
1250 uint64_t inputs_read;
1251 if (extra && extra->disable_vs) {
1252 /* If the VS is disabled, just assume the user knows what they're
1253 * doing and apply the layout blindly. This can only come from
1254 * meta, so this *should* be safe.
1255 */
1256 inputs_read = ~0ull;
1257 } else {
1258 inputs_read = get_vs_prog_data(pipeline)->inputs_read;
1259 }
1260
1261 pipeline->vb_used = 0;
1262 for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
1263 const VkVertexInputAttributeDescription *desc =
1264 &vi_info->pVertexAttributeDescriptions[i];
1265
1266 if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
1267 pipeline->vb_used |= 1 << desc->binding;
1268 }
1269
1270 for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
1271 const VkVertexInputBindingDescription *desc =
1272 &vi_info->pVertexBindingDescriptions[i];
1273
1274 pipeline->binding_stride[desc->binding] = desc->stride;
1275
1276 /* Step rate is programmed per vertex element (attribute), not
1277 * binding. Set up a map of which bindings step per instance, for
1278 * reference by vertex element setup. */
1279 switch (desc->inputRate) {
1280 default:
1281 case VK_VERTEX_INPUT_RATE_VERTEX:
1282 pipeline->instancing_enable[desc->binding] = false;
1283 break;
1284 case VK_VERTEX_INPUT_RATE_INSTANCE:
1285 pipeline->instancing_enable[desc->binding] = true;
1286 break;
1287 }
1288 }
1289
1290 const VkPipelineInputAssemblyStateCreateInfo *ia_info =
1291 pCreateInfo->pInputAssemblyState;
1292 pipeline->primitive_restart = ia_info->primitiveRestartEnable;
1293 pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
1294
1295 if (extra && extra->use_rectlist)
1296 pipeline->topology = _3DPRIM_RECTLIST;
1297
1298 return VK_SUCCESS;
1299
1300 compile_fail:
1301 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1302 if (pipeline->shaders[s])
1303 anv_shader_bin_unref(device, pipeline->shaders[s]);
1304 }
1305
1306 anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
1307
1308 return result;
1309 }
1310
1311 VkResult
1312 anv_graphics_pipeline_create(
1313 VkDevice _device,
1314 VkPipelineCache _cache,
1315 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1316 const struct anv_graphics_pipeline_create_info *extra,
1317 const VkAllocationCallbacks *pAllocator,
1318 VkPipeline *pPipeline)
1319 {
1320 ANV_FROM_HANDLE(anv_device, device, _device);
1321 ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
1322
1323 switch (device->info.gen) {
1324 case 7:
1325 if (device->info.is_haswell)
1326 return gen75_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
1327 else
1328 return gen7_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
1329 case 8:
1330 return gen8_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
1331 case 9:
1332 return gen9_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
1333 default:
1334 unreachable("unsupported gen\n");
1335 }
1336 }
1337
1338 VkResult anv_CreateGraphicsPipelines(
1339 VkDevice _device,
1340 VkPipelineCache pipelineCache,
1341 uint32_t count,
1342 const VkGraphicsPipelineCreateInfo* pCreateInfos,
1343 const VkAllocationCallbacks* pAllocator,
1344 VkPipeline* pPipelines)
1345 {
1346 VkResult result = VK_SUCCESS;
1347
1348 unsigned i = 0;
1349 for (; i < count; i++) {
1350 result = anv_graphics_pipeline_create(_device,
1351 pipelineCache,
1352 &pCreateInfos[i],
1353 NULL, pAllocator, &pPipelines[i]);
1354 if (result != VK_SUCCESS) {
1355 for (unsigned j = 0; j < i; j++) {
1356 anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
1357 }
1358
1359 return result;
1360 }
1361 }
1362
1363 return VK_SUCCESS;
1364 }
1365
1366 static VkResult anv_compute_pipeline_create(
1367 VkDevice _device,
1368 VkPipelineCache _cache,
1369 const VkComputePipelineCreateInfo* pCreateInfo,
1370 const VkAllocationCallbacks* pAllocator,
1371 VkPipeline* pPipeline)
1372 {
1373 ANV_FROM_HANDLE(anv_device, device, _device);
1374 ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
1375
1376 switch (device->info.gen) {
1377 case 7:
1378 if (device->info.is_haswell)
1379 return gen75_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
1380 else
1381 return gen7_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
1382 case 8:
1383 return gen8_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
1384 case 9:
1385 return gen9_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
1386 default:
1387 unreachable("unsupported gen\n");
1388 }
1389 }
1390
1391 VkResult anv_CreateComputePipelines(
1392 VkDevice _device,
1393 VkPipelineCache pipelineCache,
1394 uint32_t count,
1395 const VkComputePipelineCreateInfo* pCreateInfos,
1396 const VkAllocationCallbacks* pAllocator,
1397 VkPipeline* pPipelines)
1398 {
1399 VkResult result = VK_SUCCESS;
1400
1401 unsigned i = 0;
1402 for (; i < count; i++) {
1403 result = anv_compute_pipeline_create(_device, pipelineCache,
1404 &pCreateInfos[i],
1405 pAllocator, &pPipelines[i]);
1406 if (result != VK_SUCCESS) {
1407 for (unsigned j = 0; j < i; j++) {
1408 anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
1409 }
1410
1411 return result;
1412 }
1413 }
1414
1415 return VK_SUCCESS;
1416 }