anv/pipeline: Drop anv_fill_binding_table
src/intel/vulkan/anv_pipeline.c (mesa.git)
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "util/mesa-sha1.h"
31 #include "common/gen_l3_config.h"
32 #include "anv_private.h"
33 #include "compiler/brw_nir.h"
34 #include "anv_nir.h"
35 #include "nir/nir_xfb_info.h"
36 #include "spirv/nir_spirv.h"
37 #include "vk_util.h"
38
39 /* Needed for SWIZZLE macros */
40 #include "program/prog_instruction.h"
41
42 // Shader functions
43
44 VkResult anv_CreateShaderModule(
45 VkDevice _device,
46 const VkShaderModuleCreateInfo* pCreateInfo,
47 const VkAllocationCallbacks* pAllocator,
48 VkShaderModule* pShaderModule)
49 {
50 ANV_FROM_HANDLE(anv_device, device, _device);
51 struct anv_shader_module *module;
52
53 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
54 assert(pCreateInfo->flags == 0);
55
56 module = vk_alloc2(&device->alloc, pAllocator,
57 sizeof(*module) + pCreateInfo->codeSize, 8,
58 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
59 if (module == NULL)
60 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
61
62 module->size = pCreateInfo->codeSize;
63 memcpy(module->data, pCreateInfo->pCode, module->size);
64
65 _mesa_sha1_compute(module->data, module->size, module->sha1);
66
67 *pShaderModule = anv_shader_module_to_handle(module);
68
69 return VK_SUCCESS;
70 }
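/* For reference, a minimal application-side sketch of how this entry point
 * is typically reached (hypothetical variable names; codeSize is in bytes
 * and must be a multiple of 4, pCode points at the SPIR-V words):
 *
 *    VkShaderModuleCreateInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
 *       .codeSize = spirv_size_bytes,
 *       .pCode = spirv_words,
 *    };
 *    VkShaderModule module;
 *    vkCreateShaderModule(device, &info, NULL, &module);
 */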
71
72 void anv_DestroyShaderModule(
73 VkDevice _device,
74 VkShaderModule _module,
75 const VkAllocationCallbacks* pAllocator)
76 {
77 ANV_FROM_HANDLE(anv_device, device, _device);
78 ANV_FROM_HANDLE(anv_shader_module, module, _module);
79
80 if (!module)
81 return;
82
83 vk_free2(&device->alloc, pAllocator, module);
84 }
85
86 #define SPIR_V_MAGIC_NUMBER 0x07230203
87
88 static const uint64_t stage_to_debug[] = {
89 [MESA_SHADER_VERTEX] = DEBUG_VS,
90 [MESA_SHADER_TESS_CTRL] = DEBUG_TCS,
91 [MESA_SHADER_TESS_EVAL] = DEBUG_TES,
92 [MESA_SHADER_GEOMETRY] = DEBUG_GS,
93 [MESA_SHADER_FRAGMENT] = DEBUG_WM,
94 [MESA_SHADER_COMPUTE] = DEBUG_CS,
95 };
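/* These flags correspond to the INTEL_DEBUG environment variable (e.g.
 * INTEL_DEBUG=vs,fs); when the matching flag is set, the NIR produced from
 * SPIR-V for that stage is dumped to stderr below.
 */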
96
97 /* Eventually, this will become part of anv_CreateShader. Unfortunately,
98 * we can't do that yet because we don't have the ability to copy nir.
99 */
100 static nir_shader *
101 anv_shader_compile_to_nir(struct anv_device *device,
102 void *mem_ctx,
103 const struct anv_shader_module *module,
104 const char *entrypoint_name,
105 gl_shader_stage stage,
106 const VkSpecializationInfo *spec_info)
107 {
108 const struct anv_physical_device *pdevice =
109 &device->instance->physicalDevice;
110 const struct brw_compiler *compiler = pdevice->compiler;
111 const nir_shader_compiler_options *nir_options =
112 compiler->glsl_compiler_options[stage].NirOptions;
113
114 uint32_t *spirv = (uint32_t *) module->data;
115 assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
116 assert(module->size % 4 == 0);
117
118 uint32_t num_spec_entries = 0;
119 struct nir_spirv_specialization *spec_entries = NULL;
120 if (spec_info && spec_info->mapEntryCount > 0) {
121 num_spec_entries = spec_info->mapEntryCount;
122 spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
123 for (uint32_t i = 0; i < num_spec_entries; i++) {
124 VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
125 const void *data = spec_info->pData + entry.offset;
126 assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
127
128 spec_entries[i].id = spec_info->pMapEntries[i].constantID;
129          if (entry.size == 8) /* 64-bit vs. 32-bit is a per-entry property, not the total dataSize */
130 spec_entries[i].data64 = *(const uint64_t *)data;
131 else
132 spec_entries[i].data32 = *(const uint32_t *)data;
133 }
134 }
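/* A worked example (hypothetical values) of the VkSpecializationInfo layout
 * consumed above: a single 32-bit constant with constant_id = 7 would be
 * described as
 *
 *    uint32_t value = 16;
 *    VkSpecializationMapEntry entry = { .constantID = 7, .offset = 0, .size = 4 };
 *    VkSpecializationInfo spec = { 1, &entry, sizeof(value), &value };
 *
 * and ends up as spec_entries[0] = { .id = 7, .data32 = 16 }.
 */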
135
136 struct spirv_to_nir_options spirv_options = {
137 .lower_workgroup_access_to_offsets = true,
138 .caps = {
139 .device_group = true,
140 .draw_parameters = true,
141 .float64 = pdevice->info.gen >= 8,
142 .geometry_streams = true,
143 .image_write_without_format = true,
144 .int16 = pdevice->info.gen >= 8,
145 .int64 = pdevice->info.gen >= 8,
146 .min_lod = true,
147 .multiview = true,
148 .physical_storage_buffer_address = pdevice->info.gen >= 8 &&
149 pdevice->use_softpin,
150 .post_depth_coverage = pdevice->info.gen >= 9,
151 .shader_viewport_index_layer = true,
152 .stencil_export = pdevice->info.gen >= 9,
153 .storage_8bit = pdevice->info.gen >= 8,
154 .storage_16bit = pdevice->info.gen >= 8,
155 .subgroup_arithmetic = true,
156 .subgroup_basic = true,
157 .subgroup_ballot = true,
158 .subgroup_quad = true,
159 .subgroup_shuffle = true,
160 .subgroup_vote = true,
161 .tessellation = true,
162 .transform_feedback = pdevice->info.gen >= 8,
163 .variable_pointers = true,
164 },
165 .ubo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
166 .ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
167 .phys_ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT64, 1),
168 .push_const_ptr_type = glsl_uint_type(),
169 .shared_ptr_type = glsl_uint_type(),
170 };
171
172 nir_function *entry_point =
173 spirv_to_nir(spirv, module->size / 4,
174 spec_entries, num_spec_entries,
175 stage, entrypoint_name, &spirv_options, nir_options);
176 nir_shader *nir = entry_point->shader;
177 assert(nir->info.stage == stage);
178 nir_validate_shader(nir, "after spirv_to_nir");
179 ralloc_steal(mem_ctx, nir);
180
181 free(spec_entries);
182
183 if (unlikely(INTEL_DEBUG & stage_to_debug[stage])) {
184 fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
185 gl_shader_stage_name(stage));
186 nir_print_shader(nir, stderr);
187 }
188
189 /* We have to lower away local constant initializers right before we
190 * inline functions. That way they get properly initialized at the top
191 * of the function and not at the top of its caller.
192 */
193 NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
194 NIR_PASS_V(nir, nir_lower_returns);
195 NIR_PASS_V(nir, nir_inline_functions);
196 NIR_PASS_V(nir, nir_opt_deref);
197
198 /* Pick off the single entrypoint that we want */
199 foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
200 if (func != entry_point)
201 exec_node_remove(&func->node);
202 }
203 assert(exec_list_length(&nir->functions) == 1);
204
205 /* Now that we've deleted all but the main function, we can go ahead and
206 * lower the rest of the constant initializers. We do this here so that
207 * nir_remove_dead_variables and split_per_member_structs below see the
208 * corresponding stores.
209 */
210 NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
211
212 /* Split member structs. We do this before lower_io_to_temporaries so that
213 * it doesn't lower system values to temporaries by accident.
214 */
215 NIR_PASS_V(nir, nir_split_var_copies);
216 NIR_PASS_V(nir, nir_split_per_member_structs);
217
218 NIR_PASS_V(nir, nir_remove_dead_variables,
219 nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
220
221 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo | nir_var_mem_ssbo,
222 nir_address_format_vk_index_offset);
223
224 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
225 nir_address_format_64bit_global);
226
227 NIR_PASS_V(nir, nir_propagate_invariant);
228 NIR_PASS_V(nir, nir_lower_io_to_temporaries,
229 entry_point->impl, true, false);
230
231 /* Vulkan uses the separate-shader linking model */
232 nir->info.separate_shader = true;
233
234 nir = brw_preprocess_nir(compiler, nir);
235
236 return nir;
237 }
238
239 void anv_DestroyPipeline(
240 VkDevice _device,
241 VkPipeline _pipeline,
242 const VkAllocationCallbacks* pAllocator)
243 {
244 ANV_FROM_HANDLE(anv_device, device, _device);
245 ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
246
247 if (!pipeline)
248 return;
249
250 anv_reloc_list_finish(&pipeline->batch_relocs,
251 pAllocator ? pAllocator : &device->alloc);
252 if (pipeline->blend_state.map)
253 anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
254
255 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
256 if (pipeline->shaders[s])
257 anv_shader_bin_unref(device, pipeline->shaders[s]);
258 }
259
260 vk_free2(&device->alloc, pAllocator, pipeline);
261 }
262
263 static const uint32_t vk_to_gen_primitive_type[] = {
264 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
265 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
266 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
267 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
268 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
269 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
270 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
271 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
272 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
273 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
274 };
275
276 static void
277 populate_sampler_prog_key(const struct gen_device_info *devinfo,
278 struct brw_sampler_prog_key_data *key)
279 {
280 /* Almost all multisampled textures are compressed. The only time when we
281 * don't compress a multisampled texture is for 16x MSAA with a surface
282  * width greater than 8k, which is a bit of an edge case. Since the sampler
283 * just ignores the MCS parameter to ld2ms when MCS is disabled, it's safe
284 * to tell the compiler to always assume compression.
285 */
286 key->compressed_multisample_layout_mask = ~0;
287
288 /* SkyLake added support for 16x MSAA. With this came a new message for
289 * reading from a 16x MSAA surface with compression. The new message was
290 * needed because now the MCS data is 64 bits instead of 32 or lower as is
291 * the case for 8x, 4x, and 2x. The key->msaa_16 bit-field controls which
292 * message we use. Fortunately, the 16x message works for 8x, 4x, and 2x
293 * so we can just use it unconditionally. This may not be quite as
294 * efficient but it saves us from recompiling.
295 */
296 if (devinfo->gen >= 9)
297 key->msaa_16 = ~0;
298
299 /* XXX: Handle texture swizzle on HSW- */
300 for (int i = 0; i < MAX_SAMPLERS; i++) {
301 /* Assume color sampler, no swizzling. (Works for BDW+) */
302 key->swizzles[i] = SWIZZLE_XYZW;
303 }
304 }
305
306 static void
307 populate_vs_prog_key(const struct gen_device_info *devinfo,
308 struct brw_vs_prog_key *key)
309 {
310 memset(key, 0, sizeof(*key));
311
312 populate_sampler_prog_key(devinfo, &key->tex);
313
314 /* XXX: Handle vertex input work-arounds */
315
316 /* XXX: Handle sampler_prog_key */
317 }
318
319 static void
320 populate_tcs_prog_key(const struct gen_device_info *devinfo,
321 unsigned input_vertices,
322 struct brw_tcs_prog_key *key)
323 {
324 memset(key, 0, sizeof(*key));
325
326 populate_sampler_prog_key(devinfo, &key->tex);
327
328 key->input_vertices = input_vertices;
329 }
330
331 static void
332 populate_tes_prog_key(const struct gen_device_info *devinfo,
333 struct brw_tes_prog_key *key)
334 {
335 memset(key, 0, sizeof(*key));
336
337 populate_sampler_prog_key(devinfo, &key->tex);
338 }
339
340 static void
341 populate_gs_prog_key(const struct gen_device_info *devinfo,
342 struct brw_gs_prog_key *key)
343 {
344 memset(key, 0, sizeof(*key));
345
346 populate_sampler_prog_key(devinfo, &key->tex);
347 }
348
349 static void
350 populate_wm_prog_key(const struct gen_device_info *devinfo,
351 const struct anv_subpass *subpass,
352 const VkPipelineMultisampleStateCreateInfo *ms_info,
353 struct brw_wm_prog_key *key)
354 {
355 memset(key, 0, sizeof(*key));
356
357 populate_sampler_prog_key(devinfo, &key->tex);
358
359    /* We set this to 0 here and set it to the actual value before we call
360 * brw_compile_fs.
361 */
362 key->input_slots_valid = 0;
363
364 /* Vulkan doesn't specify a default */
365 key->high_quality_derivatives = false;
366
367 /* XXX Vulkan doesn't appear to specify */
368 key->clamp_fragment_color = false;
369
370 assert(subpass->color_count <= MAX_RTS);
371 for (uint32_t i = 0; i < subpass->color_count; i++) {
372 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
373 key->color_outputs_valid |= (1 << i);
374 }
375
376 key->nr_color_regions = util_bitcount(key->color_outputs_valid);
377
378 key->replicate_alpha = key->nr_color_regions > 1 &&
379 ms_info && ms_info->alphaToCoverageEnable;
380
381 if (ms_info) {
382 /* We should probably pull this out of the shader, but it's fairly
383 * harmless to compute it and then let dead-code take care of it.
384 */
385 if (ms_info->rasterizationSamples > 1) {
386 key->persample_interp =
387 (ms_info->minSampleShading * ms_info->rasterizationSamples) > 1;
388 key->multisample_fbo = true;
389 }
390
391 key->frag_coord_adds_sample_pos = ms_info->sampleShadingEnable;
392 }
393 }
394
395 static void
396 populate_cs_prog_key(const struct gen_device_info *devinfo,
397 struct brw_cs_prog_key *key)
398 {
399 memset(key, 0, sizeof(*key));
400
401 populate_sampler_prog_key(devinfo, &key->tex);
402 }
403
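/* Per-stage scratch state gathered while building a pipeline: the input
 * module/entrypoint/specialization triple, the SHA-1s used for the NIR and
 * kernel cache lookups, the lowered NIR itself, the binding table maps, and
 * finally the compiled prog_data.
 */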
404 struct anv_pipeline_stage {
405 gl_shader_stage stage;
406
407 const struct anv_shader_module *module;
408 const char *entrypoint;
409 const VkSpecializationInfo *spec_info;
410
411 unsigned char shader_sha1[20];
412
413 union brw_any_prog_key key;
414
415 struct {
416 gl_shader_stage stage;
417 unsigned char sha1[20];
418 } cache_key;
419
420 nir_shader *nir;
421
422 struct anv_pipeline_binding surface_to_descriptor[256];
423 struct anv_pipeline_binding sampler_to_descriptor[256];
424 struct anv_pipeline_bind_map bind_map;
425
426 union brw_any_prog_data prog_data;
427 };
428
429 static void
430 anv_pipeline_hash_shader(const struct anv_shader_module *module,
431 const char *entrypoint,
432 gl_shader_stage stage,
433 const VkSpecializationInfo *spec_info,
434 unsigned char *sha1_out)
435 {
436 struct mesa_sha1 ctx;
437 _mesa_sha1_init(&ctx);
438
439 _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
440 _mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
441 _mesa_sha1_update(&ctx, &stage, sizeof(stage));
442 if (spec_info) {
443 _mesa_sha1_update(&ctx, spec_info->pMapEntries,
444 spec_info->mapEntryCount *
445 sizeof(*spec_info->pMapEntries));
446 _mesa_sha1_update(&ctx, spec_info->pData,
447 spec_info->dataSize);
448 }
449
450 _mesa_sha1_final(&ctx, sha1_out);
451 }
452
453 static void
454 anv_pipeline_hash_graphics(struct anv_pipeline *pipeline,
455 struct anv_pipeline_layout *layout,
456 struct anv_pipeline_stage *stages,
457 unsigned char *sha1_out)
458 {
459 struct mesa_sha1 ctx;
460 _mesa_sha1_init(&ctx);
461
462 _mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
463 sizeof(pipeline->subpass->view_mask));
464
465 if (layout)
466 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
467
468 const bool rba = pipeline->device->robust_buffer_access;
469 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
470
471 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
472 if (stages[s].entrypoint) {
473 _mesa_sha1_update(&ctx, stages[s].shader_sha1,
474 sizeof(stages[s].shader_sha1));
475 _mesa_sha1_update(&ctx, &stages[s].key, brw_prog_key_size(s));
476 }
477 }
478
479 _mesa_sha1_final(&ctx, sha1_out);
480 }
481
482 static void
483 anv_pipeline_hash_compute(struct anv_pipeline *pipeline,
484 struct anv_pipeline_layout *layout,
485 struct anv_pipeline_stage *stage,
486 unsigned char *sha1_out)
487 {
488 struct mesa_sha1 ctx;
489 _mesa_sha1_init(&ctx);
490
491 if (layout)
492 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
493
494 const bool rba = pipeline->device->robust_buffer_access;
495 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
496
497 _mesa_sha1_update(&ctx, stage->shader_sha1,
498 sizeof(stage->shader_sha1));
499 _mesa_sha1_update(&ctx, &stage->key.cs, sizeof(stage->key.cs));
500
501 _mesa_sha1_final(&ctx, sha1_out);
502 }
503
504 static nir_shader *
505 anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
506 struct anv_pipeline_cache *cache,
507 void *mem_ctx,
508 struct anv_pipeline_stage *stage)
509 {
510 const struct brw_compiler *compiler =
511 pipeline->device->instance->physicalDevice.compiler;
512 const nir_shader_compiler_options *nir_options =
513 compiler->glsl_compiler_options[stage->stage].NirOptions;
514 nir_shader *nir;
515
516 nir = anv_device_search_for_nir(pipeline->device, cache,
517 nir_options,
518 stage->shader_sha1,
519 mem_ctx);
520 if (nir) {
521 assert(nir->info.stage == stage->stage);
522 return nir;
523 }
524
525 nir = anv_shader_compile_to_nir(pipeline->device,
526 mem_ctx,
527 stage->module,
528 stage->entrypoint,
529 stage->stage,
530 stage->spec_info);
531 if (nir) {
532 anv_device_upload_nir(pipeline->device, cache, nir, stage->shader_sha1);
533 return nir;
534 }
535
536 return NULL;
537 }
538
539 static void
540 anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
541 void *mem_ctx,
542 struct anv_pipeline_stage *stage,
543 struct anv_pipeline_layout *layout)
544 {
545 const struct brw_compiler *compiler =
546 pipeline->device->instance->physicalDevice.compiler;
547
548 struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
549 nir_shader *nir = stage->nir;
550
551 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
552 NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);
553 NIR_PASS_V(nir, anv_nir_lower_input_attachments);
554 }
555
556 NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
557
558 NIR_PASS_V(nir, anv_nir_lower_push_constants);
559
560 if (nir->info.stage != MESA_SHADER_COMPUTE)
561 NIR_PASS_V(nir, anv_nir_lower_multiview, pipeline->subpass->view_mask);
562
563 if (nir->info.stage == MESA_SHADER_COMPUTE)
564 prog_data->total_shared = nir->num_shared;
565
566 nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
567
568 if (nir->num_uniforms > 0) {
569 assert(prog_data->nr_params == 0);
570
571 /* If the shader uses any push constants at all, we'll just give
572        * them the maximum possible number.
573 */
574 assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
575 nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE;
576 prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
577 prog_data->param = ralloc_array(mem_ctx, uint32_t, prog_data->nr_params);
578
579       /* We now set the param values to be offsets into an
580        * anv_push_constants structure. Since the compiler doesn't
581 * actually dereference any of the gl_constant_value pointers in the
582 * params array, it doesn't really matter what we put here.
583 */
584 struct anv_push_constants *null_data = NULL;
585 /* Fill out the push constants section of the param array */
586 for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++) {
587 prog_data->param[i] = ANV_PARAM_PUSH(
588 (uintptr_t)&null_data->client_data[i * sizeof(float)]);
589 }
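      /* Worked example (illustrative): the dword at client push-constant
       * offset 16 becomes prog_data->param[4], whose value is
       * ANV_PARAM_PUSH(offsetof(struct anv_push_constants, client_data) + 16),
       * i.e. just an offset into the per-stage anv_push_constants block that
       * the command-buffer code resolves when it uploads push constants.
       */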
590 }
591
592 if (nir->info.num_ssbos > 0 || nir->info.num_images > 0)
593 pipeline->needs_data_cache = true;
594
595 NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo);
596
597 /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
598 if (layout) {
599 anv_nir_apply_pipeline_layout(&pipeline->device->instance->physicalDevice,
600 pipeline->device->robust_buffer_access,
601 layout, nir, prog_data,
602 &stage->bind_map);
603 NIR_PASS_V(nir, nir_opt_constant_folding);
604 }
605
606 if (nir->info.stage != MESA_SHADER_COMPUTE)
607 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
608
609 assert(nir->num_uniforms == prog_data->nr_params * 4);
610
611 stage->nir = nir;
612 }
613
614 static void
615 anv_pipeline_link_vs(const struct brw_compiler *compiler,
616 struct anv_pipeline_stage *vs_stage,
617 struct anv_pipeline_stage *next_stage)
618 {
619 if (next_stage)
620 brw_nir_link_shaders(compiler, &vs_stage->nir, &next_stage->nir);
621 }
622
623 static const unsigned *
624 anv_pipeline_compile_vs(const struct brw_compiler *compiler,
625 void *mem_ctx,
626 struct anv_pipeline_stage *vs_stage)
627 {
628 brw_compute_vue_map(compiler->devinfo,
629 &vs_stage->prog_data.vs.base.vue_map,
630 vs_stage->nir->info.outputs_written,
631 vs_stage->nir->info.separate_shader);
632
633 return brw_compile_vs(compiler, NULL, mem_ctx, &vs_stage->key.vs,
634 &vs_stage->prog_data.vs, vs_stage->nir, -1, NULL);
635 }
636
637 static void
638 merge_tess_info(struct shader_info *tes_info,
639 const struct shader_info *tcs_info)
640 {
641 /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
642 *
643 * "PointMode. Controls generation of points rather than triangles
644 * or lines. This functionality defaults to disabled, and is
645     *     enabled if either shader stage includes the execution mode."
646 *
647 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
648 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
649 * and OutputVertices, it says:
650 *
651 * "One mode must be set in at least one of the tessellation
652 * shader stages."
653 *
654 * So, the fields can be set in either the TCS or TES, but they must
655 * agree if set in both. Our backend looks at TES, so bitwise-or in
656 * the values from the TCS.
657 */
658 assert(tcs_info->tess.tcs_vertices_out == 0 ||
659 tes_info->tess.tcs_vertices_out == 0 ||
660 tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
661 tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
662
663 assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
664 tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
665 tcs_info->tess.spacing == tes_info->tess.spacing);
666 tes_info->tess.spacing |= tcs_info->tess.spacing;
667
668 assert(tcs_info->tess.primitive_mode == 0 ||
669 tes_info->tess.primitive_mode == 0 ||
670 tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
671 tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
672 tes_info->tess.ccw |= tcs_info->tess.ccw;
673 tes_info->tess.point_mode |= tcs_info->tess.point_mode;
674 }
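/* For example (illustrative), a TCS declaring only OutputVertices=3 and a
 * TES declaring only Triangles and SpacingEqual merge to a TES info with
 * tcs_vertices_out = 3, triangle primitive mode, and equal spacing, which is
 * what the backend compiler consumes.
 */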
675
676 static void
677 anv_pipeline_link_tcs(const struct brw_compiler *compiler,
678 struct anv_pipeline_stage *tcs_stage,
679 struct anv_pipeline_stage *tes_stage)
680 {
681 assert(tes_stage && tes_stage->stage == MESA_SHADER_TESS_EVAL);
682
683 brw_nir_link_shaders(compiler, &tcs_stage->nir, &tes_stage->nir);
684
685 nir_lower_patch_vertices(tes_stage->nir,
686 tcs_stage->nir->info.tess.tcs_vertices_out,
687 NULL);
688
689 /* Copy TCS info into the TES info */
690 merge_tess_info(&tes_stage->nir->info, &tcs_stage->nir->info);
691
692 /* Whacking the key after cache lookup is a bit sketchy, but all of
693 * this comes from the SPIR-V, which is part of the hash used for the
694 * pipeline cache. So it should be safe.
695 */
696 tcs_stage->key.tcs.tes_primitive_mode =
697 tes_stage->nir->info.tess.primitive_mode;
698 tcs_stage->key.tcs.quads_workaround =
699 compiler->devinfo->gen < 9 &&
700 tes_stage->nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
701 tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
702 }
703
704 static const unsigned *
705 anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
706 void *mem_ctx,
707 struct anv_pipeline_stage *tcs_stage,
708 struct anv_pipeline_stage *prev_stage)
709 {
710 tcs_stage->key.tcs.outputs_written =
711 tcs_stage->nir->info.outputs_written;
712 tcs_stage->key.tcs.patch_outputs_written =
713 tcs_stage->nir->info.patch_outputs_written;
714
715 return brw_compile_tcs(compiler, NULL, mem_ctx, &tcs_stage->key.tcs,
716 &tcs_stage->prog_data.tcs, tcs_stage->nir,
717 -1, NULL);
718 }
719
720 static void
721 anv_pipeline_link_tes(const struct brw_compiler *compiler,
722 struct anv_pipeline_stage *tes_stage,
723 struct anv_pipeline_stage *next_stage)
724 {
725 if (next_stage)
726 brw_nir_link_shaders(compiler, &tes_stage->nir, &next_stage->nir);
727 }
728
729 static const unsigned *
730 anv_pipeline_compile_tes(const struct brw_compiler *compiler,
731 void *mem_ctx,
732 struct anv_pipeline_stage *tes_stage,
733 struct anv_pipeline_stage *tcs_stage)
734 {
735 tes_stage->key.tes.inputs_read =
736 tcs_stage->nir->info.outputs_written;
737 tes_stage->key.tes.patch_inputs_read =
738 tcs_stage->nir->info.patch_outputs_written;
739
740 return brw_compile_tes(compiler, NULL, mem_ctx, &tes_stage->key.tes,
741 &tcs_stage->prog_data.tcs.base.vue_map,
742 &tes_stage->prog_data.tes, tes_stage->nir,
743 NULL, -1, NULL);
744 }
745
746 static void
747 anv_pipeline_link_gs(const struct brw_compiler *compiler,
748 struct anv_pipeline_stage *gs_stage,
749 struct anv_pipeline_stage *next_stage)
750 {
751 if (next_stage)
752 brw_nir_link_shaders(compiler, &gs_stage->nir, &next_stage->nir);
753 }
754
755 static const unsigned *
756 anv_pipeline_compile_gs(const struct brw_compiler *compiler,
757 void *mem_ctx,
758 struct anv_pipeline_stage *gs_stage,
759 struct anv_pipeline_stage *prev_stage)
760 {
761 brw_compute_vue_map(compiler->devinfo,
762 &gs_stage->prog_data.gs.base.vue_map,
763 gs_stage->nir->info.outputs_written,
764 gs_stage->nir->info.separate_shader);
765
766 return brw_compile_gs(compiler, NULL, mem_ctx, &gs_stage->key.gs,
767 &gs_stage->prog_data.gs, gs_stage->nir,
768 NULL, -1, NULL);
769 }
770
771 static void
772 anv_pipeline_link_fs(const struct brw_compiler *compiler,
773 struct anv_pipeline_stage *stage)
774 {
775 unsigned num_rts = 0;
776 const int max_rt = FRAG_RESULT_DATA7 - FRAG_RESULT_DATA0 + 1;
777 struct anv_pipeline_binding rt_bindings[max_rt];
778 nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
779 int rt_to_bindings[max_rt];
780 memset(rt_to_bindings, -1, sizeof(rt_to_bindings));
781 bool rt_used[max_rt];
782 memset(rt_used, 0, sizeof(rt_used));
783
784 /* Flag used render targets */
785 nir_foreach_variable_safe(var, &stage->nir->outputs) {
786 if (var->data.location < FRAG_RESULT_DATA0)
787 continue;
788
789 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
790 /* Unused or out-of-bounds */
791 if (rt >= MAX_RTS || !(stage->key.wm.color_outputs_valid & (1 << rt)))
792 continue;
793
794 const unsigned array_len =
795 glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
796 assert(rt + array_len <= max_rt);
797
798 for (unsigned i = 0; i < array_len; i++)
799 rt_used[rt + i] = true;
800 }
801
802 /* Set new, compacted, location */
803 for (unsigned i = 0; i < max_rt; i++) {
804 if (!rt_used[i])
805 continue;
806
807 rt_to_bindings[i] = num_rts;
808 rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
809 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
810 .binding = 0,
811 .index = i,
812 };
813 num_rts++;
814 }
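   /* For example (illustrative), if only color outputs 1 and 3 are written
    * and valid, they compact to binding table entries 0 and 1:
    * rt_to_bindings[1] = 0 and rt_to_bindings[3] = 1, and the loop below then
    * moves the corresponding variables to FRAG_RESULT_DATA0 and
    * FRAG_RESULT_DATA1.
    */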
815
816 bool deleted_output = false;
817 nir_foreach_variable_safe(var, &stage->nir->outputs) {
818 if (var->data.location < FRAG_RESULT_DATA0)
819 continue;
820
821 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
822 if (rt >= MAX_RTS ||
823 !(stage->key.wm.color_outputs_valid & (1 << rt))) {
824 /* Unused or out-of-bounds, throw it away */
825 deleted_output = true;
826 var->data.mode = nir_var_function_temp;
827 exec_node_remove(&var->node);
828 exec_list_push_tail(&impl->locals, &var->node);
829 continue;
830 }
831
832 /* Give it the new location */
833 assert(rt_to_bindings[rt] != -1);
834 var->data.location = rt_to_bindings[rt] + FRAG_RESULT_DATA0;
835 }
836
837 if (deleted_output)
838 nir_fixup_deref_modes(stage->nir);
839
840 if (num_rts == 0) {
841 /* If we have no render targets, we need a null render target */
842 rt_bindings[0] = (struct anv_pipeline_binding) {
843 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
844 .binding = 0,
845 .index = UINT32_MAX,
846 };
847 num_rts = 1;
848 }
849
850 /* Now that we've determined the actual number of render targets, adjust
851 * the key accordingly.
852 */
853 stage->key.wm.nr_color_regions = num_rts;
854 stage->key.wm.color_outputs_valid = (1 << num_rts) - 1;
855
856 assert(num_rts <= max_rt);
857 assert(stage->bind_map.surface_count == 0);
858 typed_memcpy(stage->bind_map.surface_to_descriptor,
859 rt_bindings, num_rts);
860 stage->bind_map.surface_count += num_rts;
861 }
862
863 static const unsigned *
864 anv_pipeline_compile_fs(const struct brw_compiler *compiler,
865 void *mem_ctx,
866 struct anv_pipeline_stage *fs_stage,
867 struct anv_pipeline_stage *prev_stage)
868 {
869 /* TODO: we could set this to 0 based on the information in nir_shader, but
870 * we need this before we call spirv_to_nir.
871 */
872 assert(prev_stage);
873 fs_stage->key.wm.input_slots_valid =
874 prev_stage->prog_data.vue.vue_map.slots_valid;
875
876 const unsigned *code =
877 brw_compile_fs(compiler, NULL, mem_ctx, &fs_stage->key.wm,
878 &fs_stage->prog_data.wm, fs_stage->nir,
879 NULL, -1, -1, -1, true, false, NULL, NULL);
880
881 if (fs_stage->key.wm.nr_color_regions == 0 &&
882 !fs_stage->prog_data.wm.has_side_effects &&
883 !fs_stage->prog_data.wm.uses_kill &&
884 fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
885 !fs_stage->prog_data.wm.computed_stencil) {
886 /* This fragment shader has no outputs and no side effects. Go ahead
887 * and return the code pointer so we don't accidentally think the
888        * compile failed, but zero out prog_data, which will set program_size to
889 * zero and disable the stage.
890 */
891 memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
892 }
893
894 return code;
895 }
896
897 static VkResult
898 anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
899 struct anv_pipeline_cache *cache,
900 const VkGraphicsPipelineCreateInfo *info)
901 {
902 const struct brw_compiler *compiler =
903 pipeline->device->instance->physicalDevice.compiler;
904 struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
905
906 pipeline->active_stages = 0;
907
908 VkResult result;
909 for (uint32_t i = 0; i < info->stageCount; i++) {
910 const VkPipelineShaderStageCreateInfo *sinfo = &info->pStages[i];
911 gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
912
913 pipeline->active_stages |= sinfo->stage;
914
915 stages[stage].stage = stage;
916 stages[stage].module = anv_shader_module_from_handle(sinfo->module);
917 stages[stage].entrypoint = sinfo->pName;
918 stages[stage].spec_info = sinfo->pSpecializationInfo;
919 anv_pipeline_hash_shader(stages[stage].module,
920 stages[stage].entrypoint,
921 stage,
922 stages[stage].spec_info,
923 stages[stage].shader_sha1);
924
925 const struct gen_device_info *devinfo = &pipeline->device->info;
926 switch (stage) {
927 case MESA_SHADER_VERTEX:
928 populate_vs_prog_key(devinfo, &stages[stage].key.vs);
929 break;
930 case MESA_SHADER_TESS_CTRL:
931 populate_tcs_prog_key(devinfo,
932 info->pTessellationState->patchControlPoints,
933 &stages[stage].key.tcs);
934 break;
935 case MESA_SHADER_TESS_EVAL:
936 populate_tes_prog_key(devinfo, &stages[stage].key.tes);
937 break;
938 case MESA_SHADER_GEOMETRY:
939 populate_gs_prog_key(devinfo, &stages[stage].key.gs);
940 break;
941 case MESA_SHADER_FRAGMENT:
942 populate_wm_prog_key(devinfo, pipeline->subpass,
943 info->pMultisampleState,
944 &stages[stage].key.wm);
945 break;
946 default:
947 unreachable("Invalid graphics shader stage");
948 }
949 }
950
951 if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
952 pipeline->active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
953
954 assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
955
956 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
957
958 unsigned char sha1[20];
959 anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
960
961 unsigned found = 0;
962 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
963 if (!stages[s].entrypoint)
964 continue;
965
966 stages[s].cache_key.stage = s;
967 memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
968
969 struct anv_shader_bin *bin =
970 anv_device_search_for_kernel(pipeline->device, cache,
971 &stages[s].cache_key,
972 sizeof(stages[s].cache_key));
973 if (bin) {
974 found++;
975 pipeline->shaders[s] = bin;
976 }
977 }
978
979 if (found == __builtin_popcount(pipeline->active_stages)) {
980 /* We found all our shaders in the cache. We're done. */
981 goto done;
982 } else if (found > 0) {
983 /* We found some but not all of our shaders. This shouldn't happen
984 * most of the time but it can if we have a partially populated
985 * pipeline cache.
986 */
987 assert(found < __builtin_popcount(pipeline->active_stages));
988
989 vk_debug_report(&pipeline->device->instance->debug_report_callbacks,
990 VK_DEBUG_REPORT_WARNING_BIT_EXT |
991 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
992 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
993 (uint64_t)(uintptr_t)cache,
994 0, 0, "anv",
995 "Found a partial pipeline in the cache. This is "
996 "most likely caused by an incomplete pipeline cache "
997 "import or export");
998
999 /* We're going to have to recompile anyway, so just throw away our
1000 * references to the shaders in the cache. We'll get them out of the
1001 * cache again as part of the compilation process.
1002 */
1003 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1004 if (pipeline->shaders[s]) {
1005 anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
1006 pipeline->shaders[s] = NULL;
1007 }
1008 }
1009 }
1010
1011 void *pipeline_ctx = ralloc_context(NULL);
1012
1013 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1014 if (!stages[s].entrypoint)
1015 continue;
1016
1017 assert(stages[s].stage == s);
1018 assert(pipeline->shaders[s] == NULL);
1019
1020 stages[s].bind_map = (struct anv_pipeline_bind_map) {
1021 .surface_to_descriptor = stages[s].surface_to_descriptor,
1022 .sampler_to_descriptor = stages[s].sampler_to_descriptor
1023 };
1024
1025 stages[s].nir = anv_pipeline_stage_get_nir(pipeline, cache,
1026 pipeline_ctx,
1027 &stages[s]);
1028 if (stages[s].nir == NULL) {
1029 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1030 goto fail;
1031 }
1032 }
1033
1034 /* Walk backwards to link */
1035 struct anv_pipeline_stage *next_stage = NULL;
1036 for (int s = MESA_SHADER_STAGES - 1; s >= 0; s--) {
1037 if (!stages[s].entrypoint)
1038 continue;
1039
1040 switch (s) {
1041 case MESA_SHADER_VERTEX:
1042 anv_pipeline_link_vs(compiler, &stages[s], next_stage);
1043 break;
1044 case MESA_SHADER_TESS_CTRL:
1045 anv_pipeline_link_tcs(compiler, &stages[s], next_stage);
1046 break;
1047 case MESA_SHADER_TESS_EVAL:
1048 anv_pipeline_link_tes(compiler, &stages[s], next_stage);
1049 break;
1050 case MESA_SHADER_GEOMETRY:
1051 anv_pipeline_link_gs(compiler, &stages[s], next_stage);
1052 break;
1053 case MESA_SHADER_FRAGMENT:
1054 anv_pipeline_link_fs(compiler, &stages[s]);
1055 break;
1056 default:
1057 unreachable("Invalid graphics shader stage");
1058 }
1059
1060 next_stage = &stages[s];
1061 }
1062
1063 struct anv_pipeline_stage *prev_stage = NULL;
1064 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1065 if (!stages[s].entrypoint)
1066 continue;
1067
1068 void *stage_ctx = ralloc_context(NULL);
1069
1070 nir_xfb_info *xfb_info = NULL;
1071 if (s == MESA_SHADER_VERTEX ||
1072 s == MESA_SHADER_TESS_EVAL ||
1073 s == MESA_SHADER_GEOMETRY)
1074 xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);
1075
1076 anv_pipeline_lower_nir(pipeline, stage_ctx, &stages[s], layout);
1077
1078 const unsigned *code;
1079 switch (s) {
1080 case MESA_SHADER_VERTEX:
1081 code = anv_pipeline_compile_vs(compiler, stage_ctx, &stages[s]);
1082 break;
1083 case MESA_SHADER_TESS_CTRL:
1084 code = anv_pipeline_compile_tcs(compiler, stage_ctx,
1085 &stages[s], prev_stage);
1086 break;
1087 case MESA_SHADER_TESS_EVAL:
1088 code = anv_pipeline_compile_tes(compiler, stage_ctx,
1089 &stages[s], prev_stage);
1090 break;
1091 case MESA_SHADER_GEOMETRY:
1092 code = anv_pipeline_compile_gs(compiler, stage_ctx,
1093 &stages[s], prev_stage);
1094 break;
1095 case MESA_SHADER_FRAGMENT:
1096 code = anv_pipeline_compile_fs(compiler, stage_ctx,
1097 &stages[s], prev_stage);
1098 break;
1099 default:
1100 unreachable("Invalid graphics shader stage");
1101 }
1102 if (code == NULL) {
1103 ralloc_free(stage_ctx);
1104 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1105 goto fail;
1106 }
1107
1108 struct anv_shader_bin *bin =
1109 anv_device_upload_kernel(pipeline->device, cache,
1110 &stages[s].cache_key,
1111 sizeof(stages[s].cache_key),
1112 code, stages[s].prog_data.base.program_size,
1113 stages[s].nir->constant_data,
1114 stages[s].nir->constant_data_size,
1115 &stages[s].prog_data.base,
1116 brw_prog_data_size(s),
1117 xfb_info, &stages[s].bind_map);
1118 if (!bin) {
1119 ralloc_free(stage_ctx);
1120 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1121 goto fail;
1122 }
1123
1124 pipeline->shaders[s] = bin;
1125 ralloc_free(stage_ctx);
1126
1127 prev_stage = &stages[s];
1128 }
1129
1130 ralloc_free(pipeline_ctx);
1131
1132 done:
1133
1134 if (pipeline->shaders[MESA_SHADER_FRAGMENT] &&
1135 pipeline->shaders[MESA_SHADER_FRAGMENT]->prog_data->program_size == 0) {
1136 /* This can happen if we decided to implicitly disable the fragment
1137 * shader. See anv_pipeline_compile_fs().
1138 */
1139 anv_shader_bin_unref(pipeline->device,
1140 pipeline->shaders[MESA_SHADER_FRAGMENT]);
1141 pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
1142 pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
1143 }
1144
1145 return VK_SUCCESS;
1146
1147 fail:
1148 ralloc_free(pipeline_ctx);
1149
1150 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1151 if (pipeline->shaders[s])
1152 anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
1153 }
1154
1155 return result;
1156 }
1157
1158 VkResult
1159 anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
1160 struct anv_pipeline_cache *cache,
1161 const VkComputePipelineCreateInfo *info,
1162 const struct anv_shader_module *module,
1163 const char *entrypoint,
1164 const VkSpecializationInfo *spec_info)
1165 {
1166 const struct brw_compiler *compiler =
1167 pipeline->device->instance->physicalDevice.compiler;
1168
1169 struct anv_pipeline_stage stage = {
1170 .stage = MESA_SHADER_COMPUTE,
1171 .module = module,
1172 .entrypoint = entrypoint,
1173 .spec_info = spec_info,
1174 .cache_key = {
1175 .stage = MESA_SHADER_COMPUTE,
1176 }
1177 };
1178 anv_pipeline_hash_shader(stage.module,
1179 stage.entrypoint,
1180 MESA_SHADER_COMPUTE,
1181 stage.spec_info,
1182 stage.shader_sha1);
1183
1184 struct anv_shader_bin *bin = NULL;
1185
1186 populate_cs_prog_key(&pipeline->device->info, &stage.key.cs);
1187
1188 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1189
1190 anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
1191 bin = anv_device_search_for_kernel(pipeline->device, cache, &stage.cache_key,
1192 sizeof(stage.cache_key));
1193
1194 if (bin == NULL) {
1195 stage.bind_map = (struct anv_pipeline_bind_map) {
1196 .surface_to_descriptor = stage.surface_to_descriptor,
1197 .sampler_to_descriptor = stage.sampler_to_descriptor
1198 };
1199
1200 /* Set up a binding for the gl_NumWorkGroups */
1201 stage.bind_map.surface_count = 1;
1202 stage.bind_map.surface_to_descriptor[0] = (struct anv_pipeline_binding) {
1203 .set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
1204 };
1205
1206 void *mem_ctx = ralloc_context(NULL);
1207
1208 stage.nir = anv_pipeline_stage_get_nir(pipeline, cache, mem_ctx, &stage);
1209 if (stage.nir == NULL) {
1210 ralloc_free(mem_ctx);
1211 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1212 }
1213
1214 anv_pipeline_lower_nir(pipeline, mem_ctx, &stage, layout);
1215
1216 NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id,
1217 &stage.prog_data.cs);
1218
1219 const unsigned *shader_code =
1220 brw_compile_cs(compiler, NULL, mem_ctx, &stage.key.cs,
1221 &stage.prog_data.cs, stage.nir, -1, NULL);
1222 if (shader_code == NULL) {
1223 ralloc_free(mem_ctx);
1224 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1225 }
1226
1227 const unsigned code_size = stage.prog_data.base.program_size;
1228 bin = anv_device_upload_kernel(pipeline->device, cache,
1229 &stage.cache_key, sizeof(stage.cache_key),
1230 shader_code, code_size,
1231 stage.nir->constant_data,
1232 stage.nir->constant_data_size,
1233 &stage.prog_data.base,
1234 sizeof(stage.prog_data.cs),
1235 NULL, &stage.bind_map);
1236 if (!bin) {
1237 ralloc_free(mem_ctx);
1238 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1239 }
1240
1241 ralloc_free(mem_ctx);
1242 }
1243
1244 pipeline->active_stages = VK_SHADER_STAGE_COMPUTE_BIT;
1245 pipeline->shaders[MESA_SHADER_COMPUTE] = bin;
1246
1247 return VK_SUCCESS;
1248 }
1249
1250 /**
1251 * Copy pipeline state not marked as dynamic.
1252 * Dynamic state is pipeline state which hasn't been provided at pipeline
1253 * creation time, but is dynamically provided afterwards using various
1254 * vkCmdSet* functions.
1255 *
1256 * The set of state considered "non_dynamic" is determined by the pieces of
1257 * state that have their corresponding VkDynamicState enums omitted from
1258 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
1259 *
1260 * @param[out] pipeline Destination non_dynamic state.
1261 * @param[in] pCreateInfo Source of non_dynamic state to be copied.
1262 */
1263 static void
1264 copy_non_dynamic_state(struct anv_pipeline *pipeline,
1265 const VkGraphicsPipelineCreateInfo *pCreateInfo)
1266 {
1267 anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
1268 struct anv_subpass *subpass = pipeline->subpass;
1269
1270 pipeline->dynamic_state = default_dynamic_state;
1271
1272 if (pCreateInfo->pDynamicState) {
1273 /* Remove all of the states that are marked as dynamic */
1274 uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
1275 for (uint32_t s = 0; s < count; s++)
1276 states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
1277 }
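   /* For example (illustrative), listing VK_DYNAMIC_STATE_VIEWPORT in
    * pDynamicStates clears that bit in `states`, so the viewport values from
    * pViewportState are not copied below and will instead come from
    * vkCmdSetViewport() at record time.
    */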
1278
1279 struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
1280
1281 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1282 *
1283 * pViewportState is [...] NULL if the pipeline
1284 * has rasterization disabled.
1285 */
1286 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1287 assert(pCreateInfo->pViewportState);
1288
1289 dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
1290 if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
1291 typed_memcpy(dynamic->viewport.viewports,
1292 pCreateInfo->pViewportState->pViewports,
1293 pCreateInfo->pViewportState->viewportCount);
1294 }
1295
1296 dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
1297 if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
1298 typed_memcpy(dynamic->scissor.scissors,
1299 pCreateInfo->pViewportState->pScissors,
1300 pCreateInfo->pViewportState->scissorCount);
1301 }
1302 }
1303
1304 if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
1305 assert(pCreateInfo->pRasterizationState);
1306 dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
1307 }
1308
1309 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
1310 assert(pCreateInfo->pRasterizationState);
1311 dynamic->depth_bias.bias =
1312 pCreateInfo->pRasterizationState->depthBiasConstantFactor;
1313 dynamic->depth_bias.clamp =
1314 pCreateInfo->pRasterizationState->depthBiasClamp;
1315 dynamic->depth_bias.slope =
1316 pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
1317 }
1318
1319 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1320 *
1321 * pColorBlendState is [...] NULL if the pipeline has rasterization
1322 * disabled or if the subpass of the render pass the pipeline is
1323 * created against does not use any color attachments.
1324 */
1325 bool uses_color_att = false;
1326 for (unsigned i = 0; i < subpass->color_count; ++i) {
1327 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1328 uses_color_att = true;
1329 break;
1330 }
1331 }
1332
1333 if (uses_color_att &&
1334 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1335 assert(pCreateInfo->pColorBlendState);
1336
1337 if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
1338 typed_memcpy(dynamic->blend_constants,
1339 pCreateInfo->pColorBlendState->blendConstants, 4);
1340 }
1341
1342 /* If there is no depthstencil attachment, then don't read
1343 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
1344 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
1345 * no need to override the depthstencil defaults in
1346 * anv_pipeline::dynamic_state when there is no depthstencil attachment.
1347 *
1348 * Section 9.2 of the Vulkan 1.0.15 spec says:
1349 *
1350 * pDepthStencilState is [...] NULL if the pipeline has rasterization
1351 * disabled or if the subpass of the render pass the pipeline is created
1352 * against does not use a depth/stencil attachment.
1353 */
1354 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
1355 subpass->depth_stencil_attachment) {
1356 assert(pCreateInfo->pDepthStencilState);
1357
1358 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
1359 dynamic->depth_bounds.min =
1360 pCreateInfo->pDepthStencilState->minDepthBounds;
1361 dynamic->depth_bounds.max =
1362 pCreateInfo->pDepthStencilState->maxDepthBounds;
1363 }
1364
1365 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
1366 dynamic->stencil_compare_mask.front =
1367 pCreateInfo->pDepthStencilState->front.compareMask;
1368 dynamic->stencil_compare_mask.back =
1369 pCreateInfo->pDepthStencilState->back.compareMask;
1370 }
1371
1372 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
1373 dynamic->stencil_write_mask.front =
1374 pCreateInfo->pDepthStencilState->front.writeMask;
1375 dynamic->stencil_write_mask.back =
1376 pCreateInfo->pDepthStencilState->back.writeMask;
1377 }
1378
1379 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
1380 dynamic->stencil_reference.front =
1381 pCreateInfo->pDepthStencilState->front.reference;
1382 dynamic->stencil_reference.back =
1383 pCreateInfo->pDepthStencilState->back.reference;
1384 }
1385 }
1386
1387 pipeline->dynamic_state_mask = states;
1388 }
1389
1390 static void
1391 anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
1392 {
1393 #ifdef DEBUG
1394 struct anv_render_pass *renderpass = NULL;
1395 struct anv_subpass *subpass = NULL;
1396
1397 /* Assert that all required members of VkGraphicsPipelineCreateInfo are
1398 * present. See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
1399 */
1400 assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1401
1402 renderpass = anv_render_pass_from_handle(info->renderPass);
1403 assert(renderpass);
1404
1405 assert(info->subpass < renderpass->subpass_count);
1406 subpass = &renderpass->subpasses[info->subpass];
1407
1408 assert(info->stageCount >= 1);
1409 assert(info->pVertexInputState);
1410 assert(info->pInputAssemblyState);
1411 assert(info->pRasterizationState);
1412 if (!info->pRasterizationState->rasterizerDiscardEnable) {
1413 assert(info->pViewportState);
1414 assert(info->pMultisampleState);
1415
1416 if (subpass && subpass->depth_stencil_attachment)
1417 assert(info->pDepthStencilState);
1418
1419 if (subpass && subpass->color_count > 0) {
1420 bool all_color_unused = true;
1421 for (int i = 0; i < subpass->color_count; i++) {
1422 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
1423 all_color_unused = false;
1424 }
1425 /* pColorBlendState is ignored if the pipeline has rasterization
1426 * disabled or if the subpass of the render pass the pipeline is
1427 * created against does not use any color attachments.
1428 */
1429 assert(info->pColorBlendState || all_color_unused);
1430 }
1431 }
1432
1433 for (uint32_t i = 0; i < info->stageCount; ++i) {
1434 switch (info->pStages[i].stage) {
1435 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
1436 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
1437 assert(info->pTessellationState);
1438 break;
1439 default:
1440 break;
1441 }
1442 }
1443 #endif
1444 }
1445
1446 /**
1447 * Calculate the desired L3 partitioning based on the current state of the
1448 * pipeline. For now this simply returns the conservative defaults calculated
1449 * by get_default_l3_weights(), but we could probably do better by gathering
1450 * more statistics from the pipeline state (e.g. guess of expected URB usage
1451 * and bound surfaces), or by using feed-back from performance counters.
1452 */
1453 void
1454 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
1455 {
1456 const struct gen_device_info *devinfo = &pipeline->device->info;
1457
1458 const struct gen_l3_weights w =
1459 gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);
1460
1461 pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
1462 pipeline->urb.total_size =
1463 gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
1464 }
1465
1466 VkResult
1467 anv_pipeline_init(struct anv_pipeline *pipeline,
1468 struct anv_device *device,
1469 struct anv_pipeline_cache *cache,
1470 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1471 const VkAllocationCallbacks *alloc)
1472 {
1473 VkResult result;
1474
1475 anv_pipeline_validate_create_info(pCreateInfo);
1476
1477 if (alloc == NULL)
1478 alloc = &device->alloc;
1479
1480 pipeline->device = device;
1481
1482 ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
1483 assert(pCreateInfo->subpass < render_pass->subpass_count);
1484 pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];
1485
1486 result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
1487 if (result != VK_SUCCESS)
1488 return result;
1489
1490 pipeline->batch.alloc = alloc;
1491 pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
1492 pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
1493 pipeline->batch.relocs = &pipeline->batch_relocs;
1494 pipeline->batch.status = VK_SUCCESS;
1495
1496 copy_non_dynamic_state(pipeline, pCreateInfo);
1497 pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
1498 pCreateInfo->pRasterizationState->depthClampEnable;
1499
1500 /* Previously we enabled depth clipping when !depthClampEnable.
1501     * DepthClipStateCreateInfo now makes depth clipping explicit, so if the
1502     * clipping info is available, use its enable value to determine clipping;
1503     * otherwise fall back to the previous !depthClampEnable logic.
1504 */
1505 const VkPipelineRasterizationDepthClipStateCreateInfoEXT *clip_info =
1506 vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
1507 PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
1508 pipeline->depth_clip_enable = clip_info ? clip_info->depthClipEnable : !pipeline->depth_clamp_enable;
1509
1510 pipeline->sample_shading_enable = pCreateInfo->pMultisampleState &&
1511 pCreateInfo->pMultisampleState->sampleShadingEnable;
1512
1513 pipeline->needs_data_cache = false;
1514
1515 /* When we free the pipeline, we detect stages based on the NULL status
1516 * of various prog_data pointers. Make them NULL by default.
1517 */
1518 memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
1519
1520 result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
1521 if (result != VK_SUCCESS) {
1522 anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
1523 return result;
1524 }
1525
1526 assert(pipeline->shaders[MESA_SHADER_VERTEX]);
1527
1528 anv_pipeline_setup_l3_config(pipeline, false);
1529
1530 const VkPipelineVertexInputStateCreateInfo *vi_info =
1531 pCreateInfo->pVertexInputState;
1532
1533 const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;
1534
1535 pipeline->vb_used = 0;
1536 for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
1537 const VkVertexInputAttributeDescription *desc =
1538 &vi_info->pVertexAttributeDescriptions[i];
1539
1540 if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
1541 pipeline->vb_used |= 1 << desc->binding;
1542 }
1543
1544 for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
1545 const VkVertexInputBindingDescription *desc =
1546 &vi_info->pVertexBindingDescriptions[i];
1547
1548 pipeline->vb[desc->binding].stride = desc->stride;
1549
1550 /* Step rate is programmed per vertex element (attribute), not
1551 * binding. Set up a map of which bindings step per instance, for
1552 * reference by vertex element setup. */
1553 switch (desc->inputRate) {
1554 default:
1555 case VK_VERTEX_INPUT_RATE_VERTEX:
1556 pipeline->vb[desc->binding].instanced = false;
1557 break;
1558 case VK_VERTEX_INPUT_RATE_INSTANCE:
1559 pipeline->vb[desc->binding].instanced = true;
1560 break;
1561 }
1562
1563 pipeline->vb[desc->binding].instance_divisor = 1;
1564 }
1565
1566 const VkPipelineVertexInputDivisorStateCreateInfoEXT *vi_div_state =
1567 vk_find_struct_const(vi_info->pNext,
1568 PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
1569 if (vi_div_state) {
1570 for (uint32_t i = 0; i < vi_div_state->vertexBindingDivisorCount; i++) {
1571 const VkVertexInputBindingDivisorDescriptionEXT *desc =
1572 &vi_div_state->pVertexBindingDivisors[i];
1573
1574 pipeline->vb[desc->binding].instance_divisor = desc->divisor;
1575 }
1576 }
1577
1578 /* Our implementation of VK_KHR_multiview uses instancing to draw the
1579 * different views. If the client asks for instancing, we need to multiply
1580     * the instance divisor by the number of views to ensure that we repeat the
1581 * client's per-instance data once for each view.
1582 */
1583 if (pipeline->subpass->view_mask) {
1584 const uint32_t view_count = anv_subpass_view_count(pipeline->subpass);
1585 for (uint32_t vb = 0; vb < MAX_VBS; vb++) {
1586 if (pipeline->vb[vb].instanced)
1587 pipeline->vb[vb].instance_divisor *= view_count;
1588 }
1589 }
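   /* For example (illustrative), with a view mask of 0x3 (2 views) an
    * application divisor of 1 becomes 2: each client instance is expanded to
    * two hardware instances (one per view), and doubling the divisor keeps
    * per-instance attributes advancing once per client instance rather than
    * once per hardware instance.
    */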
1590
1591 const VkPipelineInputAssemblyStateCreateInfo *ia_info =
1592 pCreateInfo->pInputAssemblyState;
1593 const VkPipelineTessellationStateCreateInfo *tess_info =
1594 pCreateInfo->pTessellationState;
1595 pipeline->primitive_restart = ia_info->primitiveRestartEnable;
1596
1597 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
1598 pipeline->topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
1599 else
1600 pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
1601
1602 return VK_SUCCESS;
1603 }