anv: Implement VK_EXT_buffer_device_address
[mesa.git] / src / intel / vulkan / anv_pipeline.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "util/mesa-sha1.h"
31 #include "common/gen_l3_config.h"
32 #include "anv_private.h"
33 #include "compiler/brw_nir.h"
34 #include "anv_nir.h"
35 #include "nir/nir_xfb_info.h"
36 #include "spirv/nir_spirv.h"
37 #include "vk_util.h"
38
39 /* Needed for SWIZZLE macros */
40 #include "program/prog_instruction.h"
41
42 // Shader functions
43
44 VkResult anv_CreateShaderModule(
45 VkDevice _device,
46 const VkShaderModuleCreateInfo* pCreateInfo,
47 const VkAllocationCallbacks* pAllocator,
48 VkShaderModule* pShaderModule)
49 {
50 ANV_FROM_HANDLE(anv_device, device, _device);
51 struct anv_shader_module *module;
52
53 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
54 assert(pCreateInfo->flags == 0);
55
56 module = vk_alloc2(&device->alloc, pAllocator,
57 sizeof(*module) + pCreateInfo->codeSize, 8,
58 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
59 if (module == NULL)
60 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
61
62 module->size = pCreateInfo->codeSize;
63 memcpy(module->data, pCreateInfo->pCode, module->size);
64
65 _mesa_sha1_compute(module->data, module->size, module->sha1);
66
67 *pShaderModule = anv_shader_module_to_handle(module);
68
69 return VK_SUCCESS;
70 }
71
72 void anv_DestroyShaderModule(
73 VkDevice _device,
74 VkShaderModule _module,
75 const VkAllocationCallbacks* pAllocator)
76 {
77 ANV_FROM_HANDLE(anv_device, device, _device);
78 ANV_FROM_HANDLE(anv_shader_module, module, _module);
79
80 if (!module)
81 return;
82
83 vk_free2(&device->alloc, pAllocator, module);
84 }
85
86 #define SPIR_V_MAGIC_NUMBER 0x07230203
87
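/* Map from gl_shader_stage to the INTEL_DEBUG flag that enables dumping the
 * NIR for that stage (see the nir_print_shader call below).
 */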
88 static const uint64_t stage_to_debug[] = {
89 [MESA_SHADER_VERTEX] = DEBUG_VS,
90 [MESA_SHADER_TESS_CTRL] = DEBUG_TCS,
91 [MESA_SHADER_TESS_EVAL] = DEBUG_TES,
92 [MESA_SHADER_GEOMETRY] = DEBUG_GS,
93 [MESA_SHADER_FRAGMENT] = DEBUG_WM,
94 [MESA_SHADER_COMPUTE] = DEBUG_CS,
95 };
96
97 /* Eventually, this will become part of anv_CreateShaderModule. Unfortunately,
98 * we can't do that yet because we don't have the ability to copy nir.
99 */
100 static nir_shader *
101 anv_shader_compile_to_nir(struct anv_device *device,
102 void *mem_ctx,
103 const struct anv_shader_module *module,
104 const char *entrypoint_name,
105 gl_shader_stage stage,
106 const VkSpecializationInfo *spec_info)
107 {
108 const struct anv_physical_device *pdevice =
109 &device->instance->physicalDevice;
110 const struct brw_compiler *compiler = pdevice->compiler;
111 const nir_shader_compiler_options *nir_options =
112 compiler->glsl_compiler_options[stage].NirOptions;
113
114 uint32_t *spirv = (uint32_t *) module->data;
115 assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
116 assert(module->size % 4 == 0);
117
118 uint32_t num_spec_entries = 0;
119 struct nir_spirv_specialization *spec_entries = NULL;
120 if (spec_info && spec_info->mapEntryCount > 0) {
121 num_spec_entries = spec_info->mapEntryCount;
122 spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
123 for (uint32_t i = 0; i < num_spec_entries; i++) {
124 VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
125 const void *data = spec_info->pData + entry.offset;
126 assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
127
128 spec_entries[i].id = spec_info->pMapEntries[i].constantID;
129 if (entry.size == 8)
130 spec_entries[i].data64 = *(const uint64_t *)data;
131 else
132 spec_entries[i].data32 = *(const uint32_t *)data;
133 }
134 }
135
136 struct spirv_to_nir_options spirv_options = {
137 .lower_workgroup_access_to_offsets = true,
138 .caps = {
139 .device_group = true,
140 .draw_parameters = true,
141 .float64 = pdevice->info.gen >= 8,
142 .geometry_streams = true,
143 .image_write_without_format = true,
144 .int16 = pdevice->info.gen >= 8,
145 .int64 = pdevice->info.gen >= 8,
146 .min_lod = true,
147 .multiview = true,
148 .physical_storage_buffer_address = pdevice->info.gen >= 8 &&
149 pdevice->use_softpin,
150 .post_depth_coverage = pdevice->info.gen >= 9,
151 .shader_viewport_index_layer = true,
152 .stencil_export = pdevice->info.gen >= 9,
153 .storage_8bit = pdevice->info.gen >= 8,
154 .storage_16bit = pdevice->info.gen >= 8,
155 .subgroup_arithmetic = true,
156 .subgroup_basic = true,
157 .subgroup_ballot = true,
158 .subgroup_quad = true,
159 .subgroup_shuffle = true,
160 .subgroup_vote = true,
161 .tessellation = true,
162 .transform_feedback = pdevice->info.gen >= 8,
163 .variable_pointers = true,
164 },
165 .ubo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
166 .ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
167 .phys_ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT64, 1),
168 .push_const_ptr_type = glsl_uint_type(),
169 .shared_ptr_type = glsl_uint_type(),
170 };
171
172 nir_function *entry_point =
173 spirv_to_nir(spirv, module->size / 4,
174 spec_entries, num_spec_entries,
175 stage, entrypoint_name, &spirv_options, nir_options);
176 nir_shader *nir = entry_point->shader;
177 assert(nir->info.stage == stage);
178 nir_validate_shader(nir, "after spirv_to_nir");
179 ralloc_steal(mem_ctx, nir);
180
181 free(spec_entries);
182
183 if (unlikely(INTEL_DEBUG & stage_to_debug[stage])) {
184 fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
185 gl_shader_stage_name(stage));
186 nir_print_shader(nir, stderr);
187 }
188
189 /* We have to lower away local constant initializers right before we
190 * inline functions. That way they get properly initialized at the top
191 * of the function and not at the top of its caller.
192 */
193 NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
194 NIR_PASS_V(nir, nir_lower_returns);
195 NIR_PASS_V(nir, nir_inline_functions);
196 NIR_PASS_V(nir, nir_opt_deref);
197
198 /* Pick off the single entrypoint that we want */
199 foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
200 if (func != entry_point)
201 exec_node_remove(&func->node);
202 }
203 assert(exec_list_length(&nir->functions) == 1);
204
205 /* Now that we've deleted all but the main function, we can go ahead and
206 * lower the rest of the constant initializers. We do this here so that
207 * nir_remove_dead_variables and split_per_member_structs below see the
208 * corresponding stores.
209 */
210 NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
211
212 /* Split member structs. We do this before lower_io_to_temporaries so that
213 * it doesn't lower system values to temporaries by accident.
214 */
215 NIR_PASS_V(nir, nir_split_var_copies);
216 NIR_PASS_V(nir, nir_split_per_member_structs);
217
218 NIR_PASS_V(nir, nir_remove_dead_variables,
219 nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
220
221 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo | nir_var_mem_ssbo,
222 nir_address_format_vk_index_offset);
223
224 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
225 nir_address_format_64bit_global);
226
227 NIR_PASS_V(nir, nir_propagate_invariant);
228 NIR_PASS_V(nir, nir_lower_io_to_temporaries,
229 entry_point->impl, true, false);
230
231 /* Vulkan uses the separate-shader linking model */
232 nir->info.separate_shader = true;
233
234 nir = brw_preprocess_nir(compiler, nir);
235
236 return nir;
237 }
238
239 void anv_DestroyPipeline(
240 VkDevice _device,
241 VkPipeline _pipeline,
242 const VkAllocationCallbacks* pAllocator)
243 {
244 ANV_FROM_HANDLE(anv_device, device, _device);
245 ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
246
247 if (!pipeline)
248 return;
249
250 anv_reloc_list_finish(&pipeline->batch_relocs,
251 pAllocator ? pAllocator : &device->alloc);
252 if (pipeline->blend_state.map)
253 anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
254
255 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
256 if (pipeline->shaders[s])
257 anv_shader_bin_unref(device, pipeline->shaders[s]);
258 }
259
260 vk_free2(&device->alloc, pAllocator, pipeline);
261 }
262
263 static const uint32_t vk_to_gen_primitive_type[] = {
264 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
265 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
266 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
267 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
268 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
269 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
270 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
271 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
272 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
273 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
274 };
275
276 static void
277 populate_sampler_prog_key(const struct gen_device_info *devinfo,
278 struct brw_sampler_prog_key_data *key)
279 {
280 /* Almost all multisampled textures are compressed. The only time when we
281 * don't compress a multisampled texture is for 16x MSAA with a surface
282 * width greater than 8k which is a bit of an edge case. Since the sampler
283 * just ignores the MCS parameter to ld2ms when MCS is disabled, it's safe
284 * to tell the compiler to always assume compression.
285 */
286 key->compressed_multisample_layout_mask = ~0;
287
288 /* SkyLake added support for 16x MSAA. With this came a new message for
289 * reading from a 16x MSAA surface with compression. The new message was
290 * needed because now the MCS data is 64 bits instead of 32 or lower as is
291 * the case for 8x, 4x, and 2x. The key->msaa_16 bit-field controls which
292 * message we use. Fortunately, the 16x message works for 8x, 4x, and 2x
293 * so we can just use it unconditionally. This may not be quite as
294 * efficient but it saves us from recompiling.
295 */
296 if (devinfo->gen >= 9)
297 key->msaa_16 = ~0;
298
299 /* XXX: Handle texture swizzle on HSW- */
300 for (int i = 0; i < MAX_SAMPLERS; i++) {
301 /* Assume color sampler, no swizzling. (Works for BDW+) */
302 key->swizzles[i] = SWIZZLE_XYZW;
303 }
304 }
305
306 static void
307 populate_vs_prog_key(const struct gen_device_info *devinfo,
308 struct brw_vs_prog_key *key)
309 {
310 memset(key, 0, sizeof(*key));
311
312 populate_sampler_prog_key(devinfo, &key->tex);
313
314 /* XXX: Handle vertex input work-arounds */
315
316 /* XXX: Handle sampler_prog_key */
317 }
318
319 static void
320 populate_tcs_prog_key(const struct gen_device_info *devinfo,
321 unsigned input_vertices,
322 struct brw_tcs_prog_key *key)
323 {
324 memset(key, 0, sizeof(*key));
325
326 populate_sampler_prog_key(devinfo, &key->tex);
327
328 key->input_vertices = input_vertices;
329 }
330
331 static void
332 populate_tes_prog_key(const struct gen_device_info *devinfo,
333 struct brw_tes_prog_key *key)
334 {
335 memset(key, 0, sizeof(*key));
336
337 populate_sampler_prog_key(devinfo, &key->tex);
338 }
339
340 static void
341 populate_gs_prog_key(const struct gen_device_info *devinfo,
342 struct brw_gs_prog_key *key)
343 {
344 memset(key, 0, sizeof(*key));
345
346 populate_sampler_prog_key(devinfo, &key->tex);
347 }
348
349 static void
350 populate_wm_prog_key(const struct gen_device_info *devinfo,
351 const struct anv_subpass *subpass,
352 const VkPipelineMultisampleStateCreateInfo *ms_info,
353 struct brw_wm_prog_key *key)
354 {
355 memset(key, 0, sizeof(*key));
356
357 populate_sampler_prog_key(devinfo, &key->tex);
358
359 /* We set this to 0 here and set it to the actual value before we call
360 * brw_compile_fs.
361 */
362 key->input_slots_valid = 0;
363
364 /* Vulkan doesn't specify a default */
365 key->high_quality_derivatives = false;
366
367 /* XXX Vulkan doesn't appear to specify */
368 key->clamp_fragment_color = false;
369
370 assert(subpass->color_count <= MAX_RTS);
371 for (uint32_t i = 0; i < subpass->color_count; i++) {
372 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
373 key->color_outputs_valid |= (1 << i);
374 }
375
376 key->nr_color_regions = util_bitcount(key->color_outputs_valid);
377
378 key->replicate_alpha = key->nr_color_regions > 1 &&
379 ms_info && ms_info->alphaToCoverageEnable;
380
381 if (ms_info) {
382 /* We should probably pull this out of the shader, but it's fairly
383 * harmless to compute it and then let dead-code take care of it.
384 */
385 if (ms_info->rasterizationSamples > 1) {
386 key->persample_interp =
387 (ms_info->minSampleShading * ms_info->rasterizationSamples) > 1;
388 key->multisample_fbo = true;
389 }
390
391 key->frag_coord_adds_sample_pos = ms_info->sampleShadingEnable;
392 }
393 }
394
395 static void
396 populate_cs_prog_key(const struct gen_device_info *devinfo,
397 struct brw_cs_prog_key *key)
398 {
399 memset(key, 0, sizeof(*key));
400
401 populate_sampler_prog_key(devinfo, &key->tex);
402 }
403
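/* Per-stage scratch state used while building a pipeline: the source module
 * and entrypoint, the shader and cache hashes, the lowered NIR, the bind map
 * being filled out, and the compiled prog_data.
 */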
404 struct anv_pipeline_stage {
405 gl_shader_stage stage;
406
407 const struct anv_shader_module *module;
408 const char *entrypoint;
409 const VkSpecializationInfo *spec_info;
410
411 unsigned char shader_sha1[20];
412
413 union brw_any_prog_key key;
414
415 struct {
416 gl_shader_stage stage;
417 unsigned char sha1[20];
418 } cache_key;
419
420 nir_shader *nir;
421
422 struct anv_pipeline_binding surface_to_descriptor[256];
423 struct anv_pipeline_binding sampler_to_descriptor[256];
424 struct anv_pipeline_bind_map bind_map;
425
426 union brw_any_prog_data prog_data;
427 };
428
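/* Hash everything that identifies a single shader stage: the module's SHA-1,
 * the entrypoint name, the stage, and any specialization data. The result is
 * used both as the NIR cache key and as an input to the pipeline hash.
 */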
429 static void
430 anv_pipeline_hash_shader(const struct anv_shader_module *module,
431 const char *entrypoint,
432 gl_shader_stage stage,
433 const VkSpecializationInfo *spec_info,
434 unsigned char *sha1_out)
435 {
436 struct mesa_sha1 ctx;
437 _mesa_sha1_init(&ctx);
438
439 _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
440 _mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
441 _mesa_sha1_update(&ctx, &stage, sizeof(stage));
442 if (spec_info) {
443 _mesa_sha1_update(&ctx, spec_info->pMapEntries,
444 spec_info->mapEntryCount *
445 sizeof(*spec_info->pMapEntries));
446 _mesa_sha1_update(&ctx, spec_info->pData,
447 spec_info->dataSize);
448 }
449
450 _mesa_sha1_final(&ctx, sha1_out);
451 }
452
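/* Compute the pipeline-cache key for a graphics pipeline: the subpass view
 * mask, the pipeline layout, robust buffer access, and every active stage's
 * shader hash and program key.
 */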
453 static void
454 anv_pipeline_hash_graphics(struct anv_pipeline *pipeline,
455 struct anv_pipeline_layout *layout,
456 struct anv_pipeline_stage *stages,
457 unsigned char *sha1_out)
458 {
459 struct mesa_sha1 ctx;
460 _mesa_sha1_init(&ctx);
461
462 _mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
463 sizeof(pipeline->subpass->view_mask));
464
465 if (layout)
466 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
467
468 const bool rba = pipeline->device->robust_buffer_access;
469 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
470
471 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
472 if (stages[s].entrypoint) {
473 _mesa_sha1_update(&ctx, stages[s].shader_sha1,
474 sizeof(stages[s].shader_sha1));
475 _mesa_sha1_update(&ctx, &stages[s].key, brw_prog_key_size(s));
476 }
477 }
478
479 _mesa_sha1_final(&ctx, sha1_out);
480 }
481
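/* Like anv_pipeline_hash_graphics, but for the single compute stage: layout,
 * robust buffer access, shader hash, and the CS program key.
 */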
482 static void
483 anv_pipeline_hash_compute(struct anv_pipeline *pipeline,
484 struct anv_pipeline_layout *layout,
485 struct anv_pipeline_stage *stage,
486 unsigned char *sha1_out)
487 {
488 struct mesa_sha1 ctx;
489 _mesa_sha1_init(&ctx);
490
491 if (layout)
492 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
493
494 const bool rba = pipeline->device->robust_buffer_access;
495 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
496
497 _mesa_sha1_update(&ctx, stage->shader_sha1,
498 sizeof(stage->shader_sha1));
499 _mesa_sha1_update(&ctx, &stage->key.cs, sizeof(stage->key.cs));
500
501 _mesa_sha1_final(&ctx, sha1_out);
502 }
503
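/* Fetch the NIR for a stage, preferring the NIR cache keyed by shader_sha1.
 * On a miss, run anv_shader_compile_to_nir (spirv_to_nir plus the early
 * passes) and add the result to the cache.
 */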
504 static nir_shader *
505 anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
506 struct anv_pipeline_cache *cache,
507 void *mem_ctx,
508 struct anv_pipeline_stage *stage)
509 {
510 const struct brw_compiler *compiler =
511 pipeline->device->instance->physicalDevice.compiler;
512 const nir_shader_compiler_options *nir_options =
513 compiler->glsl_compiler_options[stage->stage].NirOptions;
514 nir_shader *nir;
515
516 nir = anv_device_search_for_nir(pipeline->device, cache,
517 nir_options,
518 stage->shader_sha1,
519 mem_ctx);
520 if (nir) {
521 assert(nir->info.stage == stage->stage);
522 return nir;
523 }
524
525 nir = anv_shader_compile_to_nir(pipeline->device,
526 mem_ctx,
527 stage->module,
528 stage->entrypoint,
529 stage->stage,
530 stage->spec_info);
531 if (nir) {
532 anv_device_upload_nir(pipeline->device, cache, nir, stage->shader_sha1);
533 return nir;
534 }
535
536 return NULL;
537 }
538
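/* Run the anv-specific lowering passes on a stage's NIR: frag-coord and
 * input-attachment lowering for fragment shaders, Y'CbCr conversion, push
 * constants, multiview, and image load/store, then apply the pipeline
 * layout (filling the bind map) and analyze the UBO push ranges.
 */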
539 static void
540 anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
541 void *mem_ctx,
542 struct anv_pipeline_stage *stage,
543 struct anv_pipeline_layout *layout)
544 {
545 const struct brw_compiler *compiler =
546 pipeline->device->instance->physicalDevice.compiler;
547
548 struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
549 nir_shader *nir = stage->nir;
550
551 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
552 NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);
553 NIR_PASS_V(nir, anv_nir_lower_input_attachments);
554 }
555
556 NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
557
558 NIR_PASS_V(nir, anv_nir_lower_push_constants);
559
560 if (nir->info.stage != MESA_SHADER_COMPUTE)
561 NIR_PASS_V(nir, anv_nir_lower_multiview, pipeline->subpass->view_mask);
562
563 if (nir->info.stage == MESA_SHADER_COMPUTE)
564 prog_data->total_shared = nir->num_shared;
565
566 nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
567
568 if (nir->num_uniforms > 0) {
569 assert(prog_data->nr_params == 0);
570
571 /* If the shader uses any push constants at all, we'll just give
572 * them the maximum possible number
573 */
574 assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
575 nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE;
576 prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
577 prog_data->param = ralloc_array(mem_ctx, uint32_t, prog_data->nr_params);
578
579 /* We now set the param values to be offsets into an
580 * anv_push_constants structure. Since the compiler doesn't
581 * actually dereference any of the gl_constant_value pointers in the
582 * params array, it doesn't really matter what we put here.
583 */
584 struct anv_push_constants *null_data = NULL;
585 /* Fill out the push constants section of the param array */
586 for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++) {
587 prog_data->param[i] = ANV_PARAM_PUSH(
588 (uintptr_t)&null_data->client_data[i * sizeof(float)]);
589 }
590 }
591
592 if (nir->info.num_ssbos > 0 || nir->info.num_images > 0)
593 pipeline->needs_data_cache = true;
594
595 NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo);
596
597 /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
598 if (layout) {
599 anv_nir_apply_pipeline_layout(&pipeline->device->instance->physicalDevice,
600 pipeline->device->robust_buffer_access,
601 layout, nir, prog_data,
602 &stage->bind_map);
603 NIR_PASS_V(nir, nir_opt_constant_folding);
604 }
605
606 if (nir->info.stage != MESA_SHADER_COMPUTE)
607 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
608
609 assert(nir->num_uniforms == prog_data->nr_params * 4);
610
611 stage->nir = nir;
612 }
613
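/* Point every binding table section at the same start index. anv lays out
 * one flat binding table through the bind map rather than per-section
 * blocks, so only the overall bias matters; the bias just reserves entries
 * at the front of the table (the compute path passes 1).
 */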
614 static void
615 anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
616 {
617 prog_data->binding_table.size_bytes = 0;
618 prog_data->binding_table.texture_start = bias;
619 prog_data->binding_table.gather_texture_start = bias;
620 prog_data->binding_table.ubo_start = bias;
621 prog_data->binding_table.ssbo_start = bias;
622 prog_data->binding_table.image_start = bias;
623 }
624
625 static void
626 anv_pipeline_link_vs(const struct brw_compiler *compiler,
627 struct anv_pipeline_stage *vs_stage,
628 struct anv_pipeline_stage *next_stage)
629 {
630 anv_fill_binding_table(&vs_stage->prog_data.vs.base.base, 0);
631
632 if (next_stage)
633 brw_nir_link_shaders(compiler, &vs_stage->nir, &next_stage->nir);
634 }
635
636 static const unsigned *
637 anv_pipeline_compile_vs(const struct brw_compiler *compiler,
638 void *mem_ctx,
639 struct anv_pipeline_stage *vs_stage)
640 {
641 brw_compute_vue_map(compiler->devinfo,
642 &vs_stage->prog_data.vs.base.vue_map,
643 vs_stage->nir->info.outputs_written,
644 vs_stage->nir->info.separate_shader);
645
646 return brw_compile_vs(compiler, NULL, mem_ctx, &vs_stage->key.vs,
647 &vs_stage->prog_data.vs, vs_stage->nir, -1, NULL);
648 }
649
650 static void
651 merge_tess_info(struct shader_info *tes_info,
652 const struct shader_info *tcs_info)
653 {
654 /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
655 *
656 * "PointMode. Controls generation of points rather than triangles
657 * or lines. This functionality defaults to disabled, and is
658 * enabled if either shader stage includes the execution mode."
659 *
660 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
661 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
662 * and OutputVertices, it says:
663 *
664 * "One mode must be set in at least one of the tessellation
665 * shader stages."
666 *
667 * So, the fields can be set in either the TCS or TES, but they must
668 * agree if set in both. Our backend looks at TES, so bitwise-or in
669 * the values from the TCS.
670 */
671 assert(tcs_info->tess.tcs_vertices_out == 0 ||
672 tes_info->tess.tcs_vertices_out == 0 ||
673 tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
674 tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
675
676 assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
677 tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
678 tcs_info->tess.spacing == tes_info->tess.spacing);
679 tes_info->tess.spacing |= tcs_info->tess.spacing;
680
681 assert(tcs_info->tess.primitive_mode == 0 ||
682 tes_info->tess.primitive_mode == 0 ||
683 tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
684 tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
685 tes_info->tess.ccw |= tcs_info->tess.ccw;
686 tes_info->tess.point_mode |= tcs_info->tess.point_mode;
687 }
688
689 static void
690 anv_pipeline_link_tcs(const struct brw_compiler *compiler,
691 struct anv_pipeline_stage *tcs_stage,
692 struct anv_pipeline_stage *tes_stage)
693 {
694 assert(tes_stage && tes_stage->stage == MESA_SHADER_TESS_EVAL);
695
696 anv_fill_binding_table(&tcs_stage->prog_data.tcs.base.base, 0);
697
698 brw_nir_link_shaders(compiler, &tcs_stage->nir, &tes_stage->nir);
699
700 nir_lower_patch_vertices(tes_stage->nir,
701 tcs_stage->nir->info.tess.tcs_vertices_out,
702 NULL);
703
704 /* Copy TCS info into the TES info */
705 merge_tess_info(&tes_stage->nir->info, &tcs_stage->nir->info);
706
707 anv_fill_binding_table(&tcs_stage->prog_data.tcs.base.base, 0);
708 anv_fill_binding_table(&tes_stage->prog_data.tes.base.base, 0);
709
710 /* Whacking the key after cache lookup is a bit sketchy, but all of
711 * this comes from the SPIR-V, which is part of the hash used for the
712 * pipeline cache. So it should be safe.
713 */
714 tcs_stage->key.tcs.tes_primitive_mode =
715 tes_stage->nir->info.tess.primitive_mode;
716 tcs_stage->key.tcs.quads_workaround =
717 compiler->devinfo->gen < 9 &&
718 tes_stage->nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
719 tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
720 }
721
722 static const unsigned *
723 anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
724 void *mem_ctx,
725 struct anv_pipeline_stage *tcs_stage,
726 struct anv_pipeline_stage *prev_stage)
727 {
728 tcs_stage->key.tcs.outputs_written =
729 tcs_stage->nir->info.outputs_written;
730 tcs_stage->key.tcs.patch_outputs_written =
731 tcs_stage->nir->info.patch_outputs_written;
732
733 return brw_compile_tcs(compiler, NULL, mem_ctx, &tcs_stage->key.tcs,
734 &tcs_stage->prog_data.tcs, tcs_stage->nir,
735 -1, NULL);
736 }
737
738 static void
739 anv_pipeline_link_tes(const struct brw_compiler *compiler,
740 struct anv_pipeline_stage *tes_stage,
741 struct anv_pipeline_stage *next_stage)
742 {
743 anv_fill_binding_table(&tes_stage->prog_data.tes.base.base, 0);
744
745 if (next_stage)
746 brw_nir_link_shaders(compiler, &tes_stage->nir, &next_stage->nir);
747 }
748
749 static const unsigned *
750 anv_pipeline_compile_tes(const struct brw_compiler *compiler,
751 void *mem_ctx,
752 struct anv_pipeline_stage *tes_stage,
753 struct anv_pipeline_stage *tcs_stage)
754 {
755 tes_stage->key.tes.inputs_read =
756 tcs_stage->nir->info.outputs_written;
757 tes_stage->key.tes.patch_inputs_read =
758 tcs_stage->nir->info.patch_outputs_written;
759
760 return brw_compile_tes(compiler, NULL, mem_ctx, &tes_stage->key.tes,
761 &tcs_stage->prog_data.tcs.base.vue_map,
762 &tes_stage->prog_data.tes, tes_stage->nir,
763 NULL, -1, NULL);
764 }
765
766 static void
767 anv_pipeline_link_gs(const struct brw_compiler *compiler,
768 struct anv_pipeline_stage *gs_stage,
769 struct anv_pipeline_stage *next_stage)
770 {
771 anv_fill_binding_table(&gs_stage->prog_data.gs.base.base, 0);
772
773 if (next_stage)
774 brw_nir_link_shaders(compiler, &gs_stage->nir, &next_stage->nir);
775 }
776
777 static const unsigned *
778 anv_pipeline_compile_gs(const struct brw_compiler *compiler,
779 void *mem_ctx,
780 struct anv_pipeline_stage *gs_stage,
781 struct anv_pipeline_stage *prev_stage)
782 {
783 brw_compute_vue_map(compiler->devinfo,
784 &gs_stage->prog_data.gs.base.vue_map,
785 gs_stage->nir->info.outputs_written,
786 gs_stage->nir->info.separate_shader);
787
788 return brw_compile_gs(compiler, NULL, mem_ctx, &gs_stage->key.gs,
789 &gs_stage->prog_data.gs, gs_stage->nir,
790 NULL, -1, NULL);
791 }
792
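/* Compact the fragment shader's color outputs: build a render-target binding
 * for every output that is actually written and valid in the key, re-number
 * the output variables to match, move dead outputs to locals, and fall back
 * to a single null render target when nothing is written.
 */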
793 static void
794 anv_pipeline_link_fs(const struct brw_compiler *compiler,
795 struct anv_pipeline_stage *stage)
796 {
797 unsigned num_rts = 0;
798 const int max_rt = FRAG_RESULT_DATA7 - FRAG_RESULT_DATA0 + 1;
799 struct anv_pipeline_binding rt_bindings[max_rt];
800 nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
801 int rt_to_bindings[max_rt];
802 memset(rt_to_bindings, -1, sizeof(rt_to_bindings));
803 bool rt_used[max_rt];
804 memset(rt_used, 0, sizeof(rt_used));
805
806 /* Flag used render targets */
807 nir_foreach_variable_safe(var, &stage->nir->outputs) {
808 if (var->data.location < FRAG_RESULT_DATA0)
809 continue;
810
811 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
812 /* Unused or out-of-bounds */
813 if (rt >= MAX_RTS || !(stage->key.wm.color_outputs_valid & (1 << rt)))
814 continue;
815
816 const unsigned array_len =
817 glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
818 assert(rt + array_len <= max_rt);
819
820 for (unsigned i = 0; i < array_len; i++)
821 rt_used[rt + i] = true;
822 }
823
824 /* Set new, compacted, location */
825 for (unsigned i = 0; i < max_rt; i++) {
826 if (!rt_used[i])
827 continue;
828
829 rt_to_bindings[i] = num_rts;
830 rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
831 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
832 .binding = 0,
833 .index = i,
834 };
835 num_rts++;
836 }
837
838 bool deleted_output = false;
839 nir_foreach_variable_safe(var, &stage->nir->outputs) {
840 if (var->data.location < FRAG_RESULT_DATA0)
841 continue;
842
843 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
844 if (rt >= MAX_RTS ||
845 !(stage->key.wm.color_outputs_valid & (1 << rt))) {
846 /* Unused or out-of-bounds, throw it away */
847 deleted_output = true;
848 var->data.mode = nir_var_function_temp;
849 exec_node_remove(&var->node);
850 exec_list_push_tail(&impl->locals, &var->node);
851 continue;
852 }
853
854 /* Give it the new location */
855 assert(rt_to_bindings[rt] != -1);
856 var->data.location = rt_to_bindings[rt] + FRAG_RESULT_DATA0;
857 }
858
859 if (deleted_output)
860 nir_fixup_deref_modes(stage->nir);
861
862 if (num_rts == 0) {
863 /* If we have no render targets, we need a null render target */
864 rt_bindings[0] = (struct anv_pipeline_binding) {
865 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
866 .binding = 0,
867 .index = UINT32_MAX,
868 };
869 num_rts = 1;
870 }
871
872 /* Now that we've determined the actual number of render targets, adjust
873 * the key accordingly.
874 */
875 stage->key.wm.nr_color_regions = num_rts;
876 stage->key.wm.color_outputs_valid = (1 << num_rts) - 1;
877
878 assert(num_rts <= max_rt);
879 assert(stage->bind_map.surface_count == 0);
880 typed_memcpy(stage->bind_map.surface_to_descriptor,
881 rt_bindings, num_rts);
882 stage->bind_map.surface_count += num_rts;
883
884 anv_fill_binding_table(&stage->prog_data.wm.base, 0);
885 }
886
887 static const unsigned *
888 anv_pipeline_compile_fs(const struct brw_compiler *compiler,
889 void *mem_ctx,
890 struct anv_pipeline_stage *fs_stage,
891 struct anv_pipeline_stage *prev_stage)
892 {
893 /* TODO: we could set this to 0 based on the information in nir_shader, but
894 * we need this before we call spirv_to_nir.
895 */
896 assert(prev_stage);
897 fs_stage->key.wm.input_slots_valid =
898 prev_stage->prog_data.vue.vue_map.slots_valid;
899
900 const unsigned *code =
901 brw_compile_fs(compiler, NULL, mem_ctx, &fs_stage->key.wm,
902 &fs_stage->prog_data.wm, fs_stage->nir,
903 NULL, -1, -1, -1, true, false, NULL, NULL);
904
905 if (fs_stage->key.wm.nr_color_regions == 0 &&
906 !fs_stage->prog_data.wm.has_side_effects &&
907 !fs_stage->prog_data.wm.uses_kill &&
908 fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
909 !fs_stage->prog_data.wm.computed_stencil) {
910 /* This fragment shader has no outputs and no side effects. Go ahead
911 * and return the code pointer so we don't accidentally think the
912 * compile failed, but zero out prog_data, which sets program_size to
913 * zero and disables the stage.
914 */
915 memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
916 }
917
918 return code;
919 }
920
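/* Compile all shader stages of a graphics pipeline. The stages are hashed
 * and looked up in the pipeline cache first; on a miss we get NIR for each
 * stage, link the stages back-to-front, then lower and compile them
 * front-to-back, uploading each result to the cache.
 */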
921 static VkResult
922 anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
923 struct anv_pipeline_cache *cache,
924 const VkGraphicsPipelineCreateInfo *info)
925 {
926 const struct brw_compiler *compiler =
927 pipeline->device->instance->physicalDevice.compiler;
928 struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
929
930 pipeline->active_stages = 0;
931
932 VkResult result;
933 for (uint32_t i = 0; i < info->stageCount; i++) {
934 const VkPipelineShaderStageCreateInfo *sinfo = &info->pStages[i];
935 gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
936
937 pipeline->active_stages |= sinfo->stage;
938
939 stages[stage].stage = stage;
940 stages[stage].module = anv_shader_module_from_handle(sinfo->module);
941 stages[stage].entrypoint = sinfo->pName;
942 stages[stage].spec_info = sinfo->pSpecializationInfo;
943 anv_pipeline_hash_shader(stages[stage].module,
944 stages[stage].entrypoint,
945 stage,
946 stages[stage].spec_info,
947 stages[stage].shader_sha1);
948
949 const struct gen_device_info *devinfo = &pipeline->device->info;
950 switch (stage) {
951 case MESA_SHADER_VERTEX:
952 populate_vs_prog_key(devinfo, &stages[stage].key.vs);
953 break;
954 case MESA_SHADER_TESS_CTRL:
955 populate_tcs_prog_key(devinfo,
956 info->pTessellationState->patchControlPoints,
957 &stages[stage].key.tcs);
958 break;
959 case MESA_SHADER_TESS_EVAL:
960 populate_tes_prog_key(devinfo, &stages[stage].key.tes);
961 break;
962 case MESA_SHADER_GEOMETRY:
963 populate_gs_prog_key(devinfo, &stages[stage].key.gs);
964 break;
965 case MESA_SHADER_FRAGMENT:
966 populate_wm_prog_key(devinfo, pipeline->subpass,
967 info->pMultisampleState,
968 &stages[stage].key.wm);
969 break;
970 default:
971 unreachable("Invalid graphics shader stage");
972 }
973 }
974
975 if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
976 pipeline->active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
977
978 assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
979
980 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
981
982 unsigned char sha1[20];
983 anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
984
985 unsigned found = 0;
986 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
987 if (!stages[s].entrypoint)
988 continue;
989
990 stages[s].cache_key.stage = s;
991 memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
992
993 struct anv_shader_bin *bin =
994 anv_device_search_for_kernel(pipeline->device, cache,
995 &stages[s].cache_key,
996 sizeof(stages[s].cache_key));
997 if (bin) {
998 found++;
999 pipeline->shaders[s] = bin;
1000 }
1001 }
1002
1003 if (found == __builtin_popcount(pipeline->active_stages)) {
1004 /* We found all our shaders in the cache. We're done. */
1005 goto done;
1006 } else if (found > 0) {
1007 /* We found some but not all of our shaders. This shouldn't happen
1008 * most of the time but it can if we have a partially populated
1009 * pipeline cache.
1010 */
1011 assert(found < __builtin_popcount(pipeline->active_stages));
1012
1013 vk_debug_report(&pipeline->device->instance->debug_report_callbacks,
1014 VK_DEBUG_REPORT_WARNING_BIT_EXT |
1015 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
1016 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
1017 (uint64_t)(uintptr_t)cache,
1018 0, 0, "anv",
1019 "Found a partial pipeline in the cache. This is "
1020 "most likely caused by an incomplete pipeline cache "
1021 "import or export");
1022
1023 /* We're going to have to recompile anyway, so just throw away our
1024 * references to the shaders in the cache. We'll get them out of the
1025 * cache again as part of the compilation process.
1026 */
1027 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1028 if (pipeline->shaders[s]) {
1029 anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
1030 pipeline->shaders[s] = NULL;
1031 }
1032 }
1033 }
1034
1035 void *pipeline_ctx = ralloc_context(NULL);
1036
1037 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1038 if (!stages[s].entrypoint)
1039 continue;
1040
1041 assert(stages[s].stage == s);
1042 assert(pipeline->shaders[s] == NULL);
1043
1044 stages[s].bind_map = (struct anv_pipeline_bind_map) {
1045 .surface_to_descriptor = stages[s].surface_to_descriptor,
1046 .sampler_to_descriptor = stages[s].sampler_to_descriptor
1047 };
1048
1049 stages[s].nir = anv_pipeline_stage_get_nir(pipeline, cache,
1050 pipeline_ctx,
1051 &stages[s]);
1052 if (stages[s].nir == NULL) {
1053 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1054 goto fail;
1055 }
1056 }
1057
1058 /* Walk backwards to link */
1059 struct anv_pipeline_stage *next_stage = NULL;
1060 for (int s = MESA_SHADER_STAGES - 1; s >= 0; s--) {
1061 if (!stages[s].entrypoint)
1062 continue;
1063
1064 switch (s) {
1065 case MESA_SHADER_VERTEX:
1066 anv_pipeline_link_vs(compiler, &stages[s], next_stage);
1067 break;
1068 case MESA_SHADER_TESS_CTRL:
1069 anv_pipeline_link_tcs(compiler, &stages[s], next_stage);
1070 break;
1071 case MESA_SHADER_TESS_EVAL:
1072 anv_pipeline_link_tes(compiler, &stages[s], next_stage);
1073 break;
1074 case MESA_SHADER_GEOMETRY:
1075 anv_pipeline_link_gs(compiler, &stages[s], next_stage);
1076 break;
1077 case MESA_SHADER_FRAGMENT:
1078 anv_pipeline_link_fs(compiler, &stages[s]);
1079 break;
1080 default:
1081 unreachable("Invalid graphics shader stage");
1082 }
1083
1084 next_stage = &stages[s];
1085 }
1086
1087 struct anv_pipeline_stage *prev_stage = NULL;
1088 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1089 if (!stages[s].entrypoint)
1090 continue;
1091
1092 void *stage_ctx = ralloc_context(NULL);
1093
1094 nir_xfb_info *xfb_info = NULL;
1095 if (s == MESA_SHADER_VERTEX ||
1096 s == MESA_SHADER_TESS_EVAL ||
1097 s == MESA_SHADER_GEOMETRY)
1098 xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);
1099
1100 anv_pipeline_lower_nir(pipeline, stage_ctx, &stages[s], layout);
1101
1102 const unsigned *code;
1103 switch (s) {
1104 case MESA_SHADER_VERTEX:
1105 code = anv_pipeline_compile_vs(compiler, stage_ctx, &stages[s]);
1106 break;
1107 case MESA_SHADER_TESS_CTRL:
1108 code = anv_pipeline_compile_tcs(compiler, stage_ctx,
1109 &stages[s], prev_stage);
1110 break;
1111 case MESA_SHADER_TESS_EVAL:
1112 code = anv_pipeline_compile_tes(compiler, stage_ctx,
1113 &stages[s], prev_stage);
1114 break;
1115 case MESA_SHADER_GEOMETRY:
1116 code = anv_pipeline_compile_gs(compiler, stage_ctx,
1117 &stages[s], prev_stage);
1118 break;
1119 case MESA_SHADER_FRAGMENT:
1120 code = anv_pipeline_compile_fs(compiler, stage_ctx,
1121 &stages[s], prev_stage);
1122 break;
1123 default:
1124 unreachable("Invalid graphics shader stage");
1125 }
1126 if (code == NULL) {
1127 ralloc_free(stage_ctx);
1128 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1129 goto fail;
1130 }
1131
1132 struct anv_shader_bin *bin =
1133 anv_device_upload_kernel(pipeline->device, cache,
1134 &stages[s].cache_key,
1135 sizeof(stages[s].cache_key),
1136 code, stages[s].prog_data.base.program_size,
1137 stages[s].nir->constant_data,
1138 stages[s].nir->constant_data_size,
1139 &stages[s].prog_data.base,
1140 brw_prog_data_size(s),
1141 xfb_info, &stages[s].bind_map);
1142 if (!bin) {
1143 ralloc_free(stage_ctx);
1144 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1145 goto fail;
1146 }
1147
1148 pipeline->shaders[s] = bin;
1149 ralloc_free(stage_ctx);
1150
1151 prev_stage = &stages[s];
1152 }
1153
1154 ralloc_free(pipeline_ctx);
1155
1156 done:
1157
1158 if (pipeline->shaders[MESA_SHADER_FRAGMENT] &&
1159 pipeline->shaders[MESA_SHADER_FRAGMENT]->prog_data->program_size == 0) {
1160 /* This can happen if we decided to implicitly disable the fragment
1161 * shader. See anv_pipeline_compile_fs().
1162 */
1163 anv_shader_bin_unref(pipeline->device,
1164 pipeline->shaders[MESA_SHADER_FRAGMENT]);
1165 pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
1166 pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
1167 }
1168
1169 return VK_SUCCESS;
1170
1171 fail:
1172 ralloc_free(pipeline_ctx);
1173
1174 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1175 if (pipeline->shaders[s])
1176 anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
1177 }
1178
1179 return result;
1180 }
1181
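/* Compile the single compute stage of a compute pipeline, going through the
 * same hash / cache-lookup path as the graphics stages and, on a miss,
 * lowering the NIR, compiling with brw_compile_cs, and uploading the result.
 */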
1182 VkResult
1183 anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
1184 struct anv_pipeline_cache *cache,
1185 const VkComputePipelineCreateInfo *info,
1186 const struct anv_shader_module *module,
1187 const char *entrypoint,
1188 const VkSpecializationInfo *spec_info)
1189 {
1190 const struct brw_compiler *compiler =
1191 pipeline->device->instance->physicalDevice.compiler;
1192
1193 struct anv_pipeline_stage stage = {
1194 .stage = MESA_SHADER_COMPUTE,
1195 .module = module,
1196 .entrypoint = entrypoint,
1197 .spec_info = spec_info,
1198 .cache_key = {
1199 .stage = MESA_SHADER_COMPUTE,
1200 }
1201 };
1202 anv_pipeline_hash_shader(stage.module,
1203 stage.entrypoint,
1204 MESA_SHADER_COMPUTE,
1205 stage.spec_info,
1206 stage.shader_sha1);
1207
1208 struct anv_shader_bin *bin = NULL;
1209
1210 populate_cs_prog_key(&pipeline->device->info, &stage.key.cs);
1211
1212 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1213
1214 anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
1215 bin = anv_device_search_for_kernel(pipeline->device, cache, &stage.cache_key,
1216 sizeof(stage.cache_key));
1217
1218 if (bin == NULL) {
1219 stage.bind_map = (struct anv_pipeline_bind_map) {
1220 .surface_to_descriptor = stage.surface_to_descriptor,
1221 .sampler_to_descriptor = stage.sampler_to_descriptor
1222 };
1223
1224 void *mem_ctx = ralloc_context(NULL);
1225
1226 stage.nir = anv_pipeline_stage_get_nir(pipeline, cache, mem_ctx, &stage);
1227 if (stage.nir == NULL) {
1228 ralloc_free(mem_ctx);
1229 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1230 }
1231
1232 anv_pipeline_lower_nir(pipeline, mem_ctx, &stage, layout);
1233
1234 NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id,
1235 &stage.prog_data.cs);
1236
1237 anv_fill_binding_table(&stage.prog_data.cs.base, 1);
1238
1239 const unsigned *shader_code =
1240 brw_compile_cs(compiler, NULL, mem_ctx, &stage.key.cs,
1241 &stage.prog_data.cs, stage.nir, -1, NULL);
1242 if (shader_code == NULL) {
1243 ralloc_free(mem_ctx);
1244 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1245 }
1246
1247 const unsigned code_size = stage.prog_data.base.program_size;
1248 bin = anv_device_upload_kernel(pipeline->device, cache,
1249 &stage.cache_key, sizeof(stage.cache_key),
1250 shader_code, code_size,
1251 stage.nir->constant_data,
1252 stage.nir->constant_data_size,
1253 &stage.prog_data.base,
1254 sizeof(stage.prog_data.cs),
1255 NULL, &stage.bind_map);
1256 if (!bin) {
1257 ralloc_free(mem_ctx);
1258 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1259 }
1260
1261 ralloc_free(mem_ctx);
1262 }
1263
1264 pipeline->active_stages = VK_SHADER_STAGE_COMPUTE_BIT;
1265 pipeline->shaders[MESA_SHADER_COMPUTE] = bin;
1266
1267 return VK_SUCCESS;
1268 }
1269
1270 /**
1271 * Copy pipeline state not marked as dynamic.
1272 * Dynamic state is pipeline state which hasn't been provided at pipeline
1273 * creation time, but is dynamically provided afterwards using various
1274 * vkCmdSet* functions.
1275 *
1276 * The set of state considered "non_dynamic" is determined by the pieces of
1277 * state that have their corresponding VkDynamicState enums omitted from
1278 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
1279 *
1280 * @param[out] pipeline Destination non_dynamic state.
1281 * @param[in] pCreateInfo Source of non_dynamic state to be copied.
1282 */
1283 static void
1284 copy_non_dynamic_state(struct anv_pipeline *pipeline,
1285 const VkGraphicsPipelineCreateInfo *pCreateInfo)
1286 {
1287 anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
1288 struct anv_subpass *subpass = pipeline->subpass;
1289
1290 pipeline->dynamic_state = default_dynamic_state;
1291
1292 if (pCreateInfo->pDynamicState) {
1293 /* Remove all of the states that are marked as dynamic */
1294 uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
1295 for (uint32_t s = 0; s < count; s++)
1296 states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
1297 }
1298
1299 struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
1300
1301 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1302 *
1303 * pViewportState is [...] NULL if the pipeline
1304 * has rasterization disabled.
1305 */
1306 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1307 assert(pCreateInfo->pViewportState);
1308
1309 dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
1310 if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
1311 typed_memcpy(dynamic->viewport.viewports,
1312 pCreateInfo->pViewportState->pViewports,
1313 pCreateInfo->pViewportState->viewportCount);
1314 }
1315
1316 dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
1317 if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
1318 typed_memcpy(dynamic->scissor.scissors,
1319 pCreateInfo->pViewportState->pScissors,
1320 pCreateInfo->pViewportState->scissorCount);
1321 }
1322 }
1323
1324 if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
1325 assert(pCreateInfo->pRasterizationState);
1326 dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
1327 }
1328
1329 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
1330 assert(pCreateInfo->pRasterizationState);
1331 dynamic->depth_bias.bias =
1332 pCreateInfo->pRasterizationState->depthBiasConstantFactor;
1333 dynamic->depth_bias.clamp =
1334 pCreateInfo->pRasterizationState->depthBiasClamp;
1335 dynamic->depth_bias.slope =
1336 pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
1337 }
1338
1339 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1340 *
1341 * pColorBlendState is [...] NULL if the pipeline has rasterization
1342 * disabled or if the subpass of the render pass the pipeline is
1343 * created against does not use any color attachments.
1344 */
1345 bool uses_color_att = false;
1346 for (unsigned i = 0; i < subpass->color_count; ++i) {
1347 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1348 uses_color_att = true;
1349 break;
1350 }
1351 }
1352
1353 if (uses_color_att &&
1354 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1355 assert(pCreateInfo->pColorBlendState);
1356
1357 if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
1358 typed_memcpy(dynamic->blend_constants,
1359 pCreateInfo->pColorBlendState->blendConstants, 4);
1360 }
1361
1362 /* If there is no depthstencil attachment, then don't read
1363 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
1364 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
1365 * no need to override the depthstencil defaults in
1366 * anv_pipeline::dynamic_state when there is no depthstencil attachment.
1367 *
1368 * Section 9.2 of the Vulkan 1.0.15 spec says:
1369 *
1370 * pDepthStencilState is [...] NULL if the pipeline has rasterization
1371 * disabled or if the subpass of the render pass the pipeline is created
1372 * against does not use a depth/stencil attachment.
1373 */
1374 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
1375 subpass->depth_stencil_attachment) {
1376 assert(pCreateInfo->pDepthStencilState);
1377
1378 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
1379 dynamic->depth_bounds.min =
1380 pCreateInfo->pDepthStencilState->minDepthBounds;
1381 dynamic->depth_bounds.max =
1382 pCreateInfo->pDepthStencilState->maxDepthBounds;
1383 }
1384
1385 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
1386 dynamic->stencil_compare_mask.front =
1387 pCreateInfo->pDepthStencilState->front.compareMask;
1388 dynamic->stencil_compare_mask.back =
1389 pCreateInfo->pDepthStencilState->back.compareMask;
1390 }
1391
1392 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
1393 dynamic->stencil_write_mask.front =
1394 pCreateInfo->pDepthStencilState->front.writeMask;
1395 dynamic->stencil_write_mask.back =
1396 pCreateInfo->pDepthStencilState->back.writeMask;
1397 }
1398
1399 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
1400 dynamic->stencil_reference.front =
1401 pCreateInfo->pDepthStencilState->front.reference;
1402 dynamic->stencil_reference.back =
1403 pCreateInfo->pDepthStencilState->back.reference;
1404 }
1405 }
1406
1407 pipeline->dynamic_state_mask = states;
1408 }
1409
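/* Debug-build sanity checks that the required members of
 * VkGraphicsPipelineCreateInfo are present for the given subpass and stages.
 */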
1410 static void
1411 anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
1412 {
1413 #ifdef DEBUG
1414 struct anv_render_pass *renderpass = NULL;
1415 struct anv_subpass *subpass = NULL;
1416
1417 /* Assert that all required members of VkGraphicsPipelineCreateInfo are
1418 * present. See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
1419 */
1420 assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1421
1422 renderpass = anv_render_pass_from_handle(info->renderPass);
1423 assert(renderpass);
1424
1425 assert(info->subpass < renderpass->subpass_count);
1426 subpass = &renderpass->subpasses[info->subpass];
1427
1428 assert(info->stageCount >= 1);
1429 assert(info->pVertexInputState);
1430 assert(info->pInputAssemblyState);
1431 assert(info->pRasterizationState);
1432 if (!info->pRasterizationState->rasterizerDiscardEnable) {
1433 assert(info->pViewportState);
1434 assert(info->pMultisampleState);
1435
1436 if (subpass && subpass->depth_stencil_attachment)
1437 assert(info->pDepthStencilState);
1438
1439 if (subpass && subpass->color_count > 0) {
1440 bool all_color_unused = true;
1441 for (int i = 0; i < subpass->color_count; i++) {
1442 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
1443 all_color_unused = false;
1444 }
1445 /* pColorBlendState is ignored if the pipeline has rasterization
1446 * disabled or if the subpass of the render pass the pipeline is
1447 * created against does not use any color attachments.
1448 */
1449 assert(info->pColorBlendState || all_color_unused);
1450 }
1451 }
1452
1453 for (uint32_t i = 0; i < info->stageCount; ++i) {
1454 switch (info->pStages[i].stage) {
1455 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
1456 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
1457 assert(info->pTessellationState);
1458 break;
1459 default:
1460 break;
1461 }
1462 }
1463 #endif
1464 }
1465
1466 /**
1467 * Calculate the desired L3 partitioning based on the current state of the
1468 * pipeline. For now this simply returns the conservative defaults calculated
1469 * by get_default_l3_weights(), but we could probably do better by gathering
1470 * more statistics from the pipeline state (e.g. guess of expected URB usage
1471 * and bound surfaces), or by using feed-back from performance counters.
1472 */
1473 void
1474 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
1475 {
1476 const struct gen_device_info *devinfo = &pipeline->device->info;
1477
1478 const struct gen_l3_weights w =
1479 gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);
1480
1481 pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
1482 pipeline->urb.total_size =
1483 gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
1484 }
1485
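/* Top-level graphics pipeline setup: validate the create info, set up the
 * batch and non-dynamic state, compile the shaders, pick an L3 configuration,
 * and record the vertex-input and input-assembly state the GENX code needs.
 */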
1486 VkResult
1487 anv_pipeline_init(struct anv_pipeline *pipeline,
1488 struct anv_device *device,
1489 struct anv_pipeline_cache *cache,
1490 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1491 const VkAllocationCallbacks *alloc)
1492 {
1493 VkResult result;
1494
1495 anv_pipeline_validate_create_info(pCreateInfo);
1496
1497 if (alloc == NULL)
1498 alloc = &device->alloc;
1499
1500 pipeline->device = device;
1501
1502 ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
1503 assert(pCreateInfo->subpass < render_pass->subpass_count);
1504 pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];
1505
1506 result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
1507 if (result != VK_SUCCESS)
1508 return result;
1509
1510 pipeline->batch.alloc = alloc;
1511 pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
1512 pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
1513 pipeline->batch.relocs = &pipeline->batch_relocs;
1514 pipeline->batch.status = VK_SUCCESS;
1515
1516 copy_non_dynamic_state(pipeline, pCreateInfo);
1517 pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
1518 pCreateInfo->pRasterizationState->depthClampEnable;
1519
1520 pipeline->sample_shading_enable = pCreateInfo->pMultisampleState &&
1521 pCreateInfo->pMultisampleState->sampleShadingEnable;
1522
1523 pipeline->needs_data_cache = false;
1524
1525 /* When we free the pipeline, we detect stages based on the NULL status
1526 * of various prog_data pointers. Make them NULL by default.
1527 */
1528 memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
1529
1530 result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
1531 if (result != VK_SUCCESS) {
1532 anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
1533 return result;
1534 }
1535
1536 assert(pipeline->shaders[MESA_SHADER_VERTEX]);
1537
1538 anv_pipeline_setup_l3_config(pipeline, false);
1539
1540 const VkPipelineVertexInputStateCreateInfo *vi_info =
1541 pCreateInfo->pVertexInputState;
1542
1543 const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;
1544
1545 pipeline->vb_used = 0;
1546 for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
1547 const VkVertexInputAttributeDescription *desc =
1548 &vi_info->pVertexAttributeDescriptions[i];
1549
1550 if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
1551 pipeline->vb_used |= 1 << desc->binding;
1552 }
1553
1554 for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
1555 const VkVertexInputBindingDescription *desc =
1556 &vi_info->pVertexBindingDescriptions[i];
1557
1558 pipeline->vb[desc->binding].stride = desc->stride;
1559
1560 /* Step rate is programmed per vertex element (attribute), not
1561 * binding. Set up a map of which bindings step per instance, for
1562 * reference by vertex element setup. */
1563 switch (desc->inputRate) {
1564 default:
1565 case VK_VERTEX_INPUT_RATE_VERTEX:
1566 pipeline->vb[desc->binding].instanced = false;
1567 break;
1568 case VK_VERTEX_INPUT_RATE_INSTANCE:
1569 pipeline->vb[desc->binding].instanced = true;
1570 break;
1571 }
1572
1573 pipeline->vb[desc->binding].instance_divisor = 1;
1574 }
1575
1576 const VkPipelineVertexInputDivisorStateCreateInfoEXT *vi_div_state =
1577 vk_find_struct_const(vi_info->pNext,
1578 PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
1579 if (vi_div_state) {
1580 for (uint32_t i = 0; i < vi_div_state->vertexBindingDivisorCount; i++) {
1581 const VkVertexInputBindingDivisorDescriptionEXT *desc =
1582 &vi_div_state->pVertexBindingDivisors[i];
1583
1584 pipeline->vb[desc->binding].instance_divisor = desc->divisor;
1585 }
1586 }
1587
1588 /* Our implementation of VK_KHR_multiview uses instancing to draw the
1589 * different views. If the client asks for instancing, we need to multiply
1590 * the instance divisor by the number of views to ensure that we repeat the
1591 * client's per-instance data once for each view.
1592 */
1593 if (pipeline->subpass->view_mask) {
1594 const uint32_t view_count = anv_subpass_view_count(pipeline->subpass);
1595 for (uint32_t vb = 0; vb < MAX_VBS; vb++) {
1596 if (pipeline->vb[vb].instanced)
1597 pipeline->vb[vb].instance_divisor *= view_count;
1598 }
1599 }
1600
1601 const VkPipelineInputAssemblyStateCreateInfo *ia_info =
1602 pCreateInfo->pInputAssemblyState;
1603 const VkPipelineTessellationStateCreateInfo *tess_info =
1604 pCreateInfo->pTessellationState;
1605 pipeline->primitive_restart = ia_info->primitiveRestartEnable;
1606
1607 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
1608 pipeline->topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
1609 else
1610 pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
1611
1612 return VK_SUCCESS;
1613 }