spirv,nir: lower frexp_exp/frexp_sig inside a new NIR pass
[mesa.git] src/intel/vulkan/anv_pipeline.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "util/mesa-sha1.h"
31 #include "util/os_time.h"
32 #include "common/gen_l3_config.h"
33 #include "anv_private.h"
34 #include "compiler/brw_nir.h"
35 #include "anv_nir.h"
36 #include "nir/nir_xfb_info.h"
37 #include "spirv/nir_spirv.h"
38 #include "vk_util.h"
39
40 /* Needed for SWIZZLE macros */
41 #include "program/prog_instruction.h"
42
43 // Shader functions
44
45 VkResult anv_CreateShaderModule(
46 VkDevice _device,
47 const VkShaderModuleCreateInfo* pCreateInfo,
48 const VkAllocationCallbacks* pAllocator,
49 VkShaderModule* pShaderModule)
50 {
51 ANV_FROM_HANDLE(anv_device, device, _device);
52 struct anv_shader_module *module;
53
54 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
55 assert(pCreateInfo->flags == 0);
56
57 module = vk_alloc2(&device->alloc, pAllocator,
58 sizeof(*module) + pCreateInfo->codeSize, 8,
59 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
60 if (module == NULL)
61 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
62
63 module->size = pCreateInfo->codeSize;
64 memcpy(module->data, pCreateInfo->pCode, module->size);
65
66 _mesa_sha1_compute(module->data, module->size, module->sha1);
67
68 *pShaderModule = anv_shader_module_to_handle(module);
69
70 return VK_SUCCESS;
71 }
72
73 void anv_DestroyShaderModule(
74 VkDevice _device,
75 VkShaderModule _module,
76 const VkAllocationCallbacks* pAllocator)
77 {
78 ANV_FROM_HANDLE(anv_device, device, _device);
79 ANV_FROM_HANDLE(anv_shader_module, module, _module);
80
81 if (!module)
82 return;
83
84 vk_free2(&device->alloc, pAllocator, module);
85 }
86
87 #define SPIR_V_MAGIC_NUMBER 0x07230203
88
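/* Map from gl_shader_stage to the corresponding INTEL_DEBUG flag; used
 * below to decide whether to dump the NIR generated for a given stage.
 */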
89 static const uint64_t stage_to_debug[] = {
90 [MESA_SHADER_VERTEX] = DEBUG_VS,
91 [MESA_SHADER_TESS_CTRL] = DEBUG_TCS,
92 [MESA_SHADER_TESS_EVAL] = DEBUG_TES,
93 [MESA_SHADER_GEOMETRY] = DEBUG_GS,
94 [MESA_SHADER_FRAGMENT] = DEBUG_WM,
95 [MESA_SHADER_COMPUTE] = DEBUG_CS,
96 };
97
98 /* Eventually, this will become part of anv_CreateShader. Unfortunately,
99 * we can't do that yet because we don't have the ability to copy nir.
100 */
101 static nir_shader *
102 anv_shader_compile_to_nir(struct anv_device *device,
103 void *mem_ctx,
104 const struct anv_shader_module *module,
105 const char *entrypoint_name,
106 gl_shader_stage stage,
107 const VkSpecializationInfo *spec_info)
108 {
109 const struct anv_physical_device *pdevice =
110 &device->instance->physicalDevice;
111 const struct brw_compiler *compiler = pdevice->compiler;
112 const nir_shader_compiler_options *nir_options =
113 compiler->glsl_compiler_options[stage].NirOptions;
114
115 uint32_t *spirv = (uint32_t *) module->data;
116 assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
117 assert(module->size % 4 == 0);
118
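/* Convert the API specialization map into the form spirv_to_nir expects.
 * For example (illustrative values only): a VkSpecializationMapEntry with
 * constantID = 3, offset = 0, size = 4 and pData pointing at a 32-bit
 * value becomes a nir_spirv_specialization with id = 3 and data32 set to
 * that value.
 */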
119 uint32_t num_spec_entries = 0;
120 struct nir_spirv_specialization *spec_entries = NULL;
121 if (spec_info && spec_info->mapEntryCount > 0) {
122 num_spec_entries = spec_info->mapEntryCount;
123 spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
124 for (uint32_t i = 0; i < num_spec_entries; i++) {
125 VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
126 const void *data = spec_info->pData + entry.offset;
127 assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
128
129 spec_entries[i].id = spec_info->pMapEntries[i].constantID;
130 if (spec_info->dataSize == 8)
131 spec_entries[i].data64 = *(const uint64_t *)data;
132 else
133 spec_entries[i].data32 = *(const uint32_t *)data;
134 }
135 }
136
137 struct spirv_to_nir_options spirv_options = {
138 .lower_workgroup_access_to_offsets = true,
139 .caps = {
140 .device_group = true,
141 .draw_parameters = true,
142 .float64 = pdevice->info.gen >= 8,
143 .geometry_streams = true,
144 .image_write_without_format = true,
145 .int16 = pdevice->info.gen >= 8,
146 .int64 = pdevice->info.gen >= 8,
147 .min_lod = true,
148 .multiview = true,
149 .physical_storage_buffer_address = pdevice->info.gen >= 8 &&
150 pdevice->use_softpin,
151 .post_depth_coverage = pdevice->info.gen >= 9,
152 .shader_viewport_index_layer = true,
153 .stencil_export = pdevice->info.gen >= 9,
154 .storage_8bit = pdevice->info.gen >= 8,
155 .storage_16bit = pdevice->info.gen >= 8,
156 .subgroup_arithmetic = true,
157 .subgroup_basic = true,
158 .subgroup_ballot = true,
159 .subgroup_quad = true,
160 .subgroup_shuffle = true,
161 .subgroup_vote = true,
162 .tessellation = true,
163 .transform_feedback = pdevice->info.gen >= 8,
164 .variable_pointers = true,
165 },
166 .ubo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
167 .ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
168 .phys_ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT64, 1),
169 .push_const_ptr_type = glsl_uint_type(),
170 .shared_ptr_type = glsl_uint_type(),
171 };
172
173 nir_function *entry_point =
174 spirv_to_nir(spirv, module->size / 4,
175 spec_entries, num_spec_entries,
176 stage, entrypoint_name, &spirv_options, nir_options);
177 nir_shader *nir = entry_point->shader;
178 assert(nir->info.stage == stage);
179 nir_validate_shader(nir, "after spirv_to_nir");
180 ralloc_steal(mem_ctx, nir);
181
182 free(spec_entries);
183
184 if (unlikely(INTEL_DEBUG & stage_to_debug[stage])) {
185 fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
186 gl_shader_stage_name(stage));
187 nir_print_shader(nir, stderr);
188 }
189
190 /* We have to lower away local constant initializers right before we
191 * inline functions. That way they get properly initialized at the top
192 * of the function and not at the top of its caller.
193 */
194 NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
195 NIR_PASS_V(nir, nir_lower_returns);
196 NIR_PASS_V(nir, nir_inline_functions);
197 NIR_PASS_V(nir, nir_opt_deref);
198
199 /* Pick off the single entrypoint that we want */
200 foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
201 if (func != entry_point)
202 exec_node_remove(&func->node);
203 }
204 assert(exec_list_length(&nir->functions) == 1);
205
206 /* Now that we've deleted all but the main function, we can go ahead and
207 * lower the rest of the constant initializers. We do this here so that
208 * nir_remove_dead_variables and split_per_member_structs below see the
209 * corresponding stores.
210 */
211 NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
212
213 /* Split member structs. We do this before lower_io_to_temporaries so that
214 * it doesn't lower system values to temporaries by accident.
215 */
216 NIR_PASS_V(nir, nir_split_var_copies);
217 NIR_PASS_V(nir, nir_split_per_member_structs);
218
219 NIR_PASS_V(nir, nir_remove_dead_variables,
220 nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
221
222 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
223 nir_address_format_64bit_global);
224
225 NIR_PASS_V(nir, nir_propagate_invariant);
226 NIR_PASS_V(nir, nir_lower_io_to_temporaries,
227 entry_point->impl, true, false);
228
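/* Lower frexp_exp/frexp_sig into simpler ALU operations via the new NIR
 * pass (see the commit subject above) before the backend sees the shader.
 */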
229 NIR_PASS_V(nir, nir_lower_frexp);
230
231 /* Vulkan uses the separate-shader linking model */
232 nir->info.separate_shader = true;
233
234 nir = brw_preprocess_nir(compiler, nir, NULL);
235
236 return nir;
237 }
238
239 void anv_DestroyPipeline(
240 VkDevice _device,
241 VkPipeline _pipeline,
242 const VkAllocationCallbacks* pAllocator)
243 {
244 ANV_FROM_HANDLE(anv_device, device, _device);
245 ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
246
247 if (!pipeline)
248 return;
249
250 anv_reloc_list_finish(&pipeline->batch_relocs,
251 pAllocator ? pAllocator : &device->alloc);
252 if (pipeline->blend_state.map)
253 anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
254
255 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
256 if (pipeline->shaders[s])
257 anv_shader_bin_unref(device, pipeline->shaders[s]);
258 }
259
260 vk_free2(&device->alloc, pAllocator, pipeline);
261 }
262
263 static const uint32_t vk_to_gen_primitive_type[] = {
264 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
265 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
266 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
267 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
268 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
269 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
270 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
271 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
272 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
273 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
274 };
275
276 static void
277 populate_sampler_prog_key(const struct gen_device_info *devinfo,
278 struct brw_sampler_prog_key_data *key)
279 {
280 /* Almost all multisampled textures are compressed. The only time when we
281 * don't compress a multisampled texture is for 16x MSAA with a surface
282 * width greater than 8k, which is a bit of an edge case. Since the sampler
283 * just ignores the MCS parameter to ld2ms when MCS is disabled, it's safe
284 * to tell the compiler to always assume compression.
285 */
286 key->compressed_multisample_layout_mask = ~0;
287
288 /* SkyLake added support for 16x MSAA. With this came a new message for
289 * reading from a 16x MSAA surface with compression. The new message was
290 * needed because now the MCS data is 64 bits instead of 32 or lower as is
291 * the case for 8x, 4x, and 2x. The key->msaa_16 bit-field controls which
292 * message we use. Fortunately, the 16x message works for 8x, 4x, and 2x
293 * so we can just use it unconditionally. This may not be quite as
294 * efficient but it saves us from recompiling.
295 */
296 if (devinfo->gen >= 9)
297 key->msaa_16 = ~0;
298
299 /* XXX: Handle texture swizzle on HSW- */
300 for (int i = 0; i < MAX_SAMPLERS; i++) {
301 /* Assume color sampler, no swizzling. (Works for BDW+) */
302 key->swizzles[i] = SWIZZLE_XYZW;
303 }
304 }
305
306 static void
307 populate_vs_prog_key(const struct gen_device_info *devinfo,
308 struct brw_vs_prog_key *key)
309 {
310 memset(key, 0, sizeof(*key));
311
312 populate_sampler_prog_key(devinfo, &key->tex);
313
314 /* XXX: Handle vertex input work-arounds */
315
316 /* XXX: Handle sampler_prog_key */
317 }
318
319 static void
320 populate_tcs_prog_key(const struct gen_device_info *devinfo,
321 unsigned input_vertices,
322 struct brw_tcs_prog_key *key)
323 {
324 memset(key, 0, sizeof(*key));
325
326 populate_sampler_prog_key(devinfo, &key->tex);
327
328 key->input_vertices = input_vertices;
329 }
330
331 static void
332 populate_tes_prog_key(const struct gen_device_info *devinfo,
333 struct brw_tes_prog_key *key)
334 {
335 memset(key, 0, sizeof(*key));
336
337 populate_sampler_prog_key(devinfo, &key->tex);
338 }
339
340 static void
341 populate_gs_prog_key(const struct gen_device_info *devinfo,
342 struct brw_gs_prog_key *key)
343 {
344 memset(key, 0, sizeof(*key));
345
346 populate_sampler_prog_key(devinfo, &key->tex);
347 }
348
349 static void
350 populate_wm_prog_key(const struct gen_device_info *devinfo,
351 const struct anv_subpass *subpass,
352 const VkPipelineMultisampleStateCreateInfo *ms_info,
353 struct brw_wm_prog_key *key)
354 {
355 memset(key, 0, sizeof(*key));
356
357 populate_sampler_prog_key(devinfo, &key->tex);
358
359 /* We set this to 0 here and set it to the actual value before we call
360 * brw_compile_fs.
361 */
362 key->input_slots_valid = 0;
363
364 /* Vulkan doesn't specify a default */
365 key->high_quality_derivatives = false;
366
367 /* XXX Vulkan doesn't appear to specify */
368 key->clamp_fragment_color = false;
369
370 assert(subpass->color_count <= MAX_RTS);
371 for (uint32_t i = 0; i < subpass->color_count; i++) {
372 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
373 key->color_outputs_valid |= (1 << i);
374 }
375
376 key->nr_color_regions = util_bitcount(key->color_outputs_valid);
377
378 key->replicate_alpha = key->nr_color_regions > 1 &&
379 ms_info && ms_info->alphaToCoverageEnable;
380
381 if (ms_info) {
382 /* We should probably pull this out of the shader, but it's fairly
383 * harmless to compute it and then let dead-code take care of it.
384 */
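/* For instance, minSampleShading = 1.0 with rasterizationSamples = 4
 * gives persample_interp = true below.
 */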
385 if (ms_info->rasterizationSamples > 1) {
386 key->persample_interp =
387 (ms_info->minSampleShading * ms_info->rasterizationSamples) > 1;
388 key->multisample_fbo = true;
389 }
390
391 key->frag_coord_adds_sample_pos = ms_info->sampleShadingEnable;
392 }
393 }
394
395 static void
396 populate_cs_prog_key(const struct gen_device_info *devinfo,
397 struct brw_cs_prog_key *key)
398 {
399 memset(key, 0, sizeof(*key));
400
401 populate_sampler_prog_key(devinfo, &key->tex);
402 }
403
404 struct anv_pipeline_stage {
405 gl_shader_stage stage;
406
407 const struct anv_shader_module *module;
408 const char *entrypoint;
409 const VkSpecializationInfo *spec_info;
410
411 unsigned char shader_sha1[20];
412
413 union brw_any_prog_key key;
414
415 struct {
416 gl_shader_stage stage;
417 unsigned char sha1[20];
418 } cache_key;
419
420 nir_shader *nir;
421
422 struct anv_pipeline_binding surface_to_descriptor[256];
423 struct anv_pipeline_binding sampler_to_descriptor[256];
424 struct anv_pipeline_bind_map bind_map;
425
426 union brw_any_prog_data prog_data;
427
428 VkPipelineCreationFeedbackEXT feedback;
429 };
430
431 static void
432 anv_pipeline_hash_shader(const struct anv_shader_module *module,
433 const char *entrypoint,
434 gl_shader_stage stage,
435 const VkSpecializationInfo *spec_info,
436 unsigned char *sha1_out)
437 {
438 struct mesa_sha1 ctx;
439 _mesa_sha1_init(&ctx);
440
441 _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
442 _mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
443 _mesa_sha1_update(&ctx, &stage, sizeof(stage));
444 if (spec_info) {
445 _mesa_sha1_update(&ctx, spec_info->pMapEntries,
446 spec_info->mapEntryCount *
447 sizeof(*spec_info->pMapEntries));
448 _mesa_sha1_update(&ctx, spec_info->pData,
449 spec_info->dataSize);
450 }
451
452 _mesa_sha1_final(&ctx, sha1_out);
453 }
454
455 static void
456 anv_pipeline_hash_graphics(struct anv_pipeline *pipeline,
457 struct anv_pipeline_layout *layout,
458 struct anv_pipeline_stage *stages,
459 unsigned char *sha1_out)
460 {
461 struct mesa_sha1 ctx;
462 _mesa_sha1_init(&ctx);
463
464 _mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
465 sizeof(pipeline->subpass->view_mask));
466
467 if (layout)
468 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
469
470 const bool rba = pipeline->device->robust_buffer_access;
471 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
472
473 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
474 if (stages[s].entrypoint) {
475 _mesa_sha1_update(&ctx, stages[s].shader_sha1,
476 sizeof(stages[s].shader_sha1));
477 _mesa_sha1_update(&ctx, &stages[s].key, brw_prog_key_size(s));
478 }
479 }
480
481 _mesa_sha1_final(&ctx, sha1_out);
482 }
483
484 static void
485 anv_pipeline_hash_compute(struct anv_pipeline *pipeline,
486 struct anv_pipeline_layout *layout,
487 struct anv_pipeline_stage *stage,
488 unsigned char *sha1_out)
489 {
490 struct mesa_sha1 ctx;
491 _mesa_sha1_init(&ctx);
492
493 if (layout)
494 _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
495
496 const bool rba = pipeline->device->robust_buffer_access;
497 _mesa_sha1_update(&ctx, &rba, sizeof(rba));
498
499 _mesa_sha1_update(&ctx, stage->shader_sha1,
500 sizeof(stage->shader_sha1));
501 _mesa_sha1_update(&ctx, &stage->key.cs, sizeof(stage->key.cs));
502
503 _mesa_sha1_final(&ctx, sha1_out);
504 }
505
506 static nir_shader *
507 anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
508 struct anv_pipeline_cache *cache,
509 void *mem_ctx,
510 struct anv_pipeline_stage *stage)
511 {
512 const struct brw_compiler *compiler =
513 pipeline->device->instance->physicalDevice.compiler;
514 const nir_shader_compiler_options *nir_options =
515 compiler->glsl_compiler_options[stage->stage].NirOptions;
516 nir_shader *nir;
517
518 nir = anv_device_search_for_nir(pipeline->device, cache,
519 nir_options,
520 stage->shader_sha1,
521 mem_ctx);
522 if (nir) {
523 assert(nir->info.stage == stage->stage);
524 return nir;
525 }
526
527 nir = anv_shader_compile_to_nir(pipeline->device,
528 mem_ctx,
529 stage->module,
530 stage->entrypoint,
531 stage->stage,
532 stage->spec_info);
533 if (nir) {
534 anv_device_upload_nir(pipeline->device, cache, nir, stage->shader_sha1);
535 return nir;
536 }
537
538 return NULL;
539 }
540
541 static void
542 anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
543 void *mem_ctx,
544 struct anv_pipeline_stage *stage,
545 struct anv_pipeline_layout *layout)
546 {
547 const struct brw_compiler *compiler =
548 pipeline->device->instance->physicalDevice.compiler;
549
550 struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
551 nir_shader *nir = stage->nir;
552
553 if (nir->info.stage == MESA_SHADER_FRAGMENT) {
554 NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);
555 NIR_PASS_V(nir, anv_nir_lower_input_attachments);
556 }
557
558 NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);
559
560 NIR_PASS_V(nir, anv_nir_lower_push_constants);
561
562 if (nir->info.stage != MESA_SHADER_COMPUTE)
563 NIR_PASS_V(nir, anv_nir_lower_multiview, pipeline->subpass->view_mask);
564
565 if (nir->info.stage == MESA_SHADER_COMPUTE)
566 prog_data->total_shared = nir->num_shared;
567
568 nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
569
570 if (nir->num_uniforms > 0) {
571 assert(prog_data->nr_params == 0);
572
573 /* If the shader uses any push constants at all, we'll just give
574 * them the maximum possible number.
575 */
576 assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
577 nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE;
578 prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
579 prog_data->param = ralloc_array(mem_ctx, uint32_t, prog_data->nr_params);
580
581 /* We now set the param values to be offsets into an
582 * anv_push_constants structure. Since the compiler doesn't
583 * actually dereference any of the gl_constant_value pointers in the
584 * params array, it doesn't really matter what we put here.
585 */
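/* For instance, param[i] ends up holding roughly
 * ANV_PARAM_PUSH(offsetof(struct anv_push_constants, client_data) + i * 4),
 * i.e. the byte offset of the i-th 32-bit push constant; the NULL pointer
 * below is only used for this offsetof-style computation.
 */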
586 struct anv_push_constants *null_data = NULL;
587 /* Fill out the push constants section of the param array */
588 for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++) {
589 prog_data->param[i] = ANV_PARAM_PUSH(
590 (uintptr_t)&null_data->client_data[i * sizeof(float)]);
591 }
592 }
593
594 if (nir->info.num_ssbos > 0 || nir->info.num_images > 0)
595 pipeline->needs_data_cache = true;
596
597 NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo);
598
599 /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
600 if (layout) {
601 anv_nir_apply_pipeline_layout(&pipeline->device->instance->physicalDevice,
602 pipeline->device->robust_buffer_access,
603 layout, nir, prog_data,
604 &stage->bind_map);
605
606 NIR_PASS_V(nir, nir_lower_explicit_io,
607 nir_var_mem_ubo | nir_var_mem_ssbo,
608 nir_address_format_32bit_index_offset);
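/* After this pass, each UBO/SSBO access is expressed as a pair of a 32-bit
 * binding-table index and a 32-bit byte offset, per
 * nir_address_format_32bit_index_offset.
 */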
609
610 NIR_PASS_V(nir, nir_opt_constant_folding);
611 }
612
613 if (nir->info.stage != MESA_SHADER_COMPUTE)
614 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
615
616 assert(nir->num_uniforms == prog_data->nr_params * 4);
617
618 stage->nir = nir;
619 }
620
621 static void
622 anv_pipeline_link_vs(const struct brw_compiler *compiler,
623 struct anv_pipeline_stage *vs_stage,
624 struct anv_pipeline_stage *next_stage)
625 {
626 if (next_stage)
627 brw_nir_link_shaders(compiler, &vs_stage->nir, &next_stage->nir);
628 }
629
630 static const unsigned *
631 anv_pipeline_compile_vs(const struct brw_compiler *compiler,
632 void *mem_ctx,
633 struct anv_device *device,
634 struct anv_pipeline_stage *vs_stage)
635 {
636 brw_compute_vue_map(compiler->devinfo,
637 &vs_stage->prog_data.vs.base.vue_map,
638 vs_stage->nir->info.outputs_written,
639 vs_stage->nir->info.separate_shader);
640
641 return brw_compile_vs(compiler, device, mem_ctx, &vs_stage->key.vs,
642 &vs_stage->prog_data.vs, vs_stage->nir, -1, NULL);
643 }
644
645 static void
646 merge_tess_info(struct shader_info *tes_info,
647 const struct shader_info *tcs_info)
648 {
649 /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
650 *
651 * "PointMode. Controls generation of points rather than triangles
652 * or lines. This functionality defaults to disabled, and is
653 * enabled if either shader stage includes the execution mode."
654 *
655 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
656 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
657 * and OutputVertices, it says:
658 *
659 * "One mode must be set in at least one of the tessellation
660 * shader stages."
661 *
662 * So, the fields can be set in either the TCS or TES, but they must
663 * agree if set in both. Our backend looks at TES, so bitwise-or in
664 * the values from the TCS.
665 */
666 assert(tcs_info->tess.tcs_vertices_out == 0 ||
667 tes_info->tess.tcs_vertices_out == 0 ||
668 tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
669 tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
670
671 assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
672 tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
673 tcs_info->tess.spacing == tes_info->tess.spacing);
674 tes_info->tess.spacing |= tcs_info->tess.spacing;
675
676 assert(tcs_info->tess.primitive_mode == 0 ||
677 tes_info->tess.primitive_mode == 0 ||
678 tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
679 tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
680 tes_info->tess.ccw |= tcs_info->tess.ccw;
681 tes_info->tess.point_mode |= tcs_info->tess.point_mode;
682 }
683
684 static void
685 anv_pipeline_link_tcs(const struct brw_compiler *compiler,
686 struct anv_pipeline_stage *tcs_stage,
687 struct anv_pipeline_stage *tes_stage)
688 {
689 assert(tes_stage && tes_stage->stage == MESA_SHADER_TESS_EVAL);
690
691 brw_nir_link_shaders(compiler, &tcs_stage->nir, &tes_stage->nir);
692
693 nir_lower_patch_vertices(tes_stage->nir,
694 tcs_stage->nir->info.tess.tcs_vertices_out,
695 NULL);
696
697 /* Copy TCS info into the TES info */
698 merge_tess_info(&tes_stage->nir->info, &tcs_stage->nir->info);
699
700 /* Whacking the key after cache lookup is a bit sketchy, but all of
701 * this comes from the SPIR-V, which is part of the hash used for the
702 * pipeline cache. So it should be safe.
703 */
704 tcs_stage->key.tcs.tes_primitive_mode =
705 tes_stage->nir->info.tess.primitive_mode;
706 tcs_stage->key.tcs.quads_workaround =
707 compiler->devinfo->gen < 9 &&
708 tes_stage->nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
709 tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
710 }
711
712 static const unsigned *
713 anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
714 void *mem_ctx,
715 struct anv_device *device,
716 struct anv_pipeline_stage *tcs_stage,
717 struct anv_pipeline_stage *prev_stage)
718 {
719 tcs_stage->key.tcs.outputs_written =
720 tcs_stage->nir->info.outputs_written;
721 tcs_stage->key.tcs.patch_outputs_written =
722 tcs_stage->nir->info.patch_outputs_written;
723
724 return brw_compile_tcs(compiler, device, mem_ctx, &tcs_stage->key.tcs,
725 &tcs_stage->prog_data.tcs, tcs_stage->nir,
726 -1, NULL);
727 }
728
729 static void
730 anv_pipeline_link_tes(const struct brw_compiler *compiler,
731 struct anv_pipeline_stage *tes_stage,
732 struct anv_pipeline_stage *next_stage)
733 {
734 if (next_stage)
735 brw_nir_link_shaders(compiler, &tes_stage->nir, &next_stage->nir);
736 }
737
738 static const unsigned *
739 anv_pipeline_compile_tes(const struct brw_compiler *compiler,
740 void *mem_ctx,
741 struct anv_device *device,
742 struct anv_pipeline_stage *tes_stage,
743 struct anv_pipeline_stage *tcs_stage)
744 {
745 tes_stage->key.tes.inputs_read =
746 tcs_stage->nir->info.outputs_written;
747 tes_stage->key.tes.patch_inputs_read =
748 tcs_stage->nir->info.patch_outputs_written;
749
750 return brw_compile_tes(compiler, device, mem_ctx, &tes_stage->key.tes,
751 &tcs_stage->prog_data.tcs.base.vue_map,
752 &tes_stage->prog_data.tes, tes_stage->nir,
753 NULL, -1, NULL);
754 }
755
756 static void
757 anv_pipeline_link_gs(const struct brw_compiler *compiler,
758 struct anv_pipeline_stage *gs_stage,
759 struct anv_pipeline_stage *next_stage)
760 {
761 if (next_stage)
762 brw_nir_link_shaders(compiler, &gs_stage->nir, &next_stage->nir);
763 }
764
765 static const unsigned *
766 anv_pipeline_compile_gs(const struct brw_compiler *compiler,
767 void *mem_ctx,
768 struct anv_device *device,
769 struct anv_pipeline_stage *gs_stage,
770 struct anv_pipeline_stage *prev_stage)
771 {
772 brw_compute_vue_map(compiler->devinfo,
773 &gs_stage->prog_data.gs.base.vue_map,
774 gs_stage->nir->info.outputs_written,
775 gs_stage->nir->info.separate_shader);
776
777 return brw_compile_gs(compiler, device, mem_ctx, &gs_stage->key.gs,
778 &gs_stage->prog_data.gs, gs_stage->nir,
779 NULL, -1, NULL);
780 }
781
782 static void
783 anv_pipeline_link_fs(const struct brw_compiler *compiler,
784 struct anv_pipeline_stage *stage)
785 {
786 unsigned num_rts = 0;
787 const int max_rt = FRAG_RESULT_DATA7 - FRAG_RESULT_DATA0 + 1;
788 struct anv_pipeline_binding rt_bindings[max_rt];
789 nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
790 int rt_to_bindings[max_rt];
791 memset(rt_to_bindings, -1, sizeof(rt_to_bindings));
792 bool rt_used[max_rt];
793 memset(rt_used, 0, sizeof(rt_used));
794
795 /* Flag used render targets */
796 nir_foreach_variable_safe(var, &stage->nir->outputs) {
797 if (var->data.location < FRAG_RESULT_DATA0)
798 continue;
799
800 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
801 /* Unused or out-of-bounds */
802 if (rt >= MAX_RTS || !(stage->key.wm.color_outputs_valid & (1 << rt)))
803 continue;
804
805 const unsigned array_len =
806 glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
807 assert(rt + array_len <= max_rt);
808
809 for (unsigned i = 0; i < array_len; i++)
810 rt_used[rt + i] = true;
811 }
812
813 /* Set new, compacted, location */
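/* Example (hypothetical numbers): if only color outputs 0 and 2 are
 * written, they are compacted to binding-table entries 0 and 1 here, and
 * the output variables are relocated to match below.
 */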
814 for (unsigned i = 0; i < max_rt; i++) {
815 if (!rt_used[i])
816 continue;
817
818 rt_to_bindings[i] = num_rts;
819 rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
820 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
821 .binding = 0,
822 .index = i,
823 };
824 num_rts++;
825 }
826
827 bool deleted_output = false;
828 nir_foreach_variable_safe(var, &stage->nir->outputs) {
829 if (var->data.location < FRAG_RESULT_DATA0)
830 continue;
831
832 const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
833 if (rt >= MAX_RTS ||
834 !(stage->key.wm.color_outputs_valid & (1 << rt))) {
835 /* Unused or out-of-bounds, throw it away */
836 deleted_output = true;
837 var->data.mode = nir_var_function_temp;
838 exec_node_remove(&var->node);
839 exec_list_push_tail(&impl->locals, &var->node);
840 continue;
841 }
842
843 /* Give it the new location */
844 assert(rt_to_bindings[rt] != -1);
845 var->data.location = rt_to_bindings[rt] + FRAG_RESULT_DATA0;
846 }
847
848 if (deleted_output)
849 nir_fixup_deref_modes(stage->nir);
850
851 if (num_rts == 0) {
852 /* If we have no render targets, we need a null render target */
853 rt_bindings[0] = (struct anv_pipeline_binding) {
854 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
855 .binding = 0,
856 .index = UINT32_MAX,
857 };
858 num_rts = 1;
859 }
860
861 /* Now that we've determined the actual number of render targets, adjust
862 * the key accordingly.
863 */
864 stage->key.wm.nr_color_regions = num_rts;
865 stage->key.wm.color_outputs_valid = (1 << num_rts) - 1;
866
867 assert(num_rts <= max_rt);
868 assert(stage->bind_map.surface_count == 0);
869 typed_memcpy(stage->bind_map.surface_to_descriptor,
870 rt_bindings, num_rts);
871 stage->bind_map.surface_count += num_rts;
872 }
873
874 static const unsigned *
875 anv_pipeline_compile_fs(const struct brw_compiler *compiler,
876 void *mem_ctx,
877 struct anv_device *device,
878 struct anv_pipeline_stage *fs_stage,
879 struct anv_pipeline_stage *prev_stage)
880 {
881 /* TODO: we could set this to 0 based on the information in nir_shader, but
882 * we need this before we call spirv_to_nir.
883 */
884 assert(prev_stage);
885 fs_stage->key.wm.input_slots_valid =
886 prev_stage->prog_data.vue.vue_map.slots_valid;
887
888 const unsigned *code =
889 brw_compile_fs(compiler, device, mem_ctx, &fs_stage->key.wm,
890 &fs_stage->prog_data.wm, fs_stage->nir,
891 NULL, -1, -1, -1, true, false, NULL, NULL);
892
893 if (fs_stage->key.wm.nr_color_regions == 0 &&
894 !fs_stage->prog_data.wm.has_side_effects &&
895 !fs_stage->prog_data.wm.uses_kill &&
896 fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
897 !fs_stage->prog_data.wm.computed_stencil) {
898 /* This fragment shader has no outputs and no side effects. Go ahead
899 * and return the code pointer so we don't accidentally think the
900 * compile failed, but zero out prog_data, which will set program_size
901 * to zero and disable the stage.
902 */
903 memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
904 }
905
906 return code;
907 }
908
909 static VkResult
910 anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
911 struct anv_pipeline_cache *cache,
912 const VkGraphicsPipelineCreateInfo *info)
913 {
914 VkPipelineCreationFeedbackEXT pipeline_feedback = {
915 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
916 };
917 int64_t pipeline_start = os_time_get_nano();
918
919 const struct brw_compiler *compiler =
920 pipeline->device->instance->physicalDevice.compiler;
921 struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};
922
923 pipeline->active_stages = 0;
924
925 VkResult result;
926 for (uint32_t i = 0; i < info->stageCount; i++) {
927 const VkPipelineShaderStageCreateInfo *sinfo = &info->pStages[i];
928 gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
929
930 pipeline->active_stages |= sinfo->stage;
931
932 int64_t stage_start = os_time_get_nano();
933
934 stages[stage].stage = stage;
935 stages[stage].module = anv_shader_module_from_handle(sinfo->module);
936 stages[stage].entrypoint = sinfo->pName;
937 stages[stage].spec_info = sinfo->pSpecializationInfo;
938 anv_pipeline_hash_shader(stages[stage].module,
939 stages[stage].entrypoint,
940 stage,
941 stages[stage].spec_info,
942 stages[stage].shader_sha1);
943
944 const struct gen_device_info *devinfo = &pipeline->device->info;
945 switch (stage) {
946 case MESA_SHADER_VERTEX:
947 populate_vs_prog_key(devinfo, &stages[stage].key.vs);
948 break;
949 case MESA_SHADER_TESS_CTRL:
950 populate_tcs_prog_key(devinfo,
951 info->pTessellationState->patchControlPoints,
952 &stages[stage].key.tcs);
953 break;
954 case MESA_SHADER_TESS_EVAL:
955 populate_tes_prog_key(devinfo, &stages[stage].key.tes);
956 break;
957 case MESA_SHADER_GEOMETRY:
958 populate_gs_prog_key(devinfo, &stages[stage].key.gs);
959 break;
960 case MESA_SHADER_FRAGMENT:
961 populate_wm_prog_key(devinfo, pipeline->subpass,
962 info->pMultisampleState,
963 &stages[stage].key.wm);
964 break;
965 default:
966 unreachable("Invalid graphics shader stage");
967 }
968
969 stages[stage].feedback.duration += os_time_get_nano() - stage_start;
970 stages[stage].feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
971 }
972
973 if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
974 pipeline->active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
975
976 assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
977
978 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
979
980 unsigned char sha1[20];
981 anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
982
983 unsigned found = 0;
984 unsigned cache_hits = 0;
985 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
986 if (!stages[s].entrypoint)
987 continue;
988
989 int64_t stage_start = os_time_get_nano();
990
991 stages[s].cache_key.stage = s;
992 memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
993
994 bool cache_hit;
995 struct anv_shader_bin *bin =
996 anv_device_search_for_kernel(pipeline->device, cache,
997 &stages[s].cache_key,
998 sizeof(stages[s].cache_key), &cache_hit);
999 if (bin) {
1000 found++;
1001 pipeline->shaders[s] = bin;
1002 }
1003
1004 if (cache_hit) {
1005 cache_hits++;
1006 stages[s].feedback.flags |=
1007 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1008 }
1009 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1010 }
1011
1012 if (found == __builtin_popcount(pipeline->active_stages)) {
1013 if (cache_hits == found) {
1014 pipeline_feedback.flags |=
1015 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1016 }
1017 /* We found all our shaders in the cache. We're done. */
1018 goto done;
1019 } else if (found > 0) {
1020 /* We found some but not all of our shaders. This shouldn't happen
1021 * most of the time but it can if we have a partially populated
1022 * pipeline cache.
1023 */
1024 assert(found < __builtin_popcount(pipeline->active_stages));
1025
1026 vk_debug_report(&pipeline->device->instance->debug_report_callbacks,
1027 VK_DEBUG_REPORT_WARNING_BIT_EXT |
1028 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
1029 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
1030 (uint64_t)(uintptr_t)cache,
1031 0, 0, "anv",
1032 "Found a partial pipeline in the cache. This is "
1033 "most likely caused by an incomplete pipeline cache "
1034 "import or export");
1035
1036 /* We're going to have to recompile anyway, so just throw away our
1037 * references to the shaders in the cache. We'll get them out of the
1038 * cache again as part of the compilation process.
1039 */
1040 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1041 stages[s].feedback.flags = 0;
1042 if (pipeline->shaders[s]) {
1043 anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
1044 pipeline->shaders[s] = NULL;
1045 }
1046 }
1047 }
1048
1049 void *pipeline_ctx = ralloc_context(NULL);
1050
1051 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1052 if (!stages[s].entrypoint)
1053 continue;
1054
1055 int64_t stage_start = os_time_get_nano();
1056
1057 assert(stages[s].stage == s);
1058 assert(pipeline->shaders[s] == NULL);
1059
1060 stages[s].bind_map = (struct anv_pipeline_bind_map) {
1061 .surface_to_descriptor = stages[s].surface_to_descriptor,
1062 .sampler_to_descriptor = stages[s].sampler_to_descriptor
1063 };
1064
1065 stages[s].nir = anv_pipeline_stage_get_nir(pipeline, cache,
1066 pipeline_ctx,
1067 &stages[s]);
1068 if (stages[s].nir == NULL) {
1069 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1070 goto fail;
1071 }
1072
1073 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1074 }
1075
1076 /* Walk backwards to link */
1077 struct anv_pipeline_stage *next_stage = NULL;
1078 for (int s = MESA_SHADER_STAGES - 1; s >= 0; s--) {
1079 if (!stages[s].entrypoint)
1080 continue;
1081
1082 switch (s) {
1083 case MESA_SHADER_VERTEX:
1084 anv_pipeline_link_vs(compiler, &stages[s], next_stage);
1085 break;
1086 case MESA_SHADER_TESS_CTRL:
1087 anv_pipeline_link_tcs(compiler, &stages[s], next_stage);
1088 break;
1089 case MESA_SHADER_TESS_EVAL:
1090 anv_pipeline_link_tes(compiler, &stages[s], next_stage);
1091 break;
1092 case MESA_SHADER_GEOMETRY:
1093 anv_pipeline_link_gs(compiler, &stages[s], next_stage);
1094 break;
1095 case MESA_SHADER_FRAGMENT:
1096 anv_pipeline_link_fs(compiler, &stages[s]);
1097 break;
1098 default:
1099 unreachable("Invalid graphics shader stage");
1100 }
1101
1102 next_stage = &stages[s];
1103 }
1104
1105 struct anv_pipeline_stage *prev_stage = NULL;
1106 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1107 if (!stages[s].entrypoint)
1108 continue;
1109
1110 int64_t stage_start = os_time_get_nano();
1111
1112 void *stage_ctx = ralloc_context(NULL);
1113
1114 nir_xfb_info *xfb_info = NULL;
1115 if (s == MESA_SHADER_VERTEX ||
1116 s == MESA_SHADER_TESS_EVAL ||
1117 s == MESA_SHADER_GEOMETRY)
1118 xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);
1119
1120 anv_pipeline_lower_nir(pipeline, stage_ctx, &stages[s], layout);
1121
1122 const unsigned *code;
1123 switch (s) {
1124 case MESA_SHADER_VERTEX:
1125 code = anv_pipeline_compile_vs(compiler, stage_ctx, pipeline->device,
1126 &stages[s]);
1127 break;
1128 case MESA_SHADER_TESS_CTRL:
1129 code = anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->device,
1130 &stages[s], prev_stage);
1131 break;
1132 case MESA_SHADER_TESS_EVAL:
1133 code = anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->device,
1134 &stages[s], prev_stage);
1135 break;
1136 case MESA_SHADER_GEOMETRY:
1137 code = anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->device,
1138 &stages[s], prev_stage);
1139 break;
1140 case MESA_SHADER_FRAGMENT:
1141 code = anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->device,
1142 &stages[s], prev_stage);
1143 break;
1144 default:
1145 unreachable("Invalid graphics shader stage");
1146 }
1147 if (code == NULL) {
1148 ralloc_free(stage_ctx);
1149 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1150 goto fail;
1151 }
1152
1153 struct anv_shader_bin *bin =
1154 anv_device_upload_kernel(pipeline->device, cache,
1155 &stages[s].cache_key,
1156 sizeof(stages[s].cache_key),
1157 code, stages[s].prog_data.base.program_size,
1158 stages[s].nir->constant_data,
1159 stages[s].nir->constant_data_size,
1160 &stages[s].prog_data.base,
1161 brw_prog_data_size(s),
1162 xfb_info, &stages[s].bind_map);
1163 if (!bin) {
1164 ralloc_free(stage_ctx);
1165 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1166 goto fail;
1167 }
1168
1169 pipeline->shaders[s] = bin;
1170 ralloc_free(stage_ctx);
1171
1172 stages[s].feedback.duration += os_time_get_nano() - stage_start;
1173
1174 prev_stage = &stages[s];
1175 }
1176
1177 ralloc_free(pipeline_ctx);
1178
1179 done:
1180
1181 if (pipeline->shaders[MESA_SHADER_FRAGMENT] &&
1182 pipeline->shaders[MESA_SHADER_FRAGMENT]->prog_data->program_size == 0) {
1183 /* This can happen if we decided to implicitly disable the fragment
1184 * shader. See anv_pipeline_compile_fs().
1185 */
1186 anv_shader_bin_unref(pipeline->device,
1187 pipeline->shaders[MESA_SHADER_FRAGMENT]);
1188 pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
1189 pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
1190 }
1191
1192 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1193
1194 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1195 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1196 if (create_feedback) {
1197 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1198
1199 assert(info->stageCount == create_feedback->pipelineStageCreationFeedbackCount);
1200 for (uint32_t i = 0; i < info->stageCount; i++) {
1201 gl_shader_stage s = vk_to_mesa_shader_stage(info->pStages[i].stage);
1202 create_feedback->pPipelineStageCreationFeedbacks[i] = stages[s].feedback;
1203 }
1204 }
1205
1206 return VK_SUCCESS;
1207
1208 fail:
1209 ralloc_free(pipeline_ctx);
1210
1211 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1212 if (pipeline->shaders[s])
1213 anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
1214 }
1215
1216 return result;
1217 }
1218
1219 VkResult
1220 anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
1221 struct anv_pipeline_cache *cache,
1222 const VkComputePipelineCreateInfo *info,
1223 const struct anv_shader_module *module,
1224 const char *entrypoint,
1225 const VkSpecializationInfo *spec_info)
1226 {
1227 VkPipelineCreationFeedbackEXT pipeline_feedback = {
1228 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1229 };
1230 int64_t pipeline_start = os_time_get_nano();
1231
1232 const struct brw_compiler *compiler =
1233 pipeline->device->instance->physicalDevice.compiler;
1234
1235 struct anv_pipeline_stage stage = {
1236 .stage = MESA_SHADER_COMPUTE,
1237 .module = module,
1238 .entrypoint = entrypoint,
1239 .spec_info = spec_info,
1240 .cache_key = {
1241 .stage = MESA_SHADER_COMPUTE,
1242 },
1243 .feedback = {
1244 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
1245 },
1246 };
1247 anv_pipeline_hash_shader(stage.module,
1248 stage.entrypoint,
1249 MESA_SHADER_COMPUTE,
1250 stage.spec_info,
1251 stage.shader_sha1);
1252
1253 struct anv_shader_bin *bin = NULL;
1254
1255 populate_cs_prog_key(&pipeline->device->info, &stage.key.cs);
1256
1257 ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
1258
1259 anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
1260 bool cache_hit;
1261 bin = anv_device_search_for_kernel(pipeline->device, cache, &stage.cache_key,
1262 sizeof(stage.cache_key), &cache_hit);
1263
1264 if (bin == NULL) {
1265 int64_t stage_start = os_time_get_nano();
1266
1267 stage.bind_map = (struct anv_pipeline_bind_map) {
1268 .surface_to_descriptor = stage.surface_to_descriptor,
1269 .sampler_to_descriptor = stage.sampler_to_descriptor
1270 };
1271
1272 /* Set up a binding for the gl_NumWorkGroups */
1273 stage.bind_map.surface_count = 1;
1274 stage.bind_map.surface_to_descriptor[0] = (struct anv_pipeline_binding) {
1275 .set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
1276 };
1277
1278 void *mem_ctx = ralloc_context(NULL);
1279
1280 stage.nir = anv_pipeline_stage_get_nir(pipeline, cache, mem_ctx, &stage);
1281 if (stage.nir == NULL) {
1282 ralloc_free(mem_ctx);
1283 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1284 }
1285
1286 anv_pipeline_lower_nir(pipeline, mem_ctx, &stage, layout);
1287
1288 NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id,
1289 &stage.prog_data.cs);
1290
1291 const unsigned *shader_code =
1292 brw_compile_cs(compiler, pipeline->device, mem_ctx, &stage.key.cs,
1293 &stage.prog_data.cs, stage.nir, -1, NULL);
1294 if (shader_code == NULL) {
1295 ralloc_free(mem_ctx);
1296 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1297 }
1298
1299 const unsigned code_size = stage.prog_data.base.program_size;
1300 bin = anv_device_upload_kernel(pipeline->device, cache,
1301 &stage.cache_key, sizeof(stage.cache_key),
1302 shader_code, code_size,
1303 stage.nir->constant_data,
1304 stage.nir->constant_data_size,
1305 &stage.prog_data.base,
1306 sizeof(stage.prog_data.cs),
1307 NULL, &stage.bind_map);
1308 if (!bin) {
1309 ralloc_free(mem_ctx);
1310 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1311 }
1312
1313 ralloc_free(mem_ctx);
1314
1315 stage.feedback.duration = os_time_get_nano() - stage_start;
1316 }
1317
1318 if (cache_hit) {
1319 stage.feedback.flags |=
1320 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1321 pipeline_feedback.flags |=
1322 VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
1323 }
1324 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
1325
1326 const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
1327 vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
1328 if (create_feedback) {
1329 *create_feedback->pPipelineCreationFeedback = pipeline_feedback;
1330
1331 assert(create_feedback->pipelineStageCreationFeedbackCount == 1);
1332 create_feedback->pPipelineStageCreationFeedbacks[0] = stage.feedback;
1333 }
1334
1335 pipeline->active_stages = VK_SHADER_STAGE_COMPUTE_BIT;
1336 pipeline->shaders[MESA_SHADER_COMPUTE] = bin;
1337
1338 return VK_SUCCESS;
1339 }
1340
1341 /**
1342 * Copy pipeline state not marked as dynamic.
1343 * Dynamic state is pipeline state which hasn't been provided at pipeline
1344 * creation time, but is dynamically provided afterwards using various
1345 * vkCmdSet* functions.
1346 *
1347 * The set of state considered "non_dynamic" is determined by the pieces of
1348 * state that have their corresponding VkDynamicState enums omitted from
1349 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
1350 *
1351 * @param[out] pipeline Destination non_dynamic state.
1352 * @param[in] pCreateInfo Source of non_dynamic state to be copied.
1353 */
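/* For example, if VK_DYNAMIC_STATE_VIEWPORT is listed in pDynamicStates,
 * the viewport values are not copied here and must instead be provided
 * later via vkCmdSetViewport().
 */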
1354 static void
1355 copy_non_dynamic_state(struct anv_pipeline *pipeline,
1356 const VkGraphicsPipelineCreateInfo *pCreateInfo)
1357 {
1358 anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
1359 struct anv_subpass *subpass = pipeline->subpass;
1360
1361 pipeline->dynamic_state = default_dynamic_state;
1362
1363 if (pCreateInfo->pDynamicState) {
1364 /* Remove all of the states that are marked as dynamic */
1365 uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
1366 for (uint32_t s = 0; s < count; s++)
1367 states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
1368 }
1369
1370 struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
1371
1372 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1373 *
1374 * pViewportState is [...] NULL if the pipeline
1375 * has rasterization disabled.
1376 */
1377 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1378 assert(pCreateInfo->pViewportState);
1379
1380 dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
1381 if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
1382 typed_memcpy(dynamic->viewport.viewports,
1383 pCreateInfo->pViewportState->pViewports,
1384 pCreateInfo->pViewportState->viewportCount);
1385 }
1386
1387 dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
1388 if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
1389 typed_memcpy(dynamic->scissor.scissors,
1390 pCreateInfo->pViewportState->pScissors,
1391 pCreateInfo->pViewportState->scissorCount);
1392 }
1393 }
1394
1395 if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
1396 assert(pCreateInfo->pRasterizationState);
1397 dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
1398 }
1399
1400 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
1401 assert(pCreateInfo->pRasterizationState);
1402 dynamic->depth_bias.bias =
1403 pCreateInfo->pRasterizationState->depthBiasConstantFactor;
1404 dynamic->depth_bias.clamp =
1405 pCreateInfo->pRasterizationState->depthBiasClamp;
1406 dynamic->depth_bias.slope =
1407 pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
1408 }
1409
1410 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1411 *
1412 * pColorBlendState is [...] NULL if the pipeline has rasterization
1413 * disabled or if the subpass of the render pass the pipeline is
1414 * created against does not use any color attachments.
1415 */
1416 bool uses_color_att = false;
1417 for (unsigned i = 0; i < subpass->color_count; ++i) {
1418 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1419 uses_color_att = true;
1420 break;
1421 }
1422 }
1423
1424 if (uses_color_att &&
1425 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1426 assert(pCreateInfo->pColorBlendState);
1427
1428 if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
1429 typed_memcpy(dynamic->blend_constants,
1430 pCreateInfo->pColorBlendState->blendConstants, 4);
1431 }
1432
1433 /* If there is no depthstencil attachment, then don't read
1434 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
1435 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
1436 * no need to override the depthstencil defaults in
1437 * anv_pipeline::dynamic_state when there is no depthstencil attachment.
1438 *
1439 * Section 9.2 of the Vulkan 1.0.15 spec says:
1440 *
1441 * pDepthStencilState is [...] NULL if the pipeline has rasterization
1442 * disabled or if the subpass of the render pass the pipeline is created
1443 * against does not use a depth/stencil attachment.
1444 */
1445 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
1446 subpass->depth_stencil_attachment) {
1447 assert(pCreateInfo->pDepthStencilState);
1448
1449 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
1450 dynamic->depth_bounds.min =
1451 pCreateInfo->pDepthStencilState->minDepthBounds;
1452 dynamic->depth_bounds.max =
1453 pCreateInfo->pDepthStencilState->maxDepthBounds;
1454 }
1455
1456 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
1457 dynamic->stencil_compare_mask.front =
1458 pCreateInfo->pDepthStencilState->front.compareMask;
1459 dynamic->stencil_compare_mask.back =
1460 pCreateInfo->pDepthStencilState->back.compareMask;
1461 }
1462
1463 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
1464 dynamic->stencil_write_mask.front =
1465 pCreateInfo->pDepthStencilState->front.writeMask;
1466 dynamic->stencil_write_mask.back =
1467 pCreateInfo->pDepthStencilState->back.writeMask;
1468 }
1469
1470 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
1471 dynamic->stencil_reference.front =
1472 pCreateInfo->pDepthStencilState->front.reference;
1473 dynamic->stencil_reference.back =
1474 pCreateInfo->pDepthStencilState->back.reference;
1475 }
1476 }
1477
1478 pipeline->dynamic_state_mask = states;
1479 }
1480
1481 static void
1482 anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
1483 {
1484 #ifdef DEBUG
1485 struct anv_render_pass *renderpass = NULL;
1486 struct anv_subpass *subpass = NULL;
1487
1488 /* Assert that all required members of VkGraphicsPipelineCreateInfo are
1489 * present. See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
1490 */
1491 assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1492
1493 renderpass = anv_render_pass_from_handle(info->renderPass);
1494 assert(renderpass);
1495
1496 assert(info->subpass < renderpass->subpass_count);
1497 subpass = &renderpass->subpasses[info->subpass];
1498
1499 assert(info->stageCount >= 1);
1500 assert(info->pVertexInputState);
1501 assert(info->pInputAssemblyState);
1502 assert(info->pRasterizationState);
1503 if (!info->pRasterizationState->rasterizerDiscardEnable) {
1504 assert(info->pViewportState);
1505 assert(info->pMultisampleState);
1506
1507 if (subpass && subpass->depth_stencil_attachment)
1508 assert(info->pDepthStencilState);
1509
1510 if (subpass && subpass->color_count > 0) {
1511 bool all_color_unused = true;
1512 for (int i = 0; i < subpass->color_count; i++) {
1513 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
1514 all_color_unused = false;
1515 }
1516 /* pColorBlendState is ignored if the pipeline has rasterization
1517 * disabled or if the subpass of the render pass the pipeline is
1518 * created against does not use any color attachments.
1519 */
1520 assert(info->pColorBlendState || all_color_unused);
1521 }
1522 }
1523
1524 for (uint32_t i = 0; i < info->stageCount; ++i) {
1525 switch (info->pStages[i].stage) {
1526 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
1527 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
1528 assert(info->pTessellationState);
1529 break;
1530 default:
1531 break;
1532 }
1533 }
1534 #endif
1535 }
1536
1537 /**
1538 * Calculate the desired L3 partitioning based on the current state of the
1539 * pipeline. For now this simply returns the conservative defaults calculated
1540 * by get_default_l3_weights(), but we could probably do better by gathering
1541 * more statistics from the pipeline state (e.g. a guess of expected URB usage
1542 * and bound surfaces), or by using feedback from performance counters.
1543 */
1544 void
1545 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
1546 {
1547 const struct gen_device_info *devinfo = &pipeline->device->info;
1548
1549 const struct gen_l3_weights w =
1550 gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);
1551
1552 pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
1553 pipeline->urb.total_size =
1554 gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
1555 }
1556
1557 VkResult
1558 anv_pipeline_init(struct anv_pipeline *pipeline,
1559 struct anv_device *device,
1560 struct anv_pipeline_cache *cache,
1561 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1562 const VkAllocationCallbacks *alloc)
1563 {
1564 VkResult result;
1565
1566 anv_pipeline_validate_create_info(pCreateInfo);
1567
1568 if (alloc == NULL)
1569 alloc = &device->alloc;
1570
1571 pipeline->device = device;
1572
1573 ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
1574 assert(pCreateInfo->subpass < render_pass->subpass_count);
1575 pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];
1576
1577 result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
1578 if (result != VK_SUCCESS)
1579 return result;
1580
1581 pipeline->batch.alloc = alloc;
1582 pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
1583 pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
1584 pipeline->batch.relocs = &pipeline->batch_relocs;
1585 pipeline->batch.status = VK_SUCCESS;
1586
1587 copy_non_dynamic_state(pipeline, pCreateInfo);
1588 pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
1589 pCreateInfo->pRasterizationState->depthClampEnable;
1590
1591 /* Previously we enabled depth clipping when !depthClampEnable.
1592 * DepthClipStateCreateInfo now makes depth clipping explicit, so if the
1593 * clipping info is available, use its enable value to determine clipping;
1594 * otherwise fall back to the previous !depthClampEnable logic.
1595 */
1596 const VkPipelineRasterizationDepthClipStateCreateInfoEXT *clip_info =
1597 vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
1598 PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
1599 pipeline->depth_clip_enable = clip_info ? clip_info->depthClipEnable : !pipeline->depth_clamp_enable;
1600
1601 pipeline->sample_shading_enable = pCreateInfo->pMultisampleState &&
1602 pCreateInfo->pMultisampleState->sampleShadingEnable;
1603
1604 pipeline->needs_data_cache = false;
1605
1606 /* When we free the pipeline, we detect stages based on the NULL status
1607 * of various prog_data pointers. Make them NULL by default.
1608 */
1609 memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
1610
1611 result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
1612 if (result != VK_SUCCESS) {
1613 anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
1614 return result;
1615 }
1616
1617 assert(pipeline->shaders[MESA_SHADER_VERTEX]);
1618
1619 anv_pipeline_setup_l3_config(pipeline, false);
1620
1621 const VkPipelineVertexInputStateCreateInfo *vi_info =
1622 pCreateInfo->pVertexInputState;
1623
1624 const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;
1625
1626 pipeline->vb_used = 0;
1627 for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
1628 const VkVertexInputAttributeDescription *desc =
1629 &vi_info->pVertexAttributeDescriptions[i];
1630
1631 if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
1632 pipeline->vb_used |= 1 << desc->binding;
1633 }
1634
1635 for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
1636 const VkVertexInputBindingDescription *desc =
1637 &vi_info->pVertexBindingDescriptions[i];
1638
1639 pipeline->vb[desc->binding].stride = desc->stride;
1640
1641 /* Step rate is programmed per vertex element (attribute), not
1642 * binding. Set up a map of which bindings step per instance, for
1643 * reference by vertex element setup. */
1644 switch (desc->inputRate) {
1645 default:
1646 case VK_VERTEX_INPUT_RATE_VERTEX:
1647 pipeline->vb[desc->binding].instanced = false;
1648 break;
1649 case VK_VERTEX_INPUT_RATE_INSTANCE:
1650 pipeline->vb[desc->binding].instanced = true;
1651 break;
1652 }
1653
1654 pipeline->vb[desc->binding].instance_divisor = 1;
1655 }
1656
1657 const VkPipelineVertexInputDivisorStateCreateInfoEXT *vi_div_state =
1658 vk_find_struct_const(vi_info->pNext,
1659 PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
1660 if (vi_div_state) {
1661 for (uint32_t i = 0; i < vi_div_state->vertexBindingDivisorCount; i++) {
1662 const VkVertexInputBindingDivisorDescriptionEXT *desc =
1663 &vi_div_state->pVertexBindingDivisors[i];
1664
1665 pipeline->vb[desc->binding].instance_divisor = desc->divisor;
1666 }
1667 }
1668
1669 /* Our implementation of VK_KHR_multiview uses instancing to draw the
1670 * different views. If the client asks for instancing, we need to multiply
1671 * the instance divisor by the number of views to ensure that we repeat the
1672 * client's per-instance data once for each view.
1673 */
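/* E.g. with two views (view_mask 0b11) and an application-provided divisor
 * of 1, the effective divisor becomes 2, so the same per-instance data is
 * used for both views of each instance.
 */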
1674 if (pipeline->subpass->view_mask) {
1675 const uint32_t view_count = anv_subpass_view_count(pipeline->subpass);
1676 for (uint32_t vb = 0; vb < MAX_VBS; vb++) {
1677 if (pipeline->vb[vb].instanced)
1678 pipeline->vb[vb].instance_divisor *= view_count;
1679 }
1680 }
1681
1682 const VkPipelineInputAssemblyStateCreateInfo *ia_info =
1683 pCreateInfo->pInputAssemblyState;
1684 const VkPipelineTessellationStateCreateInfo *tess_info =
1685 pCreateInfo->pTessellationState;
1686 pipeline->primitive_restart = ia_info->primitiveRestartEnable;
1687
1688 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
1689 pipeline->topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
1690 else
1691 pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
1692
1693 return VK_SUCCESS;
1694 }