/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "util/mesa-sha1.h"
#include "common/gen_l3_config.h"
#include "anv_private.h"
#include "brw_nir.h"
#include "anv_nir.h"
#include "spirv/nir_spirv.h"

/* Needed for SWIZZLE macros */
#include "program/prog_instruction.h"

// Shader functions

VkResult anv_CreateShaderModule(
    VkDevice                                    _device,
    const VkShaderModuleCreateInfo*             pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkShaderModule*                             pShaderModule)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   module = vk_alloc2(&device->alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   module->size = pCreateInfo->codeSize;
   memcpy(module->data, pCreateInfo->pCode, module->size);

   _mesa_sha1_compute(module->data, module->size, module->sha1);

   *pShaderModule = anv_shader_module_to_handle(module);

   return VK_SUCCESS;
}
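
/* A minimal usage sketch from the application side (assuming a valid
 * VkDevice and a SPIR-V blob):
 *
 *    VkShaderModuleCreateInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
 *       .codeSize = spirv_size_in_bytes,   // must be a multiple of 4
 *       .pCode = spirv_words,
 *    };
 *    VkShaderModule mod;
 *    vkCreateShaderModule(device, &info, NULL, &mod);
 *
 * The SHA-1 computed above is what keys this module's shaders in the
 * pipeline cache.
 */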

void anv_DestroyShaderModule(
    VkDevice                                    _device,
    VkShaderModule                              _module,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader_module, module, _module);

   if (!module)
      return;

   vk_free2(&device->alloc, pAllocator, module);
}

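/* The first word of every SPIR-V binary is this magic number; we use it
 * below to sanity-check the module before handing it to spirv_to_nir.
 */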
#define SPIR_V_MAGIC_NUMBER 0x07230203

/* Eventually, this will become part of anv_CreateShader. Unfortunately,
 * we can't do that yet because we don't have the ability to copy nir.
 */
static nir_shader *
anv_shader_compile_to_nir(struct anv_device *device,
                          struct anv_shader_module *module,
                          const char *entrypoint_name,
                          gl_shader_stage stage,
                          const VkSpecializationInfo *spec_info)
{
   if (strcmp(entrypoint_name, "main") != 0) {
      anv_finishme("Multiple shaders per module not really supported");
   }

   const struct brw_compiler *compiler =
      device->instance->physicalDevice.compiler;
   const nir_shader_compiler_options *nir_options =
      compiler->glsl_compiler_options[stage].NirOptions;

   uint32_t *spirv = (uint32_t *) module->data;
   assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
   assert(module->size % 4 == 0);

   uint32_t num_spec_entries = 0;
   struct nir_spirv_specialization *spec_entries = NULL;
   if (spec_info && spec_info->mapEntryCount > 0) {
      num_spec_entries = spec_info->mapEntryCount;
      spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
      for (uint32_t i = 0; i < num_spec_entries; i++) {
         VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry.offset;
         assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

         spec_entries[i].id = spec_info->pMapEntries[i].constantID;
         spec_entries[i].data = *(const uint32_t *)data;
      }
   }

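   /* spirv_to_nir consumes the specialization entries gathered above,
    * baking each constantID's value into the generated NIR, and returns
    * the requested entry point; the whole shader hangs off
    * entry_point->shader, so the entries can be freed right after.
    */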
   nir_function *entry_point =
      spirv_to_nir(spirv, module->size / 4,
                   spec_entries, num_spec_entries,
                   stage, entrypoint_name, nir_options);
   nir_shader *nir = entry_point->shader;
   assert(nir->stage == stage);
   nir_validate_shader(nir);

   free(spec_entries);

   if (stage == MESA_SHADER_FRAGMENT) {
      nir_lower_wpos_center(nir);
      nir_validate_shader(nir);
   }

   nir_lower_returns(nir);
   nir_validate_shader(nir);

   nir_inline_functions(nir);
   nir_validate_shader(nir);

   /* Pick off the single entrypoint that we want */
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (func != entry_point)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   entry_point->name = ralloc_strdup(entry_point, "main");

   nir_remove_dead_variables(nir, nir_var_shader_in);
   nir_remove_dead_variables(nir, nir_var_shader_out);
   nir_remove_dead_variables(nir, nir_var_system_value);
   nir_validate_shader(nir);

   nir_propagate_invariant(nir);
   nir_validate_shader(nir);

   nir_lower_io_to_temporaries(entry_point->shader, entry_point->impl,
                               true, false);

   nir_lower_system_values(nir);
   nir_validate_shader(nir);

   /* Vulkan uses the separate-shader linking model */
   nir->info->separate_shader = true;

   nir = brw_preprocess_nir(compiler, nir);

   nir_lower_clip_cull_distance_arrays(nir);
   nir_validate_shader(nir);

   if (stage == MESA_SHADER_FRAGMENT)
      anv_nir_lower_input_attachments(nir);

   nir_shader_gather_info(nir, entry_point->impl);

   nir_variable_mode indirect_mask = 0;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
      indirect_mask |= nir_var_shader_in;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectOutput)
      indirect_mask |= nir_var_shader_out;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
      indirect_mask |= nir_var_local;

   nir_lower_indirect_derefs(nir, indirect_mask);

   return nir;
}

void anv_DestroyPipeline(
    VkDevice                                    _device,
    VkPipeline                                  _pipeline,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   if (!pipeline)
      return;

   anv_reloc_list_finish(&pipeline->batch_relocs,
                         pAllocator ? pAllocator : &device->alloc);
   if (pipeline->blend_state.map)
      anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);

   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (pipeline->shaders[s])
         anv_shader_bin_unref(device, pipeline->shaders[s]);
   }

   vk_free2(&device->alloc, pAllocator, pipeline);
}

static const uint32_t vk_to_gen_primitive_type[] = {
   [VK_PRIMITIVE_TOPOLOGY_POINT_LIST]                    = _3DPRIM_POINTLIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST]                     = _3DPRIM_LINELIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP]                    = _3DPRIM_LINESTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST]                 = _3DPRIM_TRILIST,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP]                = _3DPRIM_TRISTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN]                  = _3DPRIM_TRIFAN,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY]      = _3DPRIM_LINELIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY]  = _3DPRIM_TRILIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
/* [VK_PRIMITIVE_TOPOLOGY_PATCH_LIST]                    = _3DPRIM_PATCHLIST_1 */
};
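
/* VK_PRIMITIVE_TOPOLOGY_PATCH_LIST is deliberately absent from the table
 * above; patches stay unsupported until tessellation is wired up (see the
 * anv_finishme("no tessellation support") in anv_pipeline_init below).
 */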

static void
populate_sampler_prog_key(const struct gen_device_info *devinfo,
                          struct brw_sampler_prog_key_data *key)
{
   /* XXX: Handle texture swizzle on HSW- */
   for (int i = 0; i < MAX_SAMPLERS; i++) {
      /* Assume color sampler, no swizzling. (Works for BDW+) */
      key->swizzles[i] = SWIZZLE_XYZW;
   }
}

static void
populate_vs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_vs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* XXX: Handle vertex input work-arounds */

   /* XXX: Handle sampler_prog_key */
}

static void
populate_gs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_gs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

static void
populate_wm_prog_key(const struct gen_device_info *devinfo,
                     const VkGraphicsPipelineCreateInfo *info,
                     struct brw_wm_prog_key *key)
{
   ANV_FROM_HANDLE(anv_render_pass, render_pass, info->renderPass);

   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* TODO: Fill out key->input_slots_valid */

   /* Vulkan doesn't specify a default */
   key->high_quality_derivatives = false;

   /* XXX Vulkan doesn't appear to specify */
   key->clamp_fragment_color = false;

   key->nr_color_regions =
      render_pass->subpasses[info->subpass].color_count;

   key->replicate_alpha = key->nr_color_regions > 1 &&
                          info->pMultisampleState &&
                          info->pMultisampleState->alphaToCoverageEnable;

   if (info->pMultisampleState && info->pMultisampleState->rasterizationSamples > 1) {
      /* We should probably pull this out of the shader, but it's fairly
       * harmless to compute it and then let dead-code elimination take
       * care of it.
       */
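      /* Worked example: minSampleShading = 0.5 with rasterizationSamples = 4
       * gives 0.5 * 4 = 2 > 1, so the fragment shader must run per-sample.
       */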
      key->persample_interp =
         (info->pMultisampleState->minSampleShading *
          info->pMultisampleState->rasterizationSamples) > 1;
      key->multisample_fbo = true;
   }
}

static void
populate_cs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_cs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

static nir_shader *
anv_pipeline_compile(struct anv_pipeline *pipeline,
                     struct anv_shader_module *module,
                     const char *entrypoint,
                     gl_shader_stage stage,
                     const VkSpecializationInfo *spec_info,
                     struct brw_stage_prog_data *prog_data,
                     struct anv_pipeline_bind_map *map)
{
   nir_shader *nir = anv_shader_compile_to_nir(pipeline->device,
                                               module, entrypoint, stage,
                                               spec_info);
   if (nir == NULL)
      return NULL;

   anv_nir_lower_push_constants(nir);

   /* Figure out the number of parameters */
   prog_data->nr_params = 0;

   if (nir->num_uniforms > 0) {
      /* If the shader uses any push constants at all, we'll just give
       * them the maximum possible number.
       */
      assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
      prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
   }

   if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets)
      prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2;

   if (nir->info->num_images > 0) {
      prog_data->nr_params += nir->info->num_images * BRW_IMAGE_PARAM_SIZE;
      pipeline->needs_data_cache = true;
   }

   if (stage == MESA_SHADER_COMPUTE)
      ((struct brw_cs_prog_data *)prog_data)->thread_local_id_index =
         prog_data->nr_params++; /* The CS Thread ID uniform */

   if (nir->info->num_ssbos > 0)
      pipeline->needs_data_cache = true;

   if (prog_data->nr_params > 0) {
      /* XXX: I think we're leaking this */
      prog_data->param = (const union gl_constant_value **)
         malloc(prog_data->nr_params * sizeof(union gl_constant_value *));

      /* We now set the param values to be offsets into an
       * anv_push_constants structure. Since the compiler doesn't
       * actually dereference any of the gl_constant_value pointers in the
       * params array, it doesn't really matter what we put here.
       */
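      /* Concretely, each entry ends up holding the value
       * offsetof(struct anv_push_constants, client_data) + i * sizeof(float),
       * smuggled through a pointer computed off a NULL base below.
       */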
      struct anv_push_constants *null_data = NULL;
      if (nir->num_uniforms > 0) {
         /* Fill out the push constants section of the param array */
         for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++)
            prog_data->param[i] = (const union gl_constant_value *)
               &null_data->client_data[i * sizeof(float)];
      }
   }

   /* Set up dynamic offsets */
   anv_nir_apply_dynamic_offsets(pipeline, nir, prog_data);

   /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
   if (pipeline->layout)
      anv_nir_apply_pipeline_layout(pipeline, nir, prog_data, map);

   /* nir_lower_io will only handle the push constants; we need to set this
    * to the full number of possible uniforms.
    */
   nir->num_uniforms = prog_data->nr_params * 4;

   return nir;
}

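/* Start every shader resource section of the binding table at `bias`,
 * leaving the first `bias` slots for other uses: the fragment shader passes
 * its render-target count so that slots [0, num_rts) hold the color
 * attachments, the compute shader passes 1, and the VS/GS pass 0.
 */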
static void
anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
{
   prog_data->binding_table.size_bytes = 0;
   prog_data->binding_table.texture_start = bias;
   prog_data->binding_table.gather_texture_start = bias;
   prog_data->binding_table.ubo_start = bias;
   prog_data->binding_table.ssbo_start = bias;
   prog_data->binding_table.image_start = bias;
}

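/* Hand a freshly compiled kernel either to the pipeline cache, which takes
 * ownership of the resulting anv_shader_bin and can share it between
 * pipelines, or, when no cache is in use, create a standalone
 * anv_shader_bin owned by this pipeline alone.
 */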
static struct anv_shader_bin *
anv_pipeline_upload_kernel(struct anv_pipeline *pipeline,
                           struct anv_pipeline_cache *cache,
                           const void *key_data, uint32_t key_size,
                           const void *kernel_data, uint32_t kernel_size,
                           const struct brw_stage_prog_data *prog_data,
                           uint32_t prog_data_size,
                           const struct anv_pipeline_bind_map *bind_map)
{
   if (cache) {
      return anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
                                              kernel_data, kernel_size,
                                              prog_data, prog_data_size,
                                              bind_map);
   } else {
      return anv_shader_bin_create(pipeline->device, key_data, key_size,
                                   kernel_data, kernel_size,
                                   prog_data, prog_data_size,
                                   prog_data->param, bind_map);
   }
}


static void
anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
                                gl_shader_stage stage,
                                struct anv_shader_bin *shader)
{
   pipeline->shaders[stage] = shader;
   pipeline->active_stages |= mesa_to_vk_shader_stage(stage);
}

static VkResult
anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_vs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_vs_prog_key(&pipeline->device->info, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_vs_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };
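
      /* Note that surface_to_descriptor and sampler_to_descriptor live on
       * this stack frame; the bind map contents are copied when the kernel
       * is uploaded (presumably by anv_shader_bin_create via
       * anv_pipeline_upload_kernel), so the pointers must not outlive this
       * function.
       */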

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_VERTEX, spec_info,
                                             &prog_data.base.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      anv_fill_binding_table(&prog_data.base.base, 0);

      void *mem_ctx = ralloc_context(NULL);

      ralloc_steal(mem_ctx, nir);

      prog_data.inputs_read = nir->info->inputs_read;

      brw_compute_vue_map(&pipeline->device->info,
                          &prog_data.base.vue_map,
                          nir->info->outputs_written,
                          nir->info->separate_shader);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_vs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, false, -1, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX, bin);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_gs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_gs_prog_key(&pipeline->device->info, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_gs_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_GEOMETRY, spec_info,
                                             &prog_data.base.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      anv_fill_binding_table(&prog_data.base.base, 0);

      void *mem_ctx = ralloc_context(NULL);

      ralloc_steal(mem_ctx, nir);

      brw_compute_vue_map(&pipeline->device->info,
                          &prog_data.base.vue_map,
                          nir->info->outputs_written,
                          nir->info->separate_shader);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_gs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, -1, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      /* TODO: SIMD8 GS */
      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY, bin);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_wm_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_wm_prog_key(&pipeline->device->info, info, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_wm_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor + 8,
         .sampler_to_descriptor = sampler_to_descriptor
      };
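
      /* The "+ 8" above reserves the first eight surface-map slots for
       * render targets while the pipeline layout is applied.  Once the
       * actual render-target count is known, the map is rewound by num_rts
       * and the RT bindings are copied into the reclaimed slots below.
       */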

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_FRAGMENT, spec_info,
                                             &prog_data.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      unsigned num_rts = 0;
      struct anv_pipeline_binding rt_bindings[8];
      nir_function_impl *impl = nir_shader_get_entrypoint(nir);
      nir_foreach_variable_safe(var, &nir->outputs) {
         if (var->data.location < FRAG_RESULT_DATA0)
            continue;

         unsigned rt = var->data.location - FRAG_RESULT_DATA0;
         if (rt >= key.nr_color_regions) {
            /* Out-of-bounds, throw it away */
            var->data.mode = nir_var_local;
            exec_node_remove(&var->node);
            exec_list_push_tail(&impl->locals, &var->node);
            continue;
         }

         /* Give it a new, compacted, location */
         var->data.location = FRAG_RESULT_DATA0 + num_rts;

         unsigned array_len =
            glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
         assert(num_rts + array_len <= 8);

         for (unsigned i = 0; i < array_len; i++) {
            rt_bindings[num_rts + i] = (struct anv_pipeline_binding) {
               .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
               .binding = 0,
               .index = rt + i,
            };
         }

         num_rts += array_len;
      }

      if (num_rts == 0) {
         /* If we have no render targets, we need a null render target */
         rt_bindings[0] = (struct anv_pipeline_binding) {
            .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
            .binding = 0,
            .index = UINT8_MAX,
         };
         num_rts = 1;
      }

      assert(num_rts <= 8);
      map.surface_to_descriptor -= num_rts;
      map.surface_count += num_rts;
      assert(map.surface_count <= 256);
      memcpy(map.surface_to_descriptor, rt_bindings,
             num_rts * sizeof(*rt_bindings));

      anv_fill_binding_table(&prog_data.base, num_rts);

      void *mem_ctx = ralloc_context(NULL);

      ralloc_steal(mem_ctx, nir);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, -1, -1, true, false, NULL, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT, bin);

   return VK_SUCCESS;
}

VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_cs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_cs_prog_key(&pipeline->device->info, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_cs_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_COMPUTE, spec_info,
                                             &prog_data.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      anv_fill_binding_table(&prog_data.base, 1);

      void *mem_ctx = ralloc_context(NULL);

      ralloc_steal(mem_ctx, nir);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_cs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        -1, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE, bin);

   return VK_SUCCESS;
}

/**
 * Copy pipeline state not marked as dynamic.
 * Dynamic state is pipeline state which hasn't been provided at pipeline
 * creation time, but is dynamically provided afterwards using various
 * vkCmdSet* functions.
 *
 * The set of state considered "non_dynamic" is determined by the pieces of
 * state that have their corresponding VkDynamicState enums omitted from
 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
 *
 * @param[out] pipeline    Destination non_dynamic state.
 * @param[in]  pCreateInfo Source of non_dynamic state to be copied.
 */
static void
copy_non_dynamic_state(struct anv_pipeline *pipeline,
                       const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
   anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
   ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
   struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];

   pipeline->dynamic_state = default_dynamic_state;

   if (pCreateInfo->pDynamicState) {
      /* Remove all of the states that are marked as dynamic */
      uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
      for (uint32_t s = 0; s < count; s++)
         states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
   }
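
   /* For example, if VK_DYNAMIC_STATE_VIEWPORT appears in pDynamicStates,
    * its bit (1 << VK_DYNAMIC_STATE_VIEWPORT) is cleared from `states`
    * above, so the viewport copy below is skipped and the viewport must
    * instead come from vkCmdSetViewport().
    */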

   struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;

   /* Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pViewportState is [...] NULL if the pipeline
    *    has rasterization disabled.
    */
   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
      assert(pCreateInfo->pViewportState);

      dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
      if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
         typed_memcpy(dynamic->viewport.viewports,
                      pCreateInfo->pViewportState->pViewports,
                      pCreateInfo->pViewportState->viewportCount);
      }

      dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
      if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
         typed_memcpy(dynamic->scissor.scissors,
                      pCreateInfo->pViewportState->pScissors,
                      pCreateInfo->pViewportState->scissorCount);
      }
   }

   if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
   }

   if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->depth_bias.bias =
         pCreateInfo->pRasterizationState->depthBiasConstantFactor;
      dynamic->depth_bias.clamp =
         pCreateInfo->pRasterizationState->depthBiasClamp;
      dynamic->depth_bias.slope =
         pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
   }

   /* Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pColorBlendState is [...] NULL if the pipeline has rasterization
    *    disabled or if the subpass of the render pass the pipeline is
    *    created against does not use any color attachments.
    */
   bool uses_color_att = false;
   for (unsigned i = 0; i < subpass->color_count; ++i) {
      if (subpass->color_attachments[i] != VK_ATTACHMENT_UNUSED) {
         uses_color_att = true;
         break;
      }
   }

   if (uses_color_att &&
       !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
      assert(pCreateInfo->pColorBlendState);

      if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
         typed_memcpy(dynamic->blend_constants,
                      pCreateInfo->pColorBlendState->blendConstants, 4);
   }

   /* If there is no depth/stencil attachment, then don't read
    * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
    * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
    * no need to override the depth/stencil defaults in
    * anv_pipeline::dynamic_state when there is no depth/stencil attachment.
    *
    * Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pDepthStencilState is [...] NULL if the pipeline has rasterization
    *    disabled or if the subpass of the render pass the pipeline is created
    *    against does not use a depth/stencil attachment.
    */
   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
       subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
      assert(pCreateInfo->pDepthStencilState);

      if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
         dynamic->depth_bounds.min =
            pCreateInfo->pDepthStencilState->minDepthBounds;
         dynamic->depth_bounds.max =
            pCreateInfo->pDepthStencilState->maxDepthBounds;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
         dynamic->stencil_compare_mask.front =
            pCreateInfo->pDepthStencilState->front.compareMask;
         dynamic->stencil_compare_mask.back =
            pCreateInfo->pDepthStencilState->back.compareMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
         dynamic->stencil_write_mask.front =
            pCreateInfo->pDepthStencilState->front.writeMask;
         dynamic->stencil_write_mask.back =
            pCreateInfo->pDepthStencilState->back.writeMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
         dynamic->stencil_reference.front =
            pCreateInfo->pDepthStencilState->front.reference;
         dynamic->stencil_reference.back =
            pCreateInfo->pDepthStencilState->back.reference;
      }
   }

   pipeline->dynamic_state_mask = states;
}

static void
anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
{
   struct anv_render_pass *renderpass = NULL;
   struct anv_subpass *subpass = NULL;

   /* Assert that all required members of VkGraphicsPipelineCreateInfo are
    * present. See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
    */
   assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   renderpass = anv_render_pass_from_handle(info->renderPass);
   assert(renderpass);

   assert(info->subpass < renderpass->subpass_count);
   subpass = &renderpass->subpasses[info->subpass];

   assert(info->stageCount >= 1);
   assert(info->pVertexInputState);
   assert(info->pInputAssemblyState);
   assert(info->pRasterizationState);
   if (!info->pRasterizationState->rasterizerDiscardEnable) {
      assert(info->pViewportState);
      assert(info->pMultisampleState);

      if (subpass && subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED)
         assert(info->pDepthStencilState);

      if (subpass && subpass->color_count > 0)
         assert(info->pColorBlendState);
   }

   for (uint32_t i = 0; i < info->stageCount; ++i) {
      switch (info->pStages[i].stage) {
      case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
         assert(info->pTessellationState);
         break;
      default:
         break;
      }
   }
}

/**
 * Calculate the desired L3 partitioning based on the current state of the
 * pipeline. For now this simply returns the conservative defaults calculated
 * by get_default_l3_weights(), but we could probably do better by gathering
 * more statistics from the pipeline state (e.g. guess of expected URB usage
 * and bound surfaces), or by using feedback from performance counters.
 */
void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
{
   const struct gen_device_info *devinfo = &pipeline->device->info;

   const struct gen_l3_weights w =
      gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);

   pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
   pipeline->urb.total_size =
      gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
}

VkResult
anv_pipeline_init(struct anv_pipeline *pipeline,
                  struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc)
{
   VkResult result;

   anv_validate {
      anv_pipeline_validate_create_info(pCreateInfo);
   }

   if (alloc == NULL)
      alloc = &device->alloc;

   pipeline->device = device;
   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);

   result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
   if (result != VK_SUCCESS)
      return result;

   pipeline->batch.alloc = alloc;
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
   pipeline->batch.relocs = &pipeline->batch_relocs;

   copy_non_dynamic_state(pipeline, pCreateInfo);
   pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
                                  pCreateInfo->pRasterizationState->depthClampEnable;

   pipeline->needs_data_cache = false;

   /* When we free the pipeline, we detect stages based on the NULL status
    * of various prog_data pointers. Make them NULL by default.
    */
   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));

   pipeline->active_stages = 0;

   const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
   struct anv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
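   /* VkShaderStageFlagBits assigns one bit per stage in the same order as
    * gl_shader_stage (VK_SHADER_STAGE_VERTEX_BIT == 1 << MESA_SHADER_VERTEX,
    * and so on), so ffs(stage_bit) - 1 converts a Vulkan stage bit into the
    * mesa stage index.
    */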
   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
      pStages[stage] = &pCreateInfo->pStages[i];
      modules[stage] = anv_shader_module_from_handle(pStages[stage]->module);
   }

   if (modules[MESA_SHADER_VERTEX]) {
      result = anv_pipeline_compile_vs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_VERTEX],
                                       pStages[MESA_SHADER_VERTEX]->pName,
                                       pStages[MESA_SHADER_VERTEX]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   if (modules[MESA_SHADER_TESS_CTRL] || modules[MESA_SHADER_TESS_EVAL])
      anv_finishme("no tessellation support");

   if (modules[MESA_SHADER_GEOMETRY]) {
      result = anv_pipeline_compile_gs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_GEOMETRY],
                                       pStages[MESA_SHADER_GEOMETRY]->pName,
                                       pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   if (modules[MESA_SHADER_FRAGMENT]) {
      result = anv_pipeline_compile_fs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_FRAGMENT],
                                       pStages[MESA_SHADER_FRAGMENT]->pName,
                                       pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);

   anv_pipeline_setup_l3_config(pipeline, false);

   const VkPipelineVertexInputStateCreateInfo *vi_info =
      pCreateInfo->pVertexInputState;

   const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;

   pipeline->vb_used = 0;
   for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
      const VkVertexInputAttributeDescription *desc =
         &vi_info->pVertexAttributeDescriptions[i];

      /* inputs_read is a 64-bit mask, so the shift must be 64-bit too */
      if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
         pipeline->vb_used |= 1 << desc->binding;
   }

   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
      const VkVertexInputBindingDescription *desc =
         &vi_info->pVertexBindingDescriptions[i];

      pipeline->binding_stride[desc->binding] = desc->stride;

      /* Step rate is programmed per vertex element (attribute), not
       * binding. Set up a map of which bindings step per instance, for
       * reference by vertex element setup.
       */
      switch (desc->inputRate) {
      default:
      case VK_VERTEX_INPUT_RATE_VERTEX:
         pipeline->instancing_enable[desc->binding] = false;
         break;
      case VK_VERTEX_INPUT_RATE_INSTANCE:
         pipeline->instancing_enable[desc->binding] = true;
         break;
      }
   }

   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
      pCreateInfo->pInputAssemblyState;
   pipeline->primitive_restart = ia_info->primitiveRestartEnable;
   pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];

   return VK_SUCCESS;

compile_fail:
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (pipeline->shaders[s])
         anv_shader_bin_unref(device, pipeline->shaders[s]);
   }

   anv_reloc_list_finish(&pipeline->batch_relocs, alloc);

   return result;
}