/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/mesa-sha1.h"
#include "common/gen_l3_config.h"
#include "anv_private.h"
#include "spirv/nir_spirv.h"

/* Needed for SWIZZLE macros */
#include "program/prog_instruction.h"

VkResult anv_CreateShaderModule(
    VkDevice                                    _device,
    const VkShaderModuleCreateInfo*             pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkShaderModule*                             pShaderModule)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   module = vk_alloc2(&device->alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   module->size = pCreateInfo->codeSize;
   memcpy(module->data, pCreateInfo->pCode, module->size);

   _mesa_sha1_compute(module->data, module->size, module->sha1);

   *pShaderModule = anv_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void anv_DestroyShaderModule(
    VkDevice                                    _device,
    VkShaderModule                              _module,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader_module, module, _module);

   if (!module)
      return;

   vk_free2(&device->alloc, pAllocator, module);
}

#define SPIR_V_MAGIC_NUMBER 0x07230203
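
/* Every valid SPIR-V binary starts with this magic number as its first
 * 32-bit word; anv_shader_compile_to_nir asserts on it below before handing
 * the words to spirv_to_nir.
 */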

/* Eventually, this will become part of anv_CreateShader.  Unfortunately,
 * we can't do that yet because we don't have the ability to copy nir.
 */
static nir_shader *
anv_shader_compile_to_nir(struct anv_device *device,
                          struct anv_shader_module *module,
                          const char *entrypoint_name,
                          gl_shader_stage stage,
                          const VkSpecializationInfo *spec_info)
{
   if (strcmp(entrypoint_name, "main") != 0) {
      anv_finishme("Multiple shaders per module not really supported");
   }

   const struct brw_compiler *compiler =
      device->instance->physicalDevice.compiler;
   const nir_shader_compiler_options *nir_options =
      compiler->glsl_compiler_options[stage].NirOptions;

   uint32_t *spirv = (uint32_t *) module->data;
   assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
   assert(module->size % 4 == 0);

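   /* Unpack VkSpecializationInfo into the flat array of id/value pairs that
    * spirv_to_nir consumes.  Each map entry must name a 32-bit value inside
    * pData; the assert below checks that it stays within dataSize.
    */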
   uint32_t num_spec_entries = 0;
   struct nir_spirv_specialization *spec_entries = NULL;
   if (spec_info && spec_info->mapEntryCount > 0) {
      num_spec_entries = spec_info->mapEntryCount;
      spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
      for (uint32_t i = 0; i < num_spec_entries; i++) {
         VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry.offset;
         assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

         spec_entries[i].id = spec_info->pMapEntries[i].constantID;
         spec_entries[i].data = *(const uint32_t *)data;
      }
   }

   nir_function *entry_point =
      spirv_to_nir(spirv, module->size / 4,
                   spec_entries, num_spec_entries,
                   stage, entrypoint_name, nir_options);
   nir_shader *nir = entry_point->shader;
   assert(nir->stage == stage);
   nir_validate_shader(nir);

   free(spec_entries);

   if (stage == MESA_SHADER_FRAGMENT) {
      nir_lower_wpos_center(nir);
      nir_validate_shader(nir);
   }

   nir_lower_returns(nir);
   nir_validate_shader(nir);

   nir_inline_functions(nir);
   nir_validate_shader(nir);

   /* Pick off the single entrypoint that we want */
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (func != entry_point)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   entry_point->name = ralloc_strdup(entry_point, "main");

   nir_remove_dead_variables(nir, nir_var_shader_in);
   nir_remove_dead_variables(nir, nir_var_shader_out);
   nir_remove_dead_variables(nir, nir_var_system_value);
   nir_validate_shader(nir);

   nir_propagate_invariant(nir);
   nir_validate_shader(nir);

   nir_lower_io_to_temporaries(entry_point->shader, entry_point->impl,
                               true, false);

   nir_lower_system_values(nir);
   nir_validate_shader(nir);

   /* Vulkan uses the separate-shader linking model */
   nir->info->separate_shader = true;

   nir = brw_preprocess_nir(compiler, nir);

   nir_lower_clip_cull_distance_arrays(nir);
   nir_validate_shader(nir);

   if (stage == MESA_SHADER_FRAGMENT)
      anv_nir_lower_input_attachments(nir);

   nir_shader_gather_info(nir, entry_point->impl);

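   /* Lower indirect addressing for any variable modes the backend's options
    * say it cannot handle; nir_lower_indirect_derefs rewrites the remaining
    * indirect accesses into if-ladders of direct ones.
    */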
   nir_variable_mode indirect_mask = 0;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
      indirect_mask |= nir_var_shader_in;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectOutput)
      indirect_mask |= nir_var_shader_out;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
      indirect_mask |= nir_var_local;

   nir_lower_indirect_derefs(nir, indirect_mask);

   return nir;
}

void anv_DestroyPipeline(
    VkDevice                                    _device,
    VkPipeline                                  _pipeline,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   if (!pipeline)
      return;

   anv_reloc_list_finish(&pipeline->batch_relocs,
                         pAllocator ? pAllocator : &device->alloc);
   if (pipeline->blend_state.map)
      anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);

   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (pipeline->shaders[s])
         anv_shader_bin_unref(device, pipeline->shaders[s]);
   }

   vk_free2(&device->alloc, pAllocator, pipeline);
}
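
/* Map Vulkan primitive topologies onto gen 3DPRIM values.  The
 * VkPrimitiveTopology enum values are dense starting at zero, so the table
 * below is indexed by the topology directly.  Patch lists stay commented out
 * until tessellation is supported (see the anv_finishme in
 * anv_pipeline_init).
 */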
static const uint32_t vk_to_gen_primitive_type[] = {
   [VK_PRIMITIVE_TOPOLOGY_POINT_LIST]                    = _3DPRIM_POINTLIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST]                     = _3DPRIM_LINELIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP]                    = _3DPRIM_LINESTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST]                 = _3DPRIM_TRILIST,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP]                = _3DPRIM_TRISTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN]                  = _3DPRIM_TRIFAN,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY]      = _3DPRIM_LINELIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY]  = _3DPRIM_TRILIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
   /* [VK_PRIMITIVE_TOPOLOGY_PATCH_LIST]                 = _3DPRIM_PATCHLIST_1 */
};

static void
populate_sampler_prog_key(const struct gen_device_info *devinfo,
                          struct brw_sampler_prog_key_data *key)
{
   /* XXX: Handle texture swizzle on HSW- */
   for (int i = 0; i < MAX_SAMPLERS; i++) {
      /* Assume color sampler, no swizzling. (Works for BDW+) */
      key->swizzles[i] = SWIZZLE_XYZW;
   }
}

static void
populate_vs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_vs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* XXX: Handle vertex input work-arounds */

   /* XXX: Handle sampler_prog_key */
}

static void
populate_gs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_gs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

static void
populate_wm_prog_key(const struct gen_device_info *devinfo,
                     const VkGraphicsPipelineCreateInfo *info,
                     struct brw_wm_prog_key *key)
{
   ANV_FROM_HANDLE(anv_render_pass, render_pass, info->renderPass);

   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* TODO: Fill out key->input_slots_valid */

   /* Vulkan doesn't specify a default */
   key->high_quality_derivatives = false;

   /* XXX Vulkan doesn't appear to specify */
   key->clamp_fragment_color = false;

   key->nr_color_regions =
      render_pass->subpasses[info->subpass].color_count;

   key->replicate_alpha = key->nr_color_regions > 1 &&
                          info->pMultisampleState &&
                          info->pMultisampleState->alphaToCoverageEnable;

   if (info->pMultisampleState &&
       info->pMultisampleState->rasterizationSamples > 1) {
      /* We should probably pull this out of the shader, but it's fairly
       * harmless to compute it and then let dead-code take care of it.
       */
      key->persample_interp =
         (info->pMultisampleState->minSampleShading *
          info->pMultisampleState->rasterizationSamples) > 1;
      key->multisample_fbo = true;
   }
}

static void
populate_cs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_cs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

static nir_shader *
anv_pipeline_compile(struct anv_pipeline *pipeline,
                     struct anv_shader_module *module,
                     const char *entrypoint,
                     gl_shader_stage stage,
                     const VkSpecializationInfo *spec_info,
                     struct brw_stage_prog_data *prog_data,
                     struct anv_pipeline_bind_map *map)
{
   nir_shader *nir = anv_shader_compile_to_nir(pipeline->device,
                                               module, entrypoint, stage,
                                               spec_info);
   if (nir == NULL)
      return NULL;

   anv_nir_lower_push_constants(nir);

   /* Figure out the number of parameters */
   prog_data->nr_params = 0;

   if (nir->num_uniforms > 0) {
      /* If the shader uses any push constants at all, we'll just give
       * them the maximum possible number
       */
      assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
      prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
   }

   if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets)
      prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2;

   if (nir->info->num_images > 0) {
      prog_data->nr_params += nir->info->num_images * BRW_IMAGE_PARAM_SIZE;
      pipeline->needs_data_cache = true;
   }

   if (stage == MESA_SHADER_COMPUTE)
      ((struct brw_cs_prog_data *)prog_data)->thread_local_id_index =
         prog_data->nr_params++; /* The CS Thread ID uniform */

   if (nir->info->num_ssbos > 0)
      pipeline->needs_data_cache = true;

   if (prog_data->nr_params > 0) {
      /* XXX: I think we're leaking this */
      prog_data->param = (const union gl_constant_value **)
         malloc(prog_data->nr_params * sizeof(union gl_constant_value *));

      /* We now set the param values to be offsets into a
       * anv_push_constant_data structure.  Since the compiler doesn't
       * actually dereference any of the gl_constant_value pointers in the
       * params array, it doesn't really matter what we put here.
       */
      struct anv_push_constants *null_data = NULL;
      if (nir->num_uniforms > 0) {
         /* Fill out the push constants section of the param array */
         for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++)
            prog_data->param[i] = (const union gl_constant_value *)
               &null_data->client_data[i * sizeof(float)];
      }
   }

   /* Set up dynamic offsets */
   anv_nir_apply_dynamic_offsets(pipeline, nir, prog_data);

   /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
   if (pipeline->layout)
      anv_nir_apply_pipeline_layout(pipeline, nir, prog_data, map);

   /* nir_lower_io will only handle the push constants; we need to set this
    * to the full number of possible uniforms.
    */
   nir->num_uniforms = prog_data->nr_params * 4;

   return nir;
}
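
/* Lay out the stage's binding table.  'bias' reserves the first N surface
 * slots for entries the driver adds itself: the fragment stage passes its
 * render-target count, the compute stage passes 1, and the other stages
 * pass 0.
 */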
static void
anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
{
   prog_data->binding_table.size_bytes = 0;
   prog_data->binding_table.texture_start = bias;
   prog_data->binding_table.gather_texture_start = bias;
   prog_data->binding_table.ubo_start = bias;
   prog_data->binding_table.ssbo_start = bias;
   prog_data->binding_table.image_start = bias;
}
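
/* Hand a freshly compiled kernel to the pipeline cache when one is
 * available, so identical shaders are deduplicated and shared; without a
 * cache, fall back to creating a standalone anv_shader_bin.
 */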
static struct anv_shader_bin *
anv_pipeline_upload_kernel(struct anv_pipeline *pipeline,
                           struct anv_pipeline_cache *cache,
                           const void *key_data, uint32_t key_size,
                           const void *kernel_data, uint32_t kernel_size,
                           const struct brw_stage_prog_data *prog_data,
                           uint32_t prog_data_size,
                           const struct anv_pipeline_bind_map *bind_map)
{
   if (cache) {
      return anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
                                              kernel_data, kernel_size,
                                              prog_data, prog_data_size,
                                              bind_map);
   } else {
      return anv_shader_bin_create(pipeline->device, key_data, key_size,
                                   kernel_data, kernel_size,
                                   prog_data, prog_data_size,
                                   prog_data->param, bind_map);
   }
}

static void
anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
                                gl_shader_stage stage,
                                struct anv_shader_bin *shader)
{
   pipeline->shaders[stage] = shader;
   pipeline->active_stages |= mesa_to_vk_shader_stage(stage);
}
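
/* The per-stage compile helpers below all follow the same pattern: hash the
 * prog key, module, entrypoint, layout, and specialization info; look that
 * hash up in the pipeline cache; and only run the full NIR compile and
 * backend codegen on a cache miss.
 */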
static VkResult
anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_vs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_vs_prog_key(&pipeline->device->info, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_vs_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_VERTEX, spec_info,
                                             &prog_data.base.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      anv_fill_binding_table(&prog_data.base.base, 0);

      void *mem_ctx = ralloc_context(NULL);

      ralloc_steal(mem_ctx, nir);

      prog_data.inputs_read = nir->info->inputs_read;

      brw_compute_vue_map(&pipeline->device->info,
                          &prog_data.base.vue_map,
                          nir->info->outputs_written,
                          nir->info->separate_shader);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_vs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, false, -1, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX, bin);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_gs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_gs_prog_key(&pipeline->device->info, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_gs_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_GEOMETRY, spec_info,
                                             &prog_data.base.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      anv_fill_binding_table(&prog_data.base.base, 0);

      void *mem_ctx = ralloc_context(NULL);

      ralloc_steal(mem_ctx, nir);

      brw_compute_vue_map(&pipeline->device->info,
                          &prog_data.base.vue_map,
                          nir->info->outputs_written,
                          nir->info->separate_shader);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_gs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, -1, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY, bin);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_wm_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_wm_prog_key(&pipeline->device->info, info, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_wm_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor + 8,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_FRAGMENT, spec_info,
                                             &prog_data.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

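      /* The bind map above reserved eight surface slots
       * (surface_to_descriptor + 8) so render-target bindings can be
       * prepended afterwards.  Walk the fragment outputs, discard writes to
       * color regions the subpass doesn't provide, compact the remaining
       * locations, and finally rewind surface_to_descriptor by num_rts so
       * rt_bindings occupies the reserved slots.
       */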
      unsigned num_rts = 0;
      struct anv_pipeline_binding rt_bindings[8];
      nir_function_impl *impl = nir_shader_get_entrypoint(nir);
      nir_foreach_variable_safe(var, &nir->outputs) {
         if (var->data.location < FRAG_RESULT_DATA0)
            continue;

         unsigned rt = var->data.location - FRAG_RESULT_DATA0;
         if (rt >= key.nr_color_regions) {
            /* Out-of-bounds, throw it away */
            var->data.mode = nir_var_local;
            exec_node_remove(&var->node);
            exec_list_push_tail(&impl->locals, &var->node);
            continue;
         }

         /* Give it a new, compacted, location */
         var->data.location = FRAG_RESULT_DATA0 + num_rts;

         unsigned array_len =
            glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
         assert(num_rts + array_len <= 8);

         for (unsigned i = 0; i < array_len; i++) {
            rt_bindings[num_rts + i] = (struct anv_pipeline_binding) {
               .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
               .binding = 0,
               .index = rt + i,
            };
         }

         num_rts += array_len;
      }

      if (num_rts == 0) {
         /* If we have no render targets, we need a null render target */
         rt_bindings[0] = (struct anv_pipeline_binding) {
            .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
            .binding = 0,
            .index = UINT8_MAX,
         };
         num_rts = 1;
      }

      assert(num_rts <= 8);
      map.surface_to_descriptor -= num_rts;
      map.surface_count += num_rts;
      assert(map.surface_count <= 256);
      memcpy(map.surface_to_descriptor, rt_bindings,
             num_rts * sizeof(*rt_bindings));

      anv_fill_binding_table(&prog_data.base, num_rts);

      void *mem_ctx = ralloc_context(NULL);

      ralloc_steal(mem_ctx, nir);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, -1, -1, true, false, NULL, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT, bin);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_cs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_cs_prog_key(&pipeline->device->info, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_cs_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_COMPUTE, spec_info,
                                             &prog_data.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      anv_fill_binding_table(&prog_data.base, 1);

      void *mem_ctx = ralloc_context(NULL);

      ralloc_steal(mem_ctx, nir);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_cs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        -1, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE, bin);

   return VK_SUCCESS;
}

/**
 * Copy pipeline state not marked as dynamic.
 * Dynamic state is pipeline state which hasn't been provided at pipeline
 * creation time, but is dynamically provided afterwards using various
 * vkCmdSet* functions.
 *
 * The set of state considered "non_dynamic" is determined by the pieces of
 * state that have their corresponding VkDynamicState enums omitted from
 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
 *
 * @param[out] pipeline    Destination non_dynamic state.
 * @param[in]  pCreateInfo Source of non_dynamic state to be copied.
 */
static void
copy_non_dynamic_state(struct anv_pipeline *pipeline,
                       const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
   anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
   ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
   struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];

   pipeline->dynamic_state = default_dynamic_state;

   if (pCreateInfo->pDynamicState) {
      /* Remove all of the states that are marked as dynamic */
      uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
      for (uint32_t s = 0; s < count; s++)
         states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
   }

   struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;

   /* Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pViewportState is [...] NULL if the pipeline
    *    has rasterization disabled.
    */
   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
      assert(pCreateInfo->pViewportState);

      dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
      if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
         typed_memcpy(dynamic->viewport.viewports,
                      pCreateInfo->pViewportState->pViewports,
                      pCreateInfo->pViewportState->viewportCount);
      }

      dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
      if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
         typed_memcpy(dynamic->scissor.scissors,
                      pCreateInfo->pViewportState->pScissors,
                      pCreateInfo->pViewportState->scissorCount);
      }
   }

   if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
   }

   if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->depth_bias.bias =
         pCreateInfo->pRasterizationState->depthBiasConstantFactor;
      dynamic->depth_bias.clamp =
         pCreateInfo->pRasterizationState->depthBiasClamp;
      dynamic->depth_bias.slope =
         pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
   }

   /* Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pColorBlendState is [...] NULL if the pipeline has rasterization
    *    disabled or if the subpass of the render pass the pipeline is
    *    created against does not use any color attachments.
    */
   bool uses_color_att = false;
   for (unsigned i = 0; i < subpass->color_count; ++i) {
      if (subpass->color_attachments[i] != VK_ATTACHMENT_UNUSED) {
         uses_color_att = true;
         break;
      }
   }

   if (uses_color_att &&
       !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
      assert(pCreateInfo->pColorBlendState);

      if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
         typed_memcpy(dynamic->blend_constants,
                      pCreateInfo->pColorBlendState->blendConstants, 4);
   }

   /* If there is no depthstencil attachment, then don't read
    * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
    * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
    * no need to override the depthstencil defaults in
    * anv_pipeline::dynamic_state when there is no depthstencil attachment.
    *
    * Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pDepthStencilState is [...] NULL if the pipeline has rasterization
    *    disabled or if the subpass of the render pass the pipeline is
    *    created against does not use a depth/stencil attachment.
    */
   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
       subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
      assert(pCreateInfo->pDepthStencilState);

      if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
         dynamic->depth_bounds.min =
            pCreateInfo->pDepthStencilState->minDepthBounds;
         dynamic->depth_bounds.max =
            pCreateInfo->pDepthStencilState->maxDepthBounds;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
         dynamic->stencil_compare_mask.front =
            pCreateInfo->pDepthStencilState->front.compareMask;
         dynamic->stencil_compare_mask.back =
            pCreateInfo->pDepthStencilState->back.compareMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
         dynamic->stencil_write_mask.front =
            pCreateInfo->pDepthStencilState->front.writeMask;
         dynamic->stencil_write_mask.back =
            pCreateInfo->pDepthStencilState->back.writeMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
         dynamic->stencil_reference.front =
            pCreateInfo->pDepthStencilState->front.reference;
         dynamic->stencil_reference.back =
            pCreateInfo->pDepthStencilState->back.reference;
      }
   }

   pipeline->dynamic_state_mask = states;
}

static void
anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
{
   struct anv_render_pass *renderpass = NULL;
   struct anv_subpass *subpass = NULL;

   /* Assert that all required members of VkGraphicsPipelineCreateInfo are
    * present.  See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
    */
   assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   renderpass = anv_render_pass_from_handle(info->renderPass);
   assert(renderpass);

   assert(info->subpass < renderpass->subpass_count);
   subpass = &renderpass->subpasses[info->subpass];

   assert(info->stageCount >= 1);
   assert(info->pVertexInputState);
   assert(info->pInputAssemblyState);
   assert(info->pRasterizationState);
   if (!info->pRasterizationState->rasterizerDiscardEnable) {
      assert(info->pViewportState);
      assert(info->pMultisampleState);

      if (subpass && subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED)
         assert(info->pDepthStencilState);

      if (subpass && subpass->color_count > 0)
         assert(info->pColorBlendState);
   }

   for (uint32_t i = 0; i < info->stageCount; ++i) {
      switch (info->pStages[i].stage) {
      case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
         assert(info->pTessellationState);
         break;
      default:
         break;
      }
   }
}

/**
 * Calculate the desired L3 partitioning based on the current state of the
 * pipeline.  For now this simply returns the conservative defaults calculated
 * by get_default_l3_weights(), but we could probably do better by gathering
 * more statistics from the pipeline state (e.g. guess of expected URB usage
 * and bound surfaces), or by using feed-back from performance counters.
 */
void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
{
   const struct gen_device_info *devinfo = &pipeline->device->info;

   const struct gen_l3_weights w =
      gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);

   pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
   pipeline->urb.total_size =
      gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
}
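
/* Top-level initialization of a graphics pipeline: validate pCreateInfo,
 * set up the pipeline's embedded batch and relocation list, compile
 * whichever shader stages were supplied, and derive the vertex-input and
 * input-assembly state that gets programmed later.
 */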
VkResult
anv_pipeline_init(struct anv_pipeline *pipeline,
                  struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc)
{
   VkResult result;

   anv_validate {
      anv_pipeline_validate_create_info(pCreateInfo);
   }

   if (alloc == NULL)
      alloc = &device->alloc;

   pipeline->device = device;
   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);

   result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
   if (result != VK_SUCCESS)
      return result;

   pipeline->batch.alloc = alloc;
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
   pipeline->batch.relocs = &pipeline->batch_relocs;

   copy_non_dynamic_state(pipeline, pCreateInfo);
   pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
                                  pCreateInfo->pRasterizationState->depthClampEnable;

   pipeline->needs_data_cache = false;

   /* When we free the pipeline, we detect stages based on the NULL status
    * of various prog_data pointers.  Make them NULL by default.
    */
   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));

   pipeline->active_stages = 0;

   const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
   struct anv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
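
   /* Each pStages entry carries a single-bit VkShaderStageFlagBits value
    * whose bit position matches the corresponding gl_shader_stage enum, so
    * ffs() - 1 recovers the mesa stage.
    */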
   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
      pStages[stage] = &pCreateInfo->pStages[i];
      modules[stage] = anv_shader_module_from_handle(pStages[stage]->module);
   }

   if (modules[MESA_SHADER_VERTEX]) {
      result = anv_pipeline_compile_vs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_VERTEX],
                                       pStages[MESA_SHADER_VERTEX]->pName,
                                       pStages[MESA_SHADER_VERTEX]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   if (modules[MESA_SHADER_TESS_CTRL] || modules[MESA_SHADER_TESS_EVAL])
      anv_finishme("no tessellation support");

   if (modules[MESA_SHADER_GEOMETRY]) {
      result = anv_pipeline_compile_gs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_GEOMETRY],
                                       pStages[MESA_SHADER_GEOMETRY]->pName,
                                       pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   if (modules[MESA_SHADER_FRAGMENT]) {
      result = anv_pipeline_compile_fs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_FRAGMENT],
                                       pStages[MESA_SHADER_FRAGMENT]->pName,
                                       pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);

   anv_pipeline_setup_l3_config(pipeline, false);

   const VkPipelineVertexInputStateCreateInfo *vi_info =
      pCreateInfo->pVertexInputState;

   const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;

   pipeline->vb_used = 0;
   for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
      const VkVertexInputAttributeDescription *desc =
         &vi_info->pVertexAttributeDescriptions[i];

      if (inputs_read & (1 << (VERT_ATTRIB_GENERIC0 + desc->location)))
         pipeline->vb_used |= 1 << desc->binding;
   }

   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
      const VkVertexInputBindingDescription *desc =
         &vi_info->pVertexBindingDescriptions[i];

      pipeline->binding_stride[desc->binding] = desc->stride;

      /* Step rate is programmed per vertex element (attribute), not
       * binding. Set up a map of which bindings step per instance, for
       * reference by vertex element setup. */
      switch (desc->inputRate) {
      default:
      case VK_VERTEX_INPUT_RATE_VERTEX:
         pipeline->instancing_enable[desc->binding] = false;
         break;
      case VK_VERTEX_INPUT_RATE_INSTANCE:
         pipeline->instancing_enable[desc->binding] = true;
         break;
      }
   }

   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
      pCreateInfo->pInputAssemblyState;
   pipeline->primitive_restart = ia_info->primitiveRestartEnable;
   pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];

   return VK_SUCCESS;

compile_fail:
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (pipeline->shaders[s])
         anv_shader_bin_unref(device, pipeline->shaders[s]);
   }

   anv_reloc_list_finish(&pipeline->batch_relocs, alloc);

   return result;
}