/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "util/mesa-sha1.h"
#include "common/gen_l3_config.h"
#include "anv_private.h"
#include "brw_nir.h"
#include "anv_nir.h"
#include "spirv/nir_spirv.h"

/* Needed for SWIZZLE macros */
#include "program/prog_instruction.h"
VkResult anv_CreateShaderModule(
    VkDevice                                    _device,
    const VkShaderModuleCreateInfo*             pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkShaderModule*                             pShaderModule)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   module = vk_alloc2(&device->alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   module->size = pCreateInfo->codeSize;
   memcpy(module->data, pCreateInfo->pCode, module->size);

   _mesa_sha1_compute(module->data, module->size, module->sha1);

   *pShaderModule = anv_shader_module_to_handle(module);

   return VK_SUCCESS;
}
void anv_DestroyShaderModule(
    VkDevice                                    _device,
    VkShaderModule                              _module,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader_module, module, _module);

   if (!module)
      return;

   vk_free2(&device->alloc, pAllocator, module);
}
#define SPIR_V_MAGIC_NUMBER 0x07230203
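/* Per the SPIR-V specification, 0x07230203 is the first word of every valid
 * SPIR-V binary; anv_shader_compile_to_nir() below uses it to sanity-check
 * incoming shader modules.
 */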
/* Eventually, this will become part of anv_CreateShader.  Unfortunately,
 * we can't do that yet because we don't have the ability to copy nir.
 */
static nir_shader *
anv_shader_compile_to_nir(struct anv_device *device,
                          struct anv_shader_module *module,
                          const char *entrypoint_name,
                          gl_shader_stage stage,
                          const VkSpecializationInfo *spec_info)
{
   if (strcmp(entrypoint_name, "main") != 0) {
      anv_finishme("Multiple shaders per module not really supported");
   }

   const struct brw_compiler *compiler =
      device->instance->physicalDevice.compiler;
   const nir_shader_compiler_options *nir_options =
      compiler->glsl_compiler_options[stage].NirOptions;

   uint32_t *spirv = (uint32_t *) module->data;
   assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
   assert(module->size % 4 == 0);

   uint32_t num_spec_entries = 0;
   struct nir_spirv_specialization *spec_entries = NULL;
   if (spec_info && spec_info->mapEntryCount > 0) {
      num_spec_entries = spec_info->mapEntryCount;
      spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
      for (uint32_t i = 0; i < num_spec_entries; i++) {
         VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry.offset;
         assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

         spec_entries[i].id = spec_info->pMapEntries[i].constantID;
         if (spec_info->dataSize == 8)
            spec_entries[i].data64 = *(const uint64_t *)data;
         else
            spec_entries[i].data32 = *(const uint32_t *)data;
      }
   }

   nir_function *entry_point =
      spirv_to_nir(spirv, module->size / 4,
                   spec_entries, num_spec_entries,
                   stage, entrypoint_name, NULL, nir_options);
   nir_shader *nir = entry_point->shader;
   assert(nir->stage == stage);
   nir_validate_shader(nir);

   free(spec_entries);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, nir_lower_wpos_center);

   /* We have to lower away local constant initializers right before we
    * inline functions.  That way they get properly initialized at the top
    * of the function and not at the top of its caller.
    */
   NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_local);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);

   /* Pick off the single entrypoint that we want */
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (func != entry_point)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   entry_point->name = ralloc_strdup(entry_point, "main");

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value);

   /* Now that we've deleted all but the main function, we can go ahead and
    * lower the rest of the constant initializers.
    */
   NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
   NIR_PASS_V(nir, nir_propagate_invariant);
   NIR_PASS_V(nir, nir_lower_io_to_temporaries,
              entry_point->impl, true, false);
   NIR_PASS_V(nir, nir_lower_system_values);

   /* Vulkan uses the separate-shader linking model */
   nir->info->separate_shader = true;

   nir = brw_preprocess_nir(compiler, nir);

   NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, anv_nir_lower_input_attachments);

   nir_shader_gather_info(nir, entry_point->impl);

   return nir;
}
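/* The nir_shader returned above is ralloc-allocated; the per-stage helpers
 * below ralloc_steal() it into a local mem_ctx so the whole compile, error
 * paths included, can be released with a single ralloc_free().
 */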
void anv_DestroyPipeline(
    VkDevice                                    _device,
    VkPipeline                                  _pipeline,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   if (!pipeline)
      return;

   anv_reloc_list_finish(&pipeline->batch_relocs,
                         pAllocator ? pAllocator : &device->alloc);
   if (pipeline->blend_state.map)
      anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);

   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (pipeline->shaders[s])
         anv_shader_bin_unref(device, pipeline->shaders[s]);
   }

   vk_free2(&device->alloc, pAllocator, pipeline);
}
static const uint32_t vk_to_gen_primitive_type[] = {
   [VK_PRIMITIVE_TOPOLOGY_POINT_LIST]                    = _3DPRIM_POINTLIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST]                     = _3DPRIM_LINELIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP]                    = _3DPRIM_LINESTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST]                 = _3DPRIM_TRILIST,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP]                = _3DPRIM_TRISTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN]                  = _3DPRIM_TRIFAN,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY]      = _3DPRIM_LINELIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY]  = _3DPRIM_TRILIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
   /* [VK_PRIMITIVE_TOPOLOGY_PATCH_LIST]                = _3DPRIM_PATCHLIST_1 */
};
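/* Patch topologies are omitted above: the correct _3DPRIM_PATCHLIST_n value
 * depends on the patch control-point count, and tessellation is not wired
 * up yet (see the anv_finishme() in anv_pipeline_init() below).
 */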
static void
populate_sampler_prog_key(const struct gen_device_info *devinfo,
                          struct brw_sampler_prog_key_data *key)
{
   /* XXX: Handle texture swizzle on HSW- */
   for (int i = 0; i < MAX_SAMPLERS; i++) {
      /* Assume color sampler, no swizzling. (Works for BDW+) */
      key->swizzles[i] = SWIZZLE_XYZW;
   }
}
static void
populate_vs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_vs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* XXX: Handle vertex input work-arounds */

   /* XXX: Handle sampler_prog_key */
}
static void
populate_gs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_gs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}
static void
populate_wm_prog_key(const struct gen_device_info *devinfo,
                     const VkGraphicsPipelineCreateInfo *info,
                     struct brw_wm_prog_key *key)
{
   ANV_FROM_HANDLE(anv_render_pass, render_pass, info->renderPass);

   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* TODO: Fill out key->input_slots_valid */

   /* Vulkan doesn't specify a default */
   key->high_quality_derivatives = false;

   /* XXX Vulkan doesn't appear to specify */
   key->clamp_fragment_color = false;

   key->nr_color_regions =
      render_pass->subpasses[info->subpass].color_count;

   key->replicate_alpha = key->nr_color_regions > 1 &&
                          info->pMultisampleState &&
                          info->pMultisampleState->alphaToCoverageEnable;

   if (info->pMultisampleState &&
       info->pMultisampleState->rasterizationSamples > 1) {
      /* We should probably pull this out of the shader, but it's fairly
       * harmless to compute it and then let dead-code take care of it.
       */
      key->persample_interp =
         (info->pMultisampleState->minSampleShading *
          info->pMultisampleState->rasterizationSamples) > 1;
      key->multisample_fbo = true;
   }
}
static void
populate_cs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_cs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}
static nir_shader *
anv_pipeline_compile(struct anv_pipeline *pipeline,
                     struct anv_shader_module *module,
                     const char *entrypoint,
                     gl_shader_stage stage,
                     const VkSpecializationInfo *spec_info,
                     struct brw_stage_prog_data *prog_data,
                     struct anv_pipeline_bind_map *map)
{
   nir_shader *nir = anv_shader_compile_to_nir(pipeline->device,
                                               module, entrypoint, stage,
                                               spec_info);
   if (nir == NULL)
      return NULL;

   NIR_PASS_V(nir, anv_nir_lower_push_constants);

   /* Figure out the number of parameters */
   prog_data->nr_params = 0;

   if (nir->num_uniforms > 0) {
      /* If the shader uses any push constants at all, we'll just give
       * them the maximum possible number
       */
      assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
      prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
   }

   if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets)
      prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2;

   if (nir->info->num_images > 0) {
      prog_data->nr_params += nir->info->num_images * BRW_IMAGE_PARAM_SIZE;
      pipeline->needs_data_cache = true;
   }

   if (stage == MESA_SHADER_COMPUTE)
      ((struct brw_cs_prog_data *)prog_data)->thread_local_id_index =
         prog_data->nr_params++; /* The CS Thread ID uniform */

   if (nir->info->num_ssbos > 0)
      pipeline->needs_data_cache = true;

   if (prog_data->nr_params > 0) {
      /* XXX: I think we're leaking this */
      prog_data->param = (const union gl_constant_value **)
         malloc(prog_data->nr_params * sizeof(union gl_constant_value *));

      /* We now set the param values to be offsets into an
       * anv_push_constant_data structure.  Since the compiler doesn't
       * actually dereference any of the gl_constant_value pointers in the
       * params array, it doesn't really matter what we put here.
       */
      struct anv_push_constants *null_data = NULL;
      if (nir->num_uniforms > 0) {
         /* Fill out the push constants section of the param array */
         for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++)
            prog_data->param[i] = (const union gl_constant_value *)
               &null_data->client_data[i * sizeof(float)];
      }
   }

   /* Set up dynamic offsets */
   anv_nir_apply_dynamic_offsets(pipeline, nir, prog_data);

   /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
   if (pipeline->layout)
      anv_nir_apply_pipeline_layout(pipeline, nir, prog_data, map);

   /* nir_lower_io will only handle the push constants; we need to set this
    * to the full number of possible uniforms.
    */
   nir->num_uniforms = prog_data->nr_params * 4;

   return nir;
}
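/* The "bias" passed to anv_fill_binding_table() is the number of
 * binding-table slots the stage reserves ahead of the descriptor-backed
 * entries: the callers in this file pass the render-target count for the
 * fragment stage, 1 for compute, and 0 for the VUE stages.
 */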
static void
anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
{
   prog_data->binding_table.size_bytes = 0;
   prog_data->binding_table.texture_start = bias;
   prog_data->binding_table.gather_texture_start = bias;
   prog_data->binding_table.ubo_start = bias;
   prog_data->binding_table.ssbo_start = bias;
   prog_data->binding_table.image_start = bias;
}
static struct anv_shader_bin *
anv_pipeline_upload_kernel(struct anv_pipeline *pipeline,
                           struct anv_pipeline_cache *cache,
                           const void *key_data, uint32_t key_size,
                           const void *kernel_data, uint32_t kernel_size,
                           const struct brw_stage_prog_data *prog_data,
                           uint32_t prog_data_size,
                           const struct anv_pipeline_bind_map *bind_map)
{
   if (cache) {
      return anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
                                              kernel_data, kernel_size,
                                              prog_data, prog_data_size,
                                              bind_map);
   } else {
      return anv_shader_bin_create(pipeline->device, key_data, key_size,
                                   kernel_data, kernel_size,
                                   prog_data, prog_data_size,
                                   prog_data->param, bind_map);
   }
}
static void
anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
                                gl_shader_stage stage,
                                struct anv_shader_bin *shader)
{
   pipeline->shaders[stage] = shader;
   pipeline->active_stages |= mesa_to_vk_shader_stage(stage);
}
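/* Each anv_pipeline_compile_<stage>() helper below follows the same shape:
 * build the stage's prog key, hash (key, module, entrypoint, layout,
 * specialization info) into a SHA-1, look that up in the pipeline cache,
 * and only on a miss lower the SPIR-V to NIR, run the brw backend compiler,
 * and upload the resulting kernel through anv_pipeline_upload_kernel().
 */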
static VkResult
anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_vs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_vs_prog_key(&pipeline->device->info, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_vs_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_VERTEX, spec_info,
                                             &prog_data.base.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      anv_fill_binding_table(&prog_data.base.base, 0);

      void *mem_ctx = ralloc_context(NULL);

      ralloc_steal(mem_ctx, nir);

      prog_data.inputs_read = nir->info->inputs_read;
      prog_data.double_inputs_read = nir->info->double_inputs_read;

      brw_compute_vue_map(&pipeline->device->info,
                          &prog_data.base.vue_map,
                          nir->info->outputs_written,
                          nir->info->separate_shader);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_vs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, false, -1, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX, bin);

   return VK_SUCCESS;
}
static VkResult
anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_gs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_gs_prog_key(&pipeline->device->info, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_gs_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_GEOMETRY, spec_info,
                                             &prog_data.base.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      anv_fill_binding_table(&prog_data.base.base, 0);

      void *mem_ctx = ralloc_context(NULL);

      ralloc_steal(mem_ctx, nir);

      brw_compute_vue_map(&pipeline->device->info,
                          &prog_data.base.vue_map,
                          nir->info->outputs_written,
                          nir->info->separate_shader);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_gs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, -1, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY, bin);

   return VK_SUCCESS;
}
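/* Unlike the other stages, the fragment stage's binding map starts out
 * pointing 8 entries into the surface array: the color-attachment bindings
 * discovered below are then copied in front of the descriptor-backed
 * surfaces by rewinding surface_to_descriptor by the real render-target
 * count.
 */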
static VkResult
anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_wm_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_wm_prog_key(&pipeline->device->info, info, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_wm_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor + 8,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_FRAGMENT, spec_info,
                                             &prog_data.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      unsigned num_rts = 0;
      struct anv_pipeline_binding rt_bindings[8];
      nir_function_impl *impl = nir_shader_get_entrypoint(nir);
      nir_foreach_variable_safe(var, &nir->outputs) {
         if (var->data.location < FRAG_RESULT_DATA0)
            continue;

         unsigned rt = var->data.location - FRAG_RESULT_DATA0;
         if (rt >= key.nr_color_regions) {
            /* Out-of-bounds, throw it away */
            var->data.mode = nir_var_local;
            exec_node_remove(&var->node);
            exec_list_push_tail(&impl->locals, &var->node);
            continue;
         }

         /* Give it a new, compacted, location */
         var->data.location = FRAG_RESULT_DATA0 + num_rts;

         unsigned array_len =
            glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
         assert(num_rts + array_len <= 8);

         for (unsigned i = 0; i < array_len; i++) {
            rt_bindings[num_rts + i] = (struct anv_pipeline_binding) {
               .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
               .binding = 0,
               .index = rt + i,
            };
         }

         num_rts += array_len;
      }

      if (num_rts == 0) {
         /* If we have no render targets, we need a null render target */
         rt_bindings[0] = (struct anv_pipeline_binding) {
            .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
            .binding = 0,
            .index = UINT8_MAX,
         };
         num_rts = 1;
      }

      assert(num_rts <= 8);
      map.surface_to_descriptor -= num_rts;
      map.surface_count += num_rts;
      assert(map.surface_count <= 256);
      memcpy(map.surface_to_descriptor, rt_bindings,
             num_rts * sizeof(*rt_bindings));

      anv_fill_binding_table(&prog_data.base, num_rts);

      void *mem_ctx = ralloc_context(NULL);

      ralloc_steal(mem_ctx, nir);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, -1, -1, true, false, NULL, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT, bin);

   return VK_SUCCESS;
}
static VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_cs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_cs_prog_key(&pipeline->device->info, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_cs_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_COMPUTE, spec_info,
                                             &prog_data.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      anv_fill_binding_table(&prog_data.base, 1);

      void *mem_ctx = ralloc_context(NULL);

      ralloc_steal(mem_ctx, nir);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_cs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        -1, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE, bin);

   return VK_SUCCESS;
}
/**
 * Copy pipeline state not marked as dynamic.
 * Dynamic state is pipeline state which hasn't been provided at pipeline
 * creation time, but is dynamically provided afterwards using various
 * vkCmdSet* functions.
 *
 * The set of state considered "non_dynamic" is determined by the pieces of
 * state that have their corresponding VkDynamicState enums omitted from
 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
 *
 * @param[out] pipeline    Destination non_dynamic state.
 * @param[in]  pCreateInfo Source of non_dynamic state to be copied.
 */
static void
copy_non_dynamic_state(struct anv_pipeline *pipeline,
                       const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
   anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
   ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
   struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];

   pipeline->dynamic_state = default_dynamic_state;

   if (pCreateInfo->pDynamicState) {
      /* Remove all of the states that are marked as dynamic */
      uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
      for (uint32_t s = 0; s < count; s++)
         states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
   }

   struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;

   /* Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pViewportState is [...] NULL if the pipeline
    *    has rasterization disabled.
    */
   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
      assert(pCreateInfo->pViewportState);

      dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
      if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
         typed_memcpy(dynamic->viewport.viewports,
                      pCreateInfo->pViewportState->pViewports,
                      pCreateInfo->pViewportState->viewportCount);
      }

      dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
      if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
         typed_memcpy(dynamic->scissor.scissors,
                      pCreateInfo->pViewportState->pScissors,
                      pCreateInfo->pViewportState->scissorCount);
      }
   }

   if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
   }

   if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->depth_bias.bias =
         pCreateInfo->pRasterizationState->depthBiasConstantFactor;
      dynamic->depth_bias.clamp =
         pCreateInfo->pRasterizationState->depthBiasClamp;
      dynamic->depth_bias.slope =
         pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
   }

   /* Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pColorBlendState is [...] NULL if the pipeline has rasterization
    *    disabled or if the subpass of the render pass the pipeline is
    *    created against does not use any color attachments.
    */
   bool uses_color_att = false;
   for (unsigned i = 0; i < subpass->color_count; ++i) {
      if (subpass->color_attachments[i] != VK_ATTACHMENT_UNUSED) {
         uses_color_att = true;
         break;
      }
   }

   if (uses_color_att &&
       !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
      assert(pCreateInfo->pColorBlendState);

      if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
         typed_memcpy(dynamic->blend_constants,
                      pCreateInfo->pColorBlendState->blendConstants, 4);
   }

   /* If there is no depthstencil attachment, then don't read
    * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
    * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
    * no need to override the depthstencil defaults in
    * anv_pipeline::dynamic_state when there is no depthstencil attachment.
    *
    * Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pDepthStencilState is [...] NULL if the pipeline has rasterization
    *    disabled or if the subpass of the render pass the pipeline is created
    *    against does not use a depth/stencil attachment.
    */
   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
       subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
      assert(pCreateInfo->pDepthStencilState);

      if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
         dynamic->depth_bounds.min =
            pCreateInfo->pDepthStencilState->minDepthBounds;
         dynamic->depth_bounds.max =
            pCreateInfo->pDepthStencilState->maxDepthBounds;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
         dynamic->stencil_compare_mask.front =
            pCreateInfo->pDepthStencilState->front.compareMask;
         dynamic->stencil_compare_mask.back =
            pCreateInfo->pDepthStencilState->back.compareMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
         dynamic->stencil_write_mask.front =
            pCreateInfo->pDepthStencilState->front.writeMask;
         dynamic->stencil_write_mask.back =
            pCreateInfo->pDepthStencilState->back.writeMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
         dynamic->stencil_reference.front =
            pCreateInfo->pDepthStencilState->front.reference;
         dynamic->stencil_reference.back =
            pCreateInfo->pDepthStencilState->back.reference;
      }
   }

   pipeline->dynamic_state_mask = states;
}
static void
anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
{
   struct anv_render_pass *renderpass = NULL;
   struct anv_subpass *subpass = NULL;

   /* Assert that all required members of VkGraphicsPipelineCreateInfo are
    * present.  See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
    */
   assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   renderpass = anv_render_pass_from_handle(info->renderPass);
   assert(renderpass);

   assert(info->subpass < renderpass->subpass_count);
   subpass = &renderpass->subpasses[info->subpass];

   assert(info->stageCount >= 1);
   assert(info->pVertexInputState);
   assert(info->pInputAssemblyState);
   assert(info->pRasterizationState);
   if (!info->pRasterizationState->rasterizerDiscardEnable) {
      assert(info->pViewportState);
      assert(info->pMultisampleState);

      if (subpass && subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED)
         assert(info->pDepthStencilState);

      if (subpass && subpass->color_count > 0)
         assert(info->pColorBlendState);
   }

   for (uint32_t i = 0; i < info->stageCount; ++i) {
      switch (info->pStages[i].stage) {
      case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
         assert(info->pTessellationState);
         break;
      default:
         break;
      }
   }
}
/**
 * Calculate the desired L3 partitioning based on the current state of the
 * pipeline.  For now this simply returns the conservative defaults calculated
 * by get_default_l3_weights(), but we could probably do better by gathering
 * more statistics from the pipeline state (e.g. guess of expected URB usage
 * and bound surfaces), or by using feedback from performance counters.
 */
void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
{
   const struct gen_device_info *devinfo = &pipeline->device->info;

   const struct gen_l3_weights w =
      gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);

   pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
   pipeline->urb.total_size =
      gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
}
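/* Gen-agnostic half of graphics pipeline creation; the per-gen backends are
 * expected to call this before emitting their hardware-specific state.
 */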
VkResult
anv_pipeline_init(struct anv_pipeline *pipeline,
                  struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc)
{
   VkResult result;

   anv_validate {
      anv_pipeline_validate_create_info(pCreateInfo);
   }

   if (alloc == NULL)
      alloc = &device->alloc;

   pipeline->device = device;
   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);

   result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
   if (result != VK_SUCCESS)
      return result;

   pipeline->batch.alloc = alloc;
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
   pipeline->batch.relocs = &pipeline->batch_relocs;

   copy_non_dynamic_state(pipeline, pCreateInfo);
   pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
                                  pCreateInfo->pRasterizationState->depthClampEnable;

   pipeline->needs_data_cache = false;

   /* When we free the pipeline, we detect stages based on the NULL status
    * of various prog_data pointers.  Make them NULL by default.
    */
   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));

   pipeline->active_stages = 0;

   const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
   struct anv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
      pStages[stage] = &pCreateInfo->pStages[i];
      modules[stage] = anv_shader_module_from_handle(pStages[stage]->module);
   }

   if (modules[MESA_SHADER_VERTEX]) {
      result = anv_pipeline_compile_vs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_VERTEX],
                                       pStages[MESA_SHADER_VERTEX]->pName,
                                       pStages[MESA_SHADER_VERTEX]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   if (modules[MESA_SHADER_TESS_CTRL] || modules[MESA_SHADER_TESS_EVAL])
      anv_finishme("no tessellation support");

   if (modules[MESA_SHADER_GEOMETRY]) {
      result = anv_pipeline_compile_gs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_GEOMETRY],
                                       pStages[MESA_SHADER_GEOMETRY]->pName,
                                       pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   if (modules[MESA_SHADER_FRAGMENT]) {
      result = anv_pipeline_compile_fs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_FRAGMENT],
                                       pStages[MESA_SHADER_FRAGMENT]->pName,
                                       pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);

   anv_pipeline_setup_l3_config(pipeline, false);

   const VkPipelineVertexInputStateCreateInfo *vi_info =
      pCreateInfo->pVertexInputState;

   const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;

   pipeline->vb_used = 0;
   for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
      const VkVertexInputAttributeDescription *desc =
         &vi_info->pVertexAttributeDescriptions[i];

      if (inputs_read & (1 << (VERT_ATTRIB_GENERIC0 + desc->location)))
         pipeline->vb_used |= 1 << desc->binding;
   }

   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
      const VkVertexInputBindingDescription *desc =
         &vi_info->pVertexBindingDescriptions[i];

      pipeline->binding_stride[desc->binding] = desc->stride;

      /* Step rate is programmed per vertex element (attribute), not
       * binding. Set up a map of which bindings step per instance, for
       * reference by vertex element setup. */
      switch (desc->inputRate) {
      default:
      case VK_VERTEX_INPUT_RATE_VERTEX:
         pipeline->instancing_enable[desc->binding] = false;
         break;
      case VK_VERTEX_INPUT_RATE_INSTANCE:
         pipeline->instancing_enable[desc->binding] = true;
         break;
      }
   }

   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
      pCreateInfo->pInputAssemblyState;
   pipeline->primitive_restart = ia_info->primitiveRestartEnable;
   pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];

   return VK_SUCCESS;

compile_fail:
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (pipeline->shaders[s])
         anv_shader_bin_unref(device, pipeline->shaders[s]);
   }

   anv_reloc_list_finish(&pipeline->batch_relocs, alloc);

   return result;
}