/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "util/mesa-sha1.h"
#include "common/gen_l3_config.h"
#include "anv_private.h"
#include "anv_nir.h"
#include "compiler/brw_nir.h"
#include "spirv/nir_spirv.h"

/* Needed for SWIZZLE macros */
#include "program/prog_instruction.h"

VkResult anv_CreateShaderModule(
    VkDevice                                    _device,
    const VkShaderModuleCreateInfo*             pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkShaderModule*                             pShaderModule)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   module = vk_alloc2(&device->alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   module->size = pCreateInfo->codeSize;
   memcpy(module->data, pCreateInfo->pCode, module->size);

   _mesa_sha1_compute(module->data, module->size, module->sha1);

   *pShaderModule = anv_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void anv_DestroyShaderModule(
    VkDevice                                    _device,
    VkShaderModule                              _module,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader_module, module, _module);

   if (!module)
      return;

   vk_free2(&device->alloc, pAllocator, module);
}

#define SPIR_V_MAGIC_NUMBER 0x07230203
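
/* SPIR_V_MAGIC_NUMBER is the value the SPIR-V specification requires as the
 * first 32-bit word of every valid SPIR-V binary; anv_shader_compile_to_nir()
 * asserts on it below before handing a module to spirv_to_nir().
 */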

static const uint64_t stage_to_debug[] = {
   [MESA_SHADER_VERTEX] = DEBUG_VS,
   [MESA_SHADER_TESS_CTRL] = DEBUG_TCS,
   [MESA_SHADER_TESS_EVAL] = DEBUG_TES,
   [MESA_SHADER_GEOMETRY] = DEBUG_GS,
   [MESA_SHADER_FRAGMENT] = DEBUG_WM,
   [MESA_SHADER_COMPUTE] = DEBUG_CS,
};
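
/* stage_to_debug[] above maps each shader stage to its INTEL_DEBUG flag so
 * that e.g. INTEL_DEBUG=vs,tes makes the compile path below dump the NIR for
 * just those stages.
 */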

/* Eventually, this will become part of anv_CreateShader.  Unfortunately,
 * we can't do that yet because we don't have the ability to copy nir.
 */
static nir_shader *
anv_shader_compile_to_nir(struct anv_pipeline *pipeline,
                          void *mem_ctx,
                          struct anv_shader_module *module,
                          const char *entrypoint_name,
                          gl_shader_stage stage,
                          const VkSpecializationInfo *spec_info)
{
   const struct anv_device *device = pipeline->device;

   const struct brw_compiler *compiler =
      device->instance->physicalDevice.compiler;
   const nir_shader_compiler_options *nir_options =
      compiler->glsl_compiler_options[stage].NirOptions;

   uint32_t *spirv = (uint32_t *) module->data;
   assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
   assert(module->size % 4 == 0);

   uint32_t num_spec_entries = 0;
   struct nir_spirv_specialization *spec_entries = NULL;
   if (spec_info && spec_info->mapEntryCount > 0) {
      num_spec_entries = spec_info->mapEntryCount;
      spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
      for (uint32_t i = 0; i < num_spec_entries; i++) {
         VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry.offset;
         assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

         spec_entries[i].id = spec_info->pMapEntries[i].constantID;
         if (spec_info->dataSize == 8)
            spec_entries[i].data64 = *(const uint64_t *)data;
         else
            spec_entries[i].data32 = *(const uint32_t *)data;
      }
   }
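
   /* Each VkSpecializationMapEntry pairs a SPIR-V specialization-constant ID
    * with an offset into pData.  The spec_entries array built above is what
    * spirv_to_nir() consults to substitute the client-provided values for
    * the OpSpecConstant defaults while parsing the module.
    */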

   struct spirv_to_nir_options spirv_options = {
      .float64 = device->instance->physicalDevice.info.gen >= 8,
      .int64 = device->instance->physicalDevice.info.gen >= 8,
      .tessellation = true,
      .draw_parameters = true,
      .image_write_without_format = true,
      .variable_pointers = true,
   };

   nir_function *entry_point =
      spirv_to_nir(spirv, module->size / 4,
                   spec_entries, num_spec_entries,
                   stage, entrypoint_name, &spirv_options, nir_options);
   nir_shader *nir = entry_point->shader;
   assert(nir->info.stage == stage);
   nir_validate_shader(nir);
   ralloc_steal(mem_ctx, nir);

   free(spec_entries);
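
   /* Ownership note: the freshly parsed NIR shader is ralloc-allocated and
    * reparented onto the caller's mem_ctx above, so a single
    * ralloc_free(mem_ctx) in the caller releases everything this compile
    * produced; spec_entries is plain malloc'd and is freed by hand instead.
    */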

   if (unlikely(INTEL_DEBUG & stage_to_debug[stage])) {
      fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
              gl_shader_stage_name(stage));
      nir_print_shader(nir, stderr);
   }

   /* We have to lower away local constant initializers right before we
    * inline functions.  That way they get properly initialized at the top
    * of the function and not at the top of its caller.
    */
   NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_local);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);

   /* Pick off the single entrypoint that we want */
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (func != entry_point)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   entry_point->name = ralloc_strdup(entry_point, "main");

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);

   /* Now that we've deleted all but the main function, we can go ahead and
    * lower the rest of the constant initializers.
    */
   NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
   NIR_PASS_V(nir, nir_propagate_invariant);
   NIR_PASS_V(nir, nir_lower_io_to_temporaries,
              entry_point->impl, true, false);

   /* Vulkan uses the separate-shader linking model */
   nir->info.separate_shader = true;

   nir = brw_preprocess_nir(compiler, nir);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, anv_nir_lower_input_attachments);

   return nir;
}

void anv_DestroyPipeline(
    VkDevice                                    _device,
    VkPipeline                                  _pipeline,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   if (!pipeline)
      return;

   anv_reloc_list_finish(&pipeline->batch_relocs,
                         pAllocator ? pAllocator : &device->alloc);
   if (pipeline->blend_state.map)
      anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);

   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (pipeline->shaders[s])
         anv_shader_bin_unref(device, pipeline->shaders[s]);
   }

   vk_free2(&device->alloc, pAllocator, pipeline);
}

static const uint32_t vk_to_gen_primitive_type[] = {
   [VK_PRIMITIVE_TOPOLOGY_POINT_LIST]                    = _3DPRIM_POINTLIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST]                     = _3DPRIM_LINELIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP]                    = _3DPRIM_LINESTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST]                 = _3DPRIM_TRILIST,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP]                = _3DPRIM_TRISTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN]                  = _3DPRIM_TRIFAN,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY]      = _3DPRIM_LINELIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY]  = _3DPRIM_TRILIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
};
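
/* vk_to_gen_primitive_type[] above translates VkPrimitiveTopology values
 * into hardware 3DPRIM codes.  VK_PRIMITIVE_TOPOLOGY_PATCH_LIST is absent
 * on purpose: patch topologies depend on patchControlPoints and are handled
 * with _3DPRIM_PATCHLIST() in anv_pipeline_init() instead.
 */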

static void
populate_sampler_prog_key(const struct gen_device_info *devinfo,
                          struct brw_sampler_prog_key_data *key)
{
   /* Almost all multisampled textures are compressed.  The only time when we
    * don't compress a multisampled texture is for 16x MSAA with a surface
    * width greater than 8k which is a bit of an edge case.  Since the sampler
    * just ignores the MCS parameter to ld2ms when MCS is disabled, it's safe
    * to tell the compiler to always assume compression.
    */
   key->compressed_multisample_layout_mask = ~0;

   /* SkyLake added support for 16x MSAA.  With this came a new message for
    * reading from a 16x MSAA surface with compression.  The new message was
    * needed because now the MCS data is 64 bits instead of 32 or lower as is
    * the case for 8x, 4x, and 2x.  The key->msaa_16 bit-field controls which
    * message we use.  Fortunately, the 16x message works for 8x, 4x, and 2x
    * so we can just use it unconditionally.  This may not be quite as
    * efficient but it saves us from recompiling.
    */
   if (devinfo->gen >= 9)
      key->msaa_16 = ~0;

   /* XXX: Handle texture swizzle on HSW- */
   for (int i = 0; i < MAX_SAMPLERS; i++) {
      /* Assume color sampler, no swizzling. (Works for BDW+) */
      key->swizzles[i] = SWIZZLE_XYZW;
   }
}

static void
populate_vs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_vs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* XXX: Handle vertex input work-arounds */

   /* XXX: Handle sampler_prog_key */
}

static void
populate_gs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_gs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

static void
populate_wm_prog_key(const struct anv_pipeline *pipeline,
                     const VkGraphicsPipelineCreateInfo *info,
                     struct brw_wm_prog_key *key)
{
   const struct gen_device_info *devinfo = &pipeline->device->info;

   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* TODO: we could set this to 0 based on the information in nir_shader, but
    * this function is called before spirv_to_nir. */
   const struct brw_vue_map *vue_map =
      &anv_pipeline_get_last_vue_prog_data(pipeline)->vue_map;
   key->input_slots_valid = vue_map->slots_valid;

   /* Vulkan doesn't specify a default */
   key->high_quality_derivatives = false;

   /* XXX Vulkan doesn't appear to specify */
   key->clamp_fragment_color = false;

   key->nr_color_regions = pipeline->subpass->color_count;

   key->replicate_alpha = key->nr_color_regions > 1 &&
                          info->pMultisampleState &&
                          info->pMultisampleState->alphaToCoverageEnable;

   if (info->pMultisampleState) {
      /* We should probably pull this out of the shader, but it's fairly
       * harmless to compute it and then let dead-code take care of it.
       */
      if (info->pMultisampleState->rasterizationSamples > 1) {
         key->persample_interp =
            (info->pMultisampleState->minSampleShading *
             info->pMultisampleState->rasterizationSamples) > 1;
         key->multisample_fbo = true;
      }

      key->frag_coord_adds_sample_pos =
         info->pMultisampleState->sampleShadingEnable;
   }
}

static void
populate_cs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_cs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

static void
anv_pipeline_hash_shader(struct anv_pipeline *pipeline,
                         struct anv_shader_module *module,
                         const char *entrypoint,
                         gl_shader_stage stage,
                         const VkSpecializationInfo *spec_info,
                         const void *key, size_t key_size,
                         unsigned char *sha1_out)
{
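   /* The per-stage pipeline-cache key is a SHA-1 over everything that can
    * affect code generation: the subpass view mask (for multiview lowering),
    * the pipeline layout, the module's own SHA-1, the entrypoint name, the
    * stage, any specialization data, and the backend prog key.
    */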
   struct mesa_sha1 ctx;

   _mesa_sha1_init(&ctx);
   if (stage != MESA_SHADER_COMPUTE) {
      _mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
                        sizeof(pipeline->subpass->view_mask));
   }
   if (pipeline->layout) {
      _mesa_sha1_update(&ctx, pipeline->layout->sha1,
                        sizeof(pipeline->layout->sha1));
   }
   _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
   _mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
   _mesa_sha1_update(&ctx, &stage, sizeof(stage));
   if (spec_info) {
      _mesa_sha1_update(&ctx, spec_info->pMapEntries,
                        spec_info->mapEntryCount * sizeof(*spec_info->pMapEntries));
      _mesa_sha1_update(&ctx, spec_info->pData, spec_info->dataSize);
   }
   _mesa_sha1_update(&ctx, key, key_size);
   _mesa_sha1_final(&ctx, sha1_out);
}

static nir_shader *
anv_pipeline_compile(struct anv_pipeline *pipeline,
                     void *mem_ctx,
                     struct anv_shader_module *module,
                     const char *entrypoint,
                     gl_shader_stage stage,
                     const VkSpecializationInfo *spec_info,
                     struct brw_stage_prog_data *prog_data,
                     struct anv_pipeline_bind_map *map)
{
   nir_shader *nir = anv_shader_compile_to_nir(pipeline, mem_ctx,
                                               module, entrypoint, stage,
                                               spec_info);
   if (nir == NULL)
      return NULL;

   NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, pipeline);

   NIR_PASS_V(nir, anv_nir_lower_push_constants);

   if (stage != MESA_SHADER_COMPUTE)
      NIR_PASS_V(nir, anv_nir_lower_multiview, pipeline->subpass->view_mask);

   if (stage == MESA_SHADER_COMPUTE) {
      NIR_PASS_V(nir, brw_nir_lower_cs_shared);
      prog_data->total_shared = nir->num_shared;
   }

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   if (nir->num_uniforms > 0) {
      assert(prog_data->nr_params == 0);

      /* If the shader uses any push constants at all, we'll just give
       * them the maximum possible number
       */
      assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
      nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE;
      prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
      prog_data->param = ralloc_array(mem_ctx, uint32_t, prog_data->nr_params);

      /* We now set the param values to be offsets into a
       * anv_push_constant_data structure.  Since the compiler doesn't
       * actually dereference any of the gl_constant_value pointers in the
       * params array, it doesn't really matter what we put here.
       */
      struct anv_push_constants *null_data = NULL;
      /* Fill out the push constants section of the param array */
      for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++) {
         prog_data->param[i] = ANV_PARAM_PUSH(
            (uintptr_t)&null_data->client_data[i * sizeof(float)]);
      }
   }

   if (nir->info.num_ssbos > 0 || nir->info.num_images > 0)
      pipeline->needs_data_cache = true;

   /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
   if (pipeline->layout)
      anv_nir_apply_pipeline_layout(pipeline, nir, prog_data, map);

   assert(nir->num_uniforms == prog_data->nr_params * 4);

   return nir;
}

static void
anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
{
   prog_data->binding_table.size_bytes = 0;
   prog_data->binding_table.texture_start = bias;
   prog_data->binding_table.gather_texture_start = bias;
   prog_data->binding_table.ubo_start = bias;
   prog_data->binding_table.ssbo_start = bias;
   prog_data->binding_table.image_start = bias;
}
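
/* The bias above is the number of binding-table slots a stage reserves in
 * front of the descriptor-set surfaces: anv_pipeline_compile_fs() passes its
 * render-target count, anv_pipeline_compile_cs() passes 1, and the other
 * stages pass 0.
 */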

static struct anv_shader_bin *
anv_pipeline_upload_kernel(struct anv_pipeline *pipeline,
                           struct anv_pipeline_cache *cache,
                           const void *key_data, uint32_t key_size,
                           const void *kernel_data, uint32_t kernel_size,
                           const struct brw_stage_prog_data *prog_data,
                           uint32_t prog_data_size,
                           const struct anv_pipeline_bind_map *bind_map)
{
   if (cache) {
      return anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
                                              kernel_data, kernel_size,
                                              prog_data, prog_data_size,
                                              bind_map);
   } else {
      return anv_shader_bin_create(pipeline->device, key_data, key_size,
                                   kernel_data, kernel_size,
                                   prog_data, prog_data_size,
                                   prog_data->param, bind_map);
   }
}

static void
anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
                                gl_shader_stage stage,
                                struct anv_shader_bin *shader)
{
   pipeline->shaders[stage] = shader;
   pipeline->active_stages |= mesa_to_vk_shader_stage(stage);
}

static VkResult
anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct brw_vs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_vs_prog_key(&pipeline->device->info, &key);

   if (cache) {
      anv_pipeline_hash_shader(pipeline, module, entrypoint,
                               MESA_SHADER_VERTEX, spec_info,
                               &key, sizeof(key), sha1);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_vs_prog_data prog_data = {};
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      struct anv_pipeline_bind_map map = {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      void *mem_ctx = ralloc_context(NULL);

      nir_shader *nir = anv_pipeline_compile(pipeline, mem_ctx,
                                             module, entrypoint,
                                             MESA_SHADER_VERTEX, spec_info,
                                             &prog_data.base.base, &map);
      if (nir == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      anv_fill_binding_table(&prog_data.base.base, 0);

      brw_compute_vue_map(&pipeline->device->info,
                          &prog_data.base.vue_map,
                          nir->info.outputs_written,
                          nir->info.separate_shader);

      const unsigned *shader_code =
         brw_compile_vs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        -1, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      unsigned code_size = prog_data.base.base.program_size;
      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX, bin);

   return VK_SUCCESS;
}

static void
merge_tess_info(struct shader_info *tes_info,
                const struct shader_info *tcs_info)
{
   /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
    *
    *    "PointMode. Controls generation of points rather than triangles
    *     or lines. This functionality defaults to disabled, and is
    *     enabled if either shader stage includes the execution mode.
    *
    * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
    * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
    * and OutputVertices, it says:
    *
    *    "One mode must be set in at least one of the tessellation
    *     shader stages."
    *
    * So, the fields can be set in either the TCS or TES, but they must
    * agree if set in both.  Our backend looks at TES, so bitwise-or in
    * the values from the TCS.
    */
   assert(tcs_info->tess.tcs_vertices_out == 0 ||
          tes_info->tess.tcs_vertices_out == 0 ||
          tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
   tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;

   assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
          tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
          tcs_info->tess.spacing == tes_info->tess.spacing);
   tes_info->tess.spacing |= tcs_info->tess.spacing;

   assert(tcs_info->tess.primitive_mode == 0 ||
          tes_info->tess.primitive_mode == 0 ||
          tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
   tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
   tes_info->tess.ccw |= tcs_info->tess.ccw;
   tes_info->tess.point_mode |= tcs_info->tess.point_mode;
}

static VkResult
anv_pipeline_compile_tcs_tes(struct anv_pipeline *pipeline,
                             struct anv_pipeline_cache *cache,
                             const VkGraphicsPipelineCreateInfo *info,
                             struct anv_shader_module *tcs_module,
                             const char *tcs_entrypoint,
                             const VkSpecializationInfo *tcs_spec_info,
                             struct anv_shader_module *tes_module,
                             const char *tes_entrypoint,
                             const VkSpecializationInfo *tes_spec_info)
{
   const struct gen_device_info *devinfo = &pipeline->device->info;
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct brw_tcs_prog_key tcs_key = {};
   struct brw_tes_prog_key tes_key = {};
   struct anv_shader_bin *tcs_bin = NULL;
   struct anv_shader_bin *tes_bin = NULL;
   unsigned char tcs_sha1[40];
   unsigned char tes_sha1[40];

   populate_sampler_prog_key(&pipeline->device->info, &tcs_key.tex);
   populate_sampler_prog_key(&pipeline->device->info, &tes_key.tex);
   tcs_key.input_vertices = info->pTessellationState->patchControlPoints;

   if (cache) {
      anv_pipeline_hash_shader(pipeline, tcs_module, tcs_entrypoint,
                               MESA_SHADER_TESS_CTRL, tcs_spec_info,
                               &tcs_key, sizeof(tcs_key), tcs_sha1);
      anv_pipeline_hash_shader(pipeline, tes_module, tes_entrypoint,
                               MESA_SHADER_TESS_EVAL, tes_spec_info,
                               &tes_key, sizeof(tes_key), tes_sha1);
      memcpy(&tcs_sha1[20], tes_sha1, 20);
      memcpy(&tes_sha1[20], tcs_sha1, 20);
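
      /* Each 40-byte key is this stage's 20-byte SHA-1 followed by the other
       * stage's: TCS and TES are compiled as a pair, so a cache hit for one
       * is only valid together with the exact partner it was linked against.
       */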
      tcs_bin = anv_pipeline_cache_search(cache, tcs_sha1, sizeof(tcs_sha1));
      tes_bin = anv_pipeline_cache_search(cache, tes_sha1, sizeof(tes_sha1));
   }

   if (tcs_bin == NULL || tes_bin == NULL) {
      struct brw_tcs_prog_data tcs_prog_data = {};
      struct brw_tes_prog_data tes_prog_data = {};
      struct anv_pipeline_binding tcs_surface_to_descriptor[256];
      struct anv_pipeline_binding tcs_sampler_to_descriptor[256];
      struct anv_pipeline_binding tes_surface_to_descriptor[256];
      struct anv_pipeline_binding tes_sampler_to_descriptor[256];

      struct anv_pipeline_bind_map tcs_map = {
         .surface_to_descriptor = tcs_surface_to_descriptor,
         .sampler_to_descriptor = tcs_sampler_to_descriptor
      };
      struct anv_pipeline_bind_map tes_map = {
         .surface_to_descriptor = tes_surface_to_descriptor,
         .sampler_to_descriptor = tes_sampler_to_descriptor
      };

      void *mem_ctx = ralloc_context(NULL);

      nir_shader *tcs_nir =
         anv_pipeline_compile(pipeline, mem_ctx, tcs_module, tcs_entrypoint,
                              MESA_SHADER_TESS_CTRL, tcs_spec_info,
                              &tcs_prog_data.base.base, &tcs_map);
      nir_shader *tes_nir =
         anv_pipeline_compile(pipeline, mem_ctx, tes_module, tes_entrypoint,
                              MESA_SHADER_TESS_EVAL, tes_spec_info,
                              &tes_prog_data.base.base, &tes_map);
      if (tcs_nir == NULL || tes_nir == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      nir_lower_tes_patch_vertices(tes_nir,
                                   tcs_nir->info.tess.tcs_vertices_out);

      /* Copy TCS info into the TES info */
      merge_tess_info(&tes_nir->info, &tcs_nir->info);

      anv_fill_binding_table(&tcs_prog_data.base.base, 0);
      anv_fill_binding_table(&tes_prog_data.base.base, 0);

      /* Whacking the key after cache lookup is a bit sketchy, but all of
       * this comes from the SPIR-V, which is part of the hash used for the
       * pipeline cache.  So it should be safe.
       */
      tcs_key.tes_primitive_mode = tes_nir->info.tess.primitive_mode;
      tcs_key.outputs_written = tcs_nir->info.outputs_written;
      tcs_key.patch_outputs_written = tcs_nir->info.patch_outputs_written;
      tcs_key.quads_workaround =
         devinfo->gen < 9 &&
         tes_nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
         tes_nir->info.tess.spacing == TESS_SPACING_EQUAL;

      tes_key.inputs_read = tcs_key.outputs_written;
      tes_key.patch_inputs_read = tcs_key.patch_outputs_written;

      const int shader_time_index = -1;
      const unsigned *shader_code;

      shader_code =
         brw_compile_tcs(compiler, NULL, mem_ctx, &tcs_key, &tcs_prog_data,
                         tcs_nir, shader_time_index, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      unsigned code_size = tcs_prog_data.base.base.program_size;
      tcs_bin = anv_pipeline_upload_kernel(pipeline, cache,
                                           tcs_sha1, sizeof(tcs_sha1),
                                           shader_code, code_size,
                                           &tcs_prog_data.base.base,
                                           sizeof(tcs_prog_data),
                                           &tcs_map);
      if (!tcs_bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      shader_code =
         brw_compile_tes(compiler, NULL, mem_ctx, &tes_key,
                         &tcs_prog_data.base.vue_map, &tes_prog_data, tes_nir,
                         NULL, shader_time_index, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      code_size = tes_prog_data.base.base.program_size;
      tes_bin = anv_pipeline_upload_kernel(pipeline, cache,
                                           tes_sha1, sizeof(tes_sha1),
                                           shader_code, code_size,
                                           &tes_prog_data.base.base,
                                           sizeof(tes_prog_data),
                                           &tes_map);
      if (!tes_bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_TESS_CTRL, tcs_bin);
   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_TESS_EVAL, tes_bin);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct brw_gs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_gs_prog_key(&pipeline->device->info, &key);

   if (cache) {
      anv_pipeline_hash_shader(pipeline, module, entrypoint,
                               MESA_SHADER_GEOMETRY, spec_info,
                               &key, sizeof(key), sha1);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_gs_prog_data prog_data = {};
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      struct anv_pipeline_bind_map map = {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      void *mem_ctx = ralloc_context(NULL);

      nir_shader *nir = anv_pipeline_compile(pipeline, mem_ctx,
                                             module, entrypoint,
                                             MESA_SHADER_GEOMETRY, spec_info,
                                             &prog_data.base.base, &map);
      if (nir == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      anv_fill_binding_table(&prog_data.base.base, 0);

      brw_compute_vue_map(&pipeline->device->info,
                          &prog_data.base.vue_map,
                          nir->info.outputs_written,
                          nir->info.separate_shader);

      const unsigned *shader_code =
         brw_compile_gs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, -1, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      const unsigned code_size = prog_data.base.base.program_size;
      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY, bin);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct brw_wm_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_wm_prog_key(pipeline, info, &key);

   if (cache) {
      anv_pipeline_hash_shader(pipeline, module, entrypoint,
                               MESA_SHADER_FRAGMENT, spec_info,
                               &key, sizeof(key), sha1);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_wm_prog_data prog_data = {};
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      struct anv_pipeline_bind_map map = {
         .surface_to_descriptor = surface_to_descriptor + 8,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      void *mem_ctx = ralloc_context(NULL);

      nir_shader *nir = anv_pipeline_compile(pipeline, mem_ctx,
                                             module, entrypoint,
                                             MESA_SHADER_FRAGMENT, spec_info,
                                             &prog_data.base, &map);
      if (nir == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      unsigned num_rts = 0;
      struct anv_pipeline_binding rt_bindings[8];
      nir_function_impl *impl = nir_shader_get_entrypoint(nir);
      nir_foreach_variable_safe(var, &nir->outputs) {
         if (var->data.location < FRAG_RESULT_DATA0)
            continue;

         unsigned rt = var->data.location - FRAG_RESULT_DATA0;
         if (rt >= key.nr_color_regions) {
            /* Out-of-bounds, throw it away */
            var->data.mode = nir_var_local;
            exec_node_remove(&var->node);
            exec_list_push_tail(&impl->locals, &var->node);
            continue;
         }

         /* Give it a new, compacted, location */
         var->data.location = FRAG_RESULT_DATA0 + num_rts;

         unsigned array_len =
            glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
         assert(num_rts + array_len <= 8);

         for (unsigned i = 0; i < array_len; i++) {
            rt_bindings[num_rts + i] = (struct anv_pipeline_binding) {
               .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
               .index = rt + i,
            };
         }

         num_rts += array_len;
      }

      if (num_rts == 0) {
         /* If we have no render targets, we need a null render target */
         rt_bindings[0] = (struct anv_pipeline_binding) {
            .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
            .index = UINT32_MAX,
         };
         num_rts = 1;
      }

      assert(num_rts <= 8);
      map.surface_to_descriptor -= num_rts;
      map.surface_count += num_rts;
      assert(map.surface_count <= 256);
      memcpy(map.surface_to_descriptor, rt_bindings,
             num_rts * sizeof(*rt_bindings));
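
      /* map.surface_to_descriptor initially pointed 8 entries into
       * surface_to_descriptor[], leaving room in front; backing the pointer
       * up by num_rts and copying rt_bindings there makes the render-target
       * surfaces the first entries of the final binding table, which matches
       * the bias passed to anv_fill_binding_table() below.
       */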
      anv_fill_binding_table(&prog_data.base, num_rts);

      const unsigned *shader_code =
         brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, -1, -1, true, false, NULL, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      unsigned code_size = prog_data.base.program_size;
      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT, bin);

   return VK_SUCCESS;
}

VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct brw_cs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_cs_prog_key(&pipeline->device->info, &key);

   if (cache) {
      anv_pipeline_hash_shader(pipeline, module, entrypoint,
                               MESA_SHADER_COMPUTE, spec_info,
                               &key, sizeof(key), sha1);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_cs_prog_data prog_data = {};
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      struct anv_pipeline_bind_map map = {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      void *mem_ctx = ralloc_context(NULL);

      nir_shader *nir = anv_pipeline_compile(pipeline, mem_ctx,
                                             module, entrypoint,
                                             MESA_SHADER_COMPUTE, spec_info,
                                             &prog_data.base, &map);
      if (nir == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      anv_fill_binding_table(&prog_data.base, 1);

      const unsigned *shader_code =
         brw_compile_cs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        -1, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      const unsigned code_size = prog_data.base.program_size;
      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE, bin);

   return VK_SUCCESS;
}

/**
 * Copy pipeline state not marked as dynamic.
 * Dynamic state is pipeline state which hasn't been provided at pipeline
 * creation time, but is dynamically provided afterwards using various
 * vkCmdSet* functions.
 *
 * The set of state considered "non_dynamic" is determined by the pieces of
 * state that have their corresponding VkDynamicState enums omitted from
 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
 *
 * @param[out] pipeline    Destination non_dynamic state.
 * @param[in]  pCreateInfo Source of non_dynamic state to be copied.
 */
static void
copy_non_dynamic_state(struct anv_pipeline *pipeline,
                       const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
   anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
   struct anv_subpass *subpass = pipeline->subpass;

   pipeline->dynamic_state = default_dynamic_state;

   if (pCreateInfo->pDynamicState) {
      /* Remove all of the states that are marked as dynamic */
      uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
      for (uint32_t s = 0; s < count; s++)
         states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
   }

   struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;

   /* Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pViewportState is [...] NULL if the pipeline
    *    has rasterization disabled.
    */
   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
      assert(pCreateInfo->pViewportState);

      dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
      if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
         typed_memcpy(dynamic->viewport.viewports,
                      pCreateInfo->pViewportState->pViewports,
                      pCreateInfo->pViewportState->viewportCount);
      }

      dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
      if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
         typed_memcpy(dynamic->scissor.scissors,
                      pCreateInfo->pViewportState->pScissors,
                      pCreateInfo->pViewportState->scissorCount);
      }
   }

   if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
   }

   if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->depth_bias.bias =
         pCreateInfo->pRasterizationState->depthBiasConstantFactor;
      dynamic->depth_bias.clamp =
         pCreateInfo->pRasterizationState->depthBiasClamp;
      dynamic->depth_bias.slope =
         pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
   }

   /* Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pColorBlendState is [...] NULL if the pipeline has rasterization
    *    disabled or if the subpass of the render pass the pipeline is
    *    created against does not use any color attachments.
    */
   bool uses_color_att = false;
   for (unsigned i = 0; i < subpass->color_count; ++i) {
      if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
         uses_color_att = true;
         break;
      }
   }

   if (uses_color_att &&
       !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
      assert(pCreateInfo->pColorBlendState);

      if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
         typed_memcpy(dynamic->blend_constants,
                      pCreateInfo->pColorBlendState->blendConstants, 4);
   }

   /* If there is no depthstencil attachment, then don't read
    * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
    * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
    * no need to override the depthstencil defaults in
    * anv_pipeline::dynamic_state when there is no depthstencil attachment.
    *
    * Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pDepthStencilState is [...] NULL if the pipeline has rasterization
    *    disabled or if the subpass of the render pass the pipeline is created
    *    against does not use a depth/stencil attachment.
    */
   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
       subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
      assert(pCreateInfo->pDepthStencilState);

      if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
         dynamic->depth_bounds.min =
            pCreateInfo->pDepthStencilState->minDepthBounds;
         dynamic->depth_bounds.max =
            pCreateInfo->pDepthStencilState->maxDepthBounds;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
         dynamic->stencil_compare_mask.front =
            pCreateInfo->pDepthStencilState->front.compareMask;
         dynamic->stencil_compare_mask.back =
            pCreateInfo->pDepthStencilState->back.compareMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
         dynamic->stencil_write_mask.front =
            pCreateInfo->pDepthStencilState->front.writeMask;
         dynamic->stencil_write_mask.back =
            pCreateInfo->pDepthStencilState->back.writeMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
         dynamic->stencil_reference.front =
            pCreateInfo->pDepthStencilState->front.reference;
         dynamic->stencil_reference.back =
            pCreateInfo->pDepthStencilState->back.reference;
      }
   }

   pipeline->dynamic_state_mask = states;
}

static void
anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
{
   struct anv_render_pass *renderpass = NULL;
   struct anv_subpass *subpass = NULL;

   /* Assert that all required members of VkGraphicsPipelineCreateInfo are
    * present.  See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
    */
   assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   renderpass = anv_render_pass_from_handle(info->renderPass);
   assert(renderpass);

   assert(info->subpass < renderpass->subpass_count);
   subpass = &renderpass->subpasses[info->subpass];

   assert(info->stageCount >= 1);
   assert(info->pVertexInputState);
   assert(info->pInputAssemblyState);
   assert(info->pRasterizationState);
   if (!info->pRasterizationState->rasterizerDiscardEnable) {
      assert(info->pViewportState);
      assert(info->pMultisampleState);

      if (subpass && subpass->depth_stencil_attachment.attachment !=
                     VK_ATTACHMENT_UNUSED)
         assert(info->pDepthStencilState);

      if (subpass && subpass->color_count > 0)
         assert(info->pColorBlendState);
   }

   for (uint32_t i = 0; i < info->stageCount; ++i) {
      switch (info->pStages[i].stage) {
      case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
         assert(info->pTessellationState);
         break;
      default:
         break;
      }
   }
}

/**
 * Calculate the desired L3 partitioning based on the current state of the
 * pipeline.  For now this simply returns the conservative defaults calculated
 * by get_default_l3_weights(), but we could probably do better by gathering
 * more statistics from the pipeline state (e.g. guess of expected URB usage
 * and bound surfaces), or by using feed-back from performance counters.
 */
void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
{
   const struct gen_device_info *devinfo = &pipeline->device->info;

   const struct gen_l3_weights w =
      gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);

   pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
   pipeline->urb.total_size =
      gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
}

VkResult
anv_pipeline_init(struct anv_pipeline *pipeline,
                  struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc)
{
   VkResult result;

   anv_pipeline_validate_create_info(pCreateInfo);

   if (alloc == NULL)
      alloc = &device->alloc;

   pipeline->device = device;

   ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
   assert(pCreateInfo->subpass < render_pass->subpass_count);
   pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];

   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);

   result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
   if (result != VK_SUCCESS)
      return result;

   pipeline->batch.alloc = alloc;
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
   pipeline->batch.relocs = &pipeline->batch_relocs;
   pipeline->batch.status = VK_SUCCESS;

   copy_non_dynamic_state(pipeline, pCreateInfo);
   pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
                                  pCreateInfo->pRasterizationState->depthClampEnable;

   pipeline->sample_shading_enable = pCreateInfo->pMultisampleState &&
                                     pCreateInfo->pMultisampleState->sampleShadingEnable;

   pipeline->needs_data_cache = false;

   /* When we free the pipeline, we detect stages based on the NULL status
    * of various prog_data pointers.  Make them NULL by default.
    */
   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));

   pipeline->active_stages = 0;

   const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = {};
   struct anv_shader_module *modules[MESA_SHADER_STAGES] = {};
   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
      pStages[stage] = &pCreateInfo->pStages[i];
      modules[stage] = anv_shader_module_from_handle(pStages[stage]->module);
   }
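
   /* VkShaderStageFlagBits values are single bits, so ffs() converts each
    * pStages[] entry's stage bit into the corresponding gl_shader_stage
    * index, letting the compile calls below look up modules by stage.
    */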

   if (modules[MESA_SHADER_VERTEX]) {
      result = anv_pipeline_compile_vs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_VERTEX],
                                       pStages[MESA_SHADER_VERTEX]->pName,
                                       pStages[MESA_SHADER_VERTEX]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   if (modules[MESA_SHADER_TESS_EVAL]) {
      anv_pipeline_compile_tcs_tes(pipeline, cache, pCreateInfo,
                                   modules[MESA_SHADER_TESS_CTRL],
                                   pStages[MESA_SHADER_TESS_CTRL]->pName,
                                   pStages[MESA_SHADER_TESS_CTRL]->pSpecializationInfo,
                                   modules[MESA_SHADER_TESS_EVAL],
                                   pStages[MESA_SHADER_TESS_EVAL]->pName,
                                   pStages[MESA_SHADER_TESS_EVAL]->pSpecializationInfo);
   }

   if (modules[MESA_SHADER_GEOMETRY]) {
      result = anv_pipeline_compile_gs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_GEOMETRY],
                                       pStages[MESA_SHADER_GEOMETRY]->pName,
                                       pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   if (modules[MESA_SHADER_FRAGMENT]) {
      result = anv_pipeline_compile_fs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_FRAGMENT],
                                       pStages[MESA_SHADER_FRAGMENT]->pName,
                                       pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);

   anv_pipeline_setup_l3_config(pipeline, false);

   const VkPipelineVertexInputStateCreateInfo *vi_info =
      pCreateInfo->pVertexInputState;

   const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;

   pipeline->vb_used = 0;
   for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
      const VkVertexInputAttributeDescription *desc =
         &vi_info->pVertexAttributeDescriptions[i];

      if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
         pipeline->vb_used |= 1 << desc->binding;
   }

   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
      const VkVertexInputBindingDescription *desc =
         &vi_info->pVertexBindingDescriptions[i];

      pipeline->binding_stride[desc->binding] = desc->stride;

      /* Step rate is programmed per vertex element (attribute), not
       * binding. Set up a map of which bindings step per instance, for
       * reference by vertex element setup. */
      switch (desc->inputRate) {
      default:
      case VK_VERTEX_INPUT_RATE_VERTEX:
         pipeline->instancing_enable[desc->binding] = false;
         break;
      case VK_VERTEX_INPUT_RATE_INSTANCE:
         pipeline->instancing_enable[desc->binding] = true;
         break;
      }
   }

   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
      pCreateInfo->pInputAssemblyState;
   const VkPipelineTessellationStateCreateInfo *tess_info =
      pCreateInfo->pTessellationState;
   pipeline->primitive_restart = ia_info->primitiveRestartEnable;

   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
      pipeline->topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
   else
      pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];

   return VK_SUCCESS;

compile_fail:
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (pipeline->shaders[s])
         anv_shader_bin_unref(device, pipeline->shaders[s]);
   }

   anv_reloc_list_finish(&pipeline->batch_relocs, alloc);

   return result;
}