/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "anv_private.h"

#include "vk_format_info.h"

/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd*
 * entrypoints.  This file is concerned entirely with state emission and
 * not with the command buffer data structure itself.  As far as this file
 * is concerned, most of anv_cmd_buffer is magic.
 */

/* TODO: These are taken from GLES.  We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
   .primitive_topology = 0,
   .depth_test_enable = 0,
   .depth_write_enable = 0,
   .depth_compare_op = 0,
   .depth_bounds_test_enable = 0,
   .stencil_test_enable = 0,
};

/**
 * Copy the dynamic state from src to dest based on the copy_mask.
 *
 * Avoid copying states that have not changed, except for VIEWPORT, SCISSOR
 * and BLEND_CONSTANTS (always copy them if they are in the copy_mask).
 *
 * Returns a mask of the states which changed.
 */
anv_cmd_dirty_mask_t
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       anv_cmd_dirty_mask_t copy_mask)
{
   anv_cmd_dirty_mask_t changed = 0;

   if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
      changed |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
   }

   if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
      changed |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
   }

   if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);
      changed |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
   }

#define ANV_CMP_COPY(field, flag)                                 \
   if (copy_mask & flag) {                                        \
      if (dest->field != src->field) {                            \
         dest->field = src->field;                                \
         changed |= flag;                                         \
      }                                                           \
   }

   ANV_CMP_COPY(line_width, ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH);

   ANV_CMP_COPY(depth_bias.bias, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
   ANV_CMP_COPY(depth_bias.clamp, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
   ANV_CMP_COPY(depth_bias.slope, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);

   ANV_CMP_COPY(depth_bounds.min, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);
   ANV_CMP_COPY(depth_bounds.max, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);

   ANV_CMP_COPY(stencil_compare_mask.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);
   ANV_CMP_COPY(stencil_compare_mask.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);

   ANV_CMP_COPY(stencil_write_mask.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);
   ANV_CMP_COPY(stencil_write_mask.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);

   ANV_CMP_COPY(stencil_reference.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);
   ANV_CMP_COPY(stencil_reference.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);

   ANV_CMP_COPY(line_stipple.factor, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);
   ANV_CMP_COPY(line_stipple.pattern, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);

   ANV_CMP_COPY(cull_mode, ANV_CMD_DIRTY_DYNAMIC_CULL_MODE);
   ANV_CMP_COPY(front_face, ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE);
   ANV_CMP_COPY(primitive_topology, ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY);
   ANV_CMP_COPY(depth_test_enable, ANV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE);
   ANV_CMP_COPY(depth_write_enable, ANV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE);
   ANV_CMP_COPY(depth_compare_op, ANV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP);
   ANV_CMP_COPY(depth_bounds_test_enable, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE);
   ANV_CMP_COPY(stencil_test_enable, ANV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE);

   /* Guard on the dirty bit, not the VkDynamicState enum value: copy_mask is
    * an anv_cmd_dirty_mask_t, so testing it against
    * VK_DYNAMIC_STATE_STENCIL_OP_EXT would check arbitrary bits.
    */
   if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP) {
      ANV_CMP_COPY(stencil_op.front.fail_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
      ANV_CMP_COPY(stencil_op.front.pass_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
      ANV_CMP_COPY(stencil_op.front.depth_fail_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
      ANV_CMP_COPY(stencil_op.front.compare_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
      ANV_CMP_COPY(stencil_op.back.fail_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
      ANV_CMP_COPY(stencil_op.back.pass_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
      ANV_CMP_COPY(stencil_op.back.depth_fail_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
      ANV_CMP_COPY(stencil_op.back.compare_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
   }

#undef ANV_CMP_COPY

   return changed;
}
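
/* A minimal usage sketch: callers OR the returned mask into their dirty
 * bits so only the states that actually changed get re-emitted.  This is
 * exactly the call made when binding a graphics pipeline below:
 *
 *    cmd_buffer->state.gfx.dirty |=
 *       anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
 *                              &gfx_pipeline->dynamic_state,
 *                              gfx_pipeline->dynamic_state_mask);
 */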

static void
anv_cmd_state_init(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   memset(state, 0, sizeof(*state));

   state->current_pipeline = UINT32_MAX;
   state->restart_index = UINT32_MAX;
   state->gfx.dynamic = default_dynamic_state;
}

static void
anv_cmd_pipeline_state_finish(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_cmd_pipeline_state *pipe_state)
{
   for (uint32_t i = 0; i < ARRAY_SIZE(pipe_state->push_descriptors); i++) {
      if (pipe_state->push_descriptors[i]) {
         anv_descriptor_set_layout_unref(cmd_buffer->device,
                                         pipe_state->push_descriptors[i]->set.layout);
         vk_free(&cmd_buffer->pool->alloc, pipe_state->push_descriptors[i]);
      }
   }
}

static void
anv_cmd_state_finish(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   anv_cmd_pipeline_state_finish(cmd_buffer, &state->gfx.base);
   anv_cmd_pipeline_state_finish(cmd_buffer, &state->compute.base);

   vk_free(&cmd_buffer->pool->alloc, state->attachments);
}

static void
anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
{
   anv_cmd_state_finish(cmd_buffer);
   anv_cmd_state_init(cmd_buffer);
}

static VkResult anv_create_cmd_buffer(
    struct anv_device *                         device,
    struct anv_cmd_pool *                       pool,
    VkCommandBufferLevel                        level,
    VkCommandBuffer*                            pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &cmd_buffer->base,
                       VK_OBJECT_TYPE_COMMAND_BUFFER);

   cmd_buffer->batch.status = VK_SUCCESS;

   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_pool, 4096);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_pool, 16384);

   anv_cmd_state_init(cmd_buffer);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}

VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);
      for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
         pCommandBuffers[i] = VK_NULL_HANDLE;
   }

   return result;
}

static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_cmd_state_finish(cmd_buffer);

   vk_object_base_finish(&cmd_buffer->base);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (!cmd_buffer)
         continue;

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}

VkResult
anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
   cmd_buffer->usage_flags = 0;
   cmd_buffer->perf_query_pool = NULL;
   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
   anv_cmd_state_reset(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &cmd_buffer->device->surface_state_pool, 4096);

   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &cmd_buffer->device->dynamic_state_pool, 16384);

   return VK_SUCCESS;
}

VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   return anv_cmd_buffer_reset(cmd_buffer);
}

#define anv_genX_call(devinfo, func, ...)             \
   switch ((devinfo)->gen) {                          \
   case 7:                                            \
      if ((devinfo)->is_haswell) {                    \
         gen75_##func(__VA_ARGS__);                   \
      } else {                                        \
         gen7_##func(__VA_ARGS__);                    \
      }                                               \
      break;                                          \
   case 8:                                            \
      gen8_##func(__VA_ARGS__);                       \
      break;                                          \
   case 9:                                            \
      gen9_##func(__VA_ARGS__);                       \
      break;                                          \
   case 10:                                           \
      gen10_##func(__VA_ARGS__);                      \
      break;                                          \
   case 11:                                           \
      gen11_##func(__VA_ARGS__);                      \
      break;                                          \
   case 12:                                           \
      gen12_##func(__VA_ARGS__);                      \
      break;                                          \
   default:                                           \
      assert(!"Unknown hardware generation");         \
   }

void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_emit_state_base_address,
                 cmd_buffer);
}

void
anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  enum isl_aux_usage aux_usage,
                                  uint32_t level,
                                  uint32_t base_layer,
                                  uint32_t layer_count)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_mark_image_written,
                 cmd_buffer, image, aspect, aux_usage,
                 level, base_layer, layer_count);
}

void
anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_emit_conditional_render_predicate,
                 cmd_buffer);
}
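
/* Compare-and-update helper for the bind-map SHA-1 tracking below: it copies
 * src over dst and returns true only when the contents differ, so callers
 * can use the return value as a "changed" flag.
 */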
static bool
mem_update(void *dst, const void *src, size_t size)
{
   if (memcmp(dst, src, size) == 0)
      return false;

   memcpy(dst, src, size);
   return true;
}

static void
set_dirty_for_bind_map(struct anv_cmd_buffer *cmd_buffer,
                       gl_shader_stage stage,
                       const struct anv_pipeline_bind_map *map)
{
   if (mem_update(cmd_buffer->state.surface_sha1s[stage],
                  map->surface_sha1, sizeof(map->surface_sha1)))
      cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);

   if (mem_update(cmd_buffer->state.sampler_sha1s[stage],
                  map->sampler_sha1, sizeof(map->sampler_sha1)))
      cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);

   if (mem_update(cmd_buffer->state.push_sha1s[stage],
                  map->push_sha1, sizeof(map->push_sha1)))
      cmd_buffer->state.push_constants_dirty |= mesa_to_vk_shader_stage(stage);
}

void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE: {
      struct anv_compute_pipeline *compute_pipeline =
         anv_pipeline_to_compute(pipeline);
      if (cmd_buffer->state.compute.pipeline == compute_pipeline)
         return;

      cmd_buffer->state.compute.pipeline = compute_pipeline;
      cmd_buffer->state.compute.pipeline_dirty = true;
      set_dirty_for_bind_map(cmd_buffer, MESA_SHADER_COMPUTE,
                             &compute_pipeline->cs->bind_map);
      break;
   }

   case VK_PIPELINE_BIND_POINT_GRAPHICS: {
      struct anv_graphics_pipeline *gfx_pipeline =
         anv_pipeline_to_graphics(pipeline);
      if (cmd_buffer->state.gfx.pipeline == gfx_pipeline)
         return;

      cmd_buffer->state.gfx.pipeline = gfx_pipeline;
      cmd_buffer->state.gfx.vb_dirty |= gfx_pipeline->vb_used;
      cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;

      anv_foreach_stage(stage, gfx_pipeline->active_stages) {
         set_dirty_for_bind_map(cmd_buffer, stage,
                                &gfx_pipeline->shaders[stage]->bind_map);
      }

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.gfx.dirty |=
         anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
                                &gfx_pipeline->dynamic_state,
                                gfx_pipeline->dynamic_state_mask);
      break;
   }

   default:
      assert(!"invalid bind point");
      break;
   }
}

void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstViewport,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstViewport + viewportCount;
   if (cmd_buffer->state.gfx.dynamic.viewport.count < total_count)
      cmd_buffer->state.gfx.dynamic.viewport.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.viewport.viewports + firstViewport,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}

void anv_CmdSetViewportWithCountEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.viewport.count = viewportCount;

   memcpy(cmd_buffer->state.gfx.dynamic.viewport.viewports,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}

void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstScissor,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstScissor + scissorCount;
   if (cmd_buffer->state.gfx.dynamic.scissor.count < total_count)
      cmd_buffer->state.gfx.dynamic.scissor.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.scissor.scissors + firstScissor,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void anv_CmdSetScissorWithCountEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.scissor.count = scissorCount;

   memcpy(cmd_buffer->state.gfx.dynamic.scissor.scissors,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void anv_CmdSetPrimitiveTopologyEXT(
    VkCommandBuffer                             commandBuffer,
    VkPrimitiveTopology                         primitiveTopology)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.primitive_topology = primitiveTopology;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY;
}

void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.line_width = lineWidth;
   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.gfx.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.gfx.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}

void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.gfx.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}

void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.gfx.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}

void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}

void anv_CmdSetLineStippleEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    lineStippleFactor,
    uint16_t                                    lineStipplePattern)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.line_stipple.factor = lineStippleFactor;
   cmd_buffer->state.gfx.dynamic.line_stipple.pattern = lineStipplePattern;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
}

void anv_CmdSetCullModeEXT(
    VkCommandBuffer                             commandBuffer,
    VkCullModeFlags                             cullMode)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.cull_mode = cullMode;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_CULL_MODE;
}

void anv_CmdSetFrontFaceEXT(
    VkCommandBuffer                             commandBuffer,
    VkFrontFace                                 frontFace)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.front_face = frontFace;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE;
}

void anv_CmdSetDepthTestEnableEXT(
    VkCommandBuffer                             commandBuffer,
    VkBool32                                    depthTestEnable)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_test_enable = depthTestEnable;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE;
}

void anv_CmdSetDepthWriteEnableEXT(
    VkCommandBuffer                             commandBuffer,
    VkBool32                                    depthWriteEnable)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_write_enable = depthWriteEnable;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE;
}

void anv_CmdSetDepthCompareOpEXT(
    VkCommandBuffer                             commandBuffer,
    VkCompareOp                                 depthCompareOp)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_compare_op = depthCompareOp;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP;
}

void anv_CmdSetDepthBoundsTestEnableEXT(
    VkCommandBuffer                             commandBuffer,
    VkBool32                                    depthBoundsTestEnable)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bounds_test_enable = depthBoundsTestEnable;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE;
}

void anv_CmdSetStencilTestEnableEXT(
    VkCommandBuffer                             commandBuffer,
    VkBool32                                    stencilTestEnable)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.stencil_test_enable = stencilTestEnable;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE;
}

void anv_CmdSetStencilOpEXT(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    VkStencilOp                                 failOp,
    VkStencilOp                                 passOp,
    VkStencilOp                                 depthFailOp,
    VkCompareOp                                 compareOp)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
      cmd_buffer->state.gfx.dynamic.stencil_op.front.fail_op = failOp;
      cmd_buffer->state.gfx.dynamic.stencil_op.front.pass_op = passOp;
      cmd_buffer->state.gfx.dynamic.stencil_op.front.depth_fail_op = depthFailOp;
      cmd_buffer->state.gfx.dynamic.stencil_op.front.compare_op = compareOp;
   }

   if (faceMask & VK_STENCIL_FACE_BACK_BIT) {
      cmd_buffer->state.gfx.dynamic.stencil_op.back.fail_op = failOp;
      cmd_buffer->state.gfx.dynamic.stencil_op.back.pass_op = passOp;
      cmd_buffer->state.gfx.dynamic.stencil_op.back.depth_fail_op = depthFailOp;
      cmd_buffer->state.gfx.dynamic.stencil_op.back.compare_op = compareOp;
   }

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP;
}

void
anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                   VkPipelineBindPoint bind_point,
                                   struct anv_pipeline_layout *layout,
                                   uint32_t set_index,
                                   struct anv_descriptor_set *set,
                                   uint32_t *dynamic_offset_count,
                                   const uint32_t **dynamic_offsets)
{
   struct anv_descriptor_set_layout *set_layout =
      layout->set[set_index].layout;

   VkShaderStageFlags stages = set_layout->shader_stages;
   struct anv_cmd_pipeline_state *pipe_state;

   switch (bind_point) {
   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      stages &= VK_SHADER_STAGE_ALL_GRAPHICS;
      pipe_state = &cmd_buffer->state.gfx.base;
      break;

   case VK_PIPELINE_BIND_POINT_COMPUTE:
      stages &= VK_SHADER_STAGE_COMPUTE_BIT;
      pipe_state = &cmd_buffer->state.compute.base;
      break;

   default:
      unreachable("invalid bind point");
   }

   VkShaderStageFlags dirty_stages = 0;
   if (pipe_state->descriptors[set_index] != set) {
      pipe_state->descriptors[set_index] = set;
      dirty_stages |= stages;
   }

   /* If it's a push descriptor set, we have to flag things as dirty
    * regardless of whether or not the CPU-side data structure changed as we
    * may have edited in-place.
    */
   if (set->pool == NULL)
      dirty_stages |= stages;

   if (dynamic_offsets) {
      if (set_layout->dynamic_offset_count > 0) {
         uint32_t dynamic_offset_start =
            layout->set[set_index].dynamic_offset_start;

         anv_foreach_stage(stage, stages) {
            struct anv_push_constants *push =
               &cmd_buffer->state.push_constants[stage];
            uint32_t *push_offsets =
               &push->dynamic_offsets[dynamic_offset_start];

            /* Assert that everything is in range */
            assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
            assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
                   ARRAY_SIZE(push->dynamic_offsets));

            unsigned mask = set_layout->stage_dynamic_offsets[stage];
            STATIC_ASSERT(MAX_DYNAMIC_BUFFERS <= sizeof(mask) * 8);
            while (mask) {
               int i = u_bit_scan(&mask);
               if (push_offsets[i] != (*dynamic_offsets)[i]) {
                  push_offsets[i] = (*dynamic_offsets)[i];
                  dirty_stages |= mesa_to_vk_shader_stage(stage);
               }
            }
         }

         *dynamic_offsets += set_layout->dynamic_offset_count;
         *dynamic_offset_count -= set_layout->dynamic_offset_count;
      }
   }

   cmd_buffer->state.descriptors_dirty |= dirty_stages;
   cmd_buffer->state.push_constants_dirty |= dirty_stages;
}

void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(firstSet + descriptorSetCount <= MAX_SETS);

   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                         layout, firstSet + i, set,
                                         &dynamicOffsetCount,
                                         &pDynamicOffsets);
   }
}

void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up the vertex buffers since we need the
    * buffer stride from the pipeline.
    */

   assert(firstBinding + bindingCount <= MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[firstBinding + i].offset = pOffsets[i];
      cmd_buffer->state.gfx.vb_dirty |= 1 << (firstBinding + i);
   }
}

void anv_CmdBindTransformFeedbackBuffersEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets,
    const VkDeviceSize*                         pSizes)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_xfb_binding *xfb = cmd_buffer->state.xfb_bindings;

   /* We have to defer setting up the transform feedback buffers since we
    * need the buffer stride from the pipeline.
    */

   assert(firstBinding + bindingCount <= MAX_XFB_BUFFERS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      if (pBuffers[i] == VK_NULL_HANDLE) {
         xfb[firstBinding + i].buffer = NULL;
      } else {
         ANV_FROM_HANDLE(anv_buffer, buffer, pBuffers[i]);
         xfb[firstBinding + i].buffer = buffer;
         xfb[firstBinding + i].offset = pOffsets[i];
         xfb[firstBinding + i].size =
            anv_buffer_get_range(buffer, pOffsets[i],
                                 pSizes ? pSizes[i] : VK_WHOLE_SIZE);
      }
   }
}

enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return ISL_FORMAT_R32G32B32A32_FLOAT;

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return ISL_FORMAT_RAW;

   default:
      unreachable("Invalid descriptor type");
   }
}

struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}

struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   struct anv_push_constants *data =
      &cmd_buffer->state.push_constants[stage];

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         sizeof(struct anv_push_constants),
                                         32 /* bottom 5 bits MBZ */);
   memcpy(state.map, data, sizeof(struct anv_push_constants));

   return state;
}

struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_push_constants *data =
      &cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct anv_push_range *range = &pipeline->cs->bind_map.push_ranges[0];

   const struct anv_cs_parameters cs_params = anv_cs_parameters(pipeline);
   const unsigned total_push_constants_size =
      brw_cs_push_const_total_size(cs_prog_data, cs_params.threads);
   if (total_push_constants_size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned push_constant_alignment =
      cmd_buffer->device->info.gen < 8 ? 32 : 64;
   const unsigned aligned_total_push_constants_size =
      ALIGN(total_push_constants_size, push_constant_alignment);
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         aligned_total_push_constants_size,
                                         push_constant_alignment);

   void *dst = state.map;
   const void *src = (char *)data + (range->start * 32);

   if (cs_prog_data->push.cross_thread.size > 0) {
      memcpy(dst, src, cs_prog_data->push.cross_thread.size);
      dst += cs_prog_data->push.cross_thread.size;
      src += cs_prog_data->push.cross_thread.size;
   }

   if (cs_prog_data->push.per_thread.size > 0) {
      for (unsigned t = 0; t < cs_params.threads; t++) {
         memcpy(dst, src, cs_prog_data->push.per_thread.size);

         uint32_t *subgroup_id = dst +
            offsetof(struct anv_push_constants, cs.subgroup_id) -
            (range->start * 32 + cs_prog_data->push.cross_thread.size);
         *subgroup_id = t;

         dst += cs_prog_data->push.per_thread.size;
      }
   }

   return state;
}

void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      memcpy(cmd_buffer->state.push_constants[stage].client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}

VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_COMMAND_POOL);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->vk.alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   vk_object_base_finish(&pool->base);
   vk_free2(&device->vk.alloc, pAllocator, pool);
}

VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   list_for_each_entry(struct anv_cmd_buffer, cmd_buffer,
                       &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_reset(cmd_buffer);
   }

   return VK_SUCCESS;
}

void anv_TrimCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolTrimFlags                      flags)
{
   /* Nothing for us to do here.  Our pools stay pretty tidy. */
}

/**
 * Return NULL if the current subpass has no depthstencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;

   if (subpass->depth_stencil_attachment == NULL)
      return NULL;

   const struct anv_image_view *iview =
      cmd_buffer->state.attachments[subpass->depth_stencil_attachment->attachment].image_view;

   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
                                VK_IMAGE_ASPECT_STENCIL_BIT));

   return iview;
}

static struct anv_descriptor_set *
anv_cmd_buffer_push_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                   VkPipelineBindPoint bind_point,
                                   struct anv_descriptor_set_layout *layout,
                                   uint32_t _set)
{
   struct anv_cmd_pipeline_state *pipe_state;
   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      pipe_state = &cmd_buffer->state.compute.base;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      pipe_state = &cmd_buffer->state.gfx.base;
   }

   struct anv_push_descriptor_set **push_set =
      &pipe_state->push_descriptors[_set];

   if (*push_set == NULL) {
      *push_set = vk_zalloc(&cmd_buffer->pool->alloc,
                            sizeof(struct anv_push_descriptor_set), 8,
                            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*push_set == NULL) {
         anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
         return NULL;
      }
   }

   struct anv_descriptor_set *set = &(*push_set)->set;

   if (set->layout != layout) {
      if (set->layout)
         anv_descriptor_set_layout_unref(cmd_buffer->device, set->layout);
      anv_descriptor_set_layout_ref(layout);
      set->layout = layout;
   }
   set->size = anv_descriptor_set_layout_size(layout);
   set->buffer_view_count = layout->buffer_view_count;
   set->buffer_views = (*push_set)->buffer_views;

   if (layout->descriptor_buffer_size &&
       ((*push_set)->set_used_on_gpu ||
        set->desc_mem.alloc_size < layout->descriptor_buffer_size)) {
      /* The previous buffer is either actively used by some GPU command (so
       * we can't modify it) or is too small.  Allocate a new one.
       */
      struct anv_state desc_mem =
         anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                layout->descriptor_buffer_size, 32);
      if (set->desc_mem.alloc_size) {
         /* TODO: Do we really need to copy all the time? */
         memcpy(desc_mem.map, set->desc_mem.map,
                MIN2(desc_mem.alloc_size, set->desc_mem.alloc_size));
      }
      set->desc_mem = desc_mem;

      struct anv_address addr = {
         .bo = cmd_buffer->dynamic_state_stream.state_pool->block_pool.bo,
         .offset = set->desc_mem.offset,
      };

      const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
      set->desc_surface_state =
         anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                                isl_dev->ss.size, isl_dev->ss.align);
      anv_fill_buffer_surface_state(cmd_buffer->device,
                                    set->desc_surface_state,
                                    ISL_FORMAT_R32G32B32A32_FLOAT,
                                    addr, layout->descriptor_buffer_size, 1);
   }

   return set;
}

void anv_CmdPushDescriptorSetKHR(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    uint32_t                                    descriptorWriteCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_SETS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_descriptor_set *set =
      anv_cmd_buffer_push_descriptor_set(cmd_buffer, pipelineBindPoint,
                                         set_layout, _set);
   if (!set)
      return;

   /* Go through the user supplied descriptors. */
   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            anv_descriptor_set_write_image_view(cmd_buffer->device, set,
                                                write->pImageInfo + j,
                                                write->descriptorType,
                                                write->dstBinding,
                                                write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer_view, bview,
                            write->pTexelBufferView[j]);

            anv_descriptor_set_write_buffer_view(cmd_buffer->device, set,
                                                 write->descriptorType,
                                                 bview,
                                                 write->dstBinding,
                                                 write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);

            anv_descriptor_set_write_buffer(cmd_buffer->device, set,
                                            &cmd_buffer->surface_state_stream,
                                            write->descriptorType,
                                            buffer,
                                            write->dstBinding,
                                            write->dstArrayElement + j,
                                            write->pBufferInfo[j].offset,
                                            write->pBufferInfo[j].range);
         }
         break;

      default:
         break;
      }
   }

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                      layout, _set, set, NULL, NULL);
}

void anv_CmdPushDescriptorSetWithTemplateKHR(
    VkCommandBuffer                             commandBuffer,
    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_descriptor_update_template, template,
                   descriptorUpdateTemplate);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_PUSH_DESCRIPTORS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_descriptor_set *set =
      anv_cmd_buffer_push_descriptor_set(cmd_buffer, template->bind_point,
                                         set_layout, _set);
   if (!set)
      return;

   anv_descriptor_set_write_template(cmd_buffer->device, set,
                                     &cmd_buffer->surface_state_stream,
                                     template,
                                     pData);

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, template->bind_point,
                                      layout, _set, set, NULL, NULL);
}

void anv_CmdSetDeviceMask(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    deviceMask)
{
   /* No-op */
}