/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "anv_private.h"

#include "vk_format_info.h"

/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd* entrypoints.
 * This file is concerned entirely with state emission and not with the
 * command buffer data structure itself.  As far as this file is concerned,
 * most of anv_cmd_buffer is magic.
 */

/* TODO: These are taken from GLES.  We should check the Vulkan spec. */
const struct anv_dynamic_state default_dynamic_state = {
   .viewport = {
      .count = 0,
   },
   .scissor = {
      .count = 0,
   },
   .line_width = 1.0f,
   .depth_bias = {
      .bias = 0.0f,
      .clamp = 0.0f,
      .slope = 0.0f,
   },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = {
      .min = 0.0f,
      .max = 1.0f,
   },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
   .line_stipple = {
      .factor = 0u,
      .pattern = 0u,
   },
};

/**
 * Copy the dynamic state from src to dest based on the copy_mask.
 *
 * Avoid copying states that have not changed, except for VIEWPORT, SCISSOR
 * and BLEND_CONSTANTS (always copy them if they are in the copy_mask).
 *
 * Returns a mask of the states which changed.
 */
anv_cmd_dirty_mask_t
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       anv_cmd_dirty_mask_t copy_mask)
{
   anv_cmd_dirty_mask_t changed = 0;

   if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
      changed |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
   }

   if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
      changed |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
   }

   if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);
      changed |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
   }

#define ANV_CMP_COPY(field, flag)                       \
   if (copy_mask & flag) {                              \
      if (dest->field != src->field) {                  \
         dest->field = src->field;                      \
         changed |= flag;                               \
      }                                                 \
   }
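
   /* As an illustration, ANV_CMP_COPY(line_width, ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
    * expands (modulo whitespace) to:
    *
    *    if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) {
    *       if (dest->line_width != src->line_width) {
    *          dest->line_width = src->line_width;
    *          changed |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
    *       }
    *    }
    */
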
   ANV_CMP_COPY(line_width, ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH);

   ANV_CMP_COPY(depth_bias.bias, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
   ANV_CMP_COPY(depth_bias.clamp, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
   ANV_CMP_COPY(depth_bias.slope, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);

   ANV_CMP_COPY(depth_bounds.min, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);
   ANV_CMP_COPY(depth_bounds.max, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);

   ANV_CMP_COPY(stencil_compare_mask.front,
                ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);
   ANV_CMP_COPY(stencil_compare_mask.back,
                ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);

   ANV_CMP_COPY(stencil_write_mask.front,
                ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);
   ANV_CMP_COPY(stencil_write_mask.back,
                ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);

   ANV_CMP_COPY(stencil_reference.front,
                ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);
   ANV_CMP_COPY(stencil_reference.back,
                ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);

   ANV_CMP_COPY(line_stipple.factor, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);
   ANV_CMP_COPY(line_stipple.pattern, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);

#undef ANV_CMP_COPY

   return changed;
}

static void
anv_cmd_state_init(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   memset(state, 0, sizeof(*state));

   state->current_pipeline = UINT32_MAX;
   state->restart_index = UINT32_MAX;
   state->gfx.dynamic = default_dynamic_state;
}

static void
anv_cmd_pipeline_state_finish(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_cmd_pipeline_state *pipe_state)
{
   for (uint32_t i = 0; i < ARRAY_SIZE(pipe_state->push_descriptors); i++) {
      if (pipe_state->push_descriptors[i]) {
         anv_descriptor_set_layout_unref(cmd_buffer->device,
                                         pipe_state->push_descriptors[i]->set.layout);
         vk_free(&cmd_buffer->pool->alloc, pipe_state->push_descriptors[i]);
      }
   }
}

static void
anv_cmd_state_finish(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   anv_cmd_pipeline_state_finish(cmd_buffer, &state->gfx.base);
   anv_cmd_pipeline_state_finish(cmd_buffer, &state->compute.base);

   vk_free(&cmd_buffer->pool->alloc, state->attachments);
}

static void
anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
{
   anv_cmd_state_finish(cmd_buffer);
   anv_cmd_state_init(cmd_buffer);
}

static VkResult anv_create_cmd_buffer(
    struct anv_device *                         device,
    struct anv_cmd_pool *                       pool,
    VkCommandBufferLevel                        level,
    VkCommandBuffer*                            pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &cmd_buffer->base,
                       VK_OBJECT_TYPE_COMMAND_BUFFER);

   cmd_buffer->batch.status = VK_SUCCESS;

   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_pool, 4096);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_pool, 16384);

   anv_cmd_state_init(cmd_buffer);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}

VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);
      for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
         pCommandBuffers[i] = VK_NULL_HANDLE;
   }

   return result;
}

static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_cmd_state_finish(cmd_buffer);

   vk_object_base_finish(&cmd_buffer->base);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (!cmd_buffer)
         continue;

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}

VkResult
anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
   cmd_buffer->usage_flags = 0;
   cmd_buffer->perf_query_pool = NULL;
   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
   anv_cmd_state_reset(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &cmd_buffer->device->surface_state_pool, 4096);

   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &cmd_buffer->device->dynamic_state_pool, 16384);

   return VK_SUCCESS;
}

VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   return anv_cmd_buffer_reset(cmd_buffer);
}

#define anv_genX_call(devinfo, func, ...)             \
   switch ((devinfo)->gen) {                          \
   case 7:                                            \
      if ((devinfo)->is_haswell) {                    \
         gen75_##func(__VA_ARGS__);                   \
      } else {                                        \
         gen7_##func(__VA_ARGS__);                    \
      }                                               \
      break;                                          \
   case 8:                                            \
      gen8_##func(__VA_ARGS__);                       \
      break;                                          \
   case 9:                                            \
      gen9_##func(__VA_ARGS__);                       \
      break;                                          \
   case 10:                                           \
      gen10_##func(__VA_ARGS__);                      \
      break;                                          \
   case 11:                                           \
      gen11_##func(__VA_ARGS__);                      \
      break;                                          \
   case 12:                                           \
      gen12_##func(__VA_ARGS__);                      \
      break;                                          \
   default:                                           \
      assert(!"Unknown hardware generation");         \
   }

void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_emit_state_base_address,
                 cmd_buffer);
}

void
anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  enum isl_aux_usage aux_usage,
                                  uint32_t level,
                                  uint32_t base_layer,
                                  uint32_t layer_count)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_mark_image_written,
                 cmd_buffer, image, aspect, aux_usage,
                 level, base_layer, layer_count);
}

void
anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_emit_conditional_render_predicate,
                 cmd_buffer);
}

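/* Overwrite dst with src and report whether the contents actually changed.
 * set_dirty_for_bind_map() below uses this to compare a newly bound
 * pipeline's bind map SHA-1s against the previously seen ones, so state is
 * only flagged dirty when the mapping really differs.
 */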
static bool
mem_update(void *dst, const void *src, size_t size)
{
   if (memcmp(dst, src, size) == 0)
      return false;

   memcpy(dst, src, size);
   return true;
}

static void
set_dirty_for_bind_map(struct anv_cmd_buffer *cmd_buffer,
                       gl_shader_stage stage,
                       const struct anv_pipeline_bind_map *map)
{
   if (mem_update(cmd_buffer->state.surface_sha1s[stage],
                  map->surface_sha1, sizeof(map->surface_sha1)))
      cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);

   if (mem_update(cmd_buffer->state.sampler_sha1s[stage],
                  map->sampler_sha1, sizeof(map->sampler_sha1)))
      cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);

   if (mem_update(cmd_buffer->state.push_sha1s[stage],
                  map->push_sha1, sizeof(map->push_sha1)))
      cmd_buffer->state.push_constants_dirty |= mesa_to_vk_shader_stage(stage);
}

void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE: {
      struct anv_compute_pipeline *compute_pipeline =
         anv_pipeline_to_compute(pipeline);
      if (cmd_buffer->state.compute.pipeline == compute_pipeline)
         return;

      cmd_buffer->state.compute.pipeline = compute_pipeline;
      cmd_buffer->state.compute.pipeline_dirty = true;
      set_dirty_for_bind_map(cmd_buffer, MESA_SHADER_COMPUTE,
                             &compute_pipeline->cs->bind_map);
      break;
   }

   case VK_PIPELINE_BIND_POINT_GRAPHICS: {
      struct anv_graphics_pipeline *gfx_pipeline =
         anv_pipeline_to_graphics(pipeline);
      if (cmd_buffer->state.gfx.pipeline == gfx_pipeline)
         return;

      cmd_buffer->state.gfx.pipeline = gfx_pipeline;
      cmd_buffer->state.gfx.vb_dirty |= gfx_pipeline->vb_used;
      cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;

      anv_foreach_stage(stage, gfx_pipeline->active_stages) {
         set_dirty_for_bind_map(cmd_buffer, stage,
                                &gfx_pipeline->shaders[stage]->bind_map);
      }

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.gfx.dirty |=
         anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
                                &gfx_pipeline->dynamic_state,
                                gfx_pipeline->dynamic_state_mask);
      break;
   }

   default:
      assert(!"invalid bind point");
      break;
   }
}

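/* The vkCmdSet* entrypoints below only record the new values in
 * cmd_buffer->state and set the matching dirty bit; the corresponding
 * hardware state is emitted later by the gen-specific code when the next
 * draw or dispatch is flushed.
 */
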
void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstViewport,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstViewport + viewportCount;
   if (cmd_buffer->state.gfx.dynamic.viewport.count < total_count)
      cmd_buffer->state.gfx.dynamic.viewport.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.viewport.viewports + firstViewport,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}

void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstScissor,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstScissor + scissorCount;
   if (cmd_buffer->state.gfx.dynamic.scissor.count < total_count)
      cmd_buffer->state.gfx.dynamic.scissor.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.scissor.scissors + firstScissor,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.line_width = lineWidth;
   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.gfx.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.gfx.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}

void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.gfx.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}

void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.gfx.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}

void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}

void anv_CmdSetLineStippleEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    lineStippleFactor,
    uint16_t                                    lineStipplePattern)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.line_stipple.factor = lineStippleFactor;
   cmd_buffer->state.gfx.dynamic.line_stipple.pattern = lineStipplePattern;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
}

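/* Bind a single descriptor set, narrowing the set layout's shader stages to
 * the ones valid for the bind point.  This set's dynamic offsets are
 * consumed from the caller's array (advancing *dynamic_offsets and
 * decrementing *dynamic_offset_count) so that consecutive sets can share
 * the single offset array that vkCmdBindDescriptorSets provides.
 */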
void
anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                   VkPipelineBindPoint bind_point,
                                   struct anv_pipeline_layout *layout,
                                   uint32_t set_index,
                                   struct anv_descriptor_set *set,
                                   uint32_t *dynamic_offset_count,
                                   const uint32_t **dynamic_offsets)
{
   struct anv_descriptor_set_layout *set_layout =
      layout->set[set_index].layout;

   VkShaderStageFlags stages = set_layout->shader_stages;
   struct anv_cmd_pipeline_state *pipe_state;

   switch (bind_point) {
   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      stages &= VK_SHADER_STAGE_ALL_GRAPHICS;
      pipe_state = &cmd_buffer->state.gfx.base;
      break;

   case VK_PIPELINE_BIND_POINT_COMPUTE:
      stages &= VK_SHADER_STAGE_COMPUTE_BIT;
      pipe_state = &cmd_buffer->state.compute.base;
      break;

   default:
      unreachable("invalid bind point");
   }

   VkShaderStageFlags dirty_stages = 0;
   if (pipe_state->descriptors[set_index] != set) {
      pipe_state->descriptors[set_index] = set;
      dirty_stages |= stages;
   }

   /* If it's a push descriptor set, we have to flag things as dirty
    * regardless of whether or not the CPU-side data structure changed as we
    * may have edited in-place.
    */
   if (set->pool == NULL)
      dirty_stages |= stages;

   if (dynamic_offsets) {
      if (set_layout->dynamic_offset_count > 0) {
         uint32_t dynamic_offset_start =
            layout->set[set_index].dynamic_offset_start;

         anv_foreach_stage(stage, stages) {
            struct anv_push_constants *push =
               &cmd_buffer->state.push_constants[stage];
            uint32_t *push_offsets =
               &push->dynamic_offsets[dynamic_offset_start];

            /* Assert that everything is in range */
            assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
            assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
                   ARRAY_SIZE(push->dynamic_offsets));

            unsigned mask = set_layout->stage_dynamic_offsets[stage];
            STATIC_ASSERT(MAX_DYNAMIC_BUFFERS <= sizeof(mask) * 8);
            while (mask) {
               int i = u_bit_scan(&mask);
               if (push_offsets[i] != (*dynamic_offsets)[i]) {
                  push_offsets[i] = (*dynamic_offsets)[i];
                  dirty_stages |= mesa_to_vk_shader_stage(stage);
               }
            }
         }

         *dynamic_offsets += set_layout->dynamic_offset_count;
         *dynamic_offset_count -= set_layout->dynamic_offset_count;
      }
   }

   cmd_buffer->state.descriptors_dirty |= dirty_stages;
   cmd_buffer->state.push_constants_dirty |= dirty_stages;
}

void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(firstSet + descriptorSetCount <= MAX_SETS);

   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                         layout, firstSet + i, set,
                                         &dynamicOffsetCount,
                                         &pDynamicOffsets);
   }
}

void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up vertex buffers since we need the buffer
    * stride from the pipeline. */

   assert(firstBinding + bindingCount <= MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[firstBinding + i].offset = pOffsets[i];
      cmd_buffer->state.gfx.vb_dirty |= 1 << (firstBinding + i);
   }
}

void anv_CmdBindTransformFeedbackBuffersEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets,
    const VkDeviceSize*                         pSizes)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_xfb_binding *xfb = cmd_buffer->state.xfb_bindings;

   /* We have to defer setting up the transform feedback buffers since we
    * need the buffer stride from the pipeline. */

   assert(firstBinding + bindingCount <= MAX_XFB_BUFFERS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      if (pBuffers[i] == VK_NULL_HANDLE) {
         xfb[firstBinding + i].buffer = NULL;
      } else {
         ANV_FROM_HANDLE(anv_buffer, buffer, pBuffers[i]);
         xfb[firstBinding + i].buffer = buffer;
         xfb[firstBinding + i].offset = pOffsets[i];
         xfb[firstBinding + i].size =
            anv_buffer_get_range(buffer, pOffsets[i],
                                 pSizes ? pSizes[i] : VK_WHOLE_SIZE);
      }
   }
}

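/* Surface format to use when a buffer descriptor of the given type is bound:
 * uniform buffers are accessed through a typed RGBA32_FLOAT surface while
 * storage buffers get a RAW (untyped) surface.
 */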
enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return ISL_FORMAT_R32G32B32A32_FLOAT;

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return ISL_FORMAT_RAW;

   default:
      unreachable("Invalid descriptor type");
   }
}

struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}

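/* Like anv_cmd_buffer_emit_dynamic(), but the emitted DWords are the
 * bitwise OR of the two input arrays a and b, so pre-packed hardware state
 * from two sources can be combined into one dynamic-state allocation.
 */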
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   struct anv_push_constants *data =
      &cmd_buffer->state.push_constants[stage];

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         sizeof(struct anv_push_constants),
                                         32 /* bottom 5 bits MBZ */);
   memcpy(state.map, data, sizeof(struct anv_push_constants));

   return state;
}

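/* Compute push constants are laid out as a single cross-thread block that
 * all threads share, followed by one per-thread block replicated for each
 * of the `threads` hardware threads.  Each per-thread copy has its
 * cs.subgroup_id slot patched to the thread index so a thread can tell
 * which slice of the dispatch it owns.
 */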
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_push_constants *data =
      &cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct anv_push_range *range = &pipeline->cs->bind_map.push_ranges[0];

   const uint32_t threads = anv_cs_threads(pipeline);
   const unsigned total_push_constants_size =
      brw_cs_push_const_total_size(cs_prog_data, threads);
   if (total_push_constants_size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned push_constant_alignment =
      cmd_buffer->device->info.gen < 8 ? 32 : 64;
   const unsigned aligned_total_push_constants_size =
      ALIGN(total_push_constants_size, push_constant_alignment);
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         aligned_total_push_constants_size,
                                         push_constant_alignment);

   void *dst = state.map;
   const void *src = (char *)data + (range->start * 32);

   if (cs_prog_data->push.cross_thread.size > 0) {
      memcpy(dst, src, cs_prog_data->push.cross_thread.size);
      dst += cs_prog_data->push.cross_thread.size;
      src += cs_prog_data->push.cross_thread.size;
   }

   if (cs_prog_data->push.per_thread.size > 0) {
      for (unsigned t = 0; t < threads; t++) {
         memcpy(dst, src, cs_prog_data->push.per_thread.size);

         uint32_t *subgroup_id = dst +
            offsetof(struct anv_push_constants, cs.subgroup_id) -
            (range->start * 32 + cs_prog_data->push.cross_thread.size);
         *subgroup_id = t;

         dst += cs_prog_data->push.per_thread.size;
      }
   }

   return state;
}

void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      memcpy(cmd_buffer->state.push_constants[stage].client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}

VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_COMMAND_POOL);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->vk.alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   vk_object_base_finish(&pool->base);
   vk_free2(&device->vk.alloc, pAllocator, pool);
}

VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   list_for_each_entry(struct anv_cmd_buffer, cmd_buffer,
                       &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_reset(cmd_buffer);
   }

   return VK_SUCCESS;
}

void anv_TrimCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolTrimFlags                      flags)
{
   /* Nothing for us to do here.  Our pools stay pretty tidy. */
}

/**
 * Return NULL if the current subpass has no depth/stencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;

   if (subpass->depth_stencil_attachment == NULL)
      return NULL;

   const struct anv_image_view *iview =
      cmd_buffer->state.attachments[subpass->depth_stencil_attachment->attachment].image_view;

   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
                                VK_IMAGE_ASPECT_STENCIL_BIT));

   return iview;
}

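/* Look up (lazily allocating) the push descriptor set for the given bind
 * point and set index.  On host allocation failure this returns NULL after
 * flagging the batch with VK_ERROR_OUT_OF_HOST_MEMORY, so callers just bail.
 */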
static struct anv_descriptor_set *
anv_cmd_buffer_push_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                   VkPipelineBindPoint bind_point,
                                   struct anv_descriptor_set_layout *layout,
                                   uint32_t _set)
{
   struct anv_cmd_pipeline_state *pipe_state;
   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      pipe_state = &cmd_buffer->state.compute.base;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      pipe_state = &cmd_buffer->state.gfx.base;
   }

   struct anv_push_descriptor_set **push_set =
      &pipe_state->push_descriptors[_set];

   if (*push_set == NULL) {
      *push_set = vk_zalloc(&cmd_buffer->pool->alloc,
                            sizeof(struct anv_push_descriptor_set), 8,
                            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*push_set == NULL) {
         anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
         return NULL;
      }
   }

   struct anv_descriptor_set *set = &(*push_set)->set;

   if (set->layout != layout) {
      if (set->layout)
         anv_descriptor_set_layout_unref(cmd_buffer->device, set->layout);
      anv_descriptor_set_layout_ref(layout);
      set->layout = layout;
   }
   set->size = anv_descriptor_set_layout_size(layout);
   set->buffer_view_count = layout->buffer_view_count;
   set->buffer_views = (*push_set)->buffer_views;

   if (layout->descriptor_buffer_size &&
       ((*push_set)->set_used_on_gpu ||
        set->desc_mem.alloc_size < layout->descriptor_buffer_size)) {
      /* The previous buffer is either actively used by some GPU command (so
       * we can't modify it) or is too small.  Allocate a new one.
       */
      struct anv_state desc_mem =
         anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                layout->descriptor_buffer_size, 32);
      if (set->desc_mem.alloc_size) {
         /* TODO: Do we really need to copy all the time? */
         memcpy(desc_mem.map, set->desc_mem.map,
                MIN2(desc_mem.alloc_size, set->desc_mem.alloc_size));
      }
      set->desc_mem = desc_mem;

      struct anv_address addr = {
         .bo = cmd_buffer->dynamic_state_stream.state_pool->block_pool.bo,
         .offset = set->desc_mem.offset,
      };

      const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
      set->desc_surface_state =
         anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                                isl_dev->ss.size, isl_dev->ss.align);
      anv_fill_buffer_surface_state(cmd_buffer->device,
                                    set->desc_surface_state,
                                    ISL_FORMAT_R32G32B32A32_FLOAT,
                                    addr, layout->descriptor_buffer_size, 1);
   }

   return set;
}

void anv_CmdPushDescriptorSetKHR(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    uint32_t                                    descriptorWriteCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_SETS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_descriptor_set *set =
      anv_cmd_buffer_push_descriptor_set(cmd_buffer, pipelineBindPoint,
                                         set_layout, _set);
   if (!set)
      return;

   /* Go through the user supplied descriptors. */
   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            anv_descriptor_set_write_image_view(cmd_buffer->device, set,
                                                write->pImageInfo + j,
                                                write->descriptorType,
                                                write->dstBinding,
                                                write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer_view, bview,
                            write->pTexelBufferView[j]);

            anv_descriptor_set_write_buffer_view(cmd_buffer->device, set,
                                                 write->descriptorType,
                                                 bview,
                                                 write->dstBinding,
                                                 write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);

            anv_descriptor_set_write_buffer(cmd_buffer->device, set,
                                            &cmd_buffer->surface_state_stream,
                                            write->descriptorType,
                                            buffer,
                                            write->dstBinding,
                                            write->dstArrayElement + j,
                                            write->pBufferInfo[j].offset,
                                            write->pBufferInfo[j].range);
         }
         break;

      default:
         break;
      }
   }

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                      layout, _set, set, NULL, NULL);
}

void anv_CmdPushDescriptorSetWithTemplateKHR(
    VkCommandBuffer                             commandBuffer,
    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_descriptor_update_template, template,
                   descriptorUpdateTemplate);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_PUSH_DESCRIPTORS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_descriptor_set *set =
      anv_cmd_buffer_push_descriptor_set(cmd_buffer, template->bind_point,
                                         set_layout, _set);
   if (!set)
      return;

   anv_descriptor_set_write_template(cmd_buffer->device, set,
                                     &cmd_buffer->surface_state_stream,
                                     template, pData);

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, template->bind_point,
                                      layout, _set, set, NULL, NULL);
}

void anv_CmdSetDeviceMask(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    deviceMask)
{
   /* No-op */
}