/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "anv_private.h"

#include "vk_format_info.h"
/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd* entrypoints.
 * This file is concerned entirely with state emission and not with the
 * command buffer data structure itself.  As far as this file is concerned,
 * most of anv_cmd_buffer is magic.
 */
/* TODO: These are taken from GLES.  We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
};
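
/**
 * Copy the dynamic state fields selected by copy_mask (a bitmask of
 * VK_DYNAMIC_STATE_* bits) from src into dest, leaving the unselected
 * fields untouched.
 */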
void
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       uint32_t copy_mask)
{
   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
      dest->line_width = src->line_width;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
      dest->depth_bias = src->depth_bias;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
      dest->depth_bounds = src->depth_bounds;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
      dest->stencil_compare_mask = src->stencil_compare_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
      dest->stencil_write_mask = src->stencil_write_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
      dest->stencil_reference = src->stencil_reference;
}
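
/**
 * Reset the command buffer's per-recording state back to its defaults,
 * freeing the render-pass attachment array if one was allocated.
 */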
static void
anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   memset(&state->descriptors, 0, sizeof(state->descriptors));
   memset(&state->push_constants, 0, sizeof(state->push_constants));
   memset(state->binding_tables, 0, sizeof(state->binding_tables));
   memset(state->samplers, 0, sizeof(state->samplers));

   /* 0 isn't a valid config.  This ensures that we always configure L3$. */
   cmd_buffer->state.current_l3_config = 0;

   state->pending_pipe_bits = 0;
   state->descriptors_dirty = 0;
   state->push_constants_dirty = 0;
   state->pipeline = NULL;
   state->push_constant_stages = 0;
   state->restart_index = UINT32_MAX;
   state->dynamic = default_dynamic_state;
   state->need_query_wa = true;

   if (state->attachments != NULL) {
      anv_free(&cmd_buffer->pool->alloc, state->attachments);
      state->attachments = NULL;
   }

   state->gen7.index_buffer = NULL;
}
/**
 * Setup anv_cmd_state::attachments for vkCmdBeginRenderPass.
 */
void
anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                const VkRenderPassBeginInfo *info)
{
   struct anv_cmd_state *state = &cmd_buffer->state;
   ANV_FROM_HANDLE(anv_render_pass, pass, info->renderPass);

   anv_free(&cmd_buffer->pool->alloc, state->attachments);

   if (pass->attachment_count == 0) {
      state->attachments = NULL;
      return;
   }

   state->attachments = anv_alloc(&cmd_buffer->pool->alloc,
                                  pass->attachment_count *
                                       sizeof(state->attachments[0]),
                                  8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (state->attachments == NULL) {
      /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
      abort();
   }

   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      struct anv_render_pass_attachment *att = &pass->attachments[i];
      VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
      VkImageAspectFlags clear_aspects = 0;

      if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
         /* color attachment */
         if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
         }
      } else {
         /* depthstencil attachment */
         if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
             att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
         }
         if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
             att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
         }
      }

      state->attachments[i].pending_clear_aspects = clear_aspects;
      if (clear_aspects) {
         assert(info->clearValueCount > i);
         state->attachments[i].clear_value = info->pClearValues[i];
      }
   }
}
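
/**
 * Grow (or first allocate) the push constant block for the given shader
 * stage so it can hold at least 'size' bytes.  Used by the
 * anv_cmd_buffer_ensure_push_constant_field() calls seen later in this
 * file.
 */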
VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size)
{
   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];

   if (*ptr == NULL) {
      *ptr = anv_alloc(&cmd_buffer->pool->alloc, size, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else if ((*ptr)->size < size) {
      *ptr = anv_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   (*ptr)->size = size;

   return VK_SUCCESS;
}
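
/**
 * Allocate and initialize a single command buffer: the batch BO chain, the
 * surface and dynamic state streams, and (if a pool was given) membership
 * in the pool's buffer list.
 */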
static VkResult anv_create_cmd_buffer(
    struct anv_device *                         device,
    struct anv_cmd_pool *                       pool,
    VkCommandBufferLevel                        level,
    VkCommandBuffer*                            pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;
   cmd_buffer->state.attachments = NULL;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_block_pool);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}
VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS)
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);

   return result;
}
static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}
VkResult
anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
   cmd_buffer->usage_flags = 0;
   cmd_buffer->state.current_pipeline = UINT32_MAX;
   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
   anv_cmd_state_reset(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &cmd_buffer->device->surface_state_block_pool);

   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &cmd_buffer->device->dynamic_state_block_pool);

   return VK_SUCCESS;
}
VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   return anv_cmd_buffer_reset(cmd_buffer);
}
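
/**
 * Dispatch STATE_BASE_ADDRESS emission to the generation-specific
 * implementation for the current hardware.
 */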
void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      if (cmd_buffer->device->info.is_haswell)
         return gen75_cmd_buffer_emit_state_base_address(cmd_buffer);
      else
         return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 8:
      return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 9:
      return gen9_cmd_buffer_emit_state_base_address(cmd_buffer);
   default:
      unreachable("unsupported gen\n");
   }
}
void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute_pipeline = pipeline;
      cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.pipeline = pipeline;
      cmd_buffer->state.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
      cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
      anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
                             &pipeline->dynamic_state,
                             pipeline->dynamic_state_mask);
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}
void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstViewport,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstViewport + viewportCount;
   if (cmd_buffer->state.dynamic.viewport.count < total_count)
      cmd_buffer->state.dynamic.viewport.count = total_count;

   memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstScissor,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstScissor + scissorCount;
   if (cmd_buffer->state.dynamic.scissor.count < total_count)
      cmd_buffer->state.dynamic.scissor.count = total_count;

   memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}
void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.line_width = lineWidth;
   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}
void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}
void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}
void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}
void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}
void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}
void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
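
/**
 * Bind the given descriptor sets and, for any set containing dynamic
 * buffers, fold the dynamic offsets (and buffer-view ranges) into the
 * per-stage push constants.
 */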
void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
   struct anv_descriptor_set_layout *set_layout;

   assert(firstSet + descriptorSetCount <= MAX_SETS);

   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      set_layout = layout->set[firstSet + i].layout;

      if (cmd_buffer->state.descriptors[firstSet + i] != set) {
         cmd_buffer->state.descriptors[firstSet + i] = set;
         cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
      }

      if (set_layout->dynamic_offset_count > 0) {
         anv_foreach_stage(s, set_layout->shader_stages) {
            anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);

            struct anv_push_constants *push =
               cmd_buffer->state.push_constants[s];

            unsigned d = layout->set[firstSet + i].dynamic_offset_start;
            const uint32_t *offsets = pDynamicOffsets;
            struct anv_descriptor *desc = set->descriptors;

            for (unsigned b = 0; b < set_layout->binding_count; b++) {
               if (set_layout->binding[b].dynamic_offset_index < 0)
                  continue;

               unsigned array_size = set_layout->binding[b].array_size;
               for (unsigned j = 0; j < array_size; j++) {
                  push->dynamic[d].offset = *(offsets++);
                  push->dynamic[d].range = (desc->buffer_view) ?
                                           desc->buffer_view->range : 0;
                  desc++;
                  d++;
               }
            }
         }

         cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;
      }
   }
}
void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up vertex buffer since we need the buffer
    * stride from the pipeline.
    */

   assert(firstBinding + bindingCount <= MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[firstBinding + i].offset = pOffsets[i];
      cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
   }
}
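
/**
 * Record a relocation so the kernel can patch the buffer address into the
 * SURFACE_STATE entry at batch execution time.
 */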
static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state,
                        struct anv_bo *bo, uint32_t offset)
{
   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
    * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
    * the initial state to set the high bits to 0.
    */
   const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;

   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                      state.offset + dword * 4, bo, offset);
}
enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return ISL_FORMAT_R32G32B32A32_FLOAT;

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return ISL_FORMAT_RAW;

   default:
      unreachable("Invalid descriptor type");
   }
}
static struct anv_state
anv_cmd_buffer_alloc_null_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                        struct anv_framebuffer *fb)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      if (cmd_buffer->device->info.is_haswell) {
         return gen75_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
      } else {
         return gen7_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
      }
   case 8:
      return gen8_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
   case 9:
      return gen9_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
   default:
      unreachable("Invalid hardware generation");
   }
}
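
/**
 * Build the binding table for one shader stage: one entry per surface in
 * the stage's bind map, plus (for compute) a leading slot for the
 * gl_NumWorkGroups buffer.  Returns VK_ERROR_OUT_OF_DEVICE_MEMORY when the
 * current binding table block is exhausted so the caller can grow it and
 * retry.
 */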
static VkResult
anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                  gl_shader_stage stage,
                                  struct anv_state *bt_state)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline *pipeline;
   uint32_t bias, state_offset;

   switch (stage) {
   case MESA_SHADER_COMPUTE:
      pipeline = cmd_buffer->state.compute_pipeline;
      bias = 1;
      break;
   default:
      pipeline = cmd_buffer->state.pipeline;
      bias = 0;
      break;
   }

   if (!anv_pipeline_has_stage(pipeline, stage)) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
   if (bias + map->surface_count == 0) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + map->surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   if (stage == MESA_SHADER_COMPUTE &&
       get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
      struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
      uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;

      struct anv_state surface_state;
      surface_state = anv_cmd_buffer_alloc_surface_state(cmd_buffer);

      const enum isl_format format =
         anv_isl_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
      anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
                                    format, bo_offset, 12, 1);

      bt_map[0] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }

   if (map->surface_count == 0)
      goto out;

   if (map->image_count > 0) {
      VkResult result =
         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
      if (result != VK_SUCCESS)
         return result;

      cmd_buffer->state.push_constants_dirty |= 1 << stage;
   }

   uint32_t image = 0;
   for (uint32_t s = 0; s < map->surface_count; s++) {
      struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];

      struct anv_state surface_state;
      struct anv_bo *bo;
      uint32_t bo_offset;

      if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) {
         /* Color attachment binding */
         assert(stage == MESA_SHADER_FRAGMENT);
         assert(binding->binding == 0);
         if (binding->index < subpass->color_count) {
            const struct anv_image_view *iview =
               fb->attachments[subpass->color_attachments[binding->index]];

            assert(iview->color_rt_surface_state.alloc_size);
            surface_state = iview->color_rt_surface_state;
            add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
                                    iview->bo, iview->offset);
         } else {
            /* Null render target */
            struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
            surface_state =
               anv_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
         }

         bt_map[bias + s] = surface_state.offset + state_offset;
         continue;
      }

      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
      struct anv_descriptor *desc = &set->descriptors[offset + binding->index];

      switch (desc->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* Nothing for us to do here */
         continue;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         surface_state = desc->image_view->sampler_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
         surface_state = desc->image_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->image_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;
      }

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         surface_state = desc->buffer_view->surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
         surface_state = desc->buffer_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->buffer_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;
      }

      default:
         assert(!"Invalid descriptor type");
         continue;
      }

      bt_map[bias + s] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }
   assert(image == map->image_count);

 out:
   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*bt_state);

   return VK_SUCCESS;
}
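
/**
 * Gather the SAMPLER_STATE structures (16 bytes each) for every sampler
 * binding in the stage's bind map into a single dynamic-state allocation.
 */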
static VkResult
anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
                             gl_shader_stage stage, struct anv_state *state)
{
   struct anv_pipeline *pipeline;

   if (stage == MESA_SHADER_COMPUTE)
      pipeline = cmd_buffer->state.compute_pipeline;
   else
      pipeline = cmd_buffer->state.pipeline;

   if (!anv_pipeline_has_stage(pipeline, stage)) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
   if (map->sampler_count == 0) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   uint32_t size = map->sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t s = 0; s < map->sampler_count; s++) {
      struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
      struct anv_descriptor *desc = &set->descriptors[offset + binding->index];

      if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
          desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         continue;

      struct anv_sampler *sampler = desc->sampler;

      /* This can happen if we have an unfilled slot since TYPE_SAMPLER
       * happens to be zero.
       */
      if (sampler == NULL)
         continue;

      memcpy(state->map + (s * 16),
             sampler->state, sizeof(sampler->state));
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*state);

   return VK_SUCCESS;
}
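
/**
 * Emit samplers and binding tables for every dirty stage, growing into a
 * fresh binding table block (and re-emitting state base addresses) if the
 * current block runs out of space.  Returns the mask of stages that were
 * flushed so the caller can emit the corresponding table pointers.
 */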
uint32_t
anv_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
                              cmd_buffer->state.pipeline->active_stages;

   VkResult result = VK_SUCCESS;
   anv_foreach_stage(s, dirty) {
      result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
                                            &cmd_buffer->state.samplers[s]);
      if (result != VK_SUCCESS)
         break;
      result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
                                                 &cmd_buffer->state.binding_tables[s]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      /* Re-emit all active binding tables */
      dirty |= cmd_buffer->state.pipeline->active_stages;
      anv_foreach_stage(s, dirty) {
         result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
                                               &cmd_buffer->state.samplers[s]);
         if (result != VK_SUCCESS)
            return result;
         result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
                                                    &cmd_buffer->state.binding_tables[s]);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   cmd_buffer->state.descriptors_dirty &= ~dirty;

   return dirty;
}
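
/**
 * Copy an arbitrary blob of state into the dynamic state stream and return
 * its allocation.
 */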
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}
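
/**
 * OR together two dword arrays (typically a pipeline's pre-packed state
 * and per-draw values) into a fresh dynamic state allocation.
 */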
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}
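
/**
 * Flatten the stage's push constants into the layout the shader expects:
 * one 32-bit value per entry in prog_data->param, each read from the
 * client-visible push constant block at the param's recorded offset.
 */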
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   /* If we don't have this stage, bail. */
   if (!anv_pipeline_has_stage(cmd_buffer->state.pipeline, stage))
      return (struct anv_state) { .offset = 0 };

   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[stage];
   const struct brw_stage_prog_data *prog_data =
      anv_shader_bin_get_prog_data(cmd_buffer->state.pipeline->shaders[stage]);

   /* If we don't actually have any push constants, bail. */
   if (data == NULL || prog_data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}
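
/**
 * Like anv_cmd_buffer_push_constants(), but lays the data out the way the
 * compute stage expects it: a cross-thread block followed by one per-thread
 * block per HW thread, with the thread-local ID slot filled per thread.
 */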
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   /* If we don't actually have any push constants, bail. */
   if (cs_prog_data->push.total.size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned push_constant_alignment =
      cmd_buffer->device->info.gen < 8 ? 32 : 64;
   const unsigned aligned_total_push_constants_size =
      ALIGN(cs_prog_data->push.total.size, push_constant_alignment);
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         aligned_total_push_constants_size,
                                         push_constant_alignment);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;

   if (cs_prog_data->push.cross_thread.size > 0) {
      assert(cs_prog_data->thread_local_id_index < 0 ||
             cs_prog_data->thread_local_id_index >=
                cs_prog_data->push.cross_thread.dwords);
      for (unsigned i = 0;
           i < cs_prog_data->push.cross_thread.dwords;
           i++) {
         uint32_t offset = (uintptr_t)prog_data->param[i];
         u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
      }
   }

   if (cs_prog_data->push.per_thread.size > 0) {
      for (unsigned t = 0; t < cs_prog_data->threads; t++) {
         unsigned dst =
            8 * (cs_prog_data->push.per_thread.regs * t +
                 cs_prog_data->push.cross_thread.regs);
         unsigned src = cs_prog_data->push.cross_thread.dwords;
         for ( ; src < prog_data->nr_params; src++, dst++) {
            if (src != cs_prog_data->thread_local_id_index) {
               uint32_t offset = (uintptr_t)prog_data->param[src];
               u32_map[dst] = *(uint32_t *)((uint8_t *)data + offset);
            } else {
               u32_map[dst] = t * cs_prog_data->simd_size;
            }
         }
      }
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}
void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);

      memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}
VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}
void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   anv_free2(&device->alloc, pAllocator, pool);
}
VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   list_for_each_entry(struct anv_cmd_buffer, cmd_buffer,
                       &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_reset(cmd_buffer);
   }

   return VK_SUCCESS;
}
/**
 * Return NULL if the current subpass has no depthstencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;

   if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
      return NULL;

   const struct anv_image_view *iview =
      fb->attachments[subpass->depth_stencil_attachment];

   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
                                VK_IMAGE_ASPECT_STENCIL_BIT));

   return iview;
}