/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd* entrypoints.
 * This file is concerned entirely with state emission and not with the
 * command buffer data structure itself.  As far as this file is concerned,
 * most of anv_cmd_buffer is magic.
 */

/* TODO: These are taken from GLES.  We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .viewport = {
      .count = 0,
   },
   .scissor = {
      .count = 0,
   },
   .line_width = 1.0f,
   .depth_bias = {
      .bias = 0.0f,
      .clamp = 0.0f,
      .slope = 0.0f,
   },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = {
      .min = 0.0f,
      .max = 1.0f,
   },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
};

void
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       uint32_t copy_mask)
{
   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
      dest->line_width = src->line_width;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
      dest->depth_bias = src->depth_bias;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
      dest->depth_bounds = src->depth_bounds;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
      dest->stencil_compare_mask = src->stencil_compare_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
      dest->stencil_write_mask = src->stencil_write_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
      dest->stencil_reference = src->stencil_reference;
}

static void
anv_cmd_state_init(struct anv_cmd_state *state)
{
   memset(&state->descriptors, 0, sizeof(state->descriptors));
   memset(&state->push_constants, 0, sizeof(state->push_constants));

   state->dirty = 0;
   state->vb_dirty = 0;
   state->descriptors_dirty = 0;
   state->push_constants_dirty = 0;
   state->pipeline = NULL;
   state->restart_index = UINT32_MAX;
   state->dynamic = default_dynamic_state;

   state->gen7.index_buffer = NULL;
}

static VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size)
{
   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];

   if (*ptr == NULL) {
      *ptr = anv_alloc(&cmd_buffer->pool->alloc, size, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else if ((*ptr)->size < size) {
      *ptr = anv_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   (*ptr)->size = size;

   return VK_SUCCESS;
}

#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))
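
/* For example (illustrative expansion only), ensuring room for the
 * "dynamic" field asks for a size that covers everything up to and
 * including that field:
 *
 *    anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, dynamic);
 *    // == anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage,
 *    //       offsetof(struct anv_push_constants, dynamic) +
 *    //       sizeof(cmd_buffer->state.push_constants[0]->dynamic));
 */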

static VkResult anv_create_cmd_buffer(
    struct anv_device *                         device,
    struct anv_cmd_pool *                       pool,
    VkCommandBufferLevel                        level,
    VkCommandBuffer*                            pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_block_pool);

   cmd_buffer->level = level;
   cmd_buffer->usage_flags = 0;

   anv_cmd_state_init(&cmd_buffer->state);

   if (pool != NULL) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer.
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}

VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->bufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS)
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);

   return result;
}

static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}

VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);

   anv_cmd_state_init(&cmd_buffer->state);

   return VK_SUCCESS;
}

void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      if (cmd_buffer->device->info.is_haswell)
         return gen75_cmd_buffer_emit_state_base_address(cmd_buffer);
      else
         return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 8:
      return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 9:
      return gen9_cmd_buffer_emit_state_base_address(cmd_buffer);
   default:
      unreachable("unsupported gen\n");
   }
}

VkResult anv_BeginCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    const VkCommandBufferBeginInfo*             pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);

   cmd_buffer->usage_flags = pBeginInfo->flags;

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
      cmd_buffer->state.framebuffer =
         anv_framebuffer_from_handle(pBeginInfo->framebuffer);
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->renderPass);

      struct anv_subpass *subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->subpass];

      anv_cmd_buffer_begin_subpass(cmd_buffer, subpass);
   }

   anv_cmd_buffer_emit_state_base_address(cmd_buffer);
   cmd_buffer->state.current_pipeline = UINT32_MAX;

   return VK_SUCCESS;
}

VkResult anv_EndCommandBuffer(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_device *device = cmd_buffer->device;

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      /* The algorithm used to compute the validate list is not threadsafe
       * as it uses the bo->index field.  We have to lock the device around
       * it.  Fortunately, the chances for contention here are probably
       * very low.
       */
      pthread_mutex_lock(&device->mutex);
      anv_cmd_buffer_prepare_execbuf(cmd_buffer);
      pthread_mutex_unlock(&device->mutex);
   }

   return VK_SUCCESS;
}

void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute_pipeline = pipeline;
      cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.pipeline = pipeline;
      cmd_buffer->state.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
      anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
                             &pipeline->dynamic_state,
                             pipeline->dynamic_state_mask);
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}

void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.viewport.count = viewportCount;
   memcpy(cmd_buffer->state.dynamic.viewport.viewports,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}

void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.scissor.count = scissorCount;
   memcpy(cmd_buffer->state.dynamic.scissor.scissors,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.line_width = lineWidth;
   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}

void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}

void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}

void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}

void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
   struct anv_descriptor_set_layout *set_layout;

   assert(firstSet + descriptorSetCount < MAX_SETS);

   uint32_t dynamic_slot = 0;
   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      set_layout = layout->set[firstSet + i].layout;

      if (cmd_buffer->state.descriptors[firstSet + i] != set) {
         cmd_buffer->state.descriptors[firstSet + i] = set;
         cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
      }

      if (set_layout->dynamic_offset_count > 0) {
         anv_foreach_stage(s, set_layout->shader_stages) {
            anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);

            struct anv_push_constants *push =
               cmd_buffer->state.push_constants[s];

            unsigned d = layout->set[firstSet + i].dynamic_offset_start;
            const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
            struct anv_descriptor *desc = set->descriptors;

            for (unsigned b = 0; b < set_layout->binding_count; b++) {
               if (set_layout->binding[b].dynamic_offset_index < 0)
                  continue;

               unsigned array_size = set_layout->binding[b].array_size;
               for (unsigned j = 0; j < array_size; j++) {
                  push->dynamic[d].offset = *(offsets++);
                  push->dynamic[d].range = (desc++)->range;
                  d++;
               }
            }
         }
         cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;
      }

      dynamic_slot += set_layout->dynamic_offset_count;
   }
}

void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    startBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up the vertex buffers since we need the
    * buffer stride from the pipeline. */

   assert(startBinding + bindingCount < MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[startBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[startBinding + i].offset = pOffsets[i];
      cmd_buffer->state.vb_dirty |= 1 << (startBinding + i);
   }
}

static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state, struct anv_bo *bo,
                        uint32_t offset)
{
   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8
    * and 9 for gen8+.  We only write the first dword for gen8+ here and
    * rely on the initial state to set the high bits to 0. */
   const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;

   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                      state.offset + dword * 4, bo, offset);
}
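
/* Illustration of the relocation target arithmetic: the reloc points at
 * the byte offset of the address dword within the surface state.  On
 * gen7, dword 1 gives state.offset + 1 * 4 = state.offset + 4; on gen8+,
 * dword 8 gives state.offset + 8 * 4 = state.offset + 32, i.e. the low
 * half of the 64-bit address pair in dwords 8-9.
 */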

static void
fill_descriptor_buffer_surface_state(struct anv_device *device, void *state,
                                     gl_shader_stage stage,
                                     VkDescriptorType type,
                                     uint32_t offset, uint32_t range)
{
   VkFormat format;
   uint32_t stride;

   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      if (device->instance->physicalDevice.compiler->scalar_stage[stage]) {
         stride = 4;
      } else {
         stride = 16;
      }
      format = VK_FORMAT_R32G32B32A32_SFLOAT;
      break;

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      stride = 1;
      format = VK_FORMAT_UNDEFINED;
      break;

   default:
      unreachable("Invalid descriptor type");
   }

   anv_fill_buffer_surface_state(device, state,
                                 anv_format_for_vk_format(format),
                                 offset, range, stride);
}
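
/* Design note (editor's reading): uniform buffers get a typed RGBA32F
 * surface so constant fetches return whole vec4s, and the element stride
 * differs because scalar-mode stages presumably index constants in
 * 4-byte units while vec4-mode stages index in 16-byte units.
 */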

static VkResult
anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                  gl_shader_stage stage,
                                  struct anv_state *bt_state)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline_layout *layout;
   uint32_t color_count, bias, state_offset;

   if (stage == MESA_SHADER_COMPUTE)
      layout = cmd_buffer->state.compute_pipeline->layout;
   else
      layout = cmd_buffer->state.pipeline->layout;

   if (stage == MESA_SHADER_FRAGMENT) {
      bias = MAX_RTS;
      color_count = subpass->color_count;
   } else {
      bias = 0;
      color_count = 0;
   }

   /* This is a little awkward: layout can be NULL but we still have to
    * allocate and set a binding table for the PS stage for render
    * targets. */
   uint32_t surface_count = layout ? layout->stage[stage].surface_count : 0;

   if (color_count + surface_count == 0)
      return VK_SUCCESS;

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t a = 0; a < color_count; a++) {
      const struct anv_image_view *iview =
         fb->attachments[subpass->color_attachments[a]];

      bt_map[a] = iview->color_rt_surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
                              iview->bo, iview->offset);
   }

   if (layout == NULL)
      return VK_SUCCESS;

   if (layout->stage[stage].image_count > 0) {
      VkResult result =
         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
      if (result != VK_SUCCESS)
         return result;

      cmd_buffer->state.push_constants_dirty |= 1 << stage;
   }

   uint32_t image = 0;
   for (uint32_t s = 0; s < layout->stage[stage].surface_count; s++) {
      struct anv_pipeline_binding *binding =
         &layout->stage[stage].surface_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      struct anv_state surface_state;
      struct anv_bo *bo;
      uint32_t bo_offset;

      switch (desc->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* Nothing for us to do here */
         continue;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
         bo = desc->buffer->bo;
         bo_offset = desc->buffer->offset + desc->offset;

         surface_state =
            anv_cmd_buffer_alloc_surface_state(cmd_buffer);

         fill_descriptor_buffer_surface_state(cmd_buffer->device,
                                              surface_state.map,
                                              stage, desc->type,
                                              bo_offset, desc->range);

         if (!cmd_buffer->device->info.has_llc)
            anv_state_clflush(surface_state);

         break;
      }

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         surface_state = desc->image_view->nonrt_surface_state;
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
         surface_state = desc->image_view->storage_surface_state;
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         anv_image_view_fill_image_param(cmd_buffer->device, desc->image_view,
                                         image_param);
         image_param->surface_idx = bias + s;
         break;
      }

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         assert(!"Unsupported descriptor type");
         break;

      default:
         assert(!"Invalid descriptor type");
         continue;
      }

      bt_map[bias + s] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }
   assert(image == layout->stage[stage].image_count);

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*bt_state);

   return VK_SUCCESS;
}
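
/* Resulting binding table layout for a fragment shader (illustrative):
 * entries [0, color_count) hold the subpass color attachment surfaces and
 * entries [bias, bias + surface_count) hold the descriptor surfaces, so
 * descriptor surface s always lands at slot bias + s regardless of how
 * many render targets the subpass actually uses.
 */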

static VkResult
anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
                             gl_shader_stage stage, struct anv_state *state)
{
   struct anv_pipeline_layout *layout;
   uint32_t sampler_count;

   if (stage == MESA_SHADER_COMPUTE)
      layout = cmd_buffer->state.compute_pipeline->layout;
   else
      layout = cmd_buffer->state.pipeline->layout;

   sampler_count = layout ? layout->stage[stage].sampler_count : 0;
   if (sampler_count == 0)
      return VK_SUCCESS;

   uint32_t size = sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t s = 0; s < layout->stage[stage].sampler_count; s++) {
      struct anv_pipeline_binding *binding =
         &layout->stage[stage].sampler_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
          desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         continue;

      struct anv_sampler *sampler = desc->sampler;

      /* This can happen if we have an unfilled slot since TYPE_SAMPLER
       * happens to be zero.
       */
      if (sampler == NULL)
         continue;

      memcpy(state->map + (s * 16),
             sampler->state, sizeof(sampler->state));
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*state);

   return VK_SUCCESS;
}

struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}

struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}
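
/* Callers typically use this to combine a partial hardware packet baked
 * at pipeline-creation time with the dynamically packed fields, OR'ing
 * the two dword arrays together before emitting the result (for example,
 * a 3DSTATE_SF template merged with the dynamic line width).
 */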

void
anv_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_subpass *subpass)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      gen7_cmd_buffer_begin_subpass(cmd_buffer, subpass);
      break;
   case 8:
      gen8_cmd_buffer_begin_subpass(cmd_buffer, subpass);
      break;
   case 9:
      gen9_cmd_buffer_begin_subpass(cmd_buffer, subpass);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}

void anv_CmdSetEvent(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void anv_CmdResetEvent(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void anv_CmdWaitEvents(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memBarrierCount,
    const void* const*                          ppMemBarriers)
{
   stub();
}

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[stage];
   struct brw_stage_prog_data *prog_data =
      cmd_buffer->state.pipeline->prog_data[stage];

   /* If we don't actually have any push constants, bail. */
   if (data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}
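
/* Note on the encoding: prog_data->param[i] does not point at data here;
 * the pointer value itself encodes a byte offset into struct
 * anv_push_constants.  For example, if param[2] encoded the value 16,
 * uniform slot 2 would be read from 16 bytes past the start of the
 * stage's push constant block.
 */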

struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   const unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   const unsigned push_constant_data_size =
      (local_id_dwords + prog_data->nr_params) * sizeof(gl_constant_value);
   const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   const unsigned param_aligned_count =
      reg_aligned_constant_size / sizeof(uint32_t);

   /* If we don't actually have any push constants, bail. */
   if (reg_aligned_constant_size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned threads = pipeline->cs_thread_width_max;
   const unsigned total_push_constants_size =
      reg_aligned_constant_size * threads;
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         total_push_constants_size,
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;

   brw_cs_fill_local_id_payload(cs_prog_data, u32_map, threads,
                                reg_aligned_constant_size);

   /* Setup uniform data for the first thread */
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[local_id_dwords + i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   /* Copy uniform data from the first thread to every other thread */
   const size_t uniform_data_size = prog_data->nr_params * sizeof(uint32_t);
   for (unsigned t = 1; t < threads; t++) {
      memcpy(&u32_map[t * param_aligned_count + local_id_dwords],
             &u32_map[local_id_dwords],
             uniform_data_size);
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}
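
/* Worked example of the per-thread layout (illustrative numbers): with
 * one local invocation ID register per thread (local_id_dwords = 8) and
 * nr_params = 6, push_constant_data_size = (8 + 6) * 4 = 56 bytes, which
 * rounds up to reg_aligned_constant_size = 64, i.e. param_aligned_count =
 * 16 dwords per thread.  Thread t's block thus starts at dword t * 16:
 * its own local ID payload followed by a copy of the shared uniforms.
 */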

void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);

      memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}

void anv_CmdExecuteCommands(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    commandBuffersCount,
    const VkCommandBuffer*                      pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);

   assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   anv_assert(primary->state.subpass == &primary->state.pass->subpasses[0]);

   for (uint32_t i = 0; i < commandBuffersCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);

      anv_cmd_buffer_add_secondary(primary, secondary);
   }
}

VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   anv_ResetCommandPool(_device, commandPool, 0);

   anv_free2(&device->alloc, pAllocator, pool);
}

VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   return VK_SUCCESS;
}

/**
 * Return NULL if the current subpass has no depthstencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;

   if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
      return NULL;

   const struct anv_image_view *iview =
      fb->attachments[subpass->depth_stencil_attachment];

   assert(anv_format_is_depth_or_stencil(iview->format));

   return iview;
}