/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
30 #include "anv_private.h"
/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd*
 * entrypoints.  This file is concerned entirely with state emission and
 * not with the command buffer data structure itself.  As far as this file
 * is concerned, most of anv_cmd_buffer is magic.
 */
42 anv_cmd_state_init(struct anv_cmd_state
*state
)
44 state
->rs_state
= NULL
;
45 state
->vp_state
= NULL
;
46 state
->cb_state
= NULL
;
47 state
->ds_state
= NULL
;
48 memset(&state
->state_vf
, 0, sizeof(state
->state_vf
));
49 memset(&state
->descriptors
, 0, sizeof(state
->descriptors
));
53 state
->descriptors_dirty
= 0;
54 state
->pipeline
= NULL
;
55 state
->vp_state
= NULL
;
56 state
->rs_state
= NULL
;
57 state
->ds_state
= NULL
;
59 state
->gen7
.index_buffer
= NULL
;
62 VkResult
anv_CreateCommandBuffer(
64 const VkCmdBufferCreateInfo
* pCreateInfo
,
65 VkCmdBuffer
* pCmdBuffer
)
67 ANV_FROM_HANDLE(anv_device
, device
, _device
);
68 ANV_FROM_HANDLE(anv_cmd_pool
, pool
, pCreateInfo
->cmdPool
);
69 struct anv_cmd_buffer
*cmd_buffer
;
72 cmd_buffer
= anv_device_alloc(device
, sizeof(*cmd_buffer
), 8,
73 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
74 if (cmd_buffer
== NULL
)
75 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
77 cmd_buffer
->device
= device
;
79 result
= anv_cmd_buffer_init_batch_bo_chain(cmd_buffer
);
80 if (result
!= VK_SUCCESS
)
83 anv_state_stream_init(&cmd_buffer
->surface_state_stream
,
84 &device
->surface_state_block_pool
);
85 anv_state_stream_init(&cmd_buffer
->dynamic_state_stream
,
86 &device
->dynamic_state_block_pool
);
88 cmd_buffer
->level
= pCreateInfo
->level
;
89 cmd_buffer
->opt_flags
= 0;
91 anv_cmd_state_init(&cmd_buffer
->state
);
94 list_addtail(&cmd_buffer
->pool_link
, &pool
->cmd_buffers
);
96 /* Init the pool_link so we can safefly call list_del when we destroy
99 list_inithead(&cmd_buffer
->pool_link
);
102 *pCmdBuffer
= anv_cmd_buffer_to_handle(cmd_buffer
);
106 fail
: anv_device_free(device
, cmd_buffer
);
111 VkResult
anv_DestroyCommandBuffer(
113 VkCmdBuffer _cmd_buffer
)
115 ANV_FROM_HANDLE(anv_device
, device
, _device
);
116 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, _cmd_buffer
);
118 list_del(&cmd_buffer
->pool_link
);
120 anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer
);
122 anv_state_stream_finish(&cmd_buffer
->surface_state_stream
);
123 anv_state_stream_finish(&cmd_buffer
->dynamic_state_stream
);
124 anv_device_free(device
, cmd_buffer
);
129 VkResult
anv_ResetCommandBuffer(
130 VkCmdBuffer cmdBuffer
,
131 VkCmdBufferResetFlags flags
)
133 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, cmdBuffer
);
135 anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer
);
137 anv_cmd_state_init(&cmd_buffer
->state
);
143 anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer
*cmd_buffer
)
145 switch (cmd_buffer
->device
->info
.gen
) {
147 return gen7_cmd_buffer_emit_state_base_address(cmd_buffer
);
149 return gen8_cmd_buffer_emit_state_base_address(cmd_buffer
);
151 unreachable("unsupported gen\n");
155 VkResult
anv_BeginCommandBuffer(
156 VkCmdBuffer cmdBuffer
,
157 const VkCmdBufferBeginInfo
* pBeginInfo
)
159 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, cmdBuffer
);
161 cmd_buffer
->opt_flags
= pBeginInfo
->flags
;
163 if (cmd_buffer
->level
== VK_CMD_BUFFER_LEVEL_SECONDARY
) {
164 cmd_buffer
->state
.framebuffer
=
165 anv_framebuffer_from_handle(pBeginInfo
->framebuffer
);
166 cmd_buffer
->state
.pass
=
167 anv_render_pass_from_handle(pBeginInfo
->renderPass
);
169 /* FIXME: We shouldn't be starting on the first subpass */
170 anv_cmd_buffer_begin_subpass(cmd_buffer
,
171 &cmd_buffer
->state
.pass
->subpasses
[0]);
174 anv_cmd_buffer_emit_state_base_address(cmd_buffer
);
175 cmd_buffer
->state
.current_pipeline
= UINT32_MAX
;
180 VkResult
anv_EndCommandBuffer(
181 VkCmdBuffer cmdBuffer
)
183 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, cmdBuffer
);
184 struct anv_device
*device
= cmd_buffer
->device
;
186 anv_cmd_buffer_end_batch_buffer(cmd_buffer
);
188 if (cmd_buffer
->level
== VK_CMD_BUFFER_LEVEL_PRIMARY
) {
189 /* The algorithm used to compute the validate list is not threadsafe as
190 * it uses the bo->index field. We have to lock the device around it.
191 * Fortunately, the chances for contention here are probably very low.
193 pthread_mutex_lock(&device
->mutex
);
194 anv_cmd_buffer_prepare_execbuf(cmd_buffer
);
195 pthread_mutex_unlock(&device
->mutex
);
201 void anv_CmdBindPipeline(
202 VkCmdBuffer cmdBuffer
,
203 VkPipelineBindPoint pipelineBindPoint
,
204 VkPipeline _pipeline
)
206 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, cmdBuffer
);
207 ANV_FROM_HANDLE(anv_pipeline
, pipeline
, _pipeline
);
209 switch (pipelineBindPoint
) {
210 case VK_PIPELINE_BIND_POINT_COMPUTE
:
211 cmd_buffer
->state
.compute_pipeline
= pipeline
;
212 cmd_buffer
->state
.compute_dirty
|= ANV_CMD_BUFFER_PIPELINE_DIRTY
;
215 case VK_PIPELINE_BIND_POINT_GRAPHICS
:
216 cmd_buffer
->state
.pipeline
= pipeline
;
217 cmd_buffer
->state
.vb_dirty
|= pipeline
->vb_used
;
218 cmd_buffer
->state
.dirty
|= ANV_CMD_BUFFER_PIPELINE_DIRTY
;
222 assert(!"invalid bind point");
227 void anv_CmdBindDynamicViewportState(
228 VkCmdBuffer cmdBuffer
,
229 VkDynamicViewportState dynamicViewportState
)
231 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, cmdBuffer
);
232 ANV_FROM_HANDLE(anv_dynamic_vp_state
, vp_state
, dynamicViewportState
);
234 cmd_buffer
->state
.vp_state
= vp_state
;
235 cmd_buffer
->state
.dirty
|= ANV_CMD_BUFFER_VP_DIRTY
;
238 void anv_CmdBindDynamicRasterState(
239 VkCmdBuffer cmdBuffer
,
240 VkDynamicRasterState dynamicRasterState
)
242 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, cmdBuffer
);
243 ANV_FROM_HANDLE(anv_dynamic_rs_state
, rs_state
, dynamicRasterState
);
245 cmd_buffer
->state
.rs_state
= rs_state
;
246 cmd_buffer
->state
.dirty
|= ANV_CMD_BUFFER_RS_DIRTY
;
249 void anv_CmdBindDynamicColorBlendState(
250 VkCmdBuffer cmdBuffer
,
251 VkDynamicColorBlendState dynamicColorBlendState
)
253 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, cmdBuffer
);
254 ANV_FROM_HANDLE(anv_dynamic_cb_state
, cb_state
, dynamicColorBlendState
);
256 cmd_buffer
->state
.cb_state
= cb_state
;
257 cmd_buffer
->state
.dirty
|= ANV_CMD_BUFFER_CB_DIRTY
;
260 void anv_CmdBindDynamicDepthStencilState(
261 VkCmdBuffer cmdBuffer
,
262 VkDynamicDepthStencilState dynamicDepthStencilState
)
264 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, cmdBuffer
);
265 ANV_FROM_HANDLE(anv_dynamic_ds_state
, ds_state
, dynamicDepthStencilState
);
267 cmd_buffer
->state
.ds_state
= ds_state
;
268 cmd_buffer
->state
.dirty
|= ANV_CMD_BUFFER_DS_DIRTY
;
271 void anv_CmdBindDescriptorSets(
272 VkCmdBuffer cmdBuffer
,
273 VkPipelineBindPoint pipelineBindPoint
,
274 VkPipelineLayout _layout
,
277 const VkDescriptorSet
* pDescriptorSets
,
278 uint32_t dynamicOffsetCount
,
279 const uint32_t* pDynamicOffsets
)
281 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, cmdBuffer
);
282 ANV_FROM_HANDLE(anv_pipeline_layout
, layout
, _layout
);
283 struct anv_descriptor_set_layout
*set_layout
;
285 assert(firstSet
+ setCount
< MAX_SETS
);
287 uint32_t dynamic_slot
= 0;
288 for (uint32_t i
= 0; i
< setCount
; i
++) {
289 ANV_FROM_HANDLE(anv_descriptor_set
, set
, pDescriptorSets
[i
]);
290 set_layout
= layout
->set
[firstSet
+ i
].layout
;
292 cmd_buffer
->state
.descriptors
[firstSet
+ i
].set
= set
;
294 assert(set_layout
->num_dynamic_buffers
<
295 ARRAY_SIZE(cmd_buffer
->state
.descriptors
[0].dynamic_offsets
));
296 memcpy(cmd_buffer
->state
.descriptors
[firstSet
+ i
].dynamic_offsets
,
297 pDynamicOffsets
+ dynamic_slot
,
298 set_layout
->num_dynamic_buffers
* sizeof(*pDynamicOffsets
));
300 cmd_buffer
->state
.descriptors_dirty
|= set_layout
->shader_stages
;
302 dynamic_slot
+= set_layout
->num_dynamic_buffers
;
306 void anv_CmdBindVertexBuffers(
307 VkCmdBuffer cmdBuffer
,
308 uint32_t startBinding
,
309 uint32_t bindingCount
,
310 const VkBuffer
* pBuffers
,
311 const VkDeviceSize
* pOffsets
)
313 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, cmdBuffer
);
314 struct anv_vertex_binding
*vb
= cmd_buffer
->state
.vertex_bindings
;
316 /* We have to defer setting up vertex buffer since we need the buffer
317 * stride from the pipeline. */
319 assert(startBinding
+ bindingCount
< MAX_VBS
);
320 for (uint32_t i
= 0; i
< bindingCount
; i
++) {
321 vb
[startBinding
+ i
].buffer
= anv_buffer_from_handle(pBuffers
[i
]);
322 vb
[startBinding
+ i
].offset
= pOffsets
[i
];
323 cmd_buffer
->state
.vb_dirty
|= 1 << (startBinding
+ i
);
328 add_surface_state_reloc(struct anv_cmd_buffer
*cmd_buffer
,
329 struct anv_state state
, struct anv_bo
*bo
, uint32_t offset
)
331 /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
332 * 9 for gen8+. We only write the first dword for gen8+ here and rely on
333 * the initial state to set the high bits to 0. */
335 const uint32_t dword
= cmd_buffer
->device
->info
.gen
< 8 ? 1 : 8;
337 *(uint32_t *)(state
.map
+ dword
* 4) =
338 anv_reloc_list_add(anv_cmd_buffer_current_surface_relocs(cmd_buffer
),
339 cmd_buffer
->device
, state
.offset
+ dword
* 4, bo
, offset
);
343 anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer
*cmd_buffer
,
344 unsigned stage
, struct anv_state
*bt_state
)
346 struct anv_framebuffer
*fb
= cmd_buffer
->state
.framebuffer
;
347 struct anv_subpass
*subpass
= cmd_buffer
->state
.subpass
;
348 struct anv_pipeline_layout
*layout
;
349 uint32_t attachments
, bias
, size
;
351 if (stage
== VK_SHADER_STAGE_COMPUTE
)
352 layout
= cmd_buffer
->state
.compute_pipeline
->layout
;
354 layout
= cmd_buffer
->state
.pipeline
->layout
;
356 if (stage
== VK_SHADER_STAGE_FRAGMENT
) {
358 attachments
= subpass
->color_count
;
364 /* This is a little awkward: layout can be NULL but we still have to
365 * allocate and set a binding table for the PS stage for render
367 uint32_t surface_count
= layout
? layout
->stage
[stage
].surface_count
: 0;
369 if (attachments
+ surface_count
== 0)
372 size
= (bias
+ surface_count
) * sizeof(uint32_t);
373 *bt_state
= anv_cmd_buffer_alloc_surface_state(cmd_buffer
, size
, 32);
374 uint32_t *bt_map
= bt_state
->map
;
376 if (bt_state
->map
== NULL
)
377 return VK_ERROR_OUT_OF_DEVICE_MEMORY
;
379 /* This is highly annoying. The Vulkan spec puts the depth-stencil
380 * attachments in with the color attachments. Unfortunately, thanks to
381 * other aspects of the API, we cana't really saparate them before this
382 * point. Therefore, we have to walk all of the attachments but only
383 * put the color attachments into the binding table.
385 for (uint32_t a
= 0; a
< attachments
; a
++) {
386 const struct anv_attachment_view
*attachment
=
387 fb
->attachments
[subpass
->color_attachments
[a
]];
389 assert(attachment
->attachment_type
== ANV_ATTACHMENT_VIEW_TYPE_COLOR
);
390 const struct anv_color_attachment_view
*view
=
391 (const struct anv_color_attachment_view
*)attachment
;
393 struct anv_state state
=
394 anv_cmd_buffer_alloc_surface_state(cmd_buffer
, 64, 64);
396 if (state
.map
== NULL
)
397 return VK_ERROR_OUT_OF_DEVICE_MEMORY
;
399 memcpy(state
.map
, view
->view
.surface_state
.map
, 64);
401 add_surface_state_reloc(cmd_buffer
, state
, view
->view
.bo
, view
->view
.offset
);
403 bt_map
[a
] = state
.offset
;
409 for (uint32_t set
= 0; set
< layout
->num_sets
; set
++) {
410 struct anv_descriptor_set_binding
*d
= &cmd_buffer
->state
.descriptors
[set
];
411 struct anv_descriptor_set_layout
*set_layout
= layout
->set
[set
].layout
;
412 struct anv_descriptor_slot
*surface_slots
=
413 set_layout
->stage
[stage
].surface_start
;
415 uint32_t start
= bias
+ layout
->set
[set
].surface_start
[stage
];
417 for (uint32_t b
= 0; b
< set_layout
->stage
[stage
].surface_count
; b
++) {
418 struct anv_surface_view
*view
=
419 d
->set
->descriptors
[surface_slots
[b
].index
].view
;
424 struct anv_state state
=
425 anv_cmd_buffer_alloc_surface_state(cmd_buffer
, 64, 64);
427 if (state
.map
== NULL
)
428 return VK_ERROR_OUT_OF_DEVICE_MEMORY
;
431 if (surface_slots
[b
].dynamic_slot
>= 0) {
432 uint32_t dynamic_offset
=
433 d
->dynamic_offsets
[surface_slots
[b
].dynamic_slot
];
435 offset
= view
->offset
+ dynamic_offset
;
436 anv_fill_buffer_surface_state(cmd_buffer
->device
,
437 state
.map
, view
->format
, offset
,
438 view
->range
- dynamic_offset
);
440 offset
= view
->offset
;
441 memcpy(state
.map
, view
->surface_state
.map
, 64);
444 add_surface_state_reloc(cmd_buffer
, state
, view
->bo
, offset
);
446 bt_map
[start
+ b
] = state
.offset
;
454 anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer
*cmd_buffer
,
455 unsigned stage
, struct anv_state
*state
)
457 struct anv_pipeline_layout
*layout
;
458 uint32_t sampler_count
;
460 if (stage
== VK_SHADER_STAGE_COMPUTE
)
461 layout
= cmd_buffer
->state
.compute_pipeline
->layout
;
463 layout
= cmd_buffer
->state
.pipeline
->layout
;
465 sampler_count
= layout
? layout
->stage
[stage
].sampler_count
: 0;
466 if (sampler_count
== 0)
469 uint32_t size
= sampler_count
* 16;
470 *state
= anv_cmd_buffer_alloc_dynamic_state(cmd_buffer
, size
, 32);
472 if (state
->map
== NULL
)
473 return VK_ERROR_OUT_OF_DEVICE_MEMORY
;
475 for (uint32_t set
= 0; set
< layout
->num_sets
; set
++) {
476 struct anv_descriptor_set_binding
*d
= &cmd_buffer
->state
.descriptors
[set
];
477 struct anv_descriptor_set_layout
*set_layout
= layout
->set
[set
].layout
;
478 struct anv_descriptor_slot
*sampler_slots
=
479 set_layout
->stage
[stage
].sampler_start
;
481 uint32_t start
= layout
->set
[set
].sampler_start
[stage
];
483 for (uint32_t b
= 0; b
< set_layout
->stage
[stage
].sampler_count
; b
++) {
484 struct anv_sampler
*sampler
=
485 d
->set
->descriptors
[sampler_slots
[b
].index
].sampler
;
490 memcpy(state
->map
+ (start
+ b
) * 16,
491 sampler
->state
, sizeof(sampler
->state
));
499 flush_descriptor_set(struct anv_cmd_buffer
*cmd_buffer
, uint32_t stage
)
501 struct anv_state surfaces
= { 0, }, samplers
= { 0, };
504 result
= anv_cmd_buffer_emit_samplers(cmd_buffer
, stage
, &samplers
);
505 if (result
!= VK_SUCCESS
)
507 result
= anv_cmd_buffer_emit_binding_table(cmd_buffer
, stage
, &surfaces
);
508 if (result
!= VK_SUCCESS
)
511 static const uint32_t sampler_state_opcodes
[] = {
512 [VK_SHADER_STAGE_VERTEX
] = 43,
513 [VK_SHADER_STAGE_TESS_CONTROL
] = 44, /* HS */
514 [VK_SHADER_STAGE_TESS_EVALUATION
] = 45, /* DS */
515 [VK_SHADER_STAGE_GEOMETRY
] = 46,
516 [VK_SHADER_STAGE_FRAGMENT
] = 47,
517 [VK_SHADER_STAGE_COMPUTE
] = 0,
520 static const uint32_t binding_table_opcodes
[] = {
521 [VK_SHADER_STAGE_VERTEX
] = 38,
522 [VK_SHADER_STAGE_TESS_CONTROL
] = 39,
523 [VK_SHADER_STAGE_TESS_EVALUATION
] = 40,
524 [VK_SHADER_STAGE_GEOMETRY
] = 41,
525 [VK_SHADER_STAGE_FRAGMENT
] = 42,
526 [VK_SHADER_STAGE_COMPUTE
] = 0,
529 if (samplers
.alloc_size
> 0) {
530 anv_batch_emit(&cmd_buffer
->batch
,
531 GEN7_3DSTATE_SAMPLER_STATE_POINTERS_VS
,
532 ._3DCommandSubOpcode
= sampler_state_opcodes
[stage
],
533 .PointertoVSSamplerState
= samplers
.offset
);
536 if (surfaces
.alloc_size
> 0) {
537 anv_batch_emit(&cmd_buffer
->batch
,
538 GEN7_3DSTATE_BINDING_TABLE_POINTERS_VS
,
539 ._3DCommandSubOpcode
= binding_table_opcodes
[stage
],
540 .PointertoVSBindingTable
= surfaces
.offset
);
547 anv_flush_descriptor_sets(struct anv_cmd_buffer
*cmd_buffer
)
549 uint32_t s
, dirty
= cmd_buffer
->state
.descriptors_dirty
&
550 cmd_buffer
->state
.pipeline
->active_stages
;
552 VkResult result
= VK_SUCCESS
;
553 for_each_bit(s
, dirty
) {
554 result
= flush_descriptor_set(cmd_buffer
, s
);
555 if (result
!= VK_SUCCESS
)
559 if (result
!= VK_SUCCESS
) {
560 assert(result
== VK_ERROR_OUT_OF_DEVICE_MEMORY
);
562 result
= anv_cmd_buffer_new_surface_state_bo(cmd_buffer
);
563 assert(result
== VK_SUCCESS
);
565 /* Re-emit state base addresses so we get the new surface state base
566 * address before we start emitting binding tables etc.
568 anv_cmd_buffer_emit_state_base_address(cmd_buffer
);
570 /* Re-emit all active binding tables */
571 for_each_bit(s
, cmd_buffer
->state
.pipeline
->active_stages
) {
572 result
= flush_descriptor_set(cmd_buffer
, s
);
574 /* It had better succeed this time */
575 assert(result
== VK_SUCCESS
);
579 cmd_buffer
->state
.descriptors_dirty
&= ~cmd_buffer
->state
.pipeline
->active_stages
;
583 anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer
*cmd_buffer
,
584 uint32_t *a
, uint32_t dwords
, uint32_t alignment
)
586 struct anv_state state
;
588 state
= anv_cmd_buffer_alloc_dynamic_state(cmd_buffer
,
589 dwords
* 4, alignment
);
590 memcpy(state
.map
, a
, dwords
* 4);
592 VG(VALGRIND_CHECK_MEM_IS_DEFINED(state
.map
, dwords
* 4));
598 anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer
*cmd_buffer
,
599 uint32_t *a
, uint32_t *b
,
600 uint32_t dwords
, uint32_t alignment
)
602 struct anv_state state
;
605 state
= anv_cmd_buffer_alloc_dynamic_state(cmd_buffer
,
606 dwords
* 4, alignment
);
608 for (uint32_t i
= 0; i
< dwords
; i
++)
611 VG(VALGRIND_CHECK_MEM_IS_DEFINED(p
, dwords
* 4));
617 anv_cmd_buffer_begin_subpass(struct anv_cmd_buffer
*cmd_buffer
,
618 struct anv_subpass
*subpass
)
620 switch (cmd_buffer
->device
->info
.gen
) {
622 gen7_cmd_buffer_begin_subpass(cmd_buffer
, subpass
);
625 gen8_cmd_buffer_begin_subpass(cmd_buffer
, subpass
);
628 unreachable("unsupported gen\n");
632 void anv_CmdSetEvent(
633 VkCmdBuffer cmdBuffer
,
635 VkPipelineStageFlags stageMask
)
640 void anv_CmdResetEvent(
641 VkCmdBuffer cmdBuffer
,
643 VkPipelineStageFlags stageMask
)
648 void anv_CmdWaitEvents(
649 VkCmdBuffer cmdBuffer
,
651 const VkEvent
* pEvents
,
652 VkPipelineStageFlags srcStageMask
,
653 VkPipelineStageFlags destStageMask
,
654 uint32_t memBarrierCount
,
655 const void* const* ppMemBarriers
)
660 void anv_CmdPushConstants(
661 VkCmdBuffer cmdBuffer
,
662 VkPipelineLayout layout
,
663 VkShaderStageFlags stageFlags
,
671 void anv_CmdExecuteCommands(
672 VkCmdBuffer cmdBuffer
,
673 uint32_t cmdBuffersCount
,
674 const VkCmdBuffer
* pCmdBuffers
)
676 ANV_FROM_HANDLE(anv_cmd_buffer
, primary
, cmdBuffer
);
678 assert(primary
->level
== VK_CMD_BUFFER_LEVEL_PRIMARY
);
680 anv_assert(primary
->state
.subpass
== &primary
->state
.pass
->subpasses
[0]);
682 for (uint32_t i
= 0; i
< cmdBuffersCount
; i
++) {
683 ANV_FROM_HANDLE(anv_cmd_buffer
, secondary
, pCmdBuffers
[i
]);
685 assert(secondary
->level
== VK_CMD_BUFFER_LEVEL_SECONDARY
);
687 anv_cmd_buffer_add_secondary(primary
, secondary
);
691 VkResult
anv_CreateCommandPool(
693 const VkCmdPoolCreateInfo
* pCreateInfo
,
696 ANV_FROM_HANDLE(anv_device
, device
, _device
);
697 struct anv_cmd_pool
*pool
;
699 pool
= anv_device_alloc(device
, sizeof(*pool
), 8,
700 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
702 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
704 list_inithead(&pool
->cmd_buffers
);
706 *pCmdPool
= anv_cmd_pool_to_handle(pool
);
711 VkResult
anv_DestroyCommandPool(
715 ANV_FROM_HANDLE(anv_device
, device
, _device
);
716 ANV_FROM_HANDLE(anv_cmd_pool
, pool
, cmdPool
);
718 anv_ResetCommandPool(_device
, cmdPool
, 0);
720 anv_device_free(device
, pool
);
725 VkResult
anv_ResetCommandPool(
728 VkCmdPoolResetFlags flags
)
730 ANV_FROM_HANDLE(anv_cmd_pool
, pool
, cmdPool
);
732 list_for_each_entry_safe(struct anv_cmd_buffer
, cmd_buffer
,
733 &pool
->cmd_buffers
, pool_link
) {
734 anv_DestroyCommandBuffer(device
, anv_cmd_buffer_to_handle(cmd_buffer
));