/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <string.h>

#include "anv_private.h"
/** \file anv_cmd_buffer.c
 *
 * This file contains the code for emitting commands into a command buffer,
 * including the implementations of most of the vkCmd* entrypoints.  It is
 * concerned entirely with state emission, not with the command buffer data
 * structure itself.  As far as this file is concerned, most of
 * anv_cmd_buffer is magic.
 */
/* Reset all CPU-side command buffer state to its freshly created defaults. */
static void
anv_cmd_state_init(struct anv_cmd_state *state)
{
   state->pipeline = NULL;
   state->vp_state = NULL;
   state->rs_state = NULL;
   state->cb_state = NULL;
   state->ds_state = NULL;
   memset(&state->state_vf, 0, sizeof(state->state_vf));
   memset(&state->descriptors, 0, sizeof(state->descriptors));
   memset(&state->push_constants, 0, sizeof(state->push_constants));

   state->dirty = 0;
   state->vb_dirty = 0;
   state->descriptors_dirty = 0;
   state->push_constants_dirty = 0;

   state->gen7.index_buffer = NULL;
}
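/* Ensure the push constant block for the given stage is at least `size`
 * bytes.  The block is allocated on first use; on growth, the old contents
 * are copied forward so fields written earlier survive the reallocation.
 */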
static VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          VkShaderStage stage, uint32_t size)
{
   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];

   if (*ptr == NULL) {
      *ptr = anv_device_alloc(cmd_buffer->device, size, 8,
                              VK_SYSTEM_ALLOC_TYPE_INTERNAL);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      (*ptr)->size = size;
   } else if ((*ptr)->size < size) {
      void *new_data = anv_device_alloc(cmd_buffer->device, size, 8,
                                        VK_SYSTEM_ALLOC_TYPE_INTERNAL);
      if (new_data == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      memcpy(new_data, *ptr, (*ptr)->size);
      anv_device_free(cmd_buffer->device, *ptr);

      *ptr = new_data;
      (*ptr)->size = size;
   }

   return VK_SUCCESS;
}
#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))
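/* For example, anv_CmdPushConstants below uses this to make sure a stage's
 * block is large enough to hold the client_data field:
 *
 *    anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage,
 *                                              client_data);
 */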
VkResult anv_CreateCommandBuffer(
    VkDevice                                    _device,
    const VkCmdBufferCreateInfo*                pCreateInfo,
    VkCmdBuffer*                                pCmdBuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pCreateInfo->cmdPool);
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_device_alloc(device, sizeof(*cmd_buffer), 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_block_pool);

   cmd_buffer->level = pCreateInfo->level;
   cmd_buffer->opt_flags = 0;

   anv_cmd_state_init(&cmd_buffer->state);

   if (pool != NULL) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer.
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCmdBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   anv_device_free(device, cmd_buffer);

   return result;
}
VkResult anv_DestroyCommandBuffer(
    VkDevice                                    _device,
    VkCmdBuffer                                 _cmd_buffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, _cmd_buffer);

   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_device_free(device, cmd_buffer);

   return VK_SUCCESS;
}
VkResult anv_ResetCommandBuffer(
    VkCmdBuffer                                 cmdBuffer,
    VkCmdBufferResetFlags                       flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);

   anv_cmd_state_init(&cmd_buffer->state);

   return VK_SUCCESS;
}
void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 8:
      return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
   default:
      unreachable("unsupported gen\n");
   }
}
VkResult anv_BeginCommandBuffer(
    VkCmdBuffer                                 cmdBuffer,
    const VkCmdBufferBeginInfo*                 pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   cmd_buffer->opt_flags = pBeginInfo->flags;

   if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_SECONDARY) {
      cmd_buffer->state.framebuffer =
         anv_framebuffer_from_handle(pBeginInfo->framebuffer);
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->renderPass);

      struct anv_subpass *subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->subpass];

      anv_cmd_buffer_begin_subpass(cmd_buffer, subpass);
   }

   anv_cmd_buffer_emit_state_base_address(cmd_buffer);
   cmd_buffer->state.current_pipeline = UINT32_MAX;

   return VK_SUCCESS;
}
VkResult anv_EndCommandBuffer(
    VkCmdBuffer                                 cmdBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   struct anv_device *device = cmd_buffer->device;

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY) {
      /* The algorithm used to compute the validate list is not threadsafe as
       * it uses the bo->index field.  We have to lock the device around it.
       * Fortunately, the chances for contention here are probably very low.
       */
      pthread_mutex_lock(&device->mutex);
      anv_cmd_buffer_prepare_execbuf(cmd_buffer);
      pthread_mutex_unlock(&device->mutex);
   }

   return VK_SUCCESS;
}
void anv_CmdBindPipeline(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute_pipeline = pipeline;
      cmd_buffer->state.compute_dirty |= ANV_CMD_BUFFER_PIPELINE_DIRTY;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.pipeline = pipeline;
      cmd_buffer->state.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.dirty |= ANV_CMD_BUFFER_PIPELINE_DIRTY;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}
void anv_CmdBindDynamicViewportState(
    VkCmdBuffer                                 cmdBuffer,
    VkDynamicViewportState                      dynamicViewportState)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_dynamic_vp_state, vp_state, dynamicViewportState);

   cmd_buffer->state.vp_state = vp_state;
   cmd_buffer->state.dirty |= ANV_CMD_BUFFER_VP_DIRTY;
}

void anv_CmdBindDynamicRasterState(
    VkCmdBuffer                                 cmdBuffer,
    VkDynamicRasterState                        dynamicRasterState)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_dynamic_rs_state, rs_state, dynamicRasterState);

   cmd_buffer->state.rs_state = rs_state;
   cmd_buffer->state.dirty |= ANV_CMD_BUFFER_RS_DIRTY;
}

void anv_CmdBindDynamicColorBlendState(
    VkCmdBuffer                                 cmdBuffer,
    VkDynamicColorBlendState                    dynamicColorBlendState)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_dynamic_cb_state, cb_state, dynamicColorBlendState);

   cmd_buffer->state.cb_state = cb_state;
   cmd_buffer->state.dirty |= ANV_CMD_BUFFER_CB_DIRTY;
}

void anv_CmdBindDynamicDepthStencilState(
    VkCmdBuffer                                 cmdBuffer,
    VkDynamicDepthStencilState                  dynamicDepthStencilState)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_dynamic_ds_state, ds_state, dynamicDepthStencilState);

   cmd_buffer->state.ds_state = ds_state;
   cmd_buffer->state.dirty |= ANV_CMD_BUFFER_DS_DIRTY;
}
void anv_CmdBindDescriptorSets(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    setCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
   struct anv_descriptor_set_layout *set_layout;

   assert(firstSet + setCount < MAX_SETS);

   uint32_t dynamic_slot = 0;
   for (uint32_t i = 0; i < setCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      set_layout = layout->set[firstSet + i].layout;

      if (cmd_buffer->state.descriptors[firstSet + i].set != set) {
         cmd_buffer->state.descriptors[firstSet + i].set = set;
         cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
      }

      if (set_layout->num_dynamic_buffers > 0) {
         for_each_bit(s, set_layout->shader_stages) {
            anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s,
                                                      dynamic_offsets);
            uint32_t *offsets =
               cmd_buffer->state.push_constants[s]->dynamic_offsets +
               layout->set[firstSet + i].dynamic_offset_start;

            memcpy(offsets, pDynamicOffsets + dynamic_slot,
                   set_layout->num_dynamic_buffers * sizeof(*pDynamicOffsets));
         }
         cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;

         dynamic_slot += set_layout->num_dynamic_buffers;
      }
   }
}
void anv_CmdBindVertexBuffers(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    startBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up the vertex buffers since we need the
    * buffer stride from the pipeline.
    */

   assert(startBinding + bindingCount < MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[startBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[startBinding + i].offset = pOffsets[i];
      cmd_buffer->state.vb_dirty |= 1 << (startBinding + i);
   }
}
static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state, struct anv_bo *bo, uint32_t offset)
{
   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
    * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
    * the initial state to set the high bits to 0.  Concretely: on gen7 this
    * patches state.offset + 4 (dword 1); on gen8 it patches
    * state.offset + 32 (dword 8).
    */
   const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;

   anv_reloc_list_add(&cmd_buffer->surface_relocs, cmd_buffer->device,
                      state.offset + dword * 4, bo, offset);
}
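/* Build the binding table for one shader stage: render target surfaces
 * first (fragment shader only), then the surfaces referenced by the
 * stage's descriptor sets.  On success, *bt_state holds the table's
 * allocation.
 */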
static VkResult
anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                  VkShaderStage stage, struct anv_state *bt_state)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline_layout *layout;
   uint32_t attachments, bias, state_offset;

   if (stage == VK_SHADER_STAGE_COMPUTE)
      layout = cmd_buffer->state.compute_pipeline->layout;
   else
      layout = cmd_buffer->state.pipeline->layout;

   if (stage == VK_SHADER_STAGE_FRAGMENT) {
      bias = MAX_RTS;
      attachments = subpass->color_count;
   } else {
      bias = 0;
      attachments = 0;
   }

   /* This is a little awkward: layout can be NULL but we still have to
    * allocate and set a binding table for the PS stage for render
    * targets.
    */
   uint32_t surface_count = layout ? layout->stage[stage].surface_count : 0;

   if (attachments + surface_count == 0)
      return VK_SUCCESS;

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   /* This is highly annoying.  The Vulkan spec puts the depth-stencil
    * attachments in with the color attachments.  Unfortunately, thanks to
    * other aspects of the API, we can't really separate them before this
    * point.  Therefore, we have to walk all of the attachments but only
    * put the color attachments into the binding table.
    */
   for (uint32_t a = 0; a < attachments; a++) {
      const struct anv_attachment_view *aview =
         fb->attachments[subpass->color_attachments[a]];
      const struct anv_image_view *iview = &aview->image_view;

      assert(aview->attachment_type == ANV_ATTACHMENT_VIEW_TYPE_COLOR);

      bt_map[a] = iview->surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, iview->surface_state,
                              iview->bo, iview->offset);
   }

   if (layout == NULL)
      return VK_SUCCESS;

   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_binding *d = &cmd_buffer->state.descriptors[set];
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
      struct anv_descriptor_slot *surface_slots =
         set_layout->stage[stage].surface_start;

      uint32_t start = bias + layout->set[set].stage[stage].surface_start;

      for (uint32_t b = 0; b < set_layout->stage[stage].surface_count; b++) {
         struct anv_descriptor *desc =
            &d->set->descriptors[surface_slots[b].index];

         const struct anv_state *surface_state;
         struct anv_bo *bo;
         uint32_t bo_offset;

         switch (desc->type) {
         case ANV_DESCRIPTOR_TYPE_EMPTY:
         case ANV_DESCRIPTOR_TYPE_SAMPLER:
            continue;
         case ANV_DESCRIPTOR_TYPE_BUFFER_VIEW:
            surface_state = &desc->buffer_view->surface_state;
            bo = desc->buffer_view->bo;
            bo_offset = desc->buffer_view->offset;
            break;
         case ANV_DESCRIPTOR_TYPE_IMAGE_VIEW:
            surface_state = &desc->image_view->surface_state;
            bo = desc->image_view->bo;
            bo_offset = desc->image_view->offset;
            break;
         }

         bt_map[start + b] = surface_state->offset + state_offset;
         add_surface_state_reloc(cmd_buffer, *surface_state, bo, bo_offset);
      }
   }

   return VK_SUCCESS;
}
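/* Allocate and fill the SAMPLER_STATE table for one shader stage.  Each
 * sampler entry is 16 bytes (four dwords) and the table must be 32-byte
 * aligned, hence the size and alignment passed to the allocator below.
 */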
static VkResult
anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
                             VkShaderStage stage, struct anv_state *state)
{
   struct anv_pipeline_layout *layout;
   uint32_t sampler_count;

   if (stage == VK_SHADER_STAGE_COMPUTE)
      layout = cmd_buffer->state.compute_pipeline->layout;
   else
      layout = cmd_buffer->state.pipeline->layout;

   sampler_count = layout ? layout->stage[stage].sampler_count : 0;
   if (sampler_count == 0)
      return VK_SUCCESS;

   uint32_t size = sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_binding *d = &cmd_buffer->state.descriptors[set];
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
      struct anv_descriptor_slot *sampler_slots =
         set_layout->stage[stage].sampler_start;

      uint32_t start = layout->set[set].stage[stage].sampler_start;

      for (uint32_t b = 0; b < set_layout->stage[stage].sampler_count; b++) {
         struct anv_descriptor *desc =
            &d->set->descriptors[sampler_slots[b].index];

         if (desc->type != ANV_DESCRIPTOR_TYPE_SAMPLER)
            continue;

         struct anv_sampler *sampler = desc->sampler;

         memcpy(state->map + (start + b) * 16,
                sampler->state, sizeof(sampler->state));
      }
   }

   return VK_SUCCESS;
}
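/* Emit the sampler and binding tables for a single stage, then the
 * 3DSTATE_*_POINTERS commands that point the hardware at them.  The *_VS
 * command templates are reused for every stage; only _3DCommandSubOpcode
 * distinguishes VS/HS/DS/GS/PS.
 */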
static VkResult
flush_descriptor_set(struct anv_cmd_buffer *cmd_buffer, VkShaderStage stage)
{
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer, stage, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer, stage, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   static const uint32_t sampler_state_opcodes[] = {
      [VK_SHADER_STAGE_VERTEX]          = 43,
      [VK_SHADER_STAGE_TESS_CONTROL]    = 44, /* HS */
      [VK_SHADER_STAGE_TESS_EVALUATION] = 45, /* DS */
      [VK_SHADER_STAGE_GEOMETRY]        = 46,
      [VK_SHADER_STAGE_FRAGMENT]        = 47,
      [VK_SHADER_STAGE_COMPUTE]         = 0,
   };

   static const uint32_t binding_table_opcodes[] = {
      [VK_SHADER_STAGE_VERTEX]          = 38,
      [VK_SHADER_STAGE_TESS_CONTROL]    = 39,
      [VK_SHADER_STAGE_TESS_EVALUATION] = 40,
      [VK_SHADER_STAGE_GEOMETRY]        = 41,
      [VK_SHADER_STAGE_FRAGMENT]        = 42,
      [VK_SHADER_STAGE_COMPUTE]         = 0,
   };

   if (samplers.alloc_size > 0) {
      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_SAMPLER_STATE_POINTERS_VS,
                     ._3DCommandSubOpcode  = sampler_state_opcodes[stage],
                     .PointertoVSSamplerState = samplers.offset);
   }

   if (surfaces.alloc_size > 0) {
      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_BINDING_TABLE_POINTERS_VS,
                     ._3DCommandSubOpcode  = binding_table_opcodes[stage],
                     .PointertoVSBindingTable = surfaces.offset);
   }

   return VK_SUCCESS;
}
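/* Flush the dirty descriptor state for every active shader stage.  If a
 * binding table allocation fails, grab a fresh binding table block,
 * re-emit STATE_BASE_ADDRESS so the new block is in effect, and then
 * re-emit all of the active binding tables.
 */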
void
anv_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
                              cmd_buffer->state.pipeline->active_stages;

   VkResult result = VK_SUCCESS;
   for_each_bit(s, dirty) {
      result = flush_descriptor_set(cmd_buffer, s);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      /* Re-emit all active binding tables */
      for_each_bit(s, cmd_buffer->state.pipeline->active_stages) {
         result = flush_descriptor_set(cmd_buffer, s);

         /* It had better succeed this time */
         assert(result == VK_SUCCESS);
      }
   }

   cmd_buffer->state.descriptors_dirty &=
      ~cmd_buffer->state.pipeline->active_stages;
}
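/* Copy `dwords` dwords of pre-packed state into the dynamic state stream
 * and return the allocation so the caller can emit a pointer to it.
 */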
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            uint32_t *a, uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   memcpy(state.map, a, dwords * 4);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, dwords * 4));

   return state;
}
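/* Like anv_cmd_buffer_emit_dynamic, but ORs two sets of pre-packed dwords
 * together first.  This lets state that was packed at pipeline-creation
 * time be combined with state packed from dynamic state objects.
 */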
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}
void
anv_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_subpass *subpass)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      gen7_cmd_buffer_begin_subpass(cmd_buffer, subpass);
      break;
   case 8:
      gen8_cmd_buffer_begin_subpass(cmd_buffer, subpass);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}
void anv_CmdSetEvent(
    VkCmdBuffer                                 cmdBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void anv_CmdResetEvent(
    VkCmdBuffer                                 cmdBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void anv_CmdWaitEvents(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memBarrierCount,
    const void* const*                          ppMemBarriers)
{
   stub();
}
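/* Gather a stage's push constant data into a dynamic-state allocation.
 * Each entry of prog_data->param holds a byte offset into the stage's
 * anv_push_constants block; we copy one dword per param.
 */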
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              VkShaderStage stage)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[stage];
   struct brw_stage_prog_data *prog_data =
      cmd_buffer->state.pipeline->prog_data[stage];

   /* If we don't actually have any push constants, bail. */
   if (data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   return state;
}
void anv_CmdPushConstants(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    start,
    uint32_t                                    length,
    const void*                                 values)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   for_each_bit(stage, stageFlags) {
      anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);

      memcpy(cmd_buffer->state.push_constants[stage]->client_data + start,
             values, length);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}
void anv_CmdExecuteCommands(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    cmdBuffersCount,
    const VkCmdBuffer*                          pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, cmdBuffer);

   assert(primary->level == VK_CMD_BUFFER_LEVEL_PRIMARY);

   anv_assert(primary->state.subpass == &primary->state.pass->subpasses[0]);

   for (uint32_t i = 0; i < cmdBuffersCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_CMD_BUFFER_LEVEL_SECONDARY);

      anv_cmd_buffer_add_secondary(primary, secondary);
   }
}
VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCmdPoolCreateInfo*                  pCreateInfo,
    VkCmdPool*                                  pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = anv_device_alloc(device, sizeof(*pool), 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}
VkResult anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCmdPool                                   cmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, cmdPool);

   anv_ResetCommandPool(_device, cmdPool, 0);

   anv_device_free(device, pool);

   return VK_SUCCESS;
}
VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCmdPool                                   cmdPool,
    VkCmdPoolResetFlags                         flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, cmdPool);

   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_DestroyCommandBuffer(device, anv_cmd_buffer_to_handle(cmd_buffer));
   }

   return VK_SUCCESS;
}
/**
 * Return NULL if the current subpass has no depth-stencil attachment.
 */
const struct anv_attachment_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;

   if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
      return NULL;

   const struct anv_attachment_view *aview =
      fb->attachments[subpass->depth_stencil_attachment];

   assert(aview->attachment_type == ANV_ATTACHMENT_VIEW_TYPE_DEPTH_STENCIL);

   return aview;
}