/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/* Standard headers needed by the memset/memcpy and assert calls below. */
#include <assert.h>
#include <string.h>

#include "anv_private.h"
/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd* entrypoints.
 * This file is concerned entirely with state emission and not with the
 * command buffer data structure itself.  As far as this file is concerned,
 * most of anv_cmd_buffer is magic.
 */
/* TODO: These are taken from GLES.  We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .viewport = {
      .count = 0,
   },
   .scissor = {
      .count = 0,
   },
   .line_width = 1.0f,
   .depth_bias = {
      .bias = 0.0f,
      .clamp = 0.0f,
      .slope_scaled = 0.0f,
   },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = {
      .min = 0.0f,
      .max = 1.0f,
   },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
};
void
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       uint32_t copy_mask)
{
   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
      dest->line_width = src->line_width;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
      dest->depth_bias = src->depth_bias;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
      dest->depth_bounds = src->depth_bounds;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
      dest->stencil_compare_mask = src->stencil_compare_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
      dest->stencil_write_mask = src->stencil_write_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
      dest->stencil_reference = src->stencil_reference;
}
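/* Illustrative use of copy_mask (names are all from this file): to copy
 * only the viewport and scissor state, a caller would pass
 *
 *    anv_dynamic_state_copy(&dest, &src,
 *                           (1 << VK_DYNAMIC_STATE_VIEWPORT) |
 *                           (1 << VK_DYNAMIC_STATE_SCISSOR));
 *
 * anv_CmdBindPipeline below does exactly this with the pipeline's
 * dynamic_state_mask so that only the states baked into the pipeline
 * overwrite the command buffer's current dynamic state.
 */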
static void
anv_cmd_state_init(struct anv_cmd_state *state)
{
   memset(&state->state_vf, 0, sizeof(state->state_vf));
   memset(&state->descriptors, 0, sizeof(state->descriptors));
   memset(&state->push_constants, 0, sizeof(state->push_constants));

   state->dirty = 0;
   state->vb_dirty = 0;
   state->descriptors_dirty = 0;
   state->push_constants_dirty = 0;
   state->pipeline = NULL;
   state->dynamic = default_dynamic_state;

   state->gen7.index_buffer = NULL;
}
static VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          VkShaderStage stage, uint32_t size)
{
   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];

   if (*ptr == NULL) {
      *ptr = anv_device_alloc(cmd_buffer->device, size, 8,
                              VK_SYSTEM_ALLOC_TYPE_INTERNAL);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      (*ptr)->size = size;
   } else if ((*ptr)->size < size) {
      void *new_data = anv_device_alloc(cmd_buffer->device, size, 8,
                                        VK_SYSTEM_ALLOC_TYPE_INTERNAL);
      if (new_data == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      memcpy(new_data, *ptr, (*ptr)->size);
      anv_device_free(cmd_buffer->device, *ptr);

      *ptr = new_data;
      (*ptr)->size = size;
   }

   return VK_SUCCESS;
}

#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))
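/* A note on the macro above: it is a minimal grow-to-fit helper.  For a
 * field such as `dynamic` (used by anv_CmdBindDescriptorSets below) it
 * expands, roughly, to:
 *
 *    anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage,
 *       offsetof(struct anv_push_constants, dynamic) +
 *       sizeof(cmd_buffer->state.push_constants[0]->dynamic));
 *
 * i.e. it requests enough storage to cover everything up to and including
 * the named field, and the grow path memcpy's the old contents so fields
 * written earlier survive the reallocation.
 */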
VkResult anv_CreateCommandBuffer(
    VkDevice                                    _device,
    const VkCmdBufferCreateInfo*                pCreateInfo,
    VkCmdBuffer*                                pCmdBuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pCreateInfo->cmdPool);
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_device_alloc(device, sizeof(*cmd_buffer), 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_block_pool);

   cmd_buffer->level = pCreateInfo->level;
   cmd_buffer->opt_flags = 0;

   anv_cmd_state_init(&cmd_buffer->state);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer.
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCmdBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   anv_device_free(device, cmd_buffer);

   return result;
}
void anv_DestroyCommandBuffer(
    VkDevice                                    _device,
    VkCmdBuffer                                 _cmd_buffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, _cmd_buffer);

   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_device_free(device, cmd_buffer);
}
VkResult anv_ResetCommandBuffer(
    VkCmdBuffer                                 cmdBuffer,
    VkCmdBufferResetFlags                       flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);

   anv_cmd_state_init(&cmd_buffer->state);

   return VK_SUCCESS;
}
void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 8:
      return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
   default:
      unreachable("unsupported gen\n");
   }
}
VkResult anv_BeginCommandBuffer(
    VkCmdBuffer                                 cmdBuffer,
    const VkCmdBufferBeginInfo*                 pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);

   cmd_buffer->opt_flags = pBeginInfo->flags;

   if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_SECONDARY) {
      cmd_buffer->state.framebuffer =
         anv_framebuffer_from_handle(pBeginInfo->framebuffer);
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->renderPass);

      struct anv_subpass *subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->subpass];

      anv_cmd_buffer_begin_subpass(cmd_buffer, subpass);
   }

   anv_cmd_buffer_emit_state_base_address(cmd_buffer);
   cmd_buffer->state.current_pipeline = UINT32_MAX;

   return VK_SUCCESS;
}
VkResult anv_EndCommandBuffer(
    VkCmdBuffer                                 cmdBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   struct anv_device *device = cmd_buffer->device;

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY) {
      /* The algorithm used to compute the validate list is not threadsafe
       * as it uses the bo->index field.  We have to lock the device around
       * it.  Fortunately, the chances for contention here are probably
       * very low.
       */
      pthread_mutex_lock(&device->mutex);
      anv_cmd_buffer_prepare_execbuf(cmd_buffer);
      pthread_mutex_unlock(&device->mutex);
   }

   return VK_SUCCESS;
}
void anv_CmdBindPipeline(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute_pipeline = pipeline;
      cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.pipeline = pipeline;
      cmd_buffer->state.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
      anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
                             &pipeline->dynamic_state,
                             pipeline->dynamic_state_mask);
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}
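/* Note: binding a graphics pipeline both applies the pipeline's baked-in
 * dynamic state (via anv_dynamic_state_copy above) and marks it dirty, so
 * a later vkCmdSet* call simply overwrites it again.  An illustrative
 * sequence (API names per this Vulkan revision):
 *
 *    vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
 *    vkCmdSetLineWidth(cmd, 2.0f);   // overrides the pipeline's line width
 */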
void anv_CmdSetViewport(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   cmd_buffer->state.dynamic.viewport.count = viewportCount;
   memcpy(cmd_buffer->state.dynamic.viewport.viewports,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
void anv_CmdSetScissor(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   cmd_buffer->state.dynamic.scissor.count = scissorCount;
   memcpy(cmd_buffer->state.dynamic.scissor.scissors,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}
void anv_CmdSetLineWidth(
    VkCmdBuffer                                 cmdBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   cmd_buffer->state.dynamic.line_width = lineWidth;
   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}
void anv_CmdSetDepthBias(
    VkCmdBuffer                                 cmdBuffer,
    float                                       depthBias,
    float                                       depthBiasClamp,
    float                                       slopeScaledDepthBias)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   cmd_buffer->state.dynamic.depth_bias.bias = depthBias;
   cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.dynamic.depth_bias.slope_scaled = slopeScaledDepthBias;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}
void anv_CmdSetBlendConstants(
    VkCmdBuffer                                 cmdBuffer,
    const float                                 blendConst[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   memcpy(cmd_buffer->state.dynamic.blend_constants,
          blendConst, sizeof(float) * 4);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}
void anv_CmdSetDepthBounds(
    VkCmdBuffer                                 cmdBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}
void anv_CmdSetStencilCompareMask(
    VkCmdBuffer                                 cmdBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    stencilCompareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.front = stencilCompareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.back = stencilCompareMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}
void anv_CmdSetStencilWriteMask(
    VkCmdBuffer                                 cmdBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    stencilWriteMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.front = stencilWriteMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.back = stencilWriteMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}
void anv_CmdSetStencilReference(
    VkCmdBuffer                                 cmdBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    stencilReference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_reference.front = stencilReference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_reference.back = stencilReference;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
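/* All of the vkCmdSet* entrypoints above follow the same pattern: they only
 * record the new value into cmd_buffer->state.dynamic and set a dirty bit.
 * No hardware state is emitted here; the gen7/gen8 code that flushes draw
 * state consumes the dirty bits later.
 */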
void anv_CmdBindDescriptorSets(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    setCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
   struct anv_descriptor_set_layout *set_layout;

   assert(firstSet + setCount < MAX_SETS);

   uint32_t dynamic_slot = 0;
   for (uint32_t i = 0; i < setCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      set_layout = layout->set[firstSet + i].layout;

      if (cmd_buffer->state.descriptors[firstSet + i] != set) {
         cmd_buffer->state.descriptors[firstSet + i] = set;
         cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
      }

      if (set_layout->dynamic_offset_count > 0) {
         VkShaderStage s;
         for_each_bit(s, set_layout->shader_stages) {
            anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);

            struct anv_push_constants *push =
               cmd_buffer->state.push_constants[s];

            unsigned d = layout->set[firstSet + i].dynamic_offset_start;
            const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
            struct anv_descriptor *desc = set->descriptors;

            for (unsigned b = 0; b < set_layout->binding_count; b++) {
               if (set_layout->binding[b].dynamic_offset_index < 0)
                  continue;

               unsigned array_size = set_layout->binding[b].array_size;
               for (unsigned j = 0; j < array_size; j++) {
                  push->dynamic[d].offset = *(offsets++);
                  push->dynamic[d].range = (desc++)->range;
                  d++;
               }
            }
         }

         cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;

         dynamic_slot += set_layout->dynamic_offset_count;
      }
   }
}
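/* Worked example (hypothetical numbers): say set 0's layout has two dynamic
 * buffers and set 1's has one.  Binding both sets with
 * pDynamicOffsets = {256, 512, 0} consumes slots 0-1 for set 0 and slot 2
 * for set 1, since dynamic_slot advances by each layout's
 * dynamic_offset_count.  The offsets land in push->dynamic[d] starting at
 * that set's dynamic_offset_start.
 */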
void anv_CmdBindVertexBuffers(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    startBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up vertex buffers since we need the buffer
    * stride from the pipeline.
    */
   assert(startBinding + bindingCount < MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[startBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[startBinding + i].offset = pOffsets[i];
      cmd_buffer->state.vb_dirty |= 1 << (startBinding + i);
   }
}
static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state,
                        struct anv_bo *bo, uint32_t offset)
{
   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8
    * and 9 for gen8+.  We only write the first dword for gen8+ here and
    * rely on the initial state to set the high bits to 0.
    */
   const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;

   anv_reloc_list_add(&cmd_buffer->surface_relocs, cmd_buffer->device,
                      state.offset + dword * 4, bo, offset);
}
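/* Concretely: for gen7 the relocation is recorded at byte offset
 * state.offset + 4 (dword 1 of the SURFACE_STATE), and for gen8+ at
 * state.offset + 32 (dword 8, the low half of the 64-bit address).
 */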
static VkResult
anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                  VkShaderStage stage,
                                  struct anv_state *bt_state)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline_layout *layout;
   uint32_t color_count, bias, state_offset;

   if (stage == VK_SHADER_STAGE_COMPUTE)
      layout = cmd_buffer->state.compute_pipeline->layout;
   else
      layout = cmd_buffer->state.pipeline->layout;

   if (stage == VK_SHADER_STAGE_FRAGMENT) {
      bias = MAX_RTS;
      color_count = subpass->color_count;
   } else {
      bias = 0;
      color_count = 0;
   }

   /* This is a little awkward: layout can be NULL but we still have to
    * allocate and set a binding table for the PS stage for render
    * targets.
    */
   uint32_t surface_count = layout ? layout->stage[stage].surface_count : 0;

   if (color_count + surface_count == 0)
      return VK_SUCCESS;

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t a = 0; a < color_count; a++) {
      const struct anv_image_view *iview =
         fb->attachments[subpass->color_attachments[a]];

      bt_map[a] = iview->color_rt_surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
                              iview->bo, iview->offset);
   }

   if (layout == NULL)
      return VK_SUCCESS;

   for (uint32_t s = 0; s < layout->stage[stage].surface_count; s++) {
      struct anv_pipeline_binding *binding =
         &layout->stage[stage].surface_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      struct anv_state surface_state;
      struct anv_bo *bo;
      uint32_t bo_offset;

      switch (desc->type) {
      case ANV_DESCRIPTOR_TYPE_EMPTY:
      case ANV_DESCRIPTOR_TYPE_SAMPLER:
         /* Nothing for us to do here */
         continue;

      case ANV_DESCRIPTOR_TYPE_BUFFER_AND_OFFSET: {
         bo = desc->buffer->bo;
         bo_offset = desc->buffer->offset + desc->offset;

         surface_state =
            anv_cmd_buffer_alloc_surface_state(cmd_buffer);

         anv_fill_buffer_surface_state(cmd_buffer->device, surface_state.map,
                                       anv_format_for_vk_format(VK_FORMAT_R32G32B32A32_SFLOAT),
                                       bo_offset, desc->range);
         break;
      }

      case ANV_DESCRIPTOR_TYPE_IMAGE_VIEW:
      case ANV_DESCRIPTOR_TYPE_IMAGE_VIEW_AND_SAMPLER:
         surface_state = desc->image_view->nonrt_surface_state;
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;
         break;
      }

      bt_map[bias + s] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }

   return VK_SUCCESS;
}
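/* The resulting binding table layout for the fragment stage is therefore:
 * entries [0, color_count) point at render-target surface states and
 * entries [bias, bias + surface_count) at descriptor surface states; the
 * other stages use bias == 0 and carry only descriptor surfaces.
 */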
static VkResult
anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
                             VkShaderStage stage, struct anv_state *state)
{
   struct anv_pipeline_layout *layout;
   uint32_t sampler_count;

   if (stage == VK_SHADER_STAGE_COMPUTE)
      layout = cmd_buffer->state.compute_pipeline->layout;
   else
      layout = cmd_buffer->state.pipeline->layout;

   sampler_count = layout ? layout->stage[stage].sampler_count : 0;
   if (sampler_count == 0)
      return VK_SUCCESS;

   uint32_t size = sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t s = 0; s < layout->stage[stage].sampler_count; s++) {
      struct anv_pipeline_binding *binding =
         &layout->stage[stage].sampler_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      if (desc->type != ANV_DESCRIPTOR_TYPE_SAMPLER &&
          desc->type != ANV_DESCRIPTOR_TYPE_IMAGE_VIEW_AND_SAMPLER)
         continue;

      struct anv_sampler *sampler = desc->sampler;

      /* FIXME: We shouldn't have to do this */
      if (sampler == NULL)
         continue;

      memcpy(state->map + (s * 16),
             sampler->state, sizeof(sampler->state));
   }

   return VK_SUCCESS;
}
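/* Each SAMPLER_STATE entry is 16 bytes (4 dwords) on these gens, which is
 * why the table is sized sampler_count * 16 and entry s is written at byte
 * offset s * 16.
 */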
static VkResult
flush_descriptor_set(struct anv_cmd_buffer *cmd_buffer, VkShaderStage stage)
{
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer, stage, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer, stage, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   static const uint32_t sampler_state_opcodes[] = {
      [VK_SHADER_STAGE_VERTEX]          = 43,
      [VK_SHADER_STAGE_TESS_CONTROL]    = 44, /* HS */
      [VK_SHADER_STAGE_TESS_EVALUATION] = 45, /* DS */
      [VK_SHADER_STAGE_GEOMETRY]        = 46,
      [VK_SHADER_STAGE_FRAGMENT]        = 47,
      [VK_SHADER_STAGE_COMPUTE]         = 0,
   };

   static const uint32_t binding_table_opcodes[] = {
      [VK_SHADER_STAGE_VERTEX]          = 38,
      [VK_SHADER_STAGE_TESS_CONTROL]    = 39,
      [VK_SHADER_STAGE_TESS_EVALUATION] = 40,
      [VK_SHADER_STAGE_GEOMETRY]        = 41,
      [VK_SHADER_STAGE_FRAGMENT]        = 42,
      [VK_SHADER_STAGE_COMPUTE]         = 0,
   };

   if (samplers.alloc_size > 0) {
      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_SAMPLER_STATE_POINTERS_VS,
                     ._3DCommandSubOpcode  = sampler_state_opcodes[stage],
                     .PointertoVSSamplerState = samplers.offset);
   }

   if (surfaces.alloc_size > 0) {
      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_BINDING_TABLE_POINTERS_VS,
                     ._3DCommandSubOpcode  = binding_table_opcodes[stage],
                     .PointertoVSBindingTable = surfaces.offset);
   }

   return VK_SUCCESS;
}
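/* The *_VS pack templates are reused for every stage here: the instructions
 * for the other stages are identical except for the _3DCommandSubOpcode,
 * which the two opcode tables above override per stage.
 */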
void
anv_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStage s;
   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
                              cmd_buffer->state.pipeline->active_stages;

   VkResult result = VK_SUCCESS;
   for_each_bit(s, dirty) {
      result = flush_descriptor_set(cmd_buffer, s);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      /* Re-emit all active binding tables */
      for_each_bit(s, cmd_buffer->state.pipeline->active_stages) {
         result = flush_descriptor_set(cmd_buffer, s);

         /* It had better succeed this time */
         assert(result == VK_SUCCESS);
      }
   }

   cmd_buffer->state.descriptors_dirty &=
      ~cmd_buffer->state.pipeline->active_stages;
}
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            uint32_t *a, uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   memcpy(state.map, a, dwords * 4);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, dwords * 4));

   return state;
}
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}
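/* Sketch of intended use (hypothetical variable names): a caller ORs a
 * pipeline's pre-packed dwords with dwords packed from dynamic state, e.g.
 *
 *    struct anv_state state =
 *       anv_cmd_buffer_merge_dynamic(cmd_buffer, pipeline_dwords,
 *                                    dynamic_dwords, num_dwords, 64);
 *
 * This works because the pack functions leave unused fields zero, so the
 * OR combines the two partial encodings.
 */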
void
anv_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_subpass *subpass)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      gen7_cmd_buffer_begin_subpass(cmd_buffer, subpass);
      break;
   case 8:
      gen8_cmd_buffer_begin_subpass(cmd_buffer, subpass);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}
static void
emit_viewport_state(struct anv_cmd_buffer *cmd_buffer,
                    uint32_t count, const VkViewport *viewports)
{
   struct anv_state sf_clip_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 64, 64);
   struct anv_state cc_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkViewport *vp = &viewports[i];

      /* The gen7 state struct has just the matrix and guardband fields;
       * the gen8 struct adds the min/max viewport fields.
       */
      struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
         .ViewportMatrixElementm30 = vp->originX + vp->width / 2,
         .ViewportMatrixElementm31 = vp->originY + vp->height / 2,
         .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->originX,
         .XMaxViewPort = vp->originX + vp->width - 1,
         .YMinViewPort = vp->originY,
         .YMaxViewPort = vp->originY + vp->height - 1,
      };

      struct GEN7_CC_VIEWPORT cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth
      };

      GEN8_SF_CLIP_VIEWPORT_pack(NULL, sf_clip_state.map + i * 64,
                                 &sf_clip_viewport);
      /* CC_VIEWPORT entries are 8 bytes each, matching the count * 8
       * allocation above.
       */
      GEN7_CC_VIEWPORT_pack(NULL, cc_state.map + i * 8, &cc_viewport);
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
                  .CCViewportPointer = cc_state.offset);
   anv_batch_emit(&cmd_buffer->batch,
                  GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
                  .SFClipViewportPointer = sf_clip_state.offset);
}
void
anv_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.viewport.count > 0) {
      emit_viewport_state(cmd_buffer, cmd_buffer->state.dynamic.viewport.count,
                          cmd_buffer->state.dynamic.viewport.viewports);
   } else {
      /* If viewport count is 0, this is taken to mean "use the default" */
      emit_viewport_state(cmd_buffer, 1,
                          &(VkViewport) {
                             .originX = 0,
                             .originY = 0,
                             .width = cmd_buffer->state.framebuffer->width,
                             .height = cmd_buffer->state.framebuffer->height,
                             .minDepth = 0,
                             .maxDepth = 1,
                          });
   }
}
static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}
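/* clamp_int64 exists because the scissor math below mixes 32-bit offsets
 * and extents whose sum can overflow an int32_t; doing the comparison in
 * int64_t lets out-of-range values clamp to [0, 0xffff] instead of
 * wrapping.
 */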
static void
emit_scissor_state(struct anv_cmd_buffer *cmd_buffer,
                   uint32_t count, const VkRect2D *scissors)
{
   struct anv_state scissor_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 32, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkRect2D *s = &scissors[i];

      /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
       * ymax < ymin for empty clips.  In case clip x, y, width, and height
       * are all 0, the clamps below produce 0 for xmin, ymin, xmax, ymax,
       * which isn't what we want.  Just special case empty clips and
       * produce a canonical empty clip.
       */
      static const struct GEN7_SCISSOR_RECT empty_scissor = {
         .ScissorRectangleYMin = 1,
         .ScissorRectangleXMin = 1,
         .ScissorRectangleYMax = 0,
         .ScissorRectangleXMax = 0
      };

      const int max = 0xffff;
      struct GEN7_SCISSOR_RECT scissor = {
         /* Do this math using int64_t so overflow gets clamped correctly. */
         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
         .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
         .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
      };

      if (s->extent.width <= 0 || s->extent.height <= 0) {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 32,
                                &empty_scissor);
      } else {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 32, &scissor);
      }
   }

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SCISSOR_STATE_POINTERS,
                  .ScissorRectPointer = scissor_state.offset);
}
void
anv_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.scissor.count > 0) {
      emit_scissor_state(cmd_buffer, cmd_buffer->state.dynamic.scissor.count,
                         cmd_buffer->state.dynamic.scissor.scissors);
   } else {
      /* Emit a default scissor based on the currently bound framebuffer */
      emit_scissor_state(cmd_buffer, 1,
                         &(VkRect2D) {
                            .offset = { .x = 0, .y = 0, },
                            .extent = {
                               .width = cmd_buffer->state.framebuffer->width,
                               .height = cmd_buffer->state.framebuffer->height,
                            },
                         });
   }
}
void anv_CmdSetEvent(
    VkCmdBuffer                                 cmdBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void anv_CmdResetEvent(
    VkCmdBuffer                                 cmdBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void anv_CmdWaitEvents(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memBarrierCount,
    const void* const*                          ppMemBarriers)
{
   stub();
}
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              VkShaderStage stage)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[stage];
   struct brw_stage_prog_data *prog_data =
      cmd_buffer->state.pipeline->prog_data[stage];

   /* If we don't actually have any push constants, bail. */
   if (data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   return state;
}
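/* The indirection above relies on how the pipeline was uploaded: each
 * prog_data->param[i] holds (cast to a pointer) the byte offset of one
 * 32-bit value inside struct anv_push_constants, so entry i of the
 * uploaded buffer is simply the dword found at that offset in `data`.
 */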
void anv_CmdPushConstants(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    start,
    uint32_t                                    length,
    const void*                                 values)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   VkShaderStage stage;

   for_each_bit(stage, stageFlags) {
      anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);

      memcpy(cmd_buffer->state.push_constants[stage]->client_data + start,
             values, length);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}
void anv_CmdExecuteCommands(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    cmdBuffersCount,
    const VkCmdBuffer*                          pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, cmdBuffer);

   assert(primary->level == VK_CMD_BUFFER_LEVEL_PRIMARY);

   anv_assert(primary->state.subpass == &primary->state.pass->subpasses[0]);

   for (uint32_t i = 0; i < cmdBuffersCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_CMD_BUFFER_LEVEL_SECONDARY);

      anv_cmd_buffer_add_secondary(primary, secondary);
   }
}
VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCmdPoolCreateInfo*                  pCreateInfo,
    VkCmdPool*                                  pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = anv_device_alloc(device, sizeof(*pool), 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}
void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCmdPool                                   cmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, cmdPool);

   anv_ResetCommandPool(_device, cmdPool, 0);

   anv_device_free(device, pool);
}
VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCmdPool                                   cmdPool,
    VkCmdPoolResetFlags                         flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, cmdPool);

   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_DestroyCommandBuffer(device, anv_cmd_buffer_to_handle(cmd_buffer));
   }

   return VK_SUCCESS;
}
/**
 * Return NULL if the current subpass has no depthstencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;

   if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
      return NULL;

   const struct anv_image_view *iview =
      fb->attachments[subpass->depth_stencil_attachment];

   assert(anv_format_is_depth_or_stencil(iview->format));

   return iview;
}