/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <string.h>

#include "anv_private.h"

/** \file anv_cmd_buffer.c
 *
 * This file contains the code for emitting commands into a command buffer,
 * including implementations of most of the vkCmd* entrypoints.  It is
 * concerned entirely with state emission, not with the command buffer data
 * structure itself.  As far as this file is concerned, most of
 * anv_cmd_buffer is magic.
 */

/* TODO: These are taken from GLES.  We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .viewport = {
      .count = 0,
   },
   .scissor = {
      .count = 0,
   },
   .line_width = 1.0f,
   .depth_bias = {
      .bias = 0.0f,
      .clamp = 0.0f,
      .slope_scaled = 0.0f,
   },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = {
      .min = 0.0f,
      .max = 1.0f,
   },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
};
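
/**
 * Copy the dynamic state groups selected by copy_mask (a bitmask of
 * (1 << VK_DYNAMIC_STATE_*) bits) from src into dest, leaving the
 * unselected groups in dest untouched.
 */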
void
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       uint32_t copy_mask)
{
   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
      dest->line_width = src->line_width;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
      dest->depth_bias = src->depth_bias;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
      dest->depth_bounds = src->depth_bounds;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
      dest->stencil_compare_mask = src->stencil_compare_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
      dest->stencil_write_mask = src->stencil_write_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
      dest->stencil_reference = src->stencil_reference;
}
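
/**
 * Reset a command buffer's software state to the defaults it has when
 * freshly created.
 */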
static void
anv_cmd_state_init(struct anv_cmd_state *state)
{
   memset(&state->state_vf, 0, sizeof(state->state_vf));
   memset(&state->descriptors, 0, sizeof(state->descriptors));
   memset(&state->push_constants, 0, sizeof(state->push_constants));

   state->dirty = 0;
   state->vb_dirty = 0;
   state->descriptors_dirty = 0;
   state->push_constants_dirty = 0;
   state->pipeline = NULL;
   state->dynamic = default_dynamic_state;

   state->gen7.index_buffer = NULL;
}
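
/**
 * Make sure the push constant block for the given stage is allocated and is
 * at least size bytes.  An existing allocation is grown (and its contents
 * copied over) rather than replaced, so previously written data survives.
 */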
static VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          VkShaderStage stage, uint32_t size)
{
   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];

   if (*ptr == NULL) {
      *ptr = anv_device_alloc(cmd_buffer->device, size, 8,
                              VK_SYSTEM_ALLOC_TYPE_INTERNAL);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      (*ptr)->size = size;
   } else if ((*ptr)->size < size) {
      void *new_data = anv_device_alloc(cmd_buffer->device, size, 8,
                                        VK_SYSTEM_ALLOC_TYPE_INTERNAL);
      if (new_data == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      memcpy(new_data, *ptr, (*ptr)->size);
      anv_device_free(cmd_buffer->device, *ptr);

      *ptr = new_data;
      (*ptr)->size = size;
   }

   return VK_SUCCESS;
}

#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))
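
/* For example, to guarantee the vertex stage's allocation extends at least
 * to the end of the client_data field (as anv_CmdPushConstants does for
 * each stage below):
 *
 *    anv_cmd_buffer_ensure_push_constant_field(cmd_buffer,
 *                                              VK_SHADER_STAGE_VERTEX,
 *                                              client_data);
 */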

VkResult anv_CreateCommandBuffer(
    VkDevice                                    _device,
    const VkCmdBufferCreateInfo*                pCreateInfo,
    VkCmdBuffer*                                pCmdBuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pCreateInfo->cmdPool);
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_device_alloc(device, sizeof(*cmd_buffer), 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_block_pool);

   cmd_buffer->level = pCreateInfo->level;
   cmd_buffer->opt_flags = 0;

   anv_cmd_state_init(&cmd_buffer->state);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer.
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCmdBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   anv_device_free(device, cmd_buffer);

   return result;
}

void anv_DestroyCommandBuffer(
    VkDevice                                    _device,
    VkCmdBuffer                                 _cmd_buffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, _cmd_buffer);

   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_device_free(device, cmd_buffer);
}

VkResult anv_ResetCommandBuffer(
    VkCmdBuffer                                 cmdBuffer,
    VkCmdBufferResetFlags                       flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);

   anv_cmd_state_init(&cmd_buffer->state);

   return VK_SUCCESS;
}

void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 8:
      return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
   default:
      unreachable("unsupported gen\n");
   }
}

VkResult anv_BeginCommandBuffer(
    VkCmdBuffer                                 cmdBuffer,
    const VkCmdBufferBeginInfo*                 pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   cmd_buffer->opt_flags = pBeginInfo->flags;

   if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_SECONDARY) {
      cmd_buffer->state.framebuffer =
         anv_framebuffer_from_handle(pBeginInfo->framebuffer);
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->renderPass);

      struct anv_subpass *subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->subpass];

      anv_cmd_buffer_begin_subpass(cmd_buffer, subpass);
   }

   anv_cmd_buffer_emit_state_base_address(cmd_buffer);
   cmd_buffer->state.current_pipeline = UINT32_MAX;

   return VK_SUCCESS;
}

VkResult anv_EndCommandBuffer(
    VkCmdBuffer                                 cmdBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   struct anv_device *device = cmd_buffer->device;

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY) {
      /* The algorithm used to compute the validate list is not threadsafe as
       * it uses the bo->index field.  We have to lock the device around it.
       * Fortunately, the chances for contention here are probably very low.
       */
      pthread_mutex_lock(&device->mutex);
      anv_cmd_buffer_prepare_execbuf(cmd_buffer);
      pthread_mutex_unlock(&device->mutex);
   }

   return VK_SUCCESS;
}

void anv_CmdBindPipeline(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute_pipeline = pipeline;
      cmd_buffer->state.compute_dirty |= ANV_CMD_BUFFER_PIPELINE_DIRTY;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.pipeline = pipeline;
      cmd_buffer->state.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.dirty |= ANV_CMD_BUFFER_PIPELINE_DIRTY;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
      anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
                             &pipeline->dynamic_state,
                             pipeline->dynamic_state_mask);
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}

void anv_CmdSetViewport(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   cmd_buffer->state.dynamic.viewport.count = viewportCount;
   memcpy(cmd_buffer->state.dynamic.viewport.viewports,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.dirty |= ANV_DYNAMIC_VIEWPORT_DIRTY;
}

void anv_CmdSetScissor(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   cmd_buffer->state.dynamic.scissor.count = scissorCount;
   memcpy(cmd_buffer->state.dynamic.scissor.scissors,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.dirty |= ANV_DYNAMIC_SCISSOR_DIRTY;
}

void anv_CmdSetLineWidth(
    VkCmdBuffer                                 cmdBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   cmd_buffer->state.dynamic.line_width = lineWidth;

   cmd_buffer->state.dirty |= ANV_DYNAMIC_LINE_WIDTH_DIRTY;
}

void anv_CmdSetDepthBias(
    VkCmdBuffer                                 cmdBuffer,
    float                                       depthBias,
    float                                       depthBiasClamp,
    float                                       slopeScaledDepthBias)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   cmd_buffer->state.dynamic.depth_bias.bias = depthBias;
   cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.dynamic.depth_bias.slope_scaled = slopeScaledDepthBias;

   cmd_buffer->state.dirty |= ANV_DYNAMIC_DEPTH_BIAS_DIRTY;
}

void anv_CmdSetBlendConstants(
    VkCmdBuffer                                 cmdBuffer,
    const float                                 blendConst[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   memcpy(cmd_buffer->state.dynamic.blend_constants,
          blendConst, sizeof(float) * 4);

   cmd_buffer->state.dirty |= ANV_DYNAMIC_BLEND_CONSTANTS_DIRTY;
}

void anv_CmdSetDepthBounds(
    VkCmdBuffer                                 cmdBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.dirty |= ANV_DYNAMIC_DEPTH_BOUNDS_DIRTY;
}

void anv_CmdSetStencilCompareMask(
    VkCmdBuffer                                 cmdBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    stencilCompareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.front = stencilCompareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.back = stencilCompareMask;

   cmd_buffer->state.dirty |= ANV_DYNAMIC_STENCIL_COMPARE_MASK_DIRTY;
}

void anv_CmdSetStencilWriteMask(
    VkCmdBuffer                                 cmdBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    stencilWriteMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.front = stencilWriteMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.back = stencilWriteMask;

   cmd_buffer->state.dirty |= ANV_DYNAMIC_STENCIL_WRITE_MASK_DIRTY;
}

void anv_CmdSetStencilReference(
    VkCmdBuffer                                 cmdBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    stencilReference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_reference.front = stencilReference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_reference.back = stencilReference;

   cmd_buffer->state.dirty |= ANV_DYNAMIC_STENCIL_REFERENCE_DIRTY;
}
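
/**
 * Bind descriptor sets and record their dynamic buffer offsets.  Dynamic
 * offsets have no direct hardware representation here; they reach the
 * shaders through the push constant block, so binding a set with dynamic
 * descriptors copies the relevant entries of pDynamicOffsets into the push
 * constants of every stage that uses the set.
 */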
void anv_CmdBindDescriptorSets(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    setCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
   struct anv_descriptor_set_layout *set_layout;

   assert(firstSet + setCount < MAX_SETS);

   uint32_t dynamic_slot = 0;
   for (uint32_t i = 0; i < setCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      set_layout = layout->set[firstSet + i].layout;

      if (cmd_buffer->state.descriptors[firstSet + i].set != set) {
         cmd_buffer->state.descriptors[firstSet + i].set = set;
         cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
      }

      if (set_layout->dynamic_offset_count > 0) {
         VkShaderStage s;
         for_each_bit(s, set_layout->shader_stages) {
            anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s,
                                                      dynamic_offsets);
            uint32_t *offsets =
               cmd_buffer->state.push_constants[s]->dynamic_offsets +
               layout->set[firstSet + i].dynamic_offset_start;

            typed_memcpy(offsets, pDynamicOffsets + dynamic_slot,
                         set_layout->dynamic_offset_count);
         }

         cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;
         dynamic_slot += set_layout->dynamic_offset_count;
      }
   }
}

void anv_CmdBindVertexBuffers(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    startBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up the vertex buffers since we need the
    * buffer stride from the pipeline. */

   assert(startBinding + bindingCount < MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[startBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[startBinding + i].offset = pOffsets[i];
      cmd_buffer->state.vb_dirty |= 1 << (startBinding + i);
   }
}
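
/**
 * Record a relocation for the address field of a SURFACE_STATE so the
 * actual buffer address gets patched in when the batch is executed.
 */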
static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state, struct anv_bo *bo, uint32_t offset)
{
   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
    * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
    * the initial state to set the high bits to 0. */

   const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;

   anv_reloc_list_add(&cmd_buffer->surface_relocs, cmd_buffer->device,
                      state.offset + dword * 4, bo, offset);
}
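
/**
 * Allocate and fill the binding table for one shader stage: color render
 * targets first (fragment shader only), then the surfaces referenced by the
 * stage's descriptor sets.  Returns VK_ERROR_OUT_OF_DEVICE_MEMORY when the
 * current binding table block is exhausted; the caller is expected to start
 * a new block and retry (see anv_flush_descriptor_sets).
 */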
static VkResult
anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                  VkShaderStage stage, struct anv_state *bt_state)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline_layout *layout;
   uint32_t attachments, bias, state_offset;

   if (stage == VK_SHADER_STAGE_COMPUTE)
      layout = cmd_buffer->state.compute_pipeline->layout;
   else
      layout = cmd_buffer->state.pipeline->layout;

   if (stage == VK_SHADER_STAGE_FRAGMENT) {
      bias = MAX_RTS;
      attachments = subpass->color_count;
   } else {
      bias = 0;
      attachments = 0;
   }

   /* This is a little awkward: layout can be NULL but we still have to
    * allocate and set a binding table for the PS stage for render
    * targets. */
   uint32_t surface_count = layout ? layout->stage[stage].surface_count : 0;

   if (attachments + surface_count == 0)
      return VK_SUCCESS;

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   /* This is highly annoying.  The Vulkan spec puts the depth-stencil
    * attachments in with the color attachments.  Unfortunately, thanks to
    * other aspects of the API, we can't really separate them before this
    * point.  Therefore, we have to walk all of the attachments but only
    * put the color attachments into the binding table.
    */
   for (uint32_t a = 0; a < attachments; a++) {
      const struct anv_image_view *iview =
         fb->attachments[subpass->color_attachments[a]];

      bt_map[a] = iview->color_rt_surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
                              iview->bo, iview->offset);
   }

   if (layout == NULL)
      return VK_SUCCESS;

   for (uint32_t s = 0; s < layout->stage[stage].surface_count; s++) {
      struct anv_pipeline_binding *binding =
         &layout->stage[stage].surface_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set].set;
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      const struct anv_state *surface_state;
      struct anv_bo *bo;
      uint32_t bo_offset;

      switch (desc->type) {
      case ANV_DESCRIPTOR_TYPE_EMPTY:
      case ANV_DESCRIPTOR_TYPE_SAMPLER:
         continue;
      case ANV_DESCRIPTOR_TYPE_BUFFER_VIEW:
         surface_state = &desc->buffer_view->surface_state;
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;
         break;
      case ANV_DESCRIPTOR_TYPE_IMAGE_VIEW:
         surface_state = &desc->image_view->nonrt_surface_state;
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;
         break;
      }

      bt_map[bias + s] = surface_state->offset + state_offset;
      add_surface_state_reloc(cmd_buffer, *surface_state, bo, bo_offset);
   }

   return VK_SUCCESS;
}
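
/**
 * Allocate dynamic state for the stage's sampler table and copy in the
 * SAMPLER_STATE (16 bytes) of each bound sampler.
 */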
static VkResult
anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
                             VkShaderStage stage, struct anv_state *state)
{
   struct anv_pipeline_layout *layout;
   uint32_t sampler_count;

   if (stage == VK_SHADER_STAGE_COMPUTE)
      layout = cmd_buffer->state.compute_pipeline->layout;
   else
      layout = cmd_buffer->state.pipeline->layout;

   sampler_count = layout ? layout->stage[stage].sampler_count : 0;
   if (sampler_count == 0)
      return VK_SUCCESS;

   uint32_t size = sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t s = 0; s < layout->stage[stage].sampler_count; s++) {
      struct anv_pipeline_binding *binding =
         &layout->stage[stage].sampler_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set].set;
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      if (desc->type != ANV_DESCRIPTOR_TYPE_SAMPLER)
         continue;

      struct anv_sampler *sampler = desc->sampler;

      memcpy(state->map + (s * 16),
             sampler->state, sizeof(sampler->state));
   }

   return VK_SUCCESS;
}
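
/* Emit the sampler and binding table pointers for one stage.  The _VS
 * variants of the pointer commands are used as templates; the actual target
 * stage is selected by overriding _3DCommandSubOpcode with the stage's own
 * sub-opcode from the tables below.
 */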
static VkResult
flush_descriptor_set(struct anv_cmd_buffer *cmd_buffer, VkShaderStage stage)
{
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer, stage, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer, stage, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   static const uint32_t sampler_state_opcodes[] = {
      [VK_SHADER_STAGE_VERTEX]          = 43,
      [VK_SHADER_STAGE_TESS_CONTROL]    = 44, /* HS */
      [VK_SHADER_STAGE_TESS_EVALUATION] = 45, /* DS */
      [VK_SHADER_STAGE_GEOMETRY]        = 46,
      [VK_SHADER_STAGE_FRAGMENT]        = 47,
      [VK_SHADER_STAGE_COMPUTE]         = 0,
   };

   static const uint32_t binding_table_opcodes[] = {
      [VK_SHADER_STAGE_VERTEX]          = 38,
      [VK_SHADER_STAGE_TESS_CONTROL]    = 39,
      [VK_SHADER_STAGE_TESS_EVALUATION] = 40,
      [VK_SHADER_STAGE_GEOMETRY]        = 41,
      [VK_SHADER_STAGE_FRAGMENT]        = 42,
      [VK_SHADER_STAGE_COMPUTE]         = 0,
   };

   if (samplers.alloc_size > 0) {
      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_SAMPLER_STATE_POINTERS_VS,
                     ._3DCommandSubOpcode  = sampler_state_opcodes[stage],
                     .PointertoVSSamplerState = samplers.offset);
   }

   if (surfaces.alloc_size > 0) {
      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_BINDING_TABLE_POINTERS_VS,
                     ._3DCommandSubOpcode  = binding_table_opcodes[stage],
                     .PointertoVSBindingTable = surfaces.offset);
   }

   return VK_SUCCESS;
}
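
/**
 * Flush binding tables and samplers for all dirty stages.  If a stage fails
 * with out-of-device-memory, the current binding table block is full: grab
 * a fresh block, re-emit STATE_BASE_ADDRESS so the new surface state base
 * takes effect, and then re-emit the tables for every active stage.
 */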
void
anv_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStage s;
   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
                              cmd_buffer->state.pipeline->active_stages;

   VkResult result = VK_SUCCESS;
   for_each_bit(s, dirty) {
      result = flush_descriptor_set(cmd_buffer, s);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      /* Re-emit all active binding tables */
      for_each_bit(s, cmd_buffer->state.pipeline->active_stages) {
         result = flush_descriptor_set(cmd_buffer, s);

         /* It had better succeed this time */
         assert(result == VK_SUCCESS);
      }
   }

   cmd_buffer->state.descriptors_dirty &=
      ~cmd_buffer->state.pipeline->active_stages;
}
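
/**
 * Copy a dword array into freshly allocated dynamic state and return the
 * allocation so it can be referenced by pointer from the batch.
 */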
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            uint32_t *a, uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   memcpy(state.map, a, dwords * 4);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, dwords * 4));

   return state;
}
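
/**
 * Like anv_cmd_buffer_emit_dynamic, except the emitted dwords are the
 * bitwise OR of the two input arrays.  This is used to combine state that
 * is partly baked into the pipeline with its dynamic counterpart.
 */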
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}

void
anv_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_subpass *subpass)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      gen7_cmd_buffer_begin_subpass(cmd_buffer, subpass);
      break;
   case 8:
      gen8_cmd_buffer_begin_subpass(cmd_buffer, subpass);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}

static void
emit_viewport_state(struct anv_cmd_buffer *cmd_buffer,
                    uint32_t count, const VkViewport *viewports)
{
   struct anv_state sf_clip_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 64, 64);
   struct anv_state cc_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkViewport *vp = &viewports[i];

      /* The gen7 state struct has just the matrix and guardband fields, the
       * gen8 struct adds the min/max viewport fields. */
      struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
         .ViewportMatrixElementm30 = vp->originX + vp->width / 2,
         .ViewportMatrixElementm31 = vp->originY + vp->height / 2,
         .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->originX,
         .XMaxViewPort = vp->originX + vp->width - 1,
         .YMinViewPort = vp->originY,
         .YMaxViewPort = vp->originY + vp->height - 1,
      };

      struct GEN7_CC_VIEWPORT cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth
      };

      GEN8_SF_CLIP_VIEWPORT_pack(NULL, sf_clip_state.map + i * 64,
                                 &sf_clip_viewport);
      /* CC_VIEWPORT structs are 8 bytes each and packed consecutively,
       * matching the count * 8 allocation above. */
      GEN7_CC_VIEWPORT_pack(NULL, cc_state.map + i * 8, &cc_viewport);
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
                  .CCViewportPointer = cc_state.offset);
   anv_batch_emit(&cmd_buffer->batch,
                  GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
                  .SFClipViewportPointer = sf_clip_state.offset);
}

void
anv_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.viewport.count > 0) {
      emit_viewport_state(cmd_buffer, cmd_buffer->state.dynamic.viewport.count,
                          cmd_buffer->state.dynamic.viewport.viewports);
   } else {
      /* If viewport count is 0, this is taken to mean "use the default" */
      emit_viewport_state(cmd_buffer, 1,
                          &(VkViewport) {
                             .originX = 0.0f,
                             .originY = 0.0f,
                             .width = cmd_buffer->state.framebuffer->width,
                             .height = cmd_buffer->state.framebuffer->height,
                             .minDepth = 0.0f,
                             .maxDepth = 1.0f,
                          });
   }
}

static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}

static void
emit_scissor_state(struct anv_cmd_buffer *cmd_buffer,
                   uint32_t count, const VkRect2D *scissors)
{
   struct anv_state scissor_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 32, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkRect2D *s = &scissors[i];

      /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
       * ymax < ymin for empty clips.  In case clip x, y, width, height are
       * all 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which
       * isn't what we want.  Just special case empty clips and produce a
       * canonical empty clip. */
      static const struct GEN7_SCISSOR_RECT empty_scissor = {
         .ScissorRectangleYMin = 1,
         .ScissorRectangleXMin = 1,
         .ScissorRectangleYMax = 0,
         .ScissorRectangleXMax = 0
      };

      const int max = 0xffff;
      struct GEN7_SCISSOR_RECT scissor = {
         /* Do this math using int64_t so overflow gets clamped correctly. */
         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
         .ScissorRectangleYMax = clamp_int64(
            (uint64_t) s->offset.y + s->extent.height - 1, 0, max),
         .ScissorRectangleXMax = clamp_int64(
            (uint64_t) s->offset.x + s->extent.width - 1, 0, max)
      };

      if (s->extent.width <= 0 || s->extent.height <= 0) {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 32,
                                &empty_scissor);
      } else {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 32, &scissor);
      }
   }

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SCISSOR_STATE_POINTERS,
                  .ScissorRectPointer = scissor_state.offset);
}

void
anv_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.scissor.count > 0) {
      emit_scissor_state(cmd_buffer, cmd_buffer->state.dynamic.scissor.count,
                         cmd_buffer->state.dynamic.scissor.scissors);
   } else {
      /* Emit a default scissor based on the currently bound framebuffer */
      emit_scissor_state(cmd_buffer, 1,
                         &(VkRect2D) {
                            .offset = { .x = 0, .y = 0, },
                            .extent = {
                               .width = cmd_buffer->state.framebuffer->width,
                               .height = cmd_buffer->state.framebuffer->height,
                            },
                         });
   }
}

void anv_CmdSetEvent(
    VkCmdBuffer                                 cmdBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void anv_CmdResetEvent(
    VkCmdBuffer                                 cmdBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void anv_CmdWaitEvents(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memBarrierCount,
    const void* const*                          ppMemBarriers)
{
   stub();
}
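
/**
 * Gather the push constants for a stage into freshly allocated dynamic
 * state.  Each entry of prog_data->param encodes a byte offset into the
 * stage's anv_push_constants block; the loop below resolves those offsets
 * into a tightly packed buffer for the hardware.
 */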
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              VkShaderStage stage)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[stage];
   struct brw_stage_prog_data *prog_data =
      cmd_buffer->state.pipeline->prog_data[stage];

   /* If we don't actually have any push constants, bail. */
   if (data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   return state;
}

void anv_CmdPushConstants(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    start,
    uint32_t                                    length,
    const void*                                 values)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   VkShaderStage stage;

   for_each_bit(stage, stageFlags) {
      anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);

      memcpy(cmd_buffer->state.push_constants[stage]->client_data + start,
             values, length);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}

void anv_CmdExecuteCommands(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    cmdBuffersCount,
    const VkCmdBuffer*                          pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, cmdBuffer);

   assert(primary->level == VK_CMD_BUFFER_LEVEL_PRIMARY);

   anv_assert(primary->state.subpass == &primary->state.pass->subpasses[0]);

   for (uint32_t i = 0; i < cmdBuffersCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_CMD_BUFFER_LEVEL_SECONDARY);

      anv_cmd_buffer_add_secondary(primary, secondary);
   }
}

VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCmdPoolCreateInfo*                  pCreateInfo,
    VkCmdPool*                                  pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = anv_device_alloc(device, sizeof(*pool), 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCmdPool                                   cmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, cmdPool);

   anv_ResetCommandPool(_device, cmdPool, 0);

   anv_device_free(device, pool);
}

VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCmdPool                                   cmdPool,
    VkCmdPoolResetFlags                         flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, cmdPool);

   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_DestroyCommandBuffer(device, anv_cmd_buffer_to_handle(cmd_buffer));
   }

   return VK_SUCCESS;
}

/**
 * Return NULL if the current subpass has no depth-stencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;

   if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
      return NULL;

   const struct anv_image_view *iview =
      fb->attachments[subpass->depth_stencil_attachment];

   assert(anv_format_is_depth_or_stencil(iview->format));

   return iview;
}