/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "vk_format_info.h"

/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd*
 * entrypoints.  This file is concerned entirely with state emission and
 * not with the command buffer data structure itself.  As far as this file
 * is concerned, most of anv_cmd_buffer is magic.
 */
/* TODO: These are taken from GLES.  We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .viewport = {
      .count = 0,
   },
   .scissor = {
      .count = 0,
   },
   .line_width = 1.0f,
   .depth_bias = {
      .bias = 0.0f,
      .clamp = 0.0f,
      .slope = 0.0f,
   },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = {
      .min = 0.0f,
      .max = 1.0f,
   },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
};
void
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       uint32_t copy_mask)
{
   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
      dest->line_width = src->line_width;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
      dest->depth_bias = src->depth_bias;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
      dest->depth_bounds = src->depth_bounds;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
      dest->stencil_compare_mask = src->stencil_compare_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
      dest->stencil_write_mask = src->stencil_write_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
      dest->stencil_reference = src->stencil_reference;
}
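
/* A sketch of the intended use (see anv_CmdBindPipeline below): the pipeline
 * supplies a copy_mask with one bit per VK_DYNAMIC_STATE_* value that was
 * baked into the pipeline rather than left dynamic, so only baked state
 * overwrites the command buffer's copy:
 *
 *    anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
 *                           &pipeline->dynamic_state,
 *                           pipeline->dynamic_state_mask);
 */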
static void
anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   memset(&state->descriptors, 0, sizeof(state->descriptors));
   memset(&state->push_constants, 0, sizeof(state->push_constants));
   memset(state->binding_tables, 0, sizeof(state->binding_tables));
   memset(state->samplers, 0, sizeof(state->samplers));

   /* 0 isn't a valid config.  This ensures that we always configure L3$. */
   cmd_buffer->state.current_l3_config = 0;

   state->dirty = 0;
   state->vb_dirty = 0;
   state->descriptors_dirty = 0;
   state->push_constants_dirty = 0;
   state->pipeline = NULL;
   state->push_constant_stages = 0;
   state->restart_index = UINT32_MAX;
   state->dynamic = default_dynamic_state;
   state->need_query_wa = true;

   if (state->attachments != NULL) {
      anv_free(&cmd_buffer->pool->alloc, state->attachments);
      state->attachments = NULL;
   }

   state->gen7.index_buffer = NULL;
}
/**
 * Setup anv_cmd_state::attachments for vkCmdBeginRenderPass.
 */
void
anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                const VkRenderPassBeginInfo *info)
{
   struct anv_cmd_state *state = &cmd_buffer->state;
   ANV_FROM_HANDLE(anv_render_pass, pass, info->renderPass);

   anv_free(&cmd_buffer->pool->alloc, state->attachments);

   if (pass->attachment_count == 0) {
      state->attachments = NULL;
      return;
   }

   state->attachments = anv_alloc(&cmd_buffer->pool->alloc,
                                  pass->attachment_count *
                                     sizeof(state->attachments[0]),
                                  8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (state->attachments == NULL) {
      /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
      abort();
   }

   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      struct anv_render_pass_attachment *att = &pass->attachments[i];
      VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
      VkImageAspectFlags clear_aspects = 0;

      if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
         /* color attachment */
         if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
         }
      } else {
         /* depthstencil attachment */
         if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
             att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
         }
         if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
             att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
         }
      }

      state->attachments[i].pending_clear_aspects = clear_aspects;
      if (clear_aspects) {
         assert(info->clearValueCount > i);
         state->attachments[i].clear_value = info->pClearValues[i];
      }
   }
}
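
/* Presumably pending_clear_aspects is consumed when the subpass is started:
 * each set bit turns a VK_ATTACHMENT_LOAD_OP_CLEAR into an actual clear of
 * that aspect using the clear_value saved above, after which the bit is
 * dropped so the clear happens only once per render pass instance. */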
static VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size)
{
   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];

   if (*ptr == NULL) {
      *ptr = anv_alloc(&cmd_buffer->pool->alloc, size, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else if ((*ptr)->size < size) {
      *ptr = anv_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   (*ptr)->size = size;

   return VK_SUCCESS;
}
#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))
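
/* Illustration (not part of the build): requesting the "dynamic" field used
 * by anv_CmdBindDescriptorSets below expands to a size of
 *
 *    offsetof(struct anv_push_constants, dynamic) +
 *    sizeof(cmd_buffer->state.push_constants[0]->dynamic)
 *
 * i.e. just enough bytes to cover everything up to and including that field,
 * so repeated calls only ever grow the per-stage allocation. */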
static VkResult anv_create_cmd_buffer(
    struct anv_device *                         device,
    struct anv_cmd_pool *                       pool,
    VkCommandBufferLevel                        level,
    VkCommandBuffer*                            pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;
   cmd_buffer->state.attachments = NULL;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_block_pool);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}
VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS)
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);

   return result;
}
static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}
VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->usage_flags = 0;
   cmd_buffer->state.current_pipeline = UINT32_MAX;
   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
   anv_cmd_state_reset(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &cmd_buffer->device->surface_state_block_pool);

   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &cmd_buffer->device->dynamic_state_block_pool);

   return VK_SUCCESS;
}
void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      if (cmd_buffer->device->info.is_haswell)
         return gen75_cmd_buffer_emit_state_base_address(cmd_buffer);
      else
         return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 8:
      return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 9:
      return gen9_cmd_buffer_emit_state_base_address(cmd_buffer);
   default:
      unreachable("unsupported gen\n");
   }
}
VkResult anv_BeginCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    const VkCommandBufferBeginInfo*             pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* If this is the first vkBeginCommandBuffer, we must *initialize* the
    * command buffer's state. Otherwise, we must *reset* its state. In both
    * cases we reset it.
    *
    * From the Vulkan 1.0 spec:
    *
    *    If a command buffer is in the executable state and the command buffer
    *    was allocated from a command pool with the
    *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
    *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
    *    as if vkResetCommandBuffer had been called with
    *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
    *    the command buffer in the recording state.
    */
   anv_ResetCommandBuffer(commandBuffer, /*flags*/ 0);

   cmd_buffer->usage_flags = pBeginInfo->flags;

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
          !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));

   anv_cmd_buffer_emit_state_base_address(cmd_buffer);

   if (cmd_buffer->usage_flags &
       VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
      cmd_buffer->state.framebuffer =
         anv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);

      struct anv_subpass *subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];

      anv_cmd_buffer_set_subpass(cmd_buffer, subpass);
   }

   return VK_SUCCESS;
}
VkResult anv_EndCommandBuffer(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_device *device = cmd_buffer->device;

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      /* The algorithm used to compute the validate list is not threadsafe as
       * it uses the bo->index field.  We have to lock the device around it.
       * Fortunately, the chances for contention here are probably very low.
       */
      pthread_mutex_lock(&device->mutex);
      anv_cmd_buffer_prepare_execbuf(cmd_buffer);
      pthread_mutex_unlock(&device->mutex);
   }

   return VK_SUCCESS;
}
void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute_pipeline = pipeline;
      cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.pipeline = pipeline;
      cmd_buffer->state.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
      cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
      anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
                             &pipeline->dynamic_state,
                             pipeline->dynamic_state_mask);
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}
void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstViewport,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstViewport + viewportCount;
   if (cmd_buffer->state.dynamic.viewport.count < total_count)
      cmd_buffer->state.dynamic.viewport.count = total_count;

   memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstScissor,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstScissor + scissorCount;
   if (cmd_buffer->state.dynamic.scissor.count < total_count)
      cmd_buffer->state.dynamic.scissor.count = total_count;

   memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}
void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.line_width = lineWidth;
   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}
void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}
void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}
void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}
void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}
void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}
void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
   struct anv_descriptor_set_layout *set_layout;

   assert(firstSet + descriptorSetCount < MAX_SETS);

   uint32_t dynamic_slot = 0;
   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      set_layout = layout->set[firstSet + i].layout;

      if (cmd_buffer->state.descriptors[firstSet + i] != set) {
         cmd_buffer->state.descriptors[firstSet + i] = set;
         cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
      }

      if (set_layout->dynamic_offset_count > 0) {
         anv_foreach_stage(s, set_layout->shader_stages) {
            anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);

            struct anv_push_constants *push =
               cmd_buffer->state.push_constants[s];

            unsigned d = layout->set[firstSet + i].dynamic_offset_start;
            const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
            struct anv_descriptor *desc = set->descriptors;

            for (unsigned b = 0; b < set_layout->binding_count; b++) {
               if (set_layout->binding[b].dynamic_offset_index < 0)
                  continue;

               unsigned array_size = set_layout->binding[b].array_size;
               for (unsigned j = 0; j < array_size; j++) {
                  uint32_t range = 0;
                  if (desc->buffer_view)
                     range = desc->buffer_view->range;
                  push->dynamic[d].offset = *(offsets++);
                  push->dynamic[d].range = range;
                  desc++;
                  d++;
               }
            }
         }
         cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;
      }
   }
}
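
/* Note on the dynamic-offset plumbing above: rather than patching descriptor
 * memory, each VK_*_BUFFER_DYNAMIC offset (and the bound range, when a buffer
 * view is present) lands in push->dynamic[] for every stage that can see the
 * set, which is why push_constants_dirty is also flagged with the set's
 * shader_stages. */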
void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up vertex buffer since we need the buffer
    * stride from the pipeline. */

   assert(firstBinding + bindingCount < MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[firstBinding + i].offset = pOffsets[i];
      cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
   }
}
static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state, struct anv_bo *bo, uint32_t offset)
{
   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
    * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
    * the initial state to set the high bits to 0. */
   const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;

   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                      state.offset + dword * 4, bo, offset);
}
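
/* Worked example (illustrative numbers): for a surface state at
 * state.offset == 0x100, the relocation target is byte 0x100 + 1 * 4 = 0x104
 * (dword 1) on gen7, and byte 0x100 + 8 * 4 = 0x120 (dword 8, the low half of
 * the 64-bit address) on gen8+. */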
enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return ISL_FORMAT_R32G32B32A32_FLOAT;

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return ISL_FORMAT_RAW;

   default:
      unreachable("Invalid descriptor type");
   }
}
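
/* Presumably the split is: uniform buffers are read with typed vec4 fetches,
 * hence R32G32B32A32_FLOAT, while storage buffers need byte-addressable
 * untyped reads and writes, hence ISL_FORMAT_RAW. */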
static struct anv_state
anv_cmd_buffer_alloc_null_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                        struct anv_framebuffer *fb)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      if (cmd_buffer->device->info.is_haswell) {
         return gen75_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
      } else {
         return gen7_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
      }
   case 8:
      return gen8_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
   case 9:
      return gen9_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
   default:
      unreachable("Invalid hardware generation");
   }
}
VkResult
anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                  gl_shader_stage stage,
                                  struct anv_state *bt_state)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline_bind_map *map;
   uint32_t bias, state_offset;

   switch (stage) {
   case MESA_SHADER_COMPUTE:
      map = &cmd_buffer->state.compute_pipeline->bindings[stage];
      bias = 1;
      break;
   default:
      map = &cmd_buffer->state.pipeline->bindings[stage];
      bias = 0;
      break;
   }
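
   /* The compute bias reserves binding table slot 0 for the gl_NumWorkGroups
    * surface filled in below when the kernel actually reads it
    * (uses_num_work_groups); user surfaces then start at bt_map[bias]. */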

   if (bias + map->surface_count == 0) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + map->surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   if (stage == MESA_SHADER_COMPUTE &&
       get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
      struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
      uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;

      struct anv_state surface_state;
      surface_state =
         anv_cmd_buffer_alloc_surface_state(cmd_buffer);

      const enum isl_format format =
         anv_isl_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
      anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
                                    format, bo_offset, 12, 1);

      bt_map[0] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }

   if (map->surface_count == 0)
      goto out;

   if (map->image_count > 0) {
      VkResult result =
         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
      if (result != VK_SUCCESS)
         return result;

      cmd_buffer->state.push_constants_dirty |= 1 << stage;
   }

   uint32_t image = 0;
   for (uint32_t s = 0; s < map->surface_count; s++) {
      struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];

      struct anv_state surface_state;
      struct anv_bo *bo;
      uint32_t bo_offset;

      if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) {
         /* Color attachment binding */
         assert(stage == MESA_SHADER_FRAGMENT);
         if (binding->offset < subpass->color_count) {
            const struct anv_image_view *iview =
               fb->attachments[subpass->color_attachments[binding->offset]];

            assert(iview->color_rt_surface_state.alloc_size);
            surface_state = iview->color_rt_surface_state;
            add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
                                    iview->bo, iview->offset);
         } else {
            /* Null render target */
            struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
            surface_state =
               anv_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
         }

         bt_map[bias + s] = surface_state.offset + state_offset;
         continue;
      }

      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      switch (desc->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* Nothing for us to do here */
         continue;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         surface_state = desc->image_view->sampler_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
         surface_state = desc->image_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->image_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;
      }

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         surface_state = desc->buffer_view->surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
         surface_state = desc->buffer_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->buffer_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;
      }

      default:
         assert(!"Invalid descriptor type");
         continue;
      }

      bt_map[bias + s] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }
   assert(image == map->image_count);

 out:
   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*bt_state);

   return VK_SUCCESS;
}
VkResult
anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
                             gl_shader_stage stage, struct anv_state *state)
{
   struct anv_pipeline_bind_map *map;

   if (stage == MESA_SHADER_COMPUTE)
      map = &cmd_buffer->state.compute_pipeline->bindings[stage];
   else
      map = &cmd_buffer->state.pipeline->bindings[stage];

   if (map->sampler_count == 0) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   uint32_t size = map->sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t s = 0; s < map->sampler_count; s++) {
      struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
          desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         continue;

      struct anv_sampler *sampler = desc->sampler;

      /* This can happen if we have an unfilled slot since TYPE_SAMPLER
       * happens to be zero.
       */
      if (sampler == NULL)
         continue;

      memcpy(state->map + (s * 16),
             sampler->state, sizeof(sampler->state));
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*state);

   return VK_SUCCESS;
}
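
/* The fixed 16-byte stride above matches the four-dword hardware
 * SAMPLER_STATE layout on these gens; sampler->state holds the pre-packed
 * dwords, so emission is a straight memcpy per sampler. */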
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}
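
/* A typical (illustrative) use: the pipeline keeps a partially packed
 * hardware packet with only its baked fields set, the command buffer packs a
 * second copy with only the dynamic fields set, and ORing the two dword
 * streams here yields the complete packet, since unset fields pack to zero. */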
/**
 * @brief Setup the command buffer for recording commands inside the given
 * subpass.
 *
 * This does not record all commands needed for starting the subpass.
 * Starting the subpass may require additional commands.
 *
 * Note that vkCmdBeginRenderPass, vkCmdNextSubpass, and vkBeginCommandBuffer
 * with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT, all setup the
 * command buffer for recording commands for some subpass.  But only the first
 * two, vkCmdBeginRenderPass and vkCmdNextSubpass, can start a subpass.
 */
void
anv_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_subpass *subpass)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      if (cmd_buffer->device->info.is_haswell) {
         gen75_cmd_buffer_set_subpass(cmd_buffer, subpass);
      } else {
         gen7_cmd_buffer_set_subpass(cmd_buffer, subpass);
      }
      break;
   case 8:
      gen8_cmd_buffer_set_subpass(cmd_buffer, subpass);
      break;
   case 9:
      gen9_cmd_buffer_set_subpass(cmd_buffer, subpass);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[stage];
   const struct brw_stage_prog_data *prog_data =
      cmd_buffer->state.pipeline->prog_data[stage];

   /* If we don't actually have any push constants, bail. */
   if (data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}
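
/* The cast in the loop above works because the driver stores each
 * prog_data->param entry as a byte offset into struct anv_push_constants
 * (smuggled through the pointer value) rather than a real pointer, so
 * fetching a uniform is just *(base + offset). */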
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   const unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   const unsigned push_constant_data_size =
      (local_id_dwords + prog_data->nr_params) * 4;
   const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   const unsigned param_aligned_count =
      reg_aligned_constant_size / sizeof(uint32_t);
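
   /* Worked example (illustrative numbers): with 3 local-invocation-ID
    * registers and nr_params == 4, local_id_dwords is 24, so
    * push_constant_data_size is (24 + 4) * 4 = 112 bytes, which rounds up to
    * reg_aligned_constant_size = 128 (a 32-byte register multiple), i.e.
    * param_aligned_count = 32 dwords per thread. */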

   /* If we don't actually have any push constants, bail. */
   if (reg_aligned_constant_size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned threads = pipeline->cs_thread_width_max;
   const unsigned total_push_constants_size =
      reg_aligned_constant_size * threads;
   const unsigned push_constant_alignment =
      cmd_buffer->device->info.gen < 8 ? 32 : 64;
   const unsigned aligned_total_push_constants_size =
      ALIGN(total_push_constants_size, push_constant_alignment);
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         aligned_total_push_constants_size,
                                         push_constant_alignment);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;

   brw_cs_fill_local_id_payload(cs_prog_data, u32_map, threads,
                                reg_aligned_constant_size);

   /* Setup uniform data for the first thread */
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[local_id_dwords + i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   /* Copy uniform data from the first thread to every other thread */
   const size_t uniform_data_size = prog_data->nr_params * sizeof(uint32_t);
   for (unsigned t = 1; t < threads; t++) {
      memcpy(&u32_map[t * param_aligned_count + local_id_dwords],
             &u32_map[local_id_dwords],
             uniform_data_size);
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}
void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);

      memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}
void anv_CmdExecuteCommands(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);

   assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);

      anv_cmd_buffer_add_secondary(primary, secondary);
   }
}
VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}
void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   anv_ResetCommandPool(_device, commandPool, 0);

   anv_free2(&device->alloc, pAllocator, pool);
}
VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   /* FIXME: vkResetCommandPool must not destroy its command buffers. The
    * Vulkan 1.0 spec requires that it only reset them:
    *
    *    Resetting a command pool recycles all of the resources from all of
    *    the command buffers allocated from the command pool back to the
    *    command pool. All command buffers that have been allocated from the
    *    command pool are put in the initial state.
    */
   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   return VK_SUCCESS;
}
/**
 * Return NULL if the current subpass has no depthstencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;

   if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
      return NULL;

   const struct anv_image_view *iview =
      fb->attachments[subpass->depth_stencil_attachment];

   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
                                VK_IMAGE_ASPECT_STENCIL_BIT));

   return iview;
}