2 * Copyright © 2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
28 #include "anv_private.h"
29 #include "nir/nir_builder.h"
/* Per-vertex data consumed by the resolve meta-op pipelines.
 * NOTE(review): this chunk is fragmented — the enclosing
 * "struct vertex_attrs {" and closing "};" lines are not visible here.
 * Field order matters: offsetof(struct vertex_attrs, ...) is used below
 * to build the vertex attribute descriptions.
 */
32 * Vertex attributes used by all pipelines.
/* Hardware VUE header read by the vertex fetch (attribute 0 below). */
35 struct anv_vue_header vue_header
;
/* Screen-space corner position of the 3DPRIM_RECTLIST rectangle. */
36 float position
[2]; /**< 3DPRIM_RECTLIST */
/* Texel coordinate forwarded to the fragment shader for txf_ms. */
37 float tex_position
[2];
41 meta_resolve_save(struct anv_meta_saved_state
*saved_state
,
42 struct anv_cmd_buffer
*cmd_buffer
)
44 anv_meta_save(saved_state
, cmd_buffer
,
45 (1 << VK_DYNAMIC_STATE_VIEWPORT
) |
46 (1 << VK_DYNAMIC_STATE_SCISSOR
));
48 cmd_buffer
->state
.dynamic
.viewport
.count
= 0;
49 cmd_buffer
->state
.dynamic
.scissor
.count
= 0;
/* Undo meta_resolve_save(): hand the saved state back to the generic
 * meta restore path.
 */
static void
meta_resolve_restore(struct anv_meta_saved_state *saved_state,
                     struct anv_cmd_buffer *cmd_buffer)
{
   anv_meta_restore(saved_state, cmd_buffer);
}
60 get_pipeline_h(struct anv_device
*device
, uint32_t samples
)
62 uint32_t i
= ffs(samples
) - 2; /* log2(samples) - 1 */
65 assert(i
< ARRAY_SIZE(device
->meta_state
.resolve
.pipelines
));
67 return &device
->meta_state
.resolve
.pipelines
[i
];
/* Body fragment of build_nir_vs (the "static nir_shader *" signature and
 * the final "return b.shader;" are not visible in this chunk, and several
 * interior lines are missing — original line numbers jump).
 * Builds the pass-through vertex shader for the resolve meta-op: generic
 * attributes 0/1 are copied unmodified to VARYING_SLOT_POS and
 * VARYING_SLOT_VAR0.
 */
73 const struct glsl_type
*vec4
= glsl_vec4_type();
76 nir_variable
*a_position
;
77 nir_variable
*v_position
;
78 nir_variable
*a_tex_position
;
79 nir_variable
*v_tex_position
;
/* Start an empty vertex shader and give it a debug-friendly name. */
81 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_VERTEX
, NULL
);
82 b
.shader
->info
.name
= ralloc_strdup(b
.shader
, "meta_resolve_vs");
/* Input attribute 0: rectangle-corner position. */
84 a_position
= nir_variable_create(b
.shader
, nir_var_shader_in
, vec4
,
86 a_position
->data
.location
= VERT_ATTRIB_GENERIC0
;
/* Output: clip-space position. */
88 v_position
= nir_variable_create(b
.shader
, nir_var_shader_out
, vec4
,
90 v_position
->data
.location
= VARYING_SLOT_POS
;
/* Input attribute 1: texture coordinate for the fragment shader. */
92 a_tex_position
= nir_variable_create(b
.shader
, nir_var_shader_in
, vec4
,
94 a_tex_position
->data
.location
= VERT_ATTRIB_GENERIC1
;
96 v_tex_position
= nir_variable_create(b
.shader
, nir_var_shader_out
, vec4
,
98 v_tex_position
->data
.location
= VARYING_SLOT_VAR0
;
/* Pure pass-through: no transformation is applied to either varying. */
100 nir_copy_var(&b
, v_position
, a_position
);
101 nir_copy_var(&b
, v_tex_position
, a_tex_position
);
/* Fragment of build_nir_fs (fragmented view: the return type line, the
 * sampler-type trailing arguments, some declarations such as the
 * nir_tex_instr *tex, and the final "return b.shader;" are not visible —
 * original line numbers jump).
 * Builds a fragment shader that resolves an MS surface by averaging:
 * it fetches each of the num_samples samples with txf_ms at the varying
 * texel coordinate, sums them, divides by num_samples, and writes the
 * result to color output 0.
 */
107 build_nir_fs(uint32_t num_samples
)
109 const struct glsl_type
*vec4
= glsl_vec4_type();
111 const struct glsl_type
*sampler2DMS
=
112 glsl_sampler_type(GLSL_SAMPLER_DIM_MS
,
118 nir_variable
*u_tex
; /* uniform sampler */
119 nir_variable
*v_position
; /* vec4, varying fragment position */
120 nir_variable
*v_tex_position
; /* vec4, varying texture coordinate */
121 nir_variable
*f_color
; /* vec4, fragment output color */
122 nir_ssa_def
*accum
; /* vec4, accumulation of sample values */
/* Start an empty fragment shader; name encodes the sample count. */
124 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_FRAGMENT
, NULL
);
125 b
.shader
->info
.name
= ralloc_asprintf(b
.shader
,
126 "meta_resolve_fs_samples%02d",
/* Combined image/sampler at descriptor set 0, binding 0 — must match
 * the descriptor set layout created in anv_device_init_meta_resolve_state. */
129 u_tex
= nir_variable_create(b
.shader
, nir_var_uniform
, sampler2DMS
,
131 u_tex
->data
.descriptor_set
= 0;
132 u_tex
->data
.binding
= 0;
134 v_position
= nir_variable_create(b
.shader
, nir_var_shader_in
, vec4
,
136 v_position
->data
.location
= VARYING_SLOT_POS
;
137 v_position
->data
.origin_upper_left
= true;
139 v_tex_position
= nir_variable_create(b
.shader
, nir_var_shader_in
, vec4
,
141 v_tex_position
->data
.location
= VARYING_SLOT_VAR0
;
143 f_color
= nir_variable_create(b
.shader
, nir_var_shader_out
, vec4
,
145 f_color
->data
.location
= FRAG_RESULT_DATA0
;
/* Running sum of sample colors, starting at zero. */
147 accum
= nir_imm_vec4(&b
, 0, 0, 0, 0);
/* txf_ms takes integer texel coordinates: truncate the varying. */
149 nir_ssa_def
*tex_position_ivec
=
150 nir_f2i(&b
, nir_load_var(&b
, v_tex_position
));
/* Fetch and accumulate every sample of the texel. */
152 for (uint32_t i
= 0; i
< num_samples
; ++i
) {
155 tex
= nir_tex_instr_create(b
.shader
, /*num_srcs*/ 2);
156 tex
->texture
= nir_deref_var_create(tex
, u_tex
);
157 tex
->sampler
= nir_deref_var_create(tex
, u_tex
);
158 tex
->sampler_dim
= GLSL_SAMPLER_DIM_MS
;
159 tex
->op
= nir_texop_txf_ms
;
160 tex
->src
[0].src
= nir_src_for_ssa(tex_position_ivec
);
161 tex
->src
[0].src_type
= nir_tex_src_coord
;
/* Source 1 selects the sample index for this iteration. */
162 tex
->src
[1].src
= nir_src_for_ssa(nir_imm_int(&b
, i
));
163 tex
->src
[1].src_type
= nir_tex_src_ms_index
;
164 tex
->dest_type
= nir_type_float
;
165 tex
->is_array
= false;
166 tex
->coord_components
= 3;
167 nir_ssa_dest_init(&tex
->instr
, &tex
->dest
, 4, 32, "tex");
168 nir_builder_instr_insert(&b
, &tex
->instr
);
170 accum
= nir_fadd(&b
, accum
, &tex
->dest
.ssa
);
/* Average the accumulated samples. */
173 accum
= nir_fdiv(&b
, accum
, nir_imm_float(&b
, num_samples
));
/* NOTE(review): writemask 4 (0b100) writes only component 2 of the vec4
 * color output; a full-vec4 store would use 0xf — confirm against the
 * nir_store_var writemask convention in this Mesa revision. */
174 nir_store_var(&b
, f_color
, accum
, /*writemask*/ 4);
/* Fragment of create_pass (return-type line, braces and some initializer
 * lines are missing — original line numbers jump).
 * Creates the single-subpass render pass used by every resolve pipeline:
 * one color attachment in GENERAL layout with LOAD/STORE ops, no
 * depth/stencil, no dependencies. The handle is stored in
 * device->meta_state.resolve.pass.
 */
180 create_pass(struct anv_device
*device
)
183 VkDevice device_h
= anv_device_to_handle(device
);
184 const VkAllocationCallbacks
*alloc
= &device
->meta_state
.alloc
;
186 result
= anv_CreateRenderPass(device_h
,
187 &(VkRenderPassCreateInfo
) {
188 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
,
189 .attachmentCount
= 1,
190 .pAttachments
= &(VkAttachmentDescription
) {
/* Format is irrelevant to the meta shaders; the pipeline is reused for
 * any color format. */
191 .format
= VK_FORMAT_UNDEFINED
, /* Our shaders don't care */
193 .loadOp
= VK_ATTACHMENT_LOAD_OP_LOAD
,
194 .storeOp
= VK_ATTACHMENT_STORE_OP_STORE
,
195 .initialLayout
= VK_IMAGE_LAYOUT_GENERAL
,
196 .finalLayout
= VK_IMAGE_LAYOUT_GENERAL
,
199 .pSubpasses
= &(VkSubpassDescription
) {
200 .pipelineBindPoint
= VK_PIPELINE_BIND_POINT_GRAPHICS
,
201 .inputAttachmentCount
= 0,
202 .colorAttachmentCount
= 1,
203 .pColorAttachments
= &(VkAttachmentReference
) {
205 .layout
= VK_IMAGE_LAYOUT_GENERAL
,
207 .pResolveAttachments
= NULL
,
/* No depth/stencil: explicitly mark the attachment unused. */
208 .pDepthStencilAttachment
= &(VkAttachmentReference
) {
209 .attachment
= VK_ATTACHMENT_UNUSED
,
211 .preserveAttachmentCount
= 0,
212 .pPreserveAttachments
= NULL
,
214 .dependencyCount
= 0,
217 &device
->meta_state
.resolve
.pass
);
/* Fragment of create_pipeline (braces, error-handling labels and some
 * initializer lines are missing — original line numbers jump).
 * Builds one resolve graphics pipeline specialized for num_samples:
 * shared pass-through VS (vs_module_h) plus a freshly built averaging FS,
 * rect-list-style vertex layout over struct vertex_attrs, viewport and
 * scissor left dynamic. The result is stored via get_pipeline_h().
 */
223 create_pipeline(struct anv_device
*device
,
224 uint32_t num_samples
,
225 VkShaderModule vs_module_h
)
228 VkDevice device_h
= anv_device_to_handle(device
);
/* The FS is built per sample count and owned by this function. */
230 struct anv_shader_module fs_module
= {
231 .nir
= build_nir_fs(num_samples
),
234 if (!fs_module
.nir
) {
235 /* XXX: Need more accurate error */
236 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
240 result
= anv_graphics_pipeline_create(device_h
,
242 &(VkGraphicsPipelineCreateInfo
) {
243 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
245 .pStages
= (VkPipelineShaderStageCreateInfo
[]) {
247 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
248 .stage
= VK_SHADER_STAGE_VERTEX_BIT
,
249 .module
= vs_module_h
,
253 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
254 .stage
= VK_SHADER_STAGE_FRAGMENT_BIT
,
255 .module
= anv_shader_module_to_handle(&fs_module
),
/* One interleaved vertex binding over struct vertex_attrs. */
259 .pVertexInputState
= &(VkPipelineVertexInputStateCreateInfo
) {
260 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO
,
261 .vertexBindingDescriptionCount
= 1,
262 .pVertexBindingDescriptions
= (VkVertexInputBindingDescription
[]) {
265 .stride
= sizeof(struct vertex_attrs
),
266 .inputRate
= VK_VERTEX_INPUT_RATE_VERTEX
/* Three attributes matching the VS inputs: VUE header, position,
 * texture coordinate. */
269 .vertexAttributeDescriptionCount
= 3,
270 .pVertexAttributeDescriptions
= (VkVertexInputAttributeDescription
[]) {
275 .format
= VK_FORMAT_R32G32B32A32_UINT
,
276 .offset
= offsetof(struct vertex_attrs
, vue_header
),
282 .format
= VK_FORMAT_R32G32_SFLOAT
,
283 .offset
= offsetof(struct vertex_attrs
, position
),
286 /* Texture Coordinate */
289 .format
= VK_FORMAT_R32G32_SFLOAT
,
290 .offset
= offsetof(struct vertex_attrs
, tex_position
),
294 .pInputAssemblyState
= &(VkPipelineInputAssemblyStateCreateInfo
) {
295 .sType
= VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO
,
296 .topology
= VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
,
297 .primitiveRestartEnable
= false,
299 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
300 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
304 .pRasterizationState
= &(VkPipelineRasterizationStateCreateInfo
) {
305 .sType
= VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO
,
306 .depthClampEnable
= false,
307 .rasterizerDiscardEnable
= false,
308 .polygonMode
= VK_POLYGON_MODE_FILL
,
309 .cullMode
= VK_CULL_MODE_NONE
,
310 .frontFace
= VK_FRONT_FACE_COUNTER_CLOCKWISE
,
/* The resolve destination is single-sampled. */
312 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
313 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
314 .rasterizationSamples
= 1,
315 .sampleShadingEnable
= false,
316 .pSampleMask
= (VkSampleMask
[]) { 0x1 },
317 .alphaToCoverageEnable
= false,
318 .alphaToOneEnable
= false,
320 .pColorBlendState
= &(VkPipelineColorBlendStateCreateInfo
) {
321 .sType
= VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO
,
322 .logicOpEnable
= false,
323 .attachmentCount
= 1,
324 .pAttachments
= (VkPipelineColorBlendAttachmentState
[]) {
/* No blending; write all four channels. */
326 .colorWriteMask
= VK_COLOR_COMPONENT_R_BIT
|
327 VK_COLOR_COMPONENT_G_BIT
|
328 VK_COLOR_COMPONENT_B_BIT
|
329 VK_COLOR_COMPONENT_A_BIT
,
/* Viewport and scissor are set per-draw in emit_resolve(). */
333 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
334 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
335 .dynamicStateCount
= 2,
336 .pDynamicStates
= (VkDynamicState
[]) {
337 VK_DYNAMIC_STATE_VIEWPORT
,
338 VK_DYNAMIC_STATE_SCISSOR
,
341 .layout
= device
->meta_state
.resolve
.pipeline_layout
,
342 .renderPass
= device
->meta_state
.resolve
.pass
,
/* anv-private creation parameters for meta pipelines. */
345 &(struct anv_graphics_pipeline_create_info
) {
346 .color_attachment_count
= -1,
347 .use_repclear
= false,
348 .disable_viewport
= true,
349 .disable_scissor
= true,
353 &device
->meta_state
.alloc
,
354 get_pipeline_h(device
, num_samples
));
355 if (result
!= VK_SUCCESS
)
/* The NIR is owned locally; free it whether or not creation succeeded. */
361 ralloc_free(fs_module
.nir
);
/* Fragment of anv_device_finish_meta_resolve_state (braces and some guard
 * lines are missing — original line numbers jump).
 * Tears down everything anv_device_init_meta_resolve_state created:
 * render pass, pipeline layout, descriptor set layout, and every cached
 * per-sample-count pipeline. Also the cleanup path for a failed init,
 * hence the NULL-handle guards on the layouts.
 */
366 anv_device_finish_meta_resolve_state(struct anv_device
*device
)
368 struct anv_meta_state
*state
= &device
->meta_state
;
369 VkDevice device_h
= anv_device_to_handle(device
);
370 VkRenderPass pass_h
= device
->meta_state
.resolve
.pass
;
371 VkPipelineLayout pipeline_layout_h
= device
->meta_state
.resolve
.pipeline_layout
;
372 VkDescriptorSetLayout ds_layout_h
= device
->meta_state
.resolve
.ds_layout
;
373 const VkAllocationCallbacks
*alloc
= &device
->meta_state
.alloc
;
376 ANV_CALL(DestroyRenderPass
)(device_h
, pass_h
,
377 &device
->meta_state
.alloc
);
/* Guard: init may have failed before the layout was created. */
379 if (pipeline_layout_h
)
380 ANV_CALL(DestroyPipelineLayout
)(device_h
, pipeline_layout_h
, alloc
);
383 ANV_CALL(DestroyDescriptorSetLayout
)(device_h
, ds_layout_h
, alloc
);
/* Destroy each per-sample-count pipeline; unsupported counts left the
 * slot NULL, presumably skipped by a guard not visible in this chunk. */
385 for (uint32_t i
= 0; i
< ARRAY_SIZE(state
->resolve
.pipelines
); ++i
) {
386 VkPipeline pipeline_h
= state
->resolve
.pipelines
[i
];
389 ANV_CALL(DestroyPipeline
)(device_h
, pipeline_h
, alloc
);
/* Fragment of anv_device_init_meta_resolve_state (braces, goto labels and
 * some initializer lines are missing — original line numbers jump).
 * One-time setup of the resolve meta-op: shared pass-through VS,
 * descriptor set layout (one combined image/sampler at binding 0, FS
 * stage), pipeline layout, render pass, and one pipeline per sample
 * count supported by the hardware. On failure the visible cleanup call
 * is anv_device_finish_meta_resolve_state().
 */
395 anv_device_init_meta_resolve_state(struct anv_device
*device
)
397 VkResult res
= VK_SUCCESS
;
398 VkDevice device_h
= anv_device_to_handle(device
);
399 const VkAllocationCallbacks
*alloc
= &device
->meta_state
.alloc
;
/* Bitmask of sample counts this device supports (from ISL). */
401 const isl_sample_count_mask_t sample_count_mask
=
402 isl_device_get_sample_counts(&device
->isl_dev
);
404 zero(device
->meta_state
.resolve
);
/* The vertex shader is shared by every resolve pipeline. */
406 struct anv_shader_module vs_module
= { .nir
= build_nir_vs() };
407 if (!vs_module
.nir
) {
408 /* XXX: Need more accurate error */
409 res
= VK_ERROR_OUT_OF_HOST_MEMORY
;
413 VkShaderModule vs_module_h
= anv_shader_module_to_handle(&vs_module
);
/* Set layout must match u_tex in build_nir_fs: set 0, binding 0. */
415 res
= anv_CreateDescriptorSetLayout(device_h
,
416 &(VkDescriptorSetLayoutCreateInfo
) {
417 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
,
419 .pBindings
= (VkDescriptorSetLayoutBinding
[]) {
422 .descriptorType
= VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
,
423 .descriptorCount
= 1,
424 .stageFlags
= VK_SHADER_STAGE_FRAGMENT_BIT
,
429 &device
->meta_state
.resolve
.ds_layout
);
430 if (res
!= VK_SUCCESS
)
433 res
= anv_CreatePipelineLayout(device_h
,
434 &(VkPipelineLayoutCreateInfo
) {
435 .sType
= VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
,
437 .pSetLayouts
= (VkDescriptorSetLayout
[]) {
438 device
->meta_state
.resolve
.ds_layout
,
442 &device
->meta_state
.resolve
.pipeline_layout
);
443 if (res
!= VK_SUCCESS
)
446 res
= create_pass(device
);
447 if (res
!= VK_SUCCESS
)
/* One pipeline per slot; slot i serves sample count 2^(i+1), matching
 * get_pipeline_h()'s index mapping. */
451 i
< ARRAY_SIZE(device
->meta_state
.resolve
.pipelines
); ++i
) {
453 uint32_t sample_count
= 1 << (1 + i
);
/* Skip counts the hardware does not support. */
454 if (!(sample_count_mask
& sample_count
))
457 res
= create_pipeline(device
, sample_count
, vs_module_h
);
458 if (res
!= VK_SUCCESS
)
/* Failure path: tear down whatever was created so far. */
465 anv_device_finish_meta_resolve_state(device
);
/* The shared VS NIR is no longer needed once pipelines are baked. */
468 ralloc_free(vs_module
.nir
);
/* Fragment of emit_resolve (braces, several initializer lines, the
 * viewport/scissor bodies and the sampler_h declaration are missing —
 * original line numbers jump).
 * Records one resolve draw inside an already-begun render pass: uploads
 * three rect-list vertices mixing dest positions with src texel coords,
 * creates a transient nearest sampler + descriptor pool/set pointing at
 * src_iview, sets viewport/scissor from the framebuffer, binds the
 * pipeline for src_image->samples, draws, then destroys the transient
 * objects (safe once the draw is recorded).
 */
474 emit_resolve(struct anv_cmd_buffer
*cmd_buffer
,
475 struct anv_image_view
*src_iview
,
476 const VkOffset2D
*src_offset
,
477 struct anv_image_view
*dest_iview
,
478 const VkOffset2D
*dest_offset
,
479 const VkExtent2D
*resolve_extent
)
481 struct anv_device
*device
= cmd_buffer
->device
;
482 VkDevice device_h
= anv_device_to_handle(device
);
483 VkCommandBuffer cmd_buffer_h
= anv_cmd_buffer_to_handle(cmd_buffer
);
484 const struct anv_framebuffer
*fb
= cmd_buffer
->state
.framebuffer
;
485 const struct anv_image
*src_image
= src_iview
->image
;
/* Three corners of a 3DPRIM_RECTLIST: positions in dest space,
 * tex_position in src texel space (several corner lines not visible). */
487 const struct vertex_attrs vertex_data
[3] = {
491 dest_offset
->x
+ resolve_extent
->width
,
492 dest_offset
->y
+ resolve_extent
->height
,
495 src_offset
->x
+ resolve_extent
->width
,
496 src_offset
->y
+ resolve_extent
->height
,
503 dest_offset
->y
+ resolve_extent
->height
,
507 src_offset
->y
+ resolve_extent
->height
,
/* Upload the vertices into the command buffer's dynamic-state stream
 * (16-byte aligned) and wrap them in a transient anv_buffer. */
523 struct anv_state vertex_mem
=
524 anv_cmd_buffer_emit_dynamic(cmd_buffer
, vertex_data
,
525 sizeof(vertex_data
), 16);
527 struct anv_buffer vertex_buffer
= {
529 .size
= sizeof(vertex_data
),
530 .bo
= &cmd_buffer
->dynamic_state_stream
.block_pool
->bo
,
531 .offset
= vertex_mem
.offset
,
534 VkBuffer vertex_buffer_h
= anv_buffer_to_handle(&vertex_buffer
);
536 anv_CmdBindVertexBuffers(cmd_buffer_h
,
539 (VkBuffer
[]) { vertex_buffer_h
},
540 (VkDeviceSize
[]) { 0 });
/* Transient nearest/clamp sampler; the FS uses txf_ms so filtering is
 * nominal. The receiving sampler_h declaration is not visible here. */
543 ANV_CALL(CreateSampler
)(device_h
,
544 &(VkSamplerCreateInfo
) {
545 .sType
= VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO
,
546 .magFilter
= VK_FILTER_NEAREST
,
547 .minFilter
= VK_FILTER_NEAREST
,
548 .mipmapMode
= VK_SAMPLER_MIPMAP_MODE_NEAREST
,
549 .addressModeU
= VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE
,
550 .addressModeV
= VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE
,
551 .addressModeW
= VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE
,
553 .anisotropyEnable
= false,
554 .compareEnable
= false,
557 .unnormalizedCoordinates
= false,
559 &cmd_buffer
->pool
->alloc
,
/* Transient descriptor pool/set for the single combined image sampler. */
562 VkDescriptorPool desc_pool
;
563 anv_CreateDescriptorPool(anv_device_to_handle(device
),
564 &(const VkDescriptorPoolCreateInfo
) {
565 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO
,
570 .pPoolSizes
= (VkDescriptorPoolSize
[]) {
572 .type
= VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
,
576 }, &cmd_buffer
->pool
->alloc
, &desc_pool
);
578 VkDescriptorSet desc_set_h
;
579 anv_AllocateDescriptorSets(device_h
,
580 &(VkDescriptorSetAllocateInfo
) {
581 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO
,
582 .descriptorPool
= desc_pool
,
583 .descriptorSetCount
= 1,
584 .pSetLayouts
= (VkDescriptorSetLayout
[]) {
585 device
->meta_state
.resolve
.ds_layout
,
/* Point binding 0 at the multisampled source view. */
590 anv_UpdateDescriptorSets(device_h
,
592 (VkWriteDescriptorSet
[]) {
594 .sType
= VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET
,
595 .dstSet
= desc_set_h
,
597 .dstArrayElement
= 0,
598 .descriptorCount
= 1,
599 .descriptorType
= VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
,
600 .pImageInfo
= (VkDescriptorImageInfo
[]) {
602 .sampler
= sampler_h
,
603 .imageView
= anv_image_view_to_handle(src_iview
),
604 .imageLayout
= VK_IMAGE_LAYOUT_GENERAL
,
/* Viewport/scissor cover the whole framebuffer (bodies partly missing). */
612 ANV_CALL(CmdSetViewport
)(cmd_buffer_h
,
620 .height
= fb
->height
,
626 ANV_CALL(CmdSetScissor
)(cmd_buffer_h
,
632 .extent
= (VkExtent2D
) { fb
->width
, fb
->height
},
/* Bind the pipeline matching the source's sample count, only if it is
 * not already bound. */
636 VkPipeline pipeline_h
= *get_pipeline_h(device
, src_image
->samples
);
637 ANV_FROM_HANDLE(anv_pipeline
, pipeline
, pipeline_h
);
639 if (cmd_buffer
->state
.pipeline
!= pipeline
) {
640 anv_CmdBindPipeline(cmd_buffer_h
, VK_PIPELINE_BIND_POINT_GRAPHICS
,
644 anv_CmdBindDescriptorSets(cmd_buffer_h
,
645 VK_PIPELINE_BIND_POINT_GRAPHICS
,
646 device
->meta_state
.resolve
.pipeline_layout
,
649 (VkDescriptorSet
[]) {
/* One rect-list primitive: 3 vertices, 1 instance. */
655 ANV_CALL(CmdDraw
)(cmd_buffer_h
, 3, 1, 0, 0);
657 /* All objects below are consumed by the draw call. We may safely destroy
660 anv_DestroyDescriptorPool(anv_device_to_handle(device
),
661 desc_pool
, &cmd_buffer
->pool
->alloc
);
662 anv_DestroySampler(device_h
, sampler_h
,
663 &cmd_buffer
->pool
->alloc
);
/* Fragment of vkCmdResolveImage (braces, the src_image_h parameter, the
 * layer-loop increment, framebuffer handle declaration and parts of the
 * emit_resolve argument list are missing — original line numbers jump).
 * For each VkImageResolve region and each layer: create transient
 * src/dest image views, a one-attachment framebuffer, begin the meta
 * render pass, call emit_resolve(), end the pass and destroy the
 * framebuffer. Dynamic state is saved/restored around the whole loop.
 * NOTE(review): several occurrences of the byte sequence "®" below are
 * mojibake for "&reg" (e.g. "®ions" should read "&regions") — the
 * original bytes are preserved here unmodified; fix the encoding at the
 * source.
 */
666 void anv_CmdResolveImage(
667 VkCommandBuffer cmd_buffer_h
,
669 VkImageLayout src_image_layout
,
670 VkImage dest_image_h
,
671 VkImageLayout dest_image_layout
,
672 uint32_t region_count
,
673 const VkImageResolve
* regions
)
675 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, cmd_buffer_h
);
676 ANV_FROM_HANDLE(anv_image
, src_image
, src_image_h
);
677 ANV_FROM_HANDLE(anv_image
, dest_image
, dest_image_h
);
678 struct anv_device
*device
= cmd_buffer
->device
;
679 struct anv_meta_saved_state state
;
680 VkDevice device_h
= anv_device_to_handle(device
);
682 meta_resolve_save(&state
, cmd_buffer
);
/* Spec invariants: source is multisampled, destination is not. */
684 assert(src_image
->samples
> 1);
685 assert(dest_image
->samples
== 1);
687 if (src_image
->samples
>= 16) {
688 /* See commit aa3f9aaf31e9056a255f9e0472ebdfdaa60abe54 for the
689 * glBlitFramebuffer workaround for samples >= 16.
691 anv_finishme("vkCmdResolveImage: need interpolation workaround when "
695 if (src_image
->array_size
> 1)
696 anv_finishme("vkCmdResolveImage: multisample array images");
698 for (uint32_t r
= 0; r
< region_count
; ++r
) {
699 const VkImageResolve
*region
= &regions
[r
];
701 /* From the Vulkan 1.0 spec:
703 * - The aspectMask member of srcSubresource and dstSubresource must
704 * only contain VK_IMAGE_ASPECT_COLOR_BIT
706 * - The layerCount member of srcSubresource and dstSubresource must
709 assert(region
->srcSubresource
.aspectMask
== VK_IMAGE_ASPECT_COLOR_BIT
);
710 assert(region
->dstSubresource
.aspectMask
== VK_IMAGE_ASPECT_COLOR_BIT
);
711 assert(region
->srcSubresource
.layerCount
==
712 region
->dstSubresource
.layerCount
);
714 const uint32_t src_base_layer
=
715 anv_meta_get_iview_layer(src_image
, &region
->srcSubresource
,
718 const uint32_t dest_base_layer
=
719 anv_meta_get_iview_layer(dest_image
, &region
->dstSubresource
,
723 * From Vulkan 1.0.6 spec: 18.6 Resolving Multisample Images
725 * extent is the size in texels of the source image to resolve in width,
726 * height and depth. 1D images use only x and width. 2D images use x, y,
727 * width and height. 3D images use x, y, z, width, height and depth.
729 * srcOffset and dstOffset select the initial x, y, and z offsets in
730 * texels of the sub-regions of the source and destination image data.
731 * extent is the size in texels of the source image to resolve in width,
732 * height and depth. 1D images use only x and width. 2D images use x, y,
733 * width and height. 3D images use x, y, z, width, height and depth.
735 const struct VkExtent3D extent
=
736 anv_sanitize_image_extent(src_image
->type
, region
->extent
);
737 const struct VkOffset3D srcOffset
=
738 anv_sanitize_image_offset(src_image
->type
, region
->srcOffset
);
739 const struct VkOffset3D dstOffset
=
740 anv_sanitize_image_offset(dest_image
->type
, region
->dstOffset
);
/* One draw per layer of the region. */
743 for (uint32_t layer
= 0; layer
= 0; layer
< region
->srcSubresource
.layerCount
;
746 struct anv_image_view src_iview
;
/* Transient view of the multisampled source layer/level. */
747 anv_image_view_init(&src_iview
, cmd_buffer
->device
,
748 &(VkImageViewCreateInfo
) {
749 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
750 .image
= src_image_h
,
751 .viewType
= anv_meta_get_view_type(src_image
),
752 .format
= src_image
->format
->vk_format
,
753 .subresourceRange
= {
754 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
755 .baseMipLevel
= region
->srcSubresource
.mipLevel
,
757 .baseArrayLayer
= src_base_layer
+ layer
,
761 cmd_buffer
, 0, VK_IMAGE_USAGE_SAMPLED_BIT
);
763 struct anv_image_view dest_iview
;
/* Transient view of the single-sample destination layer/level. */
764 anv_image_view_init(&dest_iview
, cmd_buffer
->device
,
765 &(VkImageViewCreateInfo
) {
766 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
767 .image
= dest_image_h
,
768 .viewType
= anv_meta_get_view_type(dest_image
),
769 .format
= dest_image
->format
->vk_format
,
770 .subresourceRange
= {
771 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
772 .baseMipLevel
= region
->dstSubresource
.mipLevel
,
774 .baseArrayLayer
= dest_base_layer
+ layer
,
778 cmd_buffer
, 0, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
);
/* One-attachment framebuffer sized to the destination miplevel. */
781 anv_CreateFramebuffer(device_h
,
782 &(VkFramebufferCreateInfo
) {
783 .sType
= VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
,
784 .attachmentCount
= 1,
785 .pAttachments
= (VkImageView
[]) {
786 anv_image_view_to_handle(&dest_iview
),
788 .width
= anv_minify(dest_image
->extent
.width
,
789 region
->dstSubresource
.mipLevel
),
790 .height
= anv_minify(dest_image
->extent
.height
,
791 region
->dstSubresource
.mipLevel
),
794 &cmd_buffer
->pool
->alloc
,
797 ANV_CALL(CmdBeginRenderPass
)(cmd_buffer_h
,
798 &(VkRenderPassBeginInfo
) {
799 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO
,
800 .renderPass
= device
->meta_state
.resolve
.pass
,
812 .clearValueCount
= 0,
813 .pClearValues
= NULL
,
815 VK_SUBPASS_CONTENTS_INLINE
);
/* Record the actual resolve draw (offset arguments not fully visible). */
817 emit_resolve(cmd_buffer
,
829 .width
= extent
.width
,
830 .height
= extent
.height
,
833 ANV_CALL(CmdEndRenderPass
)(cmd_buffer_h
);
835 anv_DestroyFramebuffer(device_h
, fb_h
,
836 &cmd_buffer
->pool
->alloc
);
840 meta_resolve_restore(&state
, cmd_buffer
);
/* Fragment of anv_cmd_buffer_resolve_subpass (return type, braces,
 * continue statements and parts of the emit_resolve argument list are
 * missing — original line numbers jump).
 * At subpass end, performs every pending color->resolve attachment
 * resolve: for each color attachment with a used resolve attachment, it
 * temporarily switches to a single-attachment subpass targeting the
 * resolve attachment, draws a full-framebuffer resolve, then restores
 * the original subpass pointer and saved state.
 */
844 * Emit any needed resolves for the current subpass.
847 anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer
*cmd_buffer
)
849 struct anv_framebuffer
*fb
= cmd_buffer
->state
.framebuffer
;
850 struct anv_subpass
*subpass
= cmd_buffer
->state
.subpass
;
851 struct anv_meta_saved_state saved_state
;
853 /* FINISHME(perf): Skip clears for resolve attachments.
855 * From the Vulkan 1.0 spec:
857 * If the first use of an attachment in a render pass is as a resolve
858 * attachment, then the loadOp is effectively ignored as the resolve is
859 * guaranteed to overwrite all pixels in the render area.
/* Fast exit when the subpass has nothing to resolve. */
862 if (!subpass
->has_resolve
)
865 meta_resolve_save(&saved_state
, cmd_buffer
);
867 for (uint32_t i
= 0; i
< subpass
->color_count
; ++i
) {
868 uint32_t src_att
= subpass
->color_attachments
[i
];
869 uint32_t dest_att
= subpass
->resolve_attachments
[i
];
/* Skip color attachments with no paired resolve attachment. */
871 if (dest_att
== VK_ATTACHMENT_UNUSED
)
874 struct anv_image_view
*src_iview
= fb
->attachments
[src_att
];
875 struct anv_image_view
*dest_iview
= fb
->attachments
[dest_att
];
/* Temporary subpass rendering only into the resolve attachment. */
877 struct anv_subpass resolve_subpass
= {
879 .color_attachments
= (uint32_t[]) { dest_att
},
880 .depth_stencil_attachment
= VK_ATTACHMENT_UNUSED
,
883 anv_cmd_buffer_set_subpass(cmd_buffer
, &resolve_subpass
);
885 /* Subpass resolves must respect the render area. We can ignore the
886 * render area here because vkCmdBeginRenderPass set the render area
887 * with 3DSTATE_DRAWING_RECTANGLE.
889 * XXX(chadv): Does the hardware really respect
890 * 3DSTATE_DRAWING_RECTANGLE when drawing a 3DPRIM_RECTLIST?
/* Full-framebuffer resolve at offset (0, 0). */
892 emit_resolve(cmd_buffer
,
894 &(VkOffset2D
) { 0, 0 },
896 &(VkOffset2D
) { 0, 0 },
897 &(VkExtent2D
) { fb
->width
, fb
->height
});
/* Restore the real subpass pointer before handing control back. */
900 cmd_buffer
->state
.subpass
= subpass
;
901 meta_resolve_restore(&saved_state
, cmd_buffer
);