2 * Copyright © 2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 #include "radv_meta.h"
28 #include "radv_private.h"
29 #include "nir/nir_builder.h"
32 * Vertex attributes used by all pipelines.
35 float position
[2]; /**< 3DPRIM_RECTLIST */
38 /* passthrough vertex shader */
42 const struct glsl_type
*vec4
= glsl_vec4_type();
45 nir_variable
*a_position
;
46 nir_variable
*v_position
;
48 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_VERTEX
, NULL
);
49 b
.shader
->info
->name
= ralloc_strdup(b
.shader
, "meta_resolve_vs");
51 a_position
= nir_variable_create(b
.shader
, nir_var_shader_in
, vec4
,
53 a_position
->data
.location
= VERT_ATTRIB_GENERIC0
;
55 v_position
= nir_variable_create(b
.shader
, nir_var_shader_out
, vec4
,
57 v_position
->data
.location
= VARYING_SLOT_POS
;
59 nir_copy_var(&b
, v_position
, a_position
);
64 /* simple passthrough shader */
68 const struct glsl_type
*vec4
= glsl_vec4_type();
70 nir_variable
*f_color
; /* vec4, fragment output color */
72 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_FRAGMENT
, NULL
);
73 b
.shader
->info
->name
= ralloc_asprintf(b
.shader
,
76 f_color
= nir_variable_create(b
.shader
, nir_var_shader_out
, vec4
,
78 f_color
->data
.location
= FRAG_RESULT_DATA0
;
79 nir_store_var(&b
, f_color
, nir_imm_vec4(&b
, 0.0, 0.0, 0.0, 1.0), 0xf);
85 create_pass(struct radv_device
*device
)
88 VkDevice device_h
= radv_device_to_handle(device
);
89 const VkAllocationCallbacks
*alloc
= &device
->meta_state
.alloc
;
90 VkAttachmentDescription attachments
[2];
93 for (i
= 0; i
< 2; i
++) {
94 attachments
[i
].format
= VK_FORMAT_UNDEFINED
;
95 attachments
[i
].samples
= 1;
96 attachments
[i
].loadOp
= VK_ATTACHMENT_LOAD_OP_LOAD
;
97 attachments
[i
].storeOp
= VK_ATTACHMENT_STORE_OP_STORE
;
99 attachments
[0].initialLayout
= VK_IMAGE_LAYOUT_GENERAL
;
100 attachments
[0].finalLayout
= VK_IMAGE_LAYOUT_GENERAL
;
101 attachments
[1].initialLayout
= VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
;
102 attachments
[1].finalLayout
= VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
;
104 result
= radv_CreateRenderPass(device_h
,
105 &(VkRenderPassCreateInfo
) {
106 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
,
107 .attachmentCount
= 2,
108 .pAttachments
= attachments
,
110 .pSubpasses
= &(VkSubpassDescription
) {
111 .pipelineBindPoint
= VK_PIPELINE_BIND_POINT_GRAPHICS
,
112 .inputAttachmentCount
= 0,
113 .colorAttachmentCount
= 2,
114 .pColorAttachments
= (VkAttachmentReference
[]) {
117 .layout
= VK_IMAGE_LAYOUT_GENERAL
,
121 .layout
= VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
,
124 .pResolveAttachments
= NULL
,
125 .pDepthStencilAttachment
= &(VkAttachmentReference
) {
126 .attachment
= VK_ATTACHMENT_UNUSED
,
128 .preserveAttachmentCount
= 0,
129 .pPreserveAttachments
= NULL
,
131 .dependencyCount
= 0,
134 &device
->meta_state
.resolve
.pass
);
140 create_pipeline(struct radv_device
*device
,
141 VkShaderModule vs_module_h
)
144 VkDevice device_h
= radv_device_to_handle(device
);
146 struct radv_shader_module fs_module
= {
147 .nir
= build_nir_fs(),
150 if (!fs_module
.nir
) {
151 /* XXX: Need more accurate error */
152 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
156 result
= radv_graphics_pipeline_create(device_h
,
157 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
158 &(VkGraphicsPipelineCreateInfo
) {
159 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
161 .pStages
= (VkPipelineShaderStageCreateInfo
[]) {
163 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
164 .stage
= VK_SHADER_STAGE_VERTEX_BIT
,
165 .module
= vs_module_h
,
169 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
170 .stage
= VK_SHADER_STAGE_FRAGMENT_BIT
,
171 .module
= radv_shader_module_to_handle(&fs_module
),
175 .pVertexInputState
= &(VkPipelineVertexInputStateCreateInfo
) {
176 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO
,
177 .vertexBindingDescriptionCount
= 1,
178 .pVertexBindingDescriptions
= (VkVertexInputBindingDescription
[]) {
181 .stride
= sizeof(struct vertex_attrs
),
182 .inputRate
= VK_VERTEX_INPUT_RATE_VERTEX
185 .vertexAttributeDescriptionCount
= 1,
186 .pVertexAttributeDescriptions
= (VkVertexInputAttributeDescription
[]) {
191 .format
= VK_FORMAT_R32G32_SFLOAT
,
192 .offset
= offsetof(struct vertex_attrs
, position
),
196 .pInputAssemblyState
= &(VkPipelineInputAssemblyStateCreateInfo
) {
197 .sType
= VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO
,
198 .topology
= VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
,
199 .primitiveRestartEnable
= false,
201 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
202 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
206 .pRasterizationState
= &(VkPipelineRasterizationStateCreateInfo
) {
207 .sType
= VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO
,
208 .depthClampEnable
= false,
209 .rasterizerDiscardEnable
= false,
210 .polygonMode
= VK_POLYGON_MODE_FILL
,
211 .cullMode
= VK_CULL_MODE_NONE
,
212 .frontFace
= VK_FRONT_FACE_COUNTER_CLOCKWISE
,
214 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
215 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
216 .rasterizationSamples
= 1,
217 .sampleShadingEnable
= false,
219 .alphaToCoverageEnable
= false,
220 .alphaToOneEnable
= false,
222 .pColorBlendState
= &(VkPipelineColorBlendStateCreateInfo
) {
223 .sType
= VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO
,
224 .logicOpEnable
= false,
225 .attachmentCount
= 2,
226 .pAttachments
= (VkPipelineColorBlendAttachmentState
[]) {
228 .colorWriteMask
= VK_COLOR_COMPONENT_R_BIT
|
229 VK_COLOR_COMPONENT_G_BIT
|
230 VK_COLOR_COMPONENT_B_BIT
|
231 VK_COLOR_COMPONENT_A_BIT
,
239 .pDynamicState
= NULL
,
240 .renderPass
= device
->meta_state
.resolve
.pass
,
243 &(struct radv_graphics_pipeline_create_info
) {
244 .use_rectlist
= true,
245 .custom_blend_mode
= V_028808_CB_RESOLVE
,
247 &device
->meta_state
.alloc
,
248 &device
->meta_state
.resolve
.pipeline
);
249 if (result
!= VK_SUCCESS
)
255 ralloc_free(fs_module
.nir
);
260 radv_device_finish_meta_resolve_state(struct radv_device
*device
)
262 struct radv_meta_state
*state
= &device
->meta_state
;
263 VkDevice device_h
= radv_device_to_handle(device
);
264 VkRenderPass pass_h
= device
->meta_state
.resolve
.pass
;
265 const VkAllocationCallbacks
*alloc
= &device
->meta_state
.alloc
;
268 radv_DestroyRenderPass(device_h
, pass_h
,
269 &device
->meta_state
.alloc
);
271 VkPipeline pipeline_h
= state
->resolve
.pipeline
;
273 radv_DestroyPipeline(device_h
, pipeline_h
, alloc
);
278 radv_device_init_meta_resolve_state(struct radv_device
*device
)
280 VkResult res
= VK_SUCCESS
;
282 zero(device
->meta_state
.resolve
);
284 struct radv_shader_module vs_module
= { .nir
= build_nir_vs() };
285 if (!vs_module
.nir
) {
286 /* XXX: Need more accurate error */
287 res
= VK_ERROR_OUT_OF_HOST_MEMORY
;
291 res
= create_pass(device
);
292 if (res
!= VK_SUCCESS
)
295 VkShaderModule vs_module_h
= radv_shader_module_to_handle(&vs_module
);
296 res
= create_pipeline(device
, vs_module_h
);
297 if (res
!= VK_SUCCESS
)
303 radv_device_finish_meta_resolve_state(device
);
306 ralloc_free(vs_module
.nir
);
312 emit_resolve(struct radv_cmd_buffer
*cmd_buffer
,
313 const VkOffset2D
*dest_offset
,
314 const VkExtent2D
*resolve_extent
)
316 struct radv_device
*device
= cmd_buffer
->device
;
317 VkCommandBuffer cmd_buffer_h
= radv_cmd_buffer_to_handle(cmd_buffer
);
319 const struct vertex_attrs vertex_data
[3] = {
329 dest_offset
->y
+ resolve_extent
->height
,
334 dest_offset
->x
+ resolve_extent
->width
,
340 cmd_buffer
->state
.flush_bits
|= RADV_CMD_FLAG_FLUSH_AND_INV_CB
;
341 radv_cmd_buffer_upload_data(cmd_buffer
, sizeof(vertex_data
), 16, vertex_data
, &offset
);
342 struct radv_buffer vertex_buffer
= {
344 .size
= sizeof(vertex_data
),
345 .bo
= cmd_buffer
->upload
.upload_bo
,
349 VkBuffer vertex_buffer_h
= radv_buffer_to_handle(&vertex_buffer
);
351 radv_CmdBindVertexBuffers(cmd_buffer_h
,
354 (VkBuffer
[]) { vertex_buffer_h
},
355 (VkDeviceSize
[]) { 0 });
357 VkPipeline pipeline_h
= device
->meta_state
.resolve
.pipeline
;
358 RADV_FROM_HANDLE(radv_pipeline
, pipeline
, pipeline_h
);
360 if (cmd_buffer
->state
.pipeline
!= pipeline
) {
361 radv_CmdBindPipeline(cmd_buffer_h
, VK_PIPELINE_BIND_POINT_GRAPHICS
,
365 radv_CmdDraw(cmd_buffer_h
, 3, 1, 0, 0);
366 cmd_buffer
->state
.flush_bits
|= RADV_CMD_FLAG_FLUSH_AND_INV_CB
;
367 si_emit_cache_flush(cmd_buffer
);
370 void radv_CmdResolveImage(
371 VkCommandBuffer cmd_buffer_h
,
373 VkImageLayout src_image_layout
,
374 VkImage dest_image_h
,
375 VkImageLayout dest_image_layout
,
376 uint32_t region_count
,
377 const VkImageResolve
* regions
)
379 RADV_FROM_HANDLE(radv_cmd_buffer
, cmd_buffer
, cmd_buffer_h
);
380 RADV_FROM_HANDLE(radv_image
, src_image
, src_image_h
);
381 RADV_FROM_HANDLE(radv_image
, dest_image
, dest_image_h
);
382 struct radv_device
*device
= cmd_buffer
->device
;
383 struct radv_meta_saved_state saved_state
;
384 VkDevice device_h
= radv_device_to_handle(device
);
385 bool use_compute_resolve
= false;
387 /* we can use the hw resolve only for single full resolves */
388 if (region_count
== 1) {
389 if (regions
[0].srcOffset
.x
||
390 regions
[0].srcOffset
.y
||
391 regions
[0].srcOffset
.z
)
392 use_compute_resolve
= true;
393 if (regions
[0].dstOffset
.x
||
394 regions
[0].dstOffset
.y
||
395 regions
[0].dstOffset
.z
)
396 use_compute_resolve
= true;
398 if (regions
[0].extent
.width
!= src_image
->extent
.width
||
399 regions
[0].extent
.height
!= src_image
->extent
.height
||
400 regions
[0].extent
.depth
!= src_image
->extent
.depth
)
401 use_compute_resolve
= true;
403 use_compute_resolve
= true;
405 if (use_compute_resolve
) {
407 radv_meta_resolve_compute_image(cmd_buffer
,
412 region_count
, regions
);
416 radv_meta_save_graphics_reset_vport_scissor(&saved_state
, cmd_buffer
);
418 assert(src_image
->samples
> 1);
419 assert(dest_image
->samples
== 1);
421 if (src_image
->samples
>= 16) {
422 /* See commit aa3f9aaf31e9056a255f9e0472ebdfdaa60abe54 for the
423 * glBlitFramebuffer workaround for samples >= 16.
425 radv_finishme("vkCmdResolveImage: need interpolation workaround when "
429 if (src_image
->array_size
> 1)
430 radv_finishme("vkCmdResolveImage: multisample array images");
432 if (dest_image
->surface
.dcc_size
) {
433 radv_initialize_dcc(cmd_buffer
, dest_image
, 0xffffffff);
435 for (uint32_t r
= 0; r
< region_count
; ++r
) {
436 const VkImageResolve
*region
= ®ions
[r
];
438 /* From the Vulkan 1.0 spec:
440 * - The aspectMask member of srcSubresource and dstSubresource must
441 * only contain VK_IMAGE_ASPECT_COLOR_BIT
443 * - The layerCount member of srcSubresource and dstSubresource must
446 assert(region
->srcSubresource
.aspectMask
== VK_IMAGE_ASPECT_COLOR_BIT
);
447 assert(region
->dstSubresource
.aspectMask
== VK_IMAGE_ASPECT_COLOR_BIT
);
448 assert(region
->srcSubresource
.layerCount
==
449 region
->dstSubresource
.layerCount
);
451 const uint32_t src_base_layer
=
452 radv_meta_get_iview_layer(src_image
, ®ion
->srcSubresource
,
455 const uint32_t dest_base_layer
=
456 radv_meta_get_iview_layer(dest_image
, ®ion
->dstSubresource
,
460 * From Vulkan 1.0.6 spec: 18.6 Resolving Multisample Images
462 * extent is the size in texels of the source image to resolve in width,
463 * height and depth. 1D images use only x and width. 2D images use x, y,
464 * width and height. 3D images use x, y, z, width, height and depth.
466 * srcOffset and dstOffset select the initial x, y, and z offsets in
467 * texels of the sub-regions of the source and destination image data.
468 * extent is the size in texels of the source image to resolve in width,
469 * height and depth. 1D images use only x and width. 2D images use x, y,
470 * width and height. 3D images use x, y, z, width, height and depth.
472 const struct VkExtent3D extent
=
473 radv_sanitize_image_extent(src_image
->type
, region
->extent
);
474 const struct VkOffset3D dstOffset
=
475 radv_sanitize_image_offset(dest_image
->type
, region
->dstOffset
);
478 for (uint32_t layer
= 0; layer
< region
->srcSubresource
.layerCount
;
481 struct radv_image_view src_iview
;
482 radv_image_view_init(&src_iview
, cmd_buffer
->device
,
483 &(VkImageViewCreateInfo
) {
484 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
485 .image
= src_image_h
,
486 .viewType
= radv_meta_get_view_type(src_image
),
487 .format
= src_image
->vk_format
,
488 .subresourceRange
= {
489 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
490 .baseMipLevel
= region
->srcSubresource
.mipLevel
,
492 .baseArrayLayer
= src_base_layer
+ layer
,
496 cmd_buffer
, VK_IMAGE_USAGE_SAMPLED_BIT
);
498 struct radv_image_view dest_iview
;
499 radv_image_view_init(&dest_iview
, cmd_buffer
->device
,
500 &(VkImageViewCreateInfo
) {
501 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
502 .image
= dest_image_h
,
503 .viewType
= radv_meta_get_view_type(dest_image
),
504 .format
= dest_image
->vk_format
,
505 .subresourceRange
= {
506 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
507 .baseMipLevel
= region
->dstSubresource
.mipLevel
,
509 .baseArrayLayer
= dest_base_layer
+ layer
,
513 cmd_buffer
, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
);
516 radv_CreateFramebuffer(device_h
,
517 &(VkFramebufferCreateInfo
) {
518 .sType
= VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
,
519 .attachmentCount
= 2,
520 .pAttachments
= (VkImageView
[]) {
521 radv_image_view_to_handle(&src_iview
),
522 radv_image_view_to_handle(&dest_iview
),
524 .width
= radv_minify(dest_image
->extent
.width
,
525 region
->dstSubresource
.mipLevel
),
526 .height
= radv_minify(dest_image
->extent
.height
,
527 region
->dstSubresource
.mipLevel
),
530 &cmd_buffer
->pool
->alloc
,
533 radv_CmdBeginRenderPass(cmd_buffer_h
,
534 &(VkRenderPassBeginInfo
) {
535 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO
,
536 .renderPass
= device
->meta_state
.resolve
.pass
,
548 .clearValueCount
= 0,
549 .pClearValues
= NULL
,
551 VK_SUBPASS_CONTENTS_INLINE
);
553 emit_resolve(cmd_buffer
,
559 .width
= extent
.width
,
560 .height
= extent
.height
,
563 radv_CmdEndRenderPass(cmd_buffer_h
);
565 radv_DestroyFramebuffer(device_h
, fb_h
,
566 &cmd_buffer
->pool
->alloc
);
570 radv_meta_restore(&saved_state
, cmd_buffer
);
574 * Emit any needed resolves for the current subpass.
577 radv_cmd_buffer_resolve_subpass(struct radv_cmd_buffer
*cmd_buffer
)
579 struct radv_framebuffer
*fb
= cmd_buffer
->state
.framebuffer
;
580 const struct radv_subpass
*subpass
= cmd_buffer
->state
.subpass
;
581 struct radv_meta_saved_state saved_state
;
583 /* FINISHME(perf): Skip clears for resolve attachments.
585 * From the Vulkan 1.0 spec:
587 * If the first use of an attachment in a render pass is as a resolve
588 * attachment, then the loadOp is effectively ignored as the resolve is
589 * guaranteed to overwrite all pixels in the render area.
592 if (!subpass
->has_resolve
)
595 radv_meta_save_graphics_reset_vport_scissor(&saved_state
, cmd_buffer
);
597 for (uint32_t i
= 0; i
< subpass
->color_count
; ++i
) {
598 VkAttachmentReference src_att
= subpass
->color_attachments
[i
];
599 VkAttachmentReference dest_att
= subpass
->resolve_attachments
[i
];
600 struct radv_image
*dst_img
= cmd_buffer
->state
.framebuffer
->attachments
[dest_att
.attachment
].attachment
->image
;
601 if (dest_att
.attachment
== VK_ATTACHMENT_UNUSED
)
604 if (dst_img
->surface
.dcc_size
) {
605 radv_initialize_dcc(cmd_buffer
, dst_img
, 0xffffffff);
606 cmd_buffer
->state
.attachments
[dest_att
.attachment
].current_layout
= VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
;
609 struct radv_subpass resolve_subpass
= {
611 .color_attachments
= (VkAttachmentReference
[]) { src_att
, dest_att
},
612 .depth_stencil_attachment
= { .attachment
= VK_ATTACHMENT_UNUSED
},
615 radv_cmd_buffer_set_subpass(cmd_buffer
, &resolve_subpass
, false);
617 /* Subpass resolves must respect the render area. We can ignore the
618 * render area here because vkCmdBeginRenderPass set the render area
619 * with 3DSTATE_DRAWING_RECTANGLE.
621 * XXX(chadv): Does the hardware really respect
622 * 3DSTATE_DRAWING_RECTANGLE when draing a 3DPRIM_RECTLIST?
624 emit_resolve(cmd_buffer
,
625 &(VkOffset2D
) { 0, 0 },
626 &(VkExtent2D
) { fb
->width
, fb
->height
});
629 cmd_buffer
->state
.subpass
= subpass
;
630 radv_meta_restore(&saved_state
, cmd_buffer
);