2 * Copyright © 2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 #include "radv_meta.h"
28 #include "radv_private.h"
29 #include "nir/nir_builder.h"
/* NOTE(review): body fragment of a NIR fragment-shader builder — the enclosing
 * function definition, the nir_builder declaration, and the trailing return
 * are on lines missing from this chunk; confirm against the full file. */
/* The shader's single output: a vec4 color at FRAG_RESULT_DATA0. */
36 const struct glsl_type
*vec4
= glsl_vec4_type();
38 nir_variable
*f_color
; /* vec4, fragment output color */
/* Start a minimal fragment shader (no inputs, no options). */
40 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_FRAGMENT
, NULL
);
/* Give the shader a debug name (format-string argument not visible here). */
41 b
.shader
->info
.name
= ralloc_asprintf(b
.shader
,
/* Create the vec4 shader-out variable (its name argument is not visible). */
44 f_color
= nir_variable_create(b
.shader
, nir_var_shader_out
, vec4
,
46 f_color
->data
.location
= FRAG_RESULT_DATA0
;
/* Store opaque black (0,0,0,1) to all four components (write mask 0xf). */
47 nir_store_var(&b
, f_color
, nir_imm_vec4(&b
, 0.0, 0.0, 0.0, 1.0), 0xf);
/* create_pass: builds the two-attachment render pass used by the HW resolve
 * path and stores its handle in device->meta_state.resolve.pass.
 * NOTE(review): the return-type line, local declarations (result, i), and
 * several brace/terminator lines are missing from this chunk — confirm
 * against the full file. */
53 create_pass(struct radv_device
*device
)
56 VkDevice device_h
= radv_device_to_handle(device
);
/* Allocation callbacks shared by all meta-state objects. */
57 const VkAllocationCallbacks
*alloc
= &device
->meta_state
.alloc
;
/* Attachment [0] is the multisampled source, [1] the single-sample
 * destination (see the framebuffer built in radv_CmdResolveImage, which
 * attaches src_iview then dest_iview in that order). */
58 VkAttachmentDescription attachments
[2];
/* Common settings for both attachments: format left UNDEFINED (picked up
 * from the framebuffer views), contents LOADed and STOREd so the pass never
 * clobbers data outside the resolve.
 * NOTE(review): .samples is set to 1 for both attachments even though the
 * source image is multisampled — confirm this is intentional. */
61 for (i
= 0; i
< 2; i
++) {
62 attachments
[i
].format
= VK_FORMAT_UNDEFINED
;
63 attachments
[i
].samples
= 1;
64 attachments
[i
].loadOp
= VK_ATTACHMENT_LOAD_OP_LOAD
;
65 attachments
[i
].storeOp
= VK_ATTACHMENT_STORE_OP_STORE
;
/* Source stays in GENERAL; destination stays COLOR_ATTACHMENT_OPTIMAL. */
67 attachments
[0].initialLayout
= VK_IMAGE_LAYOUT_GENERAL
;
68 attachments
[0].finalLayout
= VK_IMAGE_LAYOUT_GENERAL
;
69 attachments
[1].initialLayout
= VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
;
70 attachments
[1].finalLayout
= VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
;
/* Single graphics subpass with both attachments bound as color outputs and
 * no depth/stencil (VK_ATTACHMENT_UNUSED). */
72 result
= radv_CreateRenderPass(device_h
,
73 &(VkRenderPassCreateInfo
) {
74 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
,
76 .pAttachments
= attachments
,
78 .pSubpasses
= &(VkSubpassDescription
) {
79 .pipelineBindPoint
= VK_PIPELINE_BIND_POINT_GRAPHICS
,
80 .inputAttachmentCount
= 0,
81 .colorAttachmentCount
= 2,
82 .pColorAttachments
= (VkAttachmentReference
[]) {
85 .layout
= VK_IMAGE_LAYOUT_GENERAL
,
89 .layout
= VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
,
92 .pResolveAttachments
= NULL
,
93 .pDepthStencilAttachment
= &(VkAttachmentReference
) {
94 .attachment
= VK_ATTACHMENT_UNUSED
,
96 .preserveAttachmentCount
= 0,
97 .pPreserveAttachments
= NULL
,
/* Output handle lands in the device's meta state. */
102 &device
->meta_state
.resolve
.pass
);
/* create_pipeline: builds the graphics pipeline for the HW resolve path and
 * stores it in device->meta_state.resolve.pipeline.  Uses the supplied
 * vertex-shader module and a locally built no-op fragment shader; the real
 * resolve work is done by the CB hardware via custom_blend_mode.
 * NOTE(review): the return-type line, the `result` declaration, goto/return
 * lines, and several brace lines are missing from this chunk. */
108 create_pipeline(struct radv_device
*device
,
109 VkShaderModule vs_module_h
)
112 VkDevice device_h
= radv_device_to_handle(device
);
/* Fragment shader built on the fly; freed at the end of this function. */
114 struct radv_shader_module fs_module
= {
115 .nir
= build_nir_fs(),
118 if (!fs_module
.nir
) {
119 /* XXX: Need more accurate error */
120 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
/* Create the pipeline through the meta pipeline cache. */
124 result
= radv_graphics_pipeline_create(device_h
,
125 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
126 &(VkGraphicsPipelineCreateInfo
) {
127 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
/* Two stages: caller-provided VS, locally built FS. */
129 .pStages
= (VkPipelineShaderStageCreateInfo
[]) {
131 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
132 .stage
= VK_SHADER_STAGE_VERTEX_BIT
,
133 .module
= vs_module_h
,
137 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
138 .stage
= VK_SHADER_STAGE_FRAGMENT_BIT
,
139 .module
= radv_shader_module_to_handle(&fs_module
),
/* No vertex attributes: the VS generates its own vertices. */
143 .pVertexInputState
= &(VkPipelineVertexInputStateCreateInfo
) {
144 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO
,
145 .vertexBindingDescriptionCount
= 0,
146 .vertexAttributeDescriptionCount
= 0,
148 .pInputAssemblyState
= &(VkPipelineInputAssemblyStateCreateInfo
) {
149 .sType
= VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO
,
150 .topology
= VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
,
151 .primitiveRestartEnable
= false,
/* Viewport/scissor are dynamic (set per resolve in emit_resolve). */
153 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
154 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
158 .pRasterizationState
= &(VkPipelineRasterizationStateCreateInfo
) {
159 .sType
= VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO
,
160 .depthClampEnable
= false,
161 .rasterizerDiscardEnable
= false,
162 .polygonMode
= VK_POLYGON_MODE_FILL
,
163 .cullMode
= VK_CULL_MODE_NONE
,
164 .frontFace
= VK_FRONT_FACE_COUNTER_CLOCKWISE
,
166 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
167 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
168 .rasterizationSamples
= 1,
169 .sampleShadingEnable
= false,
171 .alphaToCoverageEnable
= false,
172 .alphaToOneEnable
= false,
/* Both color attachments write RGBA with blending disabled; initializer
 * lines for the second attachment state are missing from this chunk. */
174 .pColorBlendState
= &(VkPipelineColorBlendStateCreateInfo
) {
175 .sType
= VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO
,
176 .logicOpEnable
= false,
177 .attachmentCount
= 2,
178 .pAttachments
= (VkPipelineColorBlendAttachmentState
[]) {
180 .colorWriteMask
= VK_COLOR_COMPONENT_R_BIT
|
181 VK_COLOR_COMPONENT_G_BIT
|
182 VK_COLOR_COMPONENT_B_BIT
|
183 VK_COLOR_COMPONENT_A_BIT
,
191 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
192 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
193 .dynamicStateCount
= 2,
194 .pDynamicStates
= (VkDynamicState
[]) {
195 VK_DYNAMIC_STATE_VIEWPORT
,
196 VK_DYNAMIC_STATE_SCISSOR
,
199 .renderPass
= device
->meta_state
.resolve
.pass
,
/* radv-specific extras: draw as a rectlist and let the CB hardware perform
 * the resolve (V_028808_CB_RESOLVE). */
202 &(struct radv_graphics_pipeline_create_info
) {
203 .use_rectlist
= true,
204 .custom_blend_mode
= V_028808_CB_RESOLVE
,
206 &device
->meta_state
.alloc
,
207 &device
->meta_state
.resolve
.pipeline
);
208 if (result
!= VK_SUCCESS
)
/* The NIR FS is owned locally; free it on every path. */
214 ralloc_free(fs_module
.nir
);
/* Tear down the resolve meta state created by
 * radv_device_init_meta_resolve_state: destroys the render pass and the
 * pipeline.  Safe to call on partially initialized state since it is also
 * used on init failure paths (see L406).
 * NOTE(review): the return-type line and brace lines are missing from this
 * chunk. */
219 radv_device_finish_meta_resolve_state(struct radv_device
*device
)
221 struct radv_meta_state
*state
= &device
->meta_state
;
222 VkDevice device_h
= radv_device_to_handle(device
);
223 VkRenderPass pass_h
= device
->meta_state
.resolve
.pass
;
224 const VkAllocationCallbacks
*alloc
= &device
->meta_state
.alloc
;
/* Destroy the render pass created in create_pass(). */
227 radv_DestroyRenderPass(device_h
, pass_h
,
228 &device
->meta_state
.alloc
);
/* Destroy the pipeline created in create_pipeline(). */
230 VkPipeline pipeline_h
= state
->resolve
.pipeline
;
232 radv_DestroyPipeline(device_h
, pipeline_h
, alloc
);
/* Initialize the resolve meta state: zero the state, build a vertex-shader
 * NIR module, then create the render pass and pipeline.  On any failure the
 * partially created state is destroyed via
 * radv_device_finish_meta_resolve_state (L406) and the error is returned.
 * NOTE(review): the return-type line, goto labels/targets, and the final
 * return are on lines missing from this chunk. */
237 radv_device_init_meta_resolve_state(struct radv_device
*device
)
239 VkResult res
= VK_SUCCESS
;
241 zero(device
->meta_state
.resolve
);
/* VS that generates its own vertices (no vertex input needed). */
243 struct radv_shader_module vs_module
= { .nir
= radv_meta_build_nir_vs_generate_vertices() };
244 if (!vs_module
.nir
) {
245 /* XXX: Need more accurate error */
246 res
= VK_ERROR_OUT_OF_HOST_MEMORY
;
250 res
= create_pass(device
);
251 if (res
!= VK_SUCCESS
)
254 VkShaderModule vs_module_h
= radv_shader_module_to_handle(&vs_module
);
255 res
= create_pipeline(device
, vs_module_h
);
256 if (res
!= VK_SUCCESS
)
/* Failure path: undo whatever was created so far. */
262 radv_device_finish_meta_resolve_state(device
);
/* The VS NIR is owned locally; freed on all paths. */
265 ralloc_free(vs_module
.nir
);
/* Record the draw that performs one HW resolve: flush/invalidate CB, bind
 * the resolve pipeline if needed, program dynamic viewport/scissor to the
 * destination rectangle, and draw 3 vertices (the pipeline was created with
 * use_rectlist = true).  CB is flushed again afterwards so the resolved
 * contents are visible.
 * NOTE(review): the return-type line, viewport .x/.y initializers, and brace
 * lines are missing from this chunk. */
271 emit_resolve(struct radv_cmd_buffer
*cmd_buffer
,
272 const VkOffset2D
*dest_offset
,
273 const VkExtent2D
*resolve_extent
)
275 struct radv_device
*device
= cmd_buffer
->device
;
276 VkCommandBuffer cmd_buffer_h
= radv_cmd_buffer_to_handle(cmd_buffer
);
/* Make prior color writes visible before the resolve draw. */
278 cmd_buffer
->state
.flush_bits
|= RADV_CMD_FLAG_FLUSH_AND_INV_CB
;
280 VkPipeline pipeline_h
= device
->meta_state
.resolve
.pipeline
;
281 RADV_FROM_HANDLE(radv_pipeline
, pipeline
, pipeline_h
);
/* Avoid a redundant bind if the resolve pipeline is already current. */
283 if (cmd_buffer
->state
.pipeline
!= pipeline
) {
284 radv_CmdBindPipeline(cmd_buffer_h
, VK_PIPELINE_BIND_POINT_GRAPHICS
,
/* Viewport sized to the resolve extent (x/y lines not visible here). */
288 radv_CmdSetViewport(radv_cmd_buffer_to_handle(cmd_buffer
), 0, 1, &(VkViewport
) {
291 .width
= resolve_extent
->width
,
292 .height
= resolve_extent
->height
,
/* Scissor clips to the destination rectangle. */
297 radv_CmdSetScissor(radv_cmd_buffer_to_handle(cmd_buffer
), 0, 1, &(VkRect2D
) {
298 .offset
= *dest_offset
,
299 .extent
= *resolve_extent
,
/* Three vertices: the generated rect covering the resolve area. */
302 radv_CmdDraw(cmd_buffer_h
, 3, 1, 0, 0);
/* Flush again so the resolved destination is coherent for later use. */
303 cmd_buffer
->state
.flush_bits
|= RADV_CMD_FLAG_FLUSH_AND_INV_CB
;
/* Resolve strategy selector.  The enumerator lines are missing from this
 * chunk; later uses show RESOLVE_HW (L543/L903), RESOLVE_COMPUTE (L561) and
 * RESOLVE_FRAGMENT (L501). */
306 enum radv_resolve_method
{
312 static void radv_pick_resolve_method_images(struct radv_image
*src_image
,
313 struct radv_image
*dest_image
,
314 enum radv_resolve_method
*method
)
317 if (dest_image
->surface
.micro_tile_mode
!= src_image
->surface
.micro_tile_mode
) {
318 if (dest_image
->surface
.num_dcc_levels
> 0)
319 *method
= RESOLVE_FRAGMENT
;
321 *method
= RESOLVE_COMPUTE
;
/* vkCmdResolveImage entry point.  Chooses a resolve method (HW only for a
 * single, offset-free, full-image region), dispatches to the fragment or
 * compute fallbacks, and otherwise records the HW resolve: per region and
 * per layer it builds src/dest image views, a 2-attachment framebuffer, and
 * runs the meta render pass with emit_resolve().
 * NOTE(review): many lines (local fb_h, loop closers, render-area fields,
 * goto/return statements, braces) are missing from this chunk.  Also note
 * the "®" characters below are mojibake for "&reg…" (i.e. "&regions",
 * "&region") introduced by extraction — do not treat them as source. */
325 void radv_CmdResolveImage(
326 VkCommandBuffer cmd_buffer_h
,
328 VkImageLayout src_image_layout
,
329 VkImage dest_image_h
,
330 VkImageLayout dest_image_layout
,
331 uint32_t region_count
,
332 const VkImageResolve
* regions
)
334 RADV_FROM_HANDLE(radv_cmd_buffer
, cmd_buffer
, cmd_buffer_h
);
335 RADV_FROM_HANDLE(radv_image
, src_image
, src_image_h
);
336 RADV_FROM_HANDLE(radv_image
, dest_image
, dest_image_h
);
337 struct radv_device
*device
= cmd_buffer
->device
;
338 struct radv_meta_saved_state saved_state
;
339 VkDevice device_h
= radv_device_to_handle(device
);
/* Default to the HW path; demoted to COMPUTE/FRAGMENT below. */
340 enum radv_resolve_method resolve_method
= RESOLVE_HW
;
341 /* we can use the hw resolve only for single full resolves */
342 if (region_count
== 1) {
/* Any nonzero src offset disqualifies the HW path. */
343 if (regions
[0].srcOffset
.x
||
344 regions
[0].srcOffset
.y
||
345 regions
[0].srcOffset
.z
)
346 resolve_method
= RESOLVE_COMPUTE
;
/* Any nonzero dst offset disqualifies the HW path. */
347 if (regions
[0].dstOffset
.x
||
348 regions
[0].dstOffset
.y
||
349 regions
[0].dstOffset
.z
)
350 resolve_method
= RESOLVE_COMPUTE
;
/* The region must cover the whole source image. */
352 if (regions
[0].extent
.width
!= src_image
->info
.width
||
353 regions
[0].extent
.height
!= src_image
->info
.height
||
354 regions
[0].extent
.depth
!= src_image
->info
.depth
)
355 resolve_method
= RESOLVE_COMPUTE
;
/* Multiple regions: never the HW path. */
357 resolve_method
= RESOLVE_COMPUTE
;
/* Tile-mode mismatch may further demote to FRAGMENT/COMPUTE. */
359 radv_pick_resolve_method_images(src_image
, dest_image
,
/* Dispatch to the fallback implementations and return early (the return
 * statements are on lines missing from this chunk). */
362 if (resolve_method
== RESOLVE_FRAGMENT
) {
363 radv_meta_resolve_fragment_image(cmd_buffer
,
368 region_count
, regions
);
372 if (resolve_method
== RESOLVE_COMPUTE
) {
373 radv_meta_resolve_compute_image(cmd_buffer
,
378 region_count
, regions
);
/* HW path: save graphics state; restored at L881. */
382 radv_meta_save_graphics_reset_vport_scissor_novertex(&saved_state
, cmd_buffer
);
384 assert(src_image
->info
.samples
> 1);
385 if (src_image
->info
.samples
<= 1) {
386 /* this causes GPU hangs if we get past here */
387 fprintf(stderr
, "radv: Illegal resolve operation (src not multisampled), will hang GPU.");
390 assert(dest_image
->info
.samples
== 1);
392 if (src_image
->info
.samples
>= 16) {
393 /* See commit aa3f9aaf31e9056a255f9e0472ebdfdaa60abe54 for the
394 * glBlitFramebuffer workaround for samples >= 16.
396 radv_finishme("vkCmdResolveImage: need interpolation workaround when "
400 if (src_image
->info
.array_size
> 1)
401 radv_finishme("vkCmdResolveImage: multisample array images");
/* DCC destinations must be (re)initialized before the CB writes. */
403 if (dest_image
->surface
.dcc_size
) {
404 radv_initialize_dcc(cmd_buffer
, dest_image
, 0xffffffff);
406 for (uint32_t r
= 0; r
< region_count
; ++r
) {
/* NOTE(review): "®ions" below is mojibake for "&regions". */
407 const VkImageResolve
*region
= ®ions
[r
];
409 /* From the Vulkan 1.0 spec:
411 * - The aspectMask member of srcSubresource and dstSubresource must
412 * only contain VK_IMAGE_ASPECT_COLOR_BIT
414 * - The layerCount member of srcSubresource and dstSubresource must
417 assert(region
->srcSubresource
.aspectMask
== VK_IMAGE_ASPECT_COLOR_BIT
);
418 assert(region
->dstSubresource
.aspectMask
== VK_IMAGE_ASPECT_COLOR_BIT
);
419 assert(region
->srcSubresource
.layerCount
==
420 region
->dstSubresource
.layerCount
);
/* NOTE(review): "®ion" in the next two calls is mojibake for "&region". */
422 const uint32_t src_base_layer
=
423 radv_meta_get_iview_layer(src_image
, ®ion
->srcSubresource
,
426 const uint32_t dest_base_layer
=
427 radv_meta_get_iview_layer(dest_image
, ®ion
->dstSubresource
,
431 * From Vulkan 1.0.6 spec: 18.6 Resolving Multisample Images
433 * extent is the size in texels of the source image to resolve in width,
434 * height and depth. 1D images use only x and width. 2D images use x, y,
435 * width and height. 3D images use x, y, z, width, height and depth.
437 * srcOffset and dstOffset select the initial x, y, and z offsets in
438 * texels of the sub-regions of the source and destination image data.
439 * extent is the size in texels of the source image to resolve in width,
440 * height and depth. 1D images use only x and width. 2D images use x, y,
441 * width and height. 3D images use x, y, z, width, height and depth.
443 const struct VkExtent3D extent
=
444 radv_sanitize_image_extent(src_image
->type
, region
->extent
);
445 const struct VkOffset3D dstOffset
=
446 radv_sanitize_image_offset(dest_image
->type
, region
->dstOffset
);
/* One render pass instance per layer of the region. */
449 for (uint32_t layer
= 0; layer
< region
->srcSubresource
.layerCount
;
452 struct radv_image_view src_iview
;
453 radv_image_view_init(&src_iview
, cmd_buffer
->device
,
454 &(VkImageViewCreateInfo
) {
455 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
456 .image
= src_image_h
,
457 .viewType
= radv_meta_get_view_type(src_image
),
458 .format
= src_image
->vk_format
,
459 .subresourceRange
= {
460 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
461 .baseMipLevel
= region
->srcSubresource
.mipLevel
,
463 .baseArrayLayer
= src_base_layer
+ layer
,
468 struct radv_image_view dest_iview
;
469 radv_image_view_init(&dest_iview
, cmd_buffer
->device
,
470 &(VkImageViewCreateInfo
) {
471 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
472 .image
= dest_image_h
,
473 .viewType
= radv_meta_get_view_type(dest_image
),
474 .format
= dest_image
->vk_format
,
475 .subresourceRange
= {
476 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
477 .baseMipLevel
= region
->dstSubresource
.mipLevel
,
479 .baseArrayLayer
= dest_base_layer
+ layer
,
/* Framebuffer with [0] = source view, [1] = destination view, matching the
 * attachment order set up in create_pass(). */
485 radv_CreateFramebuffer(device_h
,
486 &(VkFramebufferCreateInfo
) {
487 .sType
= VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
,
488 .attachmentCount
= 2,
489 .pAttachments
= (VkImageView
[]) {
490 radv_image_view_to_handle(&src_iview
),
491 radv_image_view_to_handle(&dest_iview
),
/* Framebuffer dims follow the destination mip level. */
493 .width
= radv_minify(dest_image
->info
.width
,
494 region
->dstSubresource
.mipLevel
),
495 .height
= radv_minify(dest_image
->info
.height
,
496 region
->dstSubresource
.mipLevel
),
499 &cmd_buffer
->pool
->alloc
,
502 radv_CmdBeginRenderPass(cmd_buffer_h
,
503 &(VkRenderPassBeginInfo
) {
504 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO
,
505 .renderPass
= device
->meta_state
.resolve
.pass
,
/* No clears: loadOp is LOAD and the resolve overwrites the area. */
517 .clearValueCount
= 0,
518 .pClearValues
= NULL
,
520 VK_SUBPASS_CONTENTS_INLINE
);
/* Destination offset argument lines are missing from this chunk; the
 * extent passed is the sanitized region extent. */
522 emit_resolve(cmd_buffer
,
528 .width
= extent
.width
,
529 .height
= extent
.height
,
532 radv_CmdEndRenderPass(cmd_buffer_h
);
534 radv_DestroyFramebuffer(device_h
, fb_h
,
535 &cmd_buffer
->pool
->alloc
);
/* Restore the graphics state saved at L624 (original line 382). */
539 radv_meta_restore(&saved_state
, cmd_buffer
);
543 * Emit any needed resolves for the current subpass.
/* Emit any needed resolves for the current subpass.  First scans the
 * color/resolve attachment pairs to decide whether a fallback (compute or
 * fragment) is required and dispatches to it; otherwise records the HW
 * resolve per pair using a temporary single-pair subpass and a full-
 * framebuffer emit_resolve().
 * NOTE(review): the return-type line, early returns, `continue`s, and brace
 * lines are missing from this chunk. */
546 radv_cmd_buffer_resolve_subpass(struct radv_cmd_buffer
*cmd_buffer
)
548 struct radv_framebuffer
*fb
= cmd_buffer
->state
.framebuffer
;
549 const struct radv_subpass
*subpass
= cmd_buffer
->state
.subpass
;
550 struct radv_meta_saved_state saved_state
;
551 enum radv_resolve_method resolve_method
= RESOLVE_HW
;
553 /* FINISHME(perf): Skip clears for resolve attachments.
555 * From the Vulkan 1.0 spec:
557 * If the first use of an attachment in a render pass is as a resolve
558 * attachment, then the loadOp is effectively ignored as the resolve is
559 * guaranteed to overwrite all pixels in the render area.
/* Nothing to do when this subpass has no resolve attachments. */
562 if (!subpass
->has_resolve
)
/* Pass 1: decide the method by inspecting every attachment pair. */
565 for (uint32_t i
= 0; i
< subpass
->color_count
; ++i
) {
566 VkAttachmentReference src_att
= subpass
->color_attachments
[i
];
567 VkAttachmentReference dest_att
= subpass
->resolve_attachments
[i
];
569 if (src_att
.attachment
== VK_ATTACHMENT_UNUSED
||
570 dest_att
.attachment
== VK_ATTACHMENT_UNUSED
)
573 struct radv_image
*dst_img
= cmd_buffer
->state
.framebuffer
->attachments
[dest_att
.attachment
].attachment
->image
;
574 struct radv_image
*src_img
= cmd_buffer
->state
.framebuffer
->attachments
[src_att
.attachment
].attachment
->image
;
/* NOTE(review): arguments are passed (dst_img, src_img, ...) while the
 * callee's parameters are (src_image, dest_image, ...) — swapped order;
 * confirm against upstream whether this is intentional. */
576 radv_pick_resolve_method_images(dst_img
, src_img
, &resolve_method
);
577 if (resolve_method
== RESOLVE_FRAGMENT
) {
/* Dispatch to the fallback subpass resolvers and return early (return
 * statements are on lines missing from this chunk). */
582 if (resolve_method
== RESOLVE_COMPUTE
) {
583 radv_cmd_buffer_resolve_subpass_cs(cmd_buffer
);
585 } else if (resolve_method
== RESOLVE_FRAGMENT
) {
586 radv_cmd_buffer_resolve_subpass_fs(cmd_buffer
);
/* HW path: save graphics state; restored at L1063. */
590 radv_meta_save_graphics_reset_vport_scissor_novertex(&saved_state
, cmd_buffer
);
/* Pass 2: record one HW resolve per usable attachment pair. */
592 for (uint32_t i
= 0; i
< subpass
->color_count
; ++i
) {
593 VkAttachmentReference src_att
= subpass
->color_attachments
[i
];
594 VkAttachmentReference dest_att
= subpass
->resolve_attachments
[i
];
596 if (src_att
.attachment
== VK_ATTACHMENT_UNUSED
||
597 dest_att
.attachment
== VK_ATTACHMENT_UNUSED
)
600 struct radv_image
*dst_img
= cmd_buffer
->state
.framebuffer
->attachments
[dest_att
.attachment
].attachment
->image
;
/* DCC destinations are initialized before the CB writes, and the tracked
 * layout is updated to match. */
602 if (dst_img
->surface
.dcc_size
) {
603 radv_initialize_dcc(cmd_buffer
, dst_img
, 0xffffffff);
604 cmd_buffer
->state
.attachments
[dest_att
.attachment
].current_layout
= VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
;
/* Temporary subpass containing just this src/dest pair, no depth/stencil. */
607 struct radv_subpass resolve_subpass
= {
609 .color_attachments
= (VkAttachmentReference
[]) { src_att
, dest_att
},
610 .depth_stencil_attachment
= { .attachment
= VK_ATTACHMENT_UNUSED
},
613 radv_cmd_buffer_set_subpass(cmd_buffer
, &resolve_subpass
, false);
615 /* Subpass resolves must respect the render area. We can ignore the
616 * render area here because vkCmdBeginRenderPass set the render area
617 * with 3DSTATE_DRAWING_RECTANGLE.
619 * XXX(chadv): Does the hardware really respect
620 * 3DSTATE_DRAWING_RECTANGLE when draing a 3DPRIM_RECTLIST?
622 emit_resolve(cmd_buffer
,
623 &(VkOffset2D
) { 0, 0 },
624 &(VkExtent2D
) { fb
->width
, fb
->height
});
/* Restore the original subpass and the saved graphics state. */
627 cmd_buffer
->state
.subpass
= subpass
;
628 radv_meta_restore(&saved_state
, cmd_buffer
);