/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "radv_meta.h"
#include "radv_private.h"
33 build_dcc_decompress_compute_shader(struct radv_device
*dev
)
36 const struct glsl_type
*buf_type
= glsl_sampler_type(GLSL_SAMPLER_DIM_2D
,
40 const struct glsl_type
*img_type
= glsl_sampler_type(GLSL_SAMPLER_DIM_2D
,
44 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_COMPUTE
, NULL
);
45 b
.shader
->info
.name
= ralloc_strdup(b
.shader
, "dcc_decompress_compute");
47 /* We need at least 16/16/1 to cover an entire DCC block in a single workgroup. */
48 b
.shader
->info
.cs
.local_size
[0] = 16;
49 b
.shader
->info
.cs
.local_size
[1] = 16;
50 b
.shader
->info
.cs
.local_size
[2] = 1;
51 nir_variable
*input_img
= nir_variable_create(b
.shader
, nir_var_uniform
,
53 input_img
->data
.descriptor_set
= 0;
54 input_img
->data
.binding
= 0;
56 nir_variable
*output_img
= nir_variable_create(b
.shader
, nir_var_uniform
,
58 output_img
->data
.descriptor_set
= 0;
59 output_img
->data
.binding
= 1;
61 nir_ssa_def
*invoc_id
= nir_load_system_value(&b
, nir_intrinsic_load_local_invocation_id
, 0);
62 nir_ssa_def
*wg_id
= nir_load_system_value(&b
, nir_intrinsic_load_work_group_id
, 0);
63 nir_ssa_def
*block_size
= nir_imm_ivec4(&b
,
64 b
.shader
->info
.cs
.local_size
[0],
65 b
.shader
->info
.cs
.local_size
[1],
66 b
.shader
->info
.cs
.local_size
[2], 0);
68 nir_ssa_def
*global_id
= nir_iadd(&b
, nir_imul(&b
, wg_id
, block_size
), invoc_id
);
69 nir_ssa_def
*input_img_deref
= &nir_build_deref_var(&b
, input_img
)->dest
.ssa
;
71 nir_tex_instr
*tex
= nir_tex_instr_create(b
.shader
, 3);
72 tex
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
73 tex
->op
= nir_texop_txf
;
74 tex
->src
[0].src_type
= nir_tex_src_coord
;
75 tex
->src
[0].src
= nir_src_for_ssa(nir_channels(&b
, global_id
, 3));
76 tex
->src
[1].src_type
= nir_tex_src_lod
;
77 tex
->src
[1].src
= nir_src_for_ssa(nir_imm_int(&b
, 0));
78 tex
->src
[2].src_type
= nir_tex_src_texture_deref
;
79 tex
->src
[2].src
= nir_src_for_ssa(input_img_deref
);
80 tex
->dest_type
= nir_type_float
;
81 tex
->is_array
= false;
82 tex
->coord_components
= 2;
84 nir_ssa_dest_init(&tex
->instr
, &tex
->dest
, 4, 32, "tex");
85 nir_builder_instr_insert(&b
, &tex
->instr
);
87 nir_intrinsic_instr
*membar
= nir_intrinsic_instr_create(b
.shader
, nir_intrinsic_memory_barrier
);
88 nir_builder_instr_insert(&b
, &membar
->instr
);
90 nir_intrinsic_instr
*bar
= nir_intrinsic_instr_create(b
.shader
, nir_intrinsic_barrier
);
91 nir_builder_instr_insert(&b
, &bar
->instr
);
93 nir_ssa_def
*outval
= &tex
->dest
.ssa
;
94 nir_intrinsic_instr
*store
= nir_intrinsic_instr_create(b
.shader
, nir_intrinsic_image_deref_store
);
95 store
->src
[0] = nir_src_for_ssa(&nir_build_deref_var(&b
, output_img
)->dest
.ssa
);
96 store
->src
[1] = nir_src_for_ssa(global_id
);
97 store
->src
[2] = nir_src_for_ssa(nir_ssa_undef(&b
, 1, 32));
98 store
->src
[3] = nir_src_for_ssa(outval
);
100 nir_builder_instr_insert(&b
, &store
->instr
);
105 create_dcc_compress_compute(struct radv_device
*device
)
107 VkResult result
= VK_SUCCESS
;
108 struct radv_shader_module cs
= { .nir
= NULL
};
110 cs
.nir
= build_dcc_decompress_compute_shader(device
);
112 VkDescriptorSetLayoutCreateInfo ds_create_info
= {
113 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
,
114 .flags
= VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR
,
116 .pBindings
= (VkDescriptorSetLayoutBinding
[]) {
119 .descriptorType
= VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
,
120 .descriptorCount
= 1,
121 .stageFlags
= VK_SHADER_STAGE_COMPUTE_BIT
,
122 .pImmutableSamplers
= NULL
126 .descriptorType
= VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
,
127 .descriptorCount
= 1,
128 .stageFlags
= VK_SHADER_STAGE_COMPUTE_BIT
,
129 .pImmutableSamplers
= NULL
134 result
= radv_CreateDescriptorSetLayout(radv_device_to_handle(device
),
136 &device
->meta_state
.alloc
,
137 &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_ds_layout
);
138 if (result
!= VK_SUCCESS
)
142 VkPipelineLayoutCreateInfo pl_create_info
= {
143 .sType
= VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
,
145 .pSetLayouts
= &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_ds_layout
,
146 .pushConstantRangeCount
= 1,
147 .pPushConstantRanges
= &(VkPushConstantRange
){VK_SHADER_STAGE_COMPUTE_BIT
, 0, 8},
150 result
= radv_CreatePipelineLayout(radv_device_to_handle(device
),
152 &device
->meta_state
.alloc
,
153 &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_p_layout
);
154 if (result
!= VK_SUCCESS
)
159 VkPipelineShaderStageCreateInfo pipeline_shader_stage
= {
160 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
161 .stage
= VK_SHADER_STAGE_COMPUTE_BIT
,
162 .module
= radv_shader_module_to_handle(&cs
),
164 .pSpecializationInfo
= NULL
,
167 VkComputePipelineCreateInfo vk_pipeline_info
= {
168 .sType
= VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO
,
169 .stage
= pipeline_shader_stage
,
171 .layout
= device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_p_layout
,
174 result
= radv_CreateComputePipelines(radv_device_to_handle(device
),
175 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
176 1, &vk_pipeline_info
, NULL
,
177 &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_pipeline
);
178 if (result
!= VK_SUCCESS
)
187 create_pass(struct radv_device
*device
)
190 VkDevice device_h
= radv_device_to_handle(device
);
191 const VkAllocationCallbacks
*alloc
= &device
->meta_state
.alloc
;
192 VkAttachmentDescription attachment
;
194 attachment
.format
= VK_FORMAT_UNDEFINED
;
195 attachment
.samples
= 1;
196 attachment
.loadOp
= VK_ATTACHMENT_LOAD_OP_LOAD
;
197 attachment
.storeOp
= VK_ATTACHMENT_STORE_OP_STORE
;
198 attachment
.initialLayout
= VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
;
199 attachment
.finalLayout
= VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
;
201 result
= radv_CreateRenderPass(device_h
,
202 &(VkRenderPassCreateInfo
) {
203 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
,
204 .attachmentCount
= 1,
205 .pAttachments
= &attachment
,
207 .pSubpasses
= &(VkSubpassDescription
) {
208 .pipelineBindPoint
= VK_PIPELINE_BIND_POINT_GRAPHICS
,
209 .inputAttachmentCount
= 0,
210 .colorAttachmentCount
= 1,
211 .pColorAttachments
= (VkAttachmentReference
[]) {
214 .layout
= VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
,
217 .pResolveAttachments
= NULL
,
218 .pDepthStencilAttachment
= &(VkAttachmentReference
) {
219 .attachment
= VK_ATTACHMENT_UNUSED
,
221 .preserveAttachmentCount
= 0,
222 .pPreserveAttachments
= NULL
,
224 .dependencyCount
= 0,
227 &device
->meta_state
.fast_clear_flush
.pass
);
233 create_pipeline_layout(struct radv_device
*device
, VkPipelineLayout
*layout
)
235 VkPipelineLayoutCreateInfo pl_create_info
= {
236 .sType
= VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
,
239 .pushConstantRangeCount
= 0,
240 .pPushConstantRanges
= NULL
,
243 return radv_CreatePipelineLayout(radv_device_to_handle(device
),
245 &device
->meta_state
.alloc
,
250 create_pipeline(struct radv_device
*device
,
251 VkShaderModule vs_module_h
,
252 VkPipelineLayout layout
)
255 VkDevice device_h
= radv_device_to_handle(device
);
257 struct radv_shader_module fs_module
= {
258 .nir
= radv_meta_build_nir_fs_noop(),
261 if (!fs_module
.nir
) {
262 /* XXX: Need more accurate error */
263 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
267 const VkPipelineShaderStageCreateInfo stages
[2] = {
269 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
270 .stage
= VK_SHADER_STAGE_VERTEX_BIT
,
271 .module
= vs_module_h
,
275 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
276 .stage
= VK_SHADER_STAGE_FRAGMENT_BIT
,
277 .module
= radv_shader_module_to_handle(&fs_module
),
282 const VkPipelineVertexInputStateCreateInfo vi_state
= {
283 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO
,
284 .vertexBindingDescriptionCount
= 0,
285 .vertexAttributeDescriptionCount
= 0,
288 const VkPipelineInputAssemblyStateCreateInfo ia_state
= {
289 .sType
= VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO
,
290 .topology
= VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
,
291 .primitiveRestartEnable
= false,
294 const VkPipelineColorBlendStateCreateInfo blend_state
= {
295 .sType
= VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO
,
296 .logicOpEnable
= false,
297 .attachmentCount
= 1,
298 .pAttachments
= (VkPipelineColorBlendAttachmentState
[]) {
300 .colorWriteMask
= VK_COLOR_COMPONENT_R_BIT
|
301 VK_COLOR_COMPONENT_G_BIT
|
302 VK_COLOR_COMPONENT_B_BIT
|
303 VK_COLOR_COMPONENT_A_BIT
,
307 const VkPipelineRasterizationStateCreateInfo rs_state
= {
308 .sType
= VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO
,
309 .depthClampEnable
= false,
310 .rasterizerDiscardEnable
= false,
311 .polygonMode
= VK_POLYGON_MODE_FILL
,
312 .cullMode
= VK_CULL_MODE_NONE
,
313 .frontFace
= VK_FRONT_FACE_COUNTER_CLOCKWISE
,
316 result
= radv_graphics_pipeline_create(device_h
,
317 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
318 &(VkGraphicsPipelineCreateInfo
) {
319 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
323 .pVertexInputState
= &vi_state
,
324 .pInputAssemblyState
= &ia_state
,
326 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
327 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
331 .pRasterizationState
= &rs_state
,
332 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
333 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
334 .rasterizationSamples
= 1,
335 .sampleShadingEnable
= false,
337 .alphaToCoverageEnable
= false,
338 .alphaToOneEnable
= false,
340 .pColorBlendState
= &blend_state
,
341 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
342 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
343 .dynamicStateCount
= 2,
344 .pDynamicStates
= (VkDynamicState
[]) {
345 VK_DYNAMIC_STATE_VIEWPORT
,
346 VK_DYNAMIC_STATE_SCISSOR
,
350 .renderPass
= device
->meta_state
.fast_clear_flush
.pass
,
353 &(struct radv_graphics_pipeline_create_info
) {
354 .use_rectlist
= true,
355 .custom_blend_mode
= V_028808_CB_ELIMINATE_FAST_CLEAR
,
357 &device
->meta_state
.alloc
,
358 &device
->meta_state
.fast_clear_flush
.cmask_eliminate_pipeline
);
359 if (result
!= VK_SUCCESS
)
362 result
= radv_graphics_pipeline_create(device_h
,
363 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
364 &(VkGraphicsPipelineCreateInfo
) {
365 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
369 .pVertexInputState
= &vi_state
,
370 .pInputAssemblyState
= &ia_state
,
372 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
373 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
377 .pRasterizationState
= &rs_state
,
378 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
379 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
380 .rasterizationSamples
= 1,
381 .sampleShadingEnable
= false,
383 .alphaToCoverageEnable
= false,
384 .alphaToOneEnable
= false,
386 .pColorBlendState
= &blend_state
,
387 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
388 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
389 .dynamicStateCount
= 2,
390 .pDynamicStates
= (VkDynamicState
[]) {
391 VK_DYNAMIC_STATE_VIEWPORT
,
392 VK_DYNAMIC_STATE_SCISSOR
,
396 .renderPass
= device
->meta_state
.fast_clear_flush
.pass
,
399 &(struct radv_graphics_pipeline_create_info
) {
400 .use_rectlist
= true,
401 .custom_blend_mode
= V_028808_CB_FMASK_DECOMPRESS
,
403 &device
->meta_state
.alloc
,
404 &device
->meta_state
.fast_clear_flush
.fmask_decompress_pipeline
);
405 if (result
!= VK_SUCCESS
)
408 result
= radv_graphics_pipeline_create(device_h
,
409 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
410 &(VkGraphicsPipelineCreateInfo
) {
411 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
415 .pVertexInputState
= &vi_state
,
416 .pInputAssemblyState
= &ia_state
,
418 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
419 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
423 .pRasterizationState
= &rs_state
,
424 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
425 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
426 .rasterizationSamples
= 1,
427 .sampleShadingEnable
= false,
429 .alphaToCoverageEnable
= false,
430 .alphaToOneEnable
= false,
432 .pColorBlendState
= &blend_state
,
433 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
434 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
435 .dynamicStateCount
= 2,
436 .pDynamicStates
= (VkDynamicState
[]) {
437 VK_DYNAMIC_STATE_VIEWPORT
,
438 VK_DYNAMIC_STATE_SCISSOR
,
442 .renderPass
= device
->meta_state
.fast_clear_flush
.pass
,
445 &(struct radv_graphics_pipeline_create_info
) {
446 .use_rectlist
= true,
447 .custom_blend_mode
= V_028808_CB_DCC_DECOMPRESS
,
449 &device
->meta_state
.alloc
,
450 &device
->meta_state
.fast_clear_flush
.dcc_decompress_pipeline
);
451 if (result
!= VK_SUCCESS
)
457 ralloc_free(fs_module
.nir
);
462 radv_device_finish_meta_fast_clear_flush_state(struct radv_device
*device
)
464 struct radv_meta_state
*state
= &device
->meta_state
;
466 radv_DestroyPipeline(radv_device_to_handle(device
),
467 state
->fast_clear_flush
.dcc_decompress_pipeline
,
469 radv_DestroyPipeline(radv_device_to_handle(device
),
470 state
->fast_clear_flush
.fmask_decompress_pipeline
,
472 radv_DestroyPipeline(radv_device_to_handle(device
),
473 state
->fast_clear_flush
.cmask_eliminate_pipeline
,
475 radv_DestroyRenderPass(radv_device_to_handle(device
),
476 state
->fast_clear_flush
.pass
, &state
->alloc
);
477 radv_DestroyPipelineLayout(radv_device_to_handle(device
),
478 state
->fast_clear_flush
.p_layout
,
481 radv_DestroyPipeline(radv_device_to_handle(device
),
482 state
->fast_clear_flush
.dcc_decompress_compute_pipeline
,
484 radv_DestroyPipelineLayout(radv_device_to_handle(device
),
485 state
->fast_clear_flush
.dcc_decompress_compute_p_layout
,
487 radv_DestroyDescriptorSetLayout(radv_device_to_handle(device
),
488 state
->fast_clear_flush
.dcc_decompress_compute_ds_layout
,
493 radv_device_init_meta_fast_clear_flush_state(struct radv_device
*device
)
495 VkResult res
= VK_SUCCESS
;
497 struct radv_shader_module vs_module
= { .nir
= radv_meta_build_nir_vs_generate_vertices() };
498 if (!vs_module
.nir
) {
499 /* XXX: Need more accurate error */
500 res
= VK_ERROR_OUT_OF_HOST_MEMORY
;
504 res
= create_pass(device
);
505 if (res
!= VK_SUCCESS
)
508 res
= create_pipeline_layout(device
,
509 &device
->meta_state
.fast_clear_flush
.p_layout
);
510 if (res
!= VK_SUCCESS
)
513 VkShaderModule vs_module_h
= radv_shader_module_to_handle(&vs_module
);
514 res
= create_pipeline(device
, vs_module_h
,
515 device
->meta_state
.fast_clear_flush
.p_layout
);
516 if (res
!= VK_SUCCESS
)
519 res
= create_dcc_compress_compute(device
);
520 if (res
!= VK_SUCCESS
)
526 radv_device_finish_meta_fast_clear_flush_state(device
);
529 ralloc_free(vs_module
.nir
);
535 emit_fast_clear_flush(struct radv_cmd_buffer
*cmd_buffer
,
536 const VkExtent2D
*resolve_extent
,
539 VkCommandBuffer cmd_buffer_h
= radv_cmd_buffer_to_handle(cmd_buffer
);
541 radv_CmdBindPipeline(cmd_buffer_h
, VK_PIPELINE_BIND_POINT_GRAPHICS
,
544 radv_CmdSetViewport(radv_cmd_buffer_to_handle(cmd_buffer
), 0, 1, &(VkViewport
) {
547 .width
= resolve_extent
->width
,
548 .height
= resolve_extent
->height
,
553 radv_CmdSetScissor(radv_cmd_buffer_to_handle(cmd_buffer
), 0, 1, &(VkRect2D
) {
554 .offset
= (VkOffset2D
) { 0, 0 },
555 .extent
= (VkExtent2D
) { resolve_extent
->width
, resolve_extent
->height
},
558 radv_CmdDraw(cmd_buffer_h
, 3, 1, 0, 0);
559 cmd_buffer
->state
.flush_bits
|= (RADV_CMD_FLAG_FLUSH_AND_INV_CB
|
560 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META
);
564 radv_emit_set_predication_state_from_image(struct radv_cmd_buffer
*cmd_buffer
,
565 struct radv_image
*image
, bool value
)
570 va
= radv_buffer_get_va(image
->bo
) + image
->offset
;
571 va
+= image
->dcc_pred_offset
;
574 si_emit_set_predication_state(cmd_buffer
, true, va
);
580 radv_emit_color_decompress(struct radv_cmd_buffer
*cmd_buffer
,
581 struct radv_image
*image
,
582 const VkImageSubresourceRange
*subresourceRange
,
585 struct radv_meta_saved_state saved_state
;
586 VkDevice device_h
= radv_device_to_handle(cmd_buffer
->device
);
587 VkCommandBuffer cmd_buffer_h
= radv_cmd_buffer_to_handle(cmd_buffer
);
588 uint32_t layer_count
= radv_get_layerCount(image
, subresourceRange
);
589 bool old_predicating
;
592 assert(cmd_buffer
->queue_family_index
== RADV_QUEUE_GENERAL
);
594 radv_meta_save(&saved_state
, cmd_buffer
,
595 RADV_META_SAVE_GRAPHICS_PIPELINE
|
596 RADV_META_SAVE_PASS
);
598 if (decompress_dcc
&& radv_image_has_dcc(image
)) {
599 pipeline
= cmd_buffer
->device
->meta_state
.fast_clear_flush
.dcc_decompress_pipeline
;
600 } else if (radv_image_has_fmask(image
)) {
601 pipeline
= cmd_buffer
->device
->meta_state
.fast_clear_flush
.fmask_decompress_pipeline
;
603 pipeline
= cmd_buffer
->device
->meta_state
.fast_clear_flush
.cmask_eliminate_pipeline
;
606 if (radv_image_has_dcc(image
)) {
607 old_predicating
= cmd_buffer
->state
.predicating
;
609 radv_emit_set_predication_state_from_image(cmd_buffer
, image
, true);
610 cmd_buffer
->state
.predicating
= true;
612 for (uint32_t layer
= 0; layer
< layer_count
; ++layer
) {
613 struct radv_image_view iview
;
615 radv_image_view_init(&iview
, cmd_buffer
->device
,
616 &(VkImageViewCreateInfo
) {
617 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
618 .image
= radv_image_to_handle(image
),
619 .viewType
= radv_meta_get_view_type(image
),
620 .format
= image
->vk_format
,
621 .subresourceRange
= {
622 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
625 .baseArrayLayer
= subresourceRange
->baseArrayLayer
+ layer
,
631 radv_CreateFramebuffer(device_h
,
632 &(VkFramebufferCreateInfo
) {
633 .sType
= VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
,
634 .attachmentCount
= 1,
635 .pAttachments
= (VkImageView
[]) {
636 radv_image_view_to_handle(&iview
)
638 .width
= image
->info
.width
,
639 .height
= image
->info
.height
,
642 &cmd_buffer
->pool
->alloc
,
645 radv_CmdBeginRenderPass(cmd_buffer_h
,
646 &(VkRenderPassBeginInfo
) {
647 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO
,
648 .renderPass
= cmd_buffer
->device
->meta_state
.fast_clear_flush
.pass
,
660 .clearValueCount
= 0,
661 .pClearValues
= NULL
,
663 VK_SUBPASS_CONTENTS_INLINE
);
665 emit_fast_clear_flush(cmd_buffer
,
666 &(VkExtent2D
) { image
->info
.width
, image
->info
.height
},
668 radv_CmdEndRenderPass(cmd_buffer_h
);
670 radv_DestroyFramebuffer(device_h
, fb_h
,
671 &cmd_buffer
->pool
->alloc
);
674 if (radv_image_has_dcc(image
)) {
675 cmd_buffer
->state
.predicating
= old_predicating
;
677 radv_emit_set_predication_state_from_image(cmd_buffer
, image
, false);
679 /* Clear the image's fast-clear eliminate predicate because
680 * FMASK and DCC also imply a fast-clear eliminate.
682 radv_set_dcc_need_cmask_elim_pred(cmd_buffer
, image
, false);
684 if (cmd_buffer
->state
.predication_type
!= -1) {
685 /* Restore previous conditional rendering user state. */
686 si_emit_set_predication_state(cmd_buffer
,
687 cmd_buffer
->state
.predication_type
,
688 cmd_buffer
->state
.predication_va
);
691 radv_meta_restore(&saved_state
, cmd_buffer
);
695 radv_fast_clear_flush_image_inplace(struct radv_cmd_buffer
*cmd_buffer
,
696 struct radv_image
*image
,
697 const VkImageSubresourceRange
*subresourceRange
)
699 radv_emit_color_decompress(cmd_buffer
, image
, subresourceRange
, false);
703 radv_decompress_dcc_gfx(struct radv_cmd_buffer
*cmd_buffer
,
704 struct radv_image
*image
,
705 const VkImageSubresourceRange
*subresourceRange
)
707 radv_emit_color_decompress(cmd_buffer
, image
, subresourceRange
, true);
711 radv_decompress_dcc_compute(struct radv_cmd_buffer
*cmd_buffer
,
712 struct radv_image
*image
,
713 const VkImageSubresourceRange
*subresourceRange
)
715 struct radv_meta_saved_state saved_state
;
716 struct radv_image_view iview
= {0};
717 struct radv_device
*device
= cmd_buffer
->device
;
719 /* This assumes the image is 2d with 1 layer and 1 mipmap level */
720 struct radv_cmd_state
*state
= &cmd_buffer
->state
;
722 state
->flush_bits
|= RADV_CMD_FLAG_FLUSH_AND_INV_CB
|
723 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META
;
725 radv_meta_save(&saved_state
, cmd_buffer
, RADV_META_SAVE_DESCRIPTORS
|
726 RADV_META_SAVE_COMPUTE_PIPELINE
);
728 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer
),
729 VK_PIPELINE_BIND_POINT_COMPUTE
,
730 device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_pipeline
);
732 radv_image_view_init(&iview
, cmd_buffer
->device
,
733 &(VkImageViewCreateInfo
) {
734 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
735 .image
= radv_image_to_handle(image
),
736 .viewType
= VK_IMAGE_VIEW_TYPE_2D
,
737 .format
= image
->vk_format
,
738 .subresourceRange
= {
739 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
747 radv_meta_push_descriptor_set(cmd_buffer
,
748 VK_PIPELINE_BIND_POINT_COMPUTE
,
749 device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_p_layout
,
751 2, /* descriptorWriteCount */
752 (VkWriteDescriptorSet
[]) {
754 .sType
= VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET
,
756 .dstArrayElement
= 0,
757 .descriptorCount
= 1,
758 .descriptorType
= VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
,
759 .pImageInfo
= (VkDescriptorImageInfo
[]) {
761 .sampler
= VK_NULL_HANDLE
,
762 .imageView
= radv_image_view_to_handle(&iview
),
763 .imageLayout
= VK_IMAGE_LAYOUT_GENERAL
,
768 .sType
= VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET
,
770 .dstArrayElement
= 0,
771 .descriptorCount
= 1,
772 .descriptorType
= VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
,
773 .pImageInfo
= (VkDescriptorImageInfo
[]) {
775 .sampler
= VK_NULL_HANDLE
,
776 .imageView
= radv_image_view_to_handle(&iview
),
777 .imageLayout
= VK_IMAGE_LAYOUT_GENERAL
,
783 radv_unaligned_dispatch(cmd_buffer
, image
->info
.width
, image
->info
.height
, 1);
785 /* The fill buffer below does its own saving */
786 radv_meta_restore(&saved_state
, cmd_buffer
);
788 state
->flush_bits
|= RADV_CMD_FLAG_CS_PARTIAL_FLUSH
|
789 RADV_CMD_FLAG_INV_VMEM_L1
;
791 state
->flush_bits
|= radv_clear_dcc(cmd_buffer
, image
, 0xffffffff);
793 state
->flush_bits
|= RADV_CMD_FLAG_FLUSH_AND_INV_CB
|
794 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META
;
798 radv_decompress_dcc(struct radv_cmd_buffer
*cmd_buffer
,
799 struct radv_image
*image
,
800 const VkImageSubresourceRange
*subresourceRange
)
802 if (cmd_buffer
->queue_family_index
== RADV_QUEUE_GENERAL
)
803 radv_decompress_dcc_gfx(cmd_buffer
, image
, subresourceRange
);
805 radv_decompress_dcc_compute(cmd_buffer
, image
, subresourceRange
);