/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
27 #include "radv_meta.h"
28 #include "radv_private.h"
33 build_dcc_decompress_compute_shader(struct radv_device
*dev
)
36 const struct glsl_type
*buf_type
= glsl_sampler_type(GLSL_SAMPLER_DIM_2D
,
40 const struct glsl_type
*img_type
= glsl_sampler_type(GLSL_SAMPLER_DIM_2D
,
44 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_COMPUTE
, NULL
);
45 b
.shader
->info
.name
= ralloc_strdup(b
.shader
, "dcc_decompress_compute");
47 /* We need at least 16/16/1 to cover an entire DCC block in a single workgroup. */
48 b
.shader
->info
.cs
.local_size
[0] = 16;
49 b
.shader
->info
.cs
.local_size
[1] = 16;
50 b
.shader
->info
.cs
.local_size
[2] = 1;
51 nir_variable
*input_img
= nir_variable_create(b
.shader
, nir_var_uniform
,
53 input_img
->data
.descriptor_set
= 0;
54 input_img
->data
.binding
= 0;
56 nir_variable
*output_img
= nir_variable_create(b
.shader
, nir_var_uniform
,
58 output_img
->data
.descriptor_set
= 0;
59 output_img
->data
.binding
= 1;
61 nir_ssa_def
*invoc_id
= nir_load_local_invocation_id(&b
);
62 nir_ssa_def
*wg_id
= nir_load_work_group_id(&b
);
63 nir_ssa_def
*block_size
= nir_imm_ivec4(&b
,
64 b
.shader
->info
.cs
.local_size
[0],
65 b
.shader
->info
.cs
.local_size
[1],
66 b
.shader
->info
.cs
.local_size
[2], 0);
68 nir_ssa_def
*global_id
= nir_iadd(&b
, nir_imul(&b
, wg_id
, block_size
), invoc_id
);
69 nir_ssa_def
*input_img_deref
= &nir_build_deref_var(&b
, input_img
)->dest
.ssa
;
71 nir_tex_instr
*tex
= nir_tex_instr_create(b
.shader
, 3);
72 tex
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
73 tex
->op
= nir_texop_txf
;
74 tex
->src
[0].src_type
= nir_tex_src_coord
;
75 tex
->src
[0].src
= nir_src_for_ssa(nir_channels(&b
, global_id
, 3));
76 tex
->src
[1].src_type
= nir_tex_src_lod
;
77 tex
->src
[1].src
= nir_src_for_ssa(nir_imm_int(&b
, 0));
78 tex
->src
[2].src_type
= nir_tex_src_texture_deref
;
79 tex
->src
[2].src
= nir_src_for_ssa(input_img_deref
);
80 tex
->dest_type
= nir_type_float
;
81 tex
->is_array
= false;
82 tex
->coord_components
= 2;
84 nir_ssa_dest_init(&tex
->instr
, &tex
->dest
, 4, 32, "tex");
85 nir_builder_instr_insert(&b
, &tex
->instr
);
87 nir_intrinsic_instr
*membar
= nir_intrinsic_instr_create(b
.shader
, nir_intrinsic_memory_barrier
);
88 nir_builder_instr_insert(&b
, &membar
->instr
);
90 nir_intrinsic_instr
*bar
= nir_intrinsic_instr_create(b
.shader
, nir_intrinsic_barrier
);
91 nir_builder_instr_insert(&b
, &bar
->instr
);
93 nir_ssa_def
*outval
= &tex
->dest
.ssa
;
94 nir_intrinsic_instr
*store
= nir_intrinsic_instr_create(b
.shader
, nir_intrinsic_image_deref_store
);
95 store
->num_components
= 4;
96 store
->src
[0] = nir_src_for_ssa(&nir_build_deref_var(&b
, output_img
)->dest
.ssa
);
97 store
->src
[1] = nir_src_for_ssa(global_id
);
98 store
->src
[2] = nir_src_for_ssa(nir_ssa_undef(&b
, 1, 32));
99 store
->src
[3] = nir_src_for_ssa(outval
);
101 nir_builder_instr_insert(&b
, &store
->instr
);
106 create_dcc_compress_compute(struct radv_device
*device
)
108 VkResult result
= VK_SUCCESS
;
109 struct radv_shader_module cs
= { .nir
= NULL
};
111 cs
.nir
= build_dcc_decompress_compute_shader(device
);
113 VkDescriptorSetLayoutCreateInfo ds_create_info
= {
114 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
,
115 .flags
= VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR
,
117 .pBindings
= (VkDescriptorSetLayoutBinding
[]) {
120 .descriptorType
= VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
,
121 .descriptorCount
= 1,
122 .stageFlags
= VK_SHADER_STAGE_COMPUTE_BIT
,
123 .pImmutableSamplers
= NULL
127 .descriptorType
= VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
,
128 .descriptorCount
= 1,
129 .stageFlags
= VK_SHADER_STAGE_COMPUTE_BIT
,
130 .pImmutableSamplers
= NULL
135 result
= radv_CreateDescriptorSetLayout(radv_device_to_handle(device
),
137 &device
->meta_state
.alloc
,
138 &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_ds_layout
);
139 if (result
!= VK_SUCCESS
)
143 VkPipelineLayoutCreateInfo pl_create_info
= {
144 .sType
= VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
,
146 .pSetLayouts
= &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_ds_layout
,
147 .pushConstantRangeCount
= 1,
148 .pPushConstantRanges
= &(VkPushConstantRange
){VK_SHADER_STAGE_COMPUTE_BIT
, 0, 8},
151 result
= radv_CreatePipelineLayout(radv_device_to_handle(device
),
153 &device
->meta_state
.alloc
,
154 &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_p_layout
);
155 if (result
!= VK_SUCCESS
)
160 VkPipelineShaderStageCreateInfo pipeline_shader_stage
= {
161 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
162 .stage
= VK_SHADER_STAGE_COMPUTE_BIT
,
163 .module
= radv_shader_module_to_handle(&cs
),
165 .pSpecializationInfo
= NULL
,
168 VkComputePipelineCreateInfo vk_pipeline_info
= {
169 .sType
= VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO
,
170 .stage
= pipeline_shader_stage
,
172 .layout
= device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_p_layout
,
175 result
= radv_CreateComputePipelines(radv_device_to_handle(device
),
176 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
177 1, &vk_pipeline_info
, NULL
,
178 &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_pipeline
);
179 if (result
!= VK_SUCCESS
)
188 create_pass(struct radv_device
*device
)
191 VkDevice device_h
= radv_device_to_handle(device
);
192 const VkAllocationCallbacks
*alloc
= &device
->meta_state
.alloc
;
193 VkAttachmentDescription attachment
;
195 attachment
.format
= VK_FORMAT_UNDEFINED
;
196 attachment
.samples
= 1;
197 attachment
.loadOp
= VK_ATTACHMENT_LOAD_OP_LOAD
;
198 attachment
.storeOp
= VK_ATTACHMENT_STORE_OP_STORE
;
199 attachment
.initialLayout
= VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
;
200 attachment
.finalLayout
= VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
;
202 result
= radv_CreateRenderPass(device_h
,
203 &(VkRenderPassCreateInfo
) {
204 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
,
205 .attachmentCount
= 1,
206 .pAttachments
= &attachment
,
208 .pSubpasses
= &(VkSubpassDescription
) {
209 .pipelineBindPoint
= VK_PIPELINE_BIND_POINT_GRAPHICS
,
210 .inputAttachmentCount
= 0,
211 .colorAttachmentCount
= 1,
212 .pColorAttachments
= (VkAttachmentReference
[]) {
215 .layout
= VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
,
218 .pResolveAttachments
= NULL
,
219 .pDepthStencilAttachment
= &(VkAttachmentReference
) {
220 .attachment
= VK_ATTACHMENT_UNUSED
,
222 .preserveAttachmentCount
= 0,
223 .pPreserveAttachments
= NULL
,
225 .dependencyCount
= 0,
228 &device
->meta_state
.fast_clear_flush
.pass
);
234 create_pipeline_layout(struct radv_device
*device
, VkPipelineLayout
*layout
)
236 VkPipelineLayoutCreateInfo pl_create_info
= {
237 .sType
= VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
,
240 .pushConstantRangeCount
= 0,
241 .pPushConstantRanges
= NULL
,
244 return radv_CreatePipelineLayout(radv_device_to_handle(device
),
246 &device
->meta_state
.alloc
,
251 create_pipeline(struct radv_device
*device
,
252 VkShaderModule vs_module_h
,
253 VkPipelineLayout layout
)
256 VkDevice device_h
= radv_device_to_handle(device
);
258 struct radv_shader_module fs_module
= {
259 .nir
= radv_meta_build_nir_fs_noop(),
262 if (!fs_module
.nir
) {
263 /* XXX: Need more accurate error */
264 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
268 const VkPipelineShaderStageCreateInfo stages
[2] = {
270 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
271 .stage
= VK_SHADER_STAGE_VERTEX_BIT
,
272 .module
= vs_module_h
,
276 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
277 .stage
= VK_SHADER_STAGE_FRAGMENT_BIT
,
278 .module
= radv_shader_module_to_handle(&fs_module
),
283 const VkPipelineVertexInputStateCreateInfo vi_state
= {
284 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO
,
285 .vertexBindingDescriptionCount
= 0,
286 .vertexAttributeDescriptionCount
= 0,
289 const VkPipelineInputAssemblyStateCreateInfo ia_state
= {
290 .sType
= VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO
,
291 .topology
= VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
,
292 .primitiveRestartEnable
= false,
295 const VkPipelineColorBlendStateCreateInfo blend_state
= {
296 .sType
= VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO
,
297 .logicOpEnable
= false,
298 .attachmentCount
= 1,
299 .pAttachments
= (VkPipelineColorBlendAttachmentState
[]) {
301 .colorWriteMask
= VK_COLOR_COMPONENT_R_BIT
|
302 VK_COLOR_COMPONENT_G_BIT
|
303 VK_COLOR_COMPONENT_B_BIT
|
304 VK_COLOR_COMPONENT_A_BIT
,
308 const VkPipelineRasterizationStateCreateInfo rs_state
= {
309 .sType
= VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO
,
310 .depthClampEnable
= false,
311 .rasterizerDiscardEnable
= false,
312 .polygonMode
= VK_POLYGON_MODE_FILL
,
313 .cullMode
= VK_CULL_MODE_NONE
,
314 .frontFace
= VK_FRONT_FACE_COUNTER_CLOCKWISE
,
317 result
= radv_graphics_pipeline_create(device_h
,
318 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
319 &(VkGraphicsPipelineCreateInfo
) {
320 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
324 .pVertexInputState
= &vi_state
,
325 .pInputAssemblyState
= &ia_state
,
327 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
328 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
332 .pRasterizationState
= &rs_state
,
333 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
334 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
335 .rasterizationSamples
= 1,
336 .sampleShadingEnable
= false,
338 .alphaToCoverageEnable
= false,
339 .alphaToOneEnable
= false,
341 .pColorBlendState
= &blend_state
,
342 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
343 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
344 .dynamicStateCount
= 2,
345 .pDynamicStates
= (VkDynamicState
[]) {
346 VK_DYNAMIC_STATE_VIEWPORT
,
347 VK_DYNAMIC_STATE_SCISSOR
,
351 .renderPass
= device
->meta_state
.fast_clear_flush
.pass
,
354 &(struct radv_graphics_pipeline_create_info
) {
355 .use_rectlist
= true,
356 .custom_blend_mode
= V_028808_CB_ELIMINATE_FAST_CLEAR
,
358 &device
->meta_state
.alloc
,
359 &device
->meta_state
.fast_clear_flush
.cmask_eliminate_pipeline
);
360 if (result
!= VK_SUCCESS
)
363 result
= radv_graphics_pipeline_create(device_h
,
364 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
365 &(VkGraphicsPipelineCreateInfo
) {
366 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
370 .pVertexInputState
= &vi_state
,
371 .pInputAssemblyState
= &ia_state
,
373 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
374 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
378 .pRasterizationState
= &rs_state
,
379 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
380 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
381 .rasterizationSamples
= 1,
382 .sampleShadingEnable
= false,
384 .alphaToCoverageEnable
= false,
385 .alphaToOneEnable
= false,
387 .pColorBlendState
= &blend_state
,
388 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
389 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
390 .dynamicStateCount
= 2,
391 .pDynamicStates
= (VkDynamicState
[]) {
392 VK_DYNAMIC_STATE_VIEWPORT
,
393 VK_DYNAMIC_STATE_SCISSOR
,
397 .renderPass
= device
->meta_state
.fast_clear_flush
.pass
,
400 &(struct radv_graphics_pipeline_create_info
) {
401 .use_rectlist
= true,
402 .custom_blend_mode
= V_028808_CB_FMASK_DECOMPRESS
,
404 &device
->meta_state
.alloc
,
405 &device
->meta_state
.fast_clear_flush
.fmask_decompress_pipeline
);
406 if (result
!= VK_SUCCESS
)
409 result
= radv_graphics_pipeline_create(device_h
,
410 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
411 &(VkGraphicsPipelineCreateInfo
) {
412 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
416 .pVertexInputState
= &vi_state
,
417 .pInputAssemblyState
= &ia_state
,
419 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
420 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
424 .pRasterizationState
= &rs_state
,
425 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
426 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
427 .rasterizationSamples
= 1,
428 .sampleShadingEnable
= false,
430 .alphaToCoverageEnable
= false,
431 .alphaToOneEnable
= false,
433 .pColorBlendState
= &blend_state
,
434 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
435 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
436 .dynamicStateCount
= 2,
437 .pDynamicStates
= (VkDynamicState
[]) {
438 VK_DYNAMIC_STATE_VIEWPORT
,
439 VK_DYNAMIC_STATE_SCISSOR
,
443 .renderPass
= device
->meta_state
.fast_clear_flush
.pass
,
446 &(struct radv_graphics_pipeline_create_info
) {
447 .use_rectlist
= true,
448 .custom_blend_mode
= V_028808_CB_DCC_DECOMPRESS
,
450 &device
->meta_state
.alloc
,
451 &device
->meta_state
.fast_clear_flush
.dcc_decompress_pipeline
);
452 if (result
!= VK_SUCCESS
)
458 ralloc_free(fs_module
.nir
);
463 radv_device_finish_meta_fast_clear_flush_state(struct radv_device
*device
)
465 struct radv_meta_state
*state
= &device
->meta_state
;
467 radv_DestroyPipeline(radv_device_to_handle(device
),
468 state
->fast_clear_flush
.dcc_decompress_pipeline
,
470 radv_DestroyPipeline(radv_device_to_handle(device
),
471 state
->fast_clear_flush
.fmask_decompress_pipeline
,
473 radv_DestroyPipeline(radv_device_to_handle(device
),
474 state
->fast_clear_flush
.cmask_eliminate_pipeline
,
476 radv_DestroyRenderPass(radv_device_to_handle(device
),
477 state
->fast_clear_flush
.pass
, &state
->alloc
);
478 radv_DestroyPipelineLayout(radv_device_to_handle(device
),
479 state
->fast_clear_flush
.p_layout
,
482 radv_DestroyPipeline(radv_device_to_handle(device
),
483 state
->fast_clear_flush
.dcc_decompress_compute_pipeline
,
485 radv_DestroyPipelineLayout(radv_device_to_handle(device
),
486 state
->fast_clear_flush
.dcc_decompress_compute_p_layout
,
488 radv_DestroyDescriptorSetLayout(radv_device_to_handle(device
),
489 state
->fast_clear_flush
.dcc_decompress_compute_ds_layout
,
494 radv_device_init_meta_fast_clear_flush_state_internal(struct radv_device
*device
)
496 VkResult res
= VK_SUCCESS
;
498 mtx_lock(&device
->meta_state
.mtx
);
499 if (device
->meta_state
.fast_clear_flush
.cmask_eliminate_pipeline
) {
500 mtx_unlock(&device
->meta_state
.mtx
);
504 struct radv_shader_module vs_module
= { .nir
= radv_meta_build_nir_vs_generate_vertices() };
505 if (!vs_module
.nir
) {
506 /* XXX: Need more accurate error */
507 res
= VK_ERROR_OUT_OF_HOST_MEMORY
;
511 res
= create_pass(device
);
512 if (res
!= VK_SUCCESS
)
515 res
= create_pipeline_layout(device
,
516 &device
->meta_state
.fast_clear_flush
.p_layout
);
517 if (res
!= VK_SUCCESS
)
520 VkShaderModule vs_module_h
= radv_shader_module_to_handle(&vs_module
);
521 res
= create_pipeline(device
, vs_module_h
,
522 device
->meta_state
.fast_clear_flush
.p_layout
);
523 if (res
!= VK_SUCCESS
)
526 res
= create_dcc_compress_compute(device
);
527 if (res
!= VK_SUCCESS
)
533 radv_device_finish_meta_fast_clear_flush_state(device
);
536 ralloc_free(vs_module
.nir
);
537 mtx_unlock(&device
->meta_state
.mtx
);
544 radv_device_init_meta_fast_clear_flush_state(struct radv_device
*device
, bool on_demand
)
549 return radv_device_init_meta_fast_clear_flush_state_internal(device
);
553 radv_emit_set_predication_state_from_image(struct radv_cmd_buffer
*cmd_buffer
,
554 struct radv_image
*image
,
555 uint64_t pred_offset
, bool value
)
560 va
= radv_buffer_get_va(image
->bo
) + image
->offset
;
564 si_emit_set_predication_state(cmd_buffer
, true, va
);
568 radv_process_color_image_layer(struct radv_cmd_buffer
*cmd_buffer
,
569 struct radv_image
*image
,
570 const VkImageSubresourceRange
*range
,
571 int level
, int layer
)
573 struct radv_device
*device
= cmd_buffer
->device
;
574 struct radv_image_view iview
;
575 uint32_t width
, height
;
577 width
= radv_minify(image
->info
.width
, range
->baseMipLevel
+ level
);
578 height
= radv_minify(image
->info
.height
, range
->baseMipLevel
+ level
);
580 radv_image_view_init(&iview
, device
,
581 &(VkImageViewCreateInfo
) {
582 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
583 .image
= radv_image_to_handle(image
),
584 .viewType
= radv_meta_get_view_type(image
),
585 .format
= image
->vk_format
,
586 .subresourceRange
= {
587 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
588 .baseMipLevel
= range
->baseMipLevel
+ level
,
590 .baseArrayLayer
= range
->baseArrayLayer
+ layer
,
596 radv_CreateFramebuffer(radv_device_to_handle(device
),
597 &(VkFramebufferCreateInfo
) {
598 .sType
= VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
,
599 .attachmentCount
= 1,
600 .pAttachments
= (VkImageView
[]) {
601 radv_image_view_to_handle(&iview
)
606 }, &cmd_buffer
->pool
->alloc
, &fb_h
);
608 radv_CmdBeginRenderPass(radv_cmd_buffer_to_handle(cmd_buffer
),
609 &(VkRenderPassBeginInfo
) {
610 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO
,
611 .renderPass
= device
->meta_state
.fast_clear_flush
.pass
,
623 .clearValueCount
= 0,
624 .pClearValues
= NULL
,
625 }, VK_SUBPASS_CONTENTS_INLINE
);
627 radv_CmdDraw(radv_cmd_buffer_to_handle(cmd_buffer
), 3, 1, 0, 0);
629 cmd_buffer
->state
.flush_bits
|= RADV_CMD_FLAG_FLUSH_AND_INV_CB
|
630 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META
;
632 radv_CmdEndRenderPass(radv_cmd_buffer_to_handle(cmd_buffer
));
634 radv_DestroyFramebuffer(radv_device_to_handle(device
), fb_h
,
635 &cmd_buffer
->pool
->alloc
);
639 radv_process_color_image(struct radv_cmd_buffer
*cmd_buffer
,
640 struct radv_image
*image
,
641 const VkImageSubresourceRange
*subresourceRange
,
644 struct radv_meta_saved_state saved_state
;
645 VkPipeline
*pipeline
;
647 if (decompress_dcc
&& radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
)) {
648 pipeline
= &cmd_buffer
->device
->meta_state
.fast_clear_flush
.dcc_decompress_pipeline
;
649 } else if (radv_image_has_fmask(image
) && !image
->tc_compatible_cmask
) {
650 pipeline
= &cmd_buffer
->device
->meta_state
.fast_clear_flush
.fmask_decompress_pipeline
;
652 pipeline
= &cmd_buffer
->device
->meta_state
.fast_clear_flush
.cmask_eliminate_pipeline
;
658 ret
= radv_device_init_meta_fast_clear_flush_state_internal(cmd_buffer
->device
);
659 if (ret
!= VK_SUCCESS
) {
660 cmd_buffer
->record_result
= ret
;
665 radv_meta_save(&saved_state
, cmd_buffer
,
666 RADV_META_SAVE_GRAPHICS_PIPELINE
|
667 RADV_META_SAVE_PASS
);
669 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer
),
670 VK_PIPELINE_BIND_POINT_GRAPHICS
, *pipeline
);
672 for (uint32_t l
= 0; l
< radv_get_levelCount(image
, subresourceRange
); ++l
) {
673 uint32_t width
, height
;
675 /* Do not decompress levels without DCC. */
676 if (decompress_dcc
&&
677 !radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
+ l
))
680 width
= radv_minify(image
->info
.width
,
681 subresourceRange
->baseMipLevel
+ l
);
682 height
= radv_minify(image
->info
.height
,
683 subresourceRange
->baseMipLevel
+ l
);
685 radv_CmdSetViewport(radv_cmd_buffer_to_handle(cmd_buffer
), 0, 1,
695 radv_CmdSetScissor(radv_cmd_buffer_to_handle(cmd_buffer
), 0, 1,
698 .extent
= { width
, height
},
701 for (uint32_t s
= 0; s
< radv_get_layerCount(image
, subresourceRange
); s
++) {
702 radv_process_color_image_layer(cmd_buffer
, image
,
703 subresourceRange
, l
, s
);
707 radv_meta_restore(&saved_state
, cmd_buffer
);
711 radv_emit_color_decompress(struct radv_cmd_buffer
*cmd_buffer
,
712 struct radv_image
*image
,
713 const VkImageSubresourceRange
*subresourceRange
,
716 bool old_predicating
= false;
718 assert(cmd_buffer
->queue_family_index
== RADV_QUEUE_GENERAL
);
720 if (radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
)) {
721 uint64_t pred_offset
= decompress_dcc
? image
->dcc_pred_offset
:
722 image
->fce_pred_offset
;
723 pred_offset
+= 8 * subresourceRange
->baseMipLevel
;
725 old_predicating
= cmd_buffer
->state
.predicating
;
727 radv_emit_set_predication_state_from_image(cmd_buffer
, image
, pred_offset
, true);
728 cmd_buffer
->state
.predicating
= true;
731 radv_process_color_image(cmd_buffer
, image
, subresourceRange
,
734 if (radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
)) {
735 uint64_t pred_offset
= decompress_dcc
? image
->dcc_pred_offset
:
736 image
->fce_pred_offset
;
737 pred_offset
+= 8 * subresourceRange
->baseMipLevel
;
739 cmd_buffer
->state
.predicating
= old_predicating
;
741 radv_emit_set_predication_state_from_image(cmd_buffer
, image
, pred_offset
, false);
743 if (cmd_buffer
->state
.predication_type
!= -1) {
744 /* Restore previous conditional rendering user state. */
745 si_emit_set_predication_state(cmd_buffer
,
746 cmd_buffer
->state
.predication_type
,
747 cmd_buffer
->state
.predication_va
);
751 if (radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
)) {
752 /* Clear the image's fast-clear eliminate predicate because
753 * FMASK and DCC also imply a fast-clear eliminate.
755 radv_update_fce_metadata(cmd_buffer
, image
, subresourceRange
, false);
757 /* Mark the image as being decompressed. */
759 radv_update_dcc_metadata(cmd_buffer
, image
, subresourceRange
, false);
764 radv_fast_clear_flush_image_inplace(struct radv_cmd_buffer
*cmd_buffer
,
765 struct radv_image
*image
,
766 const VkImageSubresourceRange
*subresourceRange
)
768 radv_emit_color_decompress(cmd_buffer
, image
, subresourceRange
, false);
772 radv_decompress_dcc_gfx(struct radv_cmd_buffer
*cmd_buffer
,
773 struct radv_image
*image
,
774 const VkImageSubresourceRange
*subresourceRange
)
776 radv_emit_color_decompress(cmd_buffer
, image
, subresourceRange
, true);
780 radv_decompress_dcc_compute(struct radv_cmd_buffer
*cmd_buffer
,
781 struct radv_image
*image
,
782 const VkImageSubresourceRange
*subresourceRange
)
784 struct radv_meta_saved_state saved_state
;
785 struct radv_image_view iview
= {0};
786 struct radv_device
*device
= cmd_buffer
->device
;
788 /* This assumes the image is 2d with 1 layer */
789 struct radv_cmd_state
*state
= &cmd_buffer
->state
;
791 state
->flush_bits
|= RADV_CMD_FLAG_FLUSH_AND_INV_CB
|
792 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META
;
794 if (!cmd_buffer
->device
->meta_state
.fast_clear_flush
.cmask_eliminate_pipeline
) {
795 VkResult ret
= radv_device_init_meta_fast_clear_flush_state_internal(cmd_buffer
->device
);
796 if (ret
!= VK_SUCCESS
) {
797 cmd_buffer
->record_result
= ret
;
802 radv_meta_save(&saved_state
, cmd_buffer
, RADV_META_SAVE_DESCRIPTORS
|
803 RADV_META_SAVE_COMPUTE_PIPELINE
);
805 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer
),
806 VK_PIPELINE_BIND_POINT_COMPUTE
,
807 device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_pipeline
);
809 for (uint32_t l
= 0; l
< radv_get_levelCount(image
, subresourceRange
); l
++) {
810 uint32_t width
, height
;
812 /* Do not decompress levels without DCC. */
813 if (!radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
+ l
))
816 width
= radv_minify(image
->info
.width
,
817 subresourceRange
->baseMipLevel
+ l
);
818 height
= radv_minify(image
->info
.height
,
819 subresourceRange
->baseMipLevel
+ l
);
821 for (uint32_t s
= 0; s
< radv_get_layerCount(image
, subresourceRange
); s
++) {
822 radv_image_view_init(&iview
, cmd_buffer
->device
,
823 &(VkImageViewCreateInfo
) {
824 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
825 .image
= radv_image_to_handle(image
),
826 .viewType
= VK_IMAGE_VIEW_TYPE_2D
,
827 .format
= image
->vk_format
,
828 .subresourceRange
= {
829 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
830 .baseMipLevel
= subresourceRange
->baseMipLevel
+ l
,
832 .baseArrayLayer
= subresourceRange
->baseArrayLayer
+ s
,
837 radv_meta_push_descriptor_set(cmd_buffer
,
838 VK_PIPELINE_BIND_POINT_COMPUTE
,
839 device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_p_layout
,
841 2, /* descriptorWriteCount */
842 (VkWriteDescriptorSet
[]) {
844 .sType
= VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET
,
846 .dstArrayElement
= 0,
847 .descriptorCount
= 1,
848 .descriptorType
= VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
,
849 .pImageInfo
= (VkDescriptorImageInfo
[]) {
851 .sampler
= VK_NULL_HANDLE
,
852 .imageView
= radv_image_view_to_handle(&iview
),
853 .imageLayout
= VK_IMAGE_LAYOUT_GENERAL
,
858 .sType
= VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET
,
860 .dstArrayElement
= 0,
861 .descriptorCount
= 1,
862 .descriptorType
= VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
,
863 .pImageInfo
= (VkDescriptorImageInfo
[]) {
865 .sampler
= VK_NULL_HANDLE
,
866 .imageView
= radv_image_view_to_handle(&iview
),
867 .imageLayout
= VK_IMAGE_LAYOUT_GENERAL
,
873 radv_unaligned_dispatch(cmd_buffer
, width
, height
, 1);
877 /* Mark this image as actually being decompressed. */
878 radv_update_dcc_metadata(cmd_buffer
, image
, subresourceRange
, false);
880 /* The fill buffer below does its own saving */
881 radv_meta_restore(&saved_state
, cmd_buffer
);
883 state
->flush_bits
|= RADV_CMD_FLAG_CS_PARTIAL_FLUSH
|
884 RADV_CMD_FLAG_INV_VCACHE
;
887 /* Initialize the DCC metadata as "fully expanded". */
888 radv_initialize_dcc(cmd_buffer
, image
, subresourceRange
, 0xffffffff);
892 radv_decompress_dcc(struct radv_cmd_buffer
*cmd_buffer
,
893 struct radv_image
*image
,
894 const VkImageSubresourceRange
*subresourceRange
)
896 if (cmd_buffer
->queue_family_index
== RADV_QUEUE_GENERAL
)
897 radv_decompress_dcc_gfx(cmd_buffer
, image
, subresourceRange
);
899 radv_decompress_dcc_compute(cmd_buffer
, image
, subresourceRange
);