/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
27 #include "radv_meta.h"
28 #include "radv_private.h"
33 build_dcc_decompress_compute_shader(struct radv_device
*dev
)
36 const struct glsl_type
*buf_type
= glsl_sampler_type(GLSL_SAMPLER_DIM_2D
,
40 const struct glsl_type
*img_type
= glsl_sampler_type(GLSL_SAMPLER_DIM_2D
,
44 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_COMPUTE
, NULL
);
45 b
.shader
->info
.name
= ralloc_strdup(b
.shader
, "dcc_decompress_compute");
47 /* We need at least 16/16/1 to cover an entire DCC block in a single workgroup. */
48 b
.shader
->info
.cs
.local_size
[0] = 16;
49 b
.shader
->info
.cs
.local_size
[1] = 16;
50 b
.shader
->info
.cs
.local_size
[2] = 1;
51 nir_variable
*input_img
= nir_variable_create(b
.shader
, nir_var_uniform
,
53 input_img
->data
.descriptor_set
= 0;
54 input_img
->data
.binding
= 0;
56 nir_variable
*output_img
= nir_variable_create(b
.shader
, nir_var_uniform
,
58 output_img
->data
.descriptor_set
= 0;
59 output_img
->data
.binding
= 1;
61 nir_ssa_def
*invoc_id
= nir_load_local_invocation_id(&b
);
62 nir_ssa_def
*wg_id
= nir_load_work_group_id(&b
);
63 nir_ssa_def
*block_size
= nir_imm_ivec4(&b
,
64 b
.shader
->info
.cs
.local_size
[0],
65 b
.shader
->info
.cs
.local_size
[1],
66 b
.shader
->info
.cs
.local_size
[2], 0);
68 nir_ssa_def
*global_id
= nir_iadd(&b
, nir_imul(&b
, wg_id
, block_size
), invoc_id
);
69 nir_ssa_def
*input_img_deref
= &nir_build_deref_var(&b
, input_img
)->dest
.ssa
;
71 nir_tex_instr
*tex
= nir_tex_instr_create(b
.shader
, 3);
72 tex
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
73 tex
->op
= nir_texop_txf
;
74 tex
->src
[0].src_type
= nir_tex_src_coord
;
75 tex
->src
[0].src
= nir_src_for_ssa(nir_channels(&b
, global_id
, 3));
76 tex
->src
[1].src_type
= nir_tex_src_lod
;
77 tex
->src
[1].src
= nir_src_for_ssa(nir_imm_int(&b
, 0));
78 tex
->src
[2].src_type
= nir_tex_src_texture_deref
;
79 tex
->src
[2].src
= nir_src_for_ssa(input_img_deref
);
80 tex
->dest_type
= nir_type_float
;
81 tex
->is_array
= false;
82 tex
->coord_components
= 2;
84 nir_ssa_dest_init(&tex
->instr
, &tex
->dest
, 4, 32, "tex");
85 nir_builder_instr_insert(&b
, &tex
->instr
);
87 nir_intrinsic_instr
*membar
= nir_intrinsic_instr_create(b
.shader
, nir_intrinsic_memory_barrier
);
88 nir_builder_instr_insert(&b
, &membar
->instr
);
90 nir_intrinsic_instr
*bar
= nir_intrinsic_instr_create(b
.shader
, nir_intrinsic_barrier
);
91 nir_builder_instr_insert(&b
, &bar
->instr
);
93 nir_ssa_def
*outval
= &tex
->dest
.ssa
;
94 nir_intrinsic_instr
*store
= nir_intrinsic_instr_create(b
.shader
, nir_intrinsic_image_deref_store
);
95 store
->num_components
= 4;
96 store
->src
[0] = nir_src_for_ssa(&nir_build_deref_var(&b
, output_img
)->dest
.ssa
);
97 store
->src
[1] = nir_src_for_ssa(global_id
);
98 store
->src
[2] = nir_src_for_ssa(nir_ssa_undef(&b
, 1, 32));
99 store
->src
[3] = nir_src_for_ssa(outval
);
100 store
->src
[4] = nir_src_for_ssa(nir_imm_int(&b
, 0));
102 nir_builder_instr_insert(&b
, &store
->instr
);
107 create_dcc_compress_compute(struct radv_device
*device
)
109 VkResult result
= VK_SUCCESS
;
110 struct radv_shader_module cs
= { .nir
= NULL
};
112 cs
.nir
= build_dcc_decompress_compute_shader(device
);
114 VkDescriptorSetLayoutCreateInfo ds_create_info
= {
115 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
,
116 .flags
= VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR
,
118 .pBindings
= (VkDescriptorSetLayoutBinding
[]) {
121 .descriptorType
= VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
,
122 .descriptorCount
= 1,
123 .stageFlags
= VK_SHADER_STAGE_COMPUTE_BIT
,
124 .pImmutableSamplers
= NULL
128 .descriptorType
= VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
,
129 .descriptorCount
= 1,
130 .stageFlags
= VK_SHADER_STAGE_COMPUTE_BIT
,
131 .pImmutableSamplers
= NULL
136 result
= radv_CreateDescriptorSetLayout(radv_device_to_handle(device
),
138 &device
->meta_state
.alloc
,
139 &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_ds_layout
);
140 if (result
!= VK_SUCCESS
)
144 VkPipelineLayoutCreateInfo pl_create_info
= {
145 .sType
= VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
,
147 .pSetLayouts
= &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_ds_layout
,
148 .pushConstantRangeCount
= 1,
149 .pPushConstantRanges
= &(VkPushConstantRange
){VK_SHADER_STAGE_COMPUTE_BIT
, 0, 8},
152 result
= radv_CreatePipelineLayout(radv_device_to_handle(device
),
154 &device
->meta_state
.alloc
,
155 &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_p_layout
);
156 if (result
!= VK_SUCCESS
)
161 VkPipelineShaderStageCreateInfo pipeline_shader_stage
= {
162 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
163 .stage
= VK_SHADER_STAGE_COMPUTE_BIT
,
164 .module
= radv_shader_module_to_handle(&cs
),
166 .pSpecializationInfo
= NULL
,
169 VkComputePipelineCreateInfo vk_pipeline_info
= {
170 .sType
= VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO
,
171 .stage
= pipeline_shader_stage
,
173 .layout
= device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_p_layout
,
176 result
= radv_CreateComputePipelines(radv_device_to_handle(device
),
177 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
178 1, &vk_pipeline_info
, NULL
,
179 &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_pipeline
);
180 if (result
!= VK_SUCCESS
)
189 create_pass(struct radv_device
*device
)
192 VkDevice device_h
= radv_device_to_handle(device
);
193 const VkAllocationCallbacks
*alloc
= &device
->meta_state
.alloc
;
194 VkAttachmentDescription attachment
;
196 attachment
.format
= VK_FORMAT_UNDEFINED
;
197 attachment
.samples
= 1;
198 attachment
.loadOp
= VK_ATTACHMENT_LOAD_OP_LOAD
;
199 attachment
.storeOp
= VK_ATTACHMENT_STORE_OP_STORE
;
200 attachment
.initialLayout
= VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
;
201 attachment
.finalLayout
= VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
;
203 result
= radv_CreateRenderPass(device_h
,
204 &(VkRenderPassCreateInfo
) {
205 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
,
206 .attachmentCount
= 1,
207 .pAttachments
= &attachment
,
209 .pSubpasses
= &(VkSubpassDescription
) {
210 .pipelineBindPoint
= VK_PIPELINE_BIND_POINT_GRAPHICS
,
211 .inputAttachmentCount
= 0,
212 .colorAttachmentCount
= 1,
213 .pColorAttachments
= (VkAttachmentReference
[]) {
216 .layout
= VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
,
219 .pResolveAttachments
= NULL
,
220 .pDepthStencilAttachment
= &(VkAttachmentReference
) {
221 .attachment
= VK_ATTACHMENT_UNUSED
,
223 .preserveAttachmentCount
= 0,
224 .pPreserveAttachments
= NULL
,
226 .dependencyCount
= 0,
229 &device
->meta_state
.fast_clear_flush
.pass
);
235 create_pipeline_layout(struct radv_device
*device
, VkPipelineLayout
*layout
)
237 VkPipelineLayoutCreateInfo pl_create_info
= {
238 .sType
= VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
,
241 .pushConstantRangeCount
= 0,
242 .pPushConstantRanges
= NULL
,
245 return radv_CreatePipelineLayout(radv_device_to_handle(device
),
247 &device
->meta_state
.alloc
,
252 create_pipeline(struct radv_device
*device
,
253 VkShaderModule vs_module_h
,
254 VkPipelineLayout layout
)
257 VkDevice device_h
= radv_device_to_handle(device
);
259 struct radv_shader_module fs_module
= {
260 .nir
= radv_meta_build_nir_fs_noop(),
263 if (!fs_module
.nir
) {
264 /* XXX: Need more accurate error */
265 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
269 const VkPipelineShaderStageCreateInfo stages
[2] = {
271 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
272 .stage
= VK_SHADER_STAGE_VERTEX_BIT
,
273 .module
= vs_module_h
,
277 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
278 .stage
= VK_SHADER_STAGE_FRAGMENT_BIT
,
279 .module
= radv_shader_module_to_handle(&fs_module
),
284 const VkPipelineVertexInputStateCreateInfo vi_state
= {
285 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO
,
286 .vertexBindingDescriptionCount
= 0,
287 .vertexAttributeDescriptionCount
= 0,
290 const VkPipelineInputAssemblyStateCreateInfo ia_state
= {
291 .sType
= VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO
,
292 .topology
= VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
,
293 .primitiveRestartEnable
= false,
296 const VkPipelineColorBlendStateCreateInfo blend_state
= {
297 .sType
= VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO
,
298 .logicOpEnable
= false,
299 .attachmentCount
= 1,
300 .pAttachments
= (VkPipelineColorBlendAttachmentState
[]) {
302 .colorWriteMask
= VK_COLOR_COMPONENT_R_BIT
|
303 VK_COLOR_COMPONENT_G_BIT
|
304 VK_COLOR_COMPONENT_B_BIT
|
305 VK_COLOR_COMPONENT_A_BIT
,
309 const VkPipelineRasterizationStateCreateInfo rs_state
= {
310 .sType
= VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO
,
311 .depthClampEnable
= false,
312 .rasterizerDiscardEnable
= false,
313 .polygonMode
= VK_POLYGON_MODE_FILL
,
314 .cullMode
= VK_CULL_MODE_NONE
,
315 .frontFace
= VK_FRONT_FACE_COUNTER_CLOCKWISE
,
318 result
= radv_graphics_pipeline_create(device_h
,
319 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
320 &(VkGraphicsPipelineCreateInfo
) {
321 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
325 .pVertexInputState
= &vi_state
,
326 .pInputAssemblyState
= &ia_state
,
328 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
329 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
333 .pRasterizationState
= &rs_state
,
334 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
335 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
336 .rasterizationSamples
= 1,
337 .sampleShadingEnable
= false,
339 .alphaToCoverageEnable
= false,
340 .alphaToOneEnable
= false,
342 .pColorBlendState
= &blend_state
,
343 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
344 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
345 .dynamicStateCount
= 2,
346 .pDynamicStates
= (VkDynamicState
[]) {
347 VK_DYNAMIC_STATE_VIEWPORT
,
348 VK_DYNAMIC_STATE_SCISSOR
,
352 .renderPass
= device
->meta_state
.fast_clear_flush
.pass
,
355 &(struct radv_graphics_pipeline_create_info
) {
356 .use_rectlist
= true,
357 .custom_blend_mode
= V_028808_CB_ELIMINATE_FAST_CLEAR
,
359 &device
->meta_state
.alloc
,
360 &device
->meta_state
.fast_clear_flush
.cmask_eliminate_pipeline
);
361 if (result
!= VK_SUCCESS
)
364 result
= radv_graphics_pipeline_create(device_h
,
365 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
366 &(VkGraphicsPipelineCreateInfo
) {
367 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
371 .pVertexInputState
= &vi_state
,
372 .pInputAssemblyState
= &ia_state
,
374 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
375 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
379 .pRasterizationState
= &rs_state
,
380 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
381 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
382 .rasterizationSamples
= 1,
383 .sampleShadingEnable
= false,
385 .alphaToCoverageEnable
= false,
386 .alphaToOneEnable
= false,
388 .pColorBlendState
= &blend_state
,
389 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
390 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
391 .dynamicStateCount
= 2,
392 .pDynamicStates
= (VkDynamicState
[]) {
393 VK_DYNAMIC_STATE_VIEWPORT
,
394 VK_DYNAMIC_STATE_SCISSOR
,
398 .renderPass
= device
->meta_state
.fast_clear_flush
.pass
,
401 &(struct radv_graphics_pipeline_create_info
) {
402 .use_rectlist
= true,
403 .custom_blend_mode
= V_028808_CB_FMASK_DECOMPRESS
,
405 &device
->meta_state
.alloc
,
406 &device
->meta_state
.fast_clear_flush
.fmask_decompress_pipeline
);
407 if (result
!= VK_SUCCESS
)
410 result
= radv_graphics_pipeline_create(device_h
,
411 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
412 &(VkGraphicsPipelineCreateInfo
) {
413 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
417 .pVertexInputState
= &vi_state
,
418 .pInputAssemblyState
= &ia_state
,
420 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
421 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
425 .pRasterizationState
= &rs_state
,
426 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
427 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
428 .rasterizationSamples
= 1,
429 .sampleShadingEnable
= false,
431 .alphaToCoverageEnable
= false,
432 .alphaToOneEnable
= false,
434 .pColorBlendState
= &blend_state
,
435 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
436 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
437 .dynamicStateCount
= 2,
438 .pDynamicStates
= (VkDynamicState
[]) {
439 VK_DYNAMIC_STATE_VIEWPORT
,
440 VK_DYNAMIC_STATE_SCISSOR
,
444 .renderPass
= device
->meta_state
.fast_clear_flush
.pass
,
447 &(struct radv_graphics_pipeline_create_info
) {
448 .use_rectlist
= true,
449 .custom_blend_mode
= V_028808_CB_DCC_DECOMPRESS
,
451 &device
->meta_state
.alloc
,
452 &device
->meta_state
.fast_clear_flush
.dcc_decompress_pipeline
);
453 if (result
!= VK_SUCCESS
)
459 ralloc_free(fs_module
.nir
);
464 radv_device_finish_meta_fast_clear_flush_state(struct radv_device
*device
)
466 struct radv_meta_state
*state
= &device
->meta_state
;
468 radv_DestroyPipeline(radv_device_to_handle(device
),
469 state
->fast_clear_flush
.dcc_decompress_pipeline
,
471 radv_DestroyPipeline(radv_device_to_handle(device
),
472 state
->fast_clear_flush
.fmask_decompress_pipeline
,
474 radv_DestroyPipeline(radv_device_to_handle(device
),
475 state
->fast_clear_flush
.cmask_eliminate_pipeline
,
477 radv_DestroyRenderPass(radv_device_to_handle(device
),
478 state
->fast_clear_flush
.pass
, &state
->alloc
);
479 radv_DestroyPipelineLayout(radv_device_to_handle(device
),
480 state
->fast_clear_flush
.p_layout
,
483 radv_DestroyPipeline(radv_device_to_handle(device
),
484 state
->fast_clear_flush
.dcc_decompress_compute_pipeline
,
486 radv_DestroyPipelineLayout(radv_device_to_handle(device
),
487 state
->fast_clear_flush
.dcc_decompress_compute_p_layout
,
489 radv_DestroyDescriptorSetLayout(radv_device_to_handle(device
),
490 state
->fast_clear_flush
.dcc_decompress_compute_ds_layout
,
495 radv_device_init_meta_fast_clear_flush_state_internal(struct radv_device
*device
)
497 VkResult res
= VK_SUCCESS
;
499 mtx_lock(&device
->meta_state
.mtx
);
500 if (device
->meta_state
.fast_clear_flush
.cmask_eliminate_pipeline
) {
501 mtx_unlock(&device
->meta_state
.mtx
);
505 struct radv_shader_module vs_module
= { .nir
= radv_meta_build_nir_vs_generate_vertices() };
506 if (!vs_module
.nir
) {
507 /* XXX: Need more accurate error */
508 res
= VK_ERROR_OUT_OF_HOST_MEMORY
;
512 res
= create_pass(device
);
513 if (res
!= VK_SUCCESS
)
516 res
= create_pipeline_layout(device
,
517 &device
->meta_state
.fast_clear_flush
.p_layout
);
518 if (res
!= VK_SUCCESS
)
521 VkShaderModule vs_module_h
= radv_shader_module_to_handle(&vs_module
);
522 res
= create_pipeline(device
, vs_module_h
,
523 device
->meta_state
.fast_clear_flush
.p_layout
);
524 if (res
!= VK_SUCCESS
)
527 res
= create_dcc_compress_compute(device
);
528 if (res
!= VK_SUCCESS
)
534 radv_device_finish_meta_fast_clear_flush_state(device
);
537 ralloc_free(vs_module
.nir
);
538 mtx_unlock(&device
->meta_state
.mtx
);
545 radv_device_init_meta_fast_clear_flush_state(struct radv_device
*device
, bool on_demand
)
550 return radv_device_init_meta_fast_clear_flush_state_internal(device
);
554 radv_emit_set_predication_state_from_image(struct radv_cmd_buffer
*cmd_buffer
,
555 struct radv_image
*image
,
556 uint64_t pred_offset
, bool value
)
561 va
= radv_buffer_get_va(image
->bo
) + image
->offset
;
565 si_emit_set_predication_state(cmd_buffer
, true, va
);
569 radv_process_color_image_layer(struct radv_cmd_buffer
*cmd_buffer
,
570 struct radv_image
*image
,
571 const VkImageSubresourceRange
*range
,
572 int level
, int layer
)
574 struct radv_device
*device
= cmd_buffer
->device
;
575 struct radv_image_view iview
;
576 uint32_t width
, height
;
578 width
= radv_minify(image
->info
.width
, range
->baseMipLevel
+ level
);
579 height
= radv_minify(image
->info
.height
, range
->baseMipLevel
+ level
);
581 radv_image_view_init(&iview
, device
,
582 &(VkImageViewCreateInfo
) {
583 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
584 .image
= radv_image_to_handle(image
),
585 .viewType
= radv_meta_get_view_type(image
),
586 .format
= image
->vk_format
,
587 .subresourceRange
= {
588 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
589 .baseMipLevel
= range
->baseMipLevel
+ level
,
591 .baseArrayLayer
= range
->baseArrayLayer
+ layer
,
597 radv_CreateFramebuffer(radv_device_to_handle(device
),
598 &(VkFramebufferCreateInfo
) {
599 .sType
= VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
,
600 .attachmentCount
= 1,
601 .pAttachments
= (VkImageView
[]) {
602 radv_image_view_to_handle(&iview
)
607 }, &cmd_buffer
->pool
->alloc
, &fb_h
);
609 radv_CmdBeginRenderPass(radv_cmd_buffer_to_handle(cmd_buffer
),
610 &(VkRenderPassBeginInfo
) {
611 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO
,
612 .renderPass
= device
->meta_state
.fast_clear_flush
.pass
,
624 .clearValueCount
= 0,
625 .pClearValues
= NULL
,
626 }, VK_SUBPASS_CONTENTS_INLINE
);
628 radv_CmdDraw(radv_cmd_buffer_to_handle(cmd_buffer
), 3, 1, 0, 0);
630 cmd_buffer
->state
.flush_bits
|= RADV_CMD_FLAG_FLUSH_AND_INV_CB
|
631 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META
;
633 radv_CmdEndRenderPass(radv_cmd_buffer_to_handle(cmd_buffer
));
635 radv_DestroyFramebuffer(radv_device_to_handle(device
), fb_h
,
636 &cmd_buffer
->pool
->alloc
);
640 radv_process_color_image(struct radv_cmd_buffer
*cmd_buffer
,
641 struct radv_image
*image
,
642 const VkImageSubresourceRange
*subresourceRange
,
645 struct radv_meta_saved_state saved_state
;
646 VkPipeline
*pipeline
;
648 if (decompress_dcc
&& radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
)) {
649 pipeline
= &cmd_buffer
->device
->meta_state
.fast_clear_flush
.dcc_decompress_pipeline
;
650 } else if (radv_image_has_fmask(image
) && !image
->tc_compatible_cmask
) {
651 pipeline
= &cmd_buffer
->device
->meta_state
.fast_clear_flush
.fmask_decompress_pipeline
;
653 pipeline
= &cmd_buffer
->device
->meta_state
.fast_clear_flush
.cmask_eliminate_pipeline
;
659 ret
= radv_device_init_meta_fast_clear_flush_state_internal(cmd_buffer
->device
);
660 if (ret
!= VK_SUCCESS
) {
661 cmd_buffer
->record_result
= ret
;
666 radv_meta_save(&saved_state
, cmd_buffer
,
667 RADV_META_SAVE_GRAPHICS_PIPELINE
|
668 RADV_META_SAVE_PASS
);
670 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer
),
671 VK_PIPELINE_BIND_POINT_GRAPHICS
, *pipeline
);
673 for (uint32_t l
= 0; l
< radv_get_levelCount(image
, subresourceRange
); ++l
) {
674 uint32_t width
, height
;
676 /* Do not decompress levels without DCC. */
677 if (decompress_dcc
&&
678 !radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
+ l
))
681 width
= radv_minify(image
->info
.width
,
682 subresourceRange
->baseMipLevel
+ l
);
683 height
= radv_minify(image
->info
.height
,
684 subresourceRange
->baseMipLevel
+ l
);
686 radv_CmdSetViewport(radv_cmd_buffer_to_handle(cmd_buffer
), 0, 1,
696 radv_CmdSetScissor(radv_cmd_buffer_to_handle(cmd_buffer
), 0, 1,
699 .extent
= { width
, height
},
702 for (uint32_t s
= 0; s
< radv_get_layerCount(image
, subresourceRange
); s
++) {
703 radv_process_color_image_layer(cmd_buffer
, image
,
704 subresourceRange
, l
, s
);
708 radv_meta_restore(&saved_state
, cmd_buffer
);
712 radv_emit_color_decompress(struct radv_cmd_buffer
*cmd_buffer
,
713 struct radv_image
*image
,
714 const VkImageSubresourceRange
*subresourceRange
,
717 bool old_predicating
= false;
719 assert(cmd_buffer
->queue_family_index
== RADV_QUEUE_GENERAL
);
721 if (radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
)) {
722 uint64_t pred_offset
= decompress_dcc
? image
->dcc_pred_offset
:
723 image
->fce_pred_offset
;
724 pred_offset
+= 8 * subresourceRange
->baseMipLevel
;
726 old_predicating
= cmd_buffer
->state
.predicating
;
728 radv_emit_set_predication_state_from_image(cmd_buffer
, image
, pred_offset
, true);
729 cmd_buffer
->state
.predicating
= true;
732 radv_process_color_image(cmd_buffer
, image
, subresourceRange
,
735 if (radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
)) {
736 uint64_t pred_offset
= decompress_dcc
? image
->dcc_pred_offset
:
737 image
->fce_pred_offset
;
738 pred_offset
+= 8 * subresourceRange
->baseMipLevel
;
740 cmd_buffer
->state
.predicating
= old_predicating
;
742 radv_emit_set_predication_state_from_image(cmd_buffer
, image
, pred_offset
, false);
744 if (cmd_buffer
->state
.predication_type
!= -1) {
745 /* Restore previous conditional rendering user state. */
746 si_emit_set_predication_state(cmd_buffer
,
747 cmd_buffer
->state
.predication_type
,
748 cmd_buffer
->state
.predication_va
);
752 if (radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
)) {
753 /* Clear the image's fast-clear eliminate predicate because
754 * FMASK and DCC also imply a fast-clear eliminate.
756 radv_update_fce_metadata(cmd_buffer
, image
, subresourceRange
, false);
758 /* Mark the image as being decompressed. */
760 radv_update_dcc_metadata(cmd_buffer
, image
, subresourceRange
, false);
765 radv_fast_clear_flush_image_inplace(struct radv_cmd_buffer
*cmd_buffer
,
766 struct radv_image
*image
,
767 const VkImageSubresourceRange
*subresourceRange
)
769 radv_emit_color_decompress(cmd_buffer
, image
, subresourceRange
, false);
773 radv_decompress_dcc_gfx(struct radv_cmd_buffer
*cmd_buffer
,
774 struct radv_image
*image
,
775 const VkImageSubresourceRange
*subresourceRange
)
777 radv_emit_color_decompress(cmd_buffer
, image
, subresourceRange
, true);
781 radv_decompress_dcc_compute(struct radv_cmd_buffer
*cmd_buffer
,
782 struct radv_image
*image
,
783 const VkImageSubresourceRange
*subresourceRange
)
785 struct radv_meta_saved_state saved_state
;
786 struct radv_image_view load_iview
= {0};
787 struct radv_image_view store_iview
= {0};
788 struct radv_device
*device
= cmd_buffer
->device
;
790 /* This assumes the image is 2d with 1 layer */
791 struct radv_cmd_state
*state
= &cmd_buffer
->state
;
793 state
->flush_bits
|= RADV_CMD_FLAG_FLUSH_AND_INV_CB
|
794 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META
;
796 if (!cmd_buffer
->device
->meta_state
.fast_clear_flush
.cmask_eliminate_pipeline
) {
797 VkResult ret
= radv_device_init_meta_fast_clear_flush_state_internal(cmd_buffer
->device
);
798 if (ret
!= VK_SUCCESS
) {
799 cmd_buffer
->record_result
= ret
;
804 radv_meta_save(&saved_state
, cmd_buffer
, RADV_META_SAVE_DESCRIPTORS
|
805 RADV_META_SAVE_COMPUTE_PIPELINE
);
807 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer
),
808 VK_PIPELINE_BIND_POINT_COMPUTE
,
809 device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_pipeline
);
811 for (uint32_t l
= 0; l
< radv_get_levelCount(image
, subresourceRange
); l
++) {
812 uint32_t width
, height
;
814 /* Do not decompress levels without DCC. */
815 if (!radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
+ l
))
818 width
= radv_minify(image
->info
.width
,
819 subresourceRange
->baseMipLevel
+ l
);
820 height
= radv_minify(image
->info
.height
,
821 subresourceRange
->baseMipLevel
+ l
);
823 for (uint32_t s
= 0; s
< radv_get_layerCount(image
, subresourceRange
); s
++) {
824 radv_image_view_init(&load_iview
, cmd_buffer
->device
,
825 &(VkImageViewCreateInfo
) {
826 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
827 .image
= radv_image_to_handle(image
),
828 .viewType
= VK_IMAGE_VIEW_TYPE_2D
,
829 .format
= image
->vk_format
,
830 .subresourceRange
= {
831 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
832 .baseMipLevel
= subresourceRange
->baseMipLevel
+ l
,
834 .baseArrayLayer
= subresourceRange
->baseArrayLayer
+ s
,
838 radv_image_view_init(&store_iview
, cmd_buffer
->device
,
839 &(VkImageViewCreateInfo
) {
840 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
841 .image
= radv_image_to_handle(image
),
842 .viewType
= VK_IMAGE_VIEW_TYPE_2D
,
843 .format
= image
->vk_format
,
844 .subresourceRange
= {
845 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
846 .baseMipLevel
= subresourceRange
->baseMipLevel
+ l
,
848 .baseArrayLayer
= subresourceRange
->baseArrayLayer
+ s
,
851 }, &(struct radv_image_view_extra_create_info
) {
852 .disable_compression
= true
855 radv_meta_push_descriptor_set(cmd_buffer
,
856 VK_PIPELINE_BIND_POINT_COMPUTE
,
857 device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_p_layout
,
859 2, /* descriptorWriteCount */
860 (VkWriteDescriptorSet
[]) {
862 .sType
= VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET
,
864 .dstArrayElement
= 0,
865 .descriptorCount
= 1,
866 .descriptorType
= VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
,
867 .pImageInfo
= (VkDescriptorImageInfo
[]) {
869 .sampler
= VK_NULL_HANDLE
,
870 .imageView
= radv_image_view_to_handle(&load_iview
),
871 .imageLayout
= VK_IMAGE_LAYOUT_GENERAL
,
876 .sType
= VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET
,
878 .dstArrayElement
= 0,
879 .descriptorCount
= 1,
880 .descriptorType
= VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
,
881 .pImageInfo
= (VkDescriptorImageInfo
[]) {
883 .sampler
= VK_NULL_HANDLE
,
884 .imageView
= radv_image_view_to_handle(&store_iview
),
885 .imageLayout
= VK_IMAGE_LAYOUT_GENERAL
,
891 radv_unaligned_dispatch(cmd_buffer
, width
, height
, 1);
895 /* Mark this image as actually being decompressed. */
896 radv_update_dcc_metadata(cmd_buffer
, image
, subresourceRange
, false);
898 /* The fill buffer below does its own saving */
899 radv_meta_restore(&saved_state
, cmd_buffer
);
901 state
->flush_bits
|= RADV_CMD_FLAG_CS_PARTIAL_FLUSH
|
902 RADV_CMD_FLAG_INV_VCACHE
;
905 /* Initialize the DCC metadata as "fully expanded". */
906 radv_initialize_dcc(cmd_buffer
, image
, subresourceRange
, 0xffffffff);
910 radv_decompress_dcc(struct radv_cmd_buffer
*cmd_buffer
,
911 struct radv_image
*image
,
912 const VkImageSubresourceRange
*subresourceRange
)
914 if (cmd_buffer
->queue_family_index
== RADV_QUEUE_GENERAL
)
915 radv_decompress_dcc_gfx(cmd_buffer
, image
, subresourceRange
);
917 radv_decompress_dcc_compute(cmd_buffer
, image
, subresourceRange
);