/*
2 * Copyright © 2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
27 #include "radv_meta.h"
28 #include "radv_private.h"
33 build_dcc_decompress_compute_shader(struct radv_device
*dev
)
36 const struct glsl_type
*buf_type
= glsl_sampler_type(GLSL_SAMPLER_DIM_2D
,
40 const struct glsl_type
*img_type
= glsl_image_type(GLSL_SAMPLER_DIM_2D
,
43 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_COMPUTE
, NULL
);
44 b
.shader
->info
.name
= ralloc_strdup(b
.shader
, "dcc_decompress_compute");
46 /* We need at least 16/16/1 to cover an entire DCC block in a single workgroup. */
47 b
.shader
->info
.cs
.local_size
[0] = 16;
48 b
.shader
->info
.cs
.local_size
[1] = 16;
49 b
.shader
->info
.cs
.local_size
[2] = 1;
50 nir_variable
*input_img
= nir_variable_create(b
.shader
, nir_var_uniform
,
52 input_img
->data
.descriptor_set
= 0;
53 input_img
->data
.binding
= 0;
55 nir_variable
*output_img
= nir_variable_create(b
.shader
, nir_var_uniform
,
57 output_img
->data
.descriptor_set
= 0;
58 output_img
->data
.binding
= 1;
60 nir_ssa_def
*invoc_id
= nir_load_local_invocation_id(&b
);
61 nir_ssa_def
*wg_id
= nir_load_work_group_id(&b
);
62 nir_ssa_def
*block_size
= nir_imm_ivec4(&b
,
63 b
.shader
->info
.cs
.local_size
[0],
64 b
.shader
->info
.cs
.local_size
[1],
65 b
.shader
->info
.cs
.local_size
[2], 0);
67 nir_ssa_def
*global_id
= nir_iadd(&b
, nir_imul(&b
, wg_id
, block_size
), invoc_id
);
68 nir_ssa_def
*input_img_deref
= &nir_build_deref_var(&b
, input_img
)->dest
.ssa
;
70 nir_tex_instr
*tex
= nir_tex_instr_create(b
.shader
, 3);
71 tex
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
72 tex
->op
= nir_texop_txf
;
73 tex
->src
[0].src_type
= nir_tex_src_coord
;
74 tex
->src
[0].src
= nir_src_for_ssa(nir_channels(&b
, global_id
, 3));
75 tex
->src
[1].src_type
= nir_tex_src_lod
;
76 tex
->src
[1].src
= nir_src_for_ssa(nir_imm_int(&b
, 0));
77 tex
->src
[2].src_type
= nir_tex_src_texture_deref
;
78 tex
->src
[2].src
= nir_src_for_ssa(input_img_deref
);
79 tex
->dest_type
= nir_type_float
;
80 tex
->is_array
= false;
81 tex
->coord_components
= 2;
83 nir_ssa_dest_init(&tex
->instr
, &tex
->dest
, 4, 32, "tex");
84 nir_builder_instr_insert(&b
, &tex
->instr
);
86 nir_intrinsic_instr
*membar
= nir_intrinsic_instr_create(b
.shader
, nir_intrinsic_memory_barrier
);
87 nir_builder_instr_insert(&b
, &membar
->instr
);
89 nir_intrinsic_instr
*bar
= nir_intrinsic_instr_create(b
.shader
, nir_intrinsic_control_barrier
);
90 nir_builder_instr_insert(&b
, &bar
->instr
);
92 nir_ssa_def
*outval
= &tex
->dest
.ssa
;
93 nir_intrinsic_instr
*store
= nir_intrinsic_instr_create(b
.shader
, nir_intrinsic_image_deref_store
);
94 store
->num_components
= 4;
95 store
->src
[0] = nir_src_for_ssa(&nir_build_deref_var(&b
, output_img
)->dest
.ssa
);
96 store
->src
[1] = nir_src_for_ssa(global_id
);
97 store
->src
[2] = nir_src_for_ssa(nir_ssa_undef(&b
, 1, 32));
98 store
->src
[3] = nir_src_for_ssa(outval
);
99 store
->src
[4] = nir_src_for_ssa(nir_imm_int(&b
, 0));
101 nir_builder_instr_insert(&b
, &store
->instr
);
106 create_dcc_compress_compute(struct radv_device
*device
)
108 VkResult result
= VK_SUCCESS
;
109 struct radv_shader_module cs
= { .nir
= NULL
};
111 cs
.nir
= build_dcc_decompress_compute_shader(device
);
113 VkDescriptorSetLayoutCreateInfo ds_create_info
= {
114 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
,
115 .flags
= VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR
,
117 .pBindings
= (VkDescriptorSetLayoutBinding
[]) {
120 .descriptorType
= VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
,
121 .descriptorCount
= 1,
122 .stageFlags
= VK_SHADER_STAGE_COMPUTE_BIT
,
123 .pImmutableSamplers
= NULL
127 .descriptorType
= VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
,
128 .descriptorCount
= 1,
129 .stageFlags
= VK_SHADER_STAGE_COMPUTE_BIT
,
130 .pImmutableSamplers
= NULL
135 result
= radv_CreateDescriptorSetLayout(radv_device_to_handle(device
),
137 &device
->meta_state
.alloc
,
138 &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_ds_layout
);
139 if (result
!= VK_SUCCESS
)
143 VkPipelineLayoutCreateInfo pl_create_info
= {
144 .sType
= VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
,
146 .pSetLayouts
= &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_ds_layout
,
147 .pushConstantRangeCount
= 1,
148 .pPushConstantRanges
= &(VkPushConstantRange
){VK_SHADER_STAGE_COMPUTE_BIT
, 0, 8},
151 result
= radv_CreatePipelineLayout(radv_device_to_handle(device
),
153 &device
->meta_state
.alloc
,
154 &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_p_layout
);
155 if (result
!= VK_SUCCESS
)
160 VkPipelineShaderStageCreateInfo pipeline_shader_stage
= {
161 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
162 .stage
= VK_SHADER_STAGE_COMPUTE_BIT
,
163 .module
= radv_shader_module_to_handle(&cs
),
165 .pSpecializationInfo
= NULL
,
168 VkComputePipelineCreateInfo vk_pipeline_info
= {
169 .sType
= VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO
,
170 .stage
= pipeline_shader_stage
,
172 .layout
= device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_p_layout
,
175 result
= radv_CreateComputePipelines(radv_device_to_handle(device
),
176 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
177 1, &vk_pipeline_info
, NULL
,
178 &device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_pipeline
);
179 if (result
!= VK_SUCCESS
)
188 create_pass(struct radv_device
*device
)
191 VkDevice device_h
= radv_device_to_handle(device
);
192 const VkAllocationCallbacks
*alloc
= &device
->meta_state
.alloc
;
193 VkAttachmentDescription attachment
;
195 attachment
.format
= VK_FORMAT_UNDEFINED
;
196 attachment
.samples
= 1;
197 attachment
.loadOp
= VK_ATTACHMENT_LOAD_OP_LOAD
;
198 attachment
.storeOp
= VK_ATTACHMENT_STORE_OP_STORE
;
199 attachment
.initialLayout
= VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
;
200 attachment
.finalLayout
= VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
;
202 result
= radv_CreateRenderPass(device_h
,
203 &(VkRenderPassCreateInfo
) {
204 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
,
205 .attachmentCount
= 1,
206 .pAttachments
= &attachment
,
208 .pSubpasses
= &(VkSubpassDescription
) {
209 .pipelineBindPoint
= VK_PIPELINE_BIND_POINT_GRAPHICS
,
210 .inputAttachmentCount
= 0,
211 .colorAttachmentCount
= 1,
212 .pColorAttachments
= (VkAttachmentReference
[]) {
215 .layout
= VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
,
218 .pResolveAttachments
= NULL
,
219 .pDepthStencilAttachment
= &(VkAttachmentReference
) {
220 .attachment
= VK_ATTACHMENT_UNUSED
,
222 .preserveAttachmentCount
= 0,
223 .pPreserveAttachments
= NULL
,
225 .dependencyCount
= 2,
226 .pDependencies
= (VkSubpassDependency
[]) {
228 .srcSubpass
= VK_SUBPASS_EXTERNAL
,
230 .srcStageMask
= VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
,
231 .dstStageMask
= VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
,
238 .dstSubpass
= VK_SUBPASS_EXTERNAL
,
239 .srcStageMask
= VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
,
240 .dstStageMask
= VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
,
248 &device
->meta_state
.fast_clear_flush
.pass
);
254 create_pipeline_layout(struct radv_device
*device
, VkPipelineLayout
*layout
)
256 VkPipelineLayoutCreateInfo pl_create_info
= {
257 .sType
= VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
,
260 .pushConstantRangeCount
= 0,
261 .pPushConstantRanges
= NULL
,
264 return radv_CreatePipelineLayout(radv_device_to_handle(device
),
266 &device
->meta_state
.alloc
,
271 create_pipeline(struct radv_device
*device
,
272 VkShaderModule vs_module_h
,
273 VkPipelineLayout layout
)
276 VkDevice device_h
= radv_device_to_handle(device
);
278 struct radv_shader_module fs_module
= {
279 .nir
= radv_meta_build_nir_fs_noop(),
282 if (!fs_module
.nir
) {
283 /* XXX: Need more accurate error */
284 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
288 const VkPipelineShaderStageCreateInfo stages
[2] = {
290 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
291 .stage
= VK_SHADER_STAGE_VERTEX_BIT
,
292 .module
= vs_module_h
,
296 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
297 .stage
= VK_SHADER_STAGE_FRAGMENT_BIT
,
298 .module
= radv_shader_module_to_handle(&fs_module
),
303 const VkPipelineVertexInputStateCreateInfo vi_state
= {
304 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO
,
305 .vertexBindingDescriptionCount
= 0,
306 .vertexAttributeDescriptionCount
= 0,
309 const VkPipelineInputAssemblyStateCreateInfo ia_state
= {
310 .sType
= VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO
,
311 .topology
= VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
,
312 .primitiveRestartEnable
= false,
315 const VkPipelineColorBlendStateCreateInfo blend_state
= {
316 .sType
= VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO
,
317 .logicOpEnable
= false,
318 .attachmentCount
= 1,
319 .pAttachments
= (VkPipelineColorBlendAttachmentState
[]) {
321 .colorWriteMask
= VK_COLOR_COMPONENT_R_BIT
|
322 VK_COLOR_COMPONENT_G_BIT
|
323 VK_COLOR_COMPONENT_B_BIT
|
324 VK_COLOR_COMPONENT_A_BIT
,
328 const VkPipelineRasterizationStateCreateInfo rs_state
= {
329 .sType
= VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO
,
330 .depthClampEnable
= false,
331 .rasterizerDiscardEnable
= false,
332 .polygonMode
= VK_POLYGON_MODE_FILL
,
333 .cullMode
= VK_CULL_MODE_NONE
,
334 .frontFace
= VK_FRONT_FACE_COUNTER_CLOCKWISE
,
337 result
= radv_graphics_pipeline_create(device_h
,
338 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
339 &(VkGraphicsPipelineCreateInfo
) {
340 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
344 .pVertexInputState
= &vi_state
,
345 .pInputAssemblyState
= &ia_state
,
347 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
348 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
352 .pRasterizationState
= &rs_state
,
353 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
354 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
355 .rasterizationSamples
= 1,
356 .sampleShadingEnable
= false,
358 .alphaToCoverageEnable
= false,
359 .alphaToOneEnable
= false,
361 .pColorBlendState
= &blend_state
,
362 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
363 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
364 .dynamicStateCount
= 2,
365 .pDynamicStates
= (VkDynamicState
[]) {
366 VK_DYNAMIC_STATE_VIEWPORT
,
367 VK_DYNAMIC_STATE_SCISSOR
,
371 .renderPass
= device
->meta_state
.fast_clear_flush
.pass
,
374 &(struct radv_graphics_pipeline_create_info
) {
375 .use_rectlist
= true,
376 .custom_blend_mode
= V_028808_CB_ELIMINATE_FAST_CLEAR
,
378 &device
->meta_state
.alloc
,
379 &device
->meta_state
.fast_clear_flush
.cmask_eliminate_pipeline
);
380 if (result
!= VK_SUCCESS
)
383 result
= radv_graphics_pipeline_create(device_h
,
384 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
385 &(VkGraphicsPipelineCreateInfo
) {
386 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
390 .pVertexInputState
= &vi_state
,
391 .pInputAssemblyState
= &ia_state
,
393 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
394 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
398 .pRasterizationState
= &rs_state
,
399 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
400 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
401 .rasterizationSamples
= 1,
402 .sampleShadingEnable
= false,
404 .alphaToCoverageEnable
= false,
405 .alphaToOneEnable
= false,
407 .pColorBlendState
= &blend_state
,
408 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
409 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
410 .dynamicStateCount
= 2,
411 .pDynamicStates
= (VkDynamicState
[]) {
412 VK_DYNAMIC_STATE_VIEWPORT
,
413 VK_DYNAMIC_STATE_SCISSOR
,
417 .renderPass
= device
->meta_state
.fast_clear_flush
.pass
,
420 &(struct radv_graphics_pipeline_create_info
) {
421 .use_rectlist
= true,
422 .custom_blend_mode
= V_028808_CB_FMASK_DECOMPRESS
,
424 &device
->meta_state
.alloc
,
425 &device
->meta_state
.fast_clear_flush
.fmask_decompress_pipeline
);
426 if (result
!= VK_SUCCESS
)
429 result
= radv_graphics_pipeline_create(device_h
,
430 radv_pipeline_cache_to_handle(&device
->meta_state
.cache
),
431 &(VkGraphicsPipelineCreateInfo
) {
432 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
436 .pVertexInputState
= &vi_state
,
437 .pInputAssemblyState
= &ia_state
,
439 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
440 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
444 .pRasterizationState
= &rs_state
,
445 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
446 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
447 .rasterizationSamples
= 1,
448 .sampleShadingEnable
= false,
450 .alphaToCoverageEnable
= false,
451 .alphaToOneEnable
= false,
453 .pColorBlendState
= &blend_state
,
454 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
455 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
456 .dynamicStateCount
= 2,
457 .pDynamicStates
= (VkDynamicState
[]) {
458 VK_DYNAMIC_STATE_VIEWPORT
,
459 VK_DYNAMIC_STATE_SCISSOR
,
463 .renderPass
= device
->meta_state
.fast_clear_flush
.pass
,
466 &(struct radv_graphics_pipeline_create_info
) {
467 .use_rectlist
= true,
468 .custom_blend_mode
= V_028808_CB_DCC_DECOMPRESS
,
470 &device
->meta_state
.alloc
,
471 &device
->meta_state
.fast_clear_flush
.dcc_decompress_pipeline
);
472 if (result
!= VK_SUCCESS
)
478 ralloc_free(fs_module
.nir
);
483 radv_device_finish_meta_fast_clear_flush_state(struct radv_device
*device
)
485 struct radv_meta_state
*state
= &device
->meta_state
;
487 radv_DestroyPipeline(radv_device_to_handle(device
),
488 state
->fast_clear_flush
.dcc_decompress_pipeline
,
490 radv_DestroyPipeline(radv_device_to_handle(device
),
491 state
->fast_clear_flush
.fmask_decompress_pipeline
,
493 radv_DestroyPipeline(radv_device_to_handle(device
),
494 state
->fast_clear_flush
.cmask_eliminate_pipeline
,
496 radv_DestroyRenderPass(radv_device_to_handle(device
),
497 state
->fast_clear_flush
.pass
, &state
->alloc
);
498 radv_DestroyPipelineLayout(radv_device_to_handle(device
),
499 state
->fast_clear_flush
.p_layout
,
502 radv_DestroyPipeline(radv_device_to_handle(device
),
503 state
->fast_clear_flush
.dcc_decompress_compute_pipeline
,
505 radv_DestroyPipelineLayout(radv_device_to_handle(device
),
506 state
->fast_clear_flush
.dcc_decompress_compute_p_layout
,
508 radv_DestroyDescriptorSetLayout(radv_device_to_handle(device
),
509 state
->fast_clear_flush
.dcc_decompress_compute_ds_layout
,
514 radv_device_init_meta_fast_clear_flush_state_internal(struct radv_device
*device
)
516 VkResult res
= VK_SUCCESS
;
518 mtx_lock(&device
->meta_state
.mtx
);
519 if (device
->meta_state
.fast_clear_flush
.cmask_eliminate_pipeline
) {
520 mtx_unlock(&device
->meta_state
.mtx
);
524 struct radv_shader_module vs_module
= { .nir
= radv_meta_build_nir_vs_generate_vertices() };
525 if (!vs_module
.nir
) {
526 /* XXX: Need more accurate error */
527 res
= VK_ERROR_OUT_OF_HOST_MEMORY
;
531 res
= create_pass(device
);
532 if (res
!= VK_SUCCESS
)
535 res
= create_pipeline_layout(device
,
536 &device
->meta_state
.fast_clear_flush
.p_layout
);
537 if (res
!= VK_SUCCESS
)
540 VkShaderModule vs_module_h
= radv_shader_module_to_handle(&vs_module
);
541 res
= create_pipeline(device
, vs_module_h
,
542 device
->meta_state
.fast_clear_flush
.p_layout
);
543 if (res
!= VK_SUCCESS
)
546 res
= create_dcc_compress_compute(device
);
547 if (res
!= VK_SUCCESS
)
553 radv_device_finish_meta_fast_clear_flush_state(device
);
556 ralloc_free(vs_module
.nir
);
557 mtx_unlock(&device
->meta_state
.mtx
);
564 radv_device_init_meta_fast_clear_flush_state(struct radv_device
*device
, bool on_demand
)
569 return radv_device_init_meta_fast_clear_flush_state_internal(device
);
573 radv_emit_set_predication_state_from_image(struct radv_cmd_buffer
*cmd_buffer
,
574 struct radv_image
*image
,
575 uint64_t pred_offset
, bool value
)
580 va
= radv_buffer_get_va(image
->bo
) + image
->offset
;
584 si_emit_set_predication_state(cmd_buffer
, true, va
);
588 radv_process_color_image_layer(struct radv_cmd_buffer
*cmd_buffer
,
589 struct radv_image
*image
,
590 const VkImageSubresourceRange
*range
,
591 int level
, int layer
)
593 struct radv_device
*device
= cmd_buffer
->device
;
594 struct radv_image_view iview
;
595 uint32_t width
, height
;
597 width
= radv_minify(image
->info
.width
, range
->baseMipLevel
+ level
);
598 height
= radv_minify(image
->info
.height
, range
->baseMipLevel
+ level
);
600 radv_image_view_init(&iview
, device
,
601 &(VkImageViewCreateInfo
) {
602 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
603 .image
= radv_image_to_handle(image
),
604 .viewType
= radv_meta_get_view_type(image
),
605 .format
= image
->vk_format
,
606 .subresourceRange
= {
607 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
608 .baseMipLevel
= range
->baseMipLevel
+ level
,
610 .baseArrayLayer
= range
->baseArrayLayer
+ layer
,
616 radv_CreateFramebuffer(radv_device_to_handle(device
),
617 &(VkFramebufferCreateInfo
) {
618 .sType
= VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
,
619 .attachmentCount
= 1,
620 .pAttachments
= (VkImageView
[]) {
621 radv_image_view_to_handle(&iview
)
626 }, &cmd_buffer
->pool
->alloc
, &fb_h
);
628 radv_cmd_buffer_begin_render_pass(cmd_buffer
,
629 &(VkRenderPassBeginInfo
) {
630 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO
,
631 .renderPass
= device
->meta_state
.fast_clear_flush
.pass
,
635 .extent
= { width
, height
, }
637 .clearValueCount
= 0,
638 .pClearValues
= NULL
,
641 radv_cmd_buffer_set_subpass(cmd_buffer
,
642 &cmd_buffer
->state
.pass
->subpasses
[0]);
644 radv_CmdDraw(radv_cmd_buffer_to_handle(cmd_buffer
), 3, 1, 0, 0);
646 cmd_buffer
->state
.flush_bits
|= RADV_CMD_FLAG_FLUSH_AND_INV_CB
|
647 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META
;
649 radv_cmd_buffer_end_render_pass(cmd_buffer
);
651 radv_DestroyFramebuffer(radv_device_to_handle(device
), fb_h
,
652 &cmd_buffer
->pool
->alloc
);
656 radv_process_color_image(struct radv_cmd_buffer
*cmd_buffer
,
657 struct radv_image
*image
,
658 const VkImageSubresourceRange
*subresourceRange
,
661 struct radv_meta_saved_state saved_state
;
662 VkPipeline
*pipeline
;
664 if (decompress_dcc
&& radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
)) {
665 pipeline
= &cmd_buffer
->device
->meta_state
.fast_clear_flush
.dcc_decompress_pipeline
;
666 } else if (radv_image_has_fmask(image
) && !image
->tc_compatible_cmask
) {
667 pipeline
= &cmd_buffer
->device
->meta_state
.fast_clear_flush
.fmask_decompress_pipeline
;
669 pipeline
= &cmd_buffer
->device
->meta_state
.fast_clear_flush
.cmask_eliminate_pipeline
;
675 ret
= radv_device_init_meta_fast_clear_flush_state_internal(cmd_buffer
->device
);
676 if (ret
!= VK_SUCCESS
) {
677 cmd_buffer
->record_result
= ret
;
682 radv_meta_save(&saved_state
, cmd_buffer
,
683 RADV_META_SAVE_GRAPHICS_PIPELINE
|
684 RADV_META_SAVE_PASS
);
686 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer
),
687 VK_PIPELINE_BIND_POINT_GRAPHICS
, *pipeline
);
689 for (uint32_t l
= 0; l
< radv_get_levelCount(image
, subresourceRange
); ++l
) {
690 uint32_t width
, height
;
692 /* Do not decompress levels without DCC. */
693 if (decompress_dcc
&&
694 !radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
+ l
))
697 width
= radv_minify(image
->info
.width
,
698 subresourceRange
->baseMipLevel
+ l
);
699 height
= radv_minify(image
->info
.height
,
700 subresourceRange
->baseMipLevel
+ l
);
702 radv_CmdSetViewport(radv_cmd_buffer_to_handle(cmd_buffer
), 0, 1,
712 radv_CmdSetScissor(radv_cmd_buffer_to_handle(cmd_buffer
), 0, 1,
715 .extent
= { width
, height
},
718 for (uint32_t s
= 0; s
< radv_get_layerCount(image
, subresourceRange
); s
++) {
719 radv_process_color_image_layer(cmd_buffer
, image
,
720 subresourceRange
, l
, s
);
724 radv_meta_restore(&saved_state
, cmd_buffer
);
728 radv_emit_color_decompress(struct radv_cmd_buffer
*cmd_buffer
,
729 struct radv_image
*image
,
730 const VkImageSubresourceRange
*subresourceRange
,
733 bool old_predicating
= false;
735 assert(cmd_buffer
->queue_family_index
== RADV_QUEUE_GENERAL
);
737 if (radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
)) {
738 uint64_t pred_offset
= decompress_dcc
? image
->dcc_pred_offset
:
739 image
->fce_pred_offset
;
740 pred_offset
+= 8 * subresourceRange
->baseMipLevel
;
742 old_predicating
= cmd_buffer
->state
.predicating
;
744 radv_emit_set_predication_state_from_image(cmd_buffer
, image
, pred_offset
, true);
745 cmd_buffer
->state
.predicating
= true;
748 radv_process_color_image(cmd_buffer
, image
, subresourceRange
,
751 if (radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
)) {
752 uint64_t pred_offset
= decompress_dcc
? image
->dcc_pred_offset
:
753 image
->fce_pred_offset
;
754 pred_offset
+= 8 * subresourceRange
->baseMipLevel
;
756 cmd_buffer
->state
.predicating
= old_predicating
;
758 radv_emit_set_predication_state_from_image(cmd_buffer
, image
, pred_offset
, false);
760 if (cmd_buffer
->state
.predication_type
!= -1) {
761 /* Restore previous conditional rendering user state. */
762 si_emit_set_predication_state(cmd_buffer
,
763 cmd_buffer
->state
.predication_type
,
764 cmd_buffer
->state
.predication_va
);
768 if (radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
)) {
769 /* Clear the image's fast-clear eliminate predicate because
770 * FMASK and DCC also imply a fast-clear eliminate.
772 radv_update_fce_metadata(cmd_buffer
, image
, subresourceRange
, false);
774 /* Mark the image as being decompressed. */
776 radv_update_dcc_metadata(cmd_buffer
, image
, subresourceRange
, false);
781 radv_fast_clear_flush_image_inplace(struct radv_cmd_buffer
*cmd_buffer
,
782 struct radv_image
*image
,
783 const VkImageSubresourceRange
*subresourceRange
)
785 struct radv_barrier_data barrier
= {};
787 if (radv_image_has_fmask(image
)) {
788 barrier
.layout_transitions
.fmask_decompress
= 1;
790 barrier
.layout_transitions
.fast_clear_eliminate
= 1;
792 radv_describe_layout_transition(cmd_buffer
, &barrier
);
794 radv_emit_color_decompress(cmd_buffer
, image
, subresourceRange
, false);
798 radv_decompress_dcc_gfx(struct radv_cmd_buffer
*cmd_buffer
,
799 struct radv_image
*image
,
800 const VkImageSubresourceRange
*subresourceRange
)
802 radv_emit_color_decompress(cmd_buffer
, image
, subresourceRange
, true);
806 radv_decompress_dcc_compute(struct radv_cmd_buffer
*cmd_buffer
,
807 struct radv_image
*image
,
808 const VkImageSubresourceRange
*subresourceRange
)
810 struct radv_meta_saved_state saved_state
;
811 struct radv_image_view load_iview
= {0};
812 struct radv_image_view store_iview
= {0};
813 struct radv_device
*device
= cmd_buffer
->device
;
815 /* This assumes the image is 2d with 1 layer */
816 struct radv_cmd_state
*state
= &cmd_buffer
->state
;
818 state
->flush_bits
|= RADV_CMD_FLAG_FLUSH_AND_INV_CB
|
819 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META
;
821 if (!cmd_buffer
->device
->meta_state
.fast_clear_flush
.cmask_eliminate_pipeline
) {
822 VkResult ret
= radv_device_init_meta_fast_clear_flush_state_internal(cmd_buffer
->device
);
823 if (ret
!= VK_SUCCESS
) {
824 cmd_buffer
->record_result
= ret
;
829 radv_meta_save(&saved_state
, cmd_buffer
, RADV_META_SAVE_DESCRIPTORS
|
830 RADV_META_SAVE_COMPUTE_PIPELINE
);
832 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer
),
833 VK_PIPELINE_BIND_POINT_COMPUTE
,
834 device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_pipeline
);
836 for (uint32_t l
= 0; l
< radv_get_levelCount(image
, subresourceRange
); l
++) {
837 uint32_t width
, height
;
839 /* Do not decompress levels without DCC. */
840 if (!radv_dcc_enabled(image
, subresourceRange
->baseMipLevel
+ l
))
843 width
= radv_minify(image
->info
.width
,
844 subresourceRange
->baseMipLevel
+ l
);
845 height
= radv_minify(image
->info
.height
,
846 subresourceRange
->baseMipLevel
+ l
);
848 for (uint32_t s
= 0; s
< radv_get_layerCount(image
, subresourceRange
); s
++) {
849 radv_image_view_init(&load_iview
, cmd_buffer
->device
,
850 &(VkImageViewCreateInfo
) {
851 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
852 .image
= radv_image_to_handle(image
),
853 .viewType
= VK_IMAGE_VIEW_TYPE_2D
,
854 .format
= image
->vk_format
,
855 .subresourceRange
= {
856 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
857 .baseMipLevel
= subresourceRange
->baseMipLevel
+ l
,
859 .baseArrayLayer
= subresourceRange
->baseArrayLayer
+ s
,
863 radv_image_view_init(&store_iview
, cmd_buffer
->device
,
864 &(VkImageViewCreateInfo
) {
865 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
866 .image
= radv_image_to_handle(image
),
867 .viewType
= VK_IMAGE_VIEW_TYPE_2D
,
868 .format
= image
->vk_format
,
869 .subresourceRange
= {
870 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
871 .baseMipLevel
= subresourceRange
->baseMipLevel
+ l
,
873 .baseArrayLayer
= subresourceRange
->baseArrayLayer
+ s
,
876 }, &(struct radv_image_view_extra_create_info
) {
877 .disable_compression
= true
880 radv_meta_push_descriptor_set(cmd_buffer
,
881 VK_PIPELINE_BIND_POINT_COMPUTE
,
882 device
->meta_state
.fast_clear_flush
.dcc_decompress_compute_p_layout
,
884 2, /* descriptorWriteCount */
885 (VkWriteDescriptorSet
[]) {
887 .sType
= VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET
,
889 .dstArrayElement
= 0,
890 .descriptorCount
= 1,
891 .descriptorType
= VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
,
892 .pImageInfo
= (VkDescriptorImageInfo
[]) {
894 .sampler
= VK_NULL_HANDLE
,
895 .imageView
= radv_image_view_to_handle(&load_iview
),
896 .imageLayout
= VK_IMAGE_LAYOUT_GENERAL
,
901 .sType
= VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET
,
903 .dstArrayElement
= 0,
904 .descriptorCount
= 1,
905 .descriptorType
= VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
,
906 .pImageInfo
= (VkDescriptorImageInfo
[]) {
908 .sampler
= VK_NULL_HANDLE
,
909 .imageView
= radv_image_view_to_handle(&store_iview
),
910 .imageLayout
= VK_IMAGE_LAYOUT_GENERAL
,
916 radv_unaligned_dispatch(cmd_buffer
, width
, height
, 1);
920 /* Mark this image as actually being decompressed. */
921 radv_update_dcc_metadata(cmd_buffer
, image
, subresourceRange
, false);
923 /* The fill buffer below does its own saving */
924 radv_meta_restore(&saved_state
, cmd_buffer
);
926 state
->flush_bits
|= RADV_CMD_FLAG_CS_PARTIAL_FLUSH
|
927 RADV_CMD_FLAG_INV_VCACHE
;
930 /* Initialize the DCC metadata as "fully expanded". */
931 radv_initialize_dcc(cmd_buffer
, image
, subresourceRange
, 0xffffffff);
935 radv_decompress_dcc(struct radv_cmd_buffer
*cmd_buffer
,
936 struct radv_image
*image
,
937 const VkImageSubresourceRange
*subresourceRange
)
939 struct radv_barrier_data barrier
= {};
941 barrier
.layout_transitions
.dcc_decompress
= 1;
942 radv_describe_layout_transition(cmd_buffer
, &barrier
);
944 if (cmd_buffer
->queue_family_index
== RADV_QUEUE_GENERAL
)
945 radv_decompress_dcc_gfx(cmd_buffer
, image
, subresourceRange
);
947 radv_decompress_dcc_compute(cmd_buffer
, image
, subresourceRange
);