e0e83c2754ff7595e0c3c7147d26dbd91bd242f6
[mesa.git] / src / amd / vulkan / radv_meta_fast_clear.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26
27 #include "radv_meta.h"
28 #include "radv_private.h"
29 #include "sid.h"
30
31
static nir_shader *
build_dcc_decompress_compute_shader(struct radv_device *dev)
{
	/* Builds a compute shader that copies an image onto itself: each
	 * invocation does a txf load from the sampled-image view (binding 0)
	 * and an image store to the storage-image view (binding 1).  The store
	 * view is created with compression disabled by the caller, so the
	 * round trip rewrites the data in fully-expanded (non-DCC) form.
	 */
	nir_builder b;
	/* Both descriptors are declared as 2D float sampler types here; the
	 * second one backs the image_deref_store below.
	 * NOTE(review): an image type (glsl_image_type) would be the usual
	 * choice for a storage image — confirm this is intentional.
	 */
	const struct glsl_type *buf_type = glsl_sampler_type(GLSL_SAMPLER_DIM_2D,
							     false,
							     false,
							     GLSL_TYPE_FLOAT);
	const struct glsl_type *img_type = glsl_sampler_type(GLSL_SAMPLER_DIM_2D,
							     false,
							     false,
							     GLSL_TYPE_FLOAT);
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "dcc_decompress_compute");

	/* We need at least 16/16/1 to cover an entire DCC block in a single workgroup. */
	b.shader->info.cs.local_size[0] = 16;
	b.shader->info.cs.local_size[1] = 16;
	b.shader->info.cs.local_size[2] = 1;

	/* Binding 0: source (sampled image), set 0. */
	nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
						      buf_type, "s_tex");
	input_img->data.descriptor_set = 0;
	input_img->data.binding = 0;

	/* Binding 1: destination (storage image), set 0. */
	nir_variable *output_img = nir_variable_create(b.shader, nir_var_uniform,
						       img_type, "out_img");
	output_img->data.descriptor_set = 0;
	output_img->data.binding = 1;

	/* global_id = workgroup_id * workgroup_size + local_invocation_id,
	 * i.e. the absolute texel coordinate this invocation handles.
	 */
	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);

	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	nir_ssa_def *input_img_deref = &nir_build_deref_var(&b, input_img)->dest.ssa;

	/* Fetch the texel at global_id.xy, LOD 0, from the sampled view. */
	nir_tex_instr *tex = nir_tex_instr_create(b.shader, 3);
	tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
	tex->op = nir_texop_txf;
	tex->src[0].src_type = nir_tex_src_coord;
	tex->src[0].src = nir_src_for_ssa(nir_channels(&b, global_id, 3));
	tex->src[1].src_type = nir_tex_src_lod;
	tex->src[1].src = nir_src_for_ssa(nir_imm_int(&b, 0));
	tex->src[2].src_type = nir_tex_src_texture_deref;
	tex->src[2].src = nir_src_for_ssa(input_img_deref);
	tex->dest_type = nir_type_float;
	tex->is_array = false;
	tex->coord_components = 2;

	nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
	nir_builder_instr_insert(&b, &tex->instr);

	/* Make sure every invocation has finished reading before any store
	 * rewrites the surface (the source and destination are the same
	 * image): memory barrier plus an execution barrier.
	 */
	nir_intrinsic_instr *membar = nir_intrinsic_instr_create(b.shader, nir_intrinsic_memory_barrier);
	nir_builder_instr_insert(&b, &membar->instr);

	nir_intrinsic_instr *bar = nir_intrinsic_instr_create(b.shader, nir_intrinsic_barrier);
	nir_builder_instr_insert(&b, &bar->instr);

	/* Store the fetched value back through the uncompressed view.
	 * src[2] is the sample index (undef for non-MS), src[4] the LOD.
	 */
	nir_ssa_def *outval = &tex->dest.ssa;
	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_image_deref_store);
	store->num_components = 4;
	store->src[0] = nir_src_for_ssa(&nir_build_deref_var(&b, output_img)->dest.ssa);
	store->src[1] = nir_src_for_ssa(global_id);
	store->src[2] = nir_src_for_ssa(nir_ssa_undef(&b, 1, 32));
	store->src[3] = nir_src_for_ssa(outval);
	store->src[4] = nir_src_for_ssa(nir_imm_int(&b, 0));

	nir_builder_instr_insert(&b, &store->instr);
	return b.shader;
}
105
/* Create the descriptor-set layout, pipeline layout and compute pipeline used
 * for the compute-queue DCC decompress path.
 * NOTE(review): the name says "compress" but the shader built here is the
 * decompress shader — the naming appears historical.
 * On any failure the partially-created objects are left in meta_state and
 * torn down by radv_device_finish_meta_fast_clear_flush_state().
 */
static VkResult
create_dcc_compress_compute(struct radv_device *device)
{
	VkResult result = VK_SUCCESS;
	struct radv_shader_module cs = { .nir = NULL };

	cs.nir = build_dcc_decompress_compute_shader(device);

	/* Two bindings matching the shader: 0 = sampled source image,
	 * 1 = storage destination image.  Push-descriptor layout so the
	 * command buffer can bind images without allocating a set.
	 */
	VkDescriptorSetLayoutCreateInfo ds_create_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
		.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
		.bindingCount = 2,
		.pBindings = (VkDescriptorSetLayoutBinding[]) {
			{
				.binding = 0,
				.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
			{
				.binding = 1,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
		}
	};

	result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
						&ds_create_info,
						&device->meta_state.alloc,
						&device->meta_state.fast_clear_flush.dcc_decompress_compute_ds_layout);
	if (result != VK_SUCCESS)
		goto cleanup;


	/* One 8-byte compute push-constant range.
	 * NOTE(review): the shader built above does not load push constants —
	 * presumably reserved/shared convention; confirm.
	 */
	VkPipelineLayoutCreateInfo pl_create_info = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		.setLayoutCount = 1,
		.pSetLayouts = &device->meta_state.fast_clear_flush.dcc_decompress_compute_ds_layout,
		.pushConstantRangeCount = 1,
		.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 8},
	};

	result = radv_CreatePipelineLayout(radv_device_to_handle(device),
					   &pl_create_info,
					   &device->meta_state.alloc,
					   &device->meta_state.fast_clear_flush.dcc_decompress_compute_p_layout);
	if (result != VK_SUCCESS)
		goto cleanup;

	/* compute shader */

	VkPipelineShaderStageCreateInfo pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.fast_clear_flush.dcc_decompress_compute_p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &vk_pipeline_info, NULL,
					     &device->meta_state.fast_clear_flush.dcc_decompress_compute_pipeline);
	if (result != VK_SUCCESS)
		goto cleanup;

cleanup:
	/* The NIR is owned by this function; the pipeline keeps its own copy. */
	ralloc_free(cs.nir);
	return result;
}
187
188 static VkResult
189 create_pass(struct radv_device *device)
190 {
191 VkResult result;
192 VkDevice device_h = radv_device_to_handle(device);
193 const VkAllocationCallbacks *alloc = &device->meta_state.alloc;
194 VkAttachmentDescription attachment;
195
196 attachment.format = VK_FORMAT_UNDEFINED;
197 attachment.samples = 1;
198 attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
199 attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
200 attachment.initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
201 attachment.finalLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
202
203 result = radv_CreateRenderPass(device_h,
204 &(VkRenderPassCreateInfo) {
205 .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
206 .attachmentCount = 1,
207 .pAttachments = &attachment,
208 .subpassCount = 1,
209 .pSubpasses = &(VkSubpassDescription) {
210 .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
211 .inputAttachmentCount = 0,
212 .colorAttachmentCount = 1,
213 .pColorAttachments = (VkAttachmentReference[]) {
214 {
215 .attachment = 0,
216 .layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
217 },
218 },
219 .pResolveAttachments = NULL,
220 .pDepthStencilAttachment = &(VkAttachmentReference) {
221 .attachment = VK_ATTACHMENT_UNUSED,
222 },
223 .preserveAttachmentCount = 0,
224 .pPreserveAttachments = NULL,
225 },
226 .dependencyCount = 0,
227 },
228 alloc,
229 &device->meta_state.fast_clear_flush.pass);
230
231 return result;
232 }
233
234 static VkResult
235 create_pipeline_layout(struct radv_device *device, VkPipelineLayout *layout)
236 {
237 VkPipelineLayoutCreateInfo pl_create_info = {
238 .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
239 .setLayoutCount = 0,
240 .pSetLayouts = NULL,
241 .pushConstantRangeCount = 0,
242 .pPushConstantRanges = NULL,
243 };
244
245 return radv_CreatePipelineLayout(radv_device_to_handle(device),
246 &pl_create_info,
247 &device->meta_state.alloc,
248 layout);
249 }
250
251 static VkResult
252 create_pipeline(struct radv_device *device,
253 VkShaderModule vs_module_h,
254 VkPipelineLayout layout)
255 {
256 VkResult result;
257 VkDevice device_h = radv_device_to_handle(device);
258
259 struct radv_shader_module fs_module = {
260 .nir = radv_meta_build_nir_fs_noop(),
261 };
262
263 if (!fs_module.nir) {
264 /* XXX: Need more accurate error */
265 result = VK_ERROR_OUT_OF_HOST_MEMORY;
266 goto cleanup;
267 }
268
269 const VkPipelineShaderStageCreateInfo stages[2] = {
270 {
271 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
272 .stage = VK_SHADER_STAGE_VERTEX_BIT,
273 .module = vs_module_h,
274 .pName = "main",
275 },
276 {
277 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
278 .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
279 .module = radv_shader_module_to_handle(&fs_module),
280 .pName = "main",
281 },
282 };
283
284 const VkPipelineVertexInputStateCreateInfo vi_state = {
285 .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
286 .vertexBindingDescriptionCount = 0,
287 .vertexAttributeDescriptionCount = 0,
288 };
289
290 const VkPipelineInputAssemblyStateCreateInfo ia_state = {
291 .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
292 .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
293 .primitiveRestartEnable = false,
294 };
295
296 const VkPipelineColorBlendStateCreateInfo blend_state = {
297 .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
298 .logicOpEnable = false,
299 .attachmentCount = 1,
300 .pAttachments = (VkPipelineColorBlendAttachmentState []) {
301 {
302 .colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
303 VK_COLOR_COMPONENT_G_BIT |
304 VK_COLOR_COMPONENT_B_BIT |
305 VK_COLOR_COMPONENT_A_BIT,
306 },
307 }
308 };
309 const VkPipelineRasterizationStateCreateInfo rs_state = {
310 .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
311 .depthClampEnable = false,
312 .rasterizerDiscardEnable = false,
313 .polygonMode = VK_POLYGON_MODE_FILL,
314 .cullMode = VK_CULL_MODE_NONE,
315 .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
316 };
317
318 result = radv_graphics_pipeline_create(device_h,
319 radv_pipeline_cache_to_handle(&device->meta_state.cache),
320 &(VkGraphicsPipelineCreateInfo) {
321 .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
322 .stageCount = 2,
323 .pStages = stages,
324
325 .pVertexInputState = &vi_state,
326 .pInputAssemblyState = &ia_state,
327
328 .pViewportState = &(VkPipelineViewportStateCreateInfo) {
329 .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
330 .viewportCount = 1,
331 .scissorCount = 1,
332 },
333 .pRasterizationState = &rs_state,
334 .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
335 .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
336 .rasterizationSamples = 1,
337 .sampleShadingEnable = false,
338 .pSampleMask = NULL,
339 .alphaToCoverageEnable = false,
340 .alphaToOneEnable = false,
341 },
342 .pColorBlendState = &blend_state,
343 .pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
344 .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
345 .dynamicStateCount = 2,
346 .pDynamicStates = (VkDynamicState[]) {
347 VK_DYNAMIC_STATE_VIEWPORT,
348 VK_DYNAMIC_STATE_SCISSOR,
349 },
350 },
351 .layout = layout,
352 .renderPass = device->meta_state.fast_clear_flush.pass,
353 .subpass = 0,
354 },
355 &(struct radv_graphics_pipeline_create_info) {
356 .use_rectlist = true,
357 .custom_blend_mode = V_028808_CB_ELIMINATE_FAST_CLEAR,
358 },
359 &device->meta_state.alloc,
360 &device->meta_state.fast_clear_flush.cmask_eliminate_pipeline);
361 if (result != VK_SUCCESS)
362 goto cleanup;
363
364 result = radv_graphics_pipeline_create(device_h,
365 radv_pipeline_cache_to_handle(&device->meta_state.cache),
366 &(VkGraphicsPipelineCreateInfo) {
367 .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
368 .stageCount = 2,
369 .pStages = stages,
370
371 .pVertexInputState = &vi_state,
372 .pInputAssemblyState = &ia_state,
373
374 .pViewportState = &(VkPipelineViewportStateCreateInfo) {
375 .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
376 .viewportCount = 1,
377 .scissorCount = 1,
378 },
379 .pRasterizationState = &rs_state,
380 .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
381 .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
382 .rasterizationSamples = 1,
383 .sampleShadingEnable = false,
384 .pSampleMask = NULL,
385 .alphaToCoverageEnable = false,
386 .alphaToOneEnable = false,
387 },
388 .pColorBlendState = &blend_state,
389 .pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
390 .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
391 .dynamicStateCount = 2,
392 .pDynamicStates = (VkDynamicState[]) {
393 VK_DYNAMIC_STATE_VIEWPORT,
394 VK_DYNAMIC_STATE_SCISSOR,
395 },
396 },
397 .layout = layout,
398 .renderPass = device->meta_state.fast_clear_flush.pass,
399 .subpass = 0,
400 },
401 &(struct radv_graphics_pipeline_create_info) {
402 .use_rectlist = true,
403 .custom_blend_mode = V_028808_CB_FMASK_DECOMPRESS,
404 },
405 &device->meta_state.alloc,
406 &device->meta_state.fast_clear_flush.fmask_decompress_pipeline);
407 if (result != VK_SUCCESS)
408 goto cleanup;
409
410 result = radv_graphics_pipeline_create(device_h,
411 radv_pipeline_cache_to_handle(&device->meta_state.cache),
412 &(VkGraphicsPipelineCreateInfo) {
413 .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
414 .stageCount = 2,
415 .pStages = stages,
416
417 .pVertexInputState = &vi_state,
418 .pInputAssemblyState = &ia_state,
419
420 .pViewportState = &(VkPipelineViewportStateCreateInfo) {
421 .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
422 .viewportCount = 1,
423 .scissorCount = 1,
424 },
425 .pRasterizationState = &rs_state,
426 .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
427 .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
428 .rasterizationSamples = 1,
429 .sampleShadingEnable = false,
430 .pSampleMask = NULL,
431 .alphaToCoverageEnable = false,
432 .alphaToOneEnable = false,
433 },
434 .pColorBlendState = &blend_state,
435 .pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
436 .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
437 .dynamicStateCount = 2,
438 .pDynamicStates = (VkDynamicState[]) {
439 VK_DYNAMIC_STATE_VIEWPORT,
440 VK_DYNAMIC_STATE_SCISSOR,
441 },
442 },
443 .layout = layout,
444 .renderPass = device->meta_state.fast_clear_flush.pass,
445 .subpass = 0,
446 },
447 &(struct radv_graphics_pipeline_create_info) {
448 .use_rectlist = true,
449 .custom_blend_mode = V_028808_CB_DCC_DECOMPRESS,
450 },
451 &device->meta_state.alloc,
452 &device->meta_state.fast_clear_flush.dcc_decompress_pipeline);
453 if (result != VK_SUCCESS)
454 goto cleanup;
455
456 goto cleanup;
457
458 cleanup:
459 ralloc_free(fs_module.nir);
460 return result;
461 }
462
463 void
464 radv_device_finish_meta_fast_clear_flush_state(struct radv_device *device)
465 {
466 struct radv_meta_state *state = &device->meta_state;
467
468 radv_DestroyPipeline(radv_device_to_handle(device),
469 state->fast_clear_flush.dcc_decompress_pipeline,
470 &state->alloc);
471 radv_DestroyPipeline(radv_device_to_handle(device),
472 state->fast_clear_flush.fmask_decompress_pipeline,
473 &state->alloc);
474 radv_DestroyPipeline(radv_device_to_handle(device),
475 state->fast_clear_flush.cmask_eliminate_pipeline,
476 &state->alloc);
477 radv_DestroyRenderPass(radv_device_to_handle(device),
478 state->fast_clear_flush.pass, &state->alloc);
479 radv_DestroyPipelineLayout(radv_device_to_handle(device),
480 state->fast_clear_flush.p_layout,
481 &state->alloc);
482
483 radv_DestroyPipeline(radv_device_to_handle(device),
484 state->fast_clear_flush.dcc_decompress_compute_pipeline,
485 &state->alloc);
486 radv_DestroyPipelineLayout(radv_device_to_handle(device),
487 state->fast_clear_flush.dcc_decompress_compute_p_layout,
488 &state->alloc);
489 radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
490 state->fast_clear_flush.dcc_decompress_compute_ds_layout,
491 &state->alloc);
492 }
493
/* Create all fast-clear-flush meta objects (render pass, layouts, graphics
 * and compute pipelines).  Serialized with meta_state.mtx and re-checks
 * under the lock, so concurrent on-demand callers initialize only once.
 * On failure everything created so far is destroyed before returning.
 */
static VkResult
radv_device_init_meta_fast_clear_flush_state_internal(struct radv_device *device)
{
	VkResult res = VK_SUCCESS;

	mtx_lock(&device->meta_state.mtx);
	/* cmask_eliminate_pipeline is created last among the graphics
	 * pipelines' prerequisites here, so its presence means a previous
	 * call already completed.
	 */
	if (device->meta_state.fast_clear_flush.cmask_eliminate_pipeline) {
		mtx_unlock(&device->meta_state.mtx);
		return VK_SUCCESS;
	}

	struct radv_shader_module vs_module = { .nir = radv_meta_build_nir_vs_generate_vertices() };
	if (!vs_module.nir) {
		/* XXX: Need more accurate error */
		res = VK_ERROR_OUT_OF_HOST_MEMORY;
		goto fail;
	}

	res = create_pass(device);
	if (res != VK_SUCCESS)
		goto fail;

	res = create_pipeline_layout(device,
				     &device->meta_state.fast_clear_flush.p_layout);
	if (res != VK_SUCCESS)
		goto fail;

	VkShaderModule vs_module_h = radv_shader_module_to_handle(&vs_module);
	res = create_pipeline(device, vs_module_h,
			      device->meta_state.fast_clear_flush.p_layout);
	if (res != VK_SUCCESS)
		goto fail;

	res = create_dcc_compress_compute(device);
	if (res != VK_SUCCESS)
		goto fail;

	goto cleanup;

fail:
	/* Tear down whatever was created; Destroy* tolerate NULL handles. */
	radv_device_finish_meta_fast_clear_flush_state(device);

cleanup:
	/* The VS NIR is only needed during pipeline creation. */
	ralloc_free(vs_module.nir);
	mtx_unlock(&device->meta_state.mtx);

	return res;
}
542
543
544 VkResult
545 radv_device_init_meta_fast_clear_flush_state(struct radv_device *device, bool on_demand)
546 {
547 if (on_demand)
548 return VK_SUCCESS;
549
550 return radv_device_init_meta_fast_clear_flush_state_internal(device);
551 }
552
553 static void
554 radv_emit_set_predication_state_from_image(struct radv_cmd_buffer *cmd_buffer,
555 struct radv_image *image,
556 uint64_t pred_offset, bool value)
557 {
558 uint64_t va = 0;
559
560 if (value) {
561 va = radv_buffer_get_va(image->bo) + image->offset;
562 va += pred_offset;
563 }
564
565 si_emit_set_predication_state(cmd_buffer, true, va);
566 }
567
/* Run the currently bound decompress/eliminate pipeline over one mip level
 * of one layer: build a temporary image view and framebuffer for that
 * subresource, draw a single full-surface primitive inside the shared
 * meta render pass, then tear the framebuffer down.
 */
static void
radv_process_color_image_layer(struct radv_cmd_buffer *cmd_buffer,
			       struct radv_image *image,
			       const VkImageSubresourceRange *range,
			       int level, int layer)
{
	struct radv_device *device = cmd_buffer->device;
	struct radv_image_view iview;
	uint32_t width, height;

	/* Dimensions of this mip level ('level' is relative to the range). */
	width = radv_minify(image->info.width, range->baseMipLevel + level);
	height = radv_minify(image->info.height, range->baseMipLevel + level);

	/* Single-level, single-layer color view of the target subresource.
	 * NOTE(review): iview is stack-allocated and never explicitly
	 * destroyed — presumably radv image views hold no allocations;
	 * confirm against radv_image_view_init.
	 */
	radv_image_view_init(&iview, device,
			     &(VkImageViewCreateInfo) {
				     .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
				     .image = radv_image_to_handle(image),
				     .viewType = radv_meta_get_view_type(image),
				     .format = image->vk_format,
				     .subresourceRange = {
					     .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
					     .baseMipLevel = range->baseMipLevel + level,
					     .levelCount = 1,
					     .baseArrayLayer = range->baseArrayLayer + layer,
					     .layerCount = 1,
				     },
			     }, NULL);

	VkFramebuffer fb_h;
	radv_CreateFramebuffer(radv_device_to_handle(device),
			       &(VkFramebufferCreateInfo) {
				       .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
				       .attachmentCount = 1,
				       .pAttachments = (VkImageView[]) {
					       radv_image_view_to_handle(&iview)
				       },
				       .width = width,
				       .height = height,
				       .layers = 1
			       }, &cmd_buffer->pool->alloc, &fb_h);

	radv_CmdBeginRenderPass(radv_cmd_buffer_to_handle(cmd_buffer),
				&(VkRenderPassBeginInfo) {
					.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
					.renderPass = device->meta_state.fast_clear_flush.pass,
					.framebuffer = fb_h,
					.renderArea = {
						.offset = {
							0,
							0,
						},
						.extent = {
							width,
							height,
						}
					},
					.clearValueCount = 0,
					.pClearValues = NULL,
				}, VK_SUBPASS_CONTENTS_INLINE);

	/* One 3-vertex draw; the pipelines were created with use_rectlist,
	 * so this covers the full render area.
	 */
	radv_CmdDraw(radv_cmd_buffer_to_handle(cmd_buffer), 3, 1, 0, 0);

	/* Flush the color block (and its metadata caches) after the draw. */
	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	radv_CmdEndRenderPass(radv_cmd_buffer_to_handle(cmd_buffer));

	radv_DestroyFramebuffer(radv_device_to_handle(device), fb_h,
				&cmd_buffer->pool->alloc);
}
638
/* Select the appropriate graphics decompress pipeline (DCC decompress,
 * FMASK decompress, or CMASK fast-clear eliminate), lazily initializing the
 * meta state if needed, then run it over every requested mip level and
 * layer of the image.
 */
static void
radv_process_color_image(struct radv_cmd_buffer *cmd_buffer,
			 struct radv_image *image,
			 const VkImageSubresourceRange *subresourceRange,
			 bool decompress_dcc)
{
	struct radv_meta_saved_state saved_state;
	VkPipeline *pipeline;

	/* Pipeline choice mirrors the compression features in use:
	 * DCC decompress when requested and available on the base level;
	 * FMASK decompress for non-TC-compatible CMASK with FMASK;
	 * otherwise a plain CMASK fast-clear eliminate.
	 */
	if (decompress_dcc && radv_dcc_enabled(image, subresourceRange->baseMipLevel)) {
		pipeline = &cmd_buffer->device->meta_state.fast_clear_flush.dcc_decompress_pipeline;
	} else if (radv_image_has_fmask(image) && !image->tc_compatible_cmask) {
		pipeline = &cmd_buffer->device->meta_state.fast_clear_flush.fmask_decompress_pipeline;
	} else {
		pipeline = &cmd_buffer->device->meta_state.fast_clear_flush.cmask_eliminate_pipeline;
	}

	/* On-demand init: the pipeline may not exist yet. */
	if (!*pipeline) {
		VkResult ret;

		ret = radv_device_init_meta_fast_clear_flush_state_internal(cmd_buffer->device);
		if (ret != VK_SUCCESS) {
			cmd_buffer->record_result = ret;
			return;
		}
	}

	/* Save/restore only what the meta draw clobbers. */
	radv_meta_save(&saved_state, cmd_buffer,
		       RADV_META_SAVE_GRAPHICS_PIPELINE |
		       RADV_META_SAVE_PASS);

	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			     VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);

	for (uint32_t l = 0; l < radv_get_levelCount(image, subresourceRange); ++l) {
		uint32_t width, height;

		/* Do not decompress levels without DCC. */
		if (decompress_dcc &&
		    !radv_dcc_enabled(image, subresourceRange->baseMipLevel + l))
			continue;

		width = radv_minify(image->info.width,
				    subresourceRange->baseMipLevel + l);
		height = radv_minify(image->info.height,
				     subresourceRange->baseMipLevel + l);

		/* Viewport/scissor are dynamic state sized per mip level. */
		radv_CmdSetViewport(radv_cmd_buffer_to_handle(cmd_buffer), 0, 1,
				    &(VkViewport) {
					    .x = 0,
					    .y = 0,
					    .width = width,
					    .height = height,
					    .minDepth = 0.0f,
					    .maxDepth = 1.0f
				    });

		radv_CmdSetScissor(radv_cmd_buffer_to_handle(cmd_buffer), 0, 1,
				   &(VkRect2D) {
					   .offset = { 0, 0 },
					   .extent = { width, height },
				   });

		for (uint32_t s = 0; s < radv_get_layerCount(image, subresourceRange); s++) {
			radv_process_color_image_layer(cmd_buffer, image,
						       subresourceRange, l, s);
		}
	}

	radv_meta_restore(&saved_state, cmd_buffer);
}
710
711 static void
712 radv_emit_color_decompress(struct radv_cmd_buffer *cmd_buffer,
713 struct radv_image *image,
714 const VkImageSubresourceRange *subresourceRange,
715 bool decompress_dcc)
716 {
717 bool old_predicating = false;
718
719 assert(cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL);
720
721 if (radv_dcc_enabled(image, subresourceRange->baseMipLevel)) {
722 uint64_t pred_offset = decompress_dcc ? image->dcc_pred_offset :
723 image->fce_pred_offset;
724 pred_offset += 8 * subresourceRange->baseMipLevel;
725
726 old_predicating = cmd_buffer->state.predicating;
727
728 radv_emit_set_predication_state_from_image(cmd_buffer, image, pred_offset, true);
729 cmd_buffer->state.predicating = true;
730 }
731
732 radv_process_color_image(cmd_buffer, image, subresourceRange,
733 decompress_dcc);
734
735 if (radv_dcc_enabled(image, subresourceRange->baseMipLevel)) {
736 uint64_t pred_offset = decompress_dcc ? image->dcc_pred_offset :
737 image->fce_pred_offset;
738 pred_offset += 8 * subresourceRange->baseMipLevel;
739
740 cmd_buffer->state.predicating = old_predicating;
741
742 radv_emit_set_predication_state_from_image(cmd_buffer, image, pred_offset, false);
743
744 if (cmd_buffer->state.predication_type != -1) {
745 /* Restore previous conditional rendering user state. */
746 si_emit_set_predication_state(cmd_buffer,
747 cmd_buffer->state.predication_type,
748 cmd_buffer->state.predication_va);
749 }
750 }
751
752 if (radv_dcc_enabled(image, subresourceRange->baseMipLevel)) {
753 /* Clear the image's fast-clear eliminate predicate because
754 * FMASK and DCC also imply a fast-clear eliminate.
755 */
756 radv_update_fce_metadata(cmd_buffer, image, subresourceRange, false);
757
758 /* Mark the image as being decompressed. */
759 if (decompress_dcc)
760 radv_update_dcc_metadata(cmd_buffer, image, subresourceRange, false);
761 }
762 }
763
/* Eliminate pending fast clears (CMASK/FMASK path) on the range without
 * decompressing DCC.
 */
void
radv_fast_clear_flush_image_inplace(struct radv_cmd_buffer *cmd_buffer,
				    struct radv_image *image,
				    const VkImageSubresourceRange *subresourceRange)
{
	radv_emit_color_decompress(cmd_buffer, image, subresourceRange, false);
}
771
/* Graphics-queue DCC decompress: same path as the fast-clear flush but
 * with decompress_dcc set.
 */
static void
radv_decompress_dcc_gfx(struct radv_cmd_buffer *cmd_buffer,
			struct radv_image *image,
			const VkImageSubresourceRange *subresourceRange)
{
	radv_emit_color_decompress(cmd_buffer, image, subresourceRange, true);
}
779
/* DCC decompress on a compute queue: for each DCC-capable mip/layer, push
 * a sampled view (reads through DCC) and a compression-disabled storage
 * view of the same subresource, and dispatch the copy-in-place compute
 * shader.  Afterwards the DCC metadata is reset to the fully-expanded
 * pattern.
 */
static void
radv_decompress_dcc_compute(struct radv_cmd_buffer *cmd_buffer,
			    struct radv_image *image,
			    const VkImageSubresourceRange *subresourceRange)
{
	struct radv_meta_saved_state saved_state;
	struct radv_image_view load_iview = {0};
	struct radv_image_view store_iview = {0};
	struct radv_device *device = cmd_buffer->device;

	/* This assumes the image is 2d with 1 layer */
	struct radv_cmd_state *state = &cmd_buffer->state;

	/* Flush the CB caches so prior rendering is visible to the reads. */
	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
			     RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	/* On-demand meta init (checked via the last-created graphics
	 * pipeline, same sentinel the init path uses).
	 */
	if (!cmd_buffer->device->meta_state.fast_clear_flush.cmask_eliminate_pipeline) {
		VkResult ret = radv_device_init_meta_fast_clear_flush_state_internal(cmd_buffer->device);
		if (ret != VK_SUCCESS) {
			cmd_buffer->record_result = ret;
			return;
		}
	}

	radv_meta_save(&saved_state, cmd_buffer, RADV_META_SAVE_DESCRIPTORS |
					         RADV_META_SAVE_COMPUTE_PIPELINE);

	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			     VK_PIPELINE_BIND_POINT_COMPUTE,
			     device->meta_state.fast_clear_flush.dcc_decompress_compute_pipeline);

	for (uint32_t l = 0; l < radv_get_levelCount(image, subresourceRange); l++) {
		uint32_t width, height;

		/* Do not decompress levels without DCC. */
		if (!radv_dcc_enabled(image, subresourceRange->baseMipLevel + l))
			continue;

		width = radv_minify(image->info.width,
				    subresourceRange->baseMipLevel + l);
		height = radv_minify(image->info.height,
				     subresourceRange->baseMipLevel + l);

		for (uint32_t s = 0; s < radv_get_layerCount(image, subresourceRange); s++) {
			/* Source view: normal (compressed) access. */
			radv_image_view_init(&load_iview, cmd_buffer->device,
					     &(VkImageViewCreateInfo) {
						     .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
						     .image = radv_image_to_handle(image),
						     .viewType = VK_IMAGE_VIEW_TYPE_2D,
						     .format = image->vk_format,
						     .subresourceRange = {
							     .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
							     .baseMipLevel = subresourceRange->baseMipLevel + l,
							     .levelCount = 1,
							     .baseArrayLayer = subresourceRange->baseArrayLayer + s,
							     .layerCount = 1
						     },
					     }, NULL);
			/* Destination view: same subresource, but created with
			 * compression disabled so the stores land expanded.
			 */
			radv_image_view_init(&store_iview, cmd_buffer->device,
					     &(VkImageViewCreateInfo) {
						     .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
						     .image = radv_image_to_handle(image),
						     .viewType = VK_IMAGE_VIEW_TYPE_2D,
						     .format = image->vk_format,
						     .subresourceRange = {
							     .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
							     .baseMipLevel = subresourceRange->baseMipLevel + l,
							     .levelCount = 1,
							     .baseArrayLayer = subresourceRange->baseArrayLayer + s,
							     .layerCount = 1
						     },
					     }, &(struct radv_image_view_extra_create_info) {
						     .disable_compression = true
					     });

			/* Bind both views as a push descriptor set matching the
			 * shader's set 0 layout (0 = sampled, 1 = storage).
			 */
			radv_meta_push_descriptor_set(cmd_buffer,
						      VK_PIPELINE_BIND_POINT_COMPUTE,
						      device->meta_state.fast_clear_flush.dcc_decompress_compute_p_layout,
						      0, /* set */
						      2, /* descriptorWriteCount */
						      (VkWriteDescriptorSet[]) {
							      {
								      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
								      .dstBinding = 0,
								      .dstArrayElement = 0,
								      .descriptorCount = 1,
								      .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
								      .pImageInfo = (VkDescriptorImageInfo[]) {
									      {
										      .sampler = VK_NULL_HANDLE,
										      .imageView = radv_image_view_to_handle(&load_iview),
										      .imageLayout = VK_IMAGE_LAYOUT_GENERAL,
									      },
								      }
							      },
							      {
								      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
								      .dstBinding = 1,
								      .dstArrayElement = 0,
								      .descriptorCount = 1,
								      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
								      .pImageInfo = (VkDescriptorImageInfo[]) {
									      {
										      .sampler = VK_NULL_HANDLE,
										      .imageView = radv_image_view_to_handle(&store_iview),
										      .imageLayout = VK_IMAGE_LAYOUT_GENERAL,
									      },
								      }
							      }
						      });

			/* Covers width x height; the shader uses 16x16x1 groups. */
			radv_unaligned_dispatch(cmd_buffer, width, height, 1);
		}
	}

	/* Mark this image as actually being decompressed. */
	radv_update_dcc_metadata(cmd_buffer, image, subresourceRange, false);

	/* The fill buffer below does its own saving */
	radv_meta_restore(&saved_state, cmd_buffer);

	state->flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
			     RADV_CMD_FLAG_INV_VCACHE;


	/* Initialize the DCC metadata as "fully expanded". */
	radv_initialize_dcc(cmd_buffer, image, subresourceRange, 0xffffffff);
}
908
909 void
910 radv_decompress_dcc(struct radv_cmd_buffer *cmd_buffer,
911 struct radv_image *image,
912 const VkImageSubresourceRange *subresourceRange)
913 {
914 if (cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL)
915 radv_decompress_dcc_gfx(cmd_buffer, image, subresourceRange);
916 else
917 radv_decompress_dcc_compute(cmd_buffer, image, subresourceRange);
918 }