radv: only enable TC-compat HTILE for images readable by a shader
[mesa.git] / src / amd / vulkan / radv_meta_fast_clear.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>

#include "radv_meta.h"
#include "radv_private.h"
#include "sid.h"

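/*
 * Meta operations that flush color fast clears: CMASK fast-clear eliminate,
 * FMASK decompress and DCC decompress. The graphics paths draw a rectangle
 * over each mip level/layer with a no-op fragment shader and let the color
 * block do the work via a custom blend mode; the compute path rewrites each
 * texel through a storage view created with compression disabled.
 */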
static nir_shader *
build_dcc_decompress_compute_shader(struct radv_device *dev)
{
	nir_builder b;
	const struct glsl_type *buf_type = glsl_sampler_type(GLSL_SAMPLER_DIM_2D,
							     false,
							     false,
							     GLSL_TYPE_FLOAT);
	const struct glsl_type *img_type = glsl_sampler_type(GLSL_SAMPLER_DIM_2D,
							     false,
							     false,
							     GLSL_TYPE_FLOAT);
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "dcc_decompress_compute");

	/* We need at least 16/16/1 to cover an entire DCC block in a single workgroup. */
	b.shader->info.cs.local_size[0] = 16;
	b.shader->info.cs.local_size[1] = 16;
	b.shader->info.cs.local_size[2] = 1;
	nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
						      buf_type, "s_tex");
	input_img->data.descriptor_set = 0;
	input_img->data.binding = 0;

	nir_variable *output_img = nir_variable_create(b.shader, nir_var_uniform,
						       img_type, "out_img");
	output_img->data.descriptor_set = 0;
	output_img->data.binding = 1;

	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);

	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	nir_ssa_def *input_img_deref = &nir_build_deref_var(&b, input_img)->dest.ssa;

	nir_tex_instr *tex = nir_tex_instr_create(b.shader, 3);
	tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
	tex->op = nir_texop_txf;
	tex->src[0].src_type = nir_tex_src_coord;
	tex->src[0].src = nir_src_for_ssa(nir_channels(&b, global_id, 3));
	tex->src[1].src_type = nir_tex_src_lod;
	tex->src[1].src = nir_src_for_ssa(nir_imm_int(&b, 0));
	tex->src[2].src_type = nir_tex_src_texture_deref;
	tex->src[2].src = nir_src_for_ssa(input_img_deref);
	tex->dest_type = nir_type_float;
	tex->is_array = false;
	tex->coord_components = 2;

	nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
	nir_builder_instr_insert(&b, &tex->instr);

	nir_intrinsic_instr *membar = nir_intrinsic_instr_create(b.shader, nir_intrinsic_memory_barrier);
	nir_builder_instr_insert(&b, &membar->instr);

	nir_intrinsic_instr *bar = nir_intrinsic_instr_create(b.shader, nir_intrinsic_control_barrier);
	nir_builder_instr_insert(&b, &bar->instr);

	nir_ssa_def *outval = &tex->dest.ssa;
	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_image_deref_store);
	store->num_components = 4;
	store->src[0] = nir_src_for_ssa(&nir_build_deref_var(&b, output_img)->dest.ssa);
	store->src[1] = nir_src_for_ssa(global_id);
	store->src[2] = nir_src_for_ssa(nir_ssa_undef(&b, 1, 32));
	store->src[3] = nir_src_for_ssa(outval);
	store->src[4] = nir_src_for_ssa(nir_imm_int(&b, 0));

	nir_builder_instr_insert(&b, &store->instr);
	return b.shader;
}

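/*
 * Builds the descriptor set layout (sampled image + storage image), the
 * pipeline layout (8 bytes of push constants) and the compute pipeline for
 * the DCC decompress compute path. Despite the "compress" in its name, the
 * pipeline it creates runs the decompress shader built above.
 */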
static VkResult
create_dcc_compress_compute(struct radv_device *device)
{
	VkResult result = VK_SUCCESS;
	struct radv_shader_module cs = { .nir = NULL };

	cs.nir = build_dcc_decompress_compute_shader(device);

	VkDescriptorSetLayoutCreateInfo ds_create_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
		.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
		.bindingCount = 2,
		.pBindings = (VkDescriptorSetLayoutBinding[]) {
			{
				.binding = 0,
				.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
			{
				.binding = 1,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
		}
	};

	result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
						&ds_create_info,
						&device->meta_state.alloc,
						&device->meta_state.fast_clear_flush.dcc_decompress_compute_ds_layout);
	if (result != VK_SUCCESS)
		goto cleanup;


	VkPipelineLayoutCreateInfo pl_create_info = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		.setLayoutCount = 1,
		.pSetLayouts = &device->meta_state.fast_clear_flush.dcc_decompress_compute_ds_layout,
		.pushConstantRangeCount = 1,
		.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 8},
	};

	result = radv_CreatePipelineLayout(radv_device_to_handle(device),
					   &pl_create_info,
					   &device->meta_state.alloc,
					   &device->meta_state.fast_clear_flush.dcc_decompress_compute_p_layout);
	if (result != VK_SUCCESS)
		goto cleanup;

	/* compute shader */

	VkPipelineShaderStageCreateInfo pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.fast_clear_flush.dcc_decompress_compute_p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &vk_pipeline_info, NULL,
					     &device->meta_state.fast_clear_flush.dcc_decompress_compute_pipeline);
	if (result != VK_SUCCESS)
		goto cleanup;

cleanup:
	ralloc_free(cs.nir);
	return result;
}

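/*
 * Single-subpass render pass with one color attachment that stays in
 * TRANSFER_DST_OPTIMAL. The external dependencies carry empty access masks;
 * the required cache flushes are emitted explicitly through flush_bits after
 * each decompress draw.
 */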
static VkResult
create_pass(struct radv_device *device)
{
	VkResult result;
	VkDevice device_h = radv_device_to_handle(device);
	const VkAllocationCallbacks *alloc = &device->meta_state.alloc;
	VkAttachmentDescription attachment;

	attachment.format = VK_FORMAT_UNDEFINED;
	attachment.samples = 1;
	attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
	attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
	attachment.initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
	attachment.finalLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;

	result = radv_CreateRenderPass(device_h,
		&(VkRenderPassCreateInfo) {
			.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
			.attachmentCount = 1,
			.pAttachments = &attachment,
			.subpassCount = 1,
			.pSubpasses = &(VkSubpassDescription) {
				.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
				.inputAttachmentCount = 0,
				.colorAttachmentCount = 1,
				.pColorAttachments = (VkAttachmentReference[]) {
					{
						.attachment = 0,
						.layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
					},
				},
				.pResolveAttachments = NULL,
				.pDepthStencilAttachment = &(VkAttachmentReference) {
					.attachment = VK_ATTACHMENT_UNUSED,
				},
				.preserveAttachmentCount = 0,
				.pPreserveAttachments = NULL,
			},
			.dependencyCount = 2,
			.pDependencies = (VkSubpassDependency[]) {
				{
					.srcSubpass = VK_SUBPASS_EXTERNAL,
					.dstSubpass = 0,
					.srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
					.dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
					.srcAccessMask = 0,
					.dstAccessMask = 0,
					.dependencyFlags = 0
				},
				{
					.srcSubpass = 0,
					.dstSubpass = VK_SUBPASS_EXTERNAL,
					.srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
					.dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
					.srcAccessMask = 0,
					.dstAccessMask = 0,
					.dependencyFlags = 0
				}
			},
		},
		alloc,
		&device->meta_state.fast_clear_flush.pass);

	return result;
}

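/* Empty pipeline layout: the graphics decompress pipelines need no
 * descriptors or push constants.
 */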
static VkResult
create_pipeline_layout(struct radv_device *device, VkPipelineLayout *layout)
{
	VkPipelineLayoutCreateInfo pl_create_info = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		.setLayoutCount = 0,
		.pSetLayouts = NULL,
		.pushConstantRangeCount = 0,
		.pPushConstantRanges = NULL,
	};

	return radv_CreatePipelineLayout(radv_device_to_handle(device),
					 &pl_create_info,
					 &device->meta_state.alloc,
					 layout);
}

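/*
 * Creates the three graphics decompress pipelines. They all draw a rectangle
 * with a no-op fragment shader; the actual operation is selected by the
 * custom CB blend mode: CB_ELIMINATE_FAST_CLEAR, CB_FMASK_DECOMPRESS or
 * CB_DCC_DECOMPRESS.
 */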
static VkResult
create_pipeline(struct radv_device *device,
		VkShaderModule vs_module_h,
		VkPipelineLayout layout)
{
	VkResult result;
	VkDevice device_h = radv_device_to_handle(device);

	struct radv_shader_module fs_module = {
		.nir = radv_meta_build_nir_fs_noop(),
	};

	if (!fs_module.nir) {
		/* XXX: Need more accurate error */
		result = VK_ERROR_OUT_OF_HOST_MEMORY;
		goto cleanup;
	}

	const VkPipelineShaderStageCreateInfo stages[2] = {
		{
			.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
			.stage = VK_SHADER_STAGE_VERTEX_BIT,
			.module = vs_module_h,
			.pName = "main",
		},
		{
			.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
			.stage = VK_SHADER_STAGE_FRAGMENT_BIT,
			.module = radv_shader_module_to_handle(&fs_module),
			.pName = "main",
		},
	};

	const VkPipelineVertexInputStateCreateInfo vi_state = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
		.vertexBindingDescriptionCount = 0,
		.vertexAttributeDescriptionCount = 0,
	};

	const VkPipelineInputAssemblyStateCreateInfo ia_state = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
		.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
		.primitiveRestartEnable = false,
	};

	const VkPipelineColorBlendStateCreateInfo blend_state = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
		.logicOpEnable = false,
		.attachmentCount = 1,
		.pAttachments = (VkPipelineColorBlendAttachmentState []) {
			{
				.colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
						  VK_COLOR_COMPONENT_G_BIT |
						  VK_COLOR_COMPONENT_B_BIT |
						  VK_COLOR_COMPONENT_A_BIT,
			},
		}
	};
	const VkPipelineRasterizationStateCreateInfo rs_state = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
		.depthClampEnable = false,
		.rasterizerDiscardEnable = false,
		.polygonMode = VK_POLYGON_MODE_FILL,
		.cullMode = VK_CULL_MODE_NONE,
		.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
	};

	result = radv_graphics_pipeline_create(device_h,
		radv_pipeline_cache_to_handle(&device->meta_state.cache),
		&(VkGraphicsPipelineCreateInfo) {
			.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
			.stageCount = 2,
			.pStages = stages,

			.pVertexInputState = &vi_state,
			.pInputAssemblyState = &ia_state,

			.pViewportState = &(VkPipelineViewportStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
				.viewportCount = 1,
				.scissorCount = 1,
			},
			.pRasterizationState = &rs_state,
			.pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
				.rasterizationSamples = 1,
				.sampleShadingEnable = false,
				.pSampleMask = NULL,
				.alphaToCoverageEnable = false,
				.alphaToOneEnable = false,
			},
			.pColorBlendState = &blend_state,
			.pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
				.dynamicStateCount = 2,
				.pDynamicStates = (VkDynamicState[]) {
					VK_DYNAMIC_STATE_VIEWPORT,
					VK_DYNAMIC_STATE_SCISSOR,
				},
			},
			.layout = layout,
			.renderPass = device->meta_state.fast_clear_flush.pass,
			.subpass = 0,
		},
		&(struct radv_graphics_pipeline_create_info) {
			.use_rectlist = true,
			.custom_blend_mode = V_028808_CB_ELIMINATE_FAST_CLEAR,
		},
		&device->meta_state.alloc,
		&device->meta_state.fast_clear_flush.cmask_eliminate_pipeline);
	if (result != VK_SUCCESS)
		goto cleanup;

	result = radv_graphics_pipeline_create(device_h,
		radv_pipeline_cache_to_handle(&device->meta_state.cache),
		&(VkGraphicsPipelineCreateInfo) {
			.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
			.stageCount = 2,
			.pStages = stages,

			.pVertexInputState = &vi_state,
			.pInputAssemblyState = &ia_state,

			.pViewportState = &(VkPipelineViewportStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
				.viewportCount = 1,
				.scissorCount = 1,
			},
			.pRasterizationState = &rs_state,
			.pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
				.rasterizationSamples = 1,
				.sampleShadingEnable = false,
				.pSampleMask = NULL,
				.alphaToCoverageEnable = false,
				.alphaToOneEnable = false,
			},
			.pColorBlendState = &blend_state,
			.pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
				.dynamicStateCount = 2,
				.pDynamicStates = (VkDynamicState[]) {
					VK_DYNAMIC_STATE_VIEWPORT,
					VK_DYNAMIC_STATE_SCISSOR,
				},
			},
			.layout = layout,
			.renderPass = device->meta_state.fast_clear_flush.pass,
			.subpass = 0,
		},
		&(struct radv_graphics_pipeline_create_info) {
			.use_rectlist = true,
			.custom_blend_mode = V_028808_CB_FMASK_DECOMPRESS,
		},
		&device->meta_state.alloc,
		&device->meta_state.fast_clear_flush.fmask_decompress_pipeline);
	if (result != VK_SUCCESS)
		goto cleanup;

	result = radv_graphics_pipeline_create(device_h,
		radv_pipeline_cache_to_handle(&device->meta_state.cache),
		&(VkGraphicsPipelineCreateInfo) {
			.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
			.stageCount = 2,
			.pStages = stages,

			.pVertexInputState = &vi_state,
			.pInputAssemblyState = &ia_state,

			.pViewportState = &(VkPipelineViewportStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
				.viewportCount = 1,
				.scissorCount = 1,
			},
			.pRasterizationState = &rs_state,
			.pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
				.rasterizationSamples = 1,
				.sampleShadingEnable = false,
				.pSampleMask = NULL,
				.alphaToCoverageEnable = false,
				.alphaToOneEnable = false,
			},
			.pColorBlendState = &blend_state,
			.pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
				.dynamicStateCount = 2,
				.pDynamicStates = (VkDynamicState[]) {
					VK_DYNAMIC_STATE_VIEWPORT,
					VK_DYNAMIC_STATE_SCISSOR,
				},
			},
			.layout = layout,
			.renderPass = device->meta_state.fast_clear_flush.pass,
			.subpass = 0,
		},
		&(struct radv_graphics_pipeline_create_info) {
			.use_rectlist = true,
			.custom_blend_mode = V_028808_CB_DCC_DECOMPRESS,
		},
		&device->meta_state.alloc,
		&device->meta_state.fast_clear_flush.dcc_decompress_pipeline);
	if (result != VK_SUCCESS)
		goto cleanup;

	goto cleanup;

cleanup:
	ralloc_free(fs_module.nir);
	return result;
}

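/* Destroys all fast-clear flush meta objects. This is also used by the init
 * fail path with partially created state, which is fine because the Vulkan
 * destroy entry points accept VK_NULL_HANDLE.
 */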
void
radv_device_finish_meta_fast_clear_flush_state(struct radv_device *device)
{
	struct radv_meta_state *state = &device->meta_state;

	radv_DestroyPipeline(radv_device_to_handle(device),
			     state->fast_clear_flush.dcc_decompress_pipeline,
			     &state->alloc);
	radv_DestroyPipeline(radv_device_to_handle(device),
			     state->fast_clear_flush.fmask_decompress_pipeline,
			     &state->alloc);
	radv_DestroyPipeline(radv_device_to_handle(device),
			     state->fast_clear_flush.cmask_eliminate_pipeline,
			     &state->alloc);
	radv_DestroyRenderPass(radv_device_to_handle(device),
			       state->fast_clear_flush.pass, &state->alloc);
	radv_DestroyPipelineLayout(radv_device_to_handle(device),
				   state->fast_clear_flush.p_layout,
				   &state->alloc);

	radv_DestroyPipeline(radv_device_to_handle(device),
			     state->fast_clear_flush.dcc_decompress_compute_pipeline,
			     &state->alloc);
	radv_DestroyPipelineLayout(radv_device_to_handle(device),
				   state->fast_clear_flush.dcc_decompress_compute_p_layout,
				   &state->alloc);
	radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
					state->fast_clear_flush.dcc_decompress_compute_ds_layout,
					&state->alloc);
}

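/*
 * Creates all fast-clear flush meta objects. Guarded by the meta state mutex
 * so that concurrent on-demand creation only builds the pipelines once; the
 * CMASK eliminate pipeline doubles as the "already initialized" marker.
 */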
static VkResult
radv_device_init_meta_fast_clear_flush_state_internal(struct radv_device *device)
{
	VkResult res = VK_SUCCESS;

	mtx_lock(&device->meta_state.mtx);
	if (device->meta_state.fast_clear_flush.cmask_eliminate_pipeline) {
		mtx_unlock(&device->meta_state.mtx);
		return VK_SUCCESS;
	}

	struct radv_shader_module vs_module = { .nir = radv_meta_build_nir_vs_generate_vertices() };
	if (!vs_module.nir) {
		/* XXX: Need more accurate error */
		res = VK_ERROR_OUT_OF_HOST_MEMORY;
		goto fail;
	}

	res = create_pass(device);
	if (res != VK_SUCCESS)
		goto fail;

	res = create_pipeline_layout(device,
				     &device->meta_state.fast_clear_flush.p_layout);
	if (res != VK_SUCCESS)
		goto fail;

	VkShaderModule vs_module_h = radv_shader_module_to_handle(&vs_module);
	res = create_pipeline(device, vs_module_h,
			      device->meta_state.fast_clear_flush.p_layout);
	if (res != VK_SUCCESS)
		goto fail;

	res = create_dcc_compress_compute(device);
	if (res != VK_SUCCESS)
		goto fail;

	goto cleanup;

fail:
	radv_device_finish_meta_fast_clear_flush_state(device);

cleanup:
	ralloc_free(vs_module.nir);
	mtx_unlock(&device->meta_state.mtx);

	return res;
}


VkResult
radv_device_init_meta_fast_clear_flush_state(struct radv_device *device, bool on_demand)
{
	if (on_demand)
		return VK_SUCCESS;

	return radv_device_init_meta_fast_clear_flush_state_internal(device);
}

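/*
 * Points GPU predication at the image's fast-clear eliminate (or DCC)
 * predicate, which lives in the image BO at the given offset, or disables it
 * again when value is false.
 */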
static void
radv_emit_set_predication_state_from_image(struct radv_cmd_buffer *cmd_buffer,
					   struct radv_image *image,
					   uint64_t pred_offset, bool value)
{
	uint64_t va = 0;

	if (value) {
		va = radv_buffer_get_va(image->bo) + image->offset;
		va += pred_offset;
	}

	si_emit_set_predication_state(cmd_buffer, true, va);
}

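/*
 * Runs the currently bound decompress pipeline over a single mip level and
 * layer: creates a one-attachment framebuffer for that subresource, draws the
 * meta render pass and flushes CB metadata afterwards.
 */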
static void
radv_process_color_image_layer(struct radv_cmd_buffer *cmd_buffer,
			       struct radv_image *image,
			       const VkImageSubresourceRange *range,
			       int level, int layer)
{
	struct radv_device *device = cmd_buffer->device;
	struct radv_image_view iview;
	uint32_t width, height;

	width = radv_minify(image->info.width, range->baseMipLevel + level);
	height = radv_minify(image->info.height, range->baseMipLevel + level);

	radv_image_view_init(&iview, device,
		&(VkImageViewCreateInfo) {
			.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
			.image = radv_image_to_handle(image),
			.viewType = radv_meta_get_view_type(image),
			.format = image->vk_format,
			.subresourceRange = {
				.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
				.baseMipLevel = range->baseMipLevel + level,
				.levelCount = 1,
				.baseArrayLayer = range->baseArrayLayer + layer,
				.layerCount = 1,
			},
		}, NULL);

	VkFramebuffer fb_h;
	radv_CreateFramebuffer(radv_device_to_handle(device),
		&(VkFramebufferCreateInfo) {
			.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
			.attachmentCount = 1,
			.pAttachments = (VkImageView[]) {
				radv_image_view_to_handle(&iview)
			},
			.width = width,
			.height = height,
			.layers = 1
		}, &cmd_buffer->pool->alloc, &fb_h);

	radv_cmd_buffer_begin_render_pass(cmd_buffer,
		&(VkRenderPassBeginInfo) {
			.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
			.renderPass = device->meta_state.fast_clear_flush.pass,
			.framebuffer = fb_h,
			.renderArea = {
				.offset = { 0, 0, },
				.extent = { width, height, }
			},
			.clearValueCount = 0,
			.pClearValues = NULL,
		});

	radv_cmd_buffer_set_subpass(cmd_buffer,
				    &cmd_buffer->state.pass->subpasses[0]);

	radv_CmdDraw(radv_cmd_buffer_to_handle(cmd_buffer), 3, 1, 0, 0);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	radv_cmd_buffer_end_render_pass(cmd_buffer);

	radv_DestroyFramebuffer(radv_device_to_handle(device), fb_h,
				&cmd_buffer->pool->alloc);
}

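/*
 * Picks the right graphics pipeline for the image (DCC decompress, FMASK
 * decompress, or plain CMASK fast-clear eliminate), creates the meta objects
 * on demand, and processes every requested mip level and layer.
 */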
static void
radv_process_color_image(struct radv_cmd_buffer *cmd_buffer,
			 struct radv_image *image,
			 const VkImageSubresourceRange *subresourceRange,
			 bool decompress_dcc)
{
	struct radv_meta_saved_state saved_state;
	VkPipeline *pipeline;

	if (decompress_dcc && radv_dcc_enabled(image, subresourceRange->baseMipLevel)) {
		pipeline = &cmd_buffer->device->meta_state.fast_clear_flush.dcc_decompress_pipeline;
	} else if (radv_image_has_fmask(image) && !image->tc_compatible_cmask) {
		pipeline = &cmd_buffer->device->meta_state.fast_clear_flush.fmask_decompress_pipeline;
	} else {
		pipeline = &cmd_buffer->device->meta_state.fast_clear_flush.cmask_eliminate_pipeline;
	}

	if (!*pipeline) {
		VkResult ret;

		ret = radv_device_init_meta_fast_clear_flush_state_internal(cmd_buffer->device);
		if (ret != VK_SUCCESS) {
			cmd_buffer->record_result = ret;
			return;
		}
	}

	radv_meta_save(&saved_state, cmd_buffer,
		       RADV_META_SAVE_GRAPHICS_PIPELINE |
		       RADV_META_SAVE_PASS);

	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			     VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);

	for (uint32_t l = 0; l < radv_get_levelCount(image, subresourceRange); ++l) {
		uint32_t width, height;

		/* Do not decompress levels without DCC. */
		if (decompress_dcc &&
		    !radv_dcc_enabled(image, subresourceRange->baseMipLevel + l))
			continue;

		width = radv_minify(image->info.width,
				    subresourceRange->baseMipLevel + l);
		height = radv_minify(image->info.height,
				     subresourceRange->baseMipLevel + l);

		radv_CmdSetViewport(radv_cmd_buffer_to_handle(cmd_buffer), 0, 1,
			&(VkViewport) {
				.x = 0,
				.y = 0,
				.width = width,
				.height = height,
				.minDepth = 0.0f,
				.maxDepth = 1.0f
			});

		radv_CmdSetScissor(radv_cmd_buffer_to_handle(cmd_buffer), 0, 1,
			&(VkRect2D) {
				.offset = { 0, 0 },
				.extent = { width, height },
			});

		for (uint32_t s = 0; s < radv_get_layerCount(image, subresourceRange); s++) {
			radv_process_color_image_layer(cmd_buffer, image,
						       subresourceRange, l, s);
		}
	}

	radv_meta_restore(&saved_state, cmd_buffer);
}

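/*
 * Wraps the decompress pass in GPU predication when DCC is enabled: the draws
 * only execute if the per-level FCE/DCC predicate stored in the image
 * metadata indicates work is actually pending, and the user's conditional
 * rendering state is restored afterwards.
 */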
static void
radv_emit_color_decompress(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image,
			   const VkImageSubresourceRange *subresourceRange,
			   bool decompress_dcc)
{
	bool old_predicating = false;

	assert(cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL);

	if (radv_dcc_enabled(image, subresourceRange->baseMipLevel)) {
		uint64_t pred_offset = decompress_dcc ? image->dcc_pred_offset :
							image->fce_pred_offset;
		pred_offset += 8 * subresourceRange->baseMipLevel;

		old_predicating = cmd_buffer->state.predicating;

		radv_emit_set_predication_state_from_image(cmd_buffer, image, pred_offset, true);
		cmd_buffer->state.predicating = true;
	}

	radv_process_color_image(cmd_buffer, image, subresourceRange,
				 decompress_dcc);

	if (radv_dcc_enabled(image, subresourceRange->baseMipLevel)) {
		uint64_t pred_offset = decompress_dcc ? image->dcc_pred_offset :
							image->fce_pred_offset;
		pred_offset += 8 * subresourceRange->baseMipLevel;

		cmd_buffer->state.predicating = old_predicating;

		radv_emit_set_predication_state_from_image(cmd_buffer, image, pred_offset, false);

		if (cmd_buffer->state.predication_type != -1) {
			/* Restore previous conditional rendering user state. */
			si_emit_set_predication_state(cmd_buffer,
						      cmd_buffer->state.predication_type,
						      cmd_buffer->state.predication_va);
		}
	}

	if (radv_dcc_enabled(image, subresourceRange->baseMipLevel)) {
		/* Clear the image's fast-clear eliminate predicate because
		 * FMASK and DCC also imply a fast-clear eliminate.
		 */
		radv_update_fce_metadata(cmd_buffer, image, subresourceRange, false);

		/* Mark the image as being decompressed. */
		if (decompress_dcc)
			radv_update_dcc_metadata(cmd_buffer, image, subresourceRange, false);
	}
}

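/*
 * Public entry point for eliminating a color fast clear in place: an FMASK
 * decompress when the image has FMASK, otherwise a plain fast-clear
 * eliminate. The transition is reported through the driver's layout
 * transition/barrier accounting before the work is emitted.
 */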
void
radv_fast_clear_flush_image_inplace(struct radv_cmd_buffer *cmd_buffer,
				    struct radv_image *image,
				    const VkImageSubresourceRange *subresourceRange)
{
	struct radv_barrier_data barrier = {};

	if (radv_image_has_fmask(image)) {
		barrier.layout_transitions.fmask_decompress = 1;
	} else {
		barrier.layout_transitions.fast_clear_eliminate = 1;
	}
	radv_describe_layout_transition(cmd_buffer, &barrier);

	radv_emit_color_decompress(cmd_buffer, image, subresourceRange, false);
}

static void
radv_decompress_dcc_gfx(struct radv_cmd_buffer *cmd_buffer,
			struct radv_image *image,
			const VkImageSubresourceRange *subresourceRange)
{
	radv_emit_color_decompress(cmd_buffer, image, subresourceRange, true);
}

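/*
 * DCC decompress fallback for queues that cannot run the graphics path: each
 * texel is read through a sampled (compressed) view with txf and written back
 * through a storage view created with compression disabled, which expands the
 * data in place. The DCC metadata is then reset to the "fully expanded"
 * pattern.
 */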
static void
radv_decompress_dcc_compute(struct radv_cmd_buffer *cmd_buffer,
			    struct radv_image *image,
			    const VkImageSubresourceRange *subresourceRange)
{
	struct radv_meta_saved_state saved_state;
	struct radv_image_view load_iview = {0};
	struct radv_image_view store_iview = {0};
	struct radv_device *device = cmd_buffer->device;

	/* This assumes the image is 2d with 1 layer */
	struct radv_cmd_state *state = &cmd_buffer->state;

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
			     RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	if (!cmd_buffer->device->meta_state.fast_clear_flush.cmask_eliminate_pipeline) {
		VkResult ret = radv_device_init_meta_fast_clear_flush_state_internal(cmd_buffer->device);
		if (ret != VK_SUCCESS) {
			cmd_buffer->record_result = ret;
			return;
		}
	}

	radv_meta_save(&saved_state, cmd_buffer, RADV_META_SAVE_DESCRIPTORS |
						 RADV_META_SAVE_COMPUTE_PIPELINE);

	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			     VK_PIPELINE_BIND_POINT_COMPUTE,
			     device->meta_state.fast_clear_flush.dcc_decompress_compute_pipeline);

	for (uint32_t l = 0; l < radv_get_levelCount(image, subresourceRange); l++) {
		uint32_t width, height;

		/* Do not decompress levels without DCC. */
		if (!radv_dcc_enabled(image, subresourceRange->baseMipLevel + l))
			continue;

		width = radv_minify(image->info.width,
				    subresourceRange->baseMipLevel + l);
		height = radv_minify(image->info.height,
				     subresourceRange->baseMipLevel + l);

		for (uint32_t s = 0; s < radv_get_layerCount(image, subresourceRange); s++) {
			radv_image_view_init(&load_iview, cmd_buffer->device,
				&(VkImageViewCreateInfo) {
					.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
					.image = radv_image_to_handle(image),
					.viewType = VK_IMAGE_VIEW_TYPE_2D,
					.format = image->vk_format,
					.subresourceRange = {
						.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
						.baseMipLevel = subresourceRange->baseMipLevel + l,
						.levelCount = 1,
						.baseArrayLayer = subresourceRange->baseArrayLayer + s,
						.layerCount = 1
					},
				}, NULL);
			radv_image_view_init(&store_iview, cmd_buffer->device,
				&(VkImageViewCreateInfo) {
					.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
					.image = radv_image_to_handle(image),
					.viewType = VK_IMAGE_VIEW_TYPE_2D,
					.format = image->vk_format,
					.subresourceRange = {
						.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
						.baseMipLevel = subresourceRange->baseMipLevel + l,
						.levelCount = 1,
						.baseArrayLayer = subresourceRange->baseArrayLayer + s,
						.layerCount = 1
					},
				}, &(struct radv_image_view_extra_create_info) {
					.disable_compression = true
				});

			radv_meta_push_descriptor_set(cmd_buffer,
				VK_PIPELINE_BIND_POINT_COMPUTE,
				device->meta_state.fast_clear_flush.dcc_decompress_compute_p_layout,
				0, /* set */
				2, /* descriptorWriteCount */
				(VkWriteDescriptorSet[]) {
					{
						.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						.dstBinding = 0,
						.dstArrayElement = 0,
						.descriptorCount = 1,
						.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
						.pImageInfo = (VkDescriptorImageInfo[]) {
							{
								.sampler = VK_NULL_HANDLE,
								.imageView = radv_image_view_to_handle(&load_iview),
								.imageLayout = VK_IMAGE_LAYOUT_GENERAL,
							},
						}
					},
					{
						.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						.dstBinding = 1,
						.dstArrayElement = 0,
						.descriptorCount = 1,
						.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
						.pImageInfo = (VkDescriptorImageInfo[]) {
							{
								.sampler = VK_NULL_HANDLE,
								.imageView = radv_image_view_to_handle(&store_iview),
								.imageLayout = VK_IMAGE_LAYOUT_GENERAL,
							},
						}
					}
				});

			radv_unaligned_dispatch(cmd_buffer, width, height, 1);
		}
	}

	/* Mark this image as actually being decompressed. */
	radv_update_dcc_metadata(cmd_buffer, image, subresourceRange, false);

	/* The fill buffer below does its own saving */
	radv_meta_restore(&saved_state, cmd_buffer);

	state->flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
			     RADV_CMD_FLAG_INV_VCACHE;


	/* Initialize the DCC metadata as "fully expanded". */
	radv_initialize_dcc(cmd_buffer, image, subresourceRange, 0xffffffff);
}

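/*
 * Fully decompresses DCC for the given subresource range, using the graphics
 * path on the general queue and the compute fallback everywhere else (e.g.
 * the dedicated compute queue).
 */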
void
radv_decompress_dcc(struct radv_cmd_buffer *cmd_buffer,
		    struct radv_image *image,
		    const VkImageSubresourceRange *subresourceRange)
{
	struct radv_barrier_data barrier = {};

	barrier.layout_transitions.dcc_decompress = 1;
	radv_describe_layout_transition(cmd_buffer, &barrier);

	if (cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL)
		radv_decompress_dcc_gfx(cmd_buffer, image, subresourceRange);
	else
		radv_decompress_dcc_compute(cmd_buffer, image, subresourceRange);
}