src/amd/vulkan/radv_meta_fast_clear.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>

#include "radv_meta.h"
#include "radv_private.h"
#include "sid.h"

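/* Compute shader that copies an image onto itself: every invocation fetches
 * one texel from the source view (descriptor 0, sampled image) with txf and
 * writes it back through the destination view (descriptor 1, storage image).
 * When the destination view is created with compression disabled, this
 * rewrites the data in fully decompressed form.
 */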
static nir_shader *
build_dcc_decompress_compute_shader(struct radv_device *dev)
{
	nir_builder b;
	const struct glsl_type *buf_type = glsl_sampler_type(GLSL_SAMPLER_DIM_2D,
							     false,
							     false,
							     GLSL_TYPE_FLOAT);
	const struct glsl_type *img_type = glsl_image_type(GLSL_SAMPLER_DIM_2D,
							   false,
							   GLSL_TYPE_FLOAT);
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "dcc_decompress_compute");

	/* We need at least 16/16/1 to cover an entire DCC block in a single workgroup. */
	b.shader->info.cs.local_size[0] = 16;
	b.shader->info.cs.local_size[1] = 16;
	b.shader->info.cs.local_size[2] = 1;
	nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
						      buf_type, "s_tex");
	input_img->data.descriptor_set = 0;
	input_img->data.binding = 0;

	nir_variable *output_img = nir_variable_create(b.shader, nir_var_uniform,
						       img_type, "out_img");
	output_img->data.descriptor_set = 0;
	output_img->data.binding = 1;

	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
						b.shader->info.cs.local_size[2], 0);

	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	nir_ssa_def *input_img_deref = &nir_build_deref_var(&b, input_img)->dest.ssa;

	nir_tex_instr *tex = nir_tex_instr_create(b.shader, 3);
	tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
	tex->op = nir_texop_txf;
	tex->src[0].src_type = nir_tex_src_coord;
	tex->src[0].src = nir_src_for_ssa(nir_channels(&b, global_id, 3));
	tex->src[1].src_type = nir_tex_src_lod;
	tex->src[1].src = nir_src_for_ssa(nir_imm_int(&b, 0));
	tex->src[2].src_type = nir_tex_src_texture_deref;
	tex->src[2].src = nir_src_for_ssa(input_img_deref);
	tex->dest_type = nir_type_float;
	tex->is_array = false;
	tex->coord_components = 2;

	nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
	nir_builder_instr_insert(&b, &tex->instr);

	nir_intrinsic_instr *membar = nir_intrinsic_instr_create(b.shader, nir_intrinsic_memory_barrier);
	nir_builder_instr_insert(&b, &membar->instr);

	nir_intrinsic_instr *bar = nir_intrinsic_instr_create(b.shader, nir_intrinsic_control_barrier);
	nir_builder_instr_insert(&b, &bar->instr);

	nir_ssa_def *outval = &tex->dest.ssa;
	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_image_deref_store);
	store->num_components = 4;
	store->src[0] = nir_src_for_ssa(&nir_build_deref_var(&b, output_img)->dest.ssa);
	store->src[1] = nir_src_for_ssa(global_id);
	store->src[2] = nir_src_for_ssa(nir_ssa_undef(&b, 1, 32));
	store->src[3] = nir_src_for_ssa(outval);
	store->src[4] = nir_src_for_ssa(nir_imm_int(&b, 0));

	nir_builder_instr_insert(&b, &store->instr);
	return b.shader;
}

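/* Creates the descriptor set layout, pipeline layout and pipeline for the
 * compute-based DCC decompression path.
 */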
static VkResult
create_dcc_compress_compute(struct radv_device *device)
{
	VkResult result = VK_SUCCESS;
	struct radv_shader_module cs = { .nir = NULL };

	cs.nir = build_dcc_decompress_compute_shader(device);

	VkDescriptorSetLayoutCreateInfo ds_create_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
		.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
		.bindingCount = 2,
		.pBindings = (VkDescriptorSetLayoutBinding[]) {
			{
				.binding = 0,
				.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
			{
				.binding = 1,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
		}
	};

	result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
						&ds_create_info,
						&device->meta_state.alloc,
						&device->meta_state.fast_clear_flush.dcc_decompress_compute_ds_layout);
	if (result != VK_SUCCESS)
		goto cleanup;

	VkPipelineLayoutCreateInfo pl_create_info = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		.setLayoutCount = 1,
		.pSetLayouts = &device->meta_state.fast_clear_flush.dcc_decompress_compute_ds_layout,
		.pushConstantRangeCount = 1,
		.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 8},
	};

	result = radv_CreatePipelineLayout(radv_device_to_handle(device),
					   &pl_create_info,
					   &device->meta_state.alloc,
					   &device->meta_state.fast_clear_flush.dcc_decompress_compute_p_layout);
	if (result != VK_SUCCESS)
		goto cleanup;

	/* compute shader */

	VkPipelineShaderStageCreateInfo pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.fast_clear_flush.dcc_decompress_compute_p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &vk_pipeline_info, NULL,
					     &device->meta_state.fast_clear_flush.dcc_decompress_compute_pipeline);
	if (result != VK_SUCCESS)
		goto cleanup;

cleanup:
	ralloc_free(cs.nir);
	return result;
}

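/* Render pass with a single color attachment that is loaded and stored in
 * TRANSFER_DST_OPTIMAL layout; shared by the three decompression pipelines
 * created below.
 */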
static VkResult
create_pass(struct radv_device *device)
{
	VkResult result;
	VkDevice device_h = radv_device_to_handle(device);
	const VkAllocationCallbacks *alloc = &device->meta_state.alloc;
	VkAttachmentDescription attachment;

	attachment.format = VK_FORMAT_UNDEFINED;
	attachment.samples = 1;
	attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
	attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
	attachment.initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
	attachment.finalLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;

	result = radv_CreateRenderPass(device_h,
		&(VkRenderPassCreateInfo) {
			.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
			.attachmentCount = 1,
			.pAttachments = &attachment,
			.subpassCount = 1,
			.pSubpasses = &(VkSubpassDescription) {
				.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
				.inputAttachmentCount = 0,
				.colorAttachmentCount = 1,
				.pColorAttachments = (VkAttachmentReference[]) {
					{
						.attachment = 0,
						.layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
					},
				},
				.pResolveAttachments = NULL,
				.pDepthStencilAttachment = &(VkAttachmentReference) {
					.attachment = VK_ATTACHMENT_UNUSED,
				},
				.preserveAttachmentCount = 0,
				.pPreserveAttachments = NULL,
			},
			.dependencyCount = 2,
			.pDependencies = (VkSubpassDependency[]) {
				{
					.srcSubpass = VK_SUBPASS_EXTERNAL,
					.dstSubpass = 0,
					.srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
					.dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
					.srcAccessMask = 0,
					.dstAccessMask = 0,
					.dependencyFlags = 0
				},
				{
					.srcSubpass = 0,
					.dstSubpass = VK_SUBPASS_EXTERNAL,
					.srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
					.dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
					.srcAccessMask = 0,
					.dstAccessMask = 0,
					.dependencyFlags = 0
				}
			},
		},
		alloc,
		&device->meta_state.fast_clear_flush.pass);

	return result;
}

static VkResult
create_pipeline_layout(struct radv_device *device, VkPipelineLayout *layout)
{
	VkPipelineLayoutCreateInfo pl_create_info = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		.setLayoutCount = 0,
		.pSetLayouts = NULL,
		.pushConstantRangeCount = 0,
		.pPushConstantRanges = NULL,
	};

	return radv_CreatePipelineLayout(radv_device_to_handle(device),
					 &pl_create_info,
					 &device->meta_state.alloc,
					 layout);
}

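/* Creates the CMASK eliminate, FMASK decompress and DCC decompress graphics
 * pipelines. All three draw a full-screen rectangle with a no-op fragment
 * shader and differ only in the custom blend mode that selects the hardware
 * decompression operation.
 */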
static VkResult
create_pipeline(struct radv_device *device,
		VkShaderModule vs_module_h,
		VkPipelineLayout layout)
{
	VkResult result;
	VkDevice device_h = radv_device_to_handle(device);

	struct radv_shader_module fs_module = {
		.nir = radv_meta_build_nir_fs_noop(),
	};

	if (!fs_module.nir) {
		/* XXX: Need more accurate error */
		result = VK_ERROR_OUT_OF_HOST_MEMORY;
		goto cleanup;
	}

	const VkPipelineShaderStageCreateInfo stages[2] = {
		{
			.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
			.stage = VK_SHADER_STAGE_VERTEX_BIT,
			.module = vs_module_h,
			.pName = "main",
		},
		{
			.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
			.stage = VK_SHADER_STAGE_FRAGMENT_BIT,
			.module = radv_shader_module_to_handle(&fs_module),
			.pName = "main",
		},
	};

	const VkPipelineVertexInputStateCreateInfo vi_state = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
		.vertexBindingDescriptionCount = 0,
		.vertexAttributeDescriptionCount = 0,
	};

	const VkPipelineInputAssemblyStateCreateInfo ia_state = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
		.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
		.primitiveRestartEnable = false,
	};

	const VkPipelineColorBlendStateCreateInfo blend_state = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
		.logicOpEnable = false,
		.attachmentCount = 1,
		.pAttachments = (VkPipelineColorBlendAttachmentState []) {
			{
				.colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
						  VK_COLOR_COMPONENT_G_BIT |
						  VK_COLOR_COMPONENT_B_BIT |
						  VK_COLOR_COMPONENT_A_BIT,
			},
		}
	};
	const VkPipelineRasterizationStateCreateInfo rs_state = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
		.depthClampEnable = false,
		.rasterizerDiscardEnable = false,
		.polygonMode = VK_POLYGON_MODE_FILL,
		.cullMode = VK_CULL_MODE_NONE,
		.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
	};

	result = radv_graphics_pipeline_create(device_h,
		radv_pipeline_cache_to_handle(&device->meta_state.cache),
		&(VkGraphicsPipelineCreateInfo) {
			.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
			.stageCount = 2,
			.pStages = stages,

			.pVertexInputState = &vi_state,
			.pInputAssemblyState = &ia_state,

			.pViewportState = &(VkPipelineViewportStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
				.viewportCount = 1,
				.scissorCount = 1,
			},
			.pRasterizationState = &rs_state,
			.pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
				.rasterizationSamples = 1,
				.sampleShadingEnable = false,
				.pSampleMask = NULL,
				.alphaToCoverageEnable = false,
				.alphaToOneEnable = false,
			},
			.pColorBlendState = &blend_state,
			.pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
				.dynamicStateCount = 2,
				.pDynamicStates = (VkDynamicState[]) {
					VK_DYNAMIC_STATE_VIEWPORT,
					VK_DYNAMIC_STATE_SCISSOR,
				},
			},
			.layout = layout,
			.renderPass = device->meta_state.fast_clear_flush.pass,
			.subpass = 0,
		},
		&(struct radv_graphics_pipeline_create_info) {
			.use_rectlist = true,
			.custom_blend_mode = V_028808_CB_ELIMINATE_FAST_CLEAR,
		},
		&device->meta_state.alloc,
		&device->meta_state.fast_clear_flush.cmask_eliminate_pipeline);
	if (result != VK_SUCCESS)
		goto cleanup;

	result = radv_graphics_pipeline_create(device_h,
		radv_pipeline_cache_to_handle(&device->meta_state.cache),
		&(VkGraphicsPipelineCreateInfo) {
			.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
			.stageCount = 2,
			.pStages = stages,

			.pVertexInputState = &vi_state,
			.pInputAssemblyState = &ia_state,

			.pViewportState = &(VkPipelineViewportStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
				.viewportCount = 1,
				.scissorCount = 1,
			},
			.pRasterizationState = &rs_state,
			.pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
				.rasterizationSamples = 1,
				.sampleShadingEnable = false,
				.pSampleMask = NULL,
				.alphaToCoverageEnable = false,
				.alphaToOneEnable = false,
			},
			.pColorBlendState = &blend_state,
			.pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
				.dynamicStateCount = 2,
				.pDynamicStates = (VkDynamicState[]) {
					VK_DYNAMIC_STATE_VIEWPORT,
					VK_DYNAMIC_STATE_SCISSOR,
				},
			},
			.layout = layout,
			.renderPass = device->meta_state.fast_clear_flush.pass,
			.subpass = 0,
		},
		&(struct radv_graphics_pipeline_create_info) {
			.use_rectlist = true,
			.custom_blend_mode = V_028808_CB_FMASK_DECOMPRESS,
		},
		&device->meta_state.alloc,
		&device->meta_state.fast_clear_flush.fmask_decompress_pipeline);
	if (result != VK_SUCCESS)
		goto cleanup;

	result = radv_graphics_pipeline_create(device_h,
		radv_pipeline_cache_to_handle(&device->meta_state.cache),
		&(VkGraphicsPipelineCreateInfo) {
			.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
			.stageCount = 2,
			.pStages = stages,

			.pVertexInputState = &vi_state,
			.pInputAssemblyState = &ia_state,

			.pViewportState = &(VkPipelineViewportStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
				.viewportCount = 1,
				.scissorCount = 1,
			},
			.pRasterizationState = &rs_state,
			.pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
				.rasterizationSamples = 1,
				.sampleShadingEnable = false,
				.pSampleMask = NULL,
				.alphaToCoverageEnable = false,
				.alphaToOneEnable = false,
			},
			.pColorBlendState = &blend_state,
			.pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
				.dynamicStateCount = 2,
				.pDynamicStates = (VkDynamicState[]) {
					VK_DYNAMIC_STATE_VIEWPORT,
					VK_DYNAMIC_STATE_SCISSOR,
				},
			},
			.layout = layout,
			.renderPass = device->meta_state.fast_clear_flush.pass,
			.subpass = 0,
		},
		&(struct radv_graphics_pipeline_create_info) {
			.use_rectlist = true,
			.custom_blend_mode = V_028808_CB_DCC_DECOMPRESS,
		},
		&device->meta_state.alloc,
		&device->meta_state.fast_clear_flush.dcc_decompress_pipeline);
	if (result != VK_SUCCESS)
		goto cleanup;

	goto cleanup;

cleanup:
	ralloc_free(fs_module.nir);
	return result;
}

void
radv_device_finish_meta_fast_clear_flush_state(struct radv_device *device)
{
	struct radv_meta_state *state = &device->meta_state;

	radv_DestroyPipeline(radv_device_to_handle(device),
			     state->fast_clear_flush.dcc_decompress_pipeline,
			     &state->alloc);
	radv_DestroyPipeline(radv_device_to_handle(device),
			     state->fast_clear_flush.fmask_decompress_pipeline,
			     &state->alloc);
	radv_DestroyPipeline(radv_device_to_handle(device),
			     state->fast_clear_flush.cmask_eliminate_pipeline,
			     &state->alloc);
	radv_DestroyRenderPass(radv_device_to_handle(device),
			       state->fast_clear_flush.pass, &state->alloc);
	radv_DestroyPipelineLayout(radv_device_to_handle(device),
				   state->fast_clear_flush.p_layout,
				   &state->alloc);

	radv_DestroyPipeline(radv_device_to_handle(device),
			     state->fast_clear_flush.dcc_decompress_compute_pipeline,
			     &state->alloc);
	radv_DestroyPipelineLayout(radv_device_to_handle(device),
				   state->fast_clear_flush.dcc_decompress_compute_p_layout,
				   &state->alloc);
	radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
					state->fast_clear_flush.dcc_decompress_compute_ds_layout,
					&state->alloc);
}

static VkResult
radv_device_init_meta_fast_clear_flush_state_internal(struct radv_device *device)
{
	VkResult res = VK_SUCCESS;

	mtx_lock(&device->meta_state.mtx);
	if (device->meta_state.fast_clear_flush.cmask_eliminate_pipeline) {
		mtx_unlock(&device->meta_state.mtx);
		return VK_SUCCESS;
	}

	struct radv_shader_module vs_module = { .nir = radv_meta_build_nir_vs_generate_vertices() };
	if (!vs_module.nir) {
		/* XXX: Need more accurate error */
		res = VK_ERROR_OUT_OF_HOST_MEMORY;
		goto fail;
	}

	res = create_pass(device);
	if (res != VK_SUCCESS)
		goto fail;

	res = create_pipeline_layout(device,
				     &device->meta_state.fast_clear_flush.p_layout);
	if (res != VK_SUCCESS)
		goto fail;

	VkShaderModule vs_module_h = radv_shader_module_to_handle(&vs_module);
	res = create_pipeline(device, vs_module_h,
			      device->meta_state.fast_clear_flush.p_layout);
	if (res != VK_SUCCESS)
		goto fail;

	res = create_dcc_compress_compute(device);
	if (res != VK_SUCCESS)
		goto fail;

	goto cleanup;

fail:
	radv_device_finish_meta_fast_clear_flush_state(device);

cleanup:
	ralloc_free(vs_module.nir);
	mtx_unlock(&device->meta_state.mtx);

	return res;
}

VkResult
radv_device_init_meta_fast_clear_flush_state(struct radv_device *device, bool on_demand)
{
	if (on_demand)
		return VK_SUCCESS;

	return radv_device_init_meta_fast_clear_flush_state_internal(device);
}

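/* Points the GPU predication state at the image's FCE/DCC predicate when
 * value is true, or at VA 0 (no predication source) when it is false.
 */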
static void
radv_emit_set_predication_state_from_image(struct radv_cmd_buffer *cmd_buffer,
					   struct radv_image *image,
					   uint64_t pred_offset, bool value)
{
	uint64_t va = 0;

	if (value) {
		va = radv_buffer_get_va(image->bo) + image->offset;
		va += pred_offset;
	}

	si_emit_set_predication_state(cmd_buffer, true, va);
}

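/* Decompresses a single mip level / layer by binding it as the color
 * attachment of a temporary framebuffer and drawing one rectangle with the
 * currently bound decompression pipeline.
 */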
static void
radv_process_color_image_layer(struct radv_cmd_buffer *cmd_buffer,
			       struct radv_image *image,
			       const VkImageSubresourceRange *range,
			       int level, int layer)
{
	struct radv_device *device = cmd_buffer->device;
	struct radv_image_view iview;
	uint32_t width, height;

	width = radv_minify(image->info.width, range->baseMipLevel + level);
	height = radv_minify(image->info.height, range->baseMipLevel + level);

	radv_image_view_init(&iview, device,
		&(VkImageViewCreateInfo) {
			.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
			.image = radv_image_to_handle(image),
			.viewType = radv_meta_get_view_type(image),
			.format = image->vk_format,
			.subresourceRange = {
				.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
				.baseMipLevel = range->baseMipLevel + level,
				.levelCount = 1,
				.baseArrayLayer = range->baseArrayLayer + layer,
				.layerCount = 1,
			},
		}, NULL);

	VkFramebuffer fb_h;
	radv_CreateFramebuffer(radv_device_to_handle(device),
		&(VkFramebufferCreateInfo) {
			.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
			.attachmentCount = 1,
			.pAttachments = (VkImageView[]) {
				radv_image_view_to_handle(&iview)
			},
			.width = width,
			.height = height,
			.layers = 1
		}, &cmd_buffer->pool->alloc, &fb_h);

	radv_cmd_buffer_begin_render_pass(cmd_buffer,
		&(VkRenderPassBeginInfo) {
			.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
			.renderPass = device->meta_state.fast_clear_flush.pass,
			.framebuffer = fb_h,
			.renderArea = {
				.offset = { 0, 0, },
				.extent = { width, height, }
			},
			.clearValueCount = 0,
			.pClearValues = NULL,
		});

	radv_cmd_buffer_set_subpass(cmd_buffer,
				    &cmd_buffer->state.pass->subpasses[0]);

	radv_CmdDraw(radv_cmd_buffer_to_handle(cmd_buffer), 3, 1, 0, 0);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	radv_cmd_buffer_end_render_pass(cmd_buffer);

	radv_DestroyFramebuffer(radv_device_to_handle(device), fb_h,
				&cmd_buffer->pool->alloc);
}

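/* Selects the right decompression pipeline (DCC decompress, FMASK decompress
 * or CMASK eliminate) and runs it over every level and layer of the given
 * subresource range.
 */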
static void
radv_process_color_image(struct radv_cmd_buffer *cmd_buffer,
			 struct radv_image *image,
			 const VkImageSubresourceRange *subresourceRange,
			 bool decompress_dcc)
{
	struct radv_meta_saved_state saved_state;
	VkPipeline *pipeline;

	if (decompress_dcc && radv_dcc_enabled(image, subresourceRange->baseMipLevel)) {
		pipeline = &cmd_buffer->device->meta_state.fast_clear_flush.dcc_decompress_pipeline;
	} else if (radv_image_has_fmask(image) && !image->tc_compatible_cmask) {
		pipeline = &cmd_buffer->device->meta_state.fast_clear_flush.fmask_decompress_pipeline;
	} else {
		pipeline = &cmd_buffer->device->meta_state.fast_clear_flush.cmask_eliminate_pipeline;
	}

	if (!*pipeline) {
		VkResult ret;

		ret = radv_device_init_meta_fast_clear_flush_state_internal(cmd_buffer->device);
		if (ret != VK_SUCCESS) {
			cmd_buffer->record_result = ret;
			return;
		}
	}

	radv_meta_save(&saved_state, cmd_buffer,
		       RADV_META_SAVE_GRAPHICS_PIPELINE |
		       RADV_META_SAVE_PASS);

	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			     VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);

	for (uint32_t l = 0; l < radv_get_levelCount(image, subresourceRange); ++l) {
		uint32_t width, height;

		/* Do not decompress levels without DCC. */
		if (decompress_dcc &&
		    !radv_dcc_enabled(image, subresourceRange->baseMipLevel + l))
			continue;

		width = radv_minify(image->info.width,
				    subresourceRange->baseMipLevel + l);
		height = radv_minify(image->info.height,
				     subresourceRange->baseMipLevel + l);

		radv_CmdSetViewport(radv_cmd_buffer_to_handle(cmd_buffer), 0, 1,
			&(VkViewport) {
				.x = 0,
				.y = 0,
				.width = width,
				.height = height,
				.minDepth = 0.0f,
				.maxDepth = 1.0f
			});

		radv_CmdSetScissor(radv_cmd_buffer_to_handle(cmd_buffer), 0, 1,
			&(VkRect2D) {
				.offset = { 0, 0 },
				.extent = { width, height },
			});

		for (uint32_t s = 0; s < radv_get_layerCount(image, subresourceRange); s++) {
			radv_process_color_image_layer(cmd_buffer, image,
						       subresourceRange, l, s);
		}
	}

	radv_meta_restore(&saved_state, cmd_buffer);
}

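/* Performs a fast-clear eliminate, or a full DCC decompression when
 * decompress_dcc is set. For DCC-enabled images the work is predicated on the
 * per-level FCE/DCC predicate so it can be skipped when nothing was
 * fast-cleared, and the predicates are cleared afterwards.
 */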
static void
radv_emit_color_decompress(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image,
			   const VkImageSubresourceRange *subresourceRange,
			   bool decompress_dcc)
{
	bool old_predicating = false;

	assert(cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL);

	if (radv_dcc_enabled(image, subresourceRange->baseMipLevel)) {
		uint64_t pred_offset = decompress_dcc ? image->dcc_pred_offset :
							image->fce_pred_offset;
		pred_offset += 8 * subresourceRange->baseMipLevel;

		old_predicating = cmd_buffer->state.predicating;

		radv_emit_set_predication_state_from_image(cmd_buffer, image, pred_offset, true);
		cmd_buffer->state.predicating = true;
	}

	radv_process_color_image(cmd_buffer, image, subresourceRange,
				 decompress_dcc);

	if (radv_dcc_enabled(image, subresourceRange->baseMipLevel)) {
		uint64_t pred_offset = decompress_dcc ? image->dcc_pred_offset :
							image->fce_pred_offset;
		pred_offset += 8 * subresourceRange->baseMipLevel;

		cmd_buffer->state.predicating = old_predicating;

		radv_emit_set_predication_state_from_image(cmd_buffer, image, pred_offset, false);

		if (cmd_buffer->state.predication_type != -1) {
			/* Restore previous conditional rendering user state. */
			si_emit_set_predication_state(cmd_buffer,
						      cmd_buffer->state.predication_type,
						      cmd_buffer->state.predication_va);
		}
	}

	if (radv_dcc_enabled(image, subresourceRange->baseMipLevel)) {
		/* Clear the image's fast-clear eliminate predicate because
		 * FMASK and DCC also imply a fast-clear eliminate.
		 */
		radv_update_fce_metadata(cmd_buffer, image, subresourceRange, false);

		/* Mark the image as being decompressed. */
		if (decompress_dcc)
			radv_update_dcc_metadata(cmd_buffer, image, subresourceRange, false);
	}
}

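/* Eliminates the fast-clear state of a color image in place (FMASK decompress
 * or plain fast-clear eliminate, depending on the image).
 */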
void
radv_fast_clear_flush_image_inplace(struct radv_cmd_buffer *cmd_buffer,
				    struct radv_image *image,
				    const VkImageSubresourceRange *subresourceRange)
{
	struct radv_barrier_data barrier = {};

	if (radv_image_has_fmask(image)) {
		barrier.layout_transitions.fmask_decompress = 1;
	} else {
		barrier.layout_transitions.fast_clear_eliminate = 1;
	}
	radv_describe_layout_transition(cmd_buffer, &barrier);

	radv_emit_color_decompress(cmd_buffer, image, subresourceRange, false);
}

static void
radv_decompress_dcc_gfx(struct radv_cmd_buffer *cmd_buffer,
			struct radv_image *image,
			const VkImageSubresourceRange *subresourceRange)
{
	radv_emit_color_decompress(cmd_buffer, image, subresourceRange, true);
}

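/* DCC decompression for compute queues: copies the image onto itself with the
 * compute pipeline above, using a store view that has compression disabled,
 * then resets the DCC metadata to the fully-expanded state.
 */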
static void
radv_decompress_dcc_compute(struct radv_cmd_buffer *cmd_buffer,
			    struct radv_image *image,
			    const VkImageSubresourceRange *subresourceRange)
{
	struct radv_meta_saved_state saved_state;
	struct radv_image_view load_iview = {0};
	struct radv_image_view store_iview = {0};
	struct radv_device *device = cmd_buffer->device;

	/* This assumes the image is 2d with 1 layer */
	struct radv_cmd_state *state = &cmd_buffer->state;

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
			     RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	if (!cmd_buffer->device->meta_state.fast_clear_flush.cmask_eliminate_pipeline) {
		VkResult ret = radv_device_init_meta_fast_clear_flush_state_internal(cmd_buffer->device);
		if (ret != VK_SUCCESS) {
			cmd_buffer->record_result = ret;
			return;
		}
	}

	radv_meta_save(&saved_state, cmd_buffer, RADV_META_SAVE_DESCRIPTORS |
						 RADV_META_SAVE_COMPUTE_PIPELINE);

	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			     VK_PIPELINE_BIND_POINT_COMPUTE,
			     device->meta_state.fast_clear_flush.dcc_decompress_compute_pipeline);

	for (uint32_t l = 0; l < radv_get_levelCount(image, subresourceRange); l++) {
		uint32_t width, height;

		/* Do not decompress levels without DCC. */
		if (!radv_dcc_enabled(image, subresourceRange->baseMipLevel + l))
			continue;

		width = radv_minify(image->info.width,
				    subresourceRange->baseMipLevel + l);
		height = radv_minify(image->info.height,
				     subresourceRange->baseMipLevel + l);

		for (uint32_t s = 0; s < radv_get_layerCount(image, subresourceRange); s++) {
			radv_image_view_init(&load_iview, cmd_buffer->device,
				&(VkImageViewCreateInfo) {
					.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
					.image = radv_image_to_handle(image),
					.viewType = VK_IMAGE_VIEW_TYPE_2D,
					.format = image->vk_format,
					.subresourceRange = {
						.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
						.baseMipLevel = subresourceRange->baseMipLevel + l,
						.levelCount = 1,
						.baseArrayLayer = subresourceRange->baseArrayLayer + s,
						.layerCount = 1
					},
				}, NULL);
			radv_image_view_init(&store_iview, cmd_buffer->device,
				&(VkImageViewCreateInfo) {
					.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
					.image = radv_image_to_handle(image),
					.viewType = VK_IMAGE_VIEW_TYPE_2D,
					.format = image->vk_format,
					.subresourceRange = {
						.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
						.baseMipLevel = subresourceRange->baseMipLevel + l,
						.levelCount = 1,
						.baseArrayLayer = subresourceRange->baseArrayLayer + s,
						.layerCount = 1
					},
				}, &(struct radv_image_view_extra_create_info) {
					.disable_compression = true
				});

			radv_meta_push_descriptor_set(cmd_buffer,
				VK_PIPELINE_BIND_POINT_COMPUTE,
				device->meta_state.fast_clear_flush.dcc_decompress_compute_p_layout,
				0, /* set */
				2, /* descriptorWriteCount */
				(VkWriteDescriptorSet[]) {
					{
						.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						.dstBinding = 0,
						.dstArrayElement = 0,
						.descriptorCount = 1,
						.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
						.pImageInfo = (VkDescriptorImageInfo[]) {
							{
								.sampler = VK_NULL_HANDLE,
								.imageView = radv_image_view_to_handle(&load_iview),
								.imageLayout = VK_IMAGE_LAYOUT_GENERAL,
							},
						}
					},
					{
						.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						.dstBinding = 1,
						.dstArrayElement = 0,
						.descriptorCount = 1,
						.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
						.pImageInfo = (VkDescriptorImageInfo[]) {
							{
								.sampler = VK_NULL_HANDLE,
								.imageView = radv_image_view_to_handle(&store_iview),
								.imageLayout = VK_IMAGE_LAYOUT_GENERAL,
							},
						}
					}
				});

			radv_unaligned_dispatch(cmd_buffer, width, height, 1);
		}
	}

	/* Mark this image as actually being decompressed. */
	radv_update_dcc_metadata(cmd_buffer, image, subresourceRange, false);

	/* The fill buffer below does its own saving */
	radv_meta_restore(&saved_state, cmd_buffer);

	state->flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
			     RADV_CMD_FLAG_INV_VCACHE;

	/* Initialize the DCC metadata as "fully expanded". */
	radv_initialize_dcc(cmd_buffer, image, subresourceRange, 0xffffffff);
}

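/* Fully decompresses DCC before the image is used by an incompatible queue or
 * layout, choosing the graphics or compute path based on the command buffer's
 * queue family.
 */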
void
radv_decompress_dcc(struct radv_cmd_buffer *cmd_buffer,
		    struct radv_image *image,
		    const VkImageSubresourceRange *subresourceRange)
{
	struct radv_barrier_data barrier = {};

	barrier.layout_transitions.dcc_decompress = 1;
	radv_describe_layout_transition(cmd_buffer, &barrier);

	if (cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL)
		radv_decompress_dcc_gfx(cmd_buffer, image, subresourceRange);
	else
		radv_decompress_dcc_compute(cmd_buffer, image, subresourceRange);
}
948 }