radv: fix DCC fast clear code for intensity formats
[mesa.git] src/amd/vulkan/radv_meta_clear.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "radv_debug.h"
25 #include "radv_meta.h"
26 #include "radv_private.h"
27 #include "nir/nir_builder.h"
28
29 #include "util/format_rgb9e5.h"
30 #include "vk_format.h"
31
32 enum {
33 DEPTH_CLEAR_SLOW,
34 DEPTH_CLEAR_FAST_EXPCLEAR,
35 DEPTH_CLEAR_FAST_NO_EXPCLEAR
36 };
37
38 static void
39 build_color_shaders(struct nir_shader **out_vs,
40 struct nir_shader **out_fs,
41 uint32_t frag_output)
42 {
43 nir_builder vs_b;
44 nir_builder fs_b;
45
46 nir_builder_init_simple_shader(&vs_b, NULL, MESA_SHADER_VERTEX, NULL);
47 nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
48
49 vs_b.shader->info.name = ralloc_strdup(vs_b.shader, "meta_clear_color_vs");
50 fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "meta_clear_color_fs");
51
52 const struct glsl_type *position_type = glsl_vec4_type();
53 const struct glsl_type *color_type = glsl_vec4_type();
54
55 nir_variable *vs_out_pos =
56 nir_variable_create(vs_b.shader, nir_var_shader_out, position_type,
57 "gl_Position");
58 vs_out_pos->data.location = VARYING_SLOT_POS;
59
60 nir_intrinsic_instr *in_color_load = nir_intrinsic_instr_create(fs_b.shader, nir_intrinsic_load_push_constant);
61 nir_intrinsic_set_base(in_color_load, 0);
62 nir_intrinsic_set_range(in_color_load, 16);
63 in_color_load->src[0] = nir_src_for_ssa(nir_imm_int(&fs_b, 0));
64 in_color_load->num_components = 4;
65 nir_ssa_dest_init(&in_color_load->instr, &in_color_load->dest, 4, 32, "clear color");
66 nir_builder_instr_insert(&fs_b, &in_color_load->instr);
67
68 nir_variable *fs_out_color =
69 nir_variable_create(fs_b.shader, nir_var_shader_out, color_type,
70 "f_color");
71 fs_out_color->data.location = FRAG_RESULT_DATA0 + frag_output;
72
73 nir_store_var(&fs_b, fs_out_color, &in_color_load->dest.ssa, 0xf);
74
75 nir_ssa_def *outvec = radv_meta_gen_rect_vertices(&vs_b);
76 nir_store_var(&vs_b, vs_out_pos, outvec, 0xf);
77
78 const struct glsl_type *layer_type = glsl_int_type();
79 nir_variable *vs_out_layer =
80 nir_variable_create(vs_b.shader, nir_var_shader_out, layer_type,
81 "v_layer");
82 vs_out_layer->data.location = VARYING_SLOT_LAYER;
83 vs_out_layer->data.interpolation = INTERP_MODE_FLAT;
84 nir_ssa_def *inst_id = nir_load_instance_id(&vs_b);
85 nir_ssa_def *base_instance = nir_load_base_instance(&vs_b);
86
87 nir_ssa_def *layer_id = nir_iadd(&vs_b, inst_id, base_instance);
88 nir_store_var(&vs_b, vs_out_layer, layer_id, 0x1);
89
90 *out_vs = vs_b.shader;
91 *out_fs = fs_b.shader;
92 }
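/* Rough GLSL equivalent of the shaders assembled above (illustrative
 * sketch only; the real shaders are built directly in NIR):
 *
 *   // VS: positions come from radv_meta_gen_rect_vertices() and the
 *   //     layer from gl_InstanceIndex, so one instance is drawn per layer.
 *   // FS (output location is FRAG_RESULT_DATA0 + frag_output):
 *   layout(push_constant) uniform ClearColor { vec4 color; };
 *   layout(location = 0) out vec4 f_color;
 *   void main() { f_color = color; }
 */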
93
94 static VkResult
95 create_pipeline(struct radv_device *device,
96 struct radv_render_pass *render_pass,
97 uint32_t samples,
98 struct nir_shader *vs_nir,
99 struct nir_shader *fs_nir,
100 const VkPipelineVertexInputStateCreateInfo *vi_state,
101 const VkPipelineDepthStencilStateCreateInfo *ds_state,
102 const VkPipelineColorBlendStateCreateInfo *cb_state,
103 const VkPipelineLayout layout,
104 const struct radv_graphics_pipeline_create_info *extra,
105 const VkAllocationCallbacks *alloc,
106 VkPipeline *pipeline)
107 {
108 VkDevice device_h = radv_device_to_handle(device);
109 VkResult result;
110
111 struct radv_shader_module vs_m = { .nir = vs_nir };
112 struct radv_shader_module fs_m = { .nir = fs_nir };
113
114 result = radv_graphics_pipeline_create(device_h,
115 radv_pipeline_cache_to_handle(&device->meta_state.cache),
116 &(VkGraphicsPipelineCreateInfo) {
117 .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
118 .stageCount = fs_nir ? 2 : 1,
119 .pStages = (VkPipelineShaderStageCreateInfo[]) {
120 {
121 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
122 .stage = VK_SHADER_STAGE_VERTEX_BIT,
123 .module = radv_shader_module_to_handle(&vs_m),
124 .pName = "main",
125 },
126 {
127 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
128 .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
129 .module = radv_shader_module_to_handle(&fs_m),
130 .pName = "main",
131 },
132 },
133 .pVertexInputState = vi_state,
134 .pInputAssemblyState = &(VkPipelineInputAssemblyStateCreateInfo) {
135 .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
136 .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
137 .primitiveRestartEnable = false,
138 },
139 .pViewportState = &(VkPipelineViewportStateCreateInfo) {
140 .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
141 .viewportCount = 1,
142 .scissorCount = 1,
143 },
144 .pRasterizationState = &(VkPipelineRasterizationStateCreateInfo) {
145 .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
146 .rasterizerDiscardEnable = false,
147 .polygonMode = VK_POLYGON_MODE_FILL,
148 .cullMode = VK_CULL_MODE_NONE,
149 .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
150 .depthBiasEnable = false,
151 },
152 .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
153 .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
154 .rasterizationSamples = samples,
155 .sampleShadingEnable = false,
156 .pSampleMask = NULL,
157 .alphaToCoverageEnable = false,
158 .alphaToOneEnable = false,
159 },
160 .pDepthStencilState = ds_state,
161 .pColorBlendState = cb_state,
162 .pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
163 /* The meta clear pipeline declares all state as dynamic.
164 * As a consequence, vkCmdBindPipeline writes no dynamic state
165 * to the cmd buffer. Therefore, at the end of the meta clear,
166 * we need only restore the dynamic state that was set with vkCmdSet*.
167 */
168 .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
169 .dynamicStateCount = 8,
170 .pDynamicStates = (VkDynamicState[]) {
171 /* Everything except stencil write mask */
172 VK_DYNAMIC_STATE_VIEWPORT,
173 VK_DYNAMIC_STATE_SCISSOR,
174 VK_DYNAMIC_STATE_LINE_WIDTH,
175 VK_DYNAMIC_STATE_DEPTH_BIAS,
176 VK_DYNAMIC_STATE_BLEND_CONSTANTS,
177 VK_DYNAMIC_STATE_DEPTH_BOUNDS,
178 VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
179 VK_DYNAMIC_STATE_STENCIL_REFERENCE,
180 },
181 },
182 .layout = layout,
183 .flags = 0,
184 .renderPass = radv_render_pass_to_handle(render_pass),
185 .subpass = 0,
186 },
187 extra,
188 alloc,
189 pipeline);
190
191 ralloc_free(vs_nir);
192 ralloc_free(fs_nir);
193
194 return result;
195 }
196
197 static VkResult
198 create_color_renderpass(struct radv_device *device,
199 VkFormat vk_format,
200 uint32_t samples,
201 VkRenderPass *pass)
202 {
203 mtx_lock(&device->meta_state.mtx);
204 if (*pass) {
205 mtx_unlock(&device->meta_state.mtx);
206 return VK_SUCCESS;
207 }
208
209 VkResult result = radv_CreateRenderPass(radv_device_to_handle(device),
210 &(VkRenderPassCreateInfo) {
211 .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
212 .attachmentCount = 1,
213 .pAttachments = &(VkAttachmentDescription) {
214 .format = vk_format,
215 .samples = samples,
216 .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
217 .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
218 .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
219 .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
220 },
221 .subpassCount = 1,
222 .pSubpasses = &(VkSubpassDescription) {
223 .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
224 .inputAttachmentCount = 0,
225 .colorAttachmentCount = 1,
226 .pColorAttachments = &(VkAttachmentReference) {
227 .attachment = 0,
228 .layout = VK_IMAGE_LAYOUT_GENERAL,
229 },
230 .pResolveAttachments = NULL,
231 .pDepthStencilAttachment = &(VkAttachmentReference) {
232 .attachment = VK_ATTACHMENT_UNUSED,
233 .layout = VK_IMAGE_LAYOUT_GENERAL,
234 },
235 .preserveAttachmentCount = 0,
236 .pPreserveAttachments = NULL,
237 },
238 .dependencyCount = 0,
239 }, &device->meta_state.alloc, pass);
240 mtx_unlock(&device->meta_state.mtx);
241 return result;
242 }
243
244 static VkResult
245 create_color_pipeline(struct radv_device *device,
246 uint32_t samples,
247 uint32_t frag_output,
248 VkPipeline *pipeline,
249 VkRenderPass pass)
250 {
251 struct nir_shader *vs_nir;
252 struct nir_shader *fs_nir;
253 VkResult result;
254
255 mtx_lock(&device->meta_state.mtx);
256 if (*pipeline) {
257 mtx_unlock(&device->meta_state.mtx);
258 return VK_SUCCESS;
259 }
260
261 build_color_shaders(&vs_nir, &fs_nir, frag_output);
262
263 const VkPipelineVertexInputStateCreateInfo vi_state = {
264 .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
265 .vertexBindingDescriptionCount = 0,
266 .vertexAttributeDescriptionCount = 0,
267 };
268
269 const VkPipelineDepthStencilStateCreateInfo ds_state = {
270 .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
271 .depthTestEnable = false,
272 .depthWriteEnable = false,
273 .depthBoundsTestEnable = false,
274 .stencilTestEnable = false,
275 };
276
277 VkPipelineColorBlendAttachmentState blend_attachment_state[MAX_RTS] = { 0 };
278 blend_attachment_state[frag_output] = (VkPipelineColorBlendAttachmentState) {
279 .blendEnable = false,
280 .colorWriteMask = VK_COLOR_COMPONENT_A_BIT |
281 VK_COLOR_COMPONENT_R_BIT |
282 VK_COLOR_COMPONENT_G_BIT |
283 VK_COLOR_COMPONENT_B_BIT,
284 };
285
286 const VkPipelineColorBlendStateCreateInfo cb_state = {
287 .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
288 .logicOpEnable = false,
289 .attachmentCount = MAX_RTS,
290 .pAttachments = blend_attachment_state
291 };
292
293
294 struct radv_graphics_pipeline_create_info extra = {
295 .use_rectlist = true,
296 };
297 result = create_pipeline(device, radv_render_pass_from_handle(pass),
298 samples, vs_nir, fs_nir, &vi_state, &ds_state, &cb_state,
299 device->meta_state.clear_color_p_layout,
300 &extra, &device->meta_state.alloc, pipeline);
301
302 mtx_unlock(&device->meta_state.mtx);
303 return result;
304 }
305
306 static void
307 finish_meta_clear_htile_mask_state(struct radv_device *device)
308 {
309 struct radv_meta_state *state = &device->meta_state;
310
311 radv_DestroyPipeline(radv_device_to_handle(device),
312 state->clear_htile_mask_pipeline,
313 &state->alloc);
314 radv_DestroyPipelineLayout(radv_device_to_handle(device),
315 state->clear_htile_mask_p_layout,
316 &state->alloc);
317 radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
318 state->clear_htile_mask_ds_layout,
319 &state->alloc);
320 }
321
322 void
323 radv_device_finish_meta_clear_state(struct radv_device *device)
324 {
325 struct radv_meta_state *state = &device->meta_state;
326
327 for (uint32_t i = 0; i < ARRAY_SIZE(state->clear); ++i) {
328 for (uint32_t j = 0; j < ARRAY_SIZE(state->clear[i].color_pipelines); ++j) {
329 radv_DestroyPipeline(radv_device_to_handle(device),
330 state->clear[i].color_pipelines[j],
331 &state->alloc);
332 radv_DestroyRenderPass(radv_device_to_handle(device),
333 state->clear[i].render_pass[j],
334 &state->alloc);
335 }
336
337 for (uint32_t j = 0; j < NUM_DEPTH_CLEAR_PIPELINES; j++) {
338 radv_DestroyPipeline(radv_device_to_handle(device),
339 state->clear[i].depth_only_pipeline[j],
340 &state->alloc);
341 radv_DestroyPipeline(radv_device_to_handle(device),
342 state->clear[i].stencil_only_pipeline[j],
343 &state->alloc);
344 radv_DestroyPipeline(radv_device_to_handle(device),
345 state->clear[i].depthstencil_pipeline[j],
346 &state->alloc);
347 }
348 radv_DestroyRenderPass(radv_device_to_handle(device),
349 state->clear[i].depthstencil_rp,
350 &state->alloc);
351 }
352 radv_DestroyPipelineLayout(radv_device_to_handle(device),
353 state->clear_color_p_layout,
354 &state->alloc);
355 radv_DestroyPipelineLayout(radv_device_to_handle(device),
356 state->clear_depth_p_layout,
357 &state->alloc);
358
359 finish_meta_clear_htile_mask_state(device);
360 }
361
362 static void
363 emit_color_clear(struct radv_cmd_buffer *cmd_buffer,
364 const VkClearAttachment *clear_att,
365 const VkClearRect *clear_rect,
366 uint32_t view_mask)
367 {
368 struct radv_device *device = cmd_buffer->device;
369 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
370 const uint32_t subpass_att = clear_att->colorAttachment;
371 const uint32_t pass_att = subpass->color_attachments[subpass_att].attachment;
372 const struct radv_image_view *iview = cmd_buffer->state.attachments ?
373 cmd_buffer->state.attachments[pass_att].iview : NULL;
374 uint32_t samples, samples_log2;
375 VkFormat format;
376 unsigned fs_key;
377 VkClearColorValue clear_value = clear_att->clearValue.color;
378 VkCommandBuffer cmd_buffer_h = radv_cmd_buffer_to_handle(cmd_buffer);
379 VkPipeline pipeline;
380
381 /* When a framebuffer is bound to the current command buffer, get the
382 * number of samples from it. Otherwise, get the number of samples from
383 * the render pass because it's likely a secondary command buffer.
384 */
385 if (iview) {
386 samples = iview->image->info.samples;
387 format = iview->vk_format;
388 } else {
389 samples = cmd_buffer->state.pass->attachments[pass_att].samples;
390 format = cmd_buffer->state.pass->attachments[pass_att].format;
391 }
392
393 samples_log2 = ffs(samples) - 1;
394 fs_key = radv_format_meta_fs_key(format);
395
396 if (fs_key == -1) {
397 radv_finishme("color clears incomplete");
398 return;
399 }
400
401 if (device->meta_state.clear[samples_log2].render_pass[fs_key] == VK_NULL_HANDLE) {
402 VkResult ret = create_color_renderpass(device, radv_fs_key_format_exemplars[fs_key],
403 samples,
404 &device->meta_state.clear[samples_log2].render_pass[fs_key]);
405 if (ret != VK_SUCCESS) {
406 cmd_buffer->record_result = ret;
407 return;
408 }
409 }
410
411 if (device->meta_state.clear[samples_log2].color_pipelines[fs_key] == VK_NULL_HANDLE) {
412 VkResult ret = create_color_pipeline(device, samples, 0,
413 &device->meta_state.clear[samples_log2].color_pipelines[fs_key],
414 device->meta_state.clear[samples_log2].render_pass[fs_key]);
415 if (ret != VK_SUCCESS) {
416 cmd_buffer->record_result = ret;
417 return;
418 }
419 }
420
421 pipeline = device->meta_state.clear[samples_log2].color_pipelines[fs_key];
422 if (!pipeline) {
423 radv_finishme("color clears incomplete");
424 return;
425 }
426 assert(samples_log2 < ARRAY_SIZE(device->meta_state.clear));
427 assert(pipeline);
428 assert(clear_att->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
429 assert(clear_att->colorAttachment < subpass->color_count);
430
431 radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
432 device->meta_state.clear_color_p_layout,
433 VK_SHADER_STAGE_FRAGMENT_BIT, 0, 16,
434 &clear_value);
435
436 struct radv_subpass clear_subpass = {
437 .color_count = 1,
438 .color_attachments = (struct radv_subpass_attachment[]) {
439 subpass->color_attachments[clear_att->colorAttachment]
440 },
441 .depth_stencil_attachment = NULL,
442 };
443
444 radv_cmd_buffer_set_subpass(cmd_buffer, &clear_subpass);
445
446 radv_CmdBindPipeline(cmd_buffer_h, VK_PIPELINE_BIND_POINT_GRAPHICS,
447 pipeline);
448
449 radv_CmdSetViewport(radv_cmd_buffer_to_handle(cmd_buffer), 0, 1, &(VkViewport) {
450 .x = clear_rect->rect.offset.x,
451 .y = clear_rect->rect.offset.y,
452 .width = clear_rect->rect.extent.width,
453 .height = clear_rect->rect.extent.height,
454 .minDepth = 0.0f,
455 .maxDepth = 1.0f
456 });
457
458 radv_CmdSetScissor(radv_cmd_buffer_to_handle(cmd_buffer), 0, 1, &clear_rect->rect);
459
460 if (view_mask) {
461 unsigned i;
462 for_each_bit(i, view_mask)
463 radv_CmdDraw(cmd_buffer_h, 3, 1, 0, i);
464 } else {
465 radv_CmdDraw(cmd_buffer_h, 3, clear_rect->layerCount, 0, clear_rect->baseArrayLayer);
466 }
467
468 radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
469 }
470
471
472 static void
473 build_depthstencil_shader(struct nir_shader **out_vs, struct nir_shader **out_fs)
474 {
475 nir_builder vs_b, fs_b;
476
477 nir_builder_init_simple_shader(&vs_b, NULL, MESA_SHADER_VERTEX, NULL);
478 nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
479
480 vs_b.shader->info.name = ralloc_strdup(vs_b.shader, "meta_clear_depthstencil_vs");
481 fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "meta_clear_depthstencil_fs");
482 const struct glsl_type *position_out_type = glsl_vec4_type();
483
484 nir_variable *vs_out_pos =
485 nir_variable_create(vs_b.shader, nir_var_shader_out, position_out_type,
486 "gl_Position");
487 vs_out_pos->data.location = VARYING_SLOT_POS;
488
489 nir_intrinsic_instr *in_color_load = nir_intrinsic_instr_create(fs_b.shader, nir_intrinsic_load_push_constant);
490 nir_intrinsic_set_base(in_color_load, 0);
491 nir_intrinsic_set_range(in_color_load, 4);
492 in_color_load->src[0] = nir_src_for_ssa(nir_imm_int(&fs_b, 0));
493 in_color_load->num_components = 1;
494 nir_ssa_dest_init(&in_color_load->instr, &in_color_load->dest, 1, 32, "depth value");
495 nir_builder_instr_insert(&fs_b, &in_color_load->instr);
496
497 nir_variable *fs_out_depth =
498 nir_variable_create(fs_b.shader, nir_var_shader_out,
499 glsl_int_type(), "f_depth");
500 fs_out_depth->data.location = FRAG_RESULT_DEPTH;
501 nir_store_var(&fs_b, fs_out_depth, &in_color_load->dest.ssa, 0x1);
502
503 nir_ssa_def *outvec = radv_meta_gen_rect_vertices(&vs_b);
504 nir_store_var(&vs_b, vs_out_pos, outvec, 0xf);
505
506 const struct glsl_type *layer_type = glsl_int_type();
507 nir_variable *vs_out_layer =
508 nir_variable_create(vs_b.shader, nir_var_shader_out, layer_type,
509 "v_layer");
510 vs_out_layer->data.location = VARYING_SLOT_LAYER;
511 vs_out_layer->data.interpolation = INTERP_MODE_FLAT;
512 nir_ssa_def *inst_id = nir_load_instance_id(&vs_b);
513 nir_ssa_def *base_instance = nir_load_base_instance(&vs_b);
514
515 nir_ssa_def *layer_id = nir_iadd(&vs_b, inst_id, base_instance);
516 nir_store_var(&vs_b, vs_out_layer, layer_id, 0x1);
517
518 *out_vs = vs_b.shader;
519 *out_fs = fs_b.shader;
520 }
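/* In short, the fragment shader built here is roughly (illustrative GLSL
 * sketch):
 *
 *   layout(push_constant) uniform ClearDepth { float depth; };
 *   void main() { gl_FragDepth = depth; }
 *
 * Stencil clears are handled by the pipelines below through
 * VK_STENCIL_OP_REPLACE and the dynamic stencil reference, not by the
 * shader itself.
 */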
521
522 static VkResult
523 create_depthstencil_renderpass(struct radv_device *device,
524 uint32_t samples,
525 VkRenderPass *render_pass)
526 {
527 mtx_lock(&device->meta_state.mtx);
528 if (*render_pass) {
529 mtx_unlock(&device->meta_state.mtx);
530 return VK_SUCCESS;
531 }
532
533 VkResult result = radv_CreateRenderPass(radv_device_to_handle(device),
534 &(VkRenderPassCreateInfo) {
535 .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
536 .attachmentCount = 1,
537 .pAttachments = &(VkAttachmentDescription) {
538 .format = VK_FORMAT_D32_SFLOAT_S8_UINT,
539 .samples = samples,
540 .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
541 .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
542 .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
543 .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
544 },
545 .subpassCount = 1,
546 .pSubpasses = &(VkSubpassDescription) {
547 .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
548 .inputAttachmentCount = 0,
549 .colorAttachmentCount = 0,
550 .pColorAttachments = NULL,
551 .pResolveAttachments = NULL,
552 .pDepthStencilAttachment = &(VkAttachmentReference) {
553 .attachment = 0,
554 .layout = VK_IMAGE_LAYOUT_GENERAL,
555 },
556 .preserveAttachmentCount = 0,
557 .pPreserveAttachments = NULL,
558 },
559 .dependencyCount = 0,
560 }, &device->meta_state.alloc, render_pass);
561 mtx_unlock(&device->meta_state.mtx);
562 return result;
563 }
564
565 static VkResult
566 create_depthstencil_pipeline(struct radv_device *device,
567 VkImageAspectFlags aspects,
568 uint32_t samples,
569 int index,
570 VkPipeline *pipeline,
571 VkRenderPass render_pass)
572 {
573 struct nir_shader *vs_nir, *fs_nir;
574 VkResult result;
575
576 mtx_lock(&device->meta_state.mtx);
577 if (*pipeline) {
578 mtx_unlock(&device->meta_state.mtx);
579 return VK_SUCCESS;
580 }
581
582 build_depthstencil_shader(&vs_nir, &fs_nir);
583
584 const VkPipelineVertexInputStateCreateInfo vi_state = {
585 .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
586 .vertexBindingDescriptionCount = 0,
587 .vertexAttributeDescriptionCount = 0,
588 };
589
590 const VkPipelineDepthStencilStateCreateInfo ds_state = {
591 .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
592 .depthTestEnable = (aspects & VK_IMAGE_ASPECT_DEPTH_BIT),
593 .depthCompareOp = VK_COMPARE_OP_ALWAYS,
594 .depthWriteEnable = (aspects & VK_IMAGE_ASPECT_DEPTH_BIT),
595 .depthBoundsTestEnable = false,
596 .stencilTestEnable = (aspects & VK_IMAGE_ASPECT_STENCIL_BIT),
597 .front = {
598 .passOp = VK_STENCIL_OP_REPLACE,
599 .compareOp = VK_COMPARE_OP_ALWAYS,
600 .writeMask = UINT32_MAX,
601 .reference = 0, /* dynamic */
602 },
603 .back = { 0 /* don't care */ },
604 };
605
606 const VkPipelineColorBlendStateCreateInfo cb_state = {
607 .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
608 .logicOpEnable = false,
609 .attachmentCount = 0,
610 .pAttachments = NULL,
611 };
612
613 struct radv_graphics_pipeline_create_info extra = {
614 .use_rectlist = true,
615 };
616
617 if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
618 extra.db_depth_clear = index == DEPTH_CLEAR_SLOW ? false : true;
619 extra.db_depth_disable_expclear = index == DEPTH_CLEAR_FAST_NO_EXPCLEAR ? true : false;
620 }
621 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
622 extra.db_stencil_clear = index == DEPTH_CLEAR_SLOW ? false : true;
623 extra.db_stencil_disable_expclear = index == DEPTH_CLEAR_FAST_NO_EXPCLEAR ? true : false;
624 }
625 result = create_pipeline(device, radv_render_pass_from_handle(render_pass),
626 samples, vs_nir, fs_nir, &vi_state, &ds_state, &cb_state,
627 device->meta_state.clear_depth_p_layout,
628 &extra, &device->meta_state.alloc, pipeline);
629
630 mtx_unlock(&device->meta_state.mtx);
631 return result;
632 }
633
634 static bool depth_view_can_fast_clear(struct radv_cmd_buffer *cmd_buffer,
635 const struct radv_image_view *iview,
636 VkImageAspectFlags aspects,
637 VkImageLayout layout,
638 bool in_render_loop,
639 const VkClearRect *clear_rect,
640 VkClearDepthStencilValue clear_value)
641 {
642 if (!iview)
643 return false;
644
645 uint32_t queue_mask = radv_image_queue_family_mask(iview->image,
646 cmd_buffer->queue_family_index,
647 cmd_buffer->queue_family_index);
648 if (clear_rect->rect.offset.x || clear_rect->rect.offset.y ||
649 clear_rect->rect.extent.width != iview->extent.width ||
650 clear_rect->rect.extent.height != iview->extent.height)
651 return false;
652 if (radv_image_is_tc_compat_htile(iview->image) &&
653 (((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) && clear_value.depth != 0.0 &&
654 clear_value.depth != 1.0) ||
655 ((aspects & VK_IMAGE_ASPECT_STENCIL_BIT) && clear_value.stencil != 0)))
656 return false;
657 if (radv_image_has_htile(iview->image) &&
658 iview->base_mip == 0 &&
659 iview->base_layer == 0 &&
660 iview->layer_count == iview->image->info.array_size &&
661 radv_layout_is_htile_compressed(iview->image, layout, in_render_loop, queue_mask) &&
662 radv_image_extent_compare(iview->image, &iview->extent))
663 return true;
664 return false;
665 }
666
667 static VkPipeline
668 pick_depthstencil_pipeline(struct radv_cmd_buffer *cmd_buffer,
669 struct radv_meta_state *meta_state,
670 const struct radv_image_view *iview,
671 int samples_log2,
672 VkImageAspectFlags aspects,
673 VkImageLayout layout,
674 bool in_render_loop,
675 const VkClearRect *clear_rect,
676 VkClearDepthStencilValue clear_value)
677 {
678 bool fast = depth_view_can_fast_clear(cmd_buffer, iview, aspects, layout,
679 in_render_loop, clear_rect, clear_value);
680 int index = DEPTH_CLEAR_SLOW;
681 VkPipeline *pipeline;
682
683 if (fast) {
684 /* we don't know the previous clear values, so we always take
685 * the NO_EXPCLEAR path */
686 index = DEPTH_CLEAR_FAST_NO_EXPCLEAR;
687 }
688
689 switch (aspects) {
690 case VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT:
691 pipeline = &meta_state->clear[samples_log2].depthstencil_pipeline[index];
692 break;
693 case VK_IMAGE_ASPECT_DEPTH_BIT:
694 pipeline = &meta_state->clear[samples_log2].depth_only_pipeline[index];
695 break;
696 case VK_IMAGE_ASPECT_STENCIL_BIT:
697 pipeline = &meta_state->clear[samples_log2].stencil_only_pipeline[index];
698 break;
699 default:
700 unreachable("expected depth or stencil aspect");
701 }
702
703 if (cmd_buffer->device->meta_state.clear[samples_log2].depthstencil_rp == VK_NULL_HANDLE) {
704 VkResult ret = create_depthstencil_renderpass(cmd_buffer->device, 1u << samples_log2,
705 &cmd_buffer->device->meta_state.clear[samples_log2].depthstencil_rp);
706 if (ret != VK_SUCCESS) {
707 cmd_buffer->record_result = ret;
708 return VK_NULL_HANDLE;
709 }
710 }
711
712 if (*pipeline == VK_NULL_HANDLE) {
713 VkResult ret = create_depthstencil_pipeline(cmd_buffer->device, aspects, 1u << samples_log2, index,
714 pipeline, cmd_buffer->device->meta_state.clear[samples_log2].depthstencil_rp);
715 if (ret != VK_SUCCESS) {
716 cmd_buffer->record_result = ret;
717 return VK_NULL_HANDLE;
718 }
719 }
720 return *pipeline;
721 }
722
723 static void
724 emit_depthstencil_clear(struct radv_cmd_buffer *cmd_buffer,
725 const VkClearAttachment *clear_att,
726 const VkClearRect *clear_rect,
727 struct radv_subpass_attachment *ds_att,
728 uint32_t view_mask)
729 {
730 struct radv_device *device = cmd_buffer->device;
731 struct radv_meta_state *meta_state = &device->meta_state;
732 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
733 const uint32_t pass_att = ds_att->attachment;
734 VkClearDepthStencilValue clear_value = clear_att->clearValue.depthStencil;
735 VkImageAspectFlags aspects = clear_att->aspectMask;
736 const struct radv_image_view *iview = cmd_buffer->state.attachments ?
737 cmd_buffer->state.attachments[pass_att].iview : NULL;
738 uint32_t samples, samples_log2;
739 VkCommandBuffer cmd_buffer_h = radv_cmd_buffer_to_handle(cmd_buffer);
740
741 /* When a framebuffer is bound to the current command buffer, get the
742 * number of samples from it. Otherwise, get the number of samples from
743 * the render pass because it's likely a secondary command buffer.
744 */
745 if (iview) {
746 samples = iview->image->info.samples;
747 } else {
748 samples = cmd_buffer->state.pass->attachments[pass_att].samples;
749 }
750
751 samples_log2 = ffs(samples) - 1;
752
753 assert(pass_att != VK_ATTACHMENT_UNUSED);
754
755 if (!(aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
756 clear_value.depth = 1.0f;
757
758 radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
759 device->meta_state.clear_depth_p_layout,
760 VK_SHADER_STAGE_FRAGMENT_BIT, 0, 4,
761 &clear_value.depth);
762
763 uint32_t prev_reference = cmd_buffer->state.dynamic.stencil_reference.front;
764 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
765 radv_CmdSetStencilReference(cmd_buffer_h, VK_STENCIL_FACE_FRONT_BIT,
766 clear_value.stencil);
767 }
768
769 VkPipeline pipeline = pick_depthstencil_pipeline(cmd_buffer,
770 meta_state,
771 iview,
772 samples_log2,
773 aspects,
774 ds_att->layout,
775 ds_att->in_render_loop,
776 clear_rect,
777 clear_value);
778 if (!pipeline)
779 return;
780
781 struct radv_subpass clear_subpass = {
782 .color_count = 0,
783 .color_attachments = NULL,
784 .depth_stencil_attachment = ds_att,
785 };
786
787 radv_cmd_buffer_set_subpass(cmd_buffer, &clear_subpass);
788
789 radv_CmdBindPipeline(cmd_buffer_h, VK_PIPELINE_BIND_POINT_GRAPHICS,
790 pipeline);
791
792 if (depth_view_can_fast_clear(cmd_buffer, iview, aspects,
793 ds_att->layout, ds_att->in_render_loop,
794 clear_rect, clear_value))
795 radv_update_ds_clear_metadata(cmd_buffer, iview,
796 clear_value, aspects);
797
798 radv_CmdSetViewport(radv_cmd_buffer_to_handle(cmd_buffer), 0, 1, &(VkViewport) {
799 .x = clear_rect->rect.offset.x,
800 .y = clear_rect->rect.offset.y,
801 .width = clear_rect->rect.extent.width,
802 .height = clear_rect->rect.extent.height,
803 .minDepth = 0.0f,
804 .maxDepth = 1.0f
805 });
806
807 radv_CmdSetScissor(radv_cmd_buffer_to_handle(cmd_buffer), 0, 1, &clear_rect->rect);
808
809 if (view_mask) {
810 unsigned i;
811 for_each_bit(i, view_mask)
812 radv_CmdDraw(cmd_buffer_h, 3, 1, 0, i);
813 } else {
814 radv_CmdDraw(cmd_buffer_h, 3, clear_rect->layerCount, 0, clear_rect->baseArrayLayer);
815 }
816
817 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
818 radv_CmdSetStencilReference(cmd_buffer_h, VK_STENCIL_FACE_FRONT_BIT,
819 prev_reference);
820 }
821
822 radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
823 }
824
825 static uint32_t
826 clear_htile_mask(struct radv_cmd_buffer *cmd_buffer,
827 struct radeon_winsys_bo *bo, uint64_t offset, uint64_t size,
828 uint32_t htile_value, uint32_t htile_mask)
829 {
830 struct radv_device *device = cmd_buffer->device;
831 struct radv_meta_state *state = &device->meta_state;
832 uint64_t block_count = round_up_u64(size, 1024);
833 struct radv_meta_saved_state saved_state;
834
835 radv_meta_save(&saved_state, cmd_buffer,
836 RADV_META_SAVE_COMPUTE_PIPELINE |
837 RADV_META_SAVE_CONSTANTS |
838 RADV_META_SAVE_DESCRIPTORS);
839
840 struct radv_buffer dst_buffer = {
841 .bo = bo,
842 .offset = offset,
843 .size = size
844 };
845
846 radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
847 VK_PIPELINE_BIND_POINT_COMPUTE,
848 state->clear_htile_mask_pipeline);
849
850 radv_meta_push_descriptor_set(cmd_buffer, VK_PIPELINE_BIND_POINT_COMPUTE,
851 state->clear_htile_mask_p_layout,
852 0, /* set */
853 1, /* descriptorWriteCount */
854 (VkWriteDescriptorSet[]) {
855 {
856 .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
857 .dstBinding = 0,
858 .dstArrayElement = 0,
859 .descriptorCount = 1,
860 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
861 .pBufferInfo = &(VkDescriptorBufferInfo) {
862 .buffer = radv_buffer_to_handle(&dst_buffer),
863 .offset = 0,
864 .range = size
865 }
866 }
867 });
868
869 const unsigned constants[2] = {
870 htile_value & htile_mask,
871 ~htile_mask,
872 };
873
874 radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
875 state->clear_htile_mask_p_layout,
876 VK_SHADER_STAGE_COMPUTE_BIT, 0, 8,
877 constants);
878
879 radv_CmdDispatch(radv_cmd_buffer_to_handle(cmd_buffer), block_count, 1, 1);
880
881 radv_meta_restore(&saved_state, cmd_buffer);
882
883 return RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
884 RADV_CMD_FLAG_INV_VCACHE |
885 RADV_CMD_FLAG_WB_L2;
886 }
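/* Worked example (illustrative numbers): with htile_value = 0xfffc0000
 * and htile_mask = 0x000003f0, the push constants above are
 * constants[0] = htile_value & htile_mask = 0x00000000 and
 * constants[1] = ~htile_mask = 0xfffffc0f, so the compute shader (see
 * build_clear_htile_mask_shader below) rewrites every HTILE dword as
 * (dword & 0xfffffc0f) | 0x00000000, touching only the masked bits.
 * Each 64-invocation workgroup stores 16 bytes per invocation, which is
 * why the dispatch size is round_up_u64(size, 1024) blocks.
 */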
887
888 static uint32_t
889 radv_get_htile_fast_clear_value(const struct radv_image *image,
890 VkClearDepthStencilValue value)
891 {
892 uint32_t clear_value;
893
894 if (!image->planes[0].surface.has_stencil) {
895 clear_value = value.depth ? 0xfffffff0 : 0;
896 } else {
897 clear_value = value.depth ? 0xfffc0000 : 0;
898 }
899
900 return clear_value;
901 }
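/* For instance, clearing depth to 1.0 on a depth-only image yields the
 * HTILE word 0xfffffff0 and clearing it to 0.0 yields 0; with a stencil
 * plane present, the non-zero case is 0xfffc0000 instead (per the values
 * above).
 */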
902
903 static uint32_t
904 radv_get_htile_mask(const struct radv_image *image, VkImageAspectFlags aspects)
905 {
906 uint32_t mask = 0;
907
908 if (!image->planes[0].surface.has_stencil) {
909 /* The whole HTILE buffer is used when there is no stencil. */
910 mask = UINT32_MAX;
911 } else {
912 if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
913 mask |= 0xfffffc0f;
914 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
915 mask |= 0x000003f0;
916 }
917
918 return mask;
919 }
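/* Note that the two aspect masks are complementary: 0xfffffc0f (depth)
 * and 0x000003f0 (stencil) together cover all 32 bits of an HTILE word.
 * For example, clearing only VK_IMAGE_ASPECT_STENCIL_BIT of a
 * VK_FORMAT_D32_SFLOAT_S8_UINT image yields mask = 0x000003f0, so
 * clear_htile_mask() rewrites only the stencil-related bits.
 */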
920
921 static bool
922 radv_is_fast_clear_depth_allowed(VkClearDepthStencilValue value)
923 {
924 return value.depth == 1.0f || value.depth == 0.0f;
925 }
926
927 static bool
928 radv_is_fast_clear_stencil_allowed(VkClearDepthStencilValue value)
929 {
930 return value.stencil == 0;
931 }
932
933 /**
934 * Determine if the given image can be fast cleared.
935 */
936 static bool
937 radv_image_can_fast_clear(struct radv_device *device, struct radv_image *image)
938 {
939 if (device->instance->debug_flags & RADV_DEBUG_NO_FAST_CLEARS)
940 return false;
941
942 if (vk_format_is_color(image->vk_format)) {
943 if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image))
944 return false;
945
946 /* RB+ doesn't work with CMASK fast clear on Stoney. */
947 if (!radv_image_has_dcc(image) &&
948 device->physical_device->rad_info.family == CHIP_STONEY)
949 return false;
950 } else {
951 if (!radv_image_has_htile(image))
952 return false;
953 }
954
955 /* Do not fast clear 3D images. */
956 if (image->type == VK_IMAGE_TYPE_3D)
957 return false;
958
959 return true;
960 }
961
962 /**
963 * Determine if the given image view can be fast cleared.
964 */
965 static bool
966 radv_image_view_can_fast_clear(struct radv_device *device,
967 const struct radv_image_view *iview)
968 {
969 struct radv_image *image;
970
971 if (!iview)
972 return false;
973 image = iview->image;
974
975 /* Only fast clear if the image itself can be fast cleared. */
976 if (!radv_image_can_fast_clear(device, image))
977 return false;
978
979 /* Only fast clear if all layers are bound. */
980 if (iview->base_layer > 0 ||
981 iview->layer_count != image->info.array_size)
982 return false;
983
984 /* Only fast clear if the view covers the whole image. */
985 if (!radv_image_extent_compare(image, &iview->extent))
986 return false;
987
988 return true;
989 }
990
991 static bool
992 radv_can_fast_clear_depth(struct radv_cmd_buffer *cmd_buffer,
993 const struct radv_image_view *iview,
994 VkImageLayout image_layout,
995 bool in_render_loop,
996 VkImageAspectFlags aspects,
997 const VkClearRect *clear_rect,
998 const VkClearDepthStencilValue clear_value,
999 uint32_t view_mask)
1000 {
1001 if (!radv_image_view_can_fast_clear(cmd_buffer->device, iview))
1002 return false;
1003
1004 if (!radv_layout_is_htile_compressed(iview->image, image_layout, in_render_loop,
1005 radv_image_queue_family_mask(iview->image,
1006 cmd_buffer->queue_family_index,
1007 cmd_buffer->queue_family_index)))
1008 return false;
1009
1010 if (clear_rect->rect.offset.x || clear_rect->rect.offset.y ||
1011 clear_rect->rect.extent.width != iview->image->info.width ||
1012 clear_rect->rect.extent.height != iview->image->info.height)
1013 return false;
1014
1015 if (view_mask && (iview->image->info.array_size >= 32 ||
1016 (1u << iview->image->info.array_size) - 1u != view_mask))
1017 return false;
1018 if (!view_mask && clear_rect->baseArrayLayer != 0)
1019 return false;
1020 if (!view_mask && clear_rect->layerCount != iview->image->info.array_size)
1021 return false;
1022
1023 if (cmd_buffer->device->physical_device->rad_info.chip_class != GFX9 &&
1024 (!(aspects & VK_IMAGE_ASPECT_DEPTH_BIT) ||
1025 ((vk_format_aspects(iview->image->vk_format) & VK_IMAGE_ASPECT_STENCIL_BIT) &&
1026 !(aspects & VK_IMAGE_ASPECT_STENCIL_BIT))))
1027 return false;
1028
1029 if (((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
1030 !radv_is_fast_clear_depth_allowed(clear_value)) ||
1031 ((aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
1032 !radv_is_fast_clear_stencil_allowed(clear_value)))
1033 return false;
1034
1035 return true;
1036 }
1037
1038 static void
1039 radv_fast_clear_depth(struct radv_cmd_buffer *cmd_buffer,
1040 const struct radv_image_view *iview,
1041 const VkClearAttachment *clear_att,
1042 enum radv_cmd_flush_bits *pre_flush,
1043 enum radv_cmd_flush_bits *post_flush)
1044 {
1045 VkClearDepthStencilValue clear_value = clear_att->clearValue.depthStencil;
1046 VkImageAspectFlags aspects = clear_att->aspectMask;
1047 uint32_t clear_word, flush_bits;
1048 uint32_t htile_mask;
1049
1050 clear_word = radv_get_htile_fast_clear_value(iview->image, clear_value);
1051 htile_mask = radv_get_htile_mask(iview->image, aspects);
1052
1053 if (pre_flush) {
1054 cmd_buffer->state.flush_bits |= (RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1055 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) & ~ *pre_flush;
1056 *pre_flush |= cmd_buffer->state.flush_bits;
1057 }
1058
1059 if (htile_mask == UINT32_MAX) {
1060 /* Clear the whole HTILE buffer. */
1061 flush_bits = radv_fill_buffer(cmd_buffer, iview->image->bo,
1062 iview->image->offset + iview->image->htile_offset,
1063 iview->image->planes[0].surface.htile_size, clear_word);
1064 } else {
1065 /* Only clear depth or stencil bytes in the HTILE buffer. */
1066 /* TODO: Implement that path for GFX10. */
1067 assert(cmd_buffer->device->physical_device->rad_info.chip_class == GFX9);
1068 flush_bits = clear_htile_mask(cmd_buffer, iview->image->bo,
1069 iview->image->offset + iview->image->htile_offset,
1070 iview->image->planes[0].surface.htile_size, clear_word,
1071 htile_mask);
1072 }
1073
1074 radv_update_ds_clear_metadata(cmd_buffer, iview, clear_value, aspects);
1075 if (post_flush) {
1076 *post_flush |= flush_bits;
1077 }
1078 }
1079
1080 static nir_shader *
1081 build_clear_htile_mask_shader(void)
1082 {
1083 nir_builder b;
1084
1085 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
1086 b.shader->info.name = ralloc_strdup(b.shader, "meta_clear_htile_mask");
1087 b.shader->info.cs.local_size[0] = 64;
1088 b.shader->info.cs.local_size[1] = 1;
1089 b.shader->info.cs.local_size[2] = 1;
1090
1091 nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
1092 nir_ssa_def *wg_id = nir_load_work_group_id(&b);
1093 nir_ssa_def *block_size = nir_imm_ivec4(&b,
1094 b.shader->info.cs.local_size[0],
1095 b.shader->info.cs.local_size[1],
1096 b.shader->info.cs.local_size[2], 0);
1097
1098 nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
1099
1100 nir_ssa_def *offset = nir_imul(&b, global_id, nir_imm_int(&b, 16));
1101 offset = nir_channel(&b, offset, 0);
1102
1103 nir_intrinsic_instr *buf =
1104 nir_intrinsic_instr_create(b.shader,
1105 nir_intrinsic_vulkan_resource_index);
1106
1107 buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
1108 buf->num_components = 1;
1109 nir_intrinsic_set_desc_set(buf, 0);
1110 nir_intrinsic_set_binding(buf, 0);
1111 nir_ssa_dest_init(&buf->instr, &buf->dest, buf->num_components, 32, NULL);
1112 nir_builder_instr_insert(&b, &buf->instr);
1113
1114 nir_intrinsic_instr *constants =
1115 nir_intrinsic_instr_create(b.shader,
1116 nir_intrinsic_load_push_constant);
1117 nir_intrinsic_set_base(constants, 0);
1118 nir_intrinsic_set_range(constants, 8);
1119 constants->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
1120 constants->num_components = 2;
1121 nir_ssa_dest_init(&constants->instr, &constants->dest, 2, 32, "constants");
1122 nir_builder_instr_insert(&b, &constants->instr);
1123
1124 nir_intrinsic_instr *load =
1125 nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
1126 load->src[0] = nir_src_for_ssa(&buf->dest.ssa);
1127 load->src[1] = nir_src_for_ssa(offset);
1128 nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
1129 load->num_components = 4;
1130 nir_builder_instr_insert(&b, &load->instr);
1131
1132 /* data = (data & ~htile_mask) | (htile_value & htile_mask) */
1133 nir_ssa_def *data =
1134 nir_iand(&b, &load->dest.ssa,
1135 nir_channel(&b, &constants->dest.ssa, 1));
1136 data = nir_ior(&b, data, nir_channel(&b, &constants->dest.ssa, 0));
1137
1138 nir_intrinsic_instr *store =
1139 nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
1140 store->src[0] = nir_src_for_ssa(data);
1141 store->src[1] = nir_src_for_ssa(&buf->dest.ssa);
1142 store->src[2] = nir_src_for_ssa(offset);
1143 nir_intrinsic_set_write_mask(store, 0xf);
1144 nir_intrinsic_set_access(store, ACCESS_NON_READABLE);
1145 store->num_components = 4;
1146 nir_builder_instr_insert(&b, &store->instr);
1147
1148 return b.shader;
1149 }
1150
1151 static VkResult
1152 init_meta_clear_htile_mask_state(struct radv_device *device)
1153 {
1154 struct radv_meta_state *state = &device->meta_state;
1155 struct radv_shader_module cs = { .nir = NULL };
1156 VkResult result;
1157
1158 cs.nir = build_clear_htile_mask_shader();
1159
1160 VkDescriptorSetLayoutCreateInfo ds_layout_info = {
1161 .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1162 .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
1163 .bindingCount = 1,
1164 .pBindings = (VkDescriptorSetLayoutBinding[]) {
1165 {
1166 .binding = 0,
1167 .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
1168 .descriptorCount = 1,
1169 .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
1170 .pImmutableSamplers = NULL
1171 },
1172 }
1173 };
1174
1175 result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
1176 &ds_layout_info, &state->alloc,
1177 &state->clear_htile_mask_ds_layout);
1178 if (result != VK_SUCCESS)
1179 goto fail;
1180
1181 VkPipelineLayoutCreateInfo p_layout_info = {
1182 .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
1183 .setLayoutCount = 1,
1184 .pSetLayouts = &state->clear_htile_mask_ds_layout,
1185 .pushConstantRangeCount = 1,
1186 .pPushConstantRanges = &(VkPushConstantRange){
1187 VK_SHADER_STAGE_COMPUTE_BIT, 0, 8,
1188 },
1189 };
1190
1191 result = radv_CreatePipelineLayout(radv_device_to_handle(device),
1192 &p_layout_info, &state->alloc,
1193 &state->clear_htile_mask_p_layout);
1194 if (result != VK_SUCCESS)
1195 goto fail;
1196
1197 VkPipelineShaderStageCreateInfo shader_stage = {
1198 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
1199 .stage = VK_SHADER_STAGE_COMPUTE_BIT,
1200 .module = radv_shader_module_to_handle(&cs),
1201 .pName = "main",
1202 .pSpecializationInfo = NULL,
1203 };
1204
1205 VkComputePipelineCreateInfo pipeline_info = {
1206 .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
1207 .stage = shader_stage,
1208 .flags = 0,
1209 .layout = state->clear_htile_mask_p_layout,
1210 };
1211
1212 result = radv_CreateComputePipelines(radv_device_to_handle(device),
1213 radv_pipeline_cache_to_handle(&state->cache),
1214 1, &pipeline_info, NULL,
1215 &state->clear_htile_mask_pipeline);
1216
1217 ralloc_free(cs.nir);
1218 return result;
1219 fail:
1220 ralloc_free(cs.nir);
1221 return result;
1222 }
1223
1224 VkResult
1225 radv_device_init_meta_clear_state(struct radv_device *device, bool on_demand)
1226 {
1227 VkResult res;
1228 struct radv_meta_state *state = &device->meta_state;
1229
1230 VkPipelineLayoutCreateInfo pl_color_create_info = {
1231 .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
1232 .setLayoutCount = 0,
1233 .pushConstantRangeCount = 1,
1234 .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_FRAGMENT_BIT, 0, 16},
1235 };
1236
1237 res = radv_CreatePipelineLayout(radv_device_to_handle(device),
1238 &pl_color_create_info,
1239 &device->meta_state.alloc,
1240 &device->meta_state.clear_color_p_layout);
1241 if (res != VK_SUCCESS)
1242 goto fail;
1243
1244 VkPipelineLayoutCreateInfo pl_depth_create_info = {
1245 .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
1246 .setLayoutCount = 0,
1247 .pushConstantRangeCount = 1,
1248 .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_FRAGMENT_BIT, 0, 4},
1249 };
1250
1251 res = radv_CreatePipelineLayout(radv_device_to_handle(device),
1252 &pl_depth_create_info,
1253 &device->meta_state.alloc,
1254 &device->meta_state.clear_depth_p_layout);
1255 if (res != VK_SUCCESS)
1256 goto fail;
1257
1258 res = init_meta_clear_htile_mask_state(device);
1259 if (res != VK_SUCCESS)
1260 goto fail;
1261
1262 if (on_demand)
1263 return VK_SUCCESS;
1264
1265 for (uint32_t i = 0; i < ARRAY_SIZE(state->clear); ++i) {
1266 uint32_t samples = 1 << i;
1267 for (uint32_t j = 0; j < NUM_META_FS_KEYS; ++j) {
1268 VkFormat format = radv_fs_key_format_exemplars[j];
1269 unsigned fs_key = radv_format_meta_fs_key(format);
1270 assert(!state->clear[i].color_pipelines[fs_key]);
1271
1272 res = create_color_renderpass(device, format, samples,
1273 &state->clear[i].render_pass[fs_key]);
1274 if (res != VK_SUCCESS)
1275 goto fail;
1276
1277 res = create_color_pipeline(device, samples, 0, &state->clear[i].color_pipelines[fs_key],
1278 state->clear[i].render_pass[fs_key]);
1279 if (res != VK_SUCCESS)
1280 goto fail;
1281
1282 }
1283
1284 res = create_depthstencil_renderpass(device,
1285 samples,
1286 &state->clear[i].depthstencil_rp);
1287 if (res != VK_SUCCESS)
1288 goto fail;
1289
1290 for (uint32_t j = 0; j < NUM_DEPTH_CLEAR_PIPELINES; j++) {
1291 res = create_depthstencil_pipeline(device,
1292 VK_IMAGE_ASPECT_DEPTH_BIT,
1293 samples,
1294 j,
1295 &state->clear[i].depth_only_pipeline[j],
1296 state->clear[i].depthstencil_rp);
1297 if (res != VK_SUCCESS)
1298 goto fail;
1299
1300 res = create_depthstencil_pipeline(device,
1301 VK_IMAGE_ASPECT_STENCIL_BIT,
1302 samples,
1303 j,
1304 &state->clear[i].stencil_only_pipeline[j],
1305 state->clear[i].depthstencil_rp);
1306 if (res != VK_SUCCESS)
1307 goto fail;
1308
1309 res = create_depthstencil_pipeline(device,
1310 VK_IMAGE_ASPECT_DEPTH_BIT |
1311 VK_IMAGE_ASPECT_STENCIL_BIT,
1312 samples,
1313 j,
1314 &state->clear[i].depthstencil_pipeline[j],
1315 state->clear[i].depthstencil_rp);
1316 if (res != VK_SUCCESS)
1317 goto fail;
1318 }
1319 }
1320 return VK_SUCCESS;
1321
1322 fail:
1323 radv_device_finish_meta_clear_state(device);
1324 return res;
1325 }
1326
1327 static uint32_t
1328 radv_get_cmask_fast_clear_value(const struct radv_image *image)
1329 {
1330 uint32_t value = 0; /* Default value when no DCC. */
1331
1332 /* The fast-clear value is different for images that have both DCC and
1333 * CMASK metadata.
1334 */
1335 if (radv_image_has_dcc(image)) {
1336 /* DCC fast clear with MSAA should clear CMASK to 0xC. */
1337 return image->info.samples > 1 ? 0xcccccccc : 0xffffffff;
1338 }
1339
1340 return value;
1341 }
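/* Example values: an image without DCC keeps the default CMASK clear
 * value of 0, a single-sampled image with DCC uses 0xffffffff, and a
 * multisampled image with DCC uses 0xcccccccc (0xC per nibble).
 */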
1342
1343 uint32_t
1344 radv_clear_cmask(struct radv_cmd_buffer *cmd_buffer,
1345 struct radv_image *image,
1346 const VkImageSubresourceRange *range, uint32_t value)
1347 {
1348 uint64_t offset = image->offset + image->cmask_offset;
1349 uint64_t size;
1350
1351 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1352 /* TODO: clear layers. */
1353 size = image->planes[0].surface.cmask_size;
1354 } else {
1355 unsigned cmask_slice_size =
1356 image->planes[0].surface.cmask_slice_size;
1357
1358 offset += cmask_slice_size * range->baseArrayLayer;
1359 size = cmask_slice_size * radv_get_layerCount(image, range);
1360 }
1361
1362 return radv_fill_buffer(cmd_buffer, image->bo, offset, size, value);
1363 }
1364
1365
1366 uint32_t
1367 radv_clear_fmask(struct radv_cmd_buffer *cmd_buffer,
1368 struct radv_image *image,
1369 const VkImageSubresourceRange *range, uint32_t value)
1370 {
1371 uint64_t offset = image->offset + image->fmask_offset;
1372 uint64_t size;
1373
1374 /* MSAA images do not support mipmap levels. */
1375 assert(range->baseMipLevel == 0 &&
1376 radv_get_levelCount(image, range) == 1);
1377
1378 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1379 /* TODO: clear layers. */
1380 size = image->planes[0].surface.fmask_size;
1381 } else {
1382 unsigned fmask_slice_size =
1383 image->planes[0].surface.u.legacy.fmask.slice_size;
1384
1385
1386 offset += fmask_slice_size * range->baseArrayLayer;
1387 size = fmask_slice_size * radv_get_layerCount(image, range);
1388 }
1389
1390 return radv_fill_buffer(cmd_buffer, image->bo, offset, size, value);
1391 }
1392
1393 uint32_t
1394 radv_clear_dcc(struct radv_cmd_buffer *cmd_buffer,
1395 struct radv_image *image,
1396 const VkImageSubresourceRange *range, uint32_t value)
1397 {
1398 uint32_t level_count = radv_get_levelCount(image, range);
1399 uint32_t flush_bits = 0;
1400
1401 /* Mark the image as being compressed. */
1402 radv_update_dcc_metadata(cmd_buffer, image, range, true);
1403
1404 for (uint32_t l = 0; l < level_count; l++) {
1405 uint64_t offset = image->offset + image->dcc_offset;
1406 uint32_t level = range->baseMipLevel + l;
1407 uint64_t size;
1408
1409 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1410 /* Mipmap levels aren't implemented. */
1411 assert(level == 0);
1412 size = image->planes[0].surface.dcc_size;
1413 } else {
1414 const struct legacy_surf_level *surf_level =
1415 &image->planes[0].surface.u.legacy.level[level];
1416
1417 /* If dcc_fast_clear_size is 0 (which might happen for
1418 * mipmaps), the fill buffer operation below is a no-op.
1419 * This can only happen during initialization because the
1420 * fast clear path falls back to slow clears if one
1421 * level can't be fast cleared.
1422 */
1423 offset += surf_level->dcc_offset +
1424 surf_level->dcc_slice_fast_clear_size * range->baseArrayLayer;
1425 size = surf_level->dcc_slice_fast_clear_size * radv_get_layerCount(image, range);
1426 }
1427
1428 flush_bits |= radv_fill_buffer(cmd_buffer, image->bo, offset,
1429 size, value);
1430 }
1431
1432 return flush_bits;
1433 }
1434
1435 uint32_t
1436 radv_clear_htile(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image,
1437 const VkImageSubresourceRange *range, uint32_t value)
1438 {
1439 unsigned layer_count = radv_get_layerCount(image, range);
1440 uint64_t size = image->planes[0].surface.htile_slice_size * layer_count;
1441 uint64_t offset = image->offset + image->htile_offset +
1442 image->planes[0].surface.htile_slice_size * range->baseArrayLayer;
1443
1444 return radv_fill_buffer(cmd_buffer, image->bo, offset, size, value);
1445 }
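/* Example (hypothetical numbers): with htile_slice_size = 0x10000,
 * baseArrayLayer = 2 and layerCount = 3, this fills 3 * 0x10000 bytes
 * starting at image->offset + image->htile_offset + 2 * 0x10000 with
 * the requested HTILE value.
 */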
1446
1447 enum {
1448 RADV_DCC_CLEAR_REG = 0x20202020U,
1449 RADV_DCC_CLEAR_MAIN_1 = 0x80808080U,
1450 RADV_DCC_CLEAR_SECONDARY_1 = 0x40404040U
1451 };
1452
1453 static void vi_get_fast_clear_parameters(struct radv_device *device,
1454 VkFormat image_format,
1455 VkFormat view_format,
1456 const VkClearColorValue *clear_value,
1457 uint32_t* reset_value,
1458 bool *can_avoid_fast_clear_elim)
1459 {
1460 bool values[4] = {};
1461 int extra_channel;
1462 bool main_value = false;
1463 bool extra_value = false;
1464 int i;
1465 *can_avoid_fast_clear_elim = false;
1466
1467 *reset_value = RADV_DCC_CLEAR_REG;
1468
1469 const struct vk_format_description *desc = vk_format_description(view_format);
1470 if (view_format == VK_FORMAT_B10G11R11_UFLOAT_PACK32 ||
1471 view_format == VK_FORMAT_R5G6B5_UNORM_PACK16 ||
1472 view_format == VK_FORMAT_B5G6R5_UNORM_PACK16)
1473 extra_channel = -1;
1474 else if (desc->layout == VK_FORMAT_LAYOUT_PLAIN) {
1475 if (vi_alpha_is_on_msb(device, view_format))
1476 extra_channel = desc->nr_channels - 1;
1477 else
1478 extra_channel = 0;
1479 } else
1480 return;
1481
1482 bool image_alpha_is_on_msb = vi_alpha_is_on_msb(device, image_format);
1483 bool view_alpha_is_on_msb = vi_alpha_is_on_msb(device, view_format);
1484
1485 for (i = 0; i < 4; i++) {
1486 int index = desc->swizzle[i] - VK_SWIZZLE_X;
1487 if (desc->swizzle[i] < VK_SWIZZLE_X ||
1488 desc->swizzle[i] > VK_SWIZZLE_W)
1489 continue;
1490
1491 if (desc->channel[i].pure_integer &&
1492 desc->channel[i].type == VK_FORMAT_TYPE_SIGNED) {
1493 /* Use the maximum value for clamping the clear color. */
1494 int max = u_bit_consecutive(0, desc->channel[i].size - 1);
1495
1496 values[i] = clear_value->int32[i] != 0;
1497 if (clear_value->int32[i] != 0 && MIN2(clear_value->int32[i], max) != max)
1498 return;
1499 } else if (desc->channel[i].pure_integer &&
1500 desc->channel[i].type == VK_FORMAT_TYPE_UNSIGNED) {
1501 /* Use the maximum value for clamping the clear color. */
1502 unsigned max = u_bit_consecutive(0, desc->channel[i].size);
1503
1504 values[i] = clear_value->uint32[i] != 0U;
1505 if (clear_value->uint32[i] != 0U && MIN2(clear_value->uint32[i], max) != max)
1506 return;
1507 } else {
1508 values[i] = clear_value->float32[i] != 0.0F;
1509 if (clear_value->float32[i] != 0.0F && clear_value->float32[i] != 1.0F)
1510 return;
1511 }
1512
1513 if (index == extra_channel)
1514 extra_value = values[i];
1515 else
1516 main_value = values[i];
1517 }
1518
1519 /* If alpha isn't present, make it the same as color, and vice versa. */
1520 if (!extra_value)
1521 extra_value = main_value;
1522 else if (!main_value)
1523 main_value = extra_value;
1524
1525 if (main_value != extra_value) {
1526 assert(image_alpha_is_on_msb == view_alpha_is_on_msb);
1527 return; /* require ELIMINATE_FAST_CLEAR */
1528 }
1529
1530 for (int i = 0; i < 4; ++i)
1531 if (values[i] != main_value &&
1532 desc->swizzle[i] - VK_SWIZZLE_X != extra_channel &&
1533 desc->swizzle[i] >= VK_SWIZZLE_X &&
1534 desc->swizzle[i] <= VK_SWIZZLE_W)
1535 return;
1536
1537 *can_avoid_fast_clear_elim = true;
1538 *reset_value = 0;
1539 if (main_value)
1540 *reset_value |= RADV_DCC_CLEAR_MAIN_1;
1541
1542 if (extra_value)
1543 *reset_value |= RADV_DCC_CLEAR_SECONDARY_1;
1544 return;
1545 }
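/* Worked example (assuming a plain VK_FORMAT_R8G8B8A8_UNORM view where
 * alpha is the "extra" MSB channel): a clear color of (0, 0, 0, 0) gives
 * reset_value = 0x00000000 and (1, 1, 1, 1) gives
 * RADV_DCC_CLEAR_MAIN_1 | RADV_DCC_CLEAR_SECONDARY_1 = 0xc0c0c0c0, both
 * with can_avoid_fast_clear_elim = true. A color like (0, 0, 0, 1) mixes
 * 0s and 1s across the channels, so the reset value stays
 * RADV_DCC_CLEAR_REG and a fast-clear eliminate pass is required later.
 */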
1546
1547 static bool
1548 radv_can_fast_clear_color(struct radv_cmd_buffer *cmd_buffer,
1549 const struct radv_image_view *iview,
1550 VkImageLayout image_layout,
1551 bool in_render_loop,
1552 const VkClearRect *clear_rect,
1553 VkClearColorValue clear_value,
1554 uint32_t view_mask)
1555 {
1556 uint32_t clear_color[2];
1557
1558 if (!radv_image_view_can_fast_clear(cmd_buffer->device, iview))
1559 return false;
1560
1561 if (!radv_layout_can_fast_clear(iview->image, image_layout, in_render_loop,
1562 radv_image_queue_family_mask(iview->image,
1563 cmd_buffer->queue_family_index,
1564 cmd_buffer->queue_family_index)))
1565 return false;
1566
1567 if (clear_rect->rect.offset.x || clear_rect->rect.offset.y ||
1568 clear_rect->rect.extent.width != iview->image->info.width ||
1569 clear_rect->rect.extent.height != iview->image->info.height)
1570 return false;
1571
1572 if (view_mask && (iview->image->info.array_size >= 32 ||
1573 (1u << iview->image->info.array_size) - 1u != view_mask))
1574 return false;
1575 if (!view_mask && clear_rect->baseArrayLayer != 0)
1576 return false;
1577 if (!view_mask && clear_rect->layerCount != iview->image->info.array_size)
1578 return false;
1579
1580 /* DCC */
1581 if (!radv_format_pack_clear_color(iview->vk_format,
1582 clear_color, &clear_value))
1583 return false;
1584
1585 if (radv_dcc_enabled(iview->image, iview->base_mip)) {
1586 bool can_avoid_fast_clear_elim;
1587 uint32_t reset_value;
1588
1589 vi_get_fast_clear_parameters(cmd_buffer->device,
1590 iview->image->vk_format,
1591 iview->vk_format,
1592 &clear_value, &reset_value,
1593 &can_avoid_fast_clear_elim);
1594
1595 if (iview->image->info.samples > 1) {
1596 /* DCC fast clear with MSAA should clear CMASK. */
1597 /* FIXME: This doesn't work for now. There is a
1598 * hardware bug with fast clears and DCC for MSAA
1599 * textures. AMDVLK has a workaround but it doesn't
1600 * seem to work here. Note that we might emit useless
1601 * CB flushes but that shouldn't matter.
1602 */
1603 if (!can_avoid_fast_clear_elim)
1604 return false;
1605 }
1606
1607 if (iview->image->info.levels > 1 &&
1608 cmd_buffer->device->physical_device->rad_info.chip_class == GFX8) {
1609 for (uint32_t l = 0; l < iview->level_count; l++) {
1610 uint32_t level = iview->base_mip + l;
1611 struct legacy_surf_level *surf_level =
1612 &iview->image->planes[0].surface.u.legacy.level[level];
1613
1614 /* Do not fast clear if one level can't be
1615 * fast cleared.
1616 */
1617 if (!surf_level->dcc_fast_clear_size)
1618 return false;
1619 }
1620 }
1621 }
1622
1623 return true;
1624 }
1625
1626
1627 static void
1628 radv_fast_clear_color(struct radv_cmd_buffer *cmd_buffer,
1629 const struct radv_image_view *iview,
1630 const VkClearAttachment *clear_att,
1631 uint32_t subpass_att,
1632 enum radv_cmd_flush_bits *pre_flush,
1633 enum radv_cmd_flush_bits *post_flush)
1634 {
1635 VkClearColorValue clear_value = clear_att->clearValue.color;
1636 uint32_t clear_color[2], flush_bits = 0;
1637 uint32_t cmask_clear_value;
1638 VkImageSubresourceRange range = {
1639 .aspectMask = iview->aspect_mask,
1640 .baseMipLevel = iview->base_mip,
1641 .levelCount = iview->level_count,
1642 .baseArrayLayer = iview->base_layer,
1643 .layerCount = iview->layer_count,
1644 };
1645
1646 if (pre_flush) {
1647 cmd_buffer->state.flush_bits |= (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1648 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) & ~ *pre_flush;
1649 *pre_flush |= cmd_buffer->state.flush_bits;
1650 }
1651
1652 /* DCC */
1653 radv_format_pack_clear_color(iview->vk_format, clear_color, &clear_value);
1654
1655 cmask_clear_value = radv_get_cmask_fast_clear_value(iview->image);
1656
1657 /* clear cmask buffer */
1658 if (radv_dcc_enabled(iview->image, iview->base_mip)) {
1659 uint32_t reset_value;
1660 bool can_avoid_fast_clear_elim;
1661 bool need_decompress_pass = false;
1662
1663 vi_get_fast_clear_parameters(cmd_buffer->device,
1664 iview->image->vk_format,
1665 iview->vk_format,
1666 &clear_value, &reset_value,
1667 &can_avoid_fast_clear_elim);
1668
1669 if (radv_image_has_cmask(iview->image)) {
1670 flush_bits = radv_clear_cmask(cmd_buffer, iview->image,
1671 &range, cmask_clear_value);
1672
1673 need_decompress_pass = true;
1674 }
1675
1676 if (!can_avoid_fast_clear_elim)
1677 need_decompress_pass = true;
1678
1679 flush_bits |= radv_clear_dcc(cmd_buffer, iview->image, &range,
1680 reset_value);
1681
1682 radv_update_fce_metadata(cmd_buffer, iview->image, &range,
1683 need_decompress_pass);
1684 } else {
1685 flush_bits = radv_clear_cmask(cmd_buffer, iview->image,
1686 &range, cmask_clear_value);
1687 }
1688
1689 if (post_flush) {
1690 *post_flush |= flush_bits;
1691 }
1692
1693 radv_update_color_clear_metadata(cmd_buffer, iview, subpass_att,
1694 clear_color);
1695 }
1696
1697 /**
1698 * The parameters mean the same as those in vkCmdClearAttachments.
1699 */
1700 static void
1701 emit_clear(struct radv_cmd_buffer *cmd_buffer,
1702 const VkClearAttachment *clear_att,
1703 const VkClearRect *clear_rect,
1704 enum radv_cmd_flush_bits *pre_flush,
1705 enum radv_cmd_flush_bits *post_flush,
1706 uint32_t view_mask,
1707 bool ds_resolve_clear)
1708 {
1709 const struct radv_framebuffer *fb = cmd_buffer->state.framebuffer;
1710 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1711 VkImageAspectFlags aspects = clear_att->aspectMask;
1712
1713 if (aspects & VK_IMAGE_ASPECT_COLOR_BIT) {
1714 const uint32_t subpass_att = clear_att->colorAttachment;
1715 assert(subpass_att < subpass->color_count);
1716 const uint32_t pass_att = subpass->color_attachments[subpass_att].attachment;
1717 if (pass_att == VK_ATTACHMENT_UNUSED)
1718 return;
1719
1720 VkImageLayout image_layout = subpass->color_attachments[subpass_att].layout;
1721 bool in_render_loop = subpass->color_attachments[subpass_att].in_render_loop;
1722 const struct radv_image_view *iview = fb ? cmd_buffer->state.attachments[pass_att].iview : NULL;
1723 VkClearColorValue clear_value = clear_att->clearValue.color;
1724
1725 if (radv_can_fast_clear_color(cmd_buffer, iview, image_layout, in_render_loop,
1726 clear_rect, clear_value, view_mask)) {
1727 radv_fast_clear_color(cmd_buffer, iview, clear_att,
1728 subpass_att, pre_flush,
1729 post_flush);
1730 } else {
1731 emit_color_clear(cmd_buffer, clear_att, clear_rect, view_mask);
1732 }
1733 } else {
1734 struct radv_subpass_attachment *ds_att = subpass->depth_stencil_attachment;
1735
1736 if (ds_resolve_clear)
1737 ds_att = subpass->ds_resolve_attachment;
1738
1739 if (!ds_att || ds_att->attachment == VK_ATTACHMENT_UNUSED)
1740 return;
1741
1742 VkImageLayout image_layout = ds_att->layout;
1743 bool in_render_loop = ds_att->in_render_loop;
1744 const struct radv_image_view *iview = fb ? cmd_buffer->state.attachments[ds_att->attachment].iview : NULL;
1745 VkClearDepthStencilValue clear_value = clear_att->clearValue.depthStencil;
1746
1747 assert(aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
1748 VK_IMAGE_ASPECT_STENCIL_BIT));
1749
1750 if (radv_can_fast_clear_depth(cmd_buffer, iview, image_layout,
1751 in_render_loop, aspects, clear_rect,
1752 clear_value, view_mask)) {
1753 radv_fast_clear_depth(cmd_buffer, iview, clear_att,
1754 pre_flush, post_flush);
1755 } else {
1756 emit_depthstencil_clear(cmd_buffer, clear_att, clear_rect,
1757 ds_att, view_mask);
1758 }
1759 }
1760 }
1761
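/* Return true if attachment 'a' still has pending clear aspects (or
 * views left to clear when multiview is enabled).
 */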
1762 static inline bool
1763 radv_attachment_needs_clear(struct radv_cmd_state *cmd_state, uint32_t a)
1764 {
1765 uint32_t view_mask = cmd_state->subpass->view_mask;
1766 return (a != VK_ATTACHMENT_UNUSED &&
1767 cmd_state->attachments[a].pending_clear_aspects &&
1768 (!view_mask || (view_mask & ~cmd_state->attachments[a].cleared_views)));
1769 }
1770
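/* Return true if any attachment of the current subpass still has a
 * pending clear.
 */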
1771 static bool
1772 radv_subpass_needs_clear(struct radv_cmd_buffer *cmd_buffer)
1773 {
1774 struct radv_cmd_state *cmd_state = &cmd_buffer->state;
1775 uint32_t a;
1776
1777 if (!cmd_state->subpass)
1778 return false;
1779
1780 for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
1781 a = cmd_state->subpass->color_attachments[i].attachment;
1782 if (radv_attachment_needs_clear(cmd_state, a))
1783 return true;
1784 }
1785
1786 if (cmd_state->subpass->depth_stencil_attachment) {
1787 a = cmd_state->subpass->depth_stencil_attachment->attachment;
1788 if (radv_attachment_needs_clear(cmd_state, a))
1789 return true;
1790 }
1791
1792 if (!cmd_state->subpass->ds_resolve_attachment)
1793 return false;
1794
1795 a = cmd_state->subpass->ds_resolve_attachment->attachment;
1796 return radv_attachment_needs_clear(cmd_state, a);
1797 }
1798
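/* Emit the pending clear for one attachment over the whole render
 * area, then update the attachment's clear state.
 */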
1799 static void
1800 radv_subpass_clear_attachment(struct radv_cmd_buffer *cmd_buffer,
1801 struct radv_attachment_state *attachment,
1802 const VkClearAttachment *clear_att,
1803 enum radv_cmd_flush_bits *pre_flush,
1804 enum radv_cmd_flush_bits *post_flush,
1805 bool ds_resolve_clear)
1806 {
1807 struct radv_cmd_state *cmd_state = &cmd_buffer->state;
1808 uint32_t view_mask = cmd_state->subpass->view_mask;
1809
1810 VkClearRect clear_rect = {
1811 .rect = cmd_state->render_area,
1812 .baseArrayLayer = 0,
1813 .layerCount = cmd_state->framebuffer->layers,
1814 };
1815
1816 emit_clear(cmd_buffer, clear_att, &clear_rect, pre_flush, post_flush,
1817 view_mask & ~attachment->cleared_views, ds_resolve_clear);
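/* With multiview, remember which views have been cleared; otherwise
 * the clear is complete and the pending aspects can be dropped.
 */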
1818 if (view_mask)
1819 attachment->cleared_views |= view_mask;
1820 else
1821 attachment->pending_clear_aspects = 0;
1822 }
1823
1824 /**
1825 * Emit any pending attachment clears for the current subpass.
1826 *
1827 * @see radv_attachment_state::pending_clear_aspects
1828 */
1829 void
1830 radv_cmd_buffer_clear_subpass(struct radv_cmd_buffer *cmd_buffer)
1831 {
1832 struct radv_cmd_state *cmd_state = &cmd_buffer->state;
1833 struct radv_meta_saved_state saved_state;
1834 enum radv_cmd_flush_bits pre_flush = 0;
1835 enum radv_cmd_flush_bits post_flush = 0;
1836
1837 if (!radv_subpass_needs_clear(cmd_buffer))
1838 return;
1839
1840 radv_meta_save(&saved_state, cmd_buffer,
1841 RADV_META_SAVE_GRAPHICS_PIPELINE |
1842 RADV_META_SAVE_CONSTANTS);
1843
1844 for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
1845 uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
1846
1847 if (!radv_attachment_needs_clear(cmd_state, a))
1848 continue;
1849
1850 assert(cmd_state->attachments[a].pending_clear_aspects ==
1851 VK_IMAGE_ASPECT_COLOR_BIT);
1852
1853 VkClearAttachment clear_att = {
1854 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1855 .colorAttachment = i, /* Use attachment index relative to subpass */
1856 .clearValue = cmd_state->attachments[a].clear_value,
1857 };
1858
1859 radv_subpass_clear_attachment(cmd_buffer,
1860 &cmd_state->attachments[a],
1861 &clear_att, &pre_flush,
1862 &post_flush, false);
1863 }
1864
1865 if (cmd_state->subpass->depth_stencil_attachment) {
1866 uint32_t ds = cmd_state->subpass->depth_stencil_attachment->attachment;
1867 if (radv_attachment_needs_clear(cmd_state, ds)) {
1868 VkClearAttachment clear_att = {
1869 .aspectMask = cmd_state->attachments[ds].pending_clear_aspects,
1870 .clearValue = cmd_state->attachments[ds].clear_value,
1871 };
1872
1873 radv_subpass_clear_attachment(cmd_buffer,
1874 &cmd_state->attachments[ds],
1875 &clear_att, &pre_flush,
1876 &post_flush, false);
1877 }
1878 }
1879
1880 if (cmd_state->subpass->ds_resolve_attachment) {
1881 uint32_t ds_resolve = cmd_state->subpass->ds_resolve_attachment->attachment;
1882 if (radv_attachment_needs_clear(cmd_state, ds_resolve)) {
1883 VkClearAttachment clear_att = {
1884 .aspectMask = cmd_state->attachments[ds_resolve].pending_clear_aspects,
1885 .clearValue = cmd_state->attachments[ds_resolve].clear_value,
1886 };
1887
1888 radv_subpass_clear_attachment(cmd_buffer,
1889 &cmd_state->attachments[ds_resolve],
1890 &clear_att, &pre_flush,
1891 &post_flush, true);
1892 }
1893 }
1894
1895 radv_meta_restore(&saved_state, cmd_buffer);
1896 cmd_buffer->state.flush_bits |= post_flush;
1897 }
1898
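/* Slow path: clear a single layer of a single mip level by wrapping
 * it in a temporary render pass/framebuffer and emitting a
 * draw-based clear.
 */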
1899 static void
1900 radv_clear_image_layer(struct radv_cmd_buffer *cmd_buffer,
1901 struct radv_image *image,
1902 VkImageLayout image_layout,
1903 const VkImageSubresourceRange *range,
1904 VkFormat format, int level, int layer,
1905 const VkClearValue *clear_val)
1906 {
1907 VkDevice device_h = radv_device_to_handle(cmd_buffer->device);
1908 struct radv_image_view iview;
1909 uint32_t width = radv_minify(image->info.width, range->baseMipLevel + level);
1910 uint32_t height = radv_minify(image->info.height, range->baseMipLevel + level);
1911
1912 radv_image_view_init(&iview, cmd_buffer->device,
1913 &(VkImageViewCreateInfo) {
1914 .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
1915 .image = radv_image_to_handle(image),
1916 .viewType = radv_meta_get_view_type(image),
1917 .format = format,
1918 .subresourceRange = {
1919 .aspectMask = range->aspectMask,
1920 .baseMipLevel = range->baseMipLevel + level,
1921 .levelCount = 1,
1922 .baseArrayLayer = range->baseArrayLayer + layer,
1923 .layerCount = 1
1924 },
1925 }, NULL);
1926
1927 VkFramebuffer fb;
1928 radv_CreateFramebuffer(device_h,
1929 &(VkFramebufferCreateInfo) {
1930 .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
1931 .attachmentCount = 1,
1932 .pAttachments = (VkImageView[]) {
1933 radv_image_view_to_handle(&iview),
1934 },
1935 .width = width,
1936 .height = height,
1937 .layers = 1
1938 },
1939 &cmd_buffer->pool->alloc,
1940 &fb);
1941
1942 VkAttachmentDescription att_desc = {
1943 .format = iview.vk_format,
1944 .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
1945 .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
1946 .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
1947 .stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
1948 .initialLayout = image_layout,
1949 .finalLayout = image_layout,
1950 };
1951
1952 VkSubpassDescription subpass_desc = {
1953 .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
1954 .inputAttachmentCount = 0,
1955 .colorAttachmentCount = 0,
1956 .pColorAttachments = NULL,
1957 .pResolveAttachments = NULL,
1958 .pDepthStencilAttachment = NULL,
1959 .preserveAttachmentCount = 0,
1960 .pPreserveAttachments = NULL,
1961 };
1962
1963 const VkAttachmentReference att_ref = {
1964 .attachment = 0,
1965 .layout = image_layout,
1966 };
1967
1968 if (range->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
1969 subpass_desc.colorAttachmentCount = 1;
1970 subpass_desc.pColorAttachments = &att_ref;
1971 } else {
1972 subpass_desc.pDepthStencilAttachment = &att_ref;
1973 }
1974
1975 VkRenderPass pass;
1976 radv_CreateRenderPass(device_h,
1977 &(VkRenderPassCreateInfo) {
1978 .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
1979 .attachmentCount = 1,
1980 .pAttachments = &att_desc,
1981 .subpassCount = 1,
1982 .pSubpasses = &subpass_desc,
1983 },
1984 &cmd_buffer->pool->alloc,
1985 &pass);
1986
1987 radv_CmdBeginRenderPass(radv_cmd_buffer_to_handle(cmd_buffer),
1988 &(VkRenderPassBeginInfo) {
1989 .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
1990 .renderArea = {
1991 .offset = { 0, 0, },
1992 .extent = {
1993 .width = width,
1994 .height = height,
1995 },
1996 },
1997 .renderPass = pass,
1998 .framebuffer = fb,
1999 .clearValueCount = 0,
2000 .pClearValues = NULL,
2001 },
2002 VK_SUBPASS_CONTENTS_INLINE);
2003
2004 VkClearAttachment clear_att = {
2005 .aspectMask = range->aspectMask,
2006 .colorAttachment = 0,
2007 .clearValue = *clear_val,
2008 };
2009
2010 VkClearRect clear_rect = {
2011 .rect = {
2012 .offset = { 0, 0 },
2013 .extent = { width, height },
2014 },
2015 .baseArrayLayer = range->baseArrayLayer,
2016 .layerCount = 1, /* FINISHME: clear multi-layer framebuffer */
2017 };
2018
2019 emit_clear(cmd_buffer, &clear_att, &clear_rect, NULL, NULL, 0, false);
2020
2021 radv_CmdEndRenderPass(radv_cmd_buffer_to_handle(cmd_buffer));
2022 radv_DestroyRenderPass(device_h, pass,
2023 &cmd_buffer->pool->alloc);
2024 radv_DestroyFramebuffer(device_h, fb,
2025 &cmd_buffer->pool->alloc);
2026 }
2027
2028 /**
2029 * Return TRUE if a fast color or depth clear has been performed.
2030 */
2031 static bool
2032 radv_fast_clear_range(struct radv_cmd_buffer *cmd_buffer,
2033 struct radv_image *image,
2034 VkFormat format,
2035 VkImageLayout image_layout,
2036 bool in_render_loop,
2037 const VkImageSubresourceRange *range,
2038 const VkClearValue *clear_val)
2039 {
2040 struct radv_image_view iview;
2041
2042 radv_image_view_init(&iview, cmd_buffer->device,
2043 &(VkImageViewCreateInfo) {
2044 .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
2045 .image = radv_image_to_handle(image),
2046 .viewType = radv_meta_get_view_type(image),
2047 .format = image->vk_format,
2048 .subresourceRange = {
2049 .aspectMask = range->aspectMask,
2050 .baseMipLevel = range->baseMipLevel,
2051 .levelCount = range->levelCount,
2052 .baseArrayLayer = range->baseArrayLayer,
2053 .layerCount = range->layerCount,
2054 },
2055 }, NULL);
2056
2057 VkClearRect clear_rect = {
2058 .rect = {
2059 .offset = { 0, 0 },
2060 .extent = {
2061 radv_minify(image->info.width, range->baseMipLevel),
2062 radv_minify(image->info.height, range->baseMipLevel),
2063 },
2064 },
2065 .baseArrayLayer = range->baseArrayLayer,
2066 .layerCount = range->layerCount,
2067 };
2068
2069 VkClearAttachment clear_att = {
2070 .aspectMask = range->aspectMask,
2071 .colorAttachment = 0,
2072 .clearValue = *clear_val,
2073 };
2074
2075 if (vk_format_is_color(format)) {
2076 if (radv_can_fast_clear_color(cmd_buffer, &iview, image_layout,
2077 in_render_loop, &clear_rect,
2078 clear_att.clearValue.color, 0)) {
2079 radv_fast_clear_color(cmd_buffer, &iview, &clear_att,
2080 clear_att.colorAttachment,
2081 NULL, NULL);
2082 return true;
2083 }
2084 } else {
2085 if (radv_can_fast_clear_depth(cmd_buffer, &iview, image_layout,
2086 in_render_loop, range->aspectMask,
2087 &clear_rect, clear_att.clearValue.depthStencil,
2088 0)) {
2089 radv_fast_clear_depth(cmd_buffer, &iview, &clear_att,
2090 NULL, NULL);
2091 return true;
2092 }
2093 }
2094
2095 return false;
2096 }
2097
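/* Common implementation of vkCmdClearColorImage and
 * vkCmdClearDepthStencilImage: rewrite formats that cannot be
 * cleared directly, try a fast clear for each range, and otherwise
 * clear every level/layer with the graphics or compute path.
 */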
2098 static void
2099 radv_cmd_clear_image(struct radv_cmd_buffer *cmd_buffer,
2100 struct radv_image *image,
2101 VkImageLayout image_layout,
2102 const VkClearValue *clear_value,
2103 uint32_t range_count,
2104 const VkImageSubresourceRange *ranges,
2105 bool cs)
2106 {
2107 VkFormat format = image->vk_format;
2108 VkClearValue internal_clear_value = *clear_value;
2109
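/* E5B9G9R9 and R4G4 cannot be cleared directly; pack the clear
 * color and clear through a compatible integer format instead.
 */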
2110 if (format == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32) {
2111 uint32_t value;
2112 format = VK_FORMAT_R32_UINT;
2113 value = float3_to_rgb9e5(clear_value->color.float32);
2114 internal_clear_value.color.uint32[0] = value;
2115 }
2116
2117 if (format == VK_FORMAT_R4G4_UNORM_PACK8) {
2118 uint8_t r, g;
2119 format = VK_FORMAT_R8_UINT;
2120 r = float_to_ubyte(clear_value->color.float32[0]) >> 4;
2121 g = float_to_ubyte(clear_value->color.float32[1]) >> 4;
2122 internal_clear_value.color.uint32[0] = (r << 4) | (g & 0xf);
2123 }
2124
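/* 96-bit formats have no renderable equivalent, so they can only be
 * cleared with the compute path.
 */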
2125 if (format == VK_FORMAT_R32G32B32_UINT ||
2126 format == VK_FORMAT_R32G32B32_SINT ||
2127 format == VK_FORMAT_R32G32B32_SFLOAT)
2128 cs = true;
2129
2130 for (uint32_t r = 0; r < range_count; r++) {
2131 const VkImageSubresourceRange *range = &ranges[r];
2132
2133 /* Try to perform a fast clear first, otherwise fall back to
2134 * the legacy path.
2135 */
2136 if (!cs &&
2137 radv_fast_clear_range(cmd_buffer, image, format,
2138 image_layout, false, range,
2139 &internal_clear_value)) {
2140 continue;
2141 }
2142
2143 for (uint32_t l = 0; l < radv_get_levelCount(image, range); ++l) {
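/* For 3D images, the number of slices to clear at a given level is
 * that level's minified depth.
 */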
2144 const uint32_t layer_count = image->type == VK_IMAGE_TYPE_3D ?
2145 radv_minify(image->info.depth, range->baseMipLevel + l) :
2146 radv_get_layerCount(image, range);
2147 for (uint32_t s = 0; s < layer_count; ++s) {
2148
2149 if (cs) {
2150 struct radv_meta_blit2d_surf surf;
2151 surf.format = format;
2152 surf.image = image;
2153 surf.level = range->baseMipLevel + l;
2154 surf.layer = range->baseArrayLayer + s;
2155 surf.aspect_mask = range->aspectMask;
2156 radv_meta_clear_image_cs(cmd_buffer, &surf,
2157 &internal_clear_value.color);
2158 } else {
2159 radv_clear_image_layer(cmd_buffer, image, image_layout,
2160 range, format, l, s, &internal_clear_value);
2161 }
2162 }
2163 }
2164 }
2165 }
2166
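/* vkCmdClearColorImage entry point. On the compute queue no graphics
 * pipeline is available, so the clear must go through the compute
 * path.
 */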
2167 void radv_CmdClearColorImage(
2168 VkCommandBuffer commandBuffer,
2169 VkImage image_h,
2170 VkImageLayout imageLayout,
2171 const VkClearColorValue* pColor,
2172 uint32_t rangeCount,
2173 const VkImageSubresourceRange* pRanges)
2174 {
2175 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2176 RADV_FROM_HANDLE(radv_image, image, image_h);
2177 struct radv_meta_saved_state saved_state;
2178 bool cs = cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE;
2179
2180 if (cs) {
2181 radv_meta_save(&saved_state, cmd_buffer,
2182 RADV_META_SAVE_COMPUTE_PIPELINE |
2183 RADV_META_SAVE_CONSTANTS |
2184 RADV_META_SAVE_DESCRIPTORS);
2185 } else {
2186 radv_meta_save(&saved_state, cmd_buffer,
2187 RADV_META_SAVE_GRAPHICS_PIPELINE |
2188 RADV_META_SAVE_CONSTANTS);
2189 }
2190
2191 radv_cmd_clear_image(cmd_buffer, image, imageLayout,
2192 (const VkClearValue *) pColor,
2193 rangeCount, pRanges, cs);
2194
2195 radv_meta_restore(&saved_state, cmd_buffer);
2196 }
2197
2198 void radv_CmdClearDepthStencilImage(
2199 VkCommandBuffer commandBuffer,
2200 VkImage image_h,
2201 VkImageLayout imageLayout,
2202 const VkClearDepthStencilValue* pDepthStencil,
2203 uint32_t rangeCount,
2204 const VkImageSubresourceRange* pRanges)
2205 {
2206 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2207 RADV_FROM_HANDLE(radv_image, image, image_h);
2208 struct radv_meta_saved_state saved_state;
2209
2210 radv_meta_save(&saved_state, cmd_buffer,
2211 RADV_META_SAVE_GRAPHICS_PIPELINE |
2212 RADV_META_SAVE_CONSTANTS);
2213
2214 radv_cmd_clear_image(cmd_buffer, image, imageLayout,
2215 (const VkClearValue *) pDepthStencil,
2216 rangeCount, pRanges, false);
2217
2218 radv_meta_restore(&saved_state, cmd_buffer);
2219 }
2220
2221 void radv_CmdClearAttachments(
2222 VkCommandBuffer commandBuffer,
2223 uint32_t attachmentCount,
2224 const VkClearAttachment* pAttachments,
2225 uint32_t rectCount,
2226 const VkClearRect* pRects)
2227 {
2228 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2229 struct radv_meta_saved_state saved_state;
2230 enum radv_cmd_flush_bits pre_flush = 0;
2231 enum radv_cmd_flush_bits post_flush = 0;
2232
2233 if (!cmd_buffer->state.subpass)
2234 return;
2235
2236 radv_meta_save(&saved_state, cmd_buffer,
2237 RADV_META_SAVE_GRAPHICS_PIPELINE |
2238 RADV_META_SAVE_CONSTANTS);
2239
2240 /* FINISHME: We can do better than this dumb loop. It thrashes too much
2241 * state.
2242 */
2243 for (uint32_t a = 0; a < attachmentCount; ++a) {
2244 for (uint32_t r = 0; r < rectCount; ++r) {
2245 emit_clear(cmd_buffer, &pAttachments[a], &pRects[r], &pre_flush, &post_flush,
2246 cmd_buffer->state.subpass->view_mask, false);
2247 }
2248 }
2249
2250 radv_meta_restore(&saved_state, cmd_buffer);
2251 cmd_buffer->state.flush_bits |= post_flush;
2252 }