radv: handle subpass cache flushes
[mesa.git] / src / amd / vulkan / radv_meta_resolve.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26
27 #include "radv_meta.h"
28 #include "radv_private.h"
29 #include "nir/nir_builder.h"
30 #include "sid.h"
/**
 * Vertex attributes used by all pipelines.
 *
 * Only a 2D position is needed: the resolve draw is a full-coverage
 * rectangle; the fragment shader output is ignored (CB_RESOLVE blend
 * mode performs the actual resolve).
 */
struct vertex_attrs {
	float position[2]; /**< 3DPRIM_RECTLIST */
};
37
38 /* passthrough vertex shader */
39 static nir_shader *
40 build_nir_vs(void)
41 {
42 const struct glsl_type *vec4 = glsl_vec4_type();
43
44 nir_builder b;
45 nir_variable *a_position;
46 nir_variable *v_position;
47
48 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
49 b.shader->info->name = ralloc_strdup(b.shader, "meta_resolve_vs");
50
51 a_position = nir_variable_create(b.shader, nir_var_shader_in, vec4,
52 "a_position");
53 a_position->data.location = VERT_ATTRIB_GENERIC0;
54
55 v_position = nir_variable_create(b.shader, nir_var_shader_out, vec4,
56 "gl_Position");
57 v_position->data.location = VARYING_SLOT_POS;
58
59 nir_copy_var(&b, v_position, a_position);
60
61 return b.shader;
62 }
63
64 /* simple passthrough shader */
65 static nir_shader *
66 build_nir_fs(void)
67 {
68 const struct glsl_type *vec4 = glsl_vec4_type();
69 nir_builder b;
70 nir_variable *f_color; /* vec4, fragment output color */
71
72 nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
73 b.shader->info->name = ralloc_asprintf(b.shader,
74 "meta_resolve_fs");
75
76 f_color = nir_variable_create(b.shader, nir_var_shader_out, vec4,
77 "f_color");
78 f_color->data.location = FRAG_RESULT_DATA0;
79 nir_store_var(&b, f_color, nir_imm_vec4(&b, 0.0, 0.0, 0.0, 1.0), 0xf);
80
81 return b.shader;
82 }
83
84 static VkResult
85 create_pass(struct radv_device *device)
86 {
87 VkResult result;
88 VkDevice device_h = radv_device_to_handle(device);
89 const VkAllocationCallbacks *alloc = &device->meta_state.alloc;
90 VkAttachmentDescription attachments[2];
91 int i;
92
93 for (i = 0; i < 2; i++) {
94 attachments[i].format = VK_FORMAT_UNDEFINED;
95 attachments[i].samples = 1;
96 attachments[i].loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
97 attachments[i].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
98 }
99 attachments[0].initialLayout = VK_IMAGE_LAYOUT_GENERAL;
100 attachments[0].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
101 attachments[1].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
102 attachments[1].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
103
104 result = radv_CreateRenderPass(device_h,
105 &(VkRenderPassCreateInfo) {
106 .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
107 .attachmentCount = 2,
108 .pAttachments = attachments,
109 .subpassCount = 1,
110 .pSubpasses = &(VkSubpassDescription) {
111 .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
112 .inputAttachmentCount = 0,
113 .colorAttachmentCount = 2,
114 .pColorAttachments = (VkAttachmentReference[]) {
115 {
116 .attachment = 0,
117 .layout = VK_IMAGE_LAYOUT_GENERAL,
118 },
119 {
120 .attachment = 1,
121 .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
122 },
123 },
124 .pResolveAttachments = NULL,
125 .pDepthStencilAttachment = &(VkAttachmentReference) {
126 .attachment = VK_ATTACHMENT_UNUSED,
127 },
128 .preserveAttachmentCount = 0,
129 .pPreserveAttachments = NULL,
130 },
131 .dependencyCount = 0,
132 },
133 alloc,
134 &device->meta_state.resolve.pass);
135
136 return result;
137 }
138
139 static VkResult
140 create_pipeline(struct radv_device *device,
141 VkShaderModule vs_module_h)
142 {
143 VkResult result;
144 VkDevice device_h = radv_device_to_handle(device);
145
146 struct radv_shader_module fs_module = {
147 .nir = build_nir_fs(),
148 };
149
150 if (!fs_module.nir) {
151 /* XXX: Need more accurate error */
152 result = VK_ERROR_OUT_OF_HOST_MEMORY;
153 goto cleanup;
154 }
155
156 result = radv_graphics_pipeline_create(device_h,
157 radv_pipeline_cache_to_handle(&device->meta_state.cache),
158 &(VkGraphicsPipelineCreateInfo) {
159 .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
160 .stageCount = 2,
161 .pStages = (VkPipelineShaderStageCreateInfo[]) {
162 {
163 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
164 .stage = VK_SHADER_STAGE_VERTEX_BIT,
165 .module = vs_module_h,
166 .pName = "main",
167 },
168 {
169 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
170 .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
171 .module = radv_shader_module_to_handle(&fs_module),
172 .pName = "main",
173 },
174 },
175 .pVertexInputState = &(VkPipelineVertexInputStateCreateInfo) {
176 .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
177 .vertexBindingDescriptionCount = 1,
178 .pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) {
179 {
180 .binding = 0,
181 .stride = sizeof(struct vertex_attrs),
182 .inputRate = VK_VERTEX_INPUT_RATE_VERTEX
183 },
184 },
185 .vertexAttributeDescriptionCount = 1,
186 .pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) {
187 {
188 /* Position */
189 .location = 0,
190 .binding = 0,
191 .format = VK_FORMAT_R32G32_SFLOAT,
192 .offset = offsetof(struct vertex_attrs, position),
193 },
194 },
195 },
196 .pInputAssemblyState = &(VkPipelineInputAssemblyStateCreateInfo) {
197 .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
198 .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
199 .primitiveRestartEnable = false,
200 },
201 .pViewportState = &(VkPipelineViewportStateCreateInfo) {
202 .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
203 .viewportCount = 0,
204 .scissorCount = 0,
205 },
206 .pRasterizationState = &(VkPipelineRasterizationStateCreateInfo) {
207 .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
208 .depthClampEnable = false,
209 .rasterizerDiscardEnable = false,
210 .polygonMode = VK_POLYGON_MODE_FILL,
211 .cullMode = VK_CULL_MODE_NONE,
212 .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
213 },
214 .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
215 .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
216 .rasterizationSamples = 1,
217 .sampleShadingEnable = false,
218 .pSampleMask = NULL,
219 .alphaToCoverageEnable = false,
220 .alphaToOneEnable = false,
221 },
222 .pColorBlendState = &(VkPipelineColorBlendStateCreateInfo) {
223 .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
224 .logicOpEnable = false,
225 .attachmentCount = 2,
226 .pAttachments = (VkPipelineColorBlendAttachmentState []) {
227 {
228 .colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
229 VK_COLOR_COMPONENT_G_BIT |
230 VK_COLOR_COMPONENT_B_BIT |
231 VK_COLOR_COMPONENT_A_BIT,
232 },
233 {
234 .colorWriteMask = 0,
235
236 }
237 },
238 },
239 .pDynamicState = NULL,
240 .renderPass = device->meta_state.resolve.pass,
241 .subpass = 0,
242 },
243 &(struct radv_graphics_pipeline_create_info) {
244 .use_rectlist = true,
245 .custom_blend_mode = V_028808_CB_RESOLVE,
246 },
247 &device->meta_state.alloc,
248 &device->meta_state.resolve.pipeline);
249 if (result != VK_SUCCESS)
250 goto cleanup;
251
252 goto cleanup;
253
254 cleanup:
255 ralloc_free(fs_module.nir);
256 return result;
257 }
258
259 void
260 radv_device_finish_meta_resolve_state(struct radv_device *device)
261 {
262 struct radv_meta_state *state = &device->meta_state;
263 VkDevice device_h = radv_device_to_handle(device);
264 VkRenderPass pass_h = device->meta_state.resolve.pass;
265 const VkAllocationCallbacks *alloc = &device->meta_state.alloc;
266
267 if (pass_h)
268 radv_DestroyRenderPass(device_h, pass_h,
269 &device->meta_state.alloc);
270
271 VkPipeline pipeline_h = state->resolve.pipeline;
272 if (pipeline_h) {
273 radv_DestroyPipeline(device_h, pipeline_h, alloc);
274 }
275 }
276
/* Build the resolve meta state: render pass plus graphics pipeline.
 *
 * The vertex shader NIR is only needed during pipeline creation, so it
 * is freed on every exit path (the `cleanup` label); on failure the
 * partially-created objects are torn down first via the finish function,
 * which tolerates NULL handles.
 *
 * Returns VK_SUCCESS, or an error from pass/pipeline creation (or
 * VK_ERROR_OUT_OF_HOST_MEMORY if building the VS NIR failed).
 */
VkResult
radv_device_init_meta_resolve_state(struct radv_device *device)
{
	VkResult res = VK_SUCCESS;

	zero(device->meta_state.resolve);

	struct radv_shader_module vs_module = { .nir = build_nir_vs() };
	if (!vs_module.nir) {
		/* XXX: Need more accurate error */
		res = VK_ERROR_OUT_OF_HOST_MEMORY;
		goto fail;
	}

	res = create_pass(device);
	if (res != VK_SUCCESS)
		goto fail;

	VkShaderModule vs_module_h = radv_shader_module_to_handle(&vs_module);
	res = create_pipeline(device, vs_module_h);
	if (res != VK_SUCCESS)
		goto fail;

	/* Success: skip the teardown, but still free the VS NIR. */
	goto cleanup;

fail:
	radv_device_finish_meta_resolve_state(device);

cleanup:
	ralloc_free(vs_module.nir);

	return res;
}
310
/* Record the rectlist draw that performs a hardware resolve inside the
 * currently-bound render pass/framebuffer.
 *
 * dest_offset/resolve_extent give the destination rectangle in texels.
 * Assumes the resolve render pass is active and its two attachments
 * (MSAA source, single-sample dest) are bound; the CB_RESOLVE pipeline
 * does the actual resolve work.
 */
static void
emit_resolve(struct radv_cmd_buffer *cmd_buffer,
	     const VkOffset2D *dest_offset,
	     const VkExtent2D *resolve_extent)
{
	struct radv_device *device = cmd_buffer->device;
	VkCommandBuffer cmd_buffer_h = radv_cmd_buffer_to_handle(cmd_buffer);
	uint32_t offset;
	/* Three corners of a 3DPRIM_RECTLIST covering the dest rect:
	 * top-left, bottom-left, top-right (the fourth corner is implied).
	 */
	const struct vertex_attrs vertex_data[3] = {
		{
			.position = {
				dest_offset->x,
				dest_offset->y,
			},
		},
		{
			.position = {
				dest_offset->x,
				dest_offset->y + resolve_extent->height,
			},
		},
		{
			.position = {
				dest_offset->x + resolve_extent->width,
				dest_offset->y,
			},
		},
	};

	/* Make prior CB writes to the source visible before the resolve
	 * draw reads them (the subpass cache-flush handling this patch
	 * adds — NOTE(review): exact flush semantics are defined by
	 * si_cmd_buffer; confirm there). */
	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
	/* NOTE(review): upload result is ignored; presumably the upload BO
	 * is guaranteed large enough — confirm. */
	radv_cmd_buffer_upload_data(cmd_buffer, sizeof(vertex_data), 16, vertex_data, &offset);
	struct radv_buffer vertex_buffer = {
		.device = device,
		.size = sizeof(vertex_data),
		.bo = cmd_buffer->upload.upload_bo,
		.offset = offset,
	};

	VkBuffer vertex_buffer_h = radv_buffer_to_handle(&vertex_buffer);

	radv_CmdBindVertexBuffers(cmd_buffer_h,
				  /*firstBinding*/ 0,
				  /*bindingCount*/ 1,
				  (VkBuffer[]) { vertex_buffer_h },
				  (VkDeviceSize[]) { 0 });

	VkPipeline pipeline_h = device->meta_state.resolve.pipeline;
	RADV_FROM_HANDLE(radv_pipeline, pipeline, pipeline_h);

	/* Avoid a redundant bind if the resolve pipeline is already
	 * current. */
	if (cmd_buffer->state.pipeline != pipeline) {
		radv_CmdBindPipeline(cmd_buffer_h, VK_PIPELINE_BIND_POINT_GRAPHICS,
				     pipeline_h);
	}

	radv_CmdDraw(cmd_buffer_h, 3, 1, 0, 0);
	/* Flush again after the draw so the resolved destination is
	 * visible to whatever follows, and emit the flush immediately. */
	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
	si_emit_cache_flush(cmd_buffer);
}
369
/* vkCmdResolveImage entry point.
 *
 * Uses the fixed-function CB resolve path only for a single, full-image,
 * offset-free region; everything else falls back to the compute resolve.
 * The graphics path builds a temporary render pass instance (framebuffer
 * + image views) per region/layer and draws a rectlist with the
 * CB_RESOLVE pipeline.
 */
void radv_CmdResolveImage(
	VkCommandBuffer                             cmd_buffer_h,
	VkImage                                     src_image_h,
	VkImageLayout                               src_image_layout,
	VkImage                                     dest_image_h,
	VkImageLayout                               dest_image_layout,
	uint32_t                                    region_count,
	const VkImageResolve*                       regions)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, cmd_buffer_h);
	RADV_FROM_HANDLE(radv_image, src_image, src_image_h);
	RADV_FROM_HANDLE(radv_image, dest_image, dest_image_h);
	struct radv_device *device = cmd_buffer->device;
	struct radv_meta_saved_state saved_state;
	VkDevice device_h = radv_device_to_handle(device);
	bool use_compute_resolve = false;

	/* we can use the hw resolve only for single full resolves */
	if (region_count == 1) {
		if (regions[0].srcOffset.x ||
		    regions[0].srcOffset.y ||
		    regions[0].srcOffset.z)
			use_compute_resolve = true;
		if (regions[0].dstOffset.x ||
		    regions[0].dstOffset.y ||
		    regions[0].dstOffset.z)
			use_compute_resolve = true;

		if (regions[0].extent.width != src_image->extent.width ||
		    regions[0].extent.height != src_image->extent.height ||
		    regions[0].extent.depth != src_image->extent.depth)
			use_compute_resolve = true;
	} else
		use_compute_resolve = true;

	if (use_compute_resolve) {
		/* Partial/offset/multi-region resolves go through the
		 * compute shader path. */
		radv_meta_resolve_compute_image(cmd_buffer,
						src_image,
						src_image_layout,
						dest_image,
						dest_image_layout,
						region_count, regions);
		return;
	}

	/* Save graphics state (pipeline, descriptors, viewport/scissor)
	 * so the meta draws below don't clobber the app's state; restored
	 * at the end of the function. */
	radv_meta_save_graphics_reset_vport_scissor(&saved_state, cmd_buffer);

	assert(src_image->samples > 1);
	assert(dest_image->samples == 1);

	if (src_image->samples >= 16) {
		/* See commit aa3f9aaf31e9056a255f9e0472ebdfdaa60abe54 for the
		 * glBlitFramebuffer workaround for samples >= 16.
		 */
		radv_finishme("vkCmdResolveImage: need interpolation workaround when "
			      "samples >= 16");
	}

	if (src_image->array_size > 1)
		radv_finishme("vkCmdResolveImage: multisample array images");

	/* DCC-compressed destination must be (re)initialized before the
	 * resolve hardware writes it. */
	if (dest_image->surface.dcc_size) {
		radv_initialize_dcc(cmd_buffer, dest_image, 0xffffffff);
	}
	for (uint32_t r = 0; r < region_count; ++r) {
		const VkImageResolve *region = &regions[r];

		/* From the Vulkan 1.0 spec:
		 *
		 *    - The aspectMask member of srcSubresource and dstSubresource must
		 *      only contain VK_IMAGE_ASPECT_COLOR_BIT
		 *
		 *    - The layerCount member of srcSubresource and dstSubresource must
		 *      match
		 */
		assert(region->srcSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
		assert(region->dstSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
		assert(region->srcSubresource.layerCount ==
		       region->dstSubresource.layerCount);

		const uint32_t src_base_layer =
			radv_meta_get_iview_layer(src_image, &region->srcSubresource,
						  &region->srcOffset);

		const uint32_t dest_base_layer =
			radv_meta_get_iview_layer(dest_image, &region->dstSubresource,
						  &region->dstOffset);

		/**
		 * From Vulkan 1.0.6 spec: 18.6 Resolving Multisample Images
		 *
		 *    extent is the size in texels of the source image to resolve in width,
		 *    height and depth. 1D images use only x and width. 2D images use x, y,
		 *    width and height. 3D images use x, y, z, width, height and depth.
		 *
		 *    srcOffset and dstOffset select the initial x, y, and z offsets in
		 *    texels of the sub-regions of the source and destination image data.
		 *    extent is the size in texels of the source image to resolve in width,
		 *    height and depth. 1D images use only x and width. 2D images use x, y,
		 *    width and height. 3D images use x, y, z, width, height and depth.
		 */
		const struct VkExtent3D extent =
			radv_sanitize_image_extent(src_image->type, region->extent);
		const struct VkOffset3D dstOffset =
			radv_sanitize_image_offset(dest_image->type, region->dstOffset);


		/* One render-pass instance per layer: the pass has exactly
		 * two single-layer attachments (source view, dest view). */
		for (uint32_t layer = 0; layer < region->srcSubresource.layerCount;
		     ++layer) {

			struct radv_image_view src_iview;
			radv_image_view_init(&src_iview, cmd_buffer->device,
					     &(VkImageViewCreateInfo) {
						     .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
						     .image = src_image_h,
						     .viewType = radv_meta_get_view_type(src_image),
						     .format = src_image->vk_format,
						     .subresourceRange = {
							     .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
							     .baseMipLevel = region->srcSubresource.mipLevel,
							     .levelCount = 1,
							     .baseArrayLayer = src_base_layer + layer,
							     .layerCount = 1,
						     },
					     },
					     cmd_buffer, VK_IMAGE_USAGE_SAMPLED_BIT);

			struct radv_image_view dest_iview;
			radv_image_view_init(&dest_iview, cmd_buffer->device,
					     &(VkImageViewCreateInfo) {
						     .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
						     .image = dest_image_h,
						     .viewType = radv_meta_get_view_type(dest_image),
						     .format = dest_image->vk_format,
						     .subresourceRange = {
							     .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
							     .baseMipLevel = region->dstSubresource.mipLevel,
							     .levelCount = 1,
							     .baseArrayLayer = dest_base_layer + layer,
							     .layerCount = 1,
						     },
					     },
					     cmd_buffer, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);

			VkFramebuffer fb_h;
			/* NOTE(review): CreateFramebuffer result is ignored;
			 * an OOM here would pass a garbage handle onward. */
			radv_CreateFramebuffer(device_h,
					       &(VkFramebufferCreateInfo) {
						       .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
						       .attachmentCount = 2,
						       .pAttachments = (VkImageView[]) {
							       radv_image_view_to_handle(&src_iview),
							       radv_image_view_to_handle(&dest_iview),
						       },
						       .width = radv_minify(dest_image->extent.width,
									    region->dstSubresource.mipLevel),
						       .height = radv_minify(dest_image->extent.height,
									     region->dstSubresource.mipLevel),
						       .layers = 1
					       },
					       &cmd_buffer->pool->alloc,
					       &fb_h);

			radv_CmdBeginRenderPass(cmd_buffer_h,
						&(VkRenderPassBeginInfo) {
							.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
							.renderPass = device->meta_state.resolve.pass,
							.framebuffer = fb_h,
							.renderArea = {
								.offset = {
									dstOffset.x,
									dstOffset.y,
								},
								.extent = {
									extent.width,
									extent.height,
								}
							},
							.clearValueCount = 0,
							.pClearValues = NULL,
						},
						VK_SUBPASS_CONTENTS_INLINE);

			emit_resolve(cmd_buffer,
				     &(VkOffset2D) {
					     .x = dstOffset.x,
					     .y = dstOffset.y,
				     },
				     &(VkExtent2D) {
					     .width = extent.width,
					     .height = extent.height,
				     });

			radv_CmdEndRenderPass(cmd_buffer_h);

			radv_DestroyFramebuffer(device_h, fb_h,
						&cmd_buffer->pool->alloc);
		}
	}

	radv_meta_restore(&saved_state, cmd_buffer);
}
572
573 /**
574 * Emit any needed resolves for the current subpass.
575 */
576 void
577 radv_cmd_buffer_resolve_subpass(struct radv_cmd_buffer *cmd_buffer)
578 {
579 struct radv_framebuffer *fb = cmd_buffer->state.framebuffer;
580 const struct radv_subpass *subpass = cmd_buffer->state.subpass;
581 struct radv_meta_saved_state saved_state;
582
583 /* FINISHME(perf): Skip clears for resolve attachments.
584 *
585 * From the Vulkan 1.0 spec:
586 *
587 * If the first use of an attachment in a render pass is as a resolve
588 * attachment, then the loadOp is effectively ignored as the resolve is
589 * guaranteed to overwrite all pixels in the render area.
590 */
591
592 if (!subpass->has_resolve)
593 return;
594
595 radv_meta_save_graphics_reset_vport_scissor(&saved_state, cmd_buffer);
596
597 for (uint32_t i = 0; i < subpass->color_count; ++i) {
598 VkAttachmentReference src_att = subpass->color_attachments[i];
599 VkAttachmentReference dest_att = subpass->resolve_attachments[i];
600 struct radv_image *dst_img = cmd_buffer->state.framebuffer->attachments[dest_att.attachment].attachment->image;
601 if (dest_att.attachment == VK_ATTACHMENT_UNUSED)
602 continue;
603
604 if (dst_img->surface.dcc_size) {
605 radv_initialize_dcc(cmd_buffer, dst_img, 0xffffffff);
606 cmd_buffer->state.attachments[dest_att.attachment].current_layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
607 }
608
609 struct radv_subpass resolve_subpass = {
610 .color_count = 2,
611 .color_attachments = (VkAttachmentReference[]) { src_att, dest_att },
612 .depth_stencil_attachment = { .attachment = VK_ATTACHMENT_UNUSED },
613 };
614
615 radv_cmd_buffer_set_subpass(cmd_buffer, &resolve_subpass, false);
616
617 /* Subpass resolves must respect the render area. We can ignore the
618 * render area here because vkCmdBeginRenderPass set the render area
619 * with 3DSTATE_DRAWING_RECTANGLE.
620 *
621 * XXX(chadv): Does the hardware really respect
622 * 3DSTATE_DRAWING_RECTANGLE when draing a 3DPRIM_RECTLIST?
623 */
624 emit_resolve(cmd_buffer,
625 &(VkOffset2D) { 0, 0 },
626 &(VkExtent2D) { fb->width, fb->height });
627 }
628
629 cmd_buffer->state.subpass = subpass;
630 radv_meta_restore(&saved_state, cmd_buffer);
631 }