/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>

#include "radv_meta.h"
#include "radv_private.h"
#include "nir/nir_builder.h"
#include "sid.h"

/**
 * Vertex attributes used by all pipelines.
 */
struct vertex_attrs {
	float position[2]; /**< position in pixels; the quad is drawn as a rectlist (3DPRIM_RECTLIST) */
};

/* passthrough vertex shader */
static nir_shader *
build_nir_vs(void)
{
	const struct glsl_type *vec4 = glsl_vec4_type();

	nir_builder b;
	nir_variable *a_position;
	nir_variable *v_position;

	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
	b.shader->info->name = ralloc_strdup(b.shader, "meta_resolve_vs");

	a_position = nir_variable_create(b.shader, nir_var_shader_in, vec4,
					 "a_position");
	a_position->data.location = VERT_ATTRIB_GENERIC0;

	v_position = nir_variable_create(b.shader, nir_var_shader_out, vec4,
					 "gl_Position");
	v_position->data.location = VARYING_SLOT_POS;

	nir_copy_var(&b, v_position, a_position);

	return b.shader;
}
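
/* The vertex shader copies its input attribute to gl_Position
 * untransformed, and emit_resolve() below uploads raw pixel coordinates,
 * so the rectlist draw appears to consume window-space positions directly
 * rather than clip-space ones.
 */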

/* The fragment shader only writes a constant color. With the CB resolve
 * blend mode used below the color export is presumably ignored (the color
 * block performs the resolve itself), but a fragment shader with a color
 * output is still needed to keep the pipeline well formed.
 */
static nir_shader *
build_nir_fs(void)
{
	const struct glsl_type *vec4 = glsl_vec4_type();
	nir_builder b;
	nir_variable *f_color; /* vec4, fragment output color */

	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
	b.shader->info->name = ralloc_strdup(b.shader, "meta_resolve_fs");

	f_color = nir_variable_create(b.shader, nir_var_shader_out, vec4,
				      "f_color");
	f_color->data.location = FRAG_RESULT_DATA0;
	nir_store_var(&b, f_color, nir_imm_vec4(&b, 0.0, 0.0, 0.0, 1.0), 0xf);

	return b.shader;
}

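/* The meta render pass binds both images as color attachments of a single
 * subpass: attachment 0 is the multisampled source, kept in the GENERAL
 * layout, and attachment 1 is the single-sample destination. No layout
 * transitions or clears are wanted here, so everything is LOAD/STORE with
 * matching initial and final layouts.
 */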
static VkResult
create_pass(struct radv_device *device)
{
	VkResult result;
	VkDevice device_h = radv_device_to_handle(device);
	const VkAllocationCallbacks *alloc = &device->meta_state.alloc;
	VkAttachmentDescription attachments[2];
	int i;

	for (i = 0; i < 2; i++) {
		attachments[i].flags = 0;
		attachments[i].format = VK_FORMAT_UNDEFINED;
		attachments[i].samples = VK_SAMPLE_COUNT_1_BIT;
		attachments[i].loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
		attachments[i].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
		attachments[i].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
		attachments[i].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
	}
	attachments[0].initialLayout = VK_IMAGE_LAYOUT_GENERAL;
	attachments[0].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
	attachments[1].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
	attachments[1].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

	result = radv_CreateRenderPass(device_h,
		&(VkRenderPassCreateInfo) {
			.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
			.attachmentCount = 2,
			.pAttachments = attachments,
			.subpassCount = 1,
			.pSubpasses = &(VkSubpassDescription) {
				.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
				.inputAttachmentCount = 0,
				.colorAttachmentCount = 2,
				.pColorAttachments = (VkAttachmentReference[]) {
					{
						.attachment = 0,
						.layout = VK_IMAGE_LAYOUT_GENERAL,
					},
					{
						.attachment = 1,
						.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
					},
				},
				.pResolveAttachments = NULL,
				.pDepthStencilAttachment = &(VkAttachmentReference) {
					.attachment = VK_ATTACHMENT_UNUSED,
				},
				.preserveAttachmentCount = 0,
				.pPreserveAttachments = NULL,
			},
			.dependencyCount = 0,
		},
		alloc,
		&device->meta_state.resolve.pass);

	return result;
}

static VkResult
create_pipeline(struct radv_device *device,
		VkShaderModule vs_module_h)
{
	VkResult result;
	VkDevice device_h = radv_device_to_handle(device);

	struct radv_shader_module fs_module = {
		.nir = build_nir_fs(),
	};

	if (!fs_module.nir) {
		/* XXX: Need more accurate error */
		result = VK_ERROR_OUT_OF_HOST_MEMORY;
		goto cleanup;
	}

	result = radv_graphics_pipeline_create(device_h,
		radv_pipeline_cache_to_handle(&device->meta_state.cache),
		&(VkGraphicsPipelineCreateInfo) {
			.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
			.stageCount = 2,
			.pStages = (VkPipelineShaderStageCreateInfo[]) {
				{
					.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
					.stage = VK_SHADER_STAGE_VERTEX_BIT,
					.module = vs_module_h,
					.pName = "main",
				},
				{
					.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
					.stage = VK_SHADER_STAGE_FRAGMENT_BIT,
					.module = radv_shader_module_to_handle(&fs_module),
					.pName = "main",
				},
			},
			.pVertexInputState = &(VkPipelineVertexInputStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
				.vertexBindingDescriptionCount = 1,
				.pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) {
					{
						.binding = 0,
						.stride = sizeof(struct vertex_attrs),
						.inputRate = VK_VERTEX_INPUT_RATE_VERTEX
					},
				},
				.vertexAttributeDescriptionCount = 1,
				.pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) {
					{
						/* Position */
						.location = 0,
						.binding = 0,
						.format = VK_FORMAT_R32G32_SFLOAT,
						.offset = offsetof(struct vertex_attrs, position),
					},
				},
			},
			.pInputAssemblyState = &(VkPipelineInputAssemblyStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
				/* Replaced by a rectlist draw via .use_rectlist below. */
				.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
				.primitiveRestartEnable = false,
			},
			.pViewportState = &(VkPipelineViewportStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
				.viewportCount = 0,
				.scissorCount = 0,
			},
			.pRasterizationState = &(VkPipelineRasterizationStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
				.depthClampEnable = false,
				.rasterizerDiscardEnable = false,
				.polygonMode = VK_POLYGON_MODE_FILL,
				.cullMode = VK_CULL_MODE_NONE,
				.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
				.lineWidth = 1.0f,
			},
			.pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
				.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT,
				.sampleShadingEnable = false,
				.pSampleMask = NULL,
				.alphaToCoverageEnable = false,
				.alphaToOneEnable = false,
			},
			.pColorBlendState = &(VkPipelineColorBlendStateCreateInfo) {
				.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
				.logicOpEnable = false,
				.attachmentCount = 2,
				.pAttachments = (VkPipelineColorBlendAttachmentState []) {
					{
						.colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
								  VK_COLOR_COMPONENT_G_BIT |
								  VK_COLOR_COMPONENT_B_BIT |
								  VK_COLOR_COMPONENT_A_BIT,
					},
					{
						.colorWriteMask = 0,
					}
				},
			},
			.pDynamicState = NULL,
			.renderPass = device->meta_state.resolve.pass,
			.subpass = 0,
		},
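		/* radv-private pipeline arguments: draw the quad as a rectlist
		 * and program the color block into its dedicated resolve mode
		 * (V_028808_CB_RESOLVE), which presumably reads MRT0 (the
		 * multisampled source) and writes the averaged result to MRT1.
		 */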
		&(struct radv_graphics_pipeline_create_info) {
			.use_rectlist = true,
			.custom_blend_mode = V_028808_CB_RESOLVE,
		},
		&device->meta_state.alloc,
		&device->meta_state.resolve.pipeline);

cleanup:
	ralloc_free(fs_module.nir);
	return result;
}

void
radv_device_finish_meta_resolve_state(struct radv_device *device)
{
	struct radv_meta_state *state = &device->meta_state;
	VkDevice device_h = radv_device_to_handle(device);
	VkRenderPass pass_h = device->meta_state.resolve.pass;
	const VkAllocationCallbacks *alloc = &device->meta_state.alloc;

	if (pass_h)
		radv_DestroyRenderPass(device_h, pass_h, alloc);

	VkPipeline pipeline_h = state->resolve.pipeline;
	if (pipeline_h) {
		radv_DestroyPipeline(device_h, pipeline_h, alloc);
	}
}

VkResult
radv_device_init_meta_resolve_state(struct radv_device *device)
{
	VkResult res = VK_SUCCESS;

	zero(device->meta_state.resolve);

	struct radv_shader_module vs_module = { .nir = build_nir_vs() };
	if (!vs_module.nir) {
		/* XXX: Need more accurate error */
		res = VK_ERROR_OUT_OF_HOST_MEMORY;
		goto fail;
	}

	res = create_pass(device);
	if (res != VK_SUCCESS)
		goto fail;

	VkShaderModule vs_module_h = radv_shader_module_to_handle(&vs_module);
	res = create_pipeline(device, vs_module_h);
	if (res != VK_SUCCESS)
		goto fail;

	goto cleanup;

fail:
	radv_device_finish_meta_resolve_state(device);

cleanup:
	ralloc_free(vs_module.nir);

	return res;
}

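/* Record the actual resolve draw: a single rectlist covering the
 * destination rectangle. Only three corners are uploaded; the rectlist
 * primitive derives the fourth.
 */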
static void
emit_resolve(struct radv_cmd_buffer *cmd_buffer,
	     const VkOffset2D *dest_offset,
	     const VkExtent2D *resolve_extent)
{
	struct radv_device *device = cmd_buffer->device;
	VkCommandBuffer cmd_buffer_h = radv_cmd_buffer_to_handle(cmd_buffer);
	uint32_t offset;
	const struct vertex_attrs vertex_data[3] = {
		{
			.position = {
				dest_offset->x,
				dest_offset->y,
			},
		},
		{
			.position = {
				dest_offset->x,
				dest_offset->y + resolve_extent->height,
			},
		},
		{
			.position = {
				dest_offset->x + resolve_extent->width,
				dest_offset->y,
			},
		},
	};

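	/* Flush and invalidate the CB caches before the draw so that prior
	 * rendering to the source is visible to the resolve, and again after
	 * the draw (below) so the resolved pixels are visible to later work.
	 * The exact cache behavior is hardware specific; this is the apparent
	 * intent of the flags.
	 */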
	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
	radv_cmd_buffer_upload_data(cmd_buffer, sizeof(vertex_data), 16, vertex_data, &offset);
	struct radv_buffer vertex_buffer = {
		.device = device,
		.size = sizeof(vertex_data),
		.bo = cmd_buffer->upload.upload_bo,
		.offset = offset,
	};

	VkBuffer vertex_buffer_h = radv_buffer_to_handle(&vertex_buffer);

	radv_CmdBindVertexBuffers(cmd_buffer_h,
				  /*firstBinding*/ 0,
				  /*bindingCount*/ 1,
				  (VkBuffer[]) { vertex_buffer_h },
				  (VkDeviceSize[]) { 0 });

	VkPipeline pipeline_h = device->meta_state.resolve.pipeline;
	RADV_FROM_HANDLE(radv_pipeline, pipeline, pipeline_h);

	if (cmd_buffer->state.pipeline != pipeline) {
		radv_CmdBindPipeline(cmd_buffer_h, VK_PIPELINE_BIND_POINT_GRAPHICS,
				     pipeline_h);
	}

	radv_CmdDraw(cmd_buffer_h, 3, 1, 0, 0);
	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
}

void radv_CmdResolveImage(
	VkCommandBuffer cmd_buffer_h,
	VkImage src_image_h,
	VkImageLayout src_image_layout,
	VkImage dest_image_h,
	VkImageLayout dest_image_layout,
	uint32_t region_count,
	const VkImageResolve* regions)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, cmd_buffer_h);
	RADV_FROM_HANDLE(radv_image, src_image, src_image_h);
	RADV_FROM_HANDLE(radv_image, dest_image, dest_image_h);
	struct radv_device *device = cmd_buffer->device;
	struct radv_meta_saved_state saved_state;
	VkDevice device_h = radv_device_to_handle(device);
	bool use_compute_resolve = false;

	/* The CB hardware resolve handles only a single region that covers
	 * the whole surface with no source or destination offset; anything
	 * else takes the compute fallback.
	 */
	if (region_count == 1) {
		if (regions[0].srcOffset.x ||
		    regions[0].srcOffset.y ||
		    regions[0].srcOffset.z)
			use_compute_resolve = true;
		if (regions[0].dstOffset.x ||
		    regions[0].dstOffset.y ||
		    regions[0].dstOffset.z)
			use_compute_resolve = true;

		if (regions[0].extent.width != src_image->extent.width ||
		    regions[0].extent.height != src_image->extent.height ||
		    regions[0].extent.depth != src_image->extent.depth)
			use_compute_resolve = true;
	} else {
		use_compute_resolve = true;
	}

	if (use_compute_resolve) {
		radv_meta_resolve_compute_image(cmd_buffer,
						src_image,
						src_image_layout,
						dest_image,
						dest_image_layout,
						region_count, regions);
		return;
	}

	radv_meta_save_graphics_reset_vport_scissor(&saved_state, cmd_buffer);

	assert(src_image->samples > 1);
	assert(dest_image->samples == 1);

	if (src_image->samples >= 16) {
		/* See commit aa3f9aaf31e9056a255f9e0472ebdfdaa60abe54 for the
		 * glBlitFramebuffer workaround for samples >= 16.
		 */
		radv_finishme("vkCmdResolveImage: need interpolation workaround when "
			      "samples >= 16");
	}

	if (src_image->array_size > 1)
		radv_finishme("vkCmdResolveImage: multisample array images");

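	/* A DCC-compressed destination must have its metadata put into a
	 * known state before the color block writes through it; 0xffffffff
	 * presumably resets every DCC word to the "uncompressed" encoding.
	 */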
	if (dest_image->surface.dcc_size) {
		radv_initialize_dcc(cmd_buffer, dest_image, 0xffffffff);
	}
	for (uint32_t r = 0; r < region_count; ++r) {
		const VkImageResolve *region = &regions[r];

		/* From the Vulkan 1.0 spec:
		 *
		 *    - The aspectMask member of srcSubresource and dstSubresource must
		 *      only contain VK_IMAGE_ASPECT_COLOR_BIT
		 *
		 *    - The layerCount member of srcSubresource and dstSubresource must
		 *      match
		 */
		assert(region->srcSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
		assert(region->dstSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
		assert(region->srcSubresource.layerCount ==
		       region->dstSubresource.layerCount);

		const uint32_t src_base_layer =
			radv_meta_get_iview_layer(src_image, &region->srcSubresource,
						  &region->srcOffset);

		const uint32_t dest_base_layer =
			radv_meta_get_iview_layer(dest_image, &region->dstSubresource,
						  &region->dstOffset);

		/**
		 * From Vulkan 1.0.6 spec: 18.6 Resolving Multisample Images
		 *
		 *    srcOffset and dstOffset select the initial x, y, and z offsets in
		 *    texels of the sub-regions of the source and destination image data.
		 *    extent is the size in texels of the source image to resolve in width,
		 *    height and depth. 1D images use only x and width. 2D images use x, y,
		 *    width and height. 3D images use x, y, z, width, height and depth.
		 */
		const struct VkExtent3D extent =
			radv_sanitize_image_extent(src_image->type, region->extent);
		const struct VkOffset3D dstOffset =
			radv_sanitize_image_offset(dest_image->type, region->dstOffset);

		for (uint32_t layer = 0; layer < region->srcSubresource.layerCount;
		     ++layer) {

			struct radv_image_view src_iview;
			radv_image_view_init(&src_iview, cmd_buffer->device,
				&(VkImageViewCreateInfo) {
					.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
					.image = src_image_h,
					.viewType = radv_meta_get_view_type(src_image),
					.format = src_image->vk_format,
					.subresourceRange = {
						.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
						.baseMipLevel = region->srcSubresource.mipLevel,
						.levelCount = 1,
						.baseArrayLayer = src_base_layer + layer,
						.layerCount = 1,
					},
				},
				cmd_buffer, VK_IMAGE_USAGE_SAMPLED_BIT);

			struct radv_image_view dest_iview;
			radv_image_view_init(&dest_iview, cmd_buffer->device,
				&(VkImageViewCreateInfo) {
					.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
					.image = dest_image_h,
					.viewType = radv_meta_get_view_type(dest_image),
					.format = dest_image->vk_format,
					.subresourceRange = {
						.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
						.baseMipLevel = region->dstSubresource.mipLevel,
						.levelCount = 1,
						.baseArrayLayer = dest_base_layer + layer,
						.layerCount = 1,
					},
				},
				cmd_buffer, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);

			VkFramebuffer fb_h;
			radv_CreateFramebuffer(device_h,
				&(VkFramebufferCreateInfo) {
					.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
					.attachmentCount = 2,
					.pAttachments = (VkImageView[]) {
						radv_image_view_to_handle(&src_iview),
						radv_image_view_to_handle(&dest_iview),
					},
					.width = radv_minify(dest_image->extent.width,
							     region->dstSubresource.mipLevel),
					.height = radv_minify(dest_image->extent.height,
							      region->dstSubresource.mipLevel),
					.layers = 1
				},
				&cmd_buffer->pool->alloc,
				&fb_h);

			radv_CmdBeginRenderPass(cmd_buffer_h,
				&(VkRenderPassBeginInfo) {
					.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
					.renderPass = device->meta_state.resolve.pass,
					.framebuffer = fb_h,
					.renderArea = {
						.offset = {
							dstOffset.x,
							dstOffset.y,
						},
						.extent = {
							extent.width,
							extent.height,
						}
					},
					.clearValueCount = 0,
					.pClearValues = NULL,
				},
				VK_SUBPASS_CONTENTS_INLINE);

			emit_resolve(cmd_buffer,
				     &(VkOffset2D) {
					     .x = dstOffset.x,
					     .y = dstOffset.y,
				     },
				     &(VkExtent2D) {
					     .width = extent.width,
					     .height = extent.height,
				     });

			radv_CmdEndRenderPass(cmd_buffer_h);

			radv_DestroyFramebuffer(device_h, fb_h,
						&cmd_buffer->pool->alloc);
		}
	}

	radv_meta_restore(&saved_state, cmd_buffer);
}

/**
 * Emit any needed resolves for the current subpass.
 */
void
radv_cmd_buffer_resolve_subpass(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_framebuffer *fb = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;
	struct radv_meta_saved_state saved_state;

	/* FINISHME(perf): Skip clears for resolve attachments.
	 *
	 * From the Vulkan 1.0 spec:
	 *
	 *    If the first use of an attachment in a render pass is as a resolve
	 *    attachment, then the loadOp is effectively ignored as the resolve is
	 *    guaranteed to overwrite all pixels in the render area.
	 */

	if (!subpass->has_resolve)
		return;

	radv_meta_save_graphics_reset_vport_scissor(&saved_state, cmd_buffer);

	for (uint32_t i = 0; i < subpass->color_count; ++i) {
		VkAttachmentReference src_att = subpass->color_attachments[i];
		VkAttachmentReference dest_att = subpass->resolve_attachments[i];

		/* Check for VK_ATTACHMENT_UNUSED before indexing the
		 * framebuffer attachment array with it.
		 */
		if (dest_att.attachment == VK_ATTACHMENT_UNUSED)
			continue;

		struct radv_image *dst_img =
			fb->attachments[dest_att.attachment].attachment->image;

		if (dst_img->surface.dcc_size) {
			radv_initialize_dcc(cmd_buffer, dst_img, 0xffffffff);
			cmd_buffer->state.attachments[dest_att.attachment].current_layout =
				VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
		}

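		/* Build a throwaway subpass that binds the source and the
		 * resolve destination as color attachments 0 and 1, matching
		 * the layout the CB_RESOLVE pipeline created above expects.
		 */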
		struct radv_subpass resolve_subpass = {
			.color_count = 2,
			.color_attachments = (VkAttachmentReference[]) { src_att, dest_att },
			.depth_stencil_attachment = { .attachment = VK_ATTACHMENT_UNUSED },
		};

		radv_cmd_buffer_set_subpass(cmd_buffer, &resolve_subpass, false);

		/* Subpass resolves must respect the render area. We can
		 * ignore the render area here because vkCmdBeginRenderPass
		 * set it up for us. (The 3DSTATE_DRAWING_RECTANGLE packet
		 * named below is Intel hardware; the comment is a holdover
		 * from the anv code this was derived from.)
		 *
		 * XXX(chadv): Does the hardware really respect
		 * 3DSTATE_DRAWING_RECTANGLE when drawing a 3DPRIM_RECTLIST?
		 */
		emit_resolve(cmd_buffer,
			     &(VkOffset2D) { 0, 0 },
			     &(VkExtent2D) { fb->width, fb->height });
	}

	cmd_buffer->state.subpass = subpass;
	radv_meta_restore(&saved_state, cmd_buffer);
}