radv: Don't use SRGB format for image stores during resolve.
[mesa.git] / src/amd/vulkan/radv_meta_resolve_cs.c
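The compute resolve path now creates its storage-image view of the destination with the non-sRGB counterpart of the destination format (vk_to_non_srgb_format() in radv_meta_resolve_compute_image below), so the image store itself never goes through an sRGB format. As a rough sketch of what such a format mapping might look like — the function name and the exact format list below are illustrative assumptions, since the real helper is defined outside this file:

/* Hypothetical sketch of an sRGB -> UNORM mapping; the real
 * vk_to_non_srgb_format() helper lives elsewhere in the driver and
 * may cover a different set of formats. */
static VkFormat
sketch_non_srgb_format(VkFormat format)
{
	switch (format) {
	case VK_FORMAT_R8_SRGB:              return VK_FORMAT_R8_UNORM;
	case VK_FORMAT_R8G8_SRGB:            return VK_FORMAT_R8G8_UNORM;
	case VK_FORMAT_R8G8B8A8_SRGB:        return VK_FORMAT_R8G8B8A8_UNORM;
	case VK_FORMAT_B8G8R8A8_SRGB:        return VK_FORMAT_B8G8R8A8_UNORM;
	case VK_FORMAT_A8B8G8R8_SRGB_PACK32: return VK_FORMAT_A8B8G8R8_UNORM_PACK32;
	default:                             return format;
	}
}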
/*
 * Copyright © 2016 Dave Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */


#include <assert.h>
#include <stdbool.h>

#include "radv_meta.h"
#include "radv_private.h"
#include "nir/nir_builder.h"
#include "sid.h"
#include "vk_format.h"

static nir_shader *
build_resolve_compute_shader(struct radv_device *dev, bool is_integer, bool is_srgb, int samples)
{
	nir_builder b;
	char name[64];
	const struct glsl_type *sampler_type = glsl_sampler_type(GLSL_SAMPLER_DIM_MS,
		false,
		false,
		GLSL_TYPE_FLOAT);
	const struct glsl_type *img_type = glsl_sampler_type(GLSL_SAMPLER_DIM_2D,
		false,
		false,
		GLSL_TYPE_FLOAT);
	snprintf(name, 64, "meta_resolve_cs-%d-%s", samples, is_integer ? "int" : (is_srgb ? "srgb" : "float"));
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, name);
	b.shader->info.cs.local_size[0] = 16;
	b.shader->info.cs.local_size[1] = 16;
	b.shader->info.cs.local_size[2] = 1;

	nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform,
		sampler_type, "s_tex");
	input_img->data.descriptor_set = 0;
	input_img->data.binding = 0;

	nir_variable *output_img = nir_variable_create(b.shader, nir_var_uniform,
		img_type, "out_img");
	output_img->data.descriptor_set = 0;
	output_img->data.binding = 1;
	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
		b.shader->info.cs.local_size[0],
		b.shader->info.cs.local_size[1],
		b.shader->info.cs.local_size[2], 0);

	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);

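	/* The 16 bytes of push constants hold two ivec2s: the source offset at
	 * byte 0 and the destination offset at byte 8. */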
	nir_intrinsic_instr *src_offset = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_push_constant);
	nir_intrinsic_set_base(src_offset, 0);
	nir_intrinsic_set_range(src_offset, 16);
	src_offset->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	src_offset->num_components = 2;
	nir_ssa_dest_init(&src_offset->instr, &src_offset->dest, 2, 32, "src_offset");
	nir_builder_instr_insert(&b, &src_offset->instr);

	nir_intrinsic_instr *dst_offset = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_push_constant);
	nir_intrinsic_set_base(dst_offset, 0);
	nir_intrinsic_set_range(dst_offset, 16);
	dst_offset->src[0] = nir_src_for_ssa(nir_imm_int(&b, 8));
	dst_offset->num_components = 2;
	nir_ssa_dest_init(&dst_offset->instr, &dst_offset->dest, 2, 32, "dst_offset");
	nir_builder_instr_insert(&b, &dst_offset->instr);

	nir_ssa_def *img_coord = nir_channels(&b, nir_iadd(&b, global_id, &src_offset->dest.ssa), 0x3);
	nir_variable *color = nir_local_variable_create(b.impl, glsl_vec4_type(), "color");

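	/* Resolve the samples into 'color' using the shared helper, which also
	 * handles the integer and sRGB variants. */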
	radv_meta_build_resolve_shader_core(&b, is_integer, is_srgb, samples,
		input_img, color, img_coord);

	nir_ssa_def *outval = nir_load_var(&b, color);
	nir_ssa_def *coord = nir_iadd(&b, global_id, &dst_offset->dest.ssa);
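	/* image_store sources: coordinate, sample index (left undefined since the
	 * destination is not multisampled), and the value to store. */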
	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_image_store);
	store->src[0] = nir_src_for_ssa(coord);
	store->src[1] = nir_src_for_ssa(nir_ssa_undef(&b, 1, 32));
	store->src[2] = nir_src_for_ssa(outval);
	store->variables[0] = nir_deref_var_create(store, output_img);
	nir_builder_instr_insert(&b, &store->instr);
	return b.shader;
}


static VkResult
create_layout(struct radv_device *device)
{
	VkResult result;
	/*
	 * Two descriptors: one for the image being sampled and
	 * one for the image being written.
	 */
	VkDescriptorSetLayoutCreateInfo ds_create_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
		.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
		.bindingCount = 2,
		.pBindings = (VkDescriptorSetLayoutBinding[]) {
			{
				.binding = 0,
				.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
			{
				.binding = 1,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
		}
	};

	result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
		&ds_create_info,
		&device->meta_state.alloc,
		&device->meta_state.resolve_compute.ds_layout);
	if (result != VK_SUCCESS)
		goto fail;


	VkPipelineLayoutCreateInfo pl_create_info = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		.setLayoutCount = 1,
		.pSetLayouts = &device->meta_state.resolve_compute.ds_layout,
		.pushConstantRangeCount = 1,
		.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 16},
	};

	result = radv_CreatePipelineLayout(radv_device_to_handle(device),
		&pl_create_info,
		&device->meta_state.alloc,
		&device->meta_state.resolve_compute.p_layout);
	if (result != VK_SUCCESS)
		goto fail;
	return VK_SUCCESS;
fail:
	return result;
}

static VkResult
create_resolve_pipeline(struct radv_device *device,
			int samples,
			bool is_integer,
			bool is_srgb,
			VkPipeline *pipeline)
{
	VkResult result;
	struct radv_shader_module cs = { .nir = NULL };

	cs.nir = build_resolve_compute_shader(device, is_integer, is_srgb, samples);

	/* compute shader */

	VkPipelineShaderStageCreateInfo pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.resolve_compute.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
		radv_pipeline_cache_to_handle(&device->meta_state.cache),
		1, &vk_pipeline_info, NULL,
		pipeline);
	if (result != VK_SUCCESS)
		goto fail;

	ralloc_free(cs.nir);
	return VK_SUCCESS;
fail:
	ralloc_free(cs.nir);
	return result;
}

VkResult
radv_device_init_meta_resolve_compute_state(struct radv_device *device)
{
	struct radv_meta_state *state = &device->meta_state;
	VkResult res;
	memset(&device->meta_state.resolve_compute, 0, sizeof(device->meta_state.resolve_compute));

	res = create_layout(device);
	if (res != VK_SUCCESS)
		return res;

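	/* Create one pipeline per sample count for each of the float, integer
	 * and sRGB resolve variants. */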
	for (uint32_t i = 0; i < MAX_SAMPLES_LOG2; ++i) {
		uint32_t samples = 1 << i;

		res = create_resolve_pipeline(device, samples, false, false,
			&state->resolve_compute.rc[i].pipeline);

		res = create_resolve_pipeline(device, samples, true, false,
			&state->resolve_compute.rc[i].i_pipeline);

		res = create_resolve_pipeline(device, samples, false, true,
			&state->resolve_compute.rc[i].srgb_pipeline);
	}

	return res;
}

void
radv_device_finish_meta_resolve_compute_state(struct radv_device *device)
{
	struct radv_meta_state *state = &device->meta_state;
	for (uint32_t i = 0; i < MAX_SAMPLES_LOG2; ++i) {
		radv_DestroyPipeline(radv_device_to_handle(device),
			state->resolve_compute.rc[i].pipeline,
			&state->alloc);

		radv_DestroyPipeline(radv_device_to_handle(device),
			state->resolve_compute.rc[i].i_pipeline,
			&state->alloc);

		radv_DestroyPipeline(radv_device_to_handle(device),
			state->resolve_compute.rc[i].srgb_pipeline,
			&state->alloc);
	}

	radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
		state->resolve_compute.ds_layout,
		&state->alloc);
	radv_DestroyPipelineLayout(radv_device_to_handle(device),
		state->resolve_compute.p_layout,
		&state->alloc);
}

static void
emit_resolve(struct radv_cmd_buffer *cmd_buffer,
	     struct radv_image_view *src_iview,
	     struct radv_image_view *dest_iview,
	     const VkOffset2D *src_offset,
	     const VkOffset2D *dest_offset,
	     const VkExtent2D *resolve_extent)
{
	struct radv_device *device = cmd_buffer->device;
	const uint32_t samples = src_iview->image->info.samples;
	const uint32_t samples_log2 = ffs(samples) - 1;
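	/* Bind the source as a sampled image and the destination as a storage
	 * image through a push descriptor set. */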
	radv_meta_push_descriptor_set(cmd_buffer,
		VK_PIPELINE_BIND_POINT_COMPUTE,
		device->meta_state.resolve_compute.p_layout,
		0, /* set */
		2, /* descriptorWriteCount */
		(VkWriteDescriptorSet[]) {
			{
				.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				.dstBinding = 0,
				.dstArrayElement = 0,
				.descriptorCount = 1,
				.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
				.pImageInfo = (VkDescriptorImageInfo[]) {
					{
						.sampler = VK_NULL_HANDLE,
						.imageView = radv_image_view_to_handle(src_iview),
						.imageLayout = VK_IMAGE_LAYOUT_GENERAL
					},
				}
			},
			{
				.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				.dstBinding = 1,
				.dstArrayElement = 0,
				.descriptorCount = 1,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
				.pImageInfo = (VkDescriptorImageInfo[]) {
					{
						.sampler = VK_NULL_HANDLE,
						.imageView = radv_image_view_to_handle(dest_iview),
						.imageLayout = VK_IMAGE_LAYOUT_GENERAL,
					},
				}
			}
		});

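	/* Pick the pipeline variant that matches the source format class. */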
	VkPipeline pipeline;
	if (vk_format_is_int(src_iview->image->vk_format))
		pipeline = device->meta_state.resolve_compute.rc[samples_log2].i_pipeline;
	else if (vk_format_is_srgb(src_iview->image->vk_format))
		pipeline = device->meta_state.resolve_compute.rc[samples_log2].srgb_pipeline;
	else
		pipeline = device->meta_state.resolve_compute.rc[samples_log2].pipeline;
	if (cmd_buffer->state.compute_pipeline != radv_pipeline_from_handle(pipeline)) {
		radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
	}

	unsigned push_constants[4] = {
		src_offset->x,
		src_offset->y,
		dest_offset->x,
		dest_offset->y,
	};
	radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
		device->meta_state.resolve_compute.p_layout,
		VK_SHADER_STAGE_COMPUTE_BIT, 0, 16,
		push_constants);
	radv_unaligned_dispatch(cmd_buffer, resolve_extent->width, resolve_extent->height, 1);
}

void radv_meta_resolve_compute_image(struct radv_cmd_buffer *cmd_buffer,
				     struct radv_image *src_image,
				     VkImageLayout src_image_layout,
				     struct radv_image *dest_image,
				     VkImageLayout dest_image_layout,
				     uint32_t region_count,
				     const VkImageResolve *regions)
{
	struct radv_meta_saved_compute_state saved_state;

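	/* Flush any fast clears on the source regions so the compute shader
	 * reads the actual color values. */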
	for (uint32_t r = 0; r < region_count; ++r) {
		const VkImageResolve *region = &regions[r];
		const uint32_t src_base_layer =
			radv_meta_get_iview_layer(src_image, &region->srcSubresource,
						  &region->srcOffset);
		VkImageSubresourceRange range;
		range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
		range.baseMipLevel = region->srcSubresource.mipLevel;
		range.levelCount = 1;
		range.baseArrayLayer = src_base_layer;
		range.layerCount = region->srcSubresource.layerCount;
		radv_fast_clear_flush_image_inplace(cmd_buffer, src_image, &range);
	}

	radv_meta_save_compute(&saved_state, cmd_buffer, 16);

	for (uint32_t r = 0; r < region_count; ++r) {
		const VkImageResolve *region = &regions[r];

		assert(region->srcSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
		assert(region->dstSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
		assert(region->srcSubresource.layerCount == region->dstSubresource.layerCount);

		const uint32_t src_base_layer =
			radv_meta_get_iview_layer(src_image, &region->srcSubresource,
						  &region->srcOffset);

		const uint32_t dest_base_layer =
			radv_meta_get_iview_layer(dest_image, &region->dstSubresource,
						  &region->dstOffset);

		const struct VkExtent3D extent =
			radv_sanitize_image_extent(src_image->type, region->extent);
		const struct VkOffset3D srcOffset =
			radv_sanitize_image_offset(src_image->type, region->srcOffset);
		const struct VkOffset3D dstOffset =
			radv_sanitize_image_offset(dest_image->type, region->dstOffset);

		for (uint32_t layer = 0; layer < region->srcSubresource.layerCount;
		     ++layer) {

			struct radv_image_view src_iview;
			radv_image_view_init(&src_iview, cmd_buffer->device,
				&(VkImageViewCreateInfo) {
					.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
					.image = radv_image_to_handle(src_image),
					.viewType = radv_meta_get_view_type(src_image),
					.format = src_image->vk_format,
					.subresourceRange = {
						.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
						.baseMipLevel = region->srcSubresource.mipLevel,
						.levelCount = 1,
						.baseArrayLayer = src_base_layer + layer,
						.layerCount = 1,
					},
				});

			struct radv_image_view dest_iview;
			radv_image_view_init(&dest_iview, cmd_buffer->device,
				&(VkImageViewCreateInfo) {
					.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
					.image = radv_image_to_handle(dest_image),
					.viewType = radv_meta_get_view_type(dest_image),
					.format = vk_to_non_srgb_format(dest_image->vk_format),
					.subresourceRange = {
						.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
						.baseMipLevel = region->dstSubresource.mipLevel,
						.levelCount = 1,
						.baseArrayLayer = dest_base_layer + layer,
						.layerCount = 1,
					},
				});

			emit_resolve(cmd_buffer,
				     &src_iview,
				     &dest_iview,
				     &(VkOffset2D) { srcOffset.x, srcOffset.y },
				     &(VkOffset2D) { dstOffset.x, dstOffset.y },
				     &(VkExtent2D) { extent.width, extent.height });
		}
	}
	radv_meta_restore_compute(&saved_state, cmd_buffer, 16);
}

/**
 * Emit any needed resolves for the current subpass.
 */
void
radv_cmd_buffer_resolve_subpass_cs(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_framebuffer *fb = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;
	struct radv_meta_saved_compute_state saved_state;
	/* FINISHME(perf): Skip clears for resolve attachments.
	 *
	 * From the Vulkan 1.0 spec:
	 *
	 *    If the first use of an attachment in a render pass is as a resolve
	 *    attachment, then the loadOp is effectively ignored as the resolve is
	 *    guaranteed to overwrite all pixels in the render area.
	 */

	if (!subpass->has_resolve)
		return;

	for (uint32_t i = 0; i < subpass->color_count; ++i) {
		VkAttachmentReference src_att = subpass->color_attachments[i];
		VkAttachmentReference dest_att = subpass->resolve_attachments[i];

		if (src_att.attachment == VK_ATTACHMENT_UNUSED ||
		    dest_att.attachment == VK_ATTACHMENT_UNUSED)
			continue;

		struct radv_image *dst_img = cmd_buffer->state.framebuffer->attachments[dest_att.attachment].attachment->image;
		struct radv_image_view *src_iview = cmd_buffer->state.framebuffer->attachments[src_att.attachment].attachment;

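		/* The compute path writes the destination with image stores, so make
		 * sure its DCC metadata is in a state that is valid for that before
		 * resolving. */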
		if (dst_img->surface.dcc_size) {
			radv_initialize_dcc(cmd_buffer, dst_img, 0xffffffff);
			cmd_buffer->state.attachments[dest_att.attachment].current_layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
		}

		VkImageSubresourceRange range;
		range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
		range.baseMipLevel = 0;
		range.levelCount = 1;
		range.baseArrayLayer = 0;
		range.layerCount = 1;
		radv_fast_clear_flush_image_inplace(cmd_buffer, src_iview->image, &range);
	}

	radv_meta_save_compute(&saved_state, cmd_buffer, 16);

	for (uint32_t i = 0; i < subpass->color_count; ++i) {
		VkAttachmentReference src_att = subpass->color_attachments[i];
		VkAttachmentReference dest_att = subpass->resolve_attachments[i];

		if (dest_att.attachment == VK_ATTACHMENT_UNUSED)
			continue;

		struct radv_image_view *src_iview = cmd_buffer->state.framebuffer->attachments[src_att.attachment].attachment;
		struct radv_image_view *dst_iview = cmd_buffer->state.framebuffer->attachments[dest_att.attachment].attachment;

		struct radv_subpass resolve_subpass = {
			.color_count = 1,
			.color_attachments = (VkAttachmentReference[]) { dest_att },
			.depth_stencil_attachment = { .attachment = VK_ATTACHMENT_UNUSED },
		};

		radv_cmd_buffer_set_subpass(cmd_buffer, &resolve_subpass, false);

		/* Subpass resolves must respect the render area. We can ignore the
		 * render area here because vkCmdBeginRenderPass set the render area
		 * with 3DSTATE_DRAWING_RECTANGLE.
		 *
		 * XXX(chadv): Does the hardware really respect
		 * 3DSTATE_DRAWING_RECTANGLE when drawing a 3DPRIM_RECTLIST?
		 */
		emit_resolve(cmd_buffer,
			     src_iview,
			     dst_iview,
			     &(VkOffset2D) { 0, 0 },
			     &(VkOffset2D) { 0, 0 },
			     &(VkExtent2D) { fb->width, fb->height });
	}

	radv_meta_restore_compute(&saved_state, cmd_buffer, 16);

	for (uint32_t i = 0; i < subpass->color_count; ++i) {
		VkAttachmentReference dest_att = subpass->resolve_attachments[i];

		if (dest_att.attachment == VK_ATTACHMENT_UNUSED)
			continue;

		struct radv_image *dst_img = cmd_buffer->state.framebuffer->attachments[dest_att.attachment].attachment->image;
		VkImageSubresourceRange range;
		range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
		range.baseMipLevel = 0;
		range.levelCount = 1;
		range.baseArrayLayer = 0;
		range.layerCount = 1;
		radv_fast_clear_flush_image_inplace(cmd_buffer, dst_img, &range);
	}
}