anv/blorp: Turn off AUX after doing a CCS_D resolve
src/intel/vulkan/anv_blorp.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "anv_private.h"
25
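/* Blorp shader-cache callback: look up a previously compiled blorp kernel by
 * key in the device's blorp pipeline cache and, on a hit, return its kernel
 * offset and prog_data.
 */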
26 static bool
27 lookup_blorp_shader(struct blorp_context *blorp,
28 const void *key, uint32_t key_size,
29 uint32_t *kernel_out, void *prog_data_out)
30 {
31 struct anv_device *device = blorp->driver_ctx;
32
33 /* The blorp cache must be a real cache */
34 assert(device->blorp_shader_cache.cache);
35
36 struct anv_shader_bin *bin =
37 anv_pipeline_cache_search(&device->blorp_shader_cache, key, key_size);
38 if (!bin)
39 return false;
40
41 /* The cache already has a reference and it's not going anywhere so there
42 * is no need to hold a second reference.
43 */
44 anv_shader_bin_unref(device, bin);
45
46 *kernel_out = bin->kernel.offset;
47 *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;
48
49 return true;
50 }
51
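/* Blorp shader-cache callback: hand a freshly compiled blorp kernel to the
 * device's blorp pipeline cache and return the cached kernel offset and
 * prog_data.
 */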
52 static void
53 upload_blorp_shader(struct blorp_context *blorp,
54 const void *key, uint32_t key_size,
55 const void *kernel, uint32_t kernel_size,
56 const struct brw_stage_prog_data *prog_data,
57 uint32_t prog_data_size,
58 uint32_t *kernel_out, void *prog_data_out)
59 {
60 struct anv_device *device = blorp->driver_ctx;
61
62 /* The blorp cache must be a real cache */
63 assert(device->blorp_shader_cache.cache);
64
65 struct anv_pipeline_bind_map bind_map = {
66 .surface_count = 0,
67 .sampler_count = 0,
68 };
69
70 struct anv_shader_bin *bin =
71 anv_pipeline_cache_upload_kernel(&device->blorp_shader_cache,
72 key, key_size, kernel, kernel_size,
73 prog_data, prog_data_size, &bind_map);
74
75 /* The cache already has a reference and it's not going anywhere so there
76 * is no need to hold a second reference.
77 */
78 anv_shader_bin_unref(device, bin);
79
80 *kernel_out = bin->kernel.offset;
81 *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;
82 }
83
84 void
85 anv_device_init_blorp(struct anv_device *device)
86 {
87 anv_pipeline_cache_init(&device->blorp_shader_cache, device, true);
88 blorp_init(&device->blorp, device, &device->isl_dev);
89 device->blorp.compiler = device->instance->physicalDevice.compiler;
90 device->blorp.mocs.tex = device->default_mocs;
91 device->blorp.mocs.rb = device->default_mocs;
92 device->blorp.mocs.vb = device->default_mocs;
93 device->blorp.lookup_shader = lookup_blorp_shader;
94 device->blorp.upload_shader = upload_blorp_shader;
95 switch (device->info.gen) {
96 case 7:
97 if (device->info.is_haswell) {
98 device->blorp.exec = gen75_blorp_exec;
99 } else {
100 device->blorp.exec = gen7_blorp_exec;
101 }
102 break;
103 case 8:
104 device->blorp.exec = gen8_blorp_exec;
105 break;
106 case 9:
107 device->blorp.exec = gen9_blorp_exec;
108 break;
109 default:
110 unreachable("Unknown hardware generation");
111 }
112 }
113
114 void
115 anv_device_finish_blorp(struct anv_device *device)
116 {
117 blorp_finish(&device->blorp);
118 anv_pipeline_cache_finish(&device->blorp_shader_cache);
119 }
120
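/* Describe a range of an anv_buffer as a linear 2D surface so that blorp can
 * treat buffer memory as a copy or clear source/destination.
 */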
121 static void
122 get_blorp_surf_for_anv_buffer(struct anv_device *device,
123 struct anv_buffer *buffer, uint64_t offset,
124 uint32_t width, uint32_t height,
125 uint32_t row_pitch, enum isl_format format,
126 struct blorp_surf *blorp_surf,
127 struct isl_surf *isl_surf)
128 {
129 const struct isl_format_layout *fmtl =
130 isl_format_get_layout(format);
131
132 /* ASTC is the only format which doesn't support linear layouts.
133 * Create an equivalently sized surface with ISL to get around this.
134 */
135 if (fmtl->txc == ISL_TXC_ASTC) {
136 /* Use an equivalently sized format */
137 format = ISL_FORMAT_R32G32B32A32_UINT;
138 assert(fmtl->bpb == isl_format_get_layout(format)->bpb);
139
140 /* Shrink the dimensions for the new format */
141 width = DIV_ROUND_UP(width, fmtl->bw);
142 height = DIV_ROUND_UP(height, fmtl->bh);
143 }
144
145 *blorp_surf = (struct blorp_surf) {
146 .surf = isl_surf,
147 .addr = {
148 .buffer = buffer->bo,
149 .offset = buffer->offset + offset,
150 },
151 };
152
153 isl_surf_init(&device->isl_dev, isl_surf,
154 .dim = ISL_SURF_DIM_2D,
155 .format = format,
156 .width = width,
157 .height = height,
158 .depth = 1,
159 .levels = 1,
160 .array_len = 1,
161 .samples = 1,
162 .min_pitch = row_pitch,
163 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
164 ISL_SURF_USAGE_RENDER_TARGET_BIT,
165 .tiling_flags = ISL_TILING_LINEAR_BIT);
166 assert(isl_surf->row_pitch == row_pitch);
167 }
168
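/* Fill out a blorp_surf for one aspect of an anv_image, including its
 * auxiliary surface when aux_usage asks for one. Stencil and HiZ aux usages
 * are dropped to ISL_AUX_USAGE_NONE here.
 */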
169 static void
170 get_blorp_surf_for_anv_image(const struct anv_image *image,
171 VkImageAspectFlags aspect,
172 enum isl_aux_usage aux_usage,
173 struct blorp_surf *blorp_surf)
174 {
175 if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT ||
176 aux_usage == ISL_AUX_USAGE_HIZ)
177 aux_usage = ISL_AUX_USAGE_NONE;
178
179 const struct anv_surface *surface =
180 anv_image_get_surface_for_aspect_mask(image, aspect);
181
182 *blorp_surf = (struct blorp_surf) {
183 .surf = &surface->isl,
184 .addr = {
185 .buffer = image->bo,
186 .offset = image->offset + surface->offset,
187 },
188 };
189
190 if (aux_usage != ISL_AUX_USAGE_NONE) {
191 blorp_surf->aux_surf = &image->aux_surface.isl;
192 blorp_surf->aux_addr = (struct blorp_address) {
193 .buffer = image->bo,
194 .offset = image->offset + image->aux_surface.offset,
195 };
196 blorp_surf->aux_usage = aux_usage;
197 }
198 }
199
200 void anv_CmdCopyImage(
201 VkCommandBuffer commandBuffer,
202 VkImage srcImage,
203 VkImageLayout srcImageLayout,
204 VkImage dstImage,
205 VkImageLayout dstImageLayout,
206 uint32_t regionCount,
207 const VkImageCopy* pRegions)
208 {
209 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
210 ANV_FROM_HANDLE(anv_image, src_image, srcImage);
211 ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
212
213 struct blorp_batch batch;
214 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
215
216 for (unsigned r = 0; r < regionCount; r++) {
217 VkOffset3D srcOffset =
218 anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
219 VkOffset3D dstOffset =
220 anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
221 VkExtent3D extent =
222 anv_sanitize_image_extent(src_image->type, pRegions[r].extent);
223
224 unsigned dst_base_layer, layer_count;
225 if (dst_image->type == VK_IMAGE_TYPE_3D) {
226 dst_base_layer = pRegions[r].dstOffset.z;
227 layer_count = pRegions[r].extent.depth;
228 } else {
229 dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
230 layer_count = pRegions[r].dstSubresource.layerCount;
231 }
232
233 unsigned src_base_layer;
234 if (src_image->type == VK_IMAGE_TYPE_3D) {
235 src_base_layer = pRegions[r].srcOffset.z;
236 } else {
237 src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
238 assert(pRegions[r].srcSubresource.layerCount == layer_count);
239 }
240
241 assert(pRegions[r].srcSubresource.aspectMask ==
242 pRegions[r].dstSubresource.aspectMask);
243
244 uint32_t a;
245 for_each_bit(a, pRegions[r].dstSubresource.aspectMask) {
246 VkImageAspectFlagBits aspect = (1 << a);
247
248 struct blorp_surf src_surf, dst_surf;
249 get_blorp_surf_for_anv_image(src_image, aspect, src_image->aux_usage,
250 &src_surf);
251 get_blorp_surf_for_anv_image(dst_image, aspect, dst_image->aux_usage,
252 &dst_surf);
253
254 for (unsigned i = 0; i < layer_count; i++) {
255 blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
256 src_base_layer + i,
257 &dst_surf, pRegions[r].dstSubresource.mipLevel,
258 dst_base_layer + i,
259 srcOffset.x, srcOffset.y,
260 dstOffset.x, dstOffset.y,
261 extent.width, extent.height);
262 }
263 }
264 }
265
266 blorp_batch_finish(&batch);
267 }
268
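/* Shared worker for vkCmdCopyBufferToImage and vkCmdCopyImageToBuffer: the
 * buffer side is wrapped in a linear surface and each layer is copied with
 * blorp_copy. buffer_to_image selects the direction of the copy.
 */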
269 static void
270 copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
271 struct anv_buffer *anv_buffer,
272 struct anv_image *anv_image,
273 uint32_t regionCount,
274 const VkBufferImageCopy* pRegions,
275 bool buffer_to_image)
276 {
277 struct blorp_batch batch;
278 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
279
280 struct {
281 struct blorp_surf surf;
282 uint32_t level;
283 VkOffset3D offset;
284 } image, buffer, *src, *dst;
285
286 buffer.level = 0;
287 buffer.offset = (VkOffset3D) { 0, 0, 0 };
288
289 if (buffer_to_image) {
290 src = &buffer;
291 dst = &image;
292 } else {
293 src = &image;
294 dst = &buffer;
295 }
296
297 for (unsigned r = 0; r < regionCount; r++) {
298 const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;
299
300 get_blorp_surf_for_anv_image(anv_image, aspect, anv_image->aux_usage,
301 &image.surf);
302 image.offset =
303 anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
304 image.level = pRegions[r].imageSubresource.mipLevel;
305
306 VkExtent3D extent =
307 anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
308 if (anv_image->type != VK_IMAGE_TYPE_3D) {
309 image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
310 extent.depth = pRegions[r].imageSubresource.layerCount;
311 }
312
313 const enum isl_format buffer_format =
314 anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
315 aspect, VK_IMAGE_TILING_LINEAR);
316
317 const VkExtent3D bufferImageExtent = {
318 .width = pRegions[r].bufferRowLength ?
319 pRegions[r].bufferRowLength : extent.width,
320 .height = pRegions[r].bufferImageHeight ?
321 pRegions[r].bufferImageHeight : extent.height,
322 };
323
324 const struct isl_format_layout *buffer_fmtl =
325 isl_format_get_layout(buffer_format);
326
327 const uint32_t buffer_row_pitch =
328 DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
329 (buffer_fmtl->bpb / 8);
330
331 const uint32_t buffer_layer_stride =
332 DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
333 buffer_row_pitch;
334
335 struct isl_surf buffer_isl_surf;
336 get_blorp_surf_for_anv_buffer(cmd_buffer->device,
337 anv_buffer, pRegions[r].bufferOffset,
338 extent.width, extent.height,
339 buffer_row_pitch, buffer_format,
340 &buffer.surf, &buffer_isl_surf);
341
342 for (unsigned z = 0; z < extent.depth; z++) {
343 blorp_copy(&batch, &src->surf, src->level, src->offset.z,
344 &dst->surf, dst->level, dst->offset.z,
345 src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
346 extent.width, extent.height);
347
348 image.offset.z++;
349 buffer.surf.addr.offset += buffer_layer_stride;
350 }
351 }
352
353 blorp_batch_finish(&batch);
354 }
355
356 void anv_CmdCopyBufferToImage(
357 VkCommandBuffer commandBuffer,
358 VkBuffer srcBuffer,
359 VkImage dstImage,
360 VkImageLayout dstImageLayout,
361 uint32_t regionCount,
362 const VkBufferImageCopy* pRegions)
363 {
364 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
365 ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
366 ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
367
368 copy_buffer_to_image(cmd_buffer, src_buffer, dst_image,
369 regionCount, pRegions, true);
370 }
371
372 void anv_CmdCopyImageToBuffer(
373 VkCommandBuffer commandBuffer,
374 VkImage srcImage,
375 VkImageLayout srcImageLayout,
376 VkBuffer dstBuffer,
377 uint32_t regionCount,
378 const VkBufferImageCopy* pRegions)
379 {
380 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
381 ANV_FROM_HANDLE(anv_image, src_image, srcImage);
382 ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
383
384 copy_buffer_to_image(cmd_buffer, dst_buffer, src_image,
385 regionCount, pRegions, false);
386 }
387
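/* Put the source and destination coordinate pairs into increasing order.
 * Returns true if exactly one of the pairs had to be swapped, i.e. the blit
 * must be mirrored along this axis.
 */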
388 static bool
389 flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
390 {
391 bool flip = false;
392 if (*src0 > *src1) {
393 unsigned tmp = *src0;
394 *src0 = *src1;
395 *src1 = tmp;
396 flip = !flip;
397 }
398
399 if (*dst0 > *dst1) {
400 unsigned tmp = *dst0;
401 *dst0 = *dst1;
402 *dst1 = tmp;
403 flip = !flip;
404 }
405
406 return flip;
407 }
408
409 void anv_CmdBlitImage(
410 VkCommandBuffer commandBuffer,
411 VkImage srcImage,
412 VkImageLayout srcImageLayout,
413 VkImage dstImage,
414 VkImageLayout dstImageLayout,
415 uint32_t regionCount,
416 const VkImageBlit* pRegions,
417 VkFilter filter)
418
419 {
420 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
421 ANV_FROM_HANDLE(anv_image, src_image, srcImage);
422 ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
423
424 struct blorp_surf src, dst;
425
426 uint32_t gl_filter;
427 switch (filter) {
428 case VK_FILTER_NEAREST:
429 gl_filter = 0x2600; /* GL_NEAREST */
430 break;
431 case VK_FILTER_LINEAR:
432 gl_filter = 0x2601; /* GL_LINEAR */
433 break;
434 default:
435 unreachable("Invalid filter");
436 }
437
438 struct blorp_batch batch;
439 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
440
441 for (unsigned r = 0; r < regionCount; r++) {
442 const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
443 const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;
444
445 get_blorp_surf_for_anv_image(src_image, src_res->aspectMask,
446 src_image->aux_usage, &src);
447 get_blorp_surf_for_anv_image(dst_image, dst_res->aspectMask,
448 dst_image->aux_usage, &dst);
449
450 struct anv_format src_format =
451 anv_get_format(&cmd_buffer->device->info, src_image->vk_format,
452 src_res->aspectMask, src_image->tiling);
453 struct anv_format dst_format =
454 anv_get_format(&cmd_buffer->device->info, dst_image->vk_format,
455 dst_res->aspectMask, dst_image->tiling);
456
457 unsigned dst_start, dst_end;
458 if (dst_image->type == VK_IMAGE_TYPE_3D) {
459 assert(dst_res->baseArrayLayer == 0);
460 dst_start = pRegions[r].dstOffsets[0].z;
461 dst_end = pRegions[r].dstOffsets[1].z;
462 } else {
463 dst_start = dst_res->baseArrayLayer;
464 dst_end = dst_start + dst_res->layerCount;
465 }
466
467 unsigned src_start, src_end;
468 if (src_image->type == VK_IMAGE_TYPE_3D) {
469 assert(src_res->baseArrayLayer == 0);
470 src_start = pRegions[r].srcOffsets[0].z;
471 src_end = pRegions[r].srcOffsets[1].z;
472 } else {
473 src_start = src_res->baseArrayLayer;
474 src_end = src_start + src_res->layerCount;
475 }
476
477 bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
478 float src_z_step = (float)(src_end + 1 - src_start) /
479 (float)(dst_end + 1 - dst_start);
480
481 if (flip_z) {
482 src_start = src_end;
483 src_z_step *= -1;
484 }
485
486 unsigned src_x0 = pRegions[r].srcOffsets[0].x;
487 unsigned src_x1 = pRegions[r].srcOffsets[1].x;
488 unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
489 unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
490 bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);
491
492 unsigned src_y0 = pRegions[r].srcOffsets[0].y;
493 unsigned src_y1 = pRegions[r].srcOffsets[1].y;
494 unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
495 unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
496 bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);
497
498 const unsigned num_layers = dst_end - dst_start;
499 for (unsigned i = 0; i < num_layers; i++) {
500 unsigned dst_z = dst_start + i;
501 unsigned src_z = src_start + i * src_z_step;
502
503 blorp_blit(&batch, &src, src_res->mipLevel, src_z,
504 src_format.isl_format, src_format.swizzle,
505 &dst, dst_res->mipLevel, dst_z,
506 dst_format.isl_format,
507 anv_swizzle_for_render(dst_format.swizzle),
508 src_x0, src_y0, src_x1, src_y1,
509 dst_x0, dst_y0, dst_x1, dst_y1,
510 gl_filter, flip_x, flip_y);
511 }
512
513 }
514
515 blorp_batch_finish(&batch);
516 }
517
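/* Map a power-of-two byte size to a UINT format of that size. For raw
 * copies only the size matters; the channel layout is irrelevant.
 */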
518 static enum isl_format
519 isl_format_for_size(unsigned size_B)
520 {
521 switch (size_B) {
522 case 1: return ISL_FORMAT_R8_UINT;
523 case 2: return ISL_FORMAT_R8G8_UINT;
524 case 4: return ISL_FORMAT_R8G8B8A8_UINT;
525 case 8: return ISL_FORMAT_R16G16B16A16_UINT;
526 case 16: return ISL_FORMAT_R32G32B32A32_UINT;
527 default:
528 unreachable("Not a power-of-two format size");
529 }
530 }
531
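/* Copy a width x height rectangle of block_size-byte pixels between two BOs
 * by wrapping both in identical linear surfaces and calling blorp_copy.
 */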
532 static void
533 do_buffer_copy(struct blorp_batch *batch,
534 struct anv_bo *src, uint64_t src_offset,
535 struct anv_bo *dst, uint64_t dst_offset,
536 int width, int height, int block_size)
537 {
538 struct anv_device *device = batch->blorp->driver_ctx;
539
540 /* The actual format we pick doesn't matter as blorp will throw it away.
541 * The only thing that actually matters is the size.
542 */
543 enum isl_format format = isl_format_for_size(block_size);
544
545 struct isl_surf surf;
546 isl_surf_init(&device->isl_dev, &surf,
547 .dim = ISL_SURF_DIM_2D,
548 .format = format,
549 .width = width,
550 .height = height,
551 .depth = 1,
552 .levels = 1,
553 .array_len = 1,
554 .samples = 1,
555 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
556 ISL_SURF_USAGE_RENDER_TARGET_BIT,
557 .tiling_flags = ISL_TILING_LINEAR_BIT);
558 assert(surf.row_pitch == width * block_size);
559
560 struct blorp_surf src_blorp_surf = {
561 .surf = &surf,
562 .addr = {
563 .buffer = src,
564 .offset = src_offset,
565 },
566 };
567
568 struct blorp_surf dst_blorp_surf = {
569 .surf = &surf,
570 .addr = {
571 .buffer = dst,
572 .offset = dst_offset,
573 },
574 };
575
576 blorp_copy(batch, &src_blorp_surf, 0, 0, &dst_blorp_surf, 0, 0,
577 0, 0, 0, 0, width, height);
578 }
579
580 /**
581 * Returns the greatest common divisor of a and b that is a power of two.
582 */
583 static inline uint64_t
584 gcd_pow2_u64(uint64_t a, uint64_t b)
585 {
586 assert(a > 0 || b > 0);
587
588 unsigned a_log2 = ffsll(a) - 1;
589 unsigned b_log2 = ffsll(b) - 1;
590
591 /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX, in which
592 * case the MIN2() will take the other one. If both are 0 then we will
593 * hit the assert above.
594 */
595 return 1 << MIN2(a_log2, b_log2);
596 }
597
598 /* This is the maximum possible width/height our HW can handle */
599 #define MAX_SURFACE_DIM (1ull << 14)
600
601 void anv_CmdCopyBuffer(
602 VkCommandBuffer commandBuffer,
603 VkBuffer srcBuffer,
604 VkBuffer dstBuffer,
605 uint32_t regionCount,
606 const VkBufferCopy* pRegions)
607 {
608 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
609 ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
610 ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
611
612 struct blorp_batch batch;
613 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
614
615 for (unsigned r = 0; r < regionCount; r++) {
616 uint64_t src_offset = src_buffer->offset + pRegions[r].srcOffset;
617 uint64_t dst_offset = dst_buffer->offset + pRegions[r].dstOffset;
618 uint64_t copy_size = pRegions[r].size;
619
620 /* First, we compute the biggest format that can be used with the
621 * given offsets and size.
622 */
623 int bs = 16;
624 bs = gcd_pow2_u64(bs, src_offset);
625 bs = gcd_pow2_u64(bs, dst_offset);
626 bs = gcd_pow2_u64(bs, pRegions[r].size);
627
628 /* Next, we make a bunch of max-sized copies */
629 uint64_t max_copy_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
630 while (copy_size >= max_copy_size) {
631 do_buffer_copy(&batch, src_buffer->bo, src_offset,
632 dst_buffer->bo, dst_offset,
633 MAX_SURFACE_DIM, MAX_SURFACE_DIM, bs);
634 copy_size -= max_copy_size;
635 src_offset += max_copy_size;
636 dst_offset += max_copy_size;
637 }
638
639 /* Now make a max-width copy */
640 uint64_t height = copy_size / (MAX_SURFACE_DIM * bs);
641 assert(height < MAX_SURFACE_DIM);
642 if (height != 0) {
643 uint64_t rect_copy_size = height * MAX_SURFACE_DIM * bs;
644 do_buffer_copy(&batch, src_buffer->bo, src_offset,
645 dst_buffer->bo, dst_offset,
646 MAX_SURFACE_DIM, height, bs);
647 copy_size -= rect_copy_size;
648 src_offset += rect_copy_size;
649 dst_offset += rect_copy_size;
650 }
651
652 /* Finally, make a small copy to finish it off */
653 if (copy_size != 0) {
654 do_buffer_copy(&batch, src_buffer->bo, src_offset,
655 dst_buffer->bo, dst_offset,
656 copy_size / bs, 1, bs);
657 }
658 }
659
660 blorp_batch_finish(&batch);
661 }
662
663 void anv_CmdUpdateBuffer(
664 VkCommandBuffer commandBuffer,
665 VkBuffer dstBuffer,
666 VkDeviceSize dstOffset,
667 VkDeviceSize dataSize,
668 const void* pData)
669 {
670 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
671 ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
672
673 struct blorp_batch batch;
674 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
675
676 /* We can't quite grab a full block because the state stream needs a
677 * little data at the top to build its linked list.
678 */
679 const uint32_t max_update_size =
680 cmd_buffer->device->dynamic_state_block_pool.block_size - 64;
681
682 assert(max_update_size < MAX_SURFACE_DIM * 4);
683
684 while (dataSize) {
685 const uint32_t copy_size = MIN2(dataSize, max_update_size);
686
687 struct anv_state tmp_data =
688 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);
689
690 memcpy(tmp_data.map, pData, copy_size);
691
692 int bs = 16;
693 bs = gcd_pow2_u64(bs, dstOffset);
694 bs = gcd_pow2_u64(bs, copy_size);
695
696 do_buffer_copy(&batch,
697 &cmd_buffer->device->dynamic_state_block_pool.bo,
698 tmp_data.offset,
699 dst_buffer->bo, dst_buffer->offset + dstOffset,
700 copy_size / bs, 1, bs);
701
702 dataSize -= copy_size;
703 dstOffset += copy_size;
704 pData = (void *)pData + copy_size;
705 }
706
707 blorp_batch_finish(&batch);
708 }
709
710 void anv_CmdFillBuffer(
711 VkCommandBuffer commandBuffer,
712 VkBuffer dstBuffer,
713 VkDeviceSize dstOffset,
714 VkDeviceSize fillSize,
715 uint32_t data)
716 {
717 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
718 ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
719 struct blorp_surf surf;
720 struct isl_surf isl_surf;
721
722 struct blorp_batch batch;
723 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
724
725 fillSize = anv_buffer_get_range(dst_buffer, dstOffset, fillSize);
726
727 /* From the Vulkan spec:
728 *
729 * "size is the number of bytes to fill, and must be either a multiple
730 * of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
731 * the buffer. If VK_WHOLE_SIZE is used and the remaining size of the
732 * buffer is not a multiple of 4, then the nearest smaller multiple is
733 * used."
734 */
735 fillSize &= ~3ull;
736
737 /* First, we compute the biggest format that can be used with the
738 * given offsets and size.
739 */
740 int bs = 16;
741 bs = gcd_pow2_u64(bs, dstOffset);
742 bs = gcd_pow2_u64(bs, fillSize);
743 enum isl_format isl_format = isl_format_for_size(bs);
744
745 union isl_color_value color = {
746 .u32 = { data, data, data, data },
747 };
748
749 const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
750 while (fillSize >= max_fill_size) {
751 get_blorp_surf_for_anv_buffer(cmd_buffer->device,
752 dst_buffer, dstOffset,
753 MAX_SURFACE_DIM, MAX_SURFACE_DIM,
754 MAX_SURFACE_DIM * bs, isl_format,
755 &surf, &isl_surf);
756
757 blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
758 0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
759 color, NULL);
760 fillSize -= max_fill_size;
761 dstOffset += max_fill_size;
762 }
763
764 uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
765 assert(height < MAX_SURFACE_DIM);
766 if (height != 0) {
767 const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
768 get_blorp_surf_for_anv_buffer(cmd_buffer->device,
769 dst_buffer, dstOffset,
770 MAX_SURFACE_DIM, height,
771 MAX_SURFACE_DIM * bs, isl_format,
772 &surf, &isl_surf);
773
774 blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
775 0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
776 color, NULL);
777 fillSize -= rect_fill_size;
778 dstOffset += rect_fill_size;
779 }
780
781 if (fillSize != 0) {
782 const uint32_t width = fillSize / bs;
783 get_blorp_surf_for_anv_buffer(cmd_buffer->device,
784 dst_buffer, dstOffset,
785 width, 1,
786 width * bs, isl_format,
787 &surf, &isl_surf);
788
789 blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
790 0, 0, 1, 0, 0, width, 1,
791 color, NULL);
792 }
793
794 blorp_batch_finish(&batch);
795 }
796
797 void anv_CmdClearColorImage(
798 VkCommandBuffer commandBuffer,
799 VkImage _image,
800 VkImageLayout imageLayout,
801 const VkClearColorValue* pColor,
802 uint32_t rangeCount,
803 const VkImageSubresourceRange* pRanges)
804 {
805 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
806 ANV_FROM_HANDLE(anv_image, image, _image);
807
808 static const bool color_write_disable[4] = { false, false, false, false };
809
810 struct blorp_batch batch;
811 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
812
813 struct blorp_surf surf;
814 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
815 image->aux_usage, &surf);
816
817 for (unsigned r = 0; r < rangeCount; r++) {
818 if (pRanges[r].aspectMask == 0)
819 continue;
820
821 assert(pRanges[r].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
822
823 struct anv_format src_format =
824 anv_get_format(&cmd_buffer->device->info, image->vk_format,
825 VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);
826
827 unsigned base_layer = pRanges[r].baseArrayLayer;
828 unsigned layer_count = pRanges[r].layerCount;
829
830 for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
831 const unsigned level = pRanges[r].baseMipLevel + i;
832 const unsigned level_width = anv_minify(image->extent.width, level);
833 const unsigned level_height = anv_minify(image->extent.height, level);
834
835 if (image->type == VK_IMAGE_TYPE_3D) {
836 base_layer = 0;
837 layer_count = anv_minify(image->extent.depth, level);
838 }
839
840 blorp_clear(&batch, &surf,
841 src_format.isl_format, src_format.swizzle,
842 level, base_layer, layer_count,
843 0, 0, level_width, level_height,
844 vk_to_isl_color(*pColor), color_write_disable);
845 }
846 }
847
848 blorp_batch_finish(&batch);
849 }
850
851 void anv_CmdClearDepthStencilImage(
852 VkCommandBuffer commandBuffer,
853 VkImage image_h,
854 VkImageLayout imageLayout,
855 const VkClearDepthStencilValue* pDepthStencil,
856 uint32_t rangeCount,
857 const VkImageSubresourceRange* pRanges)
858 {
859 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
860 ANV_FROM_HANDLE(anv_image, image, image_h);
861
862 struct blorp_batch batch;
863 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
864
865 struct blorp_surf depth, stencil;
866 if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
867 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
868 ISL_AUX_USAGE_NONE, &depth);
869 } else {
870 memset(&depth, 0, sizeof(depth));
871 }
872
873 if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
874 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_STENCIL_BIT,
875 ISL_AUX_USAGE_NONE, &stencil);
876 } else {
877 memset(&stencil, 0, sizeof(stencil));
878 }
879
880 for (unsigned r = 0; r < rangeCount; r++) {
881 if (pRanges[r].aspectMask == 0)
882 continue;
883
884 bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
885 bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;
886
887 unsigned base_layer = pRanges[r].baseArrayLayer;
888 unsigned layer_count = pRanges[r].layerCount;
889
890 for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
891 const unsigned level = pRanges[r].baseMipLevel + i;
892 const unsigned level_width = anv_minify(image->extent.width, level);
893 const unsigned level_height = anv_minify(image->extent.height, level);
894
895 if (image->type == VK_IMAGE_TYPE_3D)
896 layer_count = anv_minify(image->extent.depth, level);
897
898 blorp_clear_depth_stencil(&batch, &depth, &stencil,
899 level, base_layer, layer_count,
900 0, 0, level_width, level_height,
901 clear_depth, pDepthStencil->depth,
902 clear_stencil ? 0xff : 0,
903 pDepthStencil->stencil);
904 }
905 }
906
907 blorp_batch_finish(&batch);
908 }
909
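/* Like anv_cmd_buffer_alloc_binding_table, but if the current binding table
 * block is full, grabs a new block and re-emits STATE_BASE_ADDRESS before
 * retrying the allocation.
 */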
910 struct anv_state
911 anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
912 uint32_t num_entries,
913 uint32_t *state_offset)
914 {
915 struct anv_state bt_state =
916 anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
917 state_offset);
918 if (bt_state.map == NULL) {
919 /* We ran out of space. Grab a new binding table block. */
920 MAYBE_UNUSED VkResult result =
921 anv_cmd_buffer_new_binding_table_block(cmd_buffer);
922 assert(result == VK_SUCCESS);
923
924 /* Re-emit state base addresses so we get the new surface state base
925 * address before we start emitting binding tables etc.
926 */
927 anv_cmd_buffer_emit_state_base_address(cmd_buffer);
928
929 bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
930 state_offset);
931 assert(bt_state.map != NULL);
932 }
933
934 return bt_state;
935 }
936
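/* Build a one-entry binding table pointing at the given surface state and
 * return the table's offset for use with blorp_clear_attachments.
 */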
937 static uint32_t
938 binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
939 struct anv_state surface_state)
940 {
941 uint32_t state_offset;
942 struct anv_state bt_state =
943 anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset);
944
945 uint32_t *bt_map = bt_state.map;
946 bt_map[0] = surface_state.offset + state_offset;
947
948 return bt_state.offset;
949 }
950
951 static void
952 clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
953 struct blorp_batch *batch,
954 const VkClearAttachment *attachment,
955 uint32_t rectCount, const VkClearRect *pRects)
956 {
957 const struct anv_subpass *subpass = cmd_buffer->state.subpass;
958 const uint32_t color_att = attachment->colorAttachment;
959 const uint32_t att_idx = subpass->color_attachments[color_att].attachment;
960
961 if (att_idx == VK_ATTACHMENT_UNUSED)
962 return;
963
964 struct anv_render_pass_attachment *pass_att =
965 &cmd_buffer->state.pass->attachments[att_idx];
966 struct anv_attachment_state *att_state =
967 &cmd_buffer->state.attachments[att_idx];
968
969 uint32_t binding_table =
970 binding_table_for_surface_state(cmd_buffer, att_state->color_rt_state);
971
972 union isl_color_value clear_color =
973 vk_to_isl_color(attachment->clearValue.color);
974
975 for (uint32_t r = 0; r < rectCount; ++r) {
976 const VkOffset2D offset = pRects[r].rect.offset;
977 const VkExtent2D extent = pRects[r].rect.extent;
978 blorp_clear_attachments(batch, binding_table,
979 ISL_FORMAT_UNSUPPORTED, pass_att->samples,
980 pRects[r].baseArrayLayer,
981 pRects[r].layerCount,
982 offset.x, offset.y,
983 offset.x + extent.width, offset.y + extent.height,
984 true, clear_color, false, 0.0f, 0, 0);
985 }
986 }
987
988 static void
989 clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
990 struct blorp_batch *batch,
991 const VkClearAttachment *attachment,
992 uint32_t rectCount, const VkClearRect *pRects)
993 {
994 static const union isl_color_value color_value = { .u32 = { 0, } };
995 const struct anv_subpass *subpass = cmd_buffer->state.subpass;
996 const uint32_t att_idx = subpass->depth_stencil_attachment.attachment;
997
998 if (att_idx == VK_ATTACHMENT_UNUSED)
999 return;
1000
1001 struct anv_render_pass_attachment *pass_att =
1002 &cmd_buffer->state.pass->attachments[att_idx];
1003
1004 bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
1005 bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;
1006
1007 enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
1008 if (clear_depth) {
1009 depth_format = anv_get_isl_format(&cmd_buffer->device->info,
1010 pass_att->format,
1011 VK_IMAGE_ASPECT_DEPTH_BIT,
1012 VK_IMAGE_TILING_OPTIMAL);
1013 }
1014
1015 uint32_t binding_table =
1016 binding_table_for_surface_state(cmd_buffer,
1017 cmd_buffer->state.null_surface_state);
1018
1019 for (uint32_t r = 0; r < rectCount; ++r) {
1020 const VkOffset2D offset = pRects[r].rect.offset;
1021 const VkExtent2D extent = pRects[r].rect.extent;
1022 VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
1023 blorp_clear_attachments(batch, binding_table,
1024 depth_format, pass_att->samples,
1025 pRects[r].baseArrayLayer,
1026 pRects[r].layerCount,
1027 offset.x, offset.y,
1028 offset.x + extent.width, offset.y + extent.height,
1029 false, color_value,
1030 clear_depth, value.depth,
1031 clear_stencil ? 0xff : 0, value.stencil);
1032 }
1033 }
1034
1035 void anv_CmdClearAttachments(
1036 VkCommandBuffer commandBuffer,
1037 uint32_t attachmentCount,
1038 const VkClearAttachment* pAttachments,
1039 uint32_t rectCount,
1040 const VkClearRect* pRects)
1041 {
1042 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1043
1044 /* Because this gets called within a render pass, we tell blorp not to
1045 * trash our depth and stencil buffers.
1046 */
1047 struct blorp_batch batch;
1048 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
1049 BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);
1050
1051 for (uint32_t a = 0; a < attachmentCount; ++a) {
1052 if (pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
1053 clear_color_attachment(cmd_buffer, &batch,
1054 &pAttachments[a],
1055 rectCount, pRects);
1056 } else {
1057 clear_depth_stencil_attachment(cmd_buffer, &batch,
1058 &pAttachments[a],
1059 rectCount, pRects);
1060 }
1061 }
1062
1063 blorp_batch_finish(&batch);
1064 }
1065
1066 enum subpass_stage {
1067 SUBPASS_STAGE_LOAD,
1068 SUBPASS_STAGE_DRAW,
1069 SUBPASS_STAGE_RESOLVE,
1070 };
1071
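/* Return true if this attachment's caches should be flushed at the given
 * stage: it is read as an input attachment or resolve source, either in the
 * current subpass or in a later subpass before anything draws to it again.
 */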
1072 static bool
1073 attachment_needs_flush(struct anv_cmd_buffer *cmd_buffer,
1074 struct anv_render_pass_attachment *att,
1075 enum subpass_stage stage)
1076 {
1077 struct anv_render_pass *pass = cmd_buffer->state.pass;
1078 const uint32_t subpass_idx = anv_get_subpass_id(&cmd_buffer->state);
1079
1080 /* We handle this subpass specially based on the current stage */
1081 enum anv_subpass_usage usage = att->subpass_usage[subpass_idx];
1082 switch (stage) {
1083 case SUBPASS_STAGE_LOAD:
1084 if (usage & (ANV_SUBPASS_USAGE_INPUT | ANV_SUBPASS_USAGE_RESOLVE_SRC))
1085 return true;
1086 break;
1087
1088 case SUBPASS_STAGE_DRAW:
1089 if (usage & ANV_SUBPASS_USAGE_RESOLVE_SRC)
1090 return true;
1091 break;
1092
1093 default:
1094 break;
1095 }
1096
1097 for (uint32_t s = subpass_idx + 1; s < pass->subpass_count; s++) {
1098 usage = att->subpass_usage[s];
1099
1100 /* If this attachment is going to be used as an input in this or any
1101 * future subpass, then we need to flush its cache and invalidate the
1102 * texture cache.
1103 */
1104 if (att->subpass_usage[s] & ANV_SUBPASS_USAGE_INPUT)
1105 return true;
1106
1107 if (usage & (ANV_SUBPASS_USAGE_DRAW | ANV_SUBPASS_USAGE_RESOLVE_DST)) {
1108 /* We found another subpass that draws to this attachment. We'll
1109 * wait to resolve until then.
1110 */
1111 return false;
1112 }
1113 }
1114
1115 return false;
1116 }
1117
1118 static void
1119 anv_cmd_buffer_flush_attachments(struct anv_cmd_buffer *cmd_buffer,
1120 enum subpass_stage stage)
1121 {
1122 struct anv_subpass *subpass = cmd_buffer->state.subpass;
1123 struct anv_render_pass *pass = cmd_buffer->state.pass;
1124
1125 for (uint32_t i = 0; i < subpass->color_count; ++i) {
1126 uint32_t att = subpass->color_attachments[i].attachment;
1127 assert(att < pass->attachment_count);
1128 if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
1129 cmd_buffer->state.pending_pipe_bits |=
1130 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
1131 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
1132 }
1133 }
1134
1135 if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
1136 uint32_t att = subpass->depth_stencil_attachment.attachment;
1137 assert(att < pass->attachment_count);
1138 if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
1139 cmd_buffer->state.pending_pipe_bits |=
1140 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
1141 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
1142 }
1143 }
1144 }
1145
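/* Return true if any attachment used by the current subpass still has a
 * pending clear.
 */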
1146 static bool
1147 subpass_needs_clear(const struct anv_cmd_buffer *cmd_buffer)
1148 {
1149 const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
1150 uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;
1151
1152 for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
1153 uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
1154 if (cmd_state->attachments[a].pending_clear_aspects) {
1155 return true;
1156 }
1157 }
1158
1159 if (ds != VK_ATTACHMENT_UNUSED &&
1160 cmd_state->attachments[ds].pending_clear_aspects) {
1161 return true;
1162 }
1163
1164 return false;
1165 }
1166
1167 void
1168 anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
1169 {
1170 const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
1171 const VkRect2D render_area = cmd_buffer->state.render_area;
1172
1173
1174 if (!subpass_needs_clear(cmd_buffer))
1175 return;
1176
1177 /* Because this gets called within a render pass, we tell blorp not to
1178 * trash our depth and stencil buffers.
1179 */
1180 struct blorp_batch batch;
1181 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
1182 BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);
1183
1184 VkClearRect clear_rect = {
1185 .rect = cmd_buffer->state.render_area,
1186 .baseArrayLayer = 0,
1187 .layerCount = cmd_buffer->state.framebuffer->layers,
1188 };
1189
1190 struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
1191 for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
1192 const uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
1193 struct anv_attachment_state *att_state = &cmd_state->attachments[a];
1194
1195 if (!att_state->pending_clear_aspects)
1196 continue;
1197
1198 assert(att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);
1199
1200 struct anv_image_view *iview = fb->attachments[a];
1201 const struct anv_image *image = iview->image;
1202 struct blorp_surf surf;
1203 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
1204 att_state->aux_usage, &surf);
1205
1206 if (att_state->fast_clear) {
1207 surf.clear_color = vk_to_isl_color(att_state->clear_value.color);
1208
1209 /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
1210 *
1211 * "After Render target fast clear, pipe-control with color cache
1212 * write-flush must be issued before sending any DRAW commands on
1213 * that render target."
1214 *
1215 * This comment is a bit cryptic and doesn't really tell you what's
1216 * going on or what's really needed. It appears that fast clear ops are
1217 * not properly synchronized with other drawing. This means that we
1218 * cannot have a fast clear operation in the pipe at the same time as
1219 * other regular drawing operations. We need to use a PIPE_CONTROL
1220 * to ensure that the contents of the previous draw hit the render
1221 * target before we resolve and then use a second PIPE_CONTROL after
1222 * the resolve to ensure that it is completed before any additional
1223 * drawing occurs.
1224 */
1225 cmd_buffer->state.pending_pipe_bits |=
1226 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
1227
1228 blorp_fast_clear(&batch, &surf, iview->isl.format,
1229 iview->isl.base_level,
1230 iview->isl.base_array_layer, fb->layers,
1231 render_area.offset.x, render_area.offset.y,
1232 render_area.offset.x + render_area.extent.width,
1233 render_area.offset.y + render_area.extent.height);
1234
1235 cmd_buffer->state.pending_pipe_bits |=
1236 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
1237 } else {
1238 blorp_clear(&batch, &surf, iview->isl.format,
1239 anv_swizzle_for_render(iview->isl.swizzle),
1240 iview->isl.base_level,
1241 iview->isl.base_array_layer, fb->layers,
1242 render_area.offset.x, render_area.offset.y,
1243 render_area.offset.x + render_area.extent.width,
1244 render_area.offset.y + render_area.extent.height,
1245 vk_to_isl_color(att_state->clear_value.color), NULL);
1246 }
1247
1248 att_state->pending_clear_aspects = 0;
1249 }
1250
1251 const uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;
1252
1253 if (ds != VK_ATTACHMENT_UNUSED &&
1254 cmd_state->attachments[ds].pending_clear_aspects) {
1255
1256 VkClearAttachment clear_att = {
1257 .aspectMask = cmd_state->attachments[ds].pending_clear_aspects,
1258 .clearValue = cmd_state->attachments[ds].clear_value,
1259 };
1260
1261
1262 const uint8_t gen = cmd_buffer->device->info.gen;
1263 bool clear_with_hiz = gen >= 8 && cmd_state->attachments[ds].aux_usage ==
1264 ISL_AUX_USAGE_HIZ;
1265 const struct anv_image_view *iview = fb->attachments[ds];
1266
1267 if (clear_with_hiz) {
1268 const bool clear_depth = clear_att.aspectMask &
1269 VK_IMAGE_ASPECT_DEPTH_BIT;
1270 const bool clear_stencil = clear_att.aspectMask &
1271 VK_IMAGE_ASPECT_STENCIL_BIT;
1272
1273 /* Check against restrictions for depth buffer clearing. A great GPU
1274 * performance benefit isn't expected when using the HZ sequence for
1275 * stencil-only clears. Therefore, we don't emit a HZ op sequence for
1276 * a stencil clear in addition to using the BLORP-fallback for depth.
1277 */
1278 if (clear_depth) {
1279 if (!blorp_can_hiz_clear_depth(gen, iview->isl.format,
1280 iview->image->samples,
1281 render_area.offset.x,
1282 render_area.offset.y,
1283 render_area.offset.x +
1284 render_area.extent.width,
1285 render_area.offset.y +
1286 render_area.extent.height)) {
1287 clear_with_hiz = false;
1288 } else if (clear_att.clearValue.depthStencil.depth !=
1289 ANV_HZ_FC_VAL) {
1290 /* Don't enable fast depth clears for any clear value not equal to
1291 * ANV_HZ_FC_VAL.
1292 */
1293 clear_with_hiz = false;
1294 } else if (gen == 8 &&
1295 anv_can_sample_with_hiz(&cmd_buffer->device->info,
1296 iview->aspect_mask,
1297 iview->image->samples)) {
1298 /* Only gen9+ supports returning ANV_HZ_FC_VAL when sampling a
1299 * fast-cleared portion of a HiZ buffer. Testing has revealed
1300 * that Gen8 only supports returning 0.0f. Gens prior to gen8 do
1301 * not support this feature at all.
1302 */
1303 clear_with_hiz = false;
1304 }
1305 }
1306
1307 if (clear_with_hiz) {
1308 blorp_gen8_hiz_clear_attachments(&batch, iview->image->samples,
1309 render_area.offset.x,
1310 render_area.offset.y,
1311 render_area.offset.x +
1312 render_area.extent.width,
1313 render_area.offset.y +
1314 render_area.extent.height,
1315 clear_depth, clear_stencil,
1316 clear_att.clearValue.
1317 depthStencil.stencil);
1318 }
1319 }
1320
1321 if (!clear_with_hiz) {
1322 clear_depth_stencil_attachment(cmd_buffer, &batch,
1323 &clear_att, 1, &clear_rect);
1324 }
1325
1326 cmd_state->attachments[ds].pending_clear_aspects = 0;
1327 }
1328
1329 blorp_batch_finish(&batch);
1330
1331 anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_LOAD);
1332 }
1333
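/* Resolve one level/layer of a multisampled image into a single-sampled
 * image, one aspect at a time, using a 1:1 nearest-filtered blit.
 */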
1334 static void
1335 resolve_image(struct blorp_batch *batch,
1336 const struct anv_image *src_image,
1337 uint32_t src_level, uint32_t src_layer,
1338 const struct anv_image *dst_image,
1339 uint32_t dst_level, uint32_t dst_layer,
1340 VkImageAspectFlags aspect_mask,
1341 uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
1342 uint32_t width, uint32_t height)
1343 {
1344 assert(src_image->type == VK_IMAGE_TYPE_2D);
1345 assert(src_image->samples > 1);
1346 assert(dst_image->type == VK_IMAGE_TYPE_2D);
1347 assert(dst_image->samples == 1);
1348
1349 uint32_t a;
1350 for_each_bit(a, aspect_mask) {
1351 VkImageAspectFlagBits aspect = 1 << a;
1352
1353 struct blorp_surf src_surf, dst_surf;
1354 get_blorp_surf_for_anv_image(src_image, aspect,
1355 src_image->aux_usage, &src_surf);
1356 get_blorp_surf_for_anv_image(dst_image, aspect,
1357 dst_image->aux_usage, &dst_surf);
1358
1359 blorp_blit(batch,
1360 &src_surf, src_level, src_layer,
1361 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
1362 &dst_surf, dst_level, dst_layer,
1363 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
1364 src_x, src_y, src_x + width, src_y + height,
1365 dst_x, dst_y, dst_x + width, dst_y + height,
1366 0x2600 /* GL_NEAREST */, false, false);
1367 }
1368 }
1369
1370 void anv_CmdResolveImage(
1371 VkCommandBuffer commandBuffer,
1372 VkImage srcImage,
1373 VkImageLayout srcImageLayout,
1374 VkImage dstImage,
1375 VkImageLayout dstImageLayout,
1376 uint32_t regionCount,
1377 const VkImageResolve* pRegions)
1378 {
1379 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1380 ANV_FROM_HANDLE(anv_image, src_image, srcImage);
1381 ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
1382
1383 struct blorp_batch batch;
1384 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
1385
1386 for (uint32_t r = 0; r < regionCount; r++) {
1387 assert(pRegions[r].srcSubresource.aspectMask ==
1388 pRegions[r].dstSubresource.aspectMask);
1389 assert(pRegions[r].srcSubresource.layerCount ==
1390 pRegions[r].dstSubresource.layerCount);
1391
1392 const uint32_t layer_count = pRegions[r].dstSubresource.layerCount;
1393
1394 for (uint32_t layer = 0; layer < layer_count; layer++) {
1395 resolve_image(&batch,
1396 src_image, pRegions[r].srcSubresource.mipLevel,
1397 pRegions[r].srcSubresource.baseArrayLayer + layer,
1398 dst_image, pRegions[r].dstSubresource.mipLevel,
1399 pRegions[r].dstSubresource.baseArrayLayer + layer,
1400 pRegions[r].dstSubresource.aspectMask,
1401 pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
1402 pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
1403 pRegions[r].extent.width, pRegions[r].extent.height);
1404 }
1405 }
1406
1407 blorp_batch_finish(&batch);
1408 }
1409
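/* At the end of a subpass, emit whatever CCS resolve (full or partial) a
 * color attachment needs so that its contents are usable by later subpasses
 * and by operations outside the render pass.
 */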
1410 static void
1411 ccs_resolve_attachment(struct anv_cmd_buffer *cmd_buffer,
1412 struct blorp_batch *batch,
1413 uint32_t att)
1414 {
1415 struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
1416 struct anv_attachment_state *att_state =
1417 &cmd_buffer->state.attachments[att];
1418
1419 if (att_state->aux_usage == ISL_AUX_USAGE_NONE ||
1420 att_state->aux_usage == ISL_AUX_USAGE_MCS)
1421 return; /* Nothing to resolve */
1422
1423 assert(att_state->aux_usage == ISL_AUX_USAGE_CCS_E ||
1424 att_state->aux_usage == ISL_AUX_USAGE_CCS_D);
1425
1426 struct anv_render_pass *pass = cmd_buffer->state.pass;
1427 const uint32_t subpass_idx = anv_get_subpass_id(&cmd_buffer->state);
1428
1429 /* Scan forward to see all the ways this attachment will be used.
1430 * Ideally, we would like to resolve in the same subpass as the last write
1431 * of a particular attachment. That way we only resolve once but it's
1432 * still hot in the cache.
1433 */
1434 bool found_draw = false;
1435 enum anv_subpass_usage usage = 0;
1436 for (uint32_t s = subpass_idx + 1; s < pass->subpass_count; s++) {
1437 usage |= pass->attachments[att].subpass_usage[s];
1438
1439 if (usage & (ANV_SUBPASS_USAGE_DRAW | ANV_SUBPASS_USAGE_RESOLVE_DST)) {
1440 /* We found another subpass that draws to this attachment. We'll
1441 * wait to resolve until then.
1442 */
1443 found_draw = true;
1444 break;
1445 }
1446 }
1447
1448 struct anv_image_view *iview = fb->attachments[att];
1449 const struct anv_image *image = iview->image;
1450 assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
1451
1452 enum blorp_fast_clear_op resolve_op = BLORP_FAST_CLEAR_OP_NONE;
1453 if (!found_draw) {
1454 /* This is the last subpass that writes to this attachment so we need to
1455 * resolve here. Ideally, we would like to only resolve if the storeOp
1456 * is set to VK_ATTACHMENT_STORE_OP_STORE. However, we need to ensure
1457 * that the CCS bits are set to "resolved" because there may be copy or
1458 * blit operations (which may ignore CCS) between now and the next time
1459 * we render and we need to ensure that anything they write will be
1460 * respected in the next render. Unfortunately, the hardware does not
1461 * provide us with any sort of "invalidate" pass that sets the CCS to
1462 * "resolved" without writing to the render target.
1463 */
1464 if (iview->image->aux_usage != ISL_AUX_USAGE_CCS_E) {
1465 /* The image destination surface doesn't support compression outside
1466 * the render pass. We need a full resolve.
1467 */
1468 resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
1469 } else if (att_state->fast_clear) {
1470 /* We don't know what to do with clear colors outside the render
1471 * pass, so we need a partial resolve. Only transparent black is
1472 * built into the surface state object, and thus no resolve is
1473 * required in that case.
1474 */
1475 if (att_state->clear_value.color.uint32[0] ||
1476 att_state->clear_value.color.uint32[1] ||
1477 att_state->clear_value.color.uint32[2] ||
1478 att_state->clear_value.color.uint32[3])
1479 resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
1480 } else {
1481 /* The image "natively" supports all the compression we care about
1482 * and we don't need to resolve at all. If this is the case, we also
1483 * don't need to resolve for any of the input attachment cases below.
1484 */
1485 }
1486 } else if (usage & ANV_SUBPASS_USAGE_INPUT) {
1487 /* Input attachments are clear-color aware so, at least on Sky Lake, we
1488 * can frequently sample from them with no resolves at all.
1489 */
1490 if (att_state->aux_usage != att_state->input_aux_usage) {
1491 assert(att_state->input_aux_usage == ISL_AUX_USAGE_NONE);
1492 resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
1493 } else if (!att_state->clear_color_is_zero_one) {
1494 /* Sky Lake PRM, Vol. 2d, RENDER_SURFACE_STATE::Red Clear Color:
1495 *
1496 * "If Number of Multisamples is MULTISAMPLECOUNT_1 AND if this RT
1497 * is fast cleared with non-0/1 clear value, this RT must be
1498 * partially resolved (refer to Partial Resolve operation) before
1499 * binding this surface to Sampler."
1500 */
1501 resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
1502 }
1503 }
1504
1505 if (resolve_op == BLORP_FAST_CLEAR_OP_NONE)
1506 return;
1507
1508 struct blorp_surf surf;
1509 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
1510 att_state->aux_usage, &surf);
1511 surf.clear_color = vk_to_isl_color(att_state->clear_value.color);
1512
1513 /* From the Sky Lake PRM Vol. 7, "Render Target Resolve":
1514 *
1515 * "When performing a render target resolve, PIPE_CONTROL with end of
1516 * pipe sync must be delivered."
1517 *
1518 * This comment is a bit cryptic and doesn't really tell you what's going
1519 * on or what's really needed. It appears that fast clear ops are not
1520 * properly synchronized with other drawing. We need to use a PIPE_CONTROL
1521 * to ensure that the contents of the previous draw hit the render target
1522 * before we resolve and then use a second PIPE_CONTROL after the resolve
1523 * to ensure that it is completed before any additional drawing occurs.
1524 */
1525 cmd_buffer->state.pending_pipe_bits |=
1526 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
1527
1528 for (uint32_t layer = 0; layer < fb->layers; layer++) {
1529 blorp_ccs_resolve(batch, &surf,
1530 iview->isl.base_level,
1531 iview->isl.base_array_layer + layer,
1532 iview->isl.format, resolve_op);
1533 }
1534
1535 cmd_buffer->state.pending_pipe_bits |=
1536 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
1537
1538 /* Once we've done any sort of resolve, we're no longer fast-cleared */
1539 att_state->fast_clear = false;
1540 if (att_state->aux_usage == ISL_AUX_USAGE_CCS_D)
1541 att_state->aux_usage = ISL_AUX_USAGE_NONE;
1542 }
1543
1544 void
1545 anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
1546 {
1547 struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
1548 struct anv_subpass *subpass = cmd_buffer->state.subpass;
1549
1550
1551 struct blorp_batch batch;
1552 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
1553
1554 for (uint32_t i = 0; i < subpass->color_count; ++i) {
1555 ccs_resolve_attachment(cmd_buffer, &batch,
1556 subpass->color_attachments[i].attachment);
1557 }
1558
1559 anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_DRAW);
1560
1561 if (subpass->has_resolve) {
1562 for (uint32_t i = 0; i < subpass->color_count; ++i) {
1563 uint32_t src_att = subpass->color_attachments[i].attachment;
1564 uint32_t dst_att = subpass->resolve_attachments[i].attachment;
1565
1566 if (dst_att == VK_ATTACHMENT_UNUSED)
1567 continue;
1568
1569 if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
1570 /* From the Vulkan 1.0 spec:
1571 *
1572 * If the first use of an attachment in a render pass is as a
1573 * resolve attachment, then the loadOp is effectively ignored
1574 * as the resolve is guaranteed to overwrite all pixels in the
1575 * render area.
1576 */
1577 cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
1578 }
1579
1580 struct anv_image_view *src_iview = fb->attachments[src_att];
1581 struct anv_image_view *dst_iview = fb->attachments[dst_att];
1582
1583 const VkRect2D render_area = cmd_buffer->state.render_area;
1584
1585 assert(src_iview->aspect_mask == dst_iview->aspect_mask);
1586 resolve_image(&batch, src_iview->image,
1587 src_iview->isl.base_level,
1588 src_iview->isl.base_array_layer,
1589 dst_iview->image,
1590 dst_iview->isl.base_level,
1591 dst_iview->isl.base_array_layer,
1592 src_iview->aspect_mask,
1593 render_area.offset.x, render_area.offset.y,
1594 render_area.offset.x, render_area.offset.y,
1595 render_area.extent.width, render_area.extent.height);
1596
1597 ccs_resolve_attachment(cmd_buffer, &batch, dst_att);
1598 }
1599
1600 anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_RESOLVE);
1601 }
1602
1603 blorp_batch_finish(&batch);
1604 }
1605
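/* Perform a HiZ or depth resolve on an image's depth surface. This is only
 * emitted on gen8+ and only for images that actually have a HiZ auxiliary
 * buffer; otherwise it is a no-op.
 */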
1606 void
1607 anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
1608 const struct anv_image *image,
1609 enum blorp_hiz_op op)
1610 {
1611 assert(image);
1612
1613 /* Don't resolve depth buffers without an auxiliary HiZ buffer and
1614 * don't perform such a resolve on gens that don't support it.
1615 */
1616 if (cmd_buffer->device->info.gen < 8 ||
1617 image->aux_usage != ISL_AUX_USAGE_HIZ)
1618 return;
1619
1620 assert(op == BLORP_HIZ_OP_HIZ_RESOLVE ||
1621 op == BLORP_HIZ_OP_DEPTH_RESOLVE);
1622
1623 struct blorp_batch batch;
1624 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
1625
1626 struct blorp_surf surf;
1627 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
1628 ISL_AUX_USAGE_NONE, &surf);
1629
1630 /* Manually add the aux HiZ surf */
1631 surf.aux_surf = &image->aux_surface.isl;
1632 surf.aux_addr = (struct blorp_address) {
1633 .buffer = image->bo,
1634 .offset = image->offset + image->aux_surface.offset,
1635 };
1636 surf.aux_usage = ISL_AUX_USAGE_HIZ;
1637
1638 surf.clear_color.u32[0] = (uint32_t) ANV_HZ_FC_VAL;
1639
1640 blorp_gen6_hiz_op(&batch, &surf, 0, 0, op);
1641 blorp_batch_finish(&batch);
1642 }