anv/blorp: sample input attachments with resolves on BDW
[mesa.git] / src / intel / vulkan / anv_blorp.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "anv_private.h"
25
26 static bool
27 lookup_blorp_shader(struct blorp_context *blorp,
28 const void *key, uint32_t key_size,
29 uint32_t *kernel_out, void *prog_data_out)
30 {
31 struct anv_device *device = blorp->driver_ctx;
32
33 /* The blorp cache must be a real cache */
34 assert(device->blorp_shader_cache.cache);
35
36 struct anv_shader_bin *bin =
37 anv_pipeline_cache_search(&device->blorp_shader_cache, key, key_size);
38 if (!bin)
39 return false;
40
41 /* The cache already has a reference and it's not going anywhere so there
42 * is no need to hold a second reference.
43 */
44 anv_shader_bin_unref(device, bin);
45
46 *kernel_out = bin->kernel.offset;
47 *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;
48
49 return true;
50 }
51
52 static bool
53 upload_blorp_shader(struct blorp_context *blorp,
54 const void *key, uint32_t key_size,
55 const void *kernel, uint32_t kernel_size,
56 const struct brw_stage_prog_data *prog_data,
57 uint32_t prog_data_size,
58 uint32_t *kernel_out, void *prog_data_out)
59 {
60 struct anv_device *device = blorp->driver_ctx;
61
62 /* The blorp cache must be a real cache */
63 assert(device->blorp_shader_cache.cache);
64
65 struct anv_pipeline_bind_map bind_map = {
66 .surface_count = 0,
67 .sampler_count = 0,
68 };
69
70 struct anv_shader_bin *bin =
71 anv_pipeline_cache_upload_kernel(&device->blorp_shader_cache,
72 key, key_size, kernel, kernel_size,
73 prog_data, prog_data_size, &bind_map);
74
75 if (!bin)
76 return false;
77
78 /* The cache already has a reference and it's not going anywhere so there
79 * is no need to hold a second reference.
80 */
81 anv_shader_bin_unref(device, bin);
82
83 *kernel_out = bin->kernel.offset;
84 *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;
85
86 return true;
87 }
88
89 void
90 anv_device_init_blorp(struct anv_device *device)
91 {
92 anv_pipeline_cache_init(&device->blorp_shader_cache, device, true);
93 blorp_init(&device->blorp, device, &device->isl_dev);
94 device->blorp.compiler = device->instance->physicalDevice.compiler;
95 device->blorp.mocs.tex = device->default_mocs;
96 device->blorp.mocs.rb = device->default_mocs;
97 device->blorp.mocs.vb = device->default_mocs;
98 device->blorp.lookup_shader = lookup_blorp_shader;
99 device->blorp.upload_shader = upload_blorp_shader;
100 switch (device->info.gen) {
101 case 7:
102 if (device->info.is_haswell) {
103 device->blorp.exec = gen75_blorp_exec;
104 } else {
105 device->blorp.exec = gen7_blorp_exec;
106 }
107 break;
108 case 8:
109 device->blorp.exec = gen8_blorp_exec;
110 break;
111 case 9:
112 device->blorp.exec = gen9_blorp_exec;
113 break;
114 default:
115 unreachable("Unknown hardware generation");
116 }
117 }
118
119 void
120 anv_device_finish_blorp(struct anv_device *device)
121 {
122 blorp_finish(&device->blorp);
123 anv_pipeline_cache_finish(&device->blorp_shader_cache);
124 }
125
126 static void
127 get_blorp_surf_for_anv_buffer(struct anv_device *device,
128 struct anv_buffer *buffer, uint64_t offset,
129 uint32_t width, uint32_t height,
130 uint32_t row_pitch, enum isl_format format,
131 struct blorp_surf *blorp_surf,
132 struct isl_surf *isl_surf)
133 {
134 const struct isl_format_layout *fmtl =
135 isl_format_get_layout(format);
136 bool ok UNUSED;
137
138 /* ASTC is the only format which doesn't support linear layouts.
139 * Create an equivalently sized surface with ISL to get around this.
140 */
141 if (fmtl->txc == ISL_TXC_ASTC) {
142 /* Use an equivalently sized format */
143 format = ISL_FORMAT_R32G32B32A32_UINT;
144 assert(fmtl->bpb == isl_format_get_layout(format)->bpb);
145
146 /* Shrink the dimensions for the new format */
147 width = DIV_ROUND_UP(width, fmtl->bw);
148 height = DIV_ROUND_UP(height, fmtl->bh);
149 }
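/* For illustration: with an ASTC_4x4 source format, fmtl->bw == fmtl->bh == 4
 * and fmtl->bpb == 128, so a 64x64 copy region becomes a 16x16
 * R32G32B32A32_UINT surface covering exactly the same number of bytes.
 */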
150
151 *blorp_surf = (struct blorp_surf) {
152 .surf = isl_surf,
153 .addr = {
154 .buffer = buffer->bo,
155 .offset = buffer->offset + offset,
156 },
157 };
158
159 ok = isl_surf_init(&device->isl_dev, isl_surf,
160 .dim = ISL_SURF_DIM_2D,
161 .format = format,
162 .width = width,
163 .height = height,
164 .depth = 1,
165 .levels = 1,
166 .array_len = 1,
167 .samples = 1,
168 .row_pitch = row_pitch,
169 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
170 ISL_SURF_USAGE_RENDER_TARGET_BIT,
171 .tiling_flags = ISL_TILING_LINEAR_BIT);
172 assert(ok);
173 }
174
175 static void
176 get_blorp_surf_for_anv_image(const struct anv_image *image,
177 VkImageAspectFlags aspect,
178 enum isl_aux_usage aux_usage,
179 struct blorp_surf *blorp_surf)
180 {
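/* Note: stencil surfaces have no auxiliary data, and none of the BLORP
 * operations built through this helper consume HiZ (the HiZ resolve path at
 * the bottom of this file attaches the aux surface manually), so both cases
 * fall back to ISL_AUX_USAGE_NONE.
 */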
181 if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT ||
182 aux_usage == ISL_AUX_USAGE_HIZ)
183 aux_usage = ISL_AUX_USAGE_NONE;
184
185 const struct anv_surface *surface =
186 anv_image_get_surface_for_aspect_mask(image, aspect);
187
188 *blorp_surf = (struct blorp_surf) {
189 .surf = &surface->isl,
190 .addr = {
191 .buffer = image->bo,
192 .offset = image->offset + surface->offset,
193 },
194 };
195
196 if (aux_usage != ISL_AUX_USAGE_NONE) {
197 blorp_surf->aux_surf = &image->aux_surface.isl;
198 blorp_surf->aux_addr = (struct blorp_address) {
199 .buffer = image->bo,
200 .offset = image->offset + image->aux_surface.offset,
201 };
202 blorp_surf->aux_usage = aux_usage;
203 }
204 }
205
206 void anv_CmdCopyImage(
207 VkCommandBuffer commandBuffer,
208 VkImage srcImage,
209 VkImageLayout srcImageLayout,
210 VkImage dstImage,
211 VkImageLayout dstImageLayout,
212 uint32_t regionCount,
213 const VkImageCopy* pRegions)
214 {
215 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
216 ANV_FROM_HANDLE(anv_image, src_image, srcImage);
217 ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
218
219 struct blorp_batch batch;
220 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
221
222 for (unsigned r = 0; r < regionCount; r++) {
223 VkOffset3D srcOffset =
224 anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
225 VkOffset3D dstOffset =
226 anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
227 VkExtent3D extent =
228 anv_sanitize_image_extent(src_image->type, pRegions[r].extent);
229
230 unsigned dst_base_layer, layer_count;
231 if (dst_image->type == VK_IMAGE_TYPE_3D) {
232 dst_base_layer = pRegions[r].dstOffset.z;
233 layer_count = pRegions[r].extent.depth;
234 } else {
235 dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
236 layer_count =
237 anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);
238 }
239
240 unsigned src_base_layer;
241 if (src_image->type == VK_IMAGE_TYPE_3D) {
242 src_base_layer = pRegions[r].srcOffset.z;
243 } else {
244 src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
245 assert(layer_count ==
246 anv_get_layerCount(src_image, &pRegions[r].srcSubresource));
247 }
248
249 assert(pRegions[r].srcSubresource.aspectMask ==
250 pRegions[r].dstSubresource.aspectMask);
251
252 uint32_t a;
253 for_each_bit(a, pRegions[r].dstSubresource.aspectMask) {
254 VkImageAspectFlagBits aspect = (1 << a);
255
256 struct blorp_surf src_surf, dst_surf;
257 get_blorp_surf_for_anv_image(src_image, aspect, src_image->aux_usage,
258 &src_surf);
259 get_blorp_surf_for_anv_image(dst_image, aspect, dst_image->aux_usage,
260 &dst_surf);
261
262 for (unsigned i = 0; i < layer_count; i++) {
263 blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
264 src_base_layer + i,
265 &dst_surf, pRegions[r].dstSubresource.mipLevel,
266 dst_base_layer + i,
267 srcOffset.x, srcOffset.y,
268 dstOffset.x, dstOffset.y,
269 extent.width, extent.height);
270 }
271 }
272 }
273
274 blorp_batch_finish(&batch);
275 }
276
277 static void
278 copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
279 struct anv_buffer *anv_buffer,
280 struct anv_image *anv_image,
281 uint32_t regionCount,
282 const VkBufferImageCopy* pRegions,
283 bool buffer_to_image)
284 {
285 struct blorp_batch batch;
286 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
287
288 struct {
289 struct blorp_surf surf;
290 uint32_t level;
291 VkOffset3D offset;
292 } image, buffer, *src, *dst;
293
294 buffer.level = 0;
295 buffer.offset = (VkOffset3D) { 0, 0, 0 };
296
297 if (buffer_to_image) {
298 src = &buffer;
299 dst = &image;
300 } else {
301 src = &image;
302 dst = &buffer;
303 }
304
305 for (unsigned r = 0; r < regionCount; r++) {
306 const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;
307
308 get_blorp_surf_for_anv_image(anv_image, aspect, anv_image->aux_usage,
309 &image.surf);
310 image.offset =
311 anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
312 image.level = pRegions[r].imageSubresource.mipLevel;
313
314 VkExtent3D extent =
315 anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
316 if (anv_image->type != VK_IMAGE_TYPE_3D) {
317 image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
318 extent.depth =
319 anv_get_layerCount(anv_image, &pRegions[r].imageSubresource);
320 }
321
322 const enum isl_format buffer_format =
323 anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
324 aspect, VK_IMAGE_TILING_LINEAR);
325
326 const VkExtent3D bufferImageExtent = {
327 .width = pRegions[r].bufferRowLength ?
328 pRegions[r].bufferRowLength : extent.width,
329 .height = pRegions[r].bufferImageHeight ?
330 pRegions[r].bufferImageHeight : extent.height,
331 };
332
333 const struct isl_format_layout *buffer_fmtl =
334 isl_format_get_layout(buffer_format);
335
336 const uint32_t buffer_row_pitch =
337 DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
338 (buffer_fmtl->bpb / 8);
339
340 const uint32_t buffer_layer_stride =
341 DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
342 buffer_row_pitch;
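/* For illustration: with a BC1-compressed image (4x4 blocks, 64 bits per
 * block) and a 128x128 bufferImageExtent, buffer_row_pitch is
 * DIV_ROUND_UP(128, 4) * 8 = 256 bytes and buffer_layer_stride is
 * DIV_ROUND_UP(128, 4) * 256 = 8192 bytes.
 */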
343
344 struct isl_surf buffer_isl_surf;
345 get_blorp_surf_for_anv_buffer(cmd_buffer->device,
346 anv_buffer, pRegions[r].bufferOffset,
347 extent.width, extent.height,
348 buffer_row_pitch, buffer_format,
349 &buffer.surf, &buffer_isl_surf);
350
351 for (unsigned z = 0; z < extent.depth; z++) {
352 blorp_copy(&batch, &src->surf, src->level, src->offset.z,
353 &dst->surf, dst->level, dst->offset.z,
354 src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
355 extent.width, extent.height);
356
357 image.offset.z++;
358 buffer.surf.addr.offset += buffer_layer_stride;
359 }
360 }
361
362 blorp_batch_finish(&batch);
363 }
364
365 void anv_CmdCopyBufferToImage(
366 VkCommandBuffer commandBuffer,
367 VkBuffer srcBuffer,
368 VkImage dstImage,
369 VkImageLayout dstImageLayout,
370 uint32_t regionCount,
371 const VkBufferImageCopy* pRegions)
372 {
373 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
374 ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
375 ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
376
377 copy_buffer_to_image(cmd_buffer, src_buffer, dst_image,
378 regionCount, pRegions, true);
379 }
380
381 void anv_CmdCopyImageToBuffer(
382 VkCommandBuffer commandBuffer,
383 VkImage srcImage,
384 VkImageLayout srcImageLayout,
385 VkBuffer dstBuffer,
386 uint32_t regionCount,
387 const VkBufferImageCopy* pRegions)
388 {
389 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
390 ANV_FROM_HANDLE(anv_image, src_image, srcImage);
391 ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
392
393 copy_buffer_to_image(cmd_buffer, dst_buffer, src_image,
394 regionCount, pRegions, false);
395 }
396
397 static bool
398 flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
399 {
400 bool flip = false;
401 if (*src0 > *src1) {
402 unsigned tmp = *src0;
403 *src0 = *src1;
404 *src1 = tmp;
405 flip = !flip;
406 }
407
408 if (*dst0 > *dst1) {
409 unsigned tmp = *dst0;
410 *dst0 = *dst1;
411 *dst1 = tmp;
412 flip = !flip;
413 }
414
415 return flip;
416 }
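/* For illustration: with *src0 = 10, *src1 = 2 and *dst0 = 3, *dst1 = 7, the
 * source pair is swapped to (2, 10), the destination pair is left alone, and
 * the function returns true, i.e. the blit must be mirrored along that axis.
 */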
417
418 void anv_CmdBlitImage(
419 VkCommandBuffer commandBuffer,
420 VkImage srcImage,
421 VkImageLayout srcImageLayout,
422 VkImage dstImage,
423 VkImageLayout dstImageLayout,
424 uint32_t regionCount,
425 const VkImageBlit* pRegions,
426 VkFilter filter)
427
428 {
429 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
430 ANV_FROM_HANDLE(anv_image, src_image, srcImage);
431 ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
432
433 struct blorp_surf src, dst;
434
435 uint32_t gl_filter;
436 switch (filter) {
437 case VK_FILTER_NEAREST:
438 gl_filter = 0x2600; /* GL_NEAREST */
439 break;
440 case VK_FILTER_LINEAR:
441 gl_filter = 0x2601; /* GL_LINEAR */
442 break;
443 default:
444 unreachable("Invalid filter");
445 }
446
447 struct blorp_batch batch;
448 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
449
450 for (unsigned r = 0; r < regionCount; r++) {
451 const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
452 const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;
453
454 get_blorp_surf_for_anv_image(src_image, src_res->aspectMask,
455 src_image->aux_usage, &src);
456 get_blorp_surf_for_anv_image(dst_image, dst_res->aspectMask,
457 dst_image->aux_usage, &dst);
458
459 struct anv_format src_format =
460 anv_get_format(&cmd_buffer->device->info, src_image->vk_format,
461 src_res->aspectMask, src_image->tiling);
462 struct anv_format dst_format =
463 anv_get_format(&cmd_buffer->device->info, dst_image->vk_format,
464 dst_res->aspectMask, dst_image->tiling);
465
466 unsigned dst_start, dst_end;
467 if (dst_image->type == VK_IMAGE_TYPE_3D) {
468 assert(dst_res->baseArrayLayer == 0);
469 dst_start = pRegions[r].dstOffsets[0].z;
470 dst_end = pRegions[r].dstOffsets[1].z;
471 } else {
472 dst_start = dst_res->baseArrayLayer;
473 dst_end = dst_start + anv_get_layerCount(dst_image, dst_res);
474 }
475
476 unsigned src_start, src_end;
477 if (src_image->type == VK_IMAGE_TYPE_3D) {
478 assert(src_res->baseArrayLayer == 0);
479 src_start = pRegions[r].srcOffsets[0].z;
480 src_end = pRegions[r].srcOffsets[1].z;
481 } else {
482 src_start = src_res->baseArrayLayer;
483 src_end = src_start + anv_get_layerCount(src_image, src_res);
484 }
485
486 bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
487 float src_z_step = (float)(src_end + 1 - src_start) /
488 (float)(dst_end + 1 - dst_start);
489
490 if (flip_z) {
491 src_start = src_end;
492 src_z_step *= -1;
493 }
494
495 unsigned src_x0 = pRegions[r].srcOffsets[0].x;
496 unsigned src_x1 = pRegions[r].srcOffsets[1].x;
497 unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
498 unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
499 bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);
500
501 unsigned src_y0 = pRegions[r].srcOffsets[0].y;
502 unsigned src_y1 = pRegions[r].srcOffsets[1].y;
503 unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
504 unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
505 bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);
506
507 const unsigned num_layers = dst_end - dst_start;
508 for (unsigned i = 0; i < num_layers; i++) {
509 unsigned dst_z = dst_start + i;
510 unsigned src_z = src_start + i * src_z_step;
511
512 blorp_blit(&batch, &src, src_res->mipLevel, src_z,
513 src_format.isl_format, src_format.swizzle,
514 &dst, dst_res->mipLevel, dst_z,
515 dst_format.isl_format,
516 anv_swizzle_for_render(dst_format.swizzle),
517 src_x0, src_y0, src_x1, src_y1,
518 dst_x0, dst_y0, dst_x1, dst_y1,
519 gl_filter, flip_x, flip_y);
520 }
521
522 }
523
524 blorp_batch_finish(&batch);
525 }
526
527 static enum isl_format
528 isl_format_for_size(unsigned size_B)
529 {
530 switch (size_B) {
531 case 1: return ISL_FORMAT_R8_UINT;
532 case 2: return ISL_FORMAT_R8G8_UINT;
533 case 4: return ISL_FORMAT_R8G8B8A8_UINT;
534 case 8: return ISL_FORMAT_R16G16B16A16_UINT;
535 case 16: return ISL_FORMAT_R32G32B32A32_UINT;
536 default:
537 unreachable("Not a power-of-two format size");
538 }
539 }
540
541 static void
542 do_buffer_copy(struct blorp_batch *batch,
543 struct anv_bo *src, uint64_t src_offset,
544 struct anv_bo *dst, uint64_t dst_offset,
545 int width, int height, int block_size)
546 {
547 struct anv_device *device = batch->blorp->driver_ctx;
548
549 /* The actual format we pick doesn't matter as blorp will throw it away.
550 * The only thing that actually matters is the size.
551 */
552 enum isl_format format = isl_format_for_size(block_size);
553
554 struct isl_surf surf;
555 isl_surf_init(&device->isl_dev, &surf,
556 .dim = ISL_SURF_DIM_2D,
557 .format = format,
558 .width = width,
559 .height = height,
560 .depth = 1,
561 .levels = 1,
562 .array_len = 1,
563 .samples = 1,
564 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
565 ISL_SURF_USAGE_RENDER_TARGET_BIT,
566 .tiling_flags = ISL_TILING_LINEAR_BIT);
567 assert(surf.row_pitch == width * block_size);
568
569 struct blorp_surf src_blorp_surf = {
570 .surf = &surf,
571 .addr = {
572 .buffer = src,
573 .offset = src_offset,
574 },
575 };
576
577 struct blorp_surf dst_blorp_surf = {
578 .surf = &surf,
579 .addr = {
580 .buffer = dst,
581 .offset = dst_offset,
582 },
583 };
584
585 blorp_copy(batch, &src_blorp_surf, 0, 0, &dst_blorp_surf, 0, 0,
586 0, 0, 0, 0, width, height);
587 }
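/* For illustration: do_buffer_copy(batch, src, 0, dst, 0, 4096, 1, 16) moves
 * 4096 * 1 * 16 = 64 KiB by treating both buffers as a single-row
 * R32G32B32A32_UINT surface.
 */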
588
589 /**
590 * Returns the greatest common divisor of a and b that is a power of two.
591 */
592 static inline uint64_t
593 gcd_pow2_u64(uint64_t a, uint64_t b)
594 {
595 assert(a > 0 || b > 0);
596
597 unsigned a_log2 = ffsll(a) - 1;
598 unsigned b_log2 = ffsll(b) - 1;
599
600 /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX, in which
601 * case the MIN2() will take the other one. If both are 0 then we will
602 * hit the assert above.
603 */
604 return 1 << MIN2(a_log2, b_log2);
605 }
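/* Worked examples: gcd_pow2_u64(48, 36) == 4, since the largest power-of-two
 * divisors of the operands are 16 and 4; gcd_pow2_u64(0, 24) == 8, because
 * ffsll(0) - 1 wraps to UINT_MAX and the MIN2() picks the other exponent.
 */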
606
607 /* This is the maximum width/height our HW can handle */
608 #define MAX_SURFACE_DIM (1ull << 14)
609
610 void anv_CmdCopyBuffer(
611 VkCommandBuffer commandBuffer,
612 VkBuffer srcBuffer,
613 VkBuffer dstBuffer,
614 uint32_t regionCount,
615 const VkBufferCopy* pRegions)
616 {
617 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
618 ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
619 ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
620
621 struct blorp_batch batch;
622 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
623
624 for (unsigned r = 0; r < regionCount; r++) {
625 uint64_t src_offset = src_buffer->offset + pRegions[r].srcOffset;
626 uint64_t dst_offset = dst_buffer->offset + pRegions[r].dstOffset;
627 uint64_t copy_size = pRegions[r].size;
628
629 /* First, we compute the biggest format that can be used with the
630 * given offsets and size.
631 */
632 int bs = 16;
633 bs = gcd_pow2_u64(bs, src_offset);
634 bs = gcd_pow2_u64(bs, dst_offset);
635 bs = gcd_pow2_u64(bs, pRegions[r].size);
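/* For illustration: with src_offset = 4, dst_offset = 8 and size = 20, the
 * reductions above settle on bs = 4. A 100 MiB copy with bs = 16 skips the
 * max-sized square copies below (one square is 16384 * 16384 * 16 bytes, or
 * 4 GiB), performs a single 16384 x 400 max-width copy covering all
 * 104857600 bytes, and leaves nothing for the final small copy.
 */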
636
637 /* Next, we make a bunch of max-sized copies */
638 uint64_t max_copy_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
639 while (copy_size >= max_copy_size) {
640 do_buffer_copy(&batch, src_buffer->bo, src_offset,
641 dst_buffer->bo, dst_offset,
642 MAX_SURFACE_DIM, MAX_SURFACE_DIM, bs);
643 copy_size -= max_copy_size;
644 src_offset += max_copy_size;
645 dst_offset += max_copy_size;
646 }
647
648 /* Now make a max-width copy */
649 uint64_t height = copy_size / (MAX_SURFACE_DIM * bs);
650 assert(height < MAX_SURFACE_DIM);
651 if (height != 0) {
652 uint64_t rect_copy_size = height * MAX_SURFACE_DIM * bs;
653 do_buffer_copy(&batch, src_buffer->bo, src_offset,
654 dst_buffer->bo, dst_offset,
655 MAX_SURFACE_DIM, height, bs);
656 copy_size -= rect_copy_size;
657 src_offset += rect_copy_size;
658 dst_offset += rect_copy_size;
659 }
660
661 /* Finally, make a small copy to finish it off */
662 if (copy_size != 0) {
663 do_buffer_copy(&batch, src_buffer->bo, src_offset,
664 dst_buffer->bo, dst_offset,
665 copy_size / bs, 1, bs);
666 }
667 }
668
669 blorp_batch_finish(&batch);
670 }
671
672 void anv_CmdUpdateBuffer(
673 VkCommandBuffer commandBuffer,
674 VkBuffer dstBuffer,
675 VkDeviceSize dstOffset,
676 VkDeviceSize dataSize,
677 const void* pData)
678 {
679 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
680 ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
681
682 struct blorp_batch batch;
683 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
684
685 /* We can't quite grab a full block because the state stream needs a
686 * little data at the top to build its linked list.
687 */
688 const uint32_t max_update_size =
689 cmd_buffer->device->dynamic_state_block_pool.block_size - 64;
690
691 assert(max_update_size < MAX_SURFACE_DIM * 4);
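/* Note: vkCmdUpdateBuffer requires dstOffset and dataSize to be multiples of
 * 4, and the block pool size is a large power of two, so the bs computed
 * below is at least 4. Combined with the assert above, copy_size / bs stays
 * below MAX_SURFACE_DIM and every chunk fits in a single-row do_buffer_copy().
 */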
692
693 while (dataSize) {
694 const uint32_t copy_size = MIN2(dataSize, max_update_size);
695
696 struct anv_state tmp_data =
697 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);
698
699 memcpy(tmp_data.map, pData, copy_size);
700
701 int bs = 16;
702 bs = gcd_pow2_u64(bs, dstOffset);
703 bs = gcd_pow2_u64(bs, copy_size);
704
705 do_buffer_copy(&batch,
706 &cmd_buffer->device->dynamic_state_block_pool.bo,
707 tmp_data.offset,
708 dst_buffer->bo, dst_buffer->offset + dstOffset,
709 copy_size / bs, 1, bs);
710
711 dataSize -= copy_size;
712 dstOffset += copy_size;
713 pData = (void *)pData + copy_size;
714 }
715
716 blorp_batch_finish(&batch);
717 }
718
719 void anv_CmdFillBuffer(
720 VkCommandBuffer commandBuffer,
721 VkBuffer dstBuffer,
722 VkDeviceSize dstOffset,
723 VkDeviceSize fillSize,
724 uint32_t data)
725 {
726 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
727 ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
728 struct blorp_surf surf;
729 struct isl_surf isl_surf;
730
731 struct blorp_batch batch;
732 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
733
734 fillSize = anv_buffer_get_range(dst_buffer, dstOffset, fillSize);
735
736 /* From the Vulkan spec:
737 *
738 * "size is the number of bytes to fill, and must be either a multiple
739 * of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
740 * the buffer. If VK_WHOLE_SIZE is used and the remaining size of the
741 * buffer is not a multiple of 4, then the nearest smaller multiple is
742 * used."
743 */
744 fillSize &= ~3ull;
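/* For illustration: a remaining range of 13 bytes is trimmed to 12 here.
 * Since dstOffset must also be a multiple of 4, the bs computed below is
 * always at least 4 (ISL_FORMAT_R8G8B8A8_UINT or larger).
 */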
745
746 /* First, we compute the biggest format that can be used with the
747 * given offsets and size.
748 */
749 int bs = 16;
750 bs = gcd_pow2_u64(bs, dstOffset);
751 bs = gcd_pow2_u64(bs, fillSize);
752 enum isl_format isl_format = isl_format_for_size(bs);
753
754 union isl_color_value color = {
755 .u32 = { data, data, data, data },
756 };
757
758 const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
759 while (fillSize >= max_fill_size) {
760 get_blorp_surf_for_anv_buffer(cmd_buffer->device,
761 dst_buffer, dstOffset,
762 MAX_SURFACE_DIM, MAX_SURFACE_DIM,
763 MAX_SURFACE_DIM * bs, isl_format,
764 &surf, &isl_surf);
765
766 blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
767 0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
768 color, NULL);
769 fillSize -= max_fill_size;
770 dstOffset += max_fill_size;
771 }
772
773 uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
774 assert(height < MAX_SURFACE_DIM);
775 if (height != 0) {
776 const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
777 get_blorp_surf_for_anv_buffer(cmd_buffer->device,
778 dst_buffer, dstOffset,
779 MAX_SURFACE_DIM, height,
780 MAX_SURFACE_DIM * bs, isl_format,
781 &surf, &isl_surf);
782
783 blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
784 0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
785 color, NULL);
786 fillSize -= rect_fill_size;
787 dstOffset += rect_fill_size;
788 }
789
790 if (fillSize != 0) {
791 const uint32_t width = fillSize / bs;
792 get_blorp_surf_for_anv_buffer(cmd_buffer->device,
793 dst_buffer, dstOffset,
794 width, 1,
795 width * bs, isl_format,
796 &surf, &isl_surf);
797
798 blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
799 0, 0, 1, 0, 0, width, 1,
800 color, NULL);
801 }
802
803 blorp_batch_finish(&batch);
804 }
805
806 void anv_CmdClearColorImage(
807 VkCommandBuffer commandBuffer,
808 VkImage _image,
809 VkImageLayout imageLayout,
810 const VkClearColorValue* pColor,
811 uint32_t rangeCount,
812 const VkImageSubresourceRange* pRanges)
813 {
814 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
815 ANV_FROM_HANDLE(anv_image, image, _image);
816
817 static const bool color_write_disable[4] = { false, false, false, false };
818
819 struct blorp_batch batch;
820 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
821
822 struct blorp_surf surf;
823 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
824 image->aux_usage, &surf);
825
826 for (unsigned r = 0; r < rangeCount; r++) {
827 if (pRanges[r].aspectMask == 0)
828 continue;
829
830 assert(pRanges[r].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
831
832 struct anv_format src_format =
833 anv_get_format(&cmd_buffer->device->info, image->vk_format,
834 VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);
835
836 unsigned base_layer = pRanges[r].baseArrayLayer;
837 unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);
838
839 for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
840 const unsigned level = pRanges[r].baseMipLevel + i;
841 const unsigned level_width = anv_minify(image->extent.width, level);
842 const unsigned level_height = anv_minify(image->extent.height, level);
843
844 if (image->type == VK_IMAGE_TYPE_3D) {
845 base_layer = 0;
846 layer_count = anv_minify(image->extent.depth, level);
847 }
848
849 blorp_clear(&batch, &surf,
850 src_format.isl_format, src_format.swizzle,
851 level, base_layer, layer_count,
852 0, 0, level_width, level_height,
853 vk_to_isl_color(*pColor), color_write_disable);
854 }
855 }
856
857 blorp_batch_finish(&batch);
858 }
859
860 void anv_CmdClearDepthStencilImage(
861 VkCommandBuffer commandBuffer,
862 VkImage image_h,
863 VkImageLayout imageLayout,
864 const VkClearDepthStencilValue* pDepthStencil,
865 uint32_t rangeCount,
866 const VkImageSubresourceRange* pRanges)
867 {
868 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
869 ANV_FROM_HANDLE(anv_image, image, image_h);
870
871 struct blorp_batch batch;
872 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
873
874 struct blorp_surf depth, stencil;
875 if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
876 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
877 ISL_AUX_USAGE_NONE, &depth);
878 } else {
879 memset(&depth, 0, sizeof(depth));
880 }
881
882 if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
883 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_STENCIL_BIT,
884 ISL_AUX_USAGE_NONE, &stencil);
885 } else {
886 memset(&stencil, 0, sizeof(stencil));
887 }
888
889 for (unsigned r = 0; r < rangeCount; r++) {
890 if (pRanges[r].aspectMask == 0)
891 continue;
892
893 bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
894 bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;
895
896 unsigned base_layer = pRanges[r].baseArrayLayer;
897 unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);
898
899 for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
900 const unsigned level = pRanges[r].baseMipLevel + i;
901 const unsigned level_width = anv_minify(image->extent.width, level);
902 const unsigned level_height = anv_minify(image->extent.height, level);
903
904 if (image->type == VK_IMAGE_TYPE_3D)
905 layer_count = anv_minify(image->extent.depth, level);
906
907 blorp_clear_depth_stencil(&batch, &depth, &stencil,
908 level, base_layer, layer_count,
909 0, 0, level_width, level_height,
910 clear_depth, pDepthStencil->depth,
911 clear_stencil ? 0xff : 0,
912 pDepthStencil->stencil);
913 }
914 }
915
916 blorp_batch_finish(&batch);
917 }
918
919 VkResult
920 anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
921 uint32_t num_entries,
922 uint32_t *state_offset,
923 struct anv_state *bt_state)
924 {
925 *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
926 state_offset);
927 if (bt_state->map == NULL) {
928 /* We ran out of space. Grab a new binding table block. */
929 VkResult result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
930 if (result != VK_SUCCESS)
931 return result;
932
933 /* Re-emit state base addresses so we get the new surface state base
934 * address before we start emitting binding tables etc.
935 */
936 anv_cmd_buffer_emit_state_base_address(cmd_buffer);
937
938 *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
939 state_offset);
940 assert(bt_state->map != NULL);
941 }
942
943 return VK_SUCCESS;
944 }
945
946 static VkResult
947 binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
948 struct anv_state surface_state,
949 uint32_t *bt_offset)
950 {
951 uint32_t state_offset;
952 struct anv_state bt_state;
953
954 VkResult result =
955 anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset,
956 &bt_state);
957 if (result != VK_SUCCESS)
958 return result;
959
960 uint32_t *bt_map = bt_state.map;
961 bt_map[0] = surface_state.offset + state_offset;
962
963 *bt_offset = bt_state.offset;
964 return VK_SUCCESS;
965 }
966
967 static void
968 clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
969 struct blorp_batch *batch,
970 const VkClearAttachment *attachment,
971 uint32_t rectCount, const VkClearRect *pRects)
972 {
973 const struct anv_subpass *subpass = cmd_buffer->state.subpass;
974 const uint32_t color_att = attachment->colorAttachment;
975 const uint32_t att_idx = subpass->color_attachments[color_att].attachment;
976
977 if (att_idx == VK_ATTACHMENT_UNUSED)
978 return;
979
980 struct anv_render_pass_attachment *pass_att =
981 &cmd_buffer->state.pass->attachments[att_idx];
982 struct anv_attachment_state *att_state =
983 &cmd_buffer->state.attachments[att_idx];
984
985 uint32_t binding_table;
986 VkResult result =
987 binding_table_for_surface_state(cmd_buffer, att_state->color_rt_state,
988 &binding_table);
989 if (result != VK_SUCCESS)
990 return;
991
992 union isl_color_value clear_color =
993 vk_to_isl_color(attachment->clearValue.color);
994
995 for (uint32_t r = 0; r < rectCount; ++r) {
996 const VkOffset2D offset = pRects[r].rect.offset;
997 const VkExtent2D extent = pRects[r].rect.extent;
998 blorp_clear_attachments(batch, binding_table,
999 ISL_FORMAT_UNSUPPORTED, pass_att->samples,
1000 pRects[r].baseArrayLayer,
1001 pRects[r].layerCount,
1002 offset.x, offset.y,
1003 offset.x + extent.width, offset.y + extent.height,
1004 true, clear_color, false, 0.0f, 0, 0);
1005 }
1006 }
1007
1008 static void
1009 clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
1010 struct blorp_batch *batch,
1011 const VkClearAttachment *attachment,
1012 uint32_t rectCount, const VkClearRect *pRects)
1013 {
1014 static const union isl_color_value color_value = { .u32 = { 0, } };
1015 const struct anv_subpass *subpass = cmd_buffer->state.subpass;
1016 const uint32_t att_idx = subpass->depth_stencil_attachment.attachment;
1017
1018 if (att_idx == VK_ATTACHMENT_UNUSED)
1019 return;
1020
1021 struct anv_render_pass_attachment *pass_att =
1022 &cmd_buffer->state.pass->attachments[att_idx];
1023
1024 bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
1025 bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;
1026
1027 enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
1028 if (clear_depth) {
1029 depth_format = anv_get_isl_format(&cmd_buffer->device->info,
1030 pass_att->format,
1031 VK_IMAGE_ASPECT_DEPTH_BIT,
1032 VK_IMAGE_TILING_OPTIMAL);
1033 }
1034
1035 uint32_t binding_table;
1036 VkResult result =
1037 binding_table_for_surface_state(cmd_buffer,
1038 cmd_buffer->state.null_surface_state,
1039 &binding_table);
1040 if (result != VK_SUCCESS)
1041 return;
1042
1043 for (uint32_t r = 0; r < rectCount; ++r) {
1044 const VkOffset2D offset = pRects[r].rect.offset;
1045 const VkExtent2D extent = pRects[r].rect.extent;
1046 VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
1047 blorp_clear_attachments(batch, binding_table,
1048 depth_format, pass_att->samples,
1049 pRects[r].baseArrayLayer,
1050 pRects[r].layerCount,
1051 offset.x, offset.y,
1052 offset.x + extent.width, offset.y + extent.height,
1053 false, color_value,
1054 clear_depth, value.depth,
1055 clear_stencil ? 0xff : 0, value.stencil);
1056 }
1057 }
1058
1059 void anv_CmdClearAttachments(
1060 VkCommandBuffer commandBuffer,
1061 uint32_t attachmentCount,
1062 const VkClearAttachment* pAttachments,
1063 uint32_t rectCount,
1064 const VkClearRect* pRects)
1065 {
1066 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1067
1068 /* Because this gets called within a render pass, we tell blorp not to
1069 * trash our depth and stencil buffers.
1070 */
1071 struct blorp_batch batch;
1072 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
1073 BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);
1074
1075 for (uint32_t a = 0; a < attachmentCount; ++a) {
1076 if (pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
1077 clear_color_attachment(cmd_buffer, &batch,
1078 &pAttachments[a],
1079 rectCount, pRects);
1080 } else {
1081 clear_depth_stencil_attachment(cmd_buffer, &batch,
1082 &pAttachments[a],
1083 rectCount, pRects);
1084 }
1085 }
1086
1087 blorp_batch_finish(&batch);
1088 }
1089
1090 enum subpass_stage {
1091 SUBPASS_STAGE_LOAD,
1092 SUBPASS_STAGE_DRAW,
1093 SUBPASS_STAGE_RESOLVE,
1094 };
1095
1096 static bool
1097 attachment_needs_flush(struct anv_cmd_buffer *cmd_buffer,
1098 struct anv_render_pass_attachment *att,
1099 enum subpass_stage stage)
1100 {
1101 struct anv_render_pass *pass = cmd_buffer->state.pass;
1102 const uint32_t subpass_idx = anv_get_subpass_id(&cmd_buffer->state);
1103
1104 /* We handle this subpass specially based on the current stage */
1105 enum anv_subpass_usage usage = att->subpass_usage[subpass_idx];
1106 switch (stage) {
1107 case SUBPASS_STAGE_LOAD:
1108 if (usage & (ANV_SUBPASS_USAGE_INPUT | ANV_SUBPASS_USAGE_RESOLVE_SRC))
1109 return true;
1110 break;
1111
1112 case SUBPASS_STAGE_DRAW:
1113 if (usage & ANV_SUBPASS_USAGE_RESOLVE_SRC)
1114 return true;
1115 break;
1116
1117 default:
1118 break;
1119 }
1120
1121 for (uint32_t s = subpass_idx + 1; s < pass->subpass_count; s++) {
1122 usage = att->subpass_usage[s];
1123
1124 /* If this attachment is going to be used as an input in this or any
1125 * future subpass, then we need to flush its cache and invalidate the
1126 * texture cache.
1127 */
1128 if (att->subpass_usage[s] & ANV_SUBPASS_USAGE_INPUT)
1129 return true;
1130
1131 if (usage & (ANV_SUBPASS_USAGE_DRAW | ANV_SUBPASS_USAGE_RESOLVE_DST)) {
1132 /* We found another subpass that draws to this attachment. We'll
1133 * wait to resolve until then.
1134 */
1135 return false;
1136 }
1137 }
1138
1139 return false;
1140 }
1141
1142 static void
1143 anv_cmd_buffer_flush_attachments(struct anv_cmd_buffer *cmd_buffer,
1144 enum subpass_stage stage)
1145 {
1146 struct anv_subpass *subpass = cmd_buffer->state.subpass;
1147 struct anv_render_pass *pass = cmd_buffer->state.pass;
1148
1149 for (uint32_t i = 0; i < subpass->color_count; ++i) {
1150 uint32_t att = subpass->color_attachments[i].attachment;
1151 assert(att < pass->attachment_count);
1152 if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
1153 cmd_buffer->state.pending_pipe_bits |=
1154 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
1155 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
1156 }
1157 }
1158
1159 if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
1160 uint32_t att = subpass->depth_stencil_attachment.attachment;
1161 assert(att < pass->attachment_count);
1162 if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
1163 cmd_buffer->state.pending_pipe_bits |=
1164 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
1165 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
1166 }
1167 }
1168 }
1169
1170 static bool
1171 subpass_needs_clear(const struct anv_cmd_buffer *cmd_buffer)
1172 {
1173 const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
1174 uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;
1175
1176 for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
1177 uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
1178 if (cmd_state->attachments[a].pending_clear_aspects) {
1179 return true;
1180 }
1181 }
1182
1183 if (ds != VK_ATTACHMENT_UNUSED &&
1184 cmd_state->attachments[ds].pending_clear_aspects) {
1185 return true;
1186 }
1187
1188 return false;
1189 }
1190
1191 void
1192 anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
1193 {
1194 const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
1195 const VkRect2D render_area = cmd_buffer->state.render_area;
1196
1197
1198 if (!subpass_needs_clear(cmd_buffer))
1199 return;
1200
1201 /* Because this gets called within a render pass, we tell blorp not to
1202 * trash our depth and stencil buffers.
1203 */
1204 struct blorp_batch batch;
1205 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
1206 BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);
1207
1208 VkClearRect clear_rect = {
1209 .rect = cmd_buffer->state.render_area,
1210 .baseArrayLayer = 0,
1211 .layerCount = cmd_buffer->state.framebuffer->layers,
1212 };
1213
1214 struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
1215 for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
1216 const uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
1217 struct anv_attachment_state *att_state = &cmd_state->attachments[a];
1218
1219 if (!att_state->pending_clear_aspects)
1220 continue;
1221
1222 assert(att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);
1223
1224 struct anv_image_view *iview = fb->attachments[a];
1225 const struct anv_image *image = iview->image;
1226 struct blorp_surf surf;
1227 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
1228 att_state->aux_usage, &surf);
1229
1230 if (att_state->fast_clear) {
1231 surf.clear_color = vk_to_isl_color(att_state->clear_value.color);
1232
1233 /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
1234 *
1235 * "After Render target fast clear, pipe-control with color cache
1236 * write-flush must be issued before sending any DRAW commands on
1237 * that render target."
1238 *
1239 * This comment is a bit cryptic and doesn't really tell you what's
1240 * going on or what's really needed. It appears that fast clear ops are
1241 * not properly synchronized with other drawing. This means that we
1242 * cannot have a fast clear operation in the pipe at the same time as
1243 * other regular drawing operations. We need to use a PIPE_CONTROL
1244 * to ensure that the contents of the previous draw hit the render
1245 * target before we resolve and then use a second PIPE_CONTROL after
1246 * the resolve to ensure that it is completed before any additional
1247 * drawing occurs.
1248 */
1249 cmd_buffer->state.pending_pipe_bits |=
1250 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
1251
1252 blorp_fast_clear(&batch, &surf, iview->isl.format,
1253 iview->isl.base_level,
1254 iview->isl.base_array_layer, fb->layers,
1255 render_area.offset.x, render_area.offset.y,
1256 render_area.offset.x + render_area.extent.width,
1257 render_area.offset.y + render_area.extent.height);
1258
1259 cmd_buffer->state.pending_pipe_bits |=
1260 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
1261 } else {
1262 blorp_clear(&batch, &surf, iview->isl.format,
1263 anv_swizzle_for_render(iview->isl.swizzle),
1264 iview->isl.base_level,
1265 iview->isl.base_array_layer, fb->layers,
1266 render_area.offset.x, render_area.offset.y,
1267 render_area.offset.x + render_area.extent.width,
1268 render_area.offset.y + render_area.extent.height,
1269 vk_to_isl_color(att_state->clear_value.color), NULL);
1270 }
1271
1272 att_state->pending_clear_aspects = 0;
1273 }
1274
1275 const uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;
1276
1277 if (ds != VK_ATTACHMENT_UNUSED &&
1278 cmd_state->attachments[ds].pending_clear_aspects) {
1279
1280 VkClearAttachment clear_att = {
1281 .aspectMask = cmd_state->attachments[ds].pending_clear_aspects,
1282 .clearValue = cmd_state->attachments[ds].clear_value,
1283 };
1284
1285
1286 const uint8_t gen = cmd_buffer->device->info.gen;
1287 bool clear_with_hiz = gen >= 8 && cmd_state->attachments[ds].aux_usage ==
1288 ISL_AUX_USAGE_HIZ;
1289 const struct anv_image_view *iview = fb->attachments[ds];
1290
1291 if (clear_with_hiz) {
1292 const bool clear_depth = clear_att.aspectMask &
1293 VK_IMAGE_ASPECT_DEPTH_BIT;
1294 const bool clear_stencil = clear_att.aspectMask &
1295 VK_IMAGE_ASPECT_STENCIL_BIT;
1296
1297 /* Check against restrictions for depth buffer clearing. A great GPU
1298 * performance benefit isn't expected when using the HZ sequence for
1299 * stencil-only clears. Therefore, we don't emit a HZ op sequence for
1300 * a stencil clear in addition to using the BLORP-fallback for depth.
1301 */
1302 if (clear_depth) {
1303 if (!blorp_can_hiz_clear_depth(gen, iview->isl.format,
1304 iview->image->samples,
1305 render_area.offset.x,
1306 render_area.offset.y,
1307 render_area.offset.x +
1308 render_area.extent.width,
1309 render_area.offset.y +
1310 render_area.extent.height)) {
1311 clear_with_hiz = false;
1312 } else if (clear_att.clearValue.depthStencil.depth !=
1313 ANV_HZ_FC_VAL) {
1314 /* Don't enable fast depth clears for any clear value not equal to
1315 * ANV_HZ_FC_VAL.
1316 */
1317 clear_with_hiz = false;
1318 } else if (gen == 8 &&
1319 anv_can_sample_with_hiz(&cmd_buffer->device->info,
1320 iview->aspect_mask,
1321 iview->image->samples)) {
1322 /* Only gen9+ supports returning ANV_HZ_FC_VAL when sampling a
1323 * fast-cleared portion of a HiZ buffer. Testing has revealed
1324 * that Gen8 only supports returning 0.0f. Gens prior to gen8 do
1325 * not support this feature at all.
1326 */
1327 clear_with_hiz = false;
1328 }
1329 }
1330
1331 if (clear_with_hiz) {
1332 blorp_gen8_hiz_clear_attachments(&batch, iview->image->samples,
1333 render_area.offset.x,
1334 render_area.offset.y,
1335 render_area.offset.x +
1336 render_area.extent.width,
1337 render_area.offset.y +
1338 render_area.extent.height,
1339 clear_depth, clear_stencil,
1340 clear_att.clearValue.
1341 depthStencil.stencil);
1342 }
1343 }
1344
1345 if (!clear_with_hiz) {
1346 clear_depth_stencil_attachment(cmd_buffer, &batch,
1347 &clear_att, 1, &clear_rect);
1348 }
1349
1350 cmd_state->attachments[ds].pending_clear_aspects = 0;
1351 }
1352
1353 blorp_batch_finish(&batch);
1354
1355 anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_LOAD);
1356 }
1357
1358 static void
1359 resolve_image(struct blorp_batch *batch,
1360 const struct anv_image *src_image,
1361 uint32_t src_level, uint32_t src_layer,
1362 const struct anv_image *dst_image,
1363 uint32_t dst_level, uint32_t dst_layer,
1364 VkImageAspectFlags aspect_mask,
1365 uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
1366 uint32_t width, uint32_t height)
1367 {
1368 assert(src_image->type == VK_IMAGE_TYPE_2D);
1369 assert(src_image->samples > 1);
1370 assert(dst_image->type == VK_IMAGE_TYPE_2D);
1371 assert(dst_image->samples == 1);
1372
1373 uint32_t a;
1374 for_each_bit(a, aspect_mask) {
1375 VkImageAspectFlagBits aspect = 1 << a;
1376
1377 struct blorp_surf src_surf, dst_surf;
1378 get_blorp_surf_for_anv_image(src_image, aspect,
1379 src_image->aux_usage, &src_surf);
1380 get_blorp_surf_for_anv_image(dst_image, aspect,
1381 dst_image->aux_usage, &dst_surf);
1382
1383 blorp_blit(batch,
1384 &src_surf, src_level, src_layer,
1385 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
1386 &dst_surf, dst_level, dst_layer,
1387 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
1388 src_x, src_y, src_x + width, src_y + height,
1389 dst_x, dst_y, dst_x + width, dst_y + height,
1390 0x2600 /* GL_NEAREST */, false, false);
1391 }
1392 }
1393
1394 void anv_CmdResolveImage(
1395 VkCommandBuffer commandBuffer,
1396 VkImage srcImage,
1397 VkImageLayout srcImageLayout,
1398 VkImage dstImage,
1399 VkImageLayout dstImageLayout,
1400 uint32_t regionCount,
1401 const VkImageResolve* pRegions)
1402 {
1403 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1404 ANV_FROM_HANDLE(anv_image, src_image, srcImage);
1405 ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
1406
1407 struct blorp_batch batch;
1408 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
1409
1410 for (uint32_t r = 0; r < regionCount; r++) {
1411 assert(pRegions[r].srcSubresource.aspectMask ==
1412 pRegions[r].dstSubresource.aspectMask);
1413 assert(anv_get_layerCount(src_image, &pRegions[r].srcSubresource) ==
1414 anv_get_layerCount(dst_image, &pRegions[r].dstSubresource));
1415
1416 const uint32_t layer_count =
1417 anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);
1418
1419 for (uint32_t layer = 0; layer < layer_count; layer++) {
1420 resolve_image(&batch,
1421 src_image, pRegions[r].srcSubresource.mipLevel,
1422 pRegions[r].srcSubresource.baseArrayLayer + layer,
1423 dst_image, pRegions[r].dstSubresource.mipLevel,
1424 pRegions[r].dstSubresource.baseArrayLayer + layer,
1425 pRegions[r].dstSubresource.aspectMask,
1426 pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
1427 pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
1428 pRegions[r].extent.width, pRegions[r].extent.height);
1429 }
1430 }
1431
1432 blorp_batch_finish(&batch);
1433 }
1434
1435 static void
1436 ccs_resolve_attachment(struct anv_cmd_buffer *cmd_buffer,
1437 struct blorp_batch *batch,
1438 uint32_t att)
1439 {
1440 struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
1441 struct anv_attachment_state *att_state =
1442 &cmd_buffer->state.attachments[att];
1443
1444 if (att_state->aux_usage == ISL_AUX_USAGE_NONE ||
1445 att_state->aux_usage == ISL_AUX_USAGE_MCS)
1446 return; /* Nothing to resolve */
1447
1448 assert(att_state->aux_usage == ISL_AUX_USAGE_CCS_E ||
1449 att_state->aux_usage == ISL_AUX_USAGE_CCS_D);
1450
1451 struct anv_render_pass *pass = cmd_buffer->state.pass;
1452 const uint32_t subpass_idx = anv_get_subpass_id(&cmd_buffer->state);
1453
1454 /* Scan forward to see all the ways this attachment will be used.
1455 * Ideally, we would like to resolve in the same subpass as the last write
1456 * of a particular attachment. That way we only resolve once but it's
1457 * still hot in the cache.
1458 */
1459 bool found_draw = false;
1460 bool self_dep = false;
1461 enum anv_subpass_usage usage = 0;
1462 for (uint32_t s = subpass_idx + 1; s < pass->subpass_count; s++) {
1463 usage |= pass->attachments[att].subpass_usage[s];
1464
1465 if (usage & (ANV_SUBPASS_USAGE_DRAW | ANV_SUBPASS_USAGE_RESOLVE_DST)) {
1466 /* We found another subpass that draws to this attachment. We'll
1467 * wait to resolve until then.
1468 */
1469 found_draw = true;
1470 if (pass->attachments[att].subpass_usage[s] & ANV_SUBPASS_USAGE_INPUT)
1471 self_dep = true;
1472 break;
1473 }
1474 }
1475
1476 struct anv_image_view *iview = fb->attachments[att];
1477 const struct anv_image *image = iview->image;
1478 assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
1479
1480 enum blorp_fast_clear_op resolve_op = BLORP_FAST_CLEAR_OP_NONE;
1481 if (!found_draw) {
1482 /* This is the last subpass that writes to this attachment so we need to
1483 * resolve here. Ideally, we would like to only resolve if the storeOp
1484 * is set to VK_ATTACHMENT_STORE_OP_STORE. However, we need to ensure
1485 * that the CCS bits are set to "resolved" because there may be copy or
1486 * blit operations (which may ignore CCS) between now and the next time
1487 * we render and we need to ensure that anything they write will be
1488 * respected in the next render. Unfortunately, the hardware does not
1489 * provide us with any sort of "invalidate" pass that sets the CCS to
1490 * "resolved" without writing to the render target.
1491 */
1492 if (iview->image->aux_usage != ISL_AUX_USAGE_CCS_E) {
1493 /* The image destination surface doesn't support compression outside
1494 * the render pass. We need a full resolve.
1495 */
1496 resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
1497 } else if (att_state->fast_clear) {
1498 /* We don't know what to do with clear colors outside the render
1499 * pass, so we need a partial resolve. The one exception is
1500 * transparent black: it is baked into the surface state object,
1501 * and thus no resolve is required in that case.
1502 */
1503 if (att_state->clear_value.color.uint32[0] ||
1504 att_state->clear_value.color.uint32[1] ||
1505 att_state->clear_value.color.uint32[2] ||
1506 att_state->clear_value.color.uint32[3])
1507 resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
1508 } else {
1509 /* The image "natively" supports all the compression we care about
1510 * and we don't need to resolve at all. If this is the case, we also
1511 * don't need to resolve for any of the input attachment cases below.
1512 */
1513 }
1514 } else if (usage & ANV_SUBPASS_USAGE_INPUT) {
1515 /* Input attachments are clear-color aware so, at least on Sky Lake, we
1516 * can frequently sample from them with no resolves at all.
1517 */
1518 if (att_state->aux_usage != att_state->input_aux_usage) {
1519 assert(att_state->input_aux_usage == ISL_AUX_USAGE_NONE);
1520 resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
1521 } else if (!att_state->clear_color_is_zero_one) {
1522 /* Sky Lake PRM, Vol. 2d, RENDER_SURFACE_STATE::Red Clear Color:
1523 *
1524 * "If Number of Multisamples is MULTISAMPLECOUNT_1 AND if this RT
1525 * is fast cleared with non-0/1 clear value, this RT must be
1526 * partially resolved (refer to Partial Resolve operation) before
1527 * binding this surface to Sampler."
1528 */
1529 resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
1530 } else if (cmd_buffer->device->info.gen == 8 && self_dep &&
1531 att_state->input_aux_usage == ISL_AUX_USAGE_CCS_D) {
1532 /* On Broadwell we still need to do resolves when there is a
1533 * self-dependency because the HW cannot see fast-clears and treats
1534 * the render cache as if it contained a regular, non-fast-cleared
1535 * surface. To avoid any inconsistency, we force the resolve.
1536 */
1537 resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
1538 }
1539 }
1540
1541 if (resolve_op == BLORP_FAST_CLEAR_OP_NONE)
1542 return;
1543
1544 struct blorp_surf surf;
1545 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
1546 att_state->aux_usage, &surf);
1547 if (att_state->fast_clear)
1548 surf.clear_color = vk_to_isl_color(att_state->clear_value.color);
1549
1550 /* From the Sky Lake PRM Vol. 7, "Render Target Resolve":
1551 *
1552 * "When performing a render target resolve, PIPE_CONTROL with end of
1553 * pipe sync must be delivered."
1554 *
1555 * This comment is a bit cryptic and doesn't really tell you what's going
1556 * on or what's really needed. It appears that fast clear ops are not
1557 * properly synchronized with other drawing. We need to use a PIPE_CONTROL
1558 * to ensure that the contents of the previous draw hit the render target
1559 * before we resolve and then use a second PIPE_CONTROL after the resolve
1560 * to ensure that it is completed before any additional drawing occurs.
1561 */
1562 cmd_buffer->state.pending_pipe_bits |=
1563 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
1564
1565 for (uint32_t layer = 0; layer < fb->layers; layer++) {
1566 blorp_ccs_resolve(batch, &surf,
1567 iview->isl.base_level,
1568 iview->isl.base_array_layer + layer,
1569 iview->isl.format, resolve_op);
1570 }
1571
1572 cmd_buffer->state.pending_pipe_bits |=
1573 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
1574
1575 /* Once we've done any sort of resolve, we're no longer fast-cleared */
1576 att_state->fast_clear = false;
1577 if (att_state->aux_usage == ISL_AUX_USAGE_CCS_D)
1578 att_state->aux_usage = ISL_AUX_USAGE_NONE;
1579 }
1580
1581 void
1582 anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
1583 {
1584 struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
1585 struct anv_subpass *subpass = cmd_buffer->state.subpass;
1586
1587
1588 struct blorp_batch batch;
1589 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
1590
1591 for (uint32_t i = 0; i < subpass->color_count; ++i) {
1592 ccs_resolve_attachment(cmd_buffer, &batch,
1593 subpass->color_attachments[i].attachment);
1594 }
1595
1596 anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_DRAW);
1597
1598 if (subpass->has_resolve) {
1599 for (uint32_t i = 0; i < subpass->color_count; ++i) {
1600 uint32_t src_att = subpass->color_attachments[i].attachment;
1601 uint32_t dst_att = subpass->resolve_attachments[i].attachment;
1602
1603 if (dst_att == VK_ATTACHMENT_UNUSED)
1604 continue;
1605
1606 if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
1607 /* From the Vulkan 1.0 spec:
1608 *
1609 * If the first use of an attachment in a render pass is as a
1610 * resolve attachment, then the loadOp is effectively ignored
1611 * as the resolve is guaranteed to overwrite all pixels in the
1612 * render area.
1613 */
1614 cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
1615 }
1616
1617 struct anv_image_view *src_iview = fb->attachments[src_att];
1618 struct anv_image_view *dst_iview = fb->attachments[dst_att];
1619
1620 const VkRect2D render_area = cmd_buffer->state.render_area;
1621
1622 assert(src_iview->aspect_mask == dst_iview->aspect_mask);
1623 resolve_image(&batch, src_iview->image,
1624 src_iview->isl.base_level,
1625 src_iview->isl.base_array_layer,
1626 dst_iview->image,
1627 dst_iview->isl.base_level,
1628 dst_iview->isl.base_array_layer,
1629 src_iview->aspect_mask,
1630 render_area.offset.x, render_area.offset.y,
1631 render_area.offset.x, render_area.offset.y,
1632 render_area.extent.width, render_area.extent.height);
1633
1634 ccs_resolve_attachment(cmd_buffer, &batch, dst_att);
1635 }
1636
1637 anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_RESOLVE);
1638 }
1639
1640 blorp_batch_finish(&batch);
1641 }
1642
1643 void
1644 anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
1645 const struct anv_image *image,
1646 enum blorp_hiz_op op)
1647 {
1648 assert(image);
1649
1650 /* Don't resolve depth buffers without an auxiliary HiZ buffer and
1651 * don't perform such a resolve on gens that don't support it.
1652 */
1653 if (cmd_buffer->device->info.gen < 8 ||
1654 image->aux_usage != ISL_AUX_USAGE_HIZ)
1655 return;
1656
1657 assert(op == BLORP_HIZ_OP_HIZ_RESOLVE ||
1658 op == BLORP_HIZ_OP_DEPTH_RESOLVE);
1659
1660 struct blorp_batch batch;
1661 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
1662
1663 struct blorp_surf surf;
1664 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
1665 ISL_AUX_USAGE_NONE, &surf);
1666
1667 /* Manually add the aux HiZ surf */
1668 surf.aux_surf = &image->aux_surface.isl;
1669 surf.aux_addr = (struct blorp_address) {
1670 .buffer = image->bo,
1671 .offset = image->offset + image->aux_surface.offset,
1672 };
1673 surf.aux_usage = ISL_AUX_USAGE_HIZ;
1674
1675 surf.clear_color.u32[0] = (uint32_t) ANV_HZ_FC_VAL;
1676
1677 blorp_gen6_hiz_op(&batch, &surf, 0, 0, op);
1678 blorp_batch_finish(&batch);
1679 }