anv/blorp: Fix a crash in CmdClearColorImage
src/intel/vulkan/anv_blorp.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

static bool
lookup_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_shader_bin *bin =
      anv_pipeline_cache_search(&device->blorp_shader_cache, key, key_size);
   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

static bool
upload_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    const void *kernel, uint32_t kernel_size,
                    const struct brw_stage_prog_data *prog_data,
                    uint32_t prog_data_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_pipeline_bind_map bind_map = {
      .surface_count = 0,
      .sampler_count = 0,
   };

   struct anv_shader_bin *bin =
      anv_pipeline_cache_upload_kernel(&device->blorp_shader_cache,
                                       key, key_size, kernel, kernel_size,
                                       prog_data, prog_data_size, &bind_map);

   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

void
anv_device_init_blorp(struct anv_device *device)
{
   anv_pipeline_cache_init(&device->blorp_shader_cache, device, true);
   blorp_init(&device->blorp, device, &device->isl_dev);
   device->blorp.compiler = device->instance->physicalDevice.compiler;
   device->blorp.mocs.tex = device->default_mocs;
   device->blorp.mocs.rb = device->default_mocs;
   device->blorp.mocs.vb = device->default_mocs;
   device->blorp.lookup_shader = lookup_blorp_shader;
   device->blorp.upload_shader = upload_blorp_shader;
   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell) {
         device->blorp.exec = gen75_blorp_exec;
      } else {
         device->blorp.exec = gen7_blorp_exec;
      }
      break;
   case 8:
      device->blorp.exec = gen8_blorp_exec;
      break;
   case 9:
      device->blorp.exec = gen9_blorp_exec;
      break;
   default:
      unreachable("Unknown hardware generation");
   }
}

void
anv_device_finish_blorp(struct anv_device *device)
{
   blorp_finish(&device->blorp);
   anv_pipeline_cache_finish(&device->blorp_shader_cache);
}

static void
get_blorp_surf_for_anv_buffer(struct anv_device *device,
                              struct anv_buffer *buffer, uint64_t offset,
                              uint32_t width, uint32_t height,
                              uint32_t row_pitch, enum isl_format format,
                              struct blorp_surf *blorp_surf,
                              struct isl_surf *isl_surf)
{
   const struct isl_format_layout *fmtl =
      isl_format_get_layout(format);

   /* ASTC is the only format which doesn't support linear layouts.
    * Create an equivalently sized surface with ISL to get around this.
    */
   if (fmtl->txc == ISL_TXC_ASTC) {
      /* Use an equivalently sized format */
      format = ISL_FORMAT_R32G32B32A32_UINT;
      assert(fmtl->bpb == isl_format_get_layout(format)->bpb);

      /* Shrink the dimensions for the new format */
      width = DIV_ROUND_UP(width, fmtl->bw);
      height = DIV_ROUND_UP(height, fmtl->bh);
   }
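
   /* For example, ASTC 4x4 and R32G32B32A32_UINT both use 128-bit blocks,
    * so a 64x64 ASTC image becomes a 16x16 UINT surface covering exactly
    * the same bytes.
    */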

   *blorp_surf = (struct blorp_surf) {
      .surf = isl_surf,
      .addr = {
         .buffer = buffer->bo,
         .offset = buffer->offset + offset,
      },
   };

   isl_surf_init(&device->isl_dev, isl_surf,
                 .dim = ISL_SURF_DIM_2D,
                 .format = format,
                 .width = width,
                 .height = height,
                 .depth = 1,
                 .levels = 1,
                 .array_len = 1,
                 .samples = 1,
                 .min_pitch = row_pitch,
                 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                          ISL_SURF_USAGE_RENDER_TARGET_BIT,
                 .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(isl_surf->row_pitch == row_pitch);
}

static void
get_blorp_surf_for_anv_image(const struct anv_image *image,
                             VkImageAspectFlags aspect,
                             enum isl_aux_usage aux_usage,
                             struct blorp_surf *blorp_surf)
{
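   /* Drop the aux usage in these cases, presumably because stencil carries
    * no auxiliary data and the blorp operations driven from this file can't
    * consume a HiZ buffer directly.
    */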
   if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT ||
       aux_usage == ISL_AUX_USAGE_HIZ)
      aux_usage = ISL_AUX_USAGE_NONE;

   const struct anv_surface *surface =
      anv_image_get_surface_for_aspect_mask(image, aspect);

   *blorp_surf = (struct blorp_surf) {
      .surf = &surface->isl,
      .addr = {
         .buffer = image->bo,
         .offset = image->offset + surface->offset,
      },
   };

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      blorp_surf->aux_surf = &image->aux_surface.isl;
      blorp_surf->aux_addr = (struct blorp_address) {
         .buffer = image->bo,
         .offset = image->offset + image->aux_surface.offset,
      };
      blorp_surf->aux_usage = aux_usage;
   }
}

void anv_CmdCopyImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageCopy*                          pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      VkOffset3D srcOffset =
         anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
      VkOffset3D dstOffset =
         anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
      VkExtent3D extent =
         anv_sanitize_image_extent(src_image->type, pRegions[r].extent);

      unsigned dst_base_layer, layer_count;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         dst_base_layer = pRegions[r].dstOffset.z;
         layer_count = pRegions[r].extent.depth;
      } else {
         dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
         layer_count = pRegions[r].dstSubresource.layerCount;
      }

      unsigned src_base_layer;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         src_base_layer = pRegions[r].srcOffset.z;
      } else {
         src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
         assert(pRegions[r].srcSubresource.layerCount == layer_count);
      }

      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);

      uint32_t a;
      for_each_bit(a, pRegions[r].dstSubresource.aspectMask) {
         VkImageAspectFlagBits aspect = (1 << a);

         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(src_image, aspect, src_image->aux_usage,
                                      &src_surf);
         get_blorp_surf_for_anv_image(dst_image, aspect, dst_image->aux_usage,
                                      &dst_surf);

         for (unsigned i = 0; i < layer_count; i++) {
            blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
                       src_base_layer + i,
                       &dst_surf, pRegions[r].dstSubresource.mipLevel,
                       dst_base_layer + i,
                       srcOffset.x, srcOffset.y,
                       dstOffset.x, dstOffset.y,
                       extent.width, extent.height);
         }
      }
   }

   blorp_batch_finish(&batch);
}

static void
copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_buffer *anv_buffer,
                     struct anv_image *anv_image,
                     uint32_t regionCount,
                     const VkBufferImageCopy* pRegions,
                     bool buffer_to_image)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct {
      struct blorp_surf surf;
      uint32_t level;
      VkOffset3D offset;
   } image, buffer, *src, *dst;

   buffer.level = 0;
   buffer.offset = (VkOffset3D) { 0, 0, 0 };

   if (buffer_to_image) {
      src = &buffer;
      dst = &image;
   } else {
      src = &image;
      dst = &buffer;
   }

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;

      get_blorp_surf_for_anv_image(anv_image, aspect, anv_image->aux_usage,
                                   &image.surf);
      image.offset =
         anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
      image.level = pRegions[r].imageSubresource.mipLevel;

      VkExtent3D extent =
         anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
      if (anv_image->type != VK_IMAGE_TYPE_3D) {
         image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
         extent.depth = pRegions[r].imageSubresource.layerCount;
      }

      const enum isl_format buffer_format =
         anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
                            aspect, VK_IMAGE_TILING_LINEAR);

      const VkExtent3D bufferImageExtent = {
         .width  = pRegions[r].bufferRowLength ?
                   pRegions[r].bufferRowLength : extent.width,
         .height = pRegions[r].bufferImageHeight ?
                   pRegions[r].bufferImageHeight : extent.height,
      };

      const struct isl_format_layout *buffer_fmtl =
         isl_format_get_layout(buffer_format);

      const uint32_t buffer_row_pitch =
         DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
         (buffer_fmtl->bpb / 8);

      const uint32_t buffer_layer_stride =
         DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
         buffer_row_pitch;

      struct isl_surf buffer_isl_surf;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    anv_buffer, pRegions[r].bufferOffset,
                                    extent.width, extent.height,
                                    buffer_row_pitch, buffer_format,
                                    &buffer.surf, &buffer_isl_surf);

      for (unsigned z = 0; z < extent.depth; z++) {
         blorp_copy(&batch, &src->surf, src->level, src->offset.z,
                    &dst->surf, dst->level, dst->offset.z,
                    src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
                    extent.width, extent.height);

         image.offset.z++;
         buffer.surf.addr.offset += buffer_layer_stride;
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdCopyBufferToImage(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   copy_buffer_to_image(cmd_buffer, src_buffer, dst_image,
                        regionCount, pRegions, true);
}

void anv_CmdCopyImageToBuffer(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   copy_buffer_to_image(cmd_buffer, dst_buffer, src_image,
                        regionCount, pRegions, false);
}

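/* Sorts each coordinate pair into ascending order and reports whether the
 * blit must be mirrored along that axis, i.e. whether exactly one of the
 * two pairs was reversed.
 */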
static bool
flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
{
   bool flip = false;
   if (*src0 > *src1) {
      unsigned tmp = *src0;
      *src0 = *src1;
      *src1 = tmp;
      flip = !flip;
   }

   if (*dst0 > *dst1) {
      unsigned tmp = *dst0;
      *dst0 = *dst1;
      *dst1 = tmp;
      flip = !flip;
   }

   return flip;
}

void anv_CmdBlitImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageBlit*                          pRegions,
    VkFilter                                    filter)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_surf src, dst;

   uint32_t gl_filter;
   switch (filter) {
   case VK_FILTER_NEAREST:
      gl_filter = 0x2600; /* GL_NEAREST */
      break;
   case VK_FILTER_LINEAR:
      gl_filter = 0x2601; /* GL_LINEAR */
      break;
   default:
      unreachable("Invalid filter");
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
      const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;

      get_blorp_surf_for_anv_image(src_image, src_res->aspectMask,
                                   src_image->aux_usage, &src);
      get_blorp_surf_for_anv_image(dst_image, dst_res->aspectMask,
                                   dst_image->aux_usage, &dst);

      struct anv_format src_format =
         anv_get_format(&cmd_buffer->device->info, src_image->vk_format,
                        src_res->aspectMask, src_image->tiling);
      struct anv_format dst_format =
         anv_get_format(&cmd_buffer->device->info, dst_image->vk_format,
                        dst_res->aspectMask, dst_image->tiling);

      unsigned dst_start, dst_end;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         assert(dst_res->baseArrayLayer == 0);
         dst_start = pRegions[r].dstOffsets[0].z;
         dst_end = pRegions[r].dstOffsets[1].z;
      } else {
         dst_start = dst_res->baseArrayLayer;
         dst_end = dst_start + dst_res->layerCount;
      }

      unsigned src_start, src_end;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         assert(src_res->baseArrayLayer == 0);
         src_start = pRegions[r].srcOffsets[0].z;
         src_end = pRegions[r].srcOffsets[1].z;
      } else {
         src_start = src_res->baseArrayLayer;
         src_end = src_start + src_res->layerCount;
      }

      bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
      float src_z_step = (float)(src_end + 1 - src_start) /
                         (float)(dst_end + 1 - dst_start);

      if (flip_z) {
         src_start = src_end;
         src_z_step *= -1;
      }
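
      /* For 3D blits the source and destination depths may differ, so each
       * destination slice samples the source at a fractional z step; a
       * flipped blit walks the source slices from the far end backwards.
       */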

      unsigned src_x0 = pRegions[r].srcOffsets[0].x;
      unsigned src_x1 = pRegions[r].srcOffsets[1].x;
      unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
      unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
      bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);

      unsigned src_y0 = pRegions[r].srcOffsets[0].y;
      unsigned src_y1 = pRegions[r].srcOffsets[1].y;
      unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
      unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
      bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);

      const unsigned num_layers = dst_end - dst_start;
      for (unsigned i = 0; i < num_layers; i++) {
         unsigned dst_z = dst_start + i;
         unsigned src_z = src_start + i * src_z_step;

         blorp_blit(&batch, &src, src_res->mipLevel, src_z,
                    src_format.isl_format, src_format.swizzle,
                    &dst, dst_res->mipLevel, dst_z,
                    dst_format.isl_format,
                    anv_swizzle_for_render(dst_format.swizzle),
                    src_x0, src_y0, src_x1, src_y1,
                    dst_x0, dst_y0, dst_x1, dst_y1,
                    gl_filter, flip_x, flip_y);
      }
   }

   blorp_batch_finish(&batch);
}

static enum isl_format
isl_format_for_size(unsigned size_B)
{
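   /* Prefer 32-bit-per-channel formats here: blorp_copy ignores the format
    * entirely, but anv_CmdFillBuffer replicates a 32-bit word into every
    * channel and that byte pattern only survives if each channel is a full
    * dword.
    */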
   switch (size_B) {
   case 1:  return ISL_FORMAT_R8_UINT;
   case 2:  return ISL_FORMAT_R16_UINT;
   case 4:  return ISL_FORMAT_R32_UINT;
   case 8:  return ISL_FORMAT_R32G32_UINT;
   case 16: return ISL_FORMAT_R32G32B32A32_UINT;
   default:
      unreachable("Not a power-of-two format size");
   }
}

static void
do_buffer_copy(struct blorp_batch *batch,
               struct anv_bo *src, uint64_t src_offset,
               struct anv_bo *dst, uint64_t dst_offset,
               int width, int height, int block_size)
{
   struct anv_device *device = batch->blorp->driver_ctx;

   /* The actual format we pick doesn't matter as blorp will throw it away.
    * The only thing that actually matters is the size.
    */
   enum isl_format format = isl_format_for_size(block_size);

   struct isl_surf surf;
   isl_surf_init(&device->isl_dev, &surf,
                 .dim = ISL_SURF_DIM_2D,
                 .format = format,
                 .width = width,
                 .height = height,
                 .depth = 1,
                 .levels = 1,
                 .array_len = 1,
                 .samples = 1,
                 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                          ISL_SURF_USAGE_RENDER_TARGET_BIT,
                 .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(surf.row_pitch == width * block_size);
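
   /* With a linear surface and tightly packed rows, the rectangle copy
    * below moves exactly width * height * block_size contiguous bytes.
    */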

   struct blorp_surf src_blorp_surf = {
      .surf = &surf,
      .addr = {
         .buffer = src,
         .offset = src_offset,
      },
   };

   struct blorp_surf dst_blorp_surf = {
      .surf = &surf,
      .addr = {
         .buffer = dst,
         .offset = dst_offset,
      },
   };

   blorp_copy(batch, &src_blorp_surf, 0, 0, &dst_blorp_surf, 0, 0,
              0, 0, 0, 0, width, height);
}

/**
 * Returns the greatest common divisor of a and b that is a power of two.
 */
static inline uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX in which
    * case the MIN2() will take the other one.  If both are 0 then we will
    * hit the assert above.
    */
   return 1ull << MIN2(a_log2, b_log2);
}
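
/* For example, gcd_pow2_u64(12, 20) == 4: ffsll() finds the lowest set bit
 * of each value (bit 2 in both 0b1100 and 0b10100), and 1 << 2 == 4.
 */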

/* This is the maximum possible width/height our HW can handle (16K) */
#define MAX_SURFACE_DIM (1ull << 14)

void anv_CmdCopyBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferCopy*                         pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      uint64_t src_offset = src_buffer->offset + pRegions[r].srcOffset;
      uint64_t dst_offset = dst_buffer->offset + pRegions[r].dstOffset;
      uint64_t copy_size = pRegions[r].size;

      /* First, we compute the biggest format that can be used with the
       * given offsets and size.
       */
      int bs = 16;
      bs = gcd_pow2_u64(bs, src_offset);
      bs = gcd_pow2_u64(bs, dst_offset);
      bs = gcd_pow2_u64(bs, pRegions[r].size);
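
      /* For example, src_offset = 4, dst_offset = 8 and size = 32 give
       * bs = 4, so this copy goes through a 32-bit-per-pixel surface.
       */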

      /* Next, we make a bunch of max-sized copies */
      uint64_t max_copy_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
      while (copy_size >= max_copy_size) {
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        MAX_SURFACE_DIM, MAX_SURFACE_DIM, bs);
         copy_size -= max_copy_size;
         src_offset += max_copy_size;
         dst_offset += max_copy_size;
      }

      /* Now make a max-width copy */
      uint64_t height = copy_size / (MAX_SURFACE_DIM * bs);
      assert(height < MAX_SURFACE_DIM);
      if (height != 0) {
         uint64_t rect_copy_size = height * MAX_SURFACE_DIM * bs;
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        MAX_SURFACE_DIM, height, bs);
         copy_size -= rect_copy_size;
         src_offset += rect_copy_size;
         dst_offset += rect_copy_size;
      }

      /* Finally, make a small copy to finish it off */
      if (copy_size != 0) {
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        copy_size / bs, 1, bs);
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdUpdateBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                dataSize,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* We can't quite grab a full block because the state stream needs a
    * little data at the top to build its linked list.
    */
   const uint32_t max_update_size =
      cmd_buffer->device->dynamic_state_block_pool.block_size - 64;

   assert(max_update_size < MAX_SURFACE_DIM * 4);

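   /* vkCmdUpdateBuffer requires dstOffset and dataSize to be multiples of 4,
    * so bs below is always at least 4 and each chunk fits in a single row
    * of at most MAX_SURFACE_DIM pixels.
    */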
   while (dataSize) {
      const uint32_t copy_size = MIN2(dataSize, max_update_size);

      struct anv_state tmp_data =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);

      memcpy(tmp_data.map, pData, copy_size);

      int bs = 16;
      bs = gcd_pow2_u64(bs, dstOffset);
      bs = gcd_pow2_u64(bs, copy_size);

      do_buffer_copy(&batch,
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     tmp_data.offset,
                     dst_buffer->bo, dst_buffer->offset + dstOffset,
                     copy_size / bs, 1, bs);

      dataSize -= copy_size;
      dstOffset += copy_size;
      pData = (void *)pData + copy_size;
   }

   blorp_batch_finish(&batch);
}

void anv_CmdFillBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                fillSize,
    uint32_t                                    data)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
   struct blorp_surf surf;
   struct isl_surf isl_surf;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   fillSize = anv_buffer_get_range(dst_buffer, dstOffset, fillSize);

   /* From the Vulkan spec:
    *
    *    "size is the number of bytes to fill, and must be either a multiple
    *    of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
    *    the buffer. If VK_WHOLE_SIZE is used and the remaining size of the
    *    buffer is not a multiple of 4, then the nearest smaller multiple is
    *    used."
    */
   fillSize &= ~3ull;

   /* First, we compute the biggest format that can be used with the
    * given offsets and size.
    */
   int bs = 16;
   bs = gcd_pow2_u64(bs, dstOffset);
   bs = gcd_pow2_u64(bs, fillSize);
   enum isl_format isl_format = isl_format_for_size(bs);

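   /* Replicate the 32-bit fill word into all four channels; with the
    * 32-bit-per-channel formats returned by isl_format_for_size(), every
    * channel written to memory is the full word.
    */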
   union isl_color_value color = {
      .u32 = { data, data, data, data },
   };

   const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
   while (fillSize >= max_fill_size) {
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                  color, NULL);
      fillSize -= max_fill_size;
      dstOffset += max_fill_size;
   }

   uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
   assert(height < MAX_SURFACE_DIM);
   if (height != 0) {
      const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, height,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
                  color, NULL);
      fillSize -= rect_fill_size;
      dstOffset += rect_fill_size;
   }

   if (fillSize != 0) {
      const uint32_t width = fillSize / bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    width, 1,
                                    width * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, width, 1,
                  color, NULL);
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearColorImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     _image,
    VkImageLayout                               imageLayout,
    const VkClearColorValue*                    pColor,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, _image);

   static const bool color_write_disable[4] = { false, false, false, false };

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                image->aux_usage, &surf);

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      assert(pRanges[r].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);

      struct anv_format src_format =
         anv_get_format(&cmd_buffer->device->info, image->vk_format,
                        VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);

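      /* The anv_get_layerCount()/anv_get_levelCount() helpers resolve
       * VK_REMAINING_ARRAY_LAYERS and VK_REMAINING_MIP_LEVELS to concrete
       * counts, so the loops below never run off the end of the image.
       */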
      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D) {
            base_layer = 0;
            layer_count = anv_minify(image->extent.depth, level);
         }

         blorp_clear(&batch, &surf,
                     src_format.isl_format, src_format.swizzle,
                     level, base_layer, layer_count,
                     0, 0, level_width, level_height,
                     vk_to_isl_color(*pColor), color_write_disable);
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearDepthStencilImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     image_h,
    VkImageLayout                               imageLayout,
    const VkClearDepthStencilValue*             pDepthStencil,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, image_h);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth, stencil;
   if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   ISL_AUX_USAGE_NONE, &depth);
   } else {
      memset(&depth, 0, sizeof(depth));
   }

   if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   ISL_AUX_USAGE_NONE, &stencil);
   } else {
      memset(&stencil, 0, sizeof(stencil));
   }

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
      bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D)
            layer_count = anv_minify(image->extent.depth, level);

         blorp_clear_depth_stencil(&batch, &depth, &stencil,
                                   level, base_layer, layer_count,
                                   0, 0, level_width, level_height,
                                   clear_depth, pDepthStencil->depth,
                                   clear_stencil ? 0xff : 0,
                                   pDepthStencil->stencil);
      }
   }

   blorp_batch_finish(&batch);
}

VkResult
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset,
                                         struct anv_state *bt_state)
{
   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                  state_offset);
   if (bt_state->map == NULL) {
      /* We ran out of space.  Grab a new binding table block. */
      VkResult result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                     state_offset);
      assert(bt_state->map != NULL);
   }

   return VK_SUCCESS;
}

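/* Builds a one-entry binding table pointing at the given surface state, as
 * needed by the blorp_clear_attachments() calls below.
 */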
static VkResult
binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_state surface_state,
                                uint32_t *bt_offset)
{
   uint32_t state_offset;
   struct anv_state bt_state;

   VkResult result =
      anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset,
                                               &bt_state);
   if (result != VK_SUCCESS)
      return result;

   uint32_t *bt_map = bt_state.map;
   bt_map[0] = surface_state.offset + state_offset;

   *bt_offset = bt_state.offset;
   return VK_SUCCESS;
}

static void
clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       const VkClearAttachment *attachment,
                       uint32_t rectCount, const VkClearRect *pRects)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t color_att = attachment->colorAttachment;
   const uint32_t att_idx = subpass->color_attachments[color_att].attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att_idx];

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer, att_state->color_rt_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   union isl_color_value clear_color =
      vk_to_isl_color(attachment->clearValue.color);

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      blorp_clear_attachments(batch, binding_table,
                              ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              true, clear_color, false, 0.0f, 0, 0);
   }
}

static void
clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
                               struct blorp_batch *batch,
                               const VkClearAttachment *attachment,
                               uint32_t rectCount, const VkClearRect *pRects)
{
   static const union isl_color_value color_value = { .u32 = { 0, } };
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t att_idx = subpass->depth_stencil_attachment.attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];

   bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
   bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

   enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
   if (clear_depth) {
      depth_format = anv_get_isl_format(&cmd_buffer->device->info,
                                        pass_att->format,
                                        VK_IMAGE_ASPECT_DEPTH_BIT,
                                        VK_IMAGE_TILING_OPTIMAL);
   }

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer,
                                      cmd_buffer->state.null_surface_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
      blorp_clear_attachments(batch, binding_table,
                              depth_format, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              false, color_value,
                              clear_depth, value.depth,
                              clear_stencil ? 0xff : 0, value.stencil);
   }
}

void anv_CmdClearAttachments(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    attachmentCount,
    const VkClearAttachment*                    pAttachments,
    uint32_t                                    rectCount,
    const VkClearRect*                          pRects)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   for (uint32_t a = 0; a < attachmentCount; ++a) {
      if (pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
         clear_color_attachment(cmd_buffer, &batch,
                                &pAttachments[a],
                                rectCount, pRects);
      } else {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &pAttachments[a],
                                        rectCount, pRects);
      }
   }

   blorp_batch_finish(&batch);
}

enum subpass_stage {
   SUBPASS_STAGE_LOAD,
   SUBPASS_STAGE_DRAW,
   SUBPASS_STAGE_RESOLVE,
};

static bool
attachment_needs_flush(struct anv_cmd_buffer *cmd_buffer,
                       struct anv_render_pass_attachment *att,
                       enum subpass_stage stage)
{
   struct anv_render_pass *pass = cmd_buffer->state.pass;
   const uint32_t subpass_idx = anv_get_subpass_id(&cmd_buffer->state);

   /* We handle this subpass specially based on the current stage */
   enum anv_subpass_usage usage = att->subpass_usage[subpass_idx];
   switch (stage) {
   case SUBPASS_STAGE_LOAD:
      if (usage & (ANV_SUBPASS_USAGE_INPUT | ANV_SUBPASS_USAGE_RESOLVE_SRC))
         return true;
      break;

   case SUBPASS_STAGE_DRAW:
      if (usage & ANV_SUBPASS_USAGE_RESOLVE_SRC)
         return true;
      break;

   default:
      break;
   }

   for (uint32_t s = subpass_idx + 1; s < pass->subpass_count; s++) {
      usage = att->subpass_usage[s];

      /* If this attachment is going to be used as an input in this or any
       * future subpass, then we need to flush its cache and invalidate the
       * texture cache.
       */
      if (usage & ANV_SUBPASS_USAGE_INPUT)
         return true;

      if (usage & (ANV_SUBPASS_USAGE_DRAW | ANV_SUBPASS_USAGE_RESOLVE_DST)) {
         /* We found another subpass that draws to this attachment.  We'll
          * wait to resolve until then.
          */
         return false;
      }
   }

   return false;
}

static void
anv_cmd_buffer_flush_attachments(struct anv_cmd_buffer *cmd_buffer,
                                 enum subpass_stage stage)
{
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_render_pass *pass = cmd_buffer->state.pass;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      uint32_t att = subpass->color_attachments[i].attachment;
      assert(att < pass->attachment_count);
      if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
      }
   }

   if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
      uint32_t att = subpass->depth_stencil_attachment.attachment;
      assert(att < pass->attachment_count);
      if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
            ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
      }
   }
}

static bool
subpass_needs_clear(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;

   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      if (cmd_state->attachments[a].pending_clear_aspects) {
         return true;
      }
   }

   if (ds != VK_ATTACHMENT_UNUSED &&
       cmd_state->attachments[ds].pending_clear_aspects) {
      return true;
   }

   return false;
}

void
anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   const VkRect2D render_area = cmd_buffer->state.render_area;

   if (!subpass_needs_clear(cmd_buffer))
      return;

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   VkClearRect clear_rect = {
      .rect = cmd_buffer->state.render_area,
      .baseArrayLayer = 0,
      .layerCount = cmd_buffer->state.framebuffer->layers,
   };

   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      const uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      struct anv_attachment_state *att_state = &cmd_state->attachments[a];

      if (!att_state->pending_clear_aspects)
         continue;

      assert(att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);

      struct anv_image_view *iview = fb->attachments[a];
      const struct anv_image *image = iview->image;
      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                   att_state->aux_usage, &surf);

      if (att_state->fast_clear) {
         surf.clear_color = vk_to_isl_color(att_state->clear_value.color);

         /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
          *
          *    "After Render target fast clear, pipe-control with color cache
          *    write-flush must be issued before sending any DRAW commands on
          *    that render target."
          *
          * This comment is a bit cryptic and doesn't really tell you what's
          * going on or what's really needed.  It appears that fast clear ops
          * are not properly synchronized with other drawing.  This means
          * that we cannot have a fast clear operation in the pipe at the
          * same time as other regular drawing operations.  We need to use a
          * PIPE_CONTROL to ensure that the contents of the previous draw hit
          * the render target before we resolve and then use a second
          * PIPE_CONTROL after the resolve to ensure that it is completed
          * before any additional drawing occurs.
          */
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

         blorp_fast_clear(&batch, &surf, iview->isl.format,
                          iview->isl.base_level,
                          iview->isl.base_array_layer, fb->layers,
                          render_area.offset.x, render_area.offset.y,
                          render_area.offset.x + render_area.extent.width,
                          render_area.offset.y + render_area.extent.height);

         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
      } else {
         blorp_clear(&batch, &surf, iview->isl.format,
                     anv_swizzle_for_render(iview->isl.swizzle),
                     iview->isl.base_level,
                     iview->isl.base_array_layer, fb->layers,
                     render_area.offset.x, render_area.offset.y,
                     render_area.offset.x + render_area.extent.width,
                     render_area.offset.y + render_area.extent.height,
                     vk_to_isl_color(att_state->clear_value.color), NULL);
      }

      att_state->pending_clear_aspects = 0;
   }

   const uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;

   if (ds != VK_ATTACHMENT_UNUSED &&
       cmd_state->attachments[ds].pending_clear_aspects) {

      VkClearAttachment clear_att = {
         .aspectMask = cmd_state->attachments[ds].pending_clear_aspects,
         .clearValue = cmd_state->attachments[ds].clear_value,
      };

      const uint8_t gen = cmd_buffer->device->info.gen;
      bool clear_with_hiz = gen >= 8 && cmd_state->attachments[ds].aux_usage ==
                            ISL_AUX_USAGE_HIZ;
      const struct anv_image_view *iview = fb->attachments[ds];

      if (clear_with_hiz) {
         const bool clear_depth = clear_att.aspectMask &
                                  VK_IMAGE_ASPECT_DEPTH_BIT;
         const bool clear_stencil = clear_att.aspectMask &
                                    VK_IMAGE_ASPECT_STENCIL_BIT;

         /* Check against restrictions for depth buffer clearing.  A great
          * GPU performance benefit isn't expected when using the HZ sequence
          * for stencil-only clears.  Therefore, we don't emit a HZ op
          * sequence for a stencil clear in addition to using the
          * BLORP-fallback for depth.
          */
         if (clear_depth) {
            if (!blorp_can_hiz_clear_depth(gen, iview->isl.format,
                                           iview->image->samples,
                                           render_area.offset.x,
                                           render_area.offset.y,
                                           render_area.offset.x +
                                           render_area.extent.width,
                                           render_area.offset.y +
                                           render_area.extent.height)) {
               clear_with_hiz = false;
            } else if (clear_att.clearValue.depthStencil.depth !=
                       ANV_HZ_FC_VAL) {
               /* Don't enable fast depth clears for any color not equal to
                * ANV_HZ_FC_VAL.
                */
               clear_with_hiz = false;
            } else if (gen == 8 &&
                       anv_can_sample_with_hiz(&cmd_buffer->device->info,
                                               iview->aspect_mask,
                                               iview->image->samples)) {
               /* Only gen9+ supports returning ANV_HZ_FC_VAL when sampling a
                * fast-cleared portion of a HiZ buffer.  Testing has revealed
                * that Gen8 only supports returning 0.0f.  Gens prior to gen8
                * do not support this feature at all.
                */
               clear_with_hiz = false;
            }
         }

         if (clear_with_hiz) {
            blorp_gen8_hiz_clear_attachments(&batch, iview->image->samples,
                                             render_area.offset.x,
                                             render_area.offset.y,
                                             render_area.offset.x +
                                             render_area.extent.width,
                                             render_area.offset.y +
                                             render_area.extent.height,
                                             clear_depth, clear_stencil,
                                             clear_att.clearValue.depthStencil.stencil);
         }
      }

      if (!clear_with_hiz) {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &clear_att, 1, &clear_rect);
      }

      cmd_state->attachments[ds].pending_clear_aspects = 0;
   }

   blorp_batch_finish(&batch);

   anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_LOAD);
}

static void
resolve_image(struct blorp_batch *batch,
              const struct anv_image *src_image,
              uint32_t src_level, uint32_t src_layer,
              const struct anv_image *dst_image,
              uint32_t dst_level, uint32_t dst_layer,
              VkImageAspectFlags aspect_mask,
              uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
              uint32_t width, uint32_t height)
{
   assert(src_image->type == VK_IMAGE_TYPE_2D);
   assert(src_image->samples > 1);
   assert(dst_image->type == VK_IMAGE_TYPE_2D);
   assert(dst_image->samples == 1);

   uint32_t a;
   for_each_bit(a, aspect_mask) {
      VkImageAspectFlagBits aspect = 1 << a;

      struct blorp_surf src_surf, dst_surf;
      get_blorp_surf_for_anv_image(src_image, aspect,
                                   src_image->aux_usage, &src_surf);
      get_blorp_surf_for_anv_image(dst_image, aspect,
                                   dst_image->aux_usage, &dst_surf);

1378
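      /* Passing ISL_FORMAT_UNSUPPORTED makes blorp fall back to each
       * surface's own format; since the source is multisampled and the
       * destination is not, the blit performs the actual MSAA resolve.
       */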
      blorp_blit(batch,
                 &src_surf, src_level, src_layer,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 &dst_surf, dst_level, dst_layer,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 src_x, src_y, src_x + width, src_y + height,
                 dst_x, dst_y, dst_x + width, dst_y + height,
                 0x2600 /* GL_NEAREST */, false, false);
   }
}

void anv_CmdResolveImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageResolve*                       pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t r = 0; r < regionCount; r++) {
      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);
      assert(pRegions[r].srcSubresource.layerCount ==
             pRegions[r].dstSubresource.layerCount);

      const uint32_t layer_count = pRegions[r].dstSubresource.layerCount;

      for (uint32_t layer = 0; layer < layer_count; layer++) {
         resolve_image(&batch,
                       src_image, pRegions[r].srcSubresource.mipLevel,
                       pRegions[r].srcSubresource.baseArrayLayer + layer,
                       dst_image, pRegions[r].dstSubresource.mipLevel,
                       pRegions[r].dstSubresource.baseArrayLayer + layer,
                       pRegions[r].dstSubresource.aspectMask,
                       pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
                       pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
                       pRegions[r].extent.width, pRegions[r].extent.height);
      }
   }

   blorp_batch_finish(&batch);
}

static void
ccs_resolve_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       uint32_t att)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att];

   if (att_state->aux_usage == ISL_AUX_USAGE_NONE ||
       att_state->aux_usage == ISL_AUX_USAGE_MCS)
      return; /* Nothing to resolve */

   assert(att_state->aux_usage == ISL_AUX_USAGE_CCS_E ||
          att_state->aux_usage == ISL_AUX_USAGE_CCS_D);

   struct anv_render_pass *pass = cmd_buffer->state.pass;
   const uint32_t subpass_idx = anv_get_subpass_id(&cmd_buffer->state);

   /* Scan forward to see all the ways this attachment will be used.
    * Ideally, we would like to resolve in the same subpass as the last write
    * of a particular attachment.  That way we only resolve once but it's
    * still hot in the cache.
    */
   bool found_draw = false;
   enum anv_subpass_usage usage = 0;
   for (uint32_t s = subpass_idx + 1; s < pass->subpass_count; s++) {
      usage |= pass->attachments[att].subpass_usage[s];

      if (usage & (ANV_SUBPASS_USAGE_DRAW | ANV_SUBPASS_USAGE_RESOLVE_DST)) {
         /* We found another subpass that draws to this attachment.  We'll
          * wait to resolve until then.
          */
         found_draw = true;
         break;
      }
   }

   struct anv_image_view *iview = fb->attachments[att];
   const struct anv_image *image = iview->image;
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);

   enum blorp_fast_clear_op resolve_op = BLORP_FAST_CLEAR_OP_NONE;
   if (!found_draw) {
      /* This is the last subpass that writes to this attachment so we need
       * to resolve here.  Ideally, we would like to only resolve if the
       * storeOp is set to VK_ATTACHMENT_STORE_OP_STORE.  However, we need to
       * ensure that the CCS bits are set to "resolved" because there may be
       * copy or blit operations (which may ignore CCS) between now and the
       * next time we render and we need to ensure that anything they write
       * will be respected in the next render.  Unfortunately, the hardware
       * does not provide us with any sort of "invalidate" pass that sets the
       * CCS to "resolved" without writing to the render target.
       */
      if (iview->image->aux_usage != ISL_AUX_USAGE_CCS_E) {
         /* The image destination surface doesn't support compression outside
          * the render pass.  We need a full resolve.
          */
         resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
      } else if (att_state->fast_clear) {
         /* We don't know what to do with clear colors outside the render
          * pass.  We need a partial resolve.  Only transparent black is
          * built into the surface state object and thus no resolve is
          * required for this case.
          */
         if (att_state->clear_value.color.uint32[0] ||
             att_state->clear_value.color.uint32[1] ||
             att_state->clear_value.color.uint32[2] ||
             att_state->clear_value.color.uint32[3])
            resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
      } else {
         /* The image "natively" supports all the compression we care about
          * and we don't need to resolve at all.  If this is the case, we
          * also don't need to resolve for any of the input attachment cases
          * below.
          */
      }
   } else if (usage & ANV_SUBPASS_USAGE_INPUT) {
      /* Input attachments are clear-color aware so, at least on Sky Lake,
       * we can frequently sample from them with no resolves at all.
       */
      if (att_state->aux_usage != att_state->input_aux_usage) {
         assert(att_state->input_aux_usage == ISL_AUX_USAGE_NONE);
         resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
      } else if (!att_state->clear_color_is_zero_one) {
         /* Sky Lake PRM, Vol. 2d, RENDER_SURFACE_STATE::Red Clear Color:
          *
          *    "If Number of Multisamples is MULTISAMPLECOUNT_1 AND if this
          *    RT is fast cleared with non-0/1 clear value, this RT must be
          *    partially resolved (refer to Partial Resolve operation) before
          *    binding this surface to Sampler."
          */
         resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
      }
   }

   if (resolve_op == BLORP_FAST_CLEAR_OP_NONE)
      return;

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                att_state->aux_usage, &surf);
   if (att_state->fast_clear)
      surf.clear_color = vk_to_isl_color(att_state->clear_value.color);

   /* From the Sky Lake PRM Vol. 7, "Render Target Resolve":
    *
    *    "When performing a render target resolve, PIPE_CONTROL with end of
    *    pipe sync must be delivered."
    *
    * This comment is a bit cryptic and doesn't really tell you what's going
    * on or what's really needed.  It appears that fast clear ops are not
    * properly synchronized with other drawing.  We need to use a
    * PIPE_CONTROL to ensure that the contents of the previous draw hit the
    * render target before we resolve and then use a second PIPE_CONTROL
    * after the resolve to ensure that it is completed before any additional
    * drawing occurs.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   for (uint32_t layer = 0; layer < fb->layers; layer++) {
      blorp_ccs_resolve(batch, &surf,
                        iview->isl.base_level,
                        iview->isl.base_array_layer + layer,
                        iview->isl.format, resolve_op);
   }

   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   /* Once we've done any sort of resolve, we're no longer fast-cleared */
   att_state->fast_clear = false;
   if (att_state->aux_usage == ISL_AUX_USAGE_CCS_D)
      att_state->aux_usage = ISL_AUX_USAGE_NONE;
}

void
anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      ccs_resolve_attachment(cmd_buffer, &batch,
                             subpass->color_attachments[i].attachment);
   }

   anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_DRAW);

   if (subpass->has_resolve) {
      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t src_att = subpass->color_attachments[i].attachment;
         uint32_t dst_att = subpass->resolve_attachments[i].attachment;

         if (dst_att == VK_ATTACHMENT_UNUSED)
            continue;

         if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
            /* From the Vulkan 1.0 spec:
             *
             *    "If the first use of an attachment in a render pass is as a
             *    resolve attachment, then the loadOp is effectively ignored
             *    as the resolve is guaranteed to overwrite all pixels in the
             *    render area."
             */
            cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
         }

         struct anv_image_view *src_iview = fb->attachments[src_att];
         struct anv_image_view *dst_iview = fb->attachments[dst_att];

         const VkRect2D render_area = cmd_buffer->state.render_area;

         assert(src_iview->aspect_mask == dst_iview->aspect_mask);
         resolve_image(&batch, src_iview->image,
                       src_iview->isl.base_level,
                       src_iview->isl.base_array_layer,
                       dst_iview->image,
                       dst_iview->isl.base_level,
                       dst_iview->isl.base_array_layer,
                       src_iview->aspect_mask,
                       render_area.offset.x, render_area.offset.y,
                       render_area.offset.x, render_area.offset.y,
                       render_area.extent.width, render_area.extent.height);

         ccs_resolve_attachment(cmd_buffer, &batch, dst_att);
      }

      anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_RESOLVE);
   }

   blorp_batch_finish(&batch);
}

void
anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        enum blorp_hiz_op op)
{
   assert(image);

   /* Don't resolve depth buffers without an auxiliary HiZ buffer and
    * don't perform such a resolve on gens that don't support it.
    */
   if (cmd_buffer->device->info.gen < 8 ||
       image->aux_usage != ISL_AUX_USAGE_HIZ)
      return;

   assert(op == BLORP_HIZ_OP_HIZ_RESOLVE ||
          op == BLORP_HIZ_OP_DEPTH_RESOLVE);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                ISL_AUX_USAGE_NONE, &surf);

   /* Manually add the aux HiZ surf */
   surf.aux_surf = &image->aux_surface.isl;
   surf.aux_addr = (struct blorp_address) {
      .buffer = image->bo,
      .offset = image->offset + image->aux_surface.offset,
   };
   surf.aux_usage = ISL_AUX_USAGE_HIZ;

   surf.clear_color.u32[0] = (uint32_t) ANV_HZ_FC_VAL;

   blorp_gen6_hiz_op(&batch, &surf, 0, 0, op);
   blorp_batch_finish(&batch);
}