/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

static bool
lookup_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_shader_bin *bin =
      anv_pipeline_cache_search(&device->blorp_shader_cache, key, key_size);
   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

static bool
upload_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    const void *kernel, uint32_t kernel_size,
                    const struct brw_stage_prog_data *prog_data,
                    uint32_t prog_data_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_pipeline_bind_map bind_map = {
      .surface_count = 0,
      .sampler_count = 0,
   };

   struct anv_shader_bin *bin =
      anv_pipeline_cache_upload_kernel(&device->blorp_shader_cache,
                                       key, key_size, kernel, kernel_size,
                                       prog_data, prog_data_size, &bind_map);

   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

void
anv_device_init_blorp(struct anv_device *device)
{
   anv_pipeline_cache_init(&device->blorp_shader_cache, device, true);
   blorp_init(&device->blorp, device, &device->isl_dev);
   device->blorp.compiler = device->instance->physicalDevice.compiler;
   device->blorp.mocs.tex = device->default_mocs;
   device->blorp.mocs.rb = device->default_mocs;
   device->blorp.mocs.vb = device->default_mocs;
   device->blorp.lookup_shader = lookup_blorp_shader;
   device->blorp.upload_shader = upload_blorp_shader;
   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell) {
         device->blorp.exec = gen75_blorp_exec;
      } else {
         device->blorp.exec = gen7_blorp_exec;
      }
      break;
   case 8:
      device->blorp.exec = gen8_blorp_exec;
      break;
   case 9:
      device->blorp.exec = gen9_blorp_exec;
      break;
   default:
      unreachable("Unknown hardware generation");
   }
}

void
anv_device_finish_blorp(struct anv_device *device)
{
   blorp_finish(&device->blorp);
   anv_pipeline_cache_finish(&device->blorp_shader_cache);
}

static void
get_blorp_surf_for_anv_buffer(struct anv_device *device,
                              struct anv_buffer *buffer, uint64_t offset,
                              uint32_t width, uint32_t height,
                              uint32_t row_pitch, enum isl_format format,
                              struct blorp_surf *blorp_surf,
                              struct isl_surf *isl_surf)
{
   const struct isl_format_layout *fmtl =
      isl_format_get_layout(format);
   bool ok UNUSED;

   /* ASTC is the only format which doesn't support linear layouts.
    * Create an equivalently sized surface with ISL to get around this.
    */
   if (fmtl->txc == ISL_TXC_ASTC) {
      /* Use an equivalently sized format */
      format = ISL_FORMAT_R32G32B32A32_UINT;
      assert(fmtl->bpb == isl_format_get_layout(format)->bpb);

      /* Shrink the dimensions for the new format */
      width = DIV_ROUND_UP(width, fmtl->bw);
      height = DIV_ROUND_UP(height, fmtl->bh);
   }

   *blorp_surf = (struct blorp_surf) {
      .surf = isl_surf,
      .addr = {
         .buffer = buffer->bo,
         .offset = buffer->offset + offset,
      },
   };

   ok = isl_surf_init(&device->isl_dev, isl_surf,
                      .dim = ISL_SURF_DIM_2D,
                      .format = format,
                      .width = width,
                      .height = height,
                      .depth = 1,
                      .levels = 1,
                      .array_len = 1,
                      .samples = 1,
                      .row_pitch = row_pitch,
                      .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                               ISL_SURF_USAGE_RENDER_TARGET_BIT,
                      .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(ok);
}

static void
get_blorp_surf_for_anv_image(const struct anv_image *image,
                             VkImageAspectFlags aspect,
                             enum isl_aux_usage aux_usage,
                             struct blorp_surf *blorp_surf)
{
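   /* Stencil surfaces carry no auxiliary data, and HiZ resolves set up
    * their aux surface by hand (see anv_gen8_hiz_op_resolve), so both
    * cases fall back to ISL_AUX_USAGE_NONE here.
    */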
   if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT ||
       aux_usage == ISL_AUX_USAGE_HIZ)
      aux_usage = ISL_AUX_USAGE_NONE;

   const struct anv_surface *surface =
      anv_image_get_surface_for_aspect_mask(image, aspect);

   *blorp_surf = (struct blorp_surf) {
      .surf = &surface->isl,
      .addr = {
         .buffer = image->bo,
         .offset = image->offset + surface->offset,
      },
   };

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      blorp_surf->aux_surf = &image->aux_surface.isl;
      blorp_surf->aux_addr = (struct blorp_address) {
         .buffer = image->bo,
         .offset = image->offset + image->aux_surface.offset,
      };
      blorp_surf->aux_usage = aux_usage;
   }
}

void anv_CmdCopyImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageCopy*                          pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      VkOffset3D srcOffset =
         anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
      VkOffset3D dstOffset =
         anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
      VkExtent3D extent =
         anv_sanitize_image_extent(src_image->type, pRegions[r].extent);

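      /* For 3D images, blorp treats each depth slice as an array layer, so
       * the z offset and depth extent select the layer range to copy.
       */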
      unsigned dst_base_layer, layer_count;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         dst_base_layer = pRegions[r].dstOffset.z;
         layer_count = pRegions[r].extent.depth;
      } else {
         dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
         layer_count = pRegions[r].dstSubresource.layerCount;
      }

      unsigned src_base_layer;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         src_base_layer = pRegions[r].srcOffset.z;
      } else {
         src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
         assert(pRegions[r].srcSubresource.layerCount == layer_count);
      }

      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);

      uint32_t a;
      for_each_bit(a, pRegions[r].dstSubresource.aspectMask) {
         VkImageAspectFlagBits aspect = (1 << a);

         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(src_image, aspect, src_image->aux_usage,
                                      &src_surf);
         get_blorp_surf_for_anv_image(dst_image, aspect, dst_image->aux_usage,
                                      &dst_surf);

         for (unsigned i = 0; i < layer_count; i++) {
            blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
                       src_base_layer + i,
                       &dst_surf, pRegions[r].dstSubresource.mipLevel,
                       dst_base_layer + i,
                       srcOffset.x, srcOffset.y,
                       dstOffset.x, dstOffset.y,
                       extent.width, extent.height);
         }
      }
   }

   blorp_batch_finish(&batch);
}

static void
copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_buffer *anv_buffer,
                     struct anv_image *anv_image,
                     uint32_t regionCount,
                     const VkBufferImageCopy* pRegions,
                     bool buffer_to_image)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct {
      struct blorp_surf surf;
      uint32_t level;
      VkOffset3D offset;
   } image, buffer, *src, *dst;

   buffer.level = 0;
   buffer.offset = (VkOffset3D) { 0, 0, 0 };

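   /* Alias src and dst onto the image and buffer descriptions so that a
    * single loop body below handles both copy directions.
    */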
   if (buffer_to_image) {
      src = &buffer;
      dst = &image;
   } else {
      src = &image;
      dst = &buffer;
   }

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;

      get_blorp_surf_for_anv_image(anv_image, aspect, anv_image->aux_usage,
                                   &image.surf);
      image.offset =
         anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
      image.level = pRegions[r].imageSubresource.mipLevel;

      VkExtent3D extent =
         anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
      if (anv_image->type != VK_IMAGE_TYPE_3D) {
         image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
         extent.depth = pRegions[r].imageSubresource.layerCount;
      }

      const enum isl_format buffer_format =
         anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
                            aspect, VK_IMAGE_TILING_LINEAR);

      const VkExtent3D bufferImageExtent = {
         .width  = pRegions[r].bufferRowLength ?
                   pRegions[r].bufferRowLength : extent.width,
         .height = pRegions[r].bufferImageHeight ?
                   pRegions[r].bufferImageHeight : extent.height,
      };

      const struct isl_format_layout *buffer_fmtl =
         isl_format_get_layout(buffer_format);

      const uint32_t buffer_row_pitch =
         DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
         (buffer_fmtl->bpb / 8);

      const uint32_t buffer_layer_stride =
         DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
         buffer_row_pitch;
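      /* E.g., a 256-texel-wide R8G8B8A8 buffer row (bw == 1, bpb == 32)
       * gives buffer_row_pitch == 256 * 4 == 1024 bytes, and 64 rows give
       * buffer_layer_stride == 64 * 1024 == 65536 bytes.
       */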

      struct isl_surf buffer_isl_surf;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    anv_buffer, pRegions[r].bufferOffset,
                                    extent.width, extent.height,
                                    buffer_row_pitch, buffer_format,
                                    &buffer.surf, &buffer_isl_surf);

      for (unsigned z = 0; z < extent.depth; z++) {
         blorp_copy(&batch, &src->surf, src->level, src->offset.z,
                    &dst->surf, dst->level, dst->offset.z,
                    src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
                    extent.width, extent.height);

         image.offset.z++;
         buffer.surf.addr.offset += buffer_layer_stride;
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdCopyBufferToImage(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   copy_buffer_to_image(cmd_buffer, src_buffer, dst_image,
                        regionCount, pRegions, true);
}

void anv_CmdCopyImageToBuffer(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   copy_buffer_to_image(cmd_buffer, dst_buffer, src_image,
                        regionCount, pRegions, false);
}

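/* Normalizes a pair of coordinate intervals so that *src0 <= *src1 and
 * *dst0 <= *dst1, returning true when exactly one of the two intervals was
 * reversed, i.e. when the blit has to mirror along that axis.
 */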
static bool
flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
{
   bool flip = false;
   if (*src0 > *src1) {
      unsigned tmp = *src0;
      *src0 = *src1;
      *src1 = tmp;
      flip = !flip;
   }

   if (*dst0 > *dst1) {
      unsigned tmp = *dst0;
      *dst0 = *dst1;
      *dst1 = tmp;
      flip = !flip;
   }

   return flip;
}

void anv_CmdBlitImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageBlit*                          pRegions,
    VkFilter                                    filter)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_surf src, dst;

   uint32_t gl_filter;
   switch (filter) {
   case VK_FILTER_NEAREST:
      gl_filter = 0x2600; /* GL_NEAREST */
      break;
   case VK_FILTER_LINEAR:
      gl_filter = 0x2601; /* GL_LINEAR */
      break;
   default:
      unreachable("Invalid filter");
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
      const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;

      get_blorp_surf_for_anv_image(src_image, src_res->aspectMask,
                                   src_image->aux_usage, &src);
      get_blorp_surf_for_anv_image(dst_image, dst_res->aspectMask,
                                   dst_image->aux_usage, &dst);

      struct anv_format src_format =
         anv_get_format(&cmd_buffer->device->info, src_image->vk_format,
                        src_res->aspectMask, src_image->tiling);
      struct anv_format dst_format =
         anv_get_format(&cmd_buffer->device->info, dst_image->vk_format,
                        dst_res->aspectMask, dst_image->tiling);

      unsigned dst_start, dst_end;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         assert(dst_res->baseArrayLayer == 0);
         dst_start = pRegions[r].dstOffsets[0].z;
         dst_end = pRegions[r].dstOffsets[1].z;
      } else {
         dst_start = dst_res->baseArrayLayer;
         dst_end = dst_start + dst_res->layerCount;
      }

      unsigned src_start, src_end;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         assert(src_res->baseArrayLayer == 0);
         src_start = pRegions[r].srcOffsets[0].z;
         src_end = pRegions[r].srcOffsets[1].z;
      } else {
         src_start = src_res->baseArrayLayer;
         src_end = src_start + src_res->layerCount;
      }

      bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
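      /* Scale factor mapping destination slices onto source slices; for 2D
       * arrays the layer counts match and this is 1.0, while for 3D blits
       * it is roughly the ratio of the two z ranges.
       */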
      float src_z_step = (float)(src_end + 1 - src_start) /
                         (float)(dst_end + 1 - dst_start);

      if (flip_z) {
         src_start = src_end;
         src_z_step *= -1;
      }

      unsigned src_x0 = pRegions[r].srcOffsets[0].x;
      unsigned src_x1 = pRegions[r].srcOffsets[1].x;
      unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
      unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
      bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);

      unsigned src_y0 = pRegions[r].srcOffsets[0].y;
      unsigned src_y1 = pRegions[r].srcOffsets[1].y;
      unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
      unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
      bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);

      const unsigned num_layers = dst_end - dst_start;
      for (unsigned i = 0; i < num_layers; i++) {
         unsigned dst_z = dst_start + i;
         unsigned src_z = src_start + i * src_z_step;

         blorp_blit(&batch, &src, src_res->mipLevel, src_z,
                    src_format.isl_format, src_format.swizzle,
                    &dst, dst_res->mipLevel, dst_z,
                    dst_format.isl_format,
                    anv_swizzle_for_render(dst_format.swizzle),
                    src_x0, src_y0, src_x1, src_y1,
                    dst_x0, dst_y0, dst_x1, dst_y1,
                    gl_filter, flip_x, flip_y);
      }
   }

   blorp_batch_finish(&batch);
}

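/* Picks an RGBA UINT format whose block size is size_B bytes.  As noted in
 * do_buffer_copy() below, only the size matters for these raw copies, not
 * the channel layout.
 */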
static enum isl_format
isl_format_for_size(unsigned size_B)
{
   switch (size_B) {
   case 1:  return ISL_FORMAT_R8_UINT;
   case 2:  return ISL_FORMAT_R8G8_UINT;
   case 4:  return ISL_FORMAT_R8G8B8A8_UINT;
   case 8:  return ISL_FORMAT_R16G16B16A16_UINT;
   case 16: return ISL_FORMAT_R32G32B32A32_UINT;
   default:
      unreachable("Not a power-of-two format size");
   }
}

static void
do_buffer_copy(struct blorp_batch *batch,
               struct anv_bo *src, uint64_t src_offset,
               struct anv_bo *dst, uint64_t dst_offset,
               int width, int height, int block_size)
{
   struct anv_device *device = batch->blorp->driver_ctx;

   /* The actual format we pick doesn't matter as blorp will throw it away.
    * The only thing that actually matters is the size.
    */
   enum isl_format format = isl_format_for_size(block_size);

   struct isl_surf surf;
   isl_surf_init(&device->isl_dev, &surf,
                 .dim = ISL_SURF_DIM_2D,
                 .format = format,
                 .width = width,
                 .height = height,
                 .depth = 1,
                 .levels = 1,
                 .array_len = 1,
                 .samples = 1,
                 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                          ISL_SURF_USAGE_RENDER_TARGET_BIT,
                 .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(surf.row_pitch == width * block_size);

   struct blorp_surf src_blorp_surf = {
      .surf = &surf,
      .addr = {
         .buffer = src,
         .offset = src_offset,
      },
   };

   struct blorp_surf dst_blorp_surf = {
      .surf = &surf,
      .addr = {
         .buffer = dst,
         .offset = dst_offset,
      },
   };

   blorp_copy(batch, &src_blorp_surf, 0, 0, &dst_blorp_surf, 0, 0,
              0, 0, 0, 0, width, height);
}

/**
 * Returns the greatest common divisor of a and b that is a power of two.
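 *
 * For example, gcd_pow2_u64(24, 16) == 8: 24 has three trailing zero bits
 * and 16 has four, so this returns 1 << MIN2(3, 4).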
 */
static inline uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX in which
    * case, the MIN2() will take the other one.  If both are 0 then we will
    * hit the assert above.
    */
   return 1 << MIN2(a_log2, b_log2);
}

/* This is the maximum possible width/height our HW can handle */
#define MAX_SURFACE_DIM (1ull << 14)

void anv_CmdCopyBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferCopy*                         pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      uint64_t src_offset = src_buffer->offset + pRegions[r].srcOffset;
      uint64_t dst_offset = dst_buffer->offset + pRegions[r].dstOffset;
      uint64_t copy_size = pRegions[r].size;

      /* First, we compute the biggest format that can be used with the
       * given offsets and size.
       */
      int bs = 16;
      bs = gcd_pow2_u64(bs, src_offset);
      bs = gcd_pow2_u64(bs, dst_offset);
      bs = gcd_pow2_u64(bs, pRegions[r].size);

      /* Next, we make a bunch of max-sized copies */
      uint64_t max_copy_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
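      /* For example, a 100 MiB copy with bs == 16 has max_copy_size ==
       * 16384 * 16384 * 16 == 4 GiB, so the loop below is skipped and the
       * whole region is handled by a single 16384 x 400 rect copy
       * (104857600 / (16384 * 16) == 400).
       */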
      while (copy_size >= max_copy_size) {
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        MAX_SURFACE_DIM, MAX_SURFACE_DIM, bs);
         copy_size -= max_copy_size;
         src_offset += max_copy_size;
         dst_offset += max_copy_size;
      }

      /* Now make a max-width copy */
      uint64_t height = copy_size / (MAX_SURFACE_DIM * bs);
      assert(height < MAX_SURFACE_DIM);
      if (height != 0) {
         uint64_t rect_copy_size = height * MAX_SURFACE_DIM * bs;
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        MAX_SURFACE_DIM, height, bs);
         copy_size -= rect_copy_size;
         src_offset += rect_copy_size;
         dst_offset += rect_copy_size;
      }

      /* Finally, make a small copy to finish it off */
      if (copy_size != 0) {
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        copy_size / bs, 1, bs);
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdUpdateBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                dataSize,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* We can't quite grab a full block because the state stream needs a
    * little data at the top to build its linked list.
    */
   const uint32_t max_update_size =
      cmd_buffer->device->dynamic_state_block_pool.block_size - 64;

   assert(max_update_size < MAX_SURFACE_DIM * 4);

   while (dataSize) {
      const uint32_t copy_size = MIN2(dataSize, max_update_size);

      struct anv_state tmp_data =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);

      memcpy(tmp_data.map, pData, copy_size);

      int bs = 16;
      bs = gcd_pow2_u64(bs, dstOffset);
      bs = gcd_pow2_u64(bs, copy_size);

      do_buffer_copy(&batch,
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     tmp_data.offset,
                     dst_buffer->bo, dst_buffer->offset + dstOffset,
                     copy_size / bs, 1, bs);

      dataSize -= copy_size;
      dstOffset += copy_size;
      pData = (void *)pData + copy_size;
   }

   blorp_batch_finish(&batch);
}

void anv_CmdFillBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                fillSize,
    uint32_t                                    data)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
   struct blorp_surf surf;
   struct isl_surf isl_surf;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   fillSize = anv_buffer_get_range(dst_buffer, dstOffset, fillSize);

   /* From the Vulkan spec:
    *
    *    "size is the number of bytes to fill, and must be either a multiple
    *    of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
    *    the buffer. If VK_WHOLE_SIZE is used and the remaining size of the
    *    buffer is not a multiple of 4, then the nearest smaller multiple is
    *    used."
    */
   fillSize &= ~3ull;

   /* First, we compute the biggest format that can be used with the
    * given offsets and size.
    */
   int bs = 16;
   bs = gcd_pow2_u64(bs, dstOffset);
   bs = gcd_pow2_u64(bs, fillSize);
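   /* E.g., dstOffset == 4 with fillSize == 24 yields bs == 4 and thus an
    * R8G8B8A8_UINT surface; a 16-byte-aligned offset and size would use
    * R32G32B32A32_UINT instead.
    */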
   enum isl_format isl_format = isl_format_for_size(bs);

   union isl_color_value color = {
      .u32 = { data, data, data, data },
   };

   const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
   while (fillSize >= max_fill_size) {
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                  color, NULL);
      fillSize -= max_fill_size;
      dstOffset += max_fill_size;
   }

   uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
   assert(height < MAX_SURFACE_DIM);
   if (height != 0) {
      const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, height,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
                  color, NULL);
      fillSize -= rect_fill_size;
      dstOffset += rect_fill_size;
   }

   if (fillSize != 0) {
      const uint32_t width = fillSize / bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    width, 1,
                                    width * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, width, 1,
                  color, NULL);
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearColorImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     _image,
    VkImageLayout                               imageLayout,
    const VkClearColorValue*                    pColor,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, _image);

   static const bool color_write_disable[4] = { false, false, false, false };

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                image->aux_usage, &surf);

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      assert(pRanges[r].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);

      struct anv_format src_format =
         anv_get_format(&cmd_buffer->device->info, image->vk_format,
                        VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D) {
            base_layer = 0;
            layer_count = anv_minify(image->extent.depth, level);
         }

         blorp_clear(&batch, &surf,
                     src_format.isl_format, src_format.swizzle,
                     level, base_layer, layer_count,
                     0, 0, level_width, level_height,
                     vk_to_isl_color(*pColor), color_write_disable);
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearDepthStencilImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     image_h,
    VkImageLayout                               imageLayout,
    const VkClearDepthStencilValue*             pDepthStencil,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, image_h);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth, stencil;
   if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   ISL_AUX_USAGE_NONE, &depth);
   } else {
      memset(&depth, 0, sizeof(depth));
   }

   if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   ISL_AUX_USAGE_NONE, &stencil);
   } else {
      memset(&stencil, 0, sizeof(stencil));
   }

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
      bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D)
            layer_count = anv_minify(image->extent.depth, level);

         blorp_clear_depth_stencil(&batch, &depth, &stencil,
                                   level, base_layer, layer_count,
                                   0, 0, level_width, level_height,
                                   clear_depth, pDepthStencil->depth,
                                   clear_stencil ? 0xff : 0,
                                   pDepthStencil->stencil);
      }
   }

   blorp_batch_finish(&batch);
}

VkResult
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset,
                                         struct anv_state *bt_state)
{
   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                  state_offset);
   if (bt_state->map == NULL) {
      /* We ran out of space.  Grab a new binding table block. */
      VkResult result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                     state_offset);
      assert(bt_state->map != NULL);
   }

   return VK_SUCCESS;
}

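/* Builds a one-entry binding table whose only slot points at surface_state,
 * for the attachment-clear paths below that go through
 * blorp_clear_attachments().
 */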
static VkResult
binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_state surface_state,
                                uint32_t *bt_offset)
{
   uint32_t state_offset;
   struct anv_state bt_state;

   VkResult result =
      anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset,
                                               &bt_state);
   if (result != VK_SUCCESS)
      return result;

   uint32_t *bt_map = bt_state.map;
   bt_map[0] = surface_state.offset + state_offset;

   *bt_offset = bt_state.offset;
   return VK_SUCCESS;
}

static void
clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       const VkClearAttachment *attachment,
                       uint32_t rectCount, const VkClearRect *pRects)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t color_att = attachment->colorAttachment;
   const uint32_t att_idx = subpass->color_attachments[color_att].attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att_idx];

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer, att_state->color_rt_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   union isl_color_value clear_color =
      vk_to_isl_color(attachment->clearValue.color);

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      blorp_clear_attachments(batch, binding_table,
                              ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              true, clear_color, false, 0.0f, 0, 0);
   }
}

static void
clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
                               struct blorp_batch *batch,
                               const VkClearAttachment *attachment,
                               uint32_t rectCount, const VkClearRect *pRects)
{
   static const union isl_color_value color_value = { .u32 = { 0, } };
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t att_idx = subpass->depth_stencil_attachment.attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];

   bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
   bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

   enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
   if (clear_depth) {
      depth_format = anv_get_isl_format(&cmd_buffer->device->info,
                                        pass_att->format,
                                        VK_IMAGE_ASPECT_DEPTH_BIT,
                                        VK_IMAGE_TILING_OPTIMAL);
   }

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer,
                                      cmd_buffer->state.null_surface_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
      blorp_clear_attachments(batch, binding_table,
                              depth_format, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              false, color_value,
                              clear_depth, value.depth,
                              clear_stencil ? 0xff : 0, value.stencil);
   }
}

void anv_CmdClearAttachments(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    attachmentCount,
    const VkClearAttachment*                    pAttachments,
    uint32_t                                    rectCount,
    const VkClearRect*                          pRects)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   for (uint32_t a = 0; a < attachmentCount; ++a) {
      if (pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
         clear_color_attachment(cmd_buffer, &batch,
                                &pAttachments[a],
                                rectCount, pRects);
      } else {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &pAttachments[a],
                                        rectCount, pRects);
      }
   }

   blorp_batch_finish(&batch);
}

enum subpass_stage {
   SUBPASS_STAGE_LOAD,
   SUBPASS_STAGE_DRAW,
   SUBPASS_STAGE_RESOLVE,
};

static bool
attachment_needs_flush(struct anv_cmd_buffer *cmd_buffer,
                       struct anv_render_pass_attachment *att,
                       enum subpass_stage stage)
{
   struct anv_render_pass *pass = cmd_buffer->state.pass;
   const uint32_t subpass_idx = anv_get_subpass_id(&cmd_buffer->state);

   /* We handle this subpass specially based on the current stage */
   enum anv_subpass_usage usage = att->subpass_usage[subpass_idx];
   switch (stage) {
   case SUBPASS_STAGE_LOAD:
      if (usage & (ANV_SUBPASS_USAGE_INPUT | ANV_SUBPASS_USAGE_RESOLVE_SRC))
         return true;
      break;

   case SUBPASS_STAGE_DRAW:
      if (usage & ANV_SUBPASS_USAGE_RESOLVE_SRC)
         return true;
      break;

   default:
      break;
   }

   for (uint32_t s = subpass_idx + 1; s < pass->subpass_count; s++) {
      usage = att->subpass_usage[s];

      /* If this attachment is going to be used as an input in this or any
       * future subpass, then we need to flush its cache and invalidate the
       * texture cache.
       */
      if (att->subpass_usage[s] & ANV_SUBPASS_USAGE_INPUT)
         return true;

      if (usage & (ANV_SUBPASS_USAGE_DRAW | ANV_SUBPASS_USAGE_RESOLVE_DST)) {
         /* We found another subpass that draws to this attachment.  We'll
          * wait to resolve until then.
          */
         return false;
      }
   }

   return false;
}

static void
anv_cmd_buffer_flush_attachments(struct anv_cmd_buffer *cmd_buffer,
                                 enum subpass_stage stage)
{
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_render_pass *pass = cmd_buffer->state.pass;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      uint32_t att = subpass->color_attachments[i].attachment;
      assert(att < pass->attachment_count);
      if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
      }
   }

   if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
      uint32_t att = subpass->depth_stencil_attachment.attachment;
      assert(att < pass->attachment_count);
      if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
            ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
      }
   }
}

static bool
subpass_needs_clear(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;

   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      if (cmd_state->attachments[a].pending_clear_aspects) {
         return true;
      }
   }

   if (ds != VK_ATTACHMENT_UNUSED &&
       cmd_state->attachments[ds].pending_clear_aspects) {
      return true;
   }

   return false;
}

void
anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   const VkRect2D render_area = cmd_buffer->state.render_area;

   if (!subpass_needs_clear(cmd_buffer))
      return;

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   VkClearRect clear_rect = {
      .rect = cmd_buffer->state.render_area,
      .baseArrayLayer = 0,
      .layerCount = cmd_buffer->state.framebuffer->layers,
   };

   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      const uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      struct anv_attachment_state *att_state = &cmd_state->attachments[a];

      if (!att_state->pending_clear_aspects)
         continue;

      assert(att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);

      struct anv_image_view *iview = fb->attachments[a];
      const struct anv_image *image = iview->image;
      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                   att_state->aux_usage, &surf);

      if (att_state->fast_clear) {
         surf.clear_color = vk_to_isl_color(att_state->clear_value.color);

         /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
          *
          *    "After Render target fast clear, pipe-control with color cache
          *    write-flush must be issued before sending any DRAW commands on
          *    that render target."
          *
          * This comment is a bit cryptic and doesn't really tell you what's
          * going on or what's really needed.  It appears that fast clear ops are
          * not properly synchronized with other drawing.  This means that we
          * cannot have a fast clear operation in the pipe at the same time as
          * other regular drawing operations.  We need to use a PIPE_CONTROL
          * to ensure that the contents of the previous draw hit the render
          * target before we resolve and then use a second PIPE_CONTROL after
          * the resolve to ensure that it is completed before any additional
          * drawing occurs.
          */
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

         blorp_fast_clear(&batch, &surf, iview->isl.format,
                          iview->isl.base_level,
                          iview->isl.base_array_layer, fb->layers,
                          render_area.offset.x, render_area.offset.y,
                          render_area.offset.x + render_area.extent.width,
                          render_area.offset.y + render_area.extent.height);

         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
      } else {
         blorp_clear(&batch, &surf, iview->isl.format,
                     anv_swizzle_for_render(iview->isl.swizzle),
                     iview->isl.base_level,
                     iview->isl.base_array_layer, fb->layers,
                     render_area.offset.x, render_area.offset.y,
                     render_area.offset.x + render_area.extent.width,
                     render_area.offset.y + render_area.extent.height,
                     vk_to_isl_color(att_state->clear_value.color), NULL);
      }

      att_state->pending_clear_aspects = 0;
   }

   const uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;

   if (ds != VK_ATTACHMENT_UNUSED &&
       cmd_state->attachments[ds].pending_clear_aspects) {

      VkClearAttachment clear_att = {
         .aspectMask = cmd_state->attachments[ds].pending_clear_aspects,
         .clearValue = cmd_state->attachments[ds].clear_value,
      };

      const uint8_t gen = cmd_buffer->device->info.gen;
      bool clear_with_hiz = gen >= 8 && cmd_state->attachments[ds].aux_usage ==
                            ISL_AUX_USAGE_HIZ;
      const struct anv_image_view *iview = fb->attachments[ds];

      if (clear_with_hiz) {
         const bool clear_depth = clear_att.aspectMask &
                                  VK_IMAGE_ASPECT_DEPTH_BIT;
         const bool clear_stencil = clear_att.aspectMask &
                                    VK_IMAGE_ASPECT_STENCIL_BIT;

         /* Check against restrictions for depth buffer clearing. A great GPU
          * performance benefit isn't expected when using the HZ sequence for
          * stencil-only clears. Therefore, we don't emit a HZ op sequence for
          * a stencil clear in addition to using the BLORP-fallback for depth.
          */
         if (clear_depth) {
            if (!blorp_can_hiz_clear_depth(gen, iview->isl.format,
                                           iview->image->samples,
                                           render_area.offset.x,
                                           render_area.offset.y,
                                           render_area.offset.x +
                                           render_area.extent.width,
                                           render_area.offset.y +
                                           render_area.extent.height)) {
               clear_with_hiz = false;
            } else if (clear_att.clearValue.depthStencil.depth !=
                       ANV_HZ_FC_VAL) {
               /* Don't enable fast depth clears for any color not equal to
                * ANV_HZ_FC_VAL.
                */
               clear_with_hiz = false;
            } else if (gen == 8 &&
                       anv_can_sample_with_hiz(&cmd_buffer->device->info,
                                               iview->aspect_mask,
                                               iview->image->samples)) {
               /* Only gen9+ supports returning ANV_HZ_FC_VAL when sampling a
                * fast-cleared portion of a HiZ buffer. Testing has revealed
                * that Gen8 only supports returning 0.0f. Gens prior to gen8 do
                * not support this feature at all.
                */
               clear_with_hiz = false;
            }
         }

         if (clear_with_hiz) {
            blorp_gen8_hiz_clear_attachments(&batch, iview->image->samples,
                                             render_area.offset.x,
                                             render_area.offset.y,
                                             render_area.offset.x +
                                             render_area.extent.width,
                                             render_area.offset.y +
                                             render_area.extent.height,
                                             clear_depth, clear_stencil,
                                             clear_att.clearValue.
                                                depthStencil.stencil);
         }
      }

      if (!clear_with_hiz) {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &clear_att, 1, &clear_rect);
      }

      cmd_state->attachments[ds].pending_clear_aspects = 0;
   }

   blorp_batch_finish(&batch);

   anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_LOAD);
}

static void
resolve_image(struct blorp_batch *batch,
              const struct anv_image *src_image,
              uint32_t src_level, uint32_t src_layer,
              const struct anv_image *dst_image,
              uint32_t dst_level, uint32_t dst_layer,
              VkImageAspectFlags aspect_mask,
              uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
              uint32_t width, uint32_t height)
{
   assert(src_image->type == VK_IMAGE_TYPE_2D);
   assert(src_image->samples > 1);
   assert(dst_image->type == VK_IMAGE_TYPE_2D);
   assert(dst_image->samples == 1);

   uint32_t a;
   for_each_bit(a, aspect_mask) {
      VkImageAspectFlagBits aspect = 1 << a;

      struct blorp_surf src_surf, dst_surf;
      get_blorp_surf_for_anv_image(src_image, aspect,
                                   src_image->aux_usage, &src_surf);
      get_blorp_surf_for_anv_image(dst_image, aspect,
                                   dst_image->aux_usage, &dst_surf);

      blorp_blit(batch,
                 &src_surf, src_level, src_layer,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 &dst_surf, dst_level, dst_layer,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 src_x, src_y, src_x + width, src_y + height,
                 dst_x, dst_y, dst_x + width, dst_y + height,
                 0x2600 /* GL_NEAREST */, false, false);
   }
}

void anv_CmdResolveImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageResolve*                       pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t r = 0; r < regionCount; r++) {
      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);
      assert(pRegions[r].srcSubresource.layerCount ==
             pRegions[r].dstSubresource.layerCount);

      const uint32_t layer_count = pRegions[r].dstSubresource.layerCount;

      for (uint32_t layer = 0; layer < layer_count; layer++) {
         resolve_image(&batch,
                       src_image, pRegions[r].srcSubresource.mipLevel,
                       pRegions[r].srcSubresource.baseArrayLayer + layer,
                       dst_image, pRegions[r].dstSubresource.mipLevel,
                       pRegions[r].dstSubresource.baseArrayLayer + layer,
                       pRegions[r].dstSubresource.aspectMask,
                       pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
                       pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
                       pRegions[r].extent.width, pRegions[r].extent.height);
      }
   }

   blorp_batch_finish(&batch);
}

static void
ccs_resolve_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       uint32_t att)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att];

   if (att_state->aux_usage == ISL_AUX_USAGE_NONE ||
       att_state->aux_usage == ISL_AUX_USAGE_MCS)
      return; /* Nothing to resolve */

   assert(att_state->aux_usage == ISL_AUX_USAGE_CCS_E ||
          att_state->aux_usage == ISL_AUX_USAGE_CCS_D);

   struct anv_render_pass *pass = cmd_buffer->state.pass;
   const uint32_t subpass_idx = anv_get_subpass_id(&cmd_buffer->state);

   /* Scan forward to see all the ways this attachment will be used.
    * Ideally, we would like to resolve in the same subpass as the last write
    * of a particular attachment.  That way we only resolve once but it's
    * still hot in the cache.
    */
   bool found_draw = false;
   enum anv_subpass_usage usage = 0;
   for (uint32_t s = subpass_idx + 1; s < pass->subpass_count; s++) {
      usage |= pass->attachments[att].subpass_usage[s];

      if (usage & (ANV_SUBPASS_USAGE_DRAW | ANV_SUBPASS_USAGE_RESOLVE_DST)) {
         /* We found another subpass that draws to this attachment.  We'll
          * wait to resolve until then.
          */
         found_draw = true;
         break;
      }
   }

   struct anv_image_view *iview = fb->attachments[att];
   const struct anv_image *image = iview->image;
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);

   enum blorp_fast_clear_op resolve_op = BLORP_FAST_CLEAR_OP_NONE;
   if (!found_draw) {
      /* This is the last subpass that writes to this attachment so we need to
       * resolve here.  Ideally, we would like to only resolve if the storeOp
       * is set to VK_ATTACHMENT_STORE_OP_STORE.  However, we need to ensure
       * that the CCS bits are set to "resolved" because there may be copy or
       * blit operations (which may ignore CCS) between now and the next time
       * we render and we need to ensure that anything they write will be
       * respected in the next render.  Unfortunately, the hardware does not
       * provide us with any sort of "invalidate" pass that sets the CCS to
       * "resolved" without writing to the render target.
       */
      if (iview->image->aux_usage != ISL_AUX_USAGE_CCS_E) {
         /* The image destination surface doesn't support compression outside
          * the render pass.  We need a full resolve.
          */
         resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
      } else if (att_state->fast_clear) {
         /* We don't know what to do with clear colors outside the render
          * pass.  We need a partial resolve.  Only transparent black is
          * built into the surface state object and thus no resolve is
          * required for this case.
          */
         if (att_state->clear_value.color.uint32[0] ||
             att_state->clear_value.color.uint32[1] ||
             att_state->clear_value.color.uint32[2] ||
             att_state->clear_value.color.uint32[3])
            resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
      } else {
         /* The image "natively" supports all the compression we care about
          * and we don't need to resolve at all.  If this is the case, we also
          * don't need to resolve for any of the input attachment cases below.
          */
      }
   } else if (usage & ANV_SUBPASS_USAGE_INPUT) {
      /* Input attachments are clear-color aware so, at least on Sky Lake, we
       * can frequently sample from them with no resolves at all.
       */
      if (att_state->aux_usage != att_state->input_aux_usage) {
         assert(att_state->input_aux_usage == ISL_AUX_USAGE_NONE);
         resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
      } else if (!att_state->clear_color_is_zero_one) {
         /* Sky Lake PRM, Vol. 2d, RENDER_SURFACE_STATE::Red Clear Color:
          *
          *    "If Number of Multisamples is MULTISAMPLECOUNT_1 AND if this RT
          *    is fast cleared with non-0/1 clear value, this RT must be
          *    partially resolved (refer to Partial Resolve operation) before
          *    binding this surface to Sampler."
          */
         resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
      }
   }

   if (resolve_op == BLORP_FAST_CLEAR_OP_NONE)
      return;

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                att_state->aux_usage, &surf);
   if (att_state->fast_clear)
      surf.clear_color = vk_to_isl_color(att_state->clear_value.color);

   /* From the Sky Lake PRM Vol. 7, "Render Target Resolve":
    *
    *    "When performing a render target resolve, PIPE_CONTROL with end of
    *    pipe sync must be delivered."
    *
    * This comment is a bit cryptic and doesn't really tell you what's going
    * on or what's really needed.  It appears that fast clear ops are not
    * properly synchronized with other drawing.  We need to use a PIPE_CONTROL
    * to ensure that the contents of the previous draw hit the render target
    * before we resolve and then use a second PIPE_CONTROL after the resolve
    * to ensure that it is completed before any additional drawing occurs.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   for (uint32_t layer = 0; layer < fb->layers; layer++) {
      blorp_ccs_resolve(batch, &surf,
                        iview->isl.base_level,
                        iview->isl.base_array_layer + layer,
                        iview->isl.format, resolve_op);
   }

   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   /* Once we've done any sort of resolve, we're no longer fast-cleared */
   att_state->fast_clear = false;
   if (att_state->aux_usage == ISL_AUX_USAGE_CCS_D)
      att_state->aux_usage = ISL_AUX_USAGE_NONE;
}

void
anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      ccs_resolve_attachment(cmd_buffer, &batch,
                             subpass->color_attachments[i].attachment);
   }

   anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_DRAW);

   if (subpass->has_resolve) {
      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t src_att = subpass->color_attachments[i].attachment;
         uint32_t dst_att = subpass->resolve_attachments[i].attachment;

         if (dst_att == VK_ATTACHMENT_UNUSED)
            continue;

         if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
            /* From the Vulkan 1.0 spec:
             *
             *    If the first use of an attachment in a render pass is as a
             *    resolve attachment, then the loadOp is effectively ignored
             *    as the resolve is guaranteed to overwrite all pixels in the
             *    render area.
             */
            cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
         }

         struct anv_image_view *src_iview = fb->attachments[src_att];
         struct anv_image_view *dst_iview = fb->attachments[dst_att];

         const VkRect2D render_area = cmd_buffer->state.render_area;

         assert(src_iview->aspect_mask == dst_iview->aspect_mask);
         resolve_image(&batch, src_iview->image,
                       src_iview->isl.base_level,
                       src_iview->isl.base_array_layer,
                       dst_iview->image,
                       dst_iview->isl.base_level,
                       dst_iview->isl.base_array_layer,
                       src_iview->aspect_mask,
                       render_area.offset.x, render_area.offset.y,
                       render_area.offset.x, render_area.offset.y,
                       render_area.extent.width, render_area.extent.height);

         ccs_resolve_attachment(cmd_buffer, &batch, dst_att);
      }

      anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_RESOLVE);
   }

   blorp_batch_finish(&batch);
}

void
anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        enum blorp_hiz_op op)
{
   assert(image);

   /* Don't resolve depth buffers without an auxiliary HiZ buffer and
    * don't perform such a resolve on gens that don't support it.
    */
   if (cmd_buffer->device->info.gen < 8 ||
       image->aux_usage != ISL_AUX_USAGE_HIZ)
      return;

   assert(op == BLORP_HIZ_OP_HIZ_RESOLVE ||
          op == BLORP_HIZ_OP_DEPTH_RESOLVE);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                ISL_AUX_USAGE_NONE, &surf);

   /* Manually add the aux HiZ surf */
   surf.aux_surf = &image->aux_surface.isl;
   surf.aux_addr = (struct blorp_address) {
      .buffer = image->bo,
      .offset = image->offset + image->aux_surface.offset,
   };
   surf.aux_usage = ISL_AUX_USAGE_HIZ;

   surf.clear_color.u32[0] = (uint32_t) ANV_HZ_FC_VAL;

   blorp_gen6_hiz_op(&batch, &surf, 0, 0, op);
   blorp_batch_finish(&batch);
}