anv: fix release build unused variable warnings
[mesa.git] src/intel/vulkan/anv_blorp.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

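/* Shader cache callbacks for blorp.  blorp looks up and uploads its internal
 * blit/clear kernels through these hooks; here they are backed by the
 * device's dedicated blorp_shader_cache and hooked up in
 * anv_device_init_blorp() below.
 */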
static bool
lookup_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_shader_bin *bin =
      anv_pipeline_cache_search(&device->blorp_shader_cache, key, key_size);
   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

static void
upload_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    const void *kernel, uint32_t kernel_size,
                    const struct brw_stage_prog_data *prog_data,
                    uint32_t prog_data_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_pipeline_bind_map bind_map = {
      .surface_count = 0,
      .sampler_count = 0,
   };

   struct anv_shader_bin *bin =
      anv_pipeline_cache_upload_kernel(&device->blorp_shader_cache,
                                       key, key_size, kernel, kernel_size,
                                       prog_data, prog_data_size, &bind_map);

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;
}

void
anv_device_init_blorp(struct anv_device *device)
{
   anv_pipeline_cache_init(&device->blorp_shader_cache, device, true);
   blorp_init(&device->blorp, device, &device->isl_dev);
   device->blorp.compiler = device->instance->physicalDevice.compiler;
   device->blorp.mocs.tex = device->default_mocs;
   device->blorp.mocs.rb = device->default_mocs;
   device->blorp.mocs.vb = device->default_mocs;
   device->blorp.lookup_shader = lookup_blorp_shader;
   device->blorp.upload_shader = upload_blorp_shader;
   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell) {
         device->blorp.exec = gen75_blorp_exec;
      } else {
         device->blorp.exec = gen7_blorp_exec;
      }
      break;
   case 8:
      device->blorp.exec = gen8_blorp_exec;
      break;
   case 9:
      device->blorp.exec = gen9_blorp_exec;
      break;
   default:
      unreachable("Unknown hardware generation");
   }
}

void
anv_device_finish_blorp(struct anv_device *device)
{
   blorp_finish(&device->blorp);
   anv_pipeline_cache_finish(&device->blorp_shader_cache);
}

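/* Wraps a range of a VkBuffer as a linear 2D surface of the given format so
 * blorp can source from or render to raw buffer memory.  The isl_surf is
 * initialized in caller-provided storage since blorp_surf only references it
 * by pointer.
 */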
static void
get_blorp_surf_for_anv_buffer(struct anv_device *device,
                              struct anv_buffer *buffer, uint64_t offset,
                              uint32_t width, uint32_t height,
                              uint32_t row_pitch, enum isl_format format,
                              struct blorp_surf *blorp_surf,
                              struct isl_surf *isl_surf)
{
   const struct isl_format_layout *fmtl =
      isl_format_get_layout(format);

   /* ASTC is the only format which doesn't support linear layouts.
    * Create an equivalently sized surface with ISL to get around this.
    */
   if (fmtl->txc == ISL_TXC_ASTC) {
      /* Use an equivalently sized format */
      format = ISL_FORMAT_R32G32B32A32_UINT;
      assert(fmtl->bpb == isl_format_get_layout(format)->bpb);

      /* Shrink the dimensions for the new format */
      width = DIV_ROUND_UP(width, fmtl->bw);
      height = DIV_ROUND_UP(height, fmtl->bh);
   }

   *blorp_surf = (struct blorp_surf) {
      .surf = isl_surf,
      .addr = {
         .buffer = buffer->bo,
         .offset = buffer->offset + offset,
      },
   };

   isl_surf_init(&device->isl_dev, isl_surf,
                 .dim = ISL_SURF_DIM_2D,
                 .format = format,
                 .width = width,
                 .height = height,
                 .depth = 1,
                 .levels = 1,
                 .array_len = 1,
                 .samples = 1,
                 .min_pitch = row_pitch,
                 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                          ISL_SURF_USAGE_RENDER_TARGET_BIT,
                 .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(isl_surf->row_pitch == row_pitch);
}

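/* Builds a blorp_surf for one aspect of an anv_image.  When an aux usage is
 * requested, the image's auxiliary (e.g. CCS) surface is attached as well;
 * stencil never has an aux surface, so any requested aux usage is dropped
 * for that aspect.
 */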
static void
get_blorp_surf_for_anv_image(const struct anv_image *image,
                             VkImageAspectFlags aspect,
                             enum isl_aux_usage aux_usage,
                             struct blorp_surf *blorp_surf)
{
   if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT)
      aux_usage = ISL_AUX_USAGE_NONE;

   const struct anv_surface *surface =
      anv_image_get_surface_for_aspect_mask(image, aspect);

   *blorp_surf = (struct blorp_surf) {
      .surf = &surface->isl,
      .addr = {
         .buffer = image->bo,
         .offset = image->offset + surface->offset,
      },
   };

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      blorp_surf->aux_surf = &image->aux_surface.isl;
      blorp_surf->aux_addr = (struct blorp_address) {
         .buffer = image->bo,
         .offset = image->offset + image->aux_surface.offset,
      };
      blorp_surf->aux_usage = aux_usage;
   }
}

void anv_CmdCopyImage(
    VkCommandBuffer commandBuffer,
    VkImage srcImage,
    VkImageLayout srcImageLayout,
    VkImage dstImage,
    VkImageLayout dstImageLayout,
    uint32_t regionCount,
    const VkImageCopy* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      VkOffset3D srcOffset =
         anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
      VkOffset3D dstOffset =
         anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
      VkExtent3D extent =
         anv_sanitize_image_extent(src_image->type, pRegions[r].extent);

      unsigned dst_base_layer, layer_count;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         dst_base_layer = pRegions[r].dstOffset.z;
         layer_count = pRegions[r].extent.depth;
      } else {
         dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
         layer_count = pRegions[r].dstSubresource.layerCount;
      }

      unsigned src_base_layer;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         src_base_layer = pRegions[r].srcOffset.z;
      } else {
         src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
         assert(pRegions[r].srcSubresource.layerCount == layer_count);
      }

      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);

      uint32_t a;
      for_each_bit(a, pRegions[r].dstSubresource.aspectMask) {
         VkImageAspectFlagBits aspect = (1 << a);

         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(src_image, aspect, src_image->aux_usage,
                                      &src_surf);
         get_blorp_surf_for_anv_image(dst_image, aspect, dst_image->aux_usage,
                                      &dst_surf);

         for (unsigned i = 0; i < layer_count; i++) {
            blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
                       src_base_layer + i,
                       &dst_surf, pRegions[r].dstSubresource.mipLevel,
                       dst_base_layer + i,
                       srcOffset.x, srcOffset.y,
                       dstOffset.x, dstOffset.y,
                       extent.width, extent.height);
         }
      }
   }

   blorp_batch_finish(&batch);
}

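/* Shared implementation for vkCmdCopyBufferToImage and
 * vkCmdCopyImageToBuffer.  The "src" and "dst" pointers simply alias the
 * local buffer/image descriptors in the requested direction, so the copy
 * loop itself is direction-agnostic.
 */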
static void
copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_buffer *anv_buffer,
                     struct anv_image *anv_image,
                     uint32_t regionCount,
                     const VkBufferImageCopy* pRegions,
                     bool buffer_to_image)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct {
      struct blorp_surf surf;
      uint32_t level;
      VkOffset3D offset;
   } image, buffer, *src, *dst;

   buffer.level = 0;
   buffer.offset = (VkOffset3D) { 0, 0, 0 };

   if (buffer_to_image) {
      src = &buffer;
      dst = &image;
   } else {
      src = &image;
      dst = &buffer;
   }

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;

      get_blorp_surf_for_anv_image(anv_image, aspect, anv_image->aux_usage,
                                   &image.surf);
      image.offset =
         anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
      image.level = pRegions[r].imageSubresource.mipLevel;

      VkExtent3D extent =
         anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
      if (anv_image->type != VK_IMAGE_TYPE_3D) {
         image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
         extent.depth = pRegions[r].imageSubresource.layerCount;
      }

      const enum isl_format buffer_format =
         anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
                            aspect, VK_IMAGE_TILING_LINEAR);

      const VkExtent3D bufferImageExtent = {
         .width = pRegions[r].bufferRowLength ?
                  pRegions[r].bufferRowLength : extent.width,
         .height = pRegions[r].bufferImageHeight ?
                   pRegions[r].bufferImageHeight : extent.height,
      };

      const struct isl_format_layout *buffer_fmtl =
         isl_format_get_layout(buffer_format);

      const uint32_t buffer_row_pitch =
         DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
         (buffer_fmtl->bpb / 8);

      const uint32_t buffer_layer_stride =
         DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
         buffer_row_pitch;

      struct isl_surf buffer_isl_surf;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    anv_buffer, pRegions[r].bufferOffset,
                                    extent.width, extent.height,
                                    buffer_row_pitch, buffer_format,
                                    &buffer.surf, &buffer_isl_surf);

      for (unsigned z = 0; z < extent.depth; z++) {
         blorp_copy(&batch, &src->surf, src->level, src->offset.z,
                    &dst->surf, dst->level, dst->offset.z,
                    src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
                    extent.width, extent.height);

         image.offset.z++;
         buffer.surf.addr.offset += buffer_layer_stride;
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdCopyBufferToImage(
    VkCommandBuffer commandBuffer,
    VkBuffer srcBuffer,
    VkImage dstImage,
    VkImageLayout dstImageLayout,
    uint32_t regionCount,
    const VkBufferImageCopy* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   copy_buffer_to_image(cmd_buffer, src_buffer, dst_image,
                        regionCount, pRegions, true);
}

void anv_CmdCopyImageToBuffer(
    VkCommandBuffer commandBuffer,
    VkImage srcImage,
    VkImageLayout srcImageLayout,
    VkBuffer dstBuffer,
    uint32_t regionCount,
    const VkBufferImageCopy* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   copy_buffer_to_image(cmd_buffer, dst_buffer, src_image,
                        regionCount, pRegions, false);
}

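/* Sorts a source/destination coordinate pair into ascending order and
 * returns whether the net result is a mirrored blit.  vkCmdBlitImage encodes
 * flips by reversing offsets, while blorp_blit() wants ordered rectangles
 * plus explicit flip flags.
 */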
static bool
flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
{
   bool flip = false;
   if (*src0 > *src1) {
      unsigned tmp = *src0;
      *src0 = *src1;
      *src1 = tmp;
      flip = !flip;
   }

   if (*dst0 > *dst1) {
      unsigned tmp = *dst0;
      *dst0 = *dst1;
      *dst1 = tmp;
      flip = !flip;
   }

   return flip;
}

void anv_CmdBlitImage(
    VkCommandBuffer commandBuffer,
    VkImage srcImage,
    VkImageLayout srcImageLayout,
    VkImage dstImage,
    VkImageLayout dstImageLayout,
    uint32_t regionCount,
    const VkImageBlit* pRegions,
    VkFilter filter)

{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_surf src, dst;

   uint32_t gl_filter;
   switch (filter) {
   case VK_FILTER_NEAREST:
      gl_filter = 0x2600; /* GL_NEAREST */
      break;
   case VK_FILTER_LINEAR:
      gl_filter = 0x2601; /* GL_LINEAR */
      break;
   default:
      unreachable("Invalid filter");
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
      const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;

      get_blorp_surf_for_anv_image(src_image, src_res->aspectMask,
                                   src_image->aux_usage, &src);
      get_blorp_surf_for_anv_image(dst_image, dst_res->aspectMask,
                                   dst_image->aux_usage, &dst);

      struct anv_format src_format =
         anv_get_format(&cmd_buffer->device->info, src_image->vk_format,
                        src_res->aspectMask, src_image->tiling);
      struct anv_format dst_format =
         anv_get_format(&cmd_buffer->device->info, dst_image->vk_format,
                        dst_res->aspectMask, dst_image->tiling);

      unsigned dst_start, dst_end;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         assert(dst_res->baseArrayLayer == 0);
         dst_start = pRegions[r].dstOffsets[0].z;
         dst_end = pRegions[r].dstOffsets[1].z;
      } else {
         dst_start = dst_res->baseArrayLayer;
         dst_end = dst_start + dst_res->layerCount;
      }

      unsigned src_start, src_end;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         assert(src_res->baseArrayLayer == 0);
         src_start = pRegions[r].srcOffsets[0].z;
         src_end = pRegions[r].srcOffsets[1].z;
      } else {
         src_start = src_res->baseArrayLayer;
         src_end = src_start + src_res->layerCount;
      }

      bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
      float src_z_step = (float)(src_end + 1 - src_start) /
                         (float)(dst_end + 1 - dst_start);

      if (flip_z) {
         src_start = src_end;
         src_z_step *= -1;
      }

      unsigned src_x0 = pRegions[r].srcOffsets[0].x;
      unsigned src_x1 = pRegions[r].srcOffsets[1].x;
      unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
      unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
      bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);

      unsigned src_y0 = pRegions[r].srcOffsets[0].y;
      unsigned src_y1 = pRegions[r].srcOffsets[1].y;
      unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
      unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
      bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);

      const unsigned num_layers = dst_end - dst_start;
      for (unsigned i = 0; i < num_layers; i++) {
         unsigned dst_z = dst_start + i;
         unsigned src_z = src_start + i * src_z_step;

         blorp_blit(&batch, &src, src_res->mipLevel, src_z,
                    src_format.isl_format, src_format.swizzle,
                    &dst, dst_res->mipLevel, dst_z,
                    dst_format.isl_format, dst_format.swizzle,
                    src_x0, src_y0, src_x1, src_y1,
                    dst_x0, dst_y0, dst_x1, dst_y1,
                    gl_filter, flip_x, flip_y);
      }

   }

   blorp_batch_finish(&batch);
}

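/* Maps a power-of-two element size in bytes to a UINT format of that size so
 * raw buffer data can be treated as texels by the copy and fill paths below.
 */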
static enum isl_format
isl_format_for_size(unsigned size_B)
{
   switch (size_B) {
   case 1: return ISL_FORMAT_R8_UINT;
   case 2: return ISL_FORMAT_R8G8_UINT;
   case 4: return ISL_FORMAT_R8G8B8A8_UINT;
   case 8: return ISL_FORMAT_R16G16B16A16_UINT;
   case 16: return ISL_FORMAT_R32G32B32A32_UINT;
   default:
      unreachable("Not a power-of-two format size");
   }
}

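/* Copies a width x height grid of block_size-byte elements between two BOs
 * by wrapping both ranges in identical linear surfaces and letting
 * blorp_copy() do the work.
 */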
static void
do_buffer_copy(struct blorp_batch *batch,
               struct anv_bo *src, uint64_t src_offset,
               struct anv_bo *dst, uint64_t dst_offset,
               int width, int height, int block_size)
{
   struct anv_device *device = batch->blorp->driver_ctx;

   /* The actual format we pick doesn't matter as blorp will throw it away.
    * The only thing that actually matters is the size.
    */
   enum isl_format format = isl_format_for_size(block_size);

   struct isl_surf surf;
   isl_surf_init(&device->isl_dev, &surf,
                 .dim = ISL_SURF_DIM_2D,
                 .format = format,
                 .width = width,
                 .height = height,
                 .depth = 1,
                 .levels = 1,
                 .array_len = 1,
                 .samples = 1,
                 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                          ISL_SURF_USAGE_RENDER_TARGET_BIT,
                 .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(surf.row_pitch == width * block_size);

   struct blorp_surf src_blorp_surf = {
      .surf = &surf,
      .addr = {
         .buffer = src,
         .offset = src_offset,
      },
   };

   struct blorp_surf dst_blorp_surf = {
      .surf = &surf,
      .addr = {
         .buffer = dst,
         .offset = dst_offset,
      },
   };

   blorp_copy(batch, &src_blorp_surf, 0, 0, &dst_blorp_surf, 0, 0,
              0, 0, 0, 0, width, height);
}

/**
 * Returns the greatest common divisor of a and b that is a power of two.
 */
static inline uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX in which
    * case the MIN2() will take the other one. If both are 0 then we will
    * hit the assert above.
    */
   return 1 << MIN2(a_log2, b_log2);
}
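
/* For example, gcd_pow2_u64(48, 32) == 16: ffsll() finds the lowest set bit
 * of each value (2^4 for 48, 2^5 for 32) and the smaller exponent gives the
 * largest power of two that divides both.
 */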

/* This is the maximum possible width/height our HW can handle */
#define MAX_SURFACE_DIM (1ull << 14)

void anv_CmdCopyBuffer(
    VkCommandBuffer commandBuffer,
    VkBuffer srcBuffer,
    VkBuffer dstBuffer,
    uint32_t regionCount,
    const VkBufferCopy* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      uint64_t src_offset = src_buffer->offset + pRegions[r].srcOffset;
      uint64_t dst_offset = dst_buffer->offset + pRegions[r].dstOffset;
      uint64_t copy_size = pRegions[r].size;

      /* First, we compute the biggest format that can be used with the
       * given offsets and size.
       */
      int bs = 16;
      bs = gcd_pow2_u64(bs, src_offset);
      bs = gcd_pow2_u64(bs, dst_offset);
      bs = gcd_pow2_u64(bs, pRegions[r].size);
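      /* For example, a single 100 MiB region whose offsets and size are all
       * 16-byte aligned ends up with bs = 16: it is smaller than the 4 GiB
       * max_copy_size below, so it becomes one 16384 x 400 rectangle of
       * 16-byte texels (16384 * 400 * 16 = 100 MiB) with no small copy left
       * over.
       */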

      /* First, we make a bunch of max-sized copies */
      uint64_t max_copy_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
      while (copy_size >= max_copy_size) {
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        MAX_SURFACE_DIM, MAX_SURFACE_DIM, bs);
         copy_size -= max_copy_size;
         src_offset += max_copy_size;
         dst_offset += max_copy_size;
      }

      /* Now make a max-width copy */
      uint64_t height = copy_size / (MAX_SURFACE_DIM * bs);
      assert(height < MAX_SURFACE_DIM);
      if (height != 0) {
         uint64_t rect_copy_size = height * MAX_SURFACE_DIM * bs;
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        MAX_SURFACE_DIM, height, bs);
         copy_size -= rect_copy_size;
         src_offset += rect_copy_size;
         dst_offset += rect_copy_size;
      }

      /* Finally, make a small copy to finish it off */
      if (copy_size != 0) {
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        copy_size / bs, 1, bs);
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdUpdateBuffer(
    VkCommandBuffer commandBuffer,
    VkBuffer dstBuffer,
    VkDeviceSize dstOffset,
    VkDeviceSize dataSize,
    const void* pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* We can't quite grab a full block because the state stream needs a
    * little data at the top to build its linked list.
    */
   const uint32_t max_update_size =
      cmd_buffer->device->dynamic_state_block_pool.block_size - 64;

   assert(max_update_size < MAX_SURFACE_DIM * 4);

   while (dataSize) {
      const uint32_t copy_size = MIN2(dataSize, max_update_size);

      struct anv_state tmp_data =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);

      memcpy(tmp_data.map, pData, copy_size);

      int bs = 16;
      bs = gcd_pow2_u64(bs, dstOffset);
      bs = gcd_pow2_u64(bs, copy_size);

      do_buffer_copy(&batch,
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     tmp_data.offset,
                     dst_buffer->bo, dst_buffer->offset + dstOffset,
                     copy_size / bs, 1, bs);

      dataSize -= copy_size;
      dstOffset += copy_size;
      pData = (void *)pData + copy_size;
   }

   blorp_batch_finish(&batch);
}

void anv_CmdFillBuffer(
    VkCommandBuffer commandBuffer,
    VkBuffer dstBuffer,
    VkDeviceSize dstOffset,
    VkDeviceSize fillSize,
    uint32_t data)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
   struct blorp_surf surf;
   struct isl_surf isl_surf;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   if (fillSize == VK_WHOLE_SIZE) {
      fillSize = dst_buffer->size - dstOffset;
      /* Make sure fillSize is a multiple of 4 */
      fillSize &= ~3ull;
   }

   /* First, we compute the biggest format that can be used with the
    * given offsets and size.
    */
   int bs = 16;
   bs = gcd_pow2_u64(bs, dstOffset);
   bs = gcd_pow2_u64(bs, fillSize);
   enum isl_format isl_format = isl_format_for_size(bs);

   union isl_color_value color = {
      .u32 = { data, data, data, data },
   };

   const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
   while (fillSize >= max_fill_size) {
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                  color, NULL);
      fillSize -= max_fill_size;
      dstOffset += max_fill_size;
   }

   uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
   assert(height < MAX_SURFACE_DIM);
   if (height != 0) {
      const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, height,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
                  color, NULL);
      fillSize -= rect_fill_size;
      dstOffset += rect_fill_size;
   }

   if (fillSize != 0) {
      const uint32_t width = fillSize / bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    width, 1,
                                    width * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, width, 1,
                  color, NULL);
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearColorImage(
    VkCommandBuffer commandBuffer,
    VkImage _image,
    VkImageLayout imageLayout,
    const VkClearColorValue* pColor,
    uint32_t rangeCount,
    const VkImageSubresourceRange* pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, _image);

   static const bool color_write_disable[4] = { false, false, false, false };

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                image->aux_usage, &surf);

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      assert(pRanges[r].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);

      struct anv_format src_format =
         anv_get_format(&cmd_buffer->device->info, image->vk_format,
                        VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = pRanges[r].layerCount;

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D) {
            base_layer = 0;
            layer_count = anv_minify(image->extent.depth, level);
         }

         blorp_clear(&batch, &surf,
                     src_format.isl_format, src_format.swizzle,
                     level, base_layer, layer_count,
                     0, 0, level_width, level_height,
                     vk_to_isl_color(*pColor), color_write_disable);
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearDepthStencilImage(
    VkCommandBuffer commandBuffer,
    VkImage image_h,
    VkImageLayout imageLayout,
    const VkClearDepthStencilValue* pDepthStencil,
    uint32_t rangeCount,
    const VkImageSubresourceRange* pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, image_h);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth, stencil;
   if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   image->aux_usage, &depth);
   } else {
      memset(&depth, 0, sizeof(depth));
   }

   if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   ISL_AUX_USAGE_NONE, &stencil);
   } else {
      memset(&stencil, 0, sizeof(stencil));
   }

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
      bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = pRanges[r].layerCount;

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D)
            layer_count = anv_minify(image->extent.depth, level);

         blorp_clear_depth_stencil(&batch, &depth, &stencil,
                                   level, base_layer, layer_count,
                                   0, 0, level_width, level_height,
                                   clear_depth, pDepthStencil->depth,
                                   clear_stencil ? 0xff : 0,
                                   pDepthStencil->stencil);
      }
   }

   blorp_batch_finish(&batch);
}

struct anv_state
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset)
{
   struct anv_state bt_state =
      anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                         state_offset);
   if (bt_state.map == NULL) {
      /* We ran out of space. Grab a new binding table block. */
      MAYBE_UNUSED VkResult result =
         anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                    state_offset);
      assert(bt_state.map != NULL);
   }

   return bt_state;
}

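/* Builds a one-entry binding table pointing at the given surface state.
 * blorp_clear_attachments() takes a binding table offset rather than a
 * surface, so the attachment-clear paths below funnel through this helper.
 */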
static uint32_t
binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_state surface_state)
{
   uint32_t state_offset;
   struct anv_state bt_state =
      anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset);

   uint32_t *bt_map = bt_state.map;
   bt_map[0] = surface_state.offset + state_offset;

   return bt_state.offset;
}

static void
clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       const VkClearAttachment *attachment,
                       uint32_t rectCount, const VkClearRect *pRects)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t color_att = attachment->colorAttachment;
   const uint32_t att_idx = subpass->color_attachments[color_att];

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att_idx];

   uint32_t binding_table =
      binding_table_for_surface_state(cmd_buffer, att_state->color_rt_state);

   union isl_color_value clear_color =
      vk_to_isl_color(attachment->clearValue.color);

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      blorp_clear_attachments(batch, binding_table,
                              ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              true, clear_color, false, 0.0f, 0, 0);
   }
}

static void
clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
                               struct blorp_batch *batch,
                               const VkClearAttachment *attachment,
                               uint32_t rectCount, const VkClearRect *pRects)
{
   static const union isl_color_value color_value = { .u32 = { 0, } };
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t att_idx = subpass->depth_stencil_attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];

   bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
   bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

   enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
   if (clear_depth) {
      depth_format = anv_get_isl_format(&cmd_buffer->device->info,
                                        pass_att->format,
                                        VK_IMAGE_ASPECT_DEPTH_BIT,
                                        VK_IMAGE_TILING_OPTIMAL);
   }

   uint32_t binding_table =
      binding_table_for_surface_state(cmd_buffer,
                                      cmd_buffer->state.null_surface_state);

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
      blorp_clear_attachments(batch, binding_table,
                              depth_format, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              false, color_value,
                              clear_depth, value.depth,
                              clear_stencil ? 0xff : 0, value.stencil);
   }
}

void anv_CmdClearAttachments(
    VkCommandBuffer commandBuffer,
    uint32_t attachmentCount,
    const VkClearAttachment* pAttachments,
    uint32_t rectCount,
    const VkClearRect* pRects)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   for (uint32_t a = 0; a < attachmentCount; ++a) {
      if (pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
         clear_color_attachment(cmd_buffer, &batch,
                                &pAttachments[a],
                                rectCount, pRects);
      } else {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &pAttachments[a],
                                        rectCount, pRects);
      }
   }

   blorp_batch_finish(&batch);
}

enum subpass_stage {
   SUBPASS_STAGE_LOAD,
   SUBPASS_STAGE_DRAW,
   SUBPASS_STAGE_RESOLVE,
};

static bool
attachment_needs_flush(struct anv_cmd_buffer *cmd_buffer,
                       struct anv_render_pass_attachment *att,
                       enum subpass_stage stage)
{
   struct anv_render_pass *pass = cmd_buffer->state.pass;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   unsigned subpass_idx = subpass - pass->subpasses;
   assert(subpass_idx < pass->subpass_count);

   /* We handle this subpass specially based on the current stage */
   enum anv_subpass_usage usage = att->subpass_usage[subpass_idx];
   switch (stage) {
   case SUBPASS_STAGE_LOAD:
      if (usage & (ANV_SUBPASS_USAGE_INPUT | ANV_SUBPASS_USAGE_RESOLVE_SRC))
         return true;
      break;

   case SUBPASS_STAGE_DRAW:
      if (usage & ANV_SUBPASS_USAGE_RESOLVE_SRC)
         return true;
      break;

   default:
      break;
   }

   for (uint32_t s = subpass_idx + 1; s < pass->subpass_count; s++) {
      usage = att->subpass_usage[s];

      /* If this attachment is going to be used as an input in this or any
       * future subpass, then we need to flush its cache and invalidate the
       * texture cache.
       */
      if (att->subpass_usage[s] & ANV_SUBPASS_USAGE_INPUT)
         return true;

      if (usage & (ANV_SUBPASS_USAGE_DRAW | ANV_SUBPASS_USAGE_RESOLVE_DST)) {
         /* We found another subpass that draws to this attachment. We'll
          * wait to resolve until then.
          */
         return false;
      }
   }

   return false;
}

static void
anv_cmd_buffer_flush_attachments(struct anv_cmd_buffer *cmd_buffer,
                                 enum subpass_stage stage)
{
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_render_pass *pass = cmd_buffer->state.pass;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      uint32_t att = subpass->color_attachments[i];
      assert(att < pass->attachment_count);
      if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
      }
   }

   if (subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
      uint32_t att = subpass->depth_stencil_attachment;
      assert(att < pass->attachment_count);
      if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
            ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
      }
   }
}

static bool
subpass_needs_clear(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   uint32_t ds = cmd_state->subpass->depth_stencil_attachment;

   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      uint32_t a = cmd_state->subpass->color_attachments[i];
      if (cmd_state->attachments[a].pending_clear_aspects) {
         return true;
      }
   }

   if (ds != VK_ATTACHMENT_UNUSED &&
       cmd_state->attachments[ds].pending_clear_aspects) {
      return true;
   }

   return false;
}

void
anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;

   if (!subpass_needs_clear(cmd_buffer))
      return;

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   VkClearRect clear_rect = {
      .rect = cmd_buffer->state.render_area,
      .baseArrayLayer = 0,
      .layerCount = cmd_buffer->state.framebuffer->layers,
   };

   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      const uint32_t a = cmd_state->subpass->color_attachments[i];
      struct anv_attachment_state *att_state = &cmd_state->attachments[a];

      if (!att_state->pending_clear_aspects)
         continue;

      assert(att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);

      struct anv_image_view *iview = fb->attachments[a];
      const struct anv_image *image = iview->image;
      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                   att_state->aux_usage, &surf);
      surf.clear_color = vk_to_isl_color(att_state->clear_value.color);

      const VkRect2D render_area = cmd_buffer->state.render_area;

      if (att_state->fast_clear) {
         blorp_fast_clear(&batch, &surf, iview->isl.format,
                          iview->isl.base_level,
                          iview->isl.base_array_layer, fb->layers,
                          render_area.offset.x, render_area.offset.y,
                          render_area.offset.x + render_area.extent.width,
                          render_area.offset.y + render_area.extent.height);

         /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
          *
          *    "After Render target fast clear, pipe-control with color cache
          *    write-flush must be issued before sending any DRAW commands on
          *    that render target."
          */
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
      } else {
         blorp_clear(&batch, &surf, iview->isl.format, iview->isl.swizzle,
                     iview->isl.base_level,
                     iview->isl.base_array_layer, fb->layers,
                     render_area.offset.x, render_area.offset.y,
                     render_area.offset.x + render_area.extent.width,
                     render_area.offset.y + render_area.extent.height,
                     surf.clear_color, NULL);
      }

      att_state->pending_clear_aspects = 0;
   }

   const uint32_t ds = cmd_state->subpass->depth_stencil_attachment;

   if (ds != VK_ATTACHMENT_UNUSED &&
       cmd_state->attachments[ds].pending_clear_aspects) {

      VkClearAttachment clear_att = {
         .aspectMask = cmd_state->attachments[ds].pending_clear_aspects,
         .clearValue = cmd_state->attachments[ds].clear_value,
      };

      clear_depth_stencil_attachment(cmd_buffer, &batch,
                                     &clear_att, 1, &clear_rect);

      cmd_state->attachments[ds].pending_clear_aspects = 0;
   }

   blorp_batch_finish(&batch);

   anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_LOAD);
}

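/* Resolves one layer of a multisampled image into a single-sampled image,
 * aspect by aspect.  blorp_blit() is expected to perform the per-sample
 * resolve when the source surface is multisampled and the destination is
 * not; the asserts below encode that shape.
 */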
static void
resolve_image(struct blorp_batch *batch,
              const struct anv_image *src_image,
              uint32_t src_level, uint32_t src_layer,
              const struct anv_image *dst_image,
              uint32_t dst_level, uint32_t dst_layer,
              VkImageAspectFlags aspect_mask,
              uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
              uint32_t width, uint32_t height)
{
   assert(src_image->type == VK_IMAGE_TYPE_2D);
   assert(src_image->samples > 1);
   assert(dst_image->type == VK_IMAGE_TYPE_2D);
   assert(dst_image->samples == 1);

   uint32_t a;
   for_each_bit(a, aspect_mask) {
      VkImageAspectFlagBits aspect = 1 << a;

      struct blorp_surf src_surf, dst_surf;
      get_blorp_surf_for_anv_image(src_image, aspect,
                                   src_image->aux_usage, &src_surf);
      get_blorp_surf_for_anv_image(dst_image, aspect,
                                   dst_image->aux_usage, &dst_surf);

      blorp_blit(batch,
                 &src_surf, src_level, src_layer,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 &dst_surf, dst_level, dst_layer,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 src_x, src_y, src_x + width, src_y + height,
                 dst_x, dst_y, dst_x + width, dst_y + height,
                 0x2600 /* GL_NEAREST */, false, false);
   }
}

void anv_CmdResolveImage(
    VkCommandBuffer commandBuffer,
    VkImage srcImage,
    VkImageLayout srcImageLayout,
    VkImage dstImage,
    VkImageLayout dstImageLayout,
    uint32_t regionCount,
    const VkImageResolve* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t r = 0; r < regionCount; r++) {
      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);
      assert(pRegions[r].srcSubresource.layerCount ==
             pRegions[r].dstSubresource.layerCount);

      const uint32_t layer_count = pRegions[r].dstSubresource.layerCount;

      for (uint32_t layer = 0; layer < layer_count; layer++) {
         resolve_image(&batch,
                       src_image, pRegions[r].srcSubresource.mipLevel,
                       pRegions[r].srcSubresource.baseArrayLayer + layer,
                       dst_image, pRegions[r].dstSubresource.mipLevel,
                       pRegions[r].dstSubresource.baseArrayLayer + layer,
                       pRegions[r].dstSubresource.aspectMask,
                       pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
                       pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
                       pRegions[r].extent.width, pRegions[r].extent.height);
      }
   }

   blorp_batch_finish(&batch);
}

static void
ccs_resolve_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       uint32_t att)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att];

   if (att_state->aux_usage == ISL_AUX_USAGE_NONE)
      return; /* Nothing to resolve */

   assert(att_state->aux_usage == ISL_AUX_USAGE_CCS_E ||
          att_state->aux_usage == ISL_AUX_USAGE_CCS_D);

   struct anv_render_pass *pass = cmd_buffer->state.pass;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   unsigned subpass_idx = subpass - pass->subpasses;
   assert(subpass_idx < pass->subpass_count);

   /* Scan forward to see all the ways this attachment will be used.
    * Ideally, we would like to resolve in the same subpass as the last write
    * of a particular attachment. That way we only resolve once but it's
    * still hot in the cache.
    */
   bool found_draw = false;
   enum anv_subpass_usage usage = 0;
   for (uint32_t s = subpass_idx + 1; s < pass->subpass_count; s++) {
      usage |= pass->attachments[att].subpass_usage[s];

      if (usage & (ANV_SUBPASS_USAGE_DRAW | ANV_SUBPASS_USAGE_RESOLVE_DST)) {
         /* We found another subpass that draws to this attachment. We'll
          * wait to resolve until then.
          */
         found_draw = true;
         break;
      }
   }

   struct anv_image_view *iview = fb->attachments[att];
   const struct anv_image *image = iview->image;
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);

   enum blorp_fast_clear_op resolve_op = BLORP_FAST_CLEAR_OP_NONE;
   if (!found_draw) {
      /* This is the last subpass that writes to this attachment so we need to
       * resolve here. Ideally, we would like to only resolve if the storeOp
       * is set to VK_ATTACHMENT_STORE_OP_STORE. However, we need to ensure
       * that the CCS bits are set to "resolved" because there may be copy or
       * blit operations (which may ignore CCS) between now and the next time
       * we render and we need to ensure that anything they write will be
       * respected in the next render. Unfortunately, the hardware does not
       * provide us with any sort of "invalidate" pass that sets the CCS to
       * "resolved" without writing to the render target.
       */
      if (iview->image->aux_usage != ISL_AUX_USAGE_CCS_E) {
         /* The image destination surface doesn't support compression outside
          * the render pass. We need a full resolve.
          */
         resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
      } else if (att_state->fast_clear) {
         /* We don't know what to do with clear colors outside the render
          * pass. We need a partial resolve.
          */
         resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
      } else {
         /* The image "natively" supports all the compression we care about
          * and we don't need to resolve at all. If this is the case, we also
          * don't need to resolve for any of the input attachment cases below.
          */
      }
   } else if (usage & ANV_SUBPASS_USAGE_INPUT) {
      /* Input attachments are clear-color aware so, at least on Sky Lake, we
       * can frequently sample from them with no resolves at all.
       */
      if (att_state->aux_usage != att_state->input_aux_usage) {
         assert(att_state->input_aux_usage == ISL_AUX_USAGE_NONE);
         resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
      } else if (!att_state->clear_color_is_zero_one) {
         /* Sky Lake PRM, Vol. 2d, RENDER_SURFACE_STATE::Red Clear Color:
          *
          *    "If Number of Multisamples is MULTISAMPLECOUNT_1 AND if this RT
          *    is fast cleared with non-0/1 clear value, this RT must be
          *    partially resolved (refer to Partial Resolve operation) before
          *    binding this surface to Sampler."
          */
         resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
      }
   }

   if (resolve_op == BLORP_FAST_CLEAR_OP_NONE)
      return;

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                att_state->aux_usage, &surf);
   surf.clear_color = vk_to_isl_color(att_state->clear_value.color);

   /* From the Sky Lake PRM Vol. 7, "Render Target Resolve":
    *
    *    "When performing a render target resolve, PIPE_CONTROL with end of
    *    pipe sync must be delivered."
    *
    * This comment is a bit cryptic and doesn't really tell you what's going
    * on or what's really needed. It appears that fast clear ops are not
    * properly synchronized with other drawing. We need to use a PIPE_CONTROL
    * to ensure that the contents of the previous draw hit the render target
    * before we resolve and then use a second PIPE_CONTROL after the resolve
    * to ensure that it is completed before any additional drawing occurs.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   for (uint32_t layer = 0; layer < fb->layers; layer++) {
      blorp_ccs_resolve(batch, &surf,
                        iview->isl.base_level,
                        iview->isl.base_array_layer + layer,
                        iview->isl.format, resolve_op);
   }

   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   /* Once we've done any sort of resolve, we're no longer fast-cleared */
   att_state->fast_clear = false;
}

void
anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      ccs_resolve_attachment(cmd_buffer, &batch,
                             subpass->color_attachments[i]);
   }

   anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_DRAW);

   if (subpass->has_resolve) {
      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t src_att = subpass->color_attachments[i];
         uint32_t dst_att = subpass->resolve_attachments[i];

         if (dst_att == VK_ATTACHMENT_UNUSED)
            continue;

         if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
            /* From the Vulkan 1.0 spec:
             *
             *    If the first use of an attachment in a render pass is as a
             *    resolve attachment, then the loadOp is effectively ignored
             *    as the resolve is guaranteed to overwrite all pixels in the
             *    render area.
             */
            cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
         }

         struct anv_image_view *src_iview = fb->attachments[src_att];
         struct anv_image_view *dst_iview = fb->attachments[dst_att];

         const VkRect2D render_area = cmd_buffer->state.render_area;

         assert(src_iview->aspect_mask == dst_iview->aspect_mask);
         resolve_image(&batch, src_iview->image,
                       src_iview->isl.base_level,
                       src_iview->isl.base_array_layer,
                       dst_iview->image,
                       dst_iview->isl.base_level,
                       dst_iview->isl.base_array_layer,
                       src_iview->aspect_mask,
                       render_area.offset.x, render_area.offset.y,
                       render_area.offset.x, render_area.offset.y,
                       render_area.extent.width, render_area.extent.height);

         ccs_resolve_attachment(cmd_buffer, &batch, dst_att);
      }

      anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_RESOLVE);
   }

   blorp_batch_finish(&batch);
}