anv: Predicate fast-clear resolves
src/intel/vulkan/anv_blorp.c (mesa.git)
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

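/* This file implements vkCmdCopy*, vkCmdBlit*, vkCmdFill*, vkCmdUpdate*,
 * vkCmdClear*, and vkCmdResolve* in terms of blorp, the Intel
 * blit-and-resolve helper library.  blorp calls back into the driver
 * through the two hooks below to look up and upload its compiled shaders.
 */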
static bool
lookup_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_shader_bin *bin =
      anv_pipeline_cache_search(&device->blorp_shader_cache, key, key_size);
   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

static bool
upload_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    const void *kernel, uint32_t kernel_size,
                    const struct brw_stage_prog_data *prog_data,
                    uint32_t prog_data_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_pipeline_bind_map bind_map = {
      .surface_count = 0,
      .sampler_count = 0,
   };

   struct anv_shader_bin *bin =
      anv_pipeline_cache_upload_kernel(&device->blorp_shader_cache,
                                       key, key_size, kernel, kernel_size,
                                       prog_data, prog_data_size, &bind_map);

   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

void
anv_device_init_blorp(struct anv_device *device)
{
   anv_pipeline_cache_init(&device->blorp_shader_cache, device, true);
   blorp_init(&device->blorp, device, &device->isl_dev);
   device->blorp.compiler = device->instance->physicalDevice.compiler;
   device->blorp.mocs.tex = device->default_mocs;
   device->blorp.mocs.rb = device->default_mocs;
   device->blorp.mocs.vb = device->default_mocs;
   device->blorp.lookup_shader = lookup_blorp_shader;
   device->blorp.upload_shader = upload_blorp_shader;
   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell) {
         device->blorp.exec = gen75_blorp_exec;
      } else {
         device->blorp.exec = gen7_blorp_exec;
      }
      break;
   case 8:
      device->blorp.exec = gen8_blorp_exec;
      break;
   case 9:
      device->blorp.exec = gen9_blorp_exec;
      break;
   case 10:
      device->blorp.exec = gen10_blorp_exec;
      break;
   default:
      unreachable("Unknown hardware generation");
   }
}

void
anv_device_finish_blorp(struct anv_device *device)
{
   blorp_finish(&device->blorp);
   anv_pipeline_cache_finish(&device->blorp_shader_cache);
}

static void
get_blorp_surf_for_anv_buffer(struct anv_device *device,
                              struct anv_buffer *buffer, uint64_t offset,
                              uint32_t width, uint32_t height,
                              uint32_t row_pitch, enum isl_format format,
                              struct blorp_surf *blorp_surf,
                              struct isl_surf *isl_surf)
{
   const struct isl_format_layout *fmtl =
      isl_format_get_layout(format);
   bool ok UNUSED;

   /* ASTC is the only format which doesn't support linear layouts.
    * Create an equivalently sized surface with ISL to get around this.
    */
   if (fmtl->txc == ISL_TXC_ASTC) {
      /* Use an equivalently sized format */
      format = ISL_FORMAT_R32G32B32A32_UINT;
      assert(fmtl->bpb == isl_format_get_layout(format)->bpb);

      /* Shrink the dimensions for the new format */
      width = DIV_ROUND_UP(width, fmtl->bw);
      height = DIV_ROUND_UP(height, fmtl->bh);
   }

   *blorp_surf = (struct blorp_surf) {
      .surf = isl_surf,
      .addr = {
         .buffer = buffer->bo,
         .offset = buffer->offset + offset,
      },
   };

   ok = isl_surf_init(&device->isl_dev, isl_surf,
                      .dim = ISL_SURF_DIM_2D,
                      .format = format,
                      .width = width,
                      .height = height,
                      .depth = 1,
                      .levels = 1,
                      .array_len = 1,
                      .samples = 1,
                      .row_pitch = row_pitch,
                      .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                               ISL_SURF_USAGE_RENDER_TARGET_BIT,
                      .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(ok);
}

static void
get_blorp_surf_for_anv_image(const struct anv_image *image,
                             VkImageAspectFlags aspect,
                             enum isl_aux_usage aux_usage,
                             struct blorp_surf *blorp_surf)
{
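   /* Stencil aspects never have an auxiliary surface, and the copy, blit,
    * and clear paths that use this helper do not operate through HiZ, so
    * treat both cases as having no aux usage.
    */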
   if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT ||
       aux_usage == ISL_AUX_USAGE_HIZ)
      aux_usage = ISL_AUX_USAGE_NONE;

   const struct anv_surface *surface =
      anv_image_get_surface_for_aspect_mask(image, aspect);

   *blorp_surf = (struct blorp_surf) {
      .surf = &surface->isl,
      .addr = {
         .buffer = image->bo,
         .offset = image->offset + surface->offset,
      },
   };

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      blorp_surf->aux_surf = &image->aux_surface.isl;
      blorp_surf->aux_addr = (struct blorp_address) {
         .buffer = image->bo,
         .offset = image->offset + image->aux_surface.offset,
      };
      blorp_surf->aux_usage = aux_usage;
   }
}

void anv_CmdCopyImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageCopy*                          pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      VkOffset3D srcOffset =
         anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
      VkOffset3D dstOffset =
         anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
      VkExtent3D extent =
         anv_sanitize_image_extent(src_image->type, pRegions[r].extent);

      unsigned dst_base_layer, layer_count;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         dst_base_layer = pRegions[r].dstOffset.z;
         layer_count = pRegions[r].extent.depth;
      } else {
         dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
         layer_count =
            anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);
      }

      unsigned src_base_layer;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         src_base_layer = pRegions[r].srcOffset.z;
      } else {
         src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
         assert(layer_count ==
                anv_get_layerCount(src_image, &pRegions[r].srcSubresource));
      }

      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);

      uint32_t a;
      for_each_bit(a, pRegions[r].dstSubresource.aspectMask) {
         VkImageAspectFlagBits aspect = (1 << a);

         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(src_image, aspect, src_image->aux_usage,
                                      &src_surf);
         get_blorp_surf_for_anv_image(dst_image, aspect, dst_image->aux_usage,
                                      &dst_surf);

         for (unsigned i = 0; i < layer_count; i++) {
            blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
                       src_base_layer + i,
                       &dst_surf, pRegions[r].dstSubresource.mipLevel,
                       dst_base_layer + i,
                       srcOffset.x, srcOffset.y,
                       dstOffset.x, dstOffset.y,
                       extent.width, extent.height);
         }
      }
   }

   blorp_batch_finish(&batch);
}

static void
copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_buffer *anv_buffer,
                     struct anv_image *anv_image,
                     uint32_t regionCount,
                     const VkBufferImageCopy* pRegions,
                     bool buffer_to_image)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct {
      struct blorp_surf surf;
      uint32_t level;
      VkOffset3D offset;
   } image, buffer, *src, *dst;

   buffer.level = 0;
   buffer.offset = (VkOffset3D) { 0, 0, 0 };

   if (buffer_to_image) {
      src = &buffer;
      dst = &image;
   } else {
      src = &image;
      dst = &buffer;
   }

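   /* The loop below is written once and handles both directions: src and
    * dst point at whichever of image/buffer is the source and destination
    * for this call.
    */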
   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;

      get_blorp_surf_for_anv_image(anv_image, aspect, anv_image->aux_usage,
                                   &image.surf);
      image.offset =
         anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
      image.level = pRegions[r].imageSubresource.mipLevel;

      VkExtent3D extent =
         anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
      if (anv_image->type != VK_IMAGE_TYPE_3D) {
         image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
         extent.depth =
            anv_get_layerCount(anv_image, &pRegions[r].imageSubresource);
      }

      const enum isl_format buffer_format =
         anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
                            aspect, VK_IMAGE_TILING_LINEAR);

      const VkExtent3D bufferImageExtent = {
         .width  = pRegions[r].bufferRowLength ?
                   pRegions[r].bufferRowLength : extent.width,
         .height = pRegions[r].bufferImageHeight ?
                   pRegions[r].bufferImageHeight : extent.height,
      };

      const struct isl_format_layout *buffer_fmtl =
         isl_format_get_layout(buffer_format);

      const uint32_t buffer_row_pitch =
         DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
         (buffer_fmtl->bpb / 8);

      const uint32_t buffer_layer_stride =
         DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
         buffer_row_pitch;

      struct isl_surf buffer_isl_surf;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    anv_buffer, pRegions[r].bufferOffset,
                                    extent.width, extent.height,
                                    buffer_row_pitch, buffer_format,
                                    &buffer.surf, &buffer_isl_surf);

      for (unsigned z = 0; z < extent.depth; z++) {
         blorp_copy(&batch, &src->surf, src->level, src->offset.z,
                    &dst->surf, dst->level, dst->offset.z,
                    src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
                    extent.width, extent.height);

         image.offset.z++;
         buffer.surf.addr.offset += buffer_layer_stride;
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdCopyBufferToImage(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   copy_buffer_to_image(cmd_buffer, src_buffer, dst_image,
                        regionCount, pRegions, true);
}

void anv_CmdCopyImageToBuffer(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   copy_buffer_to_image(cmd_buffer, dst_buffer, src_image,
                        regionCount, pRegions, false);
}

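/* Sorts each coordinate pair into ascending order and returns whether the
 * overall blit direction flipped (i.e. an odd number of pairs was swapped).
 */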
static bool
flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
{
   bool flip = false;
   if (*src0 > *src1) {
      unsigned tmp = *src0;
      *src0 = *src1;
      *src1 = tmp;
      flip = !flip;
   }

   if (*dst0 > *dst1) {
      unsigned tmp = *dst0;
      *dst0 = *dst1;
      *dst1 = tmp;
      flip = !flip;
   }

   return flip;
}

void anv_CmdBlitImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageBlit*                          pRegions,
    VkFilter                                    filter)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_surf src, dst;

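   /* blorp_blit() takes raw GL filter enums rather than VkFilter values. */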
   uint32_t gl_filter;
   switch (filter) {
   case VK_FILTER_NEAREST:
      gl_filter = 0x2600; /* GL_NEAREST */
      break;
   case VK_FILTER_LINEAR:
      gl_filter = 0x2601; /* GL_LINEAR */
      break;
   default:
      unreachable("Invalid filter");
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
      const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;

      get_blorp_surf_for_anv_image(src_image, src_res->aspectMask,
                                   src_image->aux_usage, &src);
      get_blorp_surf_for_anv_image(dst_image, dst_res->aspectMask,
                                   dst_image->aux_usage, &dst);

      struct anv_format src_format =
         anv_get_format(&cmd_buffer->device->info, src_image->vk_format,
                        src_res->aspectMask, src_image->tiling);
      struct anv_format dst_format =
         anv_get_format(&cmd_buffer->device->info, dst_image->vk_format,
                        dst_res->aspectMask, dst_image->tiling);

      unsigned dst_start, dst_end;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         assert(dst_res->baseArrayLayer == 0);
         dst_start = pRegions[r].dstOffsets[0].z;
         dst_end = pRegions[r].dstOffsets[1].z;
      } else {
         dst_start = dst_res->baseArrayLayer;
         dst_end = dst_start + anv_get_layerCount(dst_image, dst_res);
      }

      unsigned src_start, src_end;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         assert(src_res->baseArrayLayer == 0);
         src_start = pRegions[r].srcOffsets[0].z;
         src_end = pRegions[r].srcOffsets[1].z;
      } else {
         src_start = src_res->baseArrayLayer;
         src_end = src_start + anv_get_layerCount(src_image, src_res);
      }

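      /* The source and destination may cover different numbers of 3D slices
       * or array layers, so step through the source layers at a fractional
       * (and, when flipped, negative) rate per destination layer.
       */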
      bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
      float src_z_step = (float)(src_end + 1 - src_start) /
                         (float)(dst_end + 1 - dst_start);

      if (flip_z) {
         src_start = src_end;
         src_z_step *= -1;
      }

      unsigned src_x0 = pRegions[r].srcOffsets[0].x;
      unsigned src_x1 = pRegions[r].srcOffsets[1].x;
      unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
      unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
      bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);

      unsigned src_y0 = pRegions[r].srcOffsets[0].y;
      unsigned src_y1 = pRegions[r].srcOffsets[1].y;
      unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
      unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
      bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);

      const unsigned num_layers = dst_end - dst_start;
      for (unsigned i = 0; i < num_layers; i++) {
         unsigned dst_z = dst_start + i;
         unsigned src_z = src_start + i * src_z_step;

         blorp_blit(&batch, &src, src_res->mipLevel, src_z,
                    src_format.isl_format, src_format.swizzle,
                    &dst, dst_res->mipLevel, dst_z,
                    dst_format.isl_format,
                    anv_swizzle_for_render(dst_format.swizzle),
                    src_x0, src_y0, src_x1, src_y1,
                    dst_x0, dst_y0, dst_x1, dst_y1,
                    gl_filter, flip_x, flip_y);
      }
   }

   blorp_batch_finish(&batch);
}

static enum isl_format
isl_format_for_size(unsigned size_B)
{
   switch (size_B) {
   case 1:  return ISL_FORMAT_R8_UINT;
   case 2:  return ISL_FORMAT_R8G8_UINT;
   case 4:  return ISL_FORMAT_R8G8B8A8_UINT;
   case 8:  return ISL_FORMAT_R16G16B16A16_UINT;
   case 16: return ISL_FORMAT_R32G32B32A32_UINT;
   default:
      unreachable("Not a power-of-two format size");
   }
}

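/* Copies a width x height rectangle of block_size-byte texels between two
 * buffers by wrapping each of them in a linear 2D surface.  Phrasing the
 * copy as a rectangle lets a single blorp_copy() move up to
 * MAX_SURFACE_DIM * MAX_SURFACE_DIM * block_size bytes (see below).
 */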
static void
do_buffer_copy(struct blorp_batch *batch,
               struct anv_bo *src, uint64_t src_offset,
               struct anv_bo *dst, uint64_t dst_offset,
               int width, int height, int block_size)
{
   struct anv_device *device = batch->blorp->driver_ctx;

   /* The actual format we pick doesn't matter as blorp will throw it away.
    * The only thing that actually matters is the size.
    */
   enum isl_format format = isl_format_for_size(block_size);

   UNUSED bool ok;
   struct isl_surf surf;
   ok = isl_surf_init(&device->isl_dev, &surf,
                      .dim = ISL_SURF_DIM_2D,
                      .format = format,
                      .width = width,
                      .height = height,
                      .depth = 1,
                      .levels = 1,
                      .array_len = 1,
                      .samples = 1,
                      .row_pitch = width * block_size,
                      .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                               ISL_SURF_USAGE_RENDER_TARGET_BIT,
                      .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(ok);

   struct blorp_surf src_blorp_surf = {
      .surf = &surf,
      .addr = {
         .buffer = src,
         .offset = src_offset,
      },
   };

   struct blorp_surf dst_blorp_surf = {
      .surf = &surf,
      .addr = {
         .buffer = dst,
         .offset = dst_offset,
      },
   };

   blorp_copy(batch, &src_blorp_surf, 0, 0, &dst_blorp_surf, 0, 0,
              0, 0, 0, 0, width, height);
}

/**
 * Returns the greatest common divisor of a and b that is a power of two.
 */
static inline uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX in which
    * case the MIN2() will take the other one.  If both are 0 then we will
    * hit the assert above.
    */
   return 1 << MIN2(a_log2, b_log2);
}

/* This is the maximum possible width/height our HW can handle */
#define MAX_SURFACE_DIM (1ull << 14)

void anv_CmdCopyBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferCopy*                         pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      uint64_t src_offset = src_buffer->offset + pRegions[r].srcOffset;
      uint64_t dst_offset = dst_buffer->offset + pRegions[r].dstOffset;
      uint64_t copy_size = pRegions[r].size;

      /* First, we compute the biggest format that can be used with the
       * given offsets and size.
       */
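      /* For example, copying 1000 bytes from absolute offset 8 to absolute
       * offset 4 gives bs = gcd_pow2(16, 8, 4, 1000) = 4, so the copy is
       * done in 4-byte (R8G8B8A8_UINT) texels.
       */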
      int bs = 16;
      bs = gcd_pow2_u64(bs, src_offset);
      bs = gcd_pow2_u64(bs, dst_offset);
      bs = gcd_pow2_u64(bs, pRegions[r].size);

      /* Next, we make a bunch of max-sized copies */
      uint64_t max_copy_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
      while (copy_size >= max_copy_size) {
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        MAX_SURFACE_DIM, MAX_SURFACE_DIM, bs);
         copy_size -= max_copy_size;
         src_offset += max_copy_size;
         dst_offset += max_copy_size;
      }

      /* Now make a max-width copy */
      uint64_t height = copy_size / (MAX_SURFACE_DIM * bs);
      assert(height < MAX_SURFACE_DIM);
      if (height != 0) {
         uint64_t rect_copy_size = height * MAX_SURFACE_DIM * bs;
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        MAX_SURFACE_DIM, height, bs);
         copy_size -= rect_copy_size;
         src_offset += rect_copy_size;
         dst_offset += rect_copy_size;
      }

      /* Finally, make a small copy to finish it off */
      if (copy_size != 0) {
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        copy_size / bs, 1, bs);
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdUpdateBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                dataSize,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* We can't quite grab a full block because the state stream needs a
    * little data at the top to build its linked list.
    */
   const uint32_t max_update_size =
      cmd_buffer->device->dynamic_state_pool.block_size - 64;

   assert(max_update_size < MAX_SURFACE_DIM * 4);

   /* We're about to read data that was written from the CPU.  Flush the
    * texture cache so we don't get anything stale.
    */
   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;

   while (dataSize) {
      const uint32_t copy_size = MIN2(dataSize, max_update_size);

      struct anv_state tmp_data =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);

      memcpy(tmp_data.map, pData, copy_size);

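      /* Flush the freshly written data out of the CPU caches so the GPU is
       * guaranteed to see it.
       */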
      anv_state_flush(cmd_buffer->device, tmp_data);

      int bs = 16;
      bs = gcd_pow2_u64(bs, dstOffset);
      bs = gcd_pow2_u64(bs, copy_size);

      do_buffer_copy(&batch,
                     &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
                     tmp_data.offset,
                     dst_buffer->bo, dst_buffer->offset + dstOffset,
                     copy_size / bs, 1, bs);

      dataSize -= copy_size;
      dstOffset += copy_size;
      pData = (void *)pData + copy_size;
   }

   blorp_batch_finish(&batch);
}

void anv_CmdFillBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                fillSize,
    uint32_t                                    data)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
   struct blorp_surf surf;
   struct isl_surf isl_surf;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   fillSize = anv_buffer_get_range(dst_buffer, dstOffset, fillSize);

   /* From the Vulkan spec:
    *
    *    "size is the number of bytes to fill, and must be either a multiple
    *    of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
    *    the buffer.  If VK_WHOLE_SIZE is used and the remaining size of the
    *    buffer is not a multiple of 4, then the nearest smaller multiple is
    *    used."
    */
   fillSize &= ~3ull;

   /* First, we compute the biggest format that can be used with the
    * given offsets and size.
    */
   int bs = 16;
   bs = gcd_pow2_u64(bs, dstOffset);
   bs = gcd_pow2_u64(bs, fillSize);
   enum isl_format isl_format = isl_format_for_size(bs);

   union isl_color_value color = {
      .u32 = { data, data, data, data },
   };

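   /* As in anv_CmdCopyBuffer, split the fill into full max-sized rectangles,
    * then one max-width rectangle, and finally a single-row strip.
    */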
   const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
   while (fillSize >= max_fill_size) {
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                  color, NULL);
      fillSize -= max_fill_size;
      dstOffset += max_fill_size;
   }

   uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
   assert(height < MAX_SURFACE_DIM);
   if (height != 0) {
      const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, height,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
                  color, NULL);
      fillSize -= rect_fill_size;
      dstOffset += rect_fill_size;
   }

   if (fillSize != 0) {
      const uint32_t width = fillSize / bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    width, 1,
                                    width * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, width, 1,
                  color, NULL);
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearColorImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     _image,
    VkImageLayout                               imageLayout,
    const VkClearColorValue*                    pColor,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, _image);

   static const bool color_write_disable[4] = { false, false, false, false };

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                image->aux_usage, &surf);

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      assert(pRanges[r].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);

      struct anv_format src_format =
         anv_get_format(&cmd_buffer->device->info, image->vk_format,
                        VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D) {
            base_layer = 0;
            layer_count = anv_minify(image->extent.depth, level);
         }

         blorp_clear(&batch, &surf,
                     src_format.isl_format, src_format.swizzle,
                     level, base_layer, layer_count,
                     0, 0, level_width, level_height,
                     vk_to_isl_color(*pColor), color_write_disable);
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearDepthStencilImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     image_h,
    VkImageLayout                               imageLayout,
    const VkClearDepthStencilValue*             pDepthStencil,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, image_h);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth, stencil;
   if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   ISL_AUX_USAGE_NONE, &depth);
   } else {
      memset(&depth, 0, sizeof(depth));
   }

   if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   ISL_AUX_USAGE_NONE, &stencil);
   } else {
      memset(&stencil, 0, sizeof(stencil));
   }

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
      bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D)
            layer_count = anv_minify(image->extent.depth, level);

         blorp_clear_depth_stencil(&batch, &depth, &stencil,
                                   level, base_layer, layer_count,
                                   0, 0, level_width, level_height,
                                   clear_depth, pDepthStencil->depth,
                                   clear_stencil ? 0xff : 0,
                                   pDepthStencil->stencil);
      }
   }

   blorp_batch_finish(&batch);
}

VkResult
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset,
                                         struct anv_state *bt_state)
{
   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                  state_offset);
   if (bt_state->map == NULL) {
      /* We ran out of space.  Grab a new binding table block. */
      VkResult result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                     state_offset);
      assert(bt_state->map != NULL);
   }

   return VK_SUCCESS;
}

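/* Builds a one-entry binding table pointing at the given surface state and
 * returns its offset.  The table is consumed by blorp_clear_attachments()
 * and blorp_ccs_resolve_attachment() below.
 */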
static VkResult
binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_state surface_state,
                                uint32_t *bt_offset)
{
   uint32_t state_offset;
   struct anv_state bt_state;

   VkResult result =
      anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset,
                                               &bt_state);
   if (result != VK_SUCCESS)
      return result;

   uint32_t *bt_map = bt_state.map;
   bt_map[0] = surface_state.offset + state_offset;

   *bt_offset = bt_state.offset;
   return VK_SUCCESS;
}

static void
clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       const VkClearAttachment *attachment,
                       uint32_t rectCount, const VkClearRect *pRects)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t color_att = attachment->colorAttachment;
   const uint32_t att_idx = subpass->color_attachments[color_att].attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att_idx];

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer, att_state->color_rt_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   union isl_color_value clear_color =
      vk_to_isl_color(attachment->clearValue.color);

   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            blorp_clear_attachments(batch, binding_table,
                                    ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    true, clear_color, false, 0.0f, 0, 0);
         }
      }
      return;
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      blorp_clear_attachments(batch, binding_table,
                              ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              true, clear_color, false, 0.0f, 0, 0);
   }
}

static void
clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
                               struct blorp_batch *batch,
                               const VkClearAttachment *attachment,
                               uint32_t rectCount, const VkClearRect *pRects)
{
   static const union isl_color_value color_value = { .u32 = { 0, } };
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t att_idx = subpass->depth_stencil_attachment.attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];

   bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
   bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

   enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
   if (clear_depth) {
      depth_format = anv_get_isl_format(&cmd_buffer->device->info,
                                        pass_att->format,
                                        VK_IMAGE_ASPECT_DEPTH_BIT,
                                        VK_IMAGE_TILING_OPTIMAL);
   }

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer,
                                      cmd_buffer->state.null_surface_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
            blorp_clear_attachments(batch, binding_table,
                                    depth_format, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    false, color_value,
                                    clear_depth, value.depth,
                                    clear_stencil ? 0xff : 0, value.stencil);
         }
      }
      return;
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
      blorp_clear_attachments(batch, binding_table,
                              depth_format, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              false, color_value,
                              clear_depth, value.depth,
                              clear_stencil ? 0xff : 0, value.stencil);
   }
}

void anv_CmdClearAttachments(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    attachmentCount,
    const VkClearAttachment*                    pAttachments,
    uint32_t                                    rectCount,
    const VkClearRect*                          pRects)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   for (uint32_t a = 0; a < attachmentCount; ++a) {
      if (pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
         clear_color_attachment(cmd_buffer, &batch,
                                &pAttachments[a],
                                rectCount, pRects);
      } else {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &pAttachments[a],
                                        rectCount, pRects);
      }
   }

   blorp_batch_finish(&batch);
}

enum subpass_stage {
   SUBPASS_STAGE_LOAD,
   SUBPASS_STAGE_DRAW,
   SUBPASS_STAGE_RESOLVE,
};

static bool
subpass_needs_clear(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;

   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      assert(a < cmd_state->pass->attachment_count);
      if (cmd_state->attachments[a].pending_clear_aspects) {
         return true;
      }
   }

   if (ds != VK_ATTACHMENT_UNUSED) {
      assert(ds < cmd_state->pass->attachment_count);
      if (cmd_state->attachments[ds].pending_clear_aspects)
         return true;
   }

   return false;
}

void
anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   const VkRect2D render_area = cmd_buffer->state.render_area;

   if (!subpass_needs_clear(cmd_buffer))
      return;

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   VkClearRect clear_rect = {
      .rect = cmd_buffer->state.render_area,
      .baseArrayLayer = 0,
      .layerCount = cmd_buffer->state.framebuffer->layers,
   };

   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      const uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      assert(a < cmd_state->pass->attachment_count);
      struct anv_attachment_state *att_state = &cmd_state->attachments[a];

      if (!att_state->pending_clear_aspects)
         continue;

      assert(att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);

      struct anv_image_view *iview = fb->attachments[a];
      const struct anv_image *image = iview->image;
      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                   att_state->aux_usage, &surf);

      if (att_state->fast_clear) {
         surf.clear_color = vk_to_isl_color(att_state->clear_value.color);

         /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
          *
          *    "After Render target fast clear, pipe-control with color cache
          *    write-flush must be issued before sending any DRAW commands on
          *    that render target."
          *
          * This comment is a bit cryptic and doesn't really tell you what's
          * going on or what's really needed.  It appears that fast clear ops
          * are not properly synchronized with other drawing.  This means that
          * we cannot have a fast clear operation in the pipe at the same time
          * as other regular drawing operations.  We need to use a
          * PIPE_CONTROL to ensure that the contents of the previous draw hit
          * the render target before we resolve and then use a second
          * PIPE_CONTROL after the resolve to ensure that it is completed
          * before any additional drawing occurs.
          */
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

         blorp_fast_clear(&batch, &surf, iview->isl.format,
                          iview->isl.base_level,
                          iview->isl.base_array_layer, fb->layers,
                          render_area.offset.x, render_area.offset.y,
                          render_area.offset.x + render_area.extent.width,
                          render_area.offset.y + render_area.extent.height);

         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
      } else {
         blorp_clear(&batch, &surf, iview->isl.format,
                     anv_swizzle_for_render(iview->isl.swizzle),
                     iview->isl.base_level,
                     iview->isl.base_array_layer, fb->layers,
                     render_area.offset.x, render_area.offset.y,
                     render_area.offset.x + render_area.extent.width,
                     render_area.offset.y + render_area.extent.height,
                     vk_to_isl_color(att_state->clear_value.color), NULL);
      }

      att_state->pending_clear_aspects = 0;
   }

   const uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;
   assert(ds == VK_ATTACHMENT_UNUSED || ds < cmd_state->pass->attachment_count);

   if (ds != VK_ATTACHMENT_UNUSED &&
       cmd_state->attachments[ds].pending_clear_aspects) {

      VkClearAttachment clear_att = {
         .aspectMask = cmd_state->attachments[ds].pending_clear_aspects,
         .clearValue = cmd_state->attachments[ds].clear_value,
      };

      const uint8_t gen = cmd_buffer->device->info.gen;
      bool clear_with_hiz = gen >= 8 && cmd_state->attachments[ds].aux_usage ==
                            ISL_AUX_USAGE_HIZ;
      const struct anv_image_view *iview = fb->attachments[ds];

      if (clear_with_hiz) {
         const bool clear_depth = clear_att.aspectMask &
                                  VK_IMAGE_ASPECT_DEPTH_BIT;
         const bool clear_stencil = clear_att.aspectMask &
                                    VK_IMAGE_ASPECT_STENCIL_BIT;

         /* Check against restrictions for depth buffer clearing.  A great GPU
          * performance benefit isn't expected when using the HZ sequence for
          * stencil-only clears.  Therefore, we don't emit a HZ op sequence
          * for a stencil clear in addition to using the BLORP-fallback for
          * depth.
          */
         if (clear_depth) {
            if (!blorp_can_hiz_clear_depth(gen, iview->isl.format,
                                           iview->image->samples,
                                           render_area.offset.x,
                                           render_area.offset.y,
                                           render_area.offset.x +
                                           render_area.extent.width,
                                           render_area.offset.y +
                                           render_area.extent.height)) {
               clear_with_hiz = false;
            } else if (clear_att.clearValue.depthStencil.depth !=
                       ANV_HZ_FC_VAL) {
               /* Don't enable fast depth clears for any color not equal to
                * ANV_HZ_FC_VAL.
                */
               clear_with_hiz = false;
            } else if (gen == 8 &&
                       anv_can_sample_with_hiz(&cmd_buffer->device->info,
                                               iview->aspect_mask,
                                               iview->image->samples)) {
               /* Only gen9+ supports returning ANV_HZ_FC_VAL when sampling a
                * fast-cleared portion of a HiZ buffer.  Testing has revealed
                * that Gen8 only supports returning 0.0f.  Gens prior to gen8
                * do not support this feature at all.
                */
               clear_with_hiz = false;
            }
         }

         if (clear_with_hiz) {
            blorp_gen8_hiz_clear_attachments(&batch, iview->image->samples,
                                             render_area.offset.x,
                                             render_area.offset.y,
                                             render_area.offset.x +
                                             render_area.extent.width,
                                             render_area.offset.y +
                                             render_area.extent.height,
                                             clear_depth, clear_stencil,
                                             clear_att.clearValue.depthStencil.stencil);

            /* From the SKL PRM, Depth Buffer Clear:
             *
             *    Depth Buffer Clear Workaround
             *    Depth buffer clear pass using any of the methods (WM_STATE,
             *    3DSTATE_WM or 3DSTATE_WM_HZ_OP) must be followed by a
             *    PIPE_CONTROL command with DEPTH_STALL bit and Depth FLUSH
             *    bits “set” before starting to render.  DepthStall and
             *    DepthFlush are not needed between consecutive depth clear
             *    passes nor is it required if the depth-clear pass was done
             *    with “full_surf_clear” bit set in the 3DSTATE_WM_HZ_OP.
             */
            if (clear_depth) {
               cmd_buffer->state.pending_pipe_bits |=
                  ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | ANV_PIPE_DEPTH_STALL_BIT;
            }
         }
      }

      if (!clear_with_hiz) {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &clear_att, 1, &clear_rect);
      }

      cmd_state->attachments[ds].pending_clear_aspects = 0;
   }

   blorp_batch_finish(&batch);
}

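/* Resolves a single (level, layer) of every aspect in aspect_mask from a
 * multisampled source image into a single-sampled destination using a
 * nearest-filtered blorp blit.
 */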
static void
resolve_image(struct blorp_batch *batch,
              const struct anv_image *src_image,
              enum isl_aux_usage src_aux_usage,
              uint32_t src_level, uint32_t src_layer,
              const struct anv_image *dst_image,
              enum isl_aux_usage dst_aux_usage,
              uint32_t dst_level, uint32_t dst_layer,
              VkImageAspectFlags aspect_mask,
              uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
              uint32_t width, uint32_t height)
{
   assert(src_image->type == VK_IMAGE_TYPE_2D);
   assert(src_image->samples > 1);
   assert(dst_image->type == VK_IMAGE_TYPE_2D);
   assert(dst_image->samples == 1);

   uint32_t a;
   for_each_bit(a, aspect_mask) {
      VkImageAspectFlagBits aspect = 1 << a;

      struct blorp_surf src_surf, dst_surf;
      get_blorp_surf_for_anv_image(src_image, aspect,
                                   src_aux_usage, &src_surf);
      get_blorp_surf_for_anv_image(dst_image, aspect,
                                   dst_aux_usage, &dst_surf);

      blorp_blit(batch,
                 &src_surf, src_level, src_layer,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 &dst_surf, dst_level, dst_layer,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 src_x, src_y, src_x + width, src_y + height,
                 dst_x, dst_y, dst_x + width, dst_y + height,
                 0x2600 /* GL_NEAREST */, false, false);
   }
}

void anv_CmdResolveImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageResolve*                       pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t r = 0; r < regionCount; r++) {
      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);
      assert(anv_get_layerCount(src_image, &pRegions[r].srcSubresource) ==
             anv_get_layerCount(dst_image, &pRegions[r].dstSubresource));

      const uint32_t layer_count =
         anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);

      for (uint32_t layer = 0; layer < layer_count; layer++) {
         resolve_image(&batch,
                       src_image, src_image->aux_usage,
                       pRegions[r].srcSubresource.mipLevel,
                       pRegions[r].srcSubresource.baseArrayLayer + layer,
                       dst_image, dst_image->aux_usage,
                       pRegions[r].dstSubresource.mipLevel,
                       pRegions[r].dstSubresource.baseArrayLayer + layer,
                       pRegions[r].dstSubresource.aspectMask,
                       pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
                       pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
                       pRegions[r].extent.width, pRegions[r].extent.height);
      }
   }

   blorp_batch_finish(&batch);
}

void
anv_image_fast_clear(struct anv_cmd_buffer *cmd_buffer,
                     const struct anv_image *image,
                     const uint32_t base_level, const uint32_t level_count,
                     const uint32_t base_layer, uint32_t layer_count)
{
   assert(image->type == VK_IMAGE_TYPE_3D || image->extent.depth == 1);

   if (image->type == VK_IMAGE_TYPE_3D) {
      assert(base_layer == 0);
      assert(layer_count == anv_minify(image->extent.depth, base_level));
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                image->aux_usage == ISL_AUX_USAGE_NONE ?
                                ISL_AUX_USAGE_CCS_D : image->aux_usage,
                                &surf);

   /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
    *
    *    "After Render target fast clear, pipe-control with color cache
    *    write-flush must be issued before sending any DRAW commands on
    *    that render target."
    *
    * This comment is a bit cryptic and doesn't really tell you what's going
    * on or what's really needed.  It appears that fast clear ops are not
    * properly synchronized with other drawing.  This means that we cannot
    * have a fast clear operation in the pipe at the same time as other
    * regular drawing operations.  We need to use a PIPE_CONTROL to ensure
    * that the contents of the previous draw hit the render target before we
    * resolve and then use a second PIPE_CONTROL after the resolve to ensure
    * that it is completed before any additional drawing occurs.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   for (uint32_t l = 0; l < level_count; l++) {
      const uint32_t level = base_level + l;

      const VkExtent3D extent = {
         .width = anv_minify(image->extent.width, level),
         .height = anv_minify(image->extent.height, level),
         .depth = anv_minify(image->extent.depth, level),
      };

      if (image->type == VK_IMAGE_TYPE_3D)
         layer_count = extent.depth;

      assert(level < anv_image_aux_levels(image));
      assert(base_layer + layer_count <= anv_image_aux_layers(image, level));
      blorp_fast_clear(&batch, &surf, surf.surf->format,
                       level, base_layer, layer_count,
                       0, 0, extent.width, extent.height);
   }

   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
}

void
anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;

   if (subpass->has_resolve) {
      struct blorp_batch batch;
      blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

      /* We are about to do some MSAA resolves.  We need to flush so that the
       * result of writes to the MSAA color attachments show up in the sampler
       * when we blit to the single-sampled resolve target.
       */
      cmd_buffer->state.pending_pipe_bits |=
         ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
         ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;

      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t src_att = subpass->color_attachments[i].attachment;
         uint32_t dst_att = subpass->resolve_attachments[i].attachment;

         if (dst_att == VK_ATTACHMENT_UNUSED)
            continue;

         assert(src_att < cmd_buffer->state.pass->attachment_count);
         assert(dst_att < cmd_buffer->state.pass->attachment_count);

         if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
            /* From the Vulkan 1.0 spec:
             *
             *    If the first use of an attachment in a render pass is as a
             *    resolve attachment, then the loadOp is effectively ignored
             *    as the resolve is guaranteed to overwrite all pixels in the
             *    render area.
             */
            cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
         }

         struct anv_image_view *src_iview = fb->attachments[src_att];
         struct anv_image_view *dst_iview = fb->attachments[dst_att];

         enum isl_aux_usage src_aux_usage =
            cmd_buffer->state.attachments[src_att].aux_usage;
         enum isl_aux_usage dst_aux_usage =
            cmd_buffer->state.attachments[dst_att].aux_usage;

         const VkRect2D render_area = cmd_buffer->state.render_area;

         assert(src_iview->aspect_mask == dst_iview->aspect_mask);

         resolve_image(&batch, src_iview->image, src_aux_usage,
                       src_iview->isl.base_level,
                       src_iview->isl.base_array_layer,
                       dst_iview->image, dst_aux_usage,
                       dst_iview->isl.base_level,
                       dst_iview->isl.base_array_layer,
                       src_iview->aspect_mask,
                       render_area.offset.x, render_area.offset.y,
                       render_area.offset.x, render_area.offset.y,
                       render_area.extent.width, render_area.extent.height);
      }

      blorp_batch_finish(&batch);
   }
}

void
anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        enum blorp_hiz_op op)
{
   assert(image);

   /* Don't resolve depth buffers without an auxiliary HiZ buffer and
    * don't perform such a resolve on gens that don't support it.
    */
   if (cmd_buffer->device->info.gen < 8 ||
       image->aux_usage != ISL_AUX_USAGE_HIZ)
      return;

   assert(op == BLORP_HIZ_OP_HIZ_RESOLVE ||
          op == BLORP_HIZ_OP_DEPTH_RESOLVE);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                ISL_AUX_USAGE_NONE, &surf);

   /* Manually add the aux HiZ surf */
   surf.aux_surf = &image->aux_surface.isl;
   surf.aux_addr = (struct blorp_address) {
      .buffer = image->bo,
      .offset = image->offset + image->aux_surface.offset,
   };
   surf.aux_usage = ISL_AUX_USAGE_HIZ;

   surf.clear_color.f32[0] = ANV_HZ_FC_VAL;

   blorp_hiz_op(&batch, &surf, 0, 0, 1, op);
   blorp_batch_finish(&batch);
}

void
anv_ccs_resolve(struct anv_cmd_buffer * const cmd_buffer,
                const struct anv_state surface_state,
                const struct anv_image * const image,
                const uint8_t level, const uint32_t layer_count,
                const enum blorp_fast_clear_op op)
{
   assert(cmd_buffer && image);

   /* The resolved subresource range must have a CCS buffer. */
   assert(level < anv_image_aux_levels(image));
   assert(layer_count <= anv_image_aux_layers(image, level));
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT && image->samples == 1);

   /* Create a binding table for this surface state. */
   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer, surface_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

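   /* With BLORP_BATCH_PREDICATE_ENABLE, blorp emits the resolve with
    * predication enabled, so it only takes effect when the command buffer
    * has armed MI_PREDICATE, i.e. when a fast clear actually occurred since
    * the last resolve.
    */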
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_PREDICATE_ENABLE);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                image->aux_usage == ISL_AUX_USAGE_CCS_E ?
                                ISL_AUX_USAGE_CCS_E : ISL_AUX_USAGE_CCS_D,
                                &surf);

   blorp_ccs_resolve_attachment(&batch, binding_table, &surf, level,
                                layer_count, image->color_surface.isl.format,
                                op);

   blorp_batch_finish(&batch);
}