anv: Enable VK_KHR_16bit_storage for SSBO and UBO
[mesa.git] / src / intel / vulkan / anv_blorp.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "anv_private.h"
25
26 static bool
27 lookup_blorp_shader(struct blorp_context *blorp,
28 const void *key, uint32_t key_size,
29 uint32_t *kernel_out, void *prog_data_out)
30 {
31 struct anv_device *device = blorp->driver_ctx;
32
33 /* The blorp cache must be a real cache */
34 assert(device->blorp_shader_cache.cache);
35
36 struct anv_shader_bin *bin =
37 anv_pipeline_cache_search(&device->blorp_shader_cache, key, key_size);
38 if (!bin)
39 return false;
40
41 /* The cache already has a reference and it's not going anywhere so there
42 * is no need to hold a second reference.
43 */
44 anv_shader_bin_unref(device, bin);
45
46 *kernel_out = bin->kernel.offset;
47 *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;
48
49 return true;
50 }
51
52 static bool
53 upload_blorp_shader(struct blorp_context *blorp,
54 const void *key, uint32_t key_size,
55 const void *kernel, uint32_t kernel_size,
56 const struct brw_stage_prog_data *prog_data,
57 uint32_t prog_data_size,
58 uint32_t *kernel_out, void *prog_data_out)
59 {
60 struct anv_device *device = blorp->driver_ctx;
61
62 /* The blorp cache must be a real cache */
63 assert(device->blorp_shader_cache.cache);
64
65 struct anv_pipeline_bind_map bind_map = {
66 .surface_count = 0,
67 .sampler_count = 0,
68 };
69
70 struct anv_shader_bin *bin =
71 anv_pipeline_cache_upload_kernel(&device->blorp_shader_cache,
72 key, key_size, kernel, kernel_size,
73 prog_data, prog_data_size, &bind_map);
74
75 if (!bin)
76 return false;
77
78 /* The cache already has a reference and it's not going anywhere so there
79 * is no need to hold a second reference.
80 */
81 anv_shader_bin_unref(device, bin);
82
83 *kernel_out = bin->kernel.offset;
84 *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;
85
86 return true;
87 }
88
89 void
90 anv_device_init_blorp(struct anv_device *device)
91 {
92 anv_pipeline_cache_init(&device->blorp_shader_cache, device, true);
93 blorp_init(&device->blorp, device, &device->isl_dev);
94 device->blorp.compiler = device->instance->physicalDevice.compiler;
95 device->blorp.lookup_shader = lookup_blorp_shader;
96 device->blorp.upload_shader = upload_blorp_shader;
97 switch (device->info.gen) {
98 case 7:
99 if (device->info.is_haswell) {
100 device->blorp.exec = gen75_blorp_exec;
101 } else {
102 device->blorp.exec = gen7_blorp_exec;
103 }
104 break;
105 case 8:
106 device->blorp.exec = gen8_blorp_exec;
107 break;
108 case 9:
109 device->blorp.exec = gen9_blorp_exec;
110 break;
111 case 10:
112 device->blorp.exec = gen10_blorp_exec;
113 break;
114 case 11:
115 device->blorp.exec = gen11_blorp_exec;
116 break;
117 default:
118 unreachable("Unknown hardware generation");
119 }
120 }
121
/* Tear down the blorp context and then its private shader cache
 * (counterpart of anv_device_init_blorp).
 */
void
anv_device_finish_blorp(struct anv_device *device)
{
   blorp_finish(&device->blorp);
   anv_pipeline_cache_finish(&device->blorp_shader_cache);
}
128
/* Fill out a blorp_surf (backed by *isl_surf) that views a byte range of
 * an anv_buffer as a linear 2D image of the given format and dimensions.
 *
 * For ASTC formats, which ISL cannot lay out linearly, an equally sized
 * R32G32B32A32_UINT surface with one texel per compressed block is used
 * instead; this is bit-compatible for raw copies.
 */
static void
get_blorp_surf_for_anv_buffer(struct anv_device *device,
                              struct anv_buffer *buffer, uint64_t offset,
                              uint32_t width, uint32_t height,
                              uint32_t row_pitch, enum isl_format format,
                              struct blorp_surf *blorp_surf,
                              struct isl_surf *isl_surf)
{
   const struct isl_format_layout *fmtl =
      isl_format_get_layout(format);
   bool ok UNUSED;

   /* ASTC is the only format which doesn't support linear layouts.
    * Create an equivalently sized surface with ISL to get around this.
    */
   if (fmtl->txc == ISL_TXC_ASTC) {
      /* Use an equivalently sized format */
      format = ISL_FORMAT_R32G32B32A32_UINT;
      assert(fmtl->bpb == isl_format_get_layout(format)->bpb);

      /* Shrink the dimensions for the new format: one texel per
       * bw x bh compressed block of the original format.
       */
      width = DIV_ROUND_UP(width, fmtl->bw);
      height = DIV_ROUND_UP(height, fmtl->bh);
   }

   *blorp_surf = (struct blorp_surf) {
      .surf = isl_surf,
      .addr = {
         .buffer = buffer->bo,
         .offset = buffer->offset + offset,
         .mocs = device->default_mocs,
      },
   };

   /* Single-level, single-layer, single-sample linear surface; the caller's
    * row_pitch carries the buffer's row stride.
    */
   ok = isl_surf_init(&device->isl_dev, isl_surf,
                      .dim = ISL_SURF_DIM_2D,
                      .format = format,
                      .width = width,
                      .height = height,
                      .depth = 1,
                      .levels = 1,
                      .array_len = 1,
                      .samples = 1,
                      .row_pitch = row_pitch,
                      .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                               ISL_SURF_USAGE_RENDER_TARGET_BIT,
                      .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(ok);
}
178
179 /* Pick something high enough that it won't be used in core and low enough it
180 * will never map to an extension.
181 */
182 #define ANV_IMAGE_LAYOUT_EXPLICIT_AUX (VkImageLayout)10000000
183
184 static struct blorp_address
185 anv_to_blorp_address(struct anv_address addr)
186 {
187 return (struct blorp_address) {
188 .buffer = addr.bo,
189 .offset = addr.offset,
190 };
191 }
192
193 static void
194 get_blorp_surf_for_anv_image(const struct anv_device *device,
195 const struct anv_image *image,
196 VkImageAspectFlags aspect,
197 VkImageLayout layout,
198 enum isl_aux_usage aux_usage,
199 struct blorp_surf *blorp_surf)
200 {
201 uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
202
203 if (layout != ANV_IMAGE_LAYOUT_EXPLICIT_AUX)
204 aux_usage = anv_layout_to_aux_usage(&device->info, image, aspect, layout);
205
206 const struct anv_surface *surface = &image->planes[plane].surface;
207 *blorp_surf = (struct blorp_surf) {
208 .surf = &surface->isl,
209 .addr = {
210 .buffer = image->planes[plane].bo,
211 .offset = image->planes[plane].bo_offset + surface->offset,
212 .mocs = device->default_mocs,
213 },
214 };
215
216 if (aux_usage != ISL_AUX_USAGE_NONE) {
217 const struct anv_surface *aux_surface = &image->planes[plane].aux_surface;
218 blorp_surf->aux_surf = &aux_surface->isl,
219 blorp_surf->aux_addr = (struct blorp_address) {
220 .buffer = image->planes[plane].bo,
221 .offset = image->planes[plane].bo_offset + aux_surface->offset,
222 .mocs = device->default_mocs,
223 };
224 blorp_surf->aux_usage = aux_usage;
225 }
226 }
227
/* vkCmdCopyImage: raw (bit-exact) copy of one or more regions between two
 * compatible images, implemented with blorp_copy one 2D layer at a time.
 */
void anv_CmdCopyImage(
    VkCommandBuffer commandBuffer,
    VkImage srcImage,
    VkImageLayout srcImageLayout,
    VkImage dstImage,
    VkImageLayout dstImageLayout,
    uint32_t regionCount,
    const VkImageCopy* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      /* Canonicalize offsets/extents across 1D/2D/3D image types. */
      VkOffset3D srcOffset =
         anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
      VkOffset3D dstOffset =
         anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
      VkExtent3D extent =
         anv_sanitize_image_extent(src_image->type, pRegions[r].extent);

      const uint32_t dst_level = pRegions[r].dstSubresource.mipLevel;
      unsigned dst_base_layer, layer_count;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         /* For 3D images, depth slices play the role of array layers. */
         dst_base_layer = pRegions[r].dstOffset.z;
         layer_count = pRegions[r].extent.depth;
      } else {
         dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
         layer_count =
            anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);
      }

      const uint32_t src_level = pRegions[r].srcSubresource.mipLevel;
      unsigned src_base_layer;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         src_base_layer = pRegions[r].srcOffset.z;
      } else {
         src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
         /* The spec requires matching layer counts on both sides. */
         assert(layer_count ==
                anv_get_layerCount(src_image, &pRegions[r].srcSubresource));
      }

      VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
         dst_mask = pRegions[r].dstSubresource.aspectMask;

      assert(anv_image_aspects_compatible(src_mask, dst_mask));

      if (_mesa_bitcount(src_mask) > 1) {
         /* Multi-aspect (multi-planar) copy: each plane is a separate
          * surface, so copy them one aspect bit at a time.
          */
         uint32_t aspect_bit;
         anv_foreach_image_aspect_bit(aspect_bit, src_image, src_mask) {
            struct blorp_surf src_surf, dst_surf;
            get_blorp_surf_for_anv_image(cmd_buffer->device,
                                         src_image, 1UL << aspect_bit,
                                         srcImageLayout, ISL_AUX_USAGE_NONE,
                                         &src_surf);
            get_blorp_surf_for_anv_image(cmd_buffer->device,
                                         dst_image, 1UL << aspect_bit,
                                         dstImageLayout, ISL_AUX_USAGE_NONE,
                                         &dst_surf);
            anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image,
                                              1UL << aspect_bit,
                                              dst_surf.aux_usage, dst_level,
                                              dst_base_layer, layer_count);

            for (unsigned i = 0; i < layer_count; i++) {
               blorp_copy(&batch, &src_surf, src_level, src_base_layer + i,
                          &dst_surf, dst_level, dst_base_layer + i,
                          srcOffset.x, srcOffset.y,
                          dstOffset.x, dstOffset.y,
                          extent.width, extent.height);
            }
         }
      } else {
         /* Single-aspect copy: one surface pair, copied layer by layer. */
         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(cmd_buffer->device, src_image, src_mask,
                                      srcImageLayout, ISL_AUX_USAGE_NONE,
                                      &src_surf);
         get_blorp_surf_for_anv_image(cmd_buffer->device, dst_image, dst_mask,
                                      dstImageLayout, ISL_AUX_USAGE_NONE,
                                      &dst_surf);
         anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image, dst_mask,
                                           dst_surf.aux_usage, dst_level,
                                           dst_base_layer, layer_count);

         for (unsigned i = 0; i < layer_count; i++) {
            blorp_copy(&batch, &src_surf, src_level, src_base_layer + i,
                       &dst_surf, dst_level, dst_base_layer + i,
                       srcOffset.x, srcOffset.y,
                       dstOffset.x, dstOffset.y,
                       extent.width, extent.height);
         }
      }
   }

   blorp_batch_finish(&batch);
}
327
/* Shared implementation of vkCmdCopyBufferToImage and
 * vkCmdCopyImageToBuffer.  The buffer side is wrapped in a linear
 * single-layer surface and copied against the image one z-slice/layer at a
 * time; buffer_to_image selects which side is the destination.
 */
static void
copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_buffer *anv_buffer,
                     struct anv_image *anv_image,
                     VkImageLayout image_layout,
                     uint32_t regionCount,
                     const VkBufferImageCopy* pRegions,
                     bool buffer_to_image)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* Symmetric bookkeeping for both sides; src/dst point at one of
    * image/buffer depending on the copy direction.
    */
   struct {
      struct blorp_surf surf;
      uint32_t level;
      VkOffset3D offset;
   } image, buffer, *src, *dst;

   /* The buffer view is always a fresh level-0, origin-0 surface. */
   buffer.level = 0;
   buffer.offset = (VkOffset3D) { 0, 0, 0 };

   if (buffer_to_image) {
      src = &buffer;
      dst = &image;
   } else {
      src = &image;
      dst = &buffer;
   }

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;

      get_blorp_surf_for_anv_image(cmd_buffer->device, anv_image, aspect,
                                   image_layout, ISL_AUX_USAGE_NONE,
                                   &image.surf);
      image.offset =
         anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
      image.level = pRegions[r].imageSubresource.mipLevel;

      VkExtent3D extent =
         anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
      if (anv_image->type != VK_IMAGE_TYPE_3D) {
         /* For array images, fold the layer range into the z range so the
          * per-slice loop below handles both cases uniformly.
          */
         image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
         extent.depth =
            anv_get_layerCount(anv_image, &pRegions[r].imageSubresource);
      }

      const enum isl_format buffer_format =
         anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
                            aspect, VK_IMAGE_TILING_LINEAR);

      /* Per the Vulkan spec, zero bufferRowLength/bufferImageHeight mean
       * "tightly packed to the image extent".
       */
      const VkExtent3D bufferImageExtent = {
         .width = pRegions[r].bufferRowLength ?
                  pRegions[r].bufferRowLength : extent.width,
         .height = pRegions[r].bufferImageHeight ?
                   pRegions[r].bufferImageHeight : extent.height,
      };

      const struct isl_format_layout *buffer_fmtl =
         isl_format_get_layout(buffer_format);

      /* Row pitch in bytes, rounded up to whole compressed blocks. */
      const uint32_t buffer_row_pitch =
         DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
         (buffer_fmtl->bpb / 8);

      const uint32_t buffer_layer_stride =
         DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
         buffer_row_pitch;

      struct isl_surf buffer_isl_surf;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    anv_buffer, pRegions[r].bufferOffset,
                                    extent.width, extent.height,
                                    buffer_row_pitch, buffer_format,
                                    &buffer.surf, &buffer_isl_surf);

      if (&image == dst) {
         anv_cmd_buffer_mark_image_written(cmd_buffer, anv_image,
                                           aspect, dst->surf.aux_usage,
                                           dst->level,
                                           dst->offset.z, extent.depth);
      }

      for (unsigned z = 0; z < extent.depth; z++) {
         blorp_copy(&batch, &src->surf, src->level, src->offset.z,
                    &dst->surf, dst->level, dst->offset.z,
                    src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
                    extent.width, extent.height);

         /* Step to the next slice: the image advances in z while the buffer
          * view advances by one layer stride in memory.
          */
         image.offset.z++;
         buffer.surf.addr.offset += buffer_layer_stride;
      }
   }

   blorp_batch_finish(&batch);
}
424
425 void anv_CmdCopyBufferToImage(
426 VkCommandBuffer commandBuffer,
427 VkBuffer srcBuffer,
428 VkImage dstImage,
429 VkImageLayout dstImageLayout,
430 uint32_t regionCount,
431 const VkBufferImageCopy* pRegions)
432 {
433 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
434 ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
435 ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
436
437 copy_buffer_to_image(cmd_buffer, src_buffer, dst_image, dstImageLayout,
438 regionCount, pRegions, true);
439 }
440
441 void anv_CmdCopyImageToBuffer(
442 VkCommandBuffer commandBuffer,
443 VkImage srcImage,
444 VkImageLayout srcImageLayout,
445 VkBuffer dstBuffer,
446 uint32_t regionCount,
447 const VkBufferImageCopy* pRegions)
448 {
449 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
450 ANV_FROM_HANDLE(anv_image, src_image, srcImage);
451 ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
452
453 copy_buffer_to_image(cmd_buffer, dst_buffer, src_image, srcImageLayout,
454 regionCount, pRegions, false);
455 }
456
/* Sort each coordinate pair into ascending order.
 *
 * Returns true when exactly one of the two pairs had to be swapped,
 * i.e. the blit must mirror along this axis.
 */
static bool
flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
{
   bool src_swapped = false;
   bool dst_swapped = false;

   if (*src0 > *src1) {
      unsigned t = *src1;
      *src1 = *src0;
      *src0 = t;
      src_swapped = true;
   }

   if (*dst0 > *dst1) {
      unsigned t = *dst1;
      *dst1 = *dst0;
      *dst0 = t;
      dst_swapped = true;
   }

   /* Two swaps cancel out; only an odd number of swaps flips the axis. */
   return src_swapped != dst_swapped;
}
477
/* vkCmdBlitImage: scaled/filtered copy between images, with support for
 * coordinate flips (regions whose offsets[1] < offsets[0]) and stepping
 * through the source z range when blitting 3D images.
 */
void anv_CmdBlitImage(
    VkCommandBuffer commandBuffer,
    VkImage srcImage,
    VkImageLayout srcImageLayout,
    VkImage dstImage,
    VkImageLayout dstImageLayout,
    uint32_t regionCount,
    const VkImageBlit* pRegions,
    VkFilter filter)

{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_surf src, dst;

   /* blorp_blit takes GL filter enums rather than VkFilter. */
   uint32_t gl_filter;
   switch (filter) {
   case VK_FILTER_NEAREST:
      gl_filter = 0x2600; /* GL_NEAREST */
      break;
   case VK_FILTER_LINEAR:
      gl_filter = 0x2601; /* GL_LINEAR */
      break;
   default:
      unreachable("Invalid filter");
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
      const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;

      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   src_image, src_res->aspectMask,
                                   srcImageLayout, ISL_AUX_USAGE_NONE, &src);
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   dst_image, dst_res->aspectMask,
                                   dstImageLayout, ISL_AUX_USAGE_NONE, &dst);

      struct anv_format_plane src_format =
         anv_get_format_plane(&cmd_buffer->device->info, src_image->vk_format,
                              src_res->aspectMask, src_image->tiling);
      struct anv_format_plane dst_format =
         anv_get_format_plane(&cmd_buffer->device->info, dst_image->vk_format,
                              dst_res->aspectMask, dst_image->tiling);

      /* For 3D images the z range comes from the region offsets; for array
       * images it comes from the subresource layer range.
       */
      unsigned dst_start, dst_end;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         assert(dst_res->baseArrayLayer == 0);
         dst_start = pRegions[r].dstOffsets[0].z;
         dst_end = pRegions[r].dstOffsets[1].z;
      } else {
         dst_start = dst_res->baseArrayLayer;
         dst_end = dst_start + anv_get_layerCount(dst_image, dst_res);
      }

      unsigned src_start, src_end;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         assert(src_res->baseArrayLayer == 0);
         src_start = pRegions[r].srcOffsets[0].z;
         src_end = pRegions[r].srcOffsets[1].z;
      } else {
         src_start = src_res->baseArrayLayer;
         src_end = src_start + anv_get_layerCount(src_image, src_res);
      }

      bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
      /* Source z advance per destination slice; fractional when the blit
       * scales in z.
       */
      float src_z_step = (float)(src_end + 1 - src_start) /
                         (float)(dst_end + 1 - dst_start);

      /* When z is mirrored, walk the source range backwards. */
      if (flip_z) {
         src_start = src_end;
         src_z_step *= -1;
      }

      unsigned src_x0 = pRegions[r].srcOffsets[0].x;
      unsigned src_x1 = pRegions[r].srcOffsets[1].x;
      unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
      unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
      bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);

      unsigned src_y0 = pRegions[r].srcOffsets[0].y;
      unsigned src_y1 = pRegions[r].srcOffsets[1].y;
      unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
      unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
      bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);

      const unsigned num_layers = dst_end - dst_start;
      anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image,
                                        dst_res->aspectMask,
                                        dst.aux_usage,
                                        dst_res->mipLevel,
                                        dst_start, num_layers);

      for (unsigned i = 0; i < num_layers; i++) {
         unsigned dst_z = dst_start + i;
         /* Truncated to the nearest source slice. */
         unsigned src_z = src_start + i * src_z_step;

         blorp_blit(&batch, &src, src_res->mipLevel, src_z,
                    src_format.isl_format, src_format.swizzle,
                    &dst, dst_res->mipLevel, dst_z,
                    dst_format.isl_format,
                    anv_swizzle_for_render(dst_format.swizzle),
                    src_x0, src_y0, src_x1, src_y1,
                    dst_x0, dst_y0, dst_x1, dst_y1,
                    gl_filter, flip_x, flip_y);
      }

   }

   blorp_batch_finish(&batch);
}
594
595 static enum isl_format
596 isl_format_for_size(unsigned size_B)
597 {
598 switch (size_B) {
599 case 4: return ISL_FORMAT_R32_UINT;
600 case 8: return ISL_FORMAT_R32G32_UINT;
601 case 16: return ISL_FORMAT_R32G32B32A32_UINT;
602 default:
603 unreachable("Not a power-of-two format size");
604 }
605 }
606
/**
 * Returns the greatest common divisor of a and b that is a power of two.
 *
 * That is the smaller of the lowest set bits of a and b.  If exactly one
 * of the two is zero, the lowest set bit of the other is returned; both
 * being zero is asserted against.
 */
static uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   /* x & -x isolates the lowest set bit of x (and maps 0 to 0).  Doing the
    * whole computation in uint64_t fixes the previous "1 << log2"
    * formulation, which shifted a 32-bit int and was undefined for inputs
    * whose lowest set bit is at position 31 or above.
    */
   uint64_t a_bit = a & (0 - a);
   uint64_t b_bit = b & (0 - b);

   if (a_bit == 0)
      return b_bit;
   if (b_bit == 0)
      return a_bit;

   return a_bit < b_bit ? a_bit : b_bit;
}
624
625 /* This is maximum possible width/height our HW can handle */
626 #define MAX_SURFACE_DIM (1ull << 14)
627
628 void anv_CmdCopyBuffer(
629 VkCommandBuffer commandBuffer,
630 VkBuffer srcBuffer,
631 VkBuffer dstBuffer,
632 uint32_t regionCount,
633 const VkBufferCopy* pRegions)
634 {
635 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
636 ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
637 ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
638
639 struct blorp_batch batch;
640 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
641
642 for (unsigned r = 0; r < regionCount; r++) {
643 struct blorp_address src = {
644 .buffer = src_buffer->bo,
645 .offset = src_buffer->offset + pRegions[r].srcOffset,
646 .mocs = cmd_buffer->device->default_mocs,
647 };
648 struct blorp_address dst = {
649 .buffer = dst_buffer->bo,
650 .offset = dst_buffer->offset + pRegions[r].dstOffset,
651 .mocs = cmd_buffer->device->default_mocs,
652 };
653
654 blorp_buffer_copy(&batch, src, dst, pRegions[r].size);
655 }
656
657 blorp_batch_finish(&batch);
658 }
659
660 void anv_CmdUpdateBuffer(
661 VkCommandBuffer commandBuffer,
662 VkBuffer dstBuffer,
663 VkDeviceSize dstOffset,
664 VkDeviceSize dataSize,
665 const void* pData)
666 {
667 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
668 ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
669
670 struct blorp_batch batch;
671 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
672
673 /* We can't quite grab a full block because the state stream needs a
674 * little data at the top to build its linked list.
675 */
676 const uint32_t max_update_size =
677 cmd_buffer->device->dynamic_state_pool.block_size - 64;
678
679 assert(max_update_size < MAX_SURFACE_DIM * 4);
680
681 /* We're about to read data that was written from the CPU. Flush the
682 * texture cache so we don't get anything stale.
683 */
684 cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
685
686 while (dataSize) {
687 const uint32_t copy_size = MIN2(dataSize, max_update_size);
688
689 struct anv_state tmp_data =
690 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);
691
692 memcpy(tmp_data.map, pData, copy_size);
693
694 anv_state_flush(cmd_buffer->device, tmp_data);
695
696 struct blorp_address src = {
697 .buffer = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
698 .offset = tmp_data.offset,
699 .mocs = cmd_buffer->device->default_mocs,
700 };
701 struct blorp_address dst = {
702 .buffer = dst_buffer->bo,
703 .offset = dst_buffer->offset + dstOffset,
704 .mocs = cmd_buffer->device->default_mocs,
705 };
706
707 blorp_buffer_copy(&batch, src, dst, copy_size);
708
709 dataSize -= copy_size;
710 dstOffset += copy_size;
711 pData = (void *)pData + copy_size;
712 }
713
714 blorp_batch_finish(&batch);
715 }
716
/* vkCmdFillBuffer: fill a buffer range with a repeating 32-bit pattern by
 * clearing it through a series of linear 2D surface views.
 */
void anv_CmdFillBuffer(
    VkCommandBuffer commandBuffer,
    VkBuffer dstBuffer,
    VkDeviceSize dstOffset,
    VkDeviceSize fillSize,
    uint32_t data)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
   struct blorp_surf surf;
   struct isl_surf isl_surf;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* Resolve VK_WHOLE_SIZE / clamp to the buffer's actual range. */
   fillSize = anv_buffer_get_range(dst_buffer, dstOffset, fillSize);

   /* From the Vulkan spec:
    *
    *    "size is the number of bytes to fill, and must be either a multiple
    *    of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
    *    the buffer. If VK_WHOLE_SIZE is used and the remaining size of the
    *    buffer is not a multiple of 4, then the nearest smaller multiple is
    *    used."
    */
   fillSize &= ~3ull;

   /* First, we compute the biggest format that can be used with the
    * given offsets and size.
    */
   int bs = 16;
   bs = gcd_pow2_u64(bs, dstOffset);
   bs = gcd_pow2_u64(bs, fillSize);
   enum isl_format isl_format = isl_format_for_size(bs);

   /* Replicate the 32-bit pattern into all four dwords so any of the
    * R32-family formats chosen above sees the same data per texel.
    */
   union isl_color_value color = {
      .u32 = { data, data, data, data },
   };

   /* Phase 1: while enough data remains, clear maximally sized
    * MAX_SURFACE_DIM x MAX_SURFACE_DIM rectangles.
    */
   const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
   while (fillSize >= max_fill_size) {
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                  color, NULL);
      fillSize -= max_fill_size;
      dstOffset += max_fill_size;
   }

   /* Phase 2: clear the remaining whole rows as one MAX_SURFACE_DIM-wide
    * rectangle of the appropriate height.
    */
   uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
   assert(height < MAX_SURFACE_DIM);
   if (height != 0) {
      const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, height,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
                  color, NULL);
      fillSize -= rect_fill_size;
      dstOffset += rect_fill_size;
   }

   /* Phase 3: clear the tail (less than one full row) as a 1-high strip. */
   if (fillSize != 0) {
      const uint32_t width = fillSize / bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    width, 1,
                                    width * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, width, 1,
                  color, NULL);
   }

   blorp_batch_finish(&batch);
}
803
/* vkCmdClearColorImage: clear each requested subresource range of a color
 * image to a constant color, one mip level at a time.
 */
void anv_CmdClearColorImage(
    VkCommandBuffer commandBuffer,
    VkImage _image,
    VkImageLayout imageLayout,
    const VkClearColorValue* pColor,
    uint32_t rangeCount,
    const VkImageSubresourceRange* pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, _image);

   /* All entries false: no channel is masked off. */
   static const bool color_write_disable[4] = { false, false, false, false };

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);


   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      assert(pRanges[r].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);

      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, pRanges[r].aspectMask,
                                   imageLayout, ISL_AUX_USAGE_NONE, &surf);

      struct anv_format_plane src_format =
         anv_get_format_plane(&cmd_buffer->device->info, image->vk_format,
                              VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D) {
            /* 3D images: clear the whole (minified) depth range per level. */
            base_layer = 0;
            layer_count = anv_minify(image->extent.depth, level);
         }

         anv_cmd_buffer_mark_image_written(cmd_buffer, image,
                                           pRanges[r].aspectMask,
                                           surf.aux_usage, level,
                                           base_layer, layer_count);

         blorp_clear(&batch, &surf,
                     src_format.isl_format, src_format.swizzle,
                     level, base_layer, layer_count,
                     0, 0, level_width, level_height,
                     vk_to_isl_color(*pColor), color_write_disable);
      }
   }

   blorp_batch_finish(&batch);
}
864
/* vkCmdClearDepthStencilImage: clear depth and/or stencil subresource
 * ranges of an image to fixed values.
 */
void anv_CmdClearDepthStencilImage(
    VkCommandBuffer commandBuffer,
    VkImage image_h,
    VkImageLayout imageLayout,
    const VkClearDepthStencilValue* pDepthStencil,
    uint32_t rangeCount,
    const VkImageSubresourceRange* pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, image_h);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* Build one surface per aspect the image actually has; aspects the
    * image lacks are zeroed so blorp sees no surface for them.
    */
   struct blorp_surf depth, stencil;
   if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   imageLayout, ISL_AUX_USAGE_NONE, &depth);
   } else {
      memset(&depth, 0, sizeof(depth));
   }

   if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   imageLayout, ISL_AUX_USAGE_NONE, &stencil);
   } else {
      memset(&stencil, 0, sizeof(stencil));
   }

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      /* The range's aspect mask picks which of the two planes to clear. */
      bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
      bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D)
            layer_count = anv_minify(image->extent.depth, level);

         blorp_clear_depth_stencil(&batch, &depth, &stencil,
                                   level, base_layer, layer_count,
                                   0, 0, level_width, level_height,
                                   clear_depth, pDepthStencil->depth,
                                   /* stencil mask: write all bits or none */
                                   clear_stencil ? 0xff : 0,
                                   pDepthStencil->stencil);
      }
   }

   blorp_batch_finish(&batch);
}
925
926 VkResult
927 anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
928 uint32_t num_entries,
929 uint32_t *state_offset,
930 struct anv_state *bt_state)
931 {
932 *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
933 state_offset);
934 if (bt_state->map == NULL) {
935 /* We ran out of space. Grab a new binding table block. */
936 VkResult result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
937 if (result != VK_SUCCESS)
938 return result;
939
940 /* Re-emit state base addresses so we get the new surface state base
941 * address before we start emitting binding tables etc.
942 */
943 anv_cmd_buffer_emit_state_base_address(cmd_buffer);
944
945 *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
946 state_offset);
947 assert(bt_state->map != NULL);
948 }
949
950 return VK_SUCCESS;
951 }
952
953 static VkResult
954 binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
955 struct anv_state surface_state,
956 uint32_t *bt_offset)
957 {
958 uint32_t state_offset;
959 struct anv_state bt_state;
960
961 VkResult result =
962 anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset,
963 &bt_state);
964 if (result != VK_SUCCESS)
965 return result;
966
967 uint32_t *bt_map = bt_state.map;
968 bt_map[0] = surface_state.offset + state_offset;
969
970 *bt_offset = bt_state.offset;
971 return VK_SUCCESS;
972 }
973
/* Implement vkCmdClearAttachments for one color attachment of the current
 * subpass, via blorp_clear_attachments through a one-entry binding table.
 */
static void
clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       const VkClearAttachment *attachment,
                       uint32_t rectCount, const VkClearRect *pRects)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t color_att = attachment->colorAttachment;
   const uint32_t att_idx = subpass->color_attachments[color_att].attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att_idx];

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer, att_state->color.state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return; /* NOTE(review): allocation failure silently drops the clear */

   union isl_color_value clear_color =
      vk_to_isl_color(attachment->clearValue.color);

   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            blorp_clear_attachments(batch, binding_table,
                                    ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    true, clear_color, false, 0.0f, 0, 0);
         }
      }
      return;
   }

   /* Non-multiview path: honor each rect's layer range. */
   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      assert(pRects[r].layerCount != VK_REMAINING_ARRAY_LAYERS);
      blorp_clear_attachments(batch, binding_table,
                              ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              true, clear_color, false, 0.0f, 0, 0);
   }
}
1034
/* vkCmdClearAttachments implementation for the depth/stencil attachment.
 *
 * Clears the requested aspects over each rect in pRects.  With multiview
 * enabled, baseArrayLayer/layerCount are ignored and each view set in the
 * view mask is cleared instead.  Stencil, when requested, is cleared with a
 * full 0xff write mask.
 */
static void
clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
                               struct blorp_batch *batch,
                               const VkClearAttachment *attachment,
                               uint32_t rectCount, const VkClearRect *pRects)
{
   /* Dummy color value: blorp_clear_attachments() takes one even when the
    * color-clear flag is false.
    */
   static const union isl_color_value color_value = { .u32 = { 0, } };
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t att_idx = subpass->depth_stencil_attachment.attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];

   bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
   bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

   /* Only look up a real depth format when depth is actually cleared;
    * otherwise blorp gets ISL_FORMAT_UNSUPPORTED.
    */
   enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
   if (clear_depth) {
      depth_format = anv_get_isl_format(&cmd_buffer->device->info,
                                        pass_att->format,
                                        VK_IMAGE_ASPECT_DEPTH_BIT,
                                        VK_IMAGE_TILING_OPTIMAL);
   }

   /* blorp_clear_attachments() still needs a binding table; point it at the
    * null surface state since no color target is written here.
    */
   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer,
                                      cmd_buffer->state.null_surface_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
            blorp_clear_attachments(batch, binding_table,
                                    depth_format, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    false, color_value,
                                    clear_depth, value.depth,
                                    clear_stencil ? 0xff : 0, value.stencil);
         }
      }
      return;
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
      assert(pRects[r].layerCount != VK_REMAINING_ARRAY_LAYERS);
      blorp_clear_attachments(batch, binding_table,
                              depth_format, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              false, color_value,
                              clear_depth, value.depth,
                              clear_stencil ? 0xff : 0, value.stencil);
   }
}
1108
1109 void anv_CmdClearAttachments(
1110 VkCommandBuffer commandBuffer,
1111 uint32_t attachmentCount,
1112 const VkClearAttachment* pAttachments,
1113 uint32_t rectCount,
1114 const VkClearRect* pRects)
1115 {
1116 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1117
1118 /* Because this gets called within a render pass, we tell blorp not to
1119 * trash our depth and stencil buffers.
1120 */
1121 struct blorp_batch batch;
1122 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
1123 BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);
1124
1125 for (uint32_t a = 0; a < attachmentCount; ++a) {
1126 if (pAttachments[a].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
1127 assert(pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
1128 clear_color_attachment(cmd_buffer, &batch,
1129 &pAttachments[a],
1130 rectCount, pRects);
1131 } else {
1132 clear_depth_stencil_attachment(cmd_buffer, &batch,
1133 &pAttachments[a],
1134 rectCount, pRects);
1135 }
1136 }
1137
1138 blorp_batch_finish(&batch);
1139 }
1140
/* Phase of subpass processing an operation belongs to (load ops, the draw
 * itself, or end-of-subpass resolves).
 */
enum subpass_stage {
   SUBPASS_STAGE_LOAD,
   SUBPASS_STAGE_DRAW,
   SUBPASS_STAGE_RESOLVE,
};
1146
1147 static void
1148 resolve_surface(struct blorp_batch *batch,
1149 struct blorp_surf *src_surf,
1150 uint32_t src_level, uint32_t src_layer,
1151 struct blorp_surf *dst_surf,
1152 uint32_t dst_level, uint32_t dst_layer,
1153 uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
1154 uint32_t width, uint32_t height)
1155 {
1156 blorp_blit(batch,
1157 src_surf, src_level, src_layer,
1158 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
1159 dst_surf, dst_level, dst_layer,
1160 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
1161 src_x, src_y, src_x + width, src_y + height,
1162 dst_x, dst_y, dst_x + width, dst_y + height,
1163 0x2600 /* GL_NEAREST */, false, false);
1164 }
1165
/* Resolves one (level, layer) region of a multisampled 2D image into a
 * single-sampled 2D image, iterating over every aspect selected by
 * aspect_mask (one plane at a time for multi-planar formats).
 */
static void
resolve_image(struct anv_device *device,
              struct blorp_batch *batch,
              const struct anv_image *src_image,
              VkImageLayout src_image_layout,
              uint32_t src_level, uint32_t src_layer,
              const struct anv_image *dst_image,
              VkImageLayout dst_image_layout,
              uint32_t dst_level, uint32_t dst_layer,
              VkImageAspectFlags aspect_mask,
              uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
              uint32_t width, uint32_t height)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   /* Image resolves are only defined for 2D MSAA -> 2D single-sampled. */
   assert(src_image->type == VK_IMAGE_TYPE_2D);
   assert(src_image->samples > 1);
   assert(dst_image->type == VK_IMAGE_TYPE_2D);
   assert(dst_image->samples == 1);
   assert(src_image->n_planes == dst_image->n_planes);

   uint32_t aspect_bit;

   anv_foreach_image_aspect_bit(aspect_bit, src_image, aspect_mask) {
      struct blorp_surf src_surf, dst_surf;
      /* Both surfaces are requested with ISL_AUX_USAGE_NONE. */
      get_blorp_surf_for_anv_image(device, src_image, 1UL << aspect_bit,
                                   src_image_layout, ISL_AUX_USAGE_NONE,
                                   &src_surf);
      get_blorp_surf_for_anv_image(device, dst_image, 1UL << aspect_bit,
                                   dst_image_layout, ISL_AUX_USAGE_NONE,
                                   &dst_surf);
      /* Record the write to the destination for aux-state tracking. */
      anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image,
                                        1UL << aspect_bit,
                                        dst_surf.aux_usage,
                                        dst_level, dst_layer, 1);

      assert(!src_image->format->can_ycbcr);
      assert(!dst_image->format->can_ycbcr);

      resolve_surface(batch,
                      &src_surf, src_level, src_layer,
                      &dst_surf, dst_level, dst_layer,
                      src_x, src_y, dst_x, dst_y, width, height);
   }
}
1211
1212 void anv_CmdResolveImage(
1213 VkCommandBuffer commandBuffer,
1214 VkImage srcImage,
1215 VkImageLayout srcImageLayout,
1216 VkImage dstImage,
1217 VkImageLayout dstImageLayout,
1218 uint32_t regionCount,
1219 const VkImageResolve* pRegions)
1220 {
1221 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1222 ANV_FROM_HANDLE(anv_image, src_image, srcImage);
1223 ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
1224
1225 struct blorp_batch batch;
1226 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
1227
1228 for (uint32_t r = 0; r < regionCount; r++) {
1229 assert(pRegions[r].srcSubresource.aspectMask ==
1230 pRegions[r].dstSubresource.aspectMask);
1231 assert(anv_get_layerCount(src_image, &pRegions[r].srcSubresource) ==
1232 anv_get_layerCount(dst_image, &pRegions[r].dstSubresource));
1233
1234 const uint32_t layer_count =
1235 anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);
1236
1237 VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
1238 dst_mask = pRegions[r].dstSubresource.aspectMask;
1239
1240 assert(anv_image_aspects_compatible(src_mask, dst_mask));
1241
1242 for (uint32_t layer = 0; layer < layer_count; layer++) {
1243 resolve_image(cmd_buffer->device, &batch,
1244 src_image, srcImageLayout,
1245 pRegions[r].srcSubresource.mipLevel,
1246 pRegions[r].srcSubresource.baseArrayLayer + layer,
1247 dst_image, dstImageLayout,
1248 pRegions[r].dstSubresource.mipLevel,
1249 pRegions[r].dstSubresource.baseArrayLayer + layer,
1250 pRegions[r].dstSubresource.aspectMask,
1251 pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
1252 pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
1253 pRegions[r].extent.width, pRegions[r].extent.height);
1254 }
1255 }
1256
1257 blorp_batch_finish(&batch);
1258 }
1259
1260 static enum isl_aux_usage
1261 fast_clear_aux_usage(const struct anv_image *image,
1262 VkImageAspectFlagBits aspect)
1263 {
1264 uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
1265 if (image->planes[plane].aux_usage == ISL_AUX_USAGE_NONE)
1266 return ISL_AUX_USAGE_CCS_D;
1267 else
1268 return image->planes[plane].aux_usage;
1269 }
1270
/* Performs the end-of-subpass MSAA resolves.
 *
 * For each color attachment paired with a resolve attachment, blits every
 * framebuffer layer of the multisampled source into the single-sampled
 * destination over the current render area.
 */
void
anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;

   if (subpass->has_resolve) {
      struct blorp_batch batch;
      blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

      /* We are about to do some MSAA resolves. We need to flush so that the
       * result of writes to the MSAA color attachments show up in the sampler
       * when we blit to the single-sampled resolve target.
       */
      cmd_buffer->state.pending_pipe_bits |=
         ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
         ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;

      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t src_att = subpass->color_attachments[i].attachment;
         uint32_t dst_att = subpass->resolve_attachments[i].attachment;

         if (dst_att == VK_ATTACHMENT_UNUSED)
            continue;

         assert(src_att < cmd_buffer->state.pass->attachment_count);
         assert(dst_att < cmd_buffer->state.pass->attachment_count);

         if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
            /* From the Vulkan 1.0 spec:
             *
             *    If the first use of an attachment in a render pass is as a
             *    resolve attachment, then the loadOp is effectively ignored
             *    as the resolve is guaranteed to overwrite all pixels in the
             *    render area.
             */
            cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
         }

         struct anv_image_view *src_iview = fb->attachments[src_att];
         struct anv_image_view *dst_iview = fb->attachments[dst_att];

         enum isl_aux_usage src_aux_usage =
            cmd_buffer->state.attachments[src_att].aux_usage;
         enum isl_aux_usage dst_aux_usage =
            cmd_buffer->state.attachments[dst_att].aux_usage;

         const VkRect2D render_area = cmd_buffer->state.render_area;

         assert(src_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT &&
                dst_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT);

         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(cmd_buffer->device, src_iview->image,
                                      VK_IMAGE_ASPECT_COLOR_BIT,
                                      ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                      src_aux_usage, &src_surf);
         get_blorp_surf_for_anv_image(cmd_buffer->device, dst_iview->image,
                                      VK_IMAGE_ASPECT_COLOR_BIT,
                                      ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                      dst_aux_usage, &dst_surf);

         uint32_t base_src_layer = src_iview->planes[0].isl.base_array_layer;
         uint32_t base_dst_layer = dst_iview->planes[0].isl.base_array_layer;

         assert(src_iview->planes[0].isl.array_len >= fb->layers);
         assert(dst_iview->planes[0].isl.array_len >= fb->layers);

         /* Record the write to the resolve target for aux-state tracking. */
         anv_cmd_buffer_mark_image_written(cmd_buffer, dst_iview->image,
                                           VK_IMAGE_ASPECT_COLOR_BIT,
                                           dst_surf.aux_usage,
                                           dst_iview->planes[0].isl.base_level,
                                           base_dst_layer, fb->layers);

         assert(!src_iview->image->format->can_ycbcr);
         assert(!dst_iview->image->format->can_ycbcr);

         /* NOTE(review): this inner `i` shadows the attachment-loop counter
          * above.  Harmless because the outer `i` is not used again inside
          * the loop body after this point, but worth renaming eventually.
          */
         for (uint32_t i = 0; i < fb->layers; i++) {
            resolve_surface(&batch,
                            &src_surf,
                            src_iview->planes[0].isl.base_level,
                            base_src_layer + i,
                            &dst_surf,
                            dst_iview->planes[0].isl.base_level,
                            base_dst_layer + i,
                            render_area.offset.x, render_area.offset.y,
                            render_area.offset.x, render_area.offset.y,
                            render_area.extent.width, render_area.extent.height);
         }
      }

      blorp_batch_finish(&batch);
   }
}
1365
/* Copies a range of levels/layers of a single-plane color image's main
 * surface into its shadow surface using blorp_copy().
 */
void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
                         const struct anv_image *image,
                         uint32_t base_level, uint32_t level_count,
                         uint32_t base_layer, uint32_t layer_count)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* Shadow copies are only done for single-plane color images. */
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT && image->n_planes == 1);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device,
                                image, VK_IMAGE_ASPECT_COLOR_BIT,
                                VK_IMAGE_LAYOUT_GENERAL,
                                ISL_AUX_USAGE_NONE, &surf);
   assert(surf.aux_usage == ISL_AUX_USAGE_NONE);

   /* Build the destination surface by hand: the shadow lives in the same
    * BO as plane 0, at the shadow surface's offset.
    */
   struct blorp_surf shadow_surf = {
      .surf = &image->planes[0].shadow_surface.isl,
      .addr = {
         .buffer = image->planes[0].bo,
         .offset = image->planes[0].bo_offset +
                   image->planes[0].shadow_surface.offset,
         .mocs = cmd_buffer->device->default_mocs,
      },
   };

   for (uint32_t l = 0; l < level_count; l++) {
      const uint32_t level = base_level + l;

      /* Each miplevel shrinks; clamp the copy extent accordingly. */
      const VkExtent3D extent = {
         .width = anv_minify(image->extent.width, level),
         .height = anv_minify(image->extent.height, level),
         .depth = anv_minify(image->extent.depth, level),
      };

      /* For 3D images, copy every depth slice of this level instead of the
       * caller-provided layer range (layer_count is deliberately
       * overwritten here).
       */
      if (image->type == VK_IMAGE_TYPE_3D)
         layer_count = extent.depth;

      for (uint32_t a = 0; a < layer_count; a++) {
         const uint32_t layer = base_layer + a;

         blorp_copy(&batch, &surf, level, layer,
                    &shadow_surf, level, layer,
                    0, 0, 0, 0, extent.width, extent.height);
      }
   }

   blorp_batch_finish(&batch);
}
1417
/* Clears a rectangle of one miplevel (over a range of layers) of a color
 * image to clear_color, rendering through blorp with the given
 * format/swizzle and aux usage.
 */
void
anv_image_clear_color(struct anv_cmd_buffer *cmd_buffer,
                      const struct anv_image *image,
                      VkImageAspectFlagBits aspect,
                      enum isl_aux_usage aux_usage,
                      enum isl_format format, struct isl_swizzle swizzle,
                      uint32_t level, uint32_t base_layer, uint32_t layer_count,
                      VkRect2D area, union isl_color_value clear_color)
{
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);

   /* We don't support planar images with multisampling yet */
   assert(image->n_planes == 1);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
                                ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                aux_usage, &surf);
   /* Record the write for aux/fast-clear state tracking. */
   anv_cmd_buffer_mark_image_written(cmd_buffer, image, aspect, aux_usage,
                                     level, base_layer, layer_count);

   blorp_clear(&batch, &surf, format, anv_swizzle_for_render(swizzle),
               level, base_layer, layer_count,
               area.offset.x, area.offset.y,
               area.offset.x + area.extent.width,
               area.offset.y + area.extent.height,
               clear_color, NULL);

   blorp_batch_finish(&batch);
}
1451
/* Clears a rectangle of one miplevel (over a range of layers) of a
 * depth/stencil image via blorp_clear_depth_stencil().  Which aspects get
 * cleared is selected by the aspects mask; stencil, when requested, is
 * cleared with a full 0xff write mask.
 */
void
anv_image_clear_depth_stencil(struct anv_cmd_buffer *cmd_buffer,
                              const struct anv_image *image,
                              VkImageAspectFlags aspects,
                              enum isl_aux_usage depth_aux_usage,
                              uint32_t level,
                              uint32_t base_layer, uint32_t layer_count,
                              VkRect2D area,
                              float depth_value, uint8_t stencil_value)
{
   assert(image->aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
                            VK_IMAGE_ASPECT_STENCIL_BIT));

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth = {};
   if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                   depth_aux_usage, &depth);
      /* The driver's fixed HiZ fast-clear value. */
      depth.clear_color.f32[0] = ANV_HZ_FC_VAL;
   }

   /* Stencil is accessed without an aux usage. */
   struct blorp_surf stencil = {};
   if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                   ISL_AUX_USAGE_NONE, &stencil);
   }

   blorp_clear_depth_stencil(&batch, &depth, &stencil,
                             level, base_layer, layer_count,
                             area.offset.x, area.offset.y,
                             area.offset.x + area.extent.width,
                             area.offset.y + area.extent.height,
                             aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
                             depth_value,
                             (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) ? 0xff : 0,
                             stencil_value);

   blorp_batch_finish(&batch);
}
1497
/* Runs a HiZ aux operation (hiz_op) on a range of layers of one depth
 * miplevel via blorp_hiz_op().
 */
void
anv_image_hiz_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 VkImageAspectFlagBits aspect, uint32_t level,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op hiz_op)
{
   /* HiZ only applies to the depth aspect, which must be plane 0. */
   assert(aspect == VK_IMAGE_ASPECT_DEPTH_BIT);
   assert(base_layer + layer_count <= anv_image_aux_layers(image, aspect, level));
   assert(anv_image_aspect_to_plane(image->aspects,
                                    VK_IMAGE_ASPECT_DEPTH_BIT) == 0);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device,
                                image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                ISL_AUX_USAGE_HIZ, &surf);
   /* The driver's fixed HiZ fast-clear value. */
   surf.clear_color.f32[0] = ANV_HZ_FC_VAL;

   blorp_hiz_op(&batch, &surf, level, base_layer, layer_count, hiz_op);

   blorp_batch_finish(&batch);
}
1524
/* Fast-clears depth (via HiZ) and/or stencil for a rectangle of one
 * miplevel over a range of layers.  Depth is always cleared to the fixed
 * ANV_HZ_FC_VAL; stencil to stencil_value with a full 0xff write mask.
 * Ends by queueing the post-depth-clear PIPE_CONTROL workaround.
 */
void
anv_image_hiz_clear(struct anv_cmd_buffer *cmd_buffer,
                    const struct anv_image *image,
                    VkImageAspectFlags aspects,
                    uint32_t level,
                    uint32_t base_layer, uint32_t layer_count,
                    VkRect2D area, uint8_t stencil_value)
{
   assert(image->aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
                            VK_IMAGE_ASPECT_STENCIL_BIT));

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth = {};
   if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      /* The HiZ surface only covers the aux layer range. */
      assert(base_layer + layer_count <=
             anv_image_aux_layers(image, VK_IMAGE_ASPECT_DEPTH_BIT, level));
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                   ISL_AUX_USAGE_HIZ, &depth);
      depth.clear_color.f32[0] = ANV_HZ_FC_VAL;
   }

   /* Stencil is accessed without an aux usage. */
   struct blorp_surf stencil = {};
   if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                   ISL_AUX_USAGE_NONE, &stencil);
   }

   blorp_hiz_clear_depth_stencil(&batch, &depth, &stencil,
                                 level, base_layer, layer_count,
                                 area.offset.x, area.offset.y,
                                 area.offset.x + area.extent.width,
                                 area.offset.y + area.extent.height,
                                 aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
                                 ANV_HZ_FC_VAL,
                                 aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
                                 stencil_value);

   blorp_batch_finish(&batch);

   /* From the SKL PRM, Depth Buffer Clear:
    *
    * Depth Buffer Clear Workaround
    * Depth buffer clear pass using any of the methods (WM_STATE, 3DSTATE_WM
    * or 3DSTATE_WM_HZ_OP) must be followed by a PIPE_CONTROL command with
    * DEPTH_STALL bit and Depth FLUSH bits “set” before starting to render.
    * DepthStall and DepthFlush are not needed between consecutive depth clear
    * passes nor is it required if the depth-clear pass was done with
    * “full_surf_clear” bit set in the 3DSTATE_WM_HZ_OP.
    */
   if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      cmd_buffer->state.pending_pipe_bits |=
         ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | ANV_PIPE_DEPTH_STALL_BIT;
   }
}
1585
/* Performs an MCS aux operation on a range of layers of a multisampled
 * color image.  Only ISL_AUX_OP_FAST_CLEAR is currently implemented;
 * resolves and ambiguates hit unreachable().  When predicate is set, the
 * blorp batch is created with BLORP_BATCH_PREDICATE_ENABLE.
 */
void
anv_image_mcs_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 VkImageAspectFlagBits aspect,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op mcs_op, bool predicate)
{
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
   assert(image->samples > 1);
   assert(base_layer + layer_count <= anv_image_aux_layers(image, aspect, 0));

   /* Multisampling with multi-planar formats is not supported */
   assert(image->n_planes == 1);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    predicate ? BLORP_BATCH_PREDICATE_ENABLE : 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
                                ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                ISL_AUX_USAGE_MCS, &surf);

   /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
    *
    *    "After Render target fast clear, pipe-control with color cache
    *    write-flush must be issued before sending any DRAW commands on
    *    that render target."
    *
    * This comment is a bit cryptic and doesn't really tell you what's going
    * or what's really needed. It appears that fast clear ops are not
    * properly synchronized with other drawing. This means that we cannot
    * have a fast clear operation in the pipe at the same time as other
    * regular drawing operations. We need to use a PIPE_CONTROL to ensure
    * that the contents of the previous draw hit the render target before we
    * resolve and then use a second PIPE_CONTROL after the resolve to ensure
    * that it is completed before any additional drawing occurs.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   switch (mcs_op) {
   case ISL_AUX_OP_FAST_CLEAR:
      /* MSAA surfaces have a single miplevel; clear the full extent. */
      blorp_fast_clear(&batch, &surf, surf.surf->format,
                       0, base_layer, layer_count,
                       0, 0, image->extent.width, image->extent.height);
      break;
   case ISL_AUX_OP_FULL_RESOLVE:
   case ISL_AUX_OP_PARTIAL_RESOLVE:
   case ISL_AUX_OP_AMBIGUATE:
   default:
      unreachable("Unsupported MCS operation");
   }

   /* Second half of the PIPE_CONTROL bracketing described above. */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   blorp_batch_finish(&batch);
}
1645
/* Performs a CCS aux operation (fast clear, full/partial resolve, or
 * ambiguate) on a range of layers of one miplevel of a single-sampled
 * color image.  When predicate is set, the blorp batch is created with
 * BLORP_BATCH_PREDICATE_ENABLE.
 */
void
anv_image_ccs_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 VkImageAspectFlagBits aspect, uint32_t level,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op ccs_op, bool predicate)
{
   assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
   assert(image->samples == 1);
   assert(level < anv_image_aux_levels(image, aspect));
   /* Multi-LOD YcBcR is not allowed */
   assert(image->n_planes == 1 || level == 0);
   assert(base_layer + layer_count <=
          anv_image_aux_layers(image, aspect, level));

   /* Chroma planes of planar formats can be subsampled; scale the level
    * dimensions by the plane's denominator scales.
    */
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   uint32_t width_div = image->format->planes[plane].denominator_scales[0];
   uint32_t height_div = image->format->planes[plane].denominator_scales[1];
   uint32_t level_width = anv_minify(image->extent.width, level) / width_div;
   uint32_t level_height = anv_minify(image->extent.height, level) / height_div;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    predicate ? BLORP_BATCH_PREDICATE_ENABLE : 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
                                ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                fast_clear_aux_usage(image, aspect),
                                &surf);

   if (ccs_op == ISL_AUX_OP_FULL_RESOLVE ||
       ccs_op == ISL_AUX_OP_PARTIAL_RESOLVE) {
      /* If we're doing a resolve operation, then we need the indirect clear
       * color. The clear and ambiguate operations just stomp the CCS to a
       * particular value and don't care about format or clear value.
       */
      const struct anv_address clear_color_addr =
         anv_image_get_clear_color_addr(cmd_buffer->device, image, aspect);
      surf.clear_color_addr = anv_to_blorp_address(clear_color_addr);
   }

   /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
    *
    *    "After Render target fast clear, pipe-control with color cache
    *    write-flush must be issued before sending any DRAW commands on
    *    that render target."
    *
    * This comment is a bit cryptic and doesn't really tell you what's going
    * or what's really needed. It appears that fast clear ops are not
    * properly synchronized with other drawing. This means that we cannot
    * have a fast clear operation in the pipe at the same time as other
    * regular drawing operations. We need to use a PIPE_CONTROL to ensure
    * that the contents of the previous draw hit the render target before we
    * resolve and then use a second PIPE_CONTROL after the resolve to ensure
    * that it is completed before any additional drawing occurs.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   switch (ccs_op) {
   case ISL_AUX_OP_FAST_CLEAR:
      blorp_fast_clear(&batch, &surf, surf.surf->format,
                       level, base_layer, layer_count,
                       0, 0, level_width, level_height);
      break;
   case ISL_AUX_OP_FULL_RESOLVE:
   case ISL_AUX_OP_PARTIAL_RESOLVE:
      blorp_ccs_resolve(&batch, &surf, level, base_layer, layer_count,
                        surf.surf->format, ccs_op);
      break;
   case ISL_AUX_OP_AMBIGUATE:
      /* Ambiguates are done one layer at a time. */
      for (uint32_t a = 0; a < layer_count; a++) {
         const uint32_t layer = base_layer + a;
         blorp_ccs_ambiguate(&batch, &surf, level, layer);
      }
      break;
   default:
      unreachable("Unsupported CCS operation");
   }

   /* Second half of the PIPE_CONTROL bracketing described above. */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   blorp_batch_finish(&batch);
}