anv/blorp: Support ISL_AUX_USAGE_HIZ in surf_for_anv_image
src/intel/vulkan/anv_blorp.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

static bool
lookup_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_shader_bin *bin =
      anv_pipeline_cache_search(&device->blorp_shader_cache, key, key_size);
   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

static bool
upload_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    const void *kernel, uint32_t kernel_size,
                    const struct brw_stage_prog_data *prog_data,
                    uint32_t prog_data_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_pipeline_bind_map bind_map = {
      .surface_count = 0,
      .sampler_count = 0,
   };

   struct anv_shader_bin *bin =
      anv_pipeline_cache_upload_kernel(&device->blorp_shader_cache,
                                       key, key_size, kernel, kernel_size,
                                       prog_data, prog_data_size, &bind_map);

   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

void
anv_device_init_blorp(struct anv_device *device)
{
   anv_pipeline_cache_init(&device->blorp_shader_cache, device, true);
   blorp_init(&device->blorp, device, &device->isl_dev);
   device->blorp.compiler = device->instance->physicalDevice.compiler;
   device->blorp.lookup_shader = lookup_blorp_shader;
   device->blorp.upload_shader = upload_blorp_shader;
   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell) {
         device->blorp.exec = gen75_blorp_exec;
      } else {
         device->blorp.exec = gen7_blorp_exec;
      }
      break;
   case 8:
      device->blorp.exec = gen8_blorp_exec;
      break;
   case 9:
      device->blorp.exec = gen9_blorp_exec;
      break;
   case 10:
      device->blorp.exec = gen10_blorp_exec;
      break;
   default:
      unreachable("Unknown hardware generation");
   }
}

void
anv_device_finish_blorp(struct anv_device *device)
{
   blorp_finish(&device->blorp);
   anv_pipeline_cache_finish(&device->blorp_shader_cache);
}

static void
get_blorp_surf_for_anv_buffer(struct anv_device *device,
                              struct anv_buffer *buffer, uint64_t offset,
                              uint32_t width, uint32_t height,
                              uint32_t row_pitch, enum isl_format format,
                              struct blorp_surf *blorp_surf,
                              struct isl_surf *isl_surf)
{
   const struct isl_format_layout *fmtl =
      isl_format_get_layout(format);
   bool ok UNUSED;

   /* ASTC is the only format which doesn't support linear layouts.
    * Create an equivalently sized surface with ISL to get around this.
    */
   if (fmtl->txc == ISL_TXC_ASTC) {
      /* Use an equivalently sized format */
      format = ISL_FORMAT_R32G32B32A32_UINT;
      assert(fmtl->bpb == isl_format_get_layout(format)->bpb);

      /* Shrink the dimensions for the new format */
      width = DIV_ROUND_UP(width, fmtl->bw);
      height = DIV_ROUND_UP(height, fmtl->bh);
   }
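
   /* Worked example (illustration, not in the original source): every ASTC
    * block is 128 bits, so fmtl->bpb == 128 matches R32G32B32A32_UINT.  For
    * ASTC_8x8 (bw = bh = 8), a 65x64 copy becomes a
    * DIV_ROUND_UP(65, 8) x DIV_ROUND_UP(64, 8) = 9x8 surface of 128-bit
    * texels covering the same bytes.
    */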
   *blorp_surf = (struct blorp_surf) {
      .surf = isl_surf,
      .addr = {
         .buffer = buffer->bo,
         .offset = buffer->offset + offset,
         .mocs = device->default_mocs,
      },
   };

   ok = isl_surf_init(&device->isl_dev, isl_surf,
                      .dim = ISL_SURF_DIM_2D,
                      .format = format,
                      .width = width,
                      .height = height,
                      .depth = 1,
                      .levels = 1,
                      .array_len = 1,
                      .samples = 1,
                      .row_pitch = row_pitch,
                      .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                               ISL_SURF_USAGE_RENDER_TARGET_BIT,
                      .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(ok);
}

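/* Sentinel aux-usage value: tells get_blorp_surf_for_anv_image() below to
 * use the image's own default aux usage (with HiZ disabled, since blorp
 * copies and blits can't handle it).
 */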
#define ANV_AUX_USAGE_DEFAULT ((enum isl_aux_usage)0xff)

static struct blorp_address
anv_to_blorp_address(struct anv_address addr)
{
   return (struct blorp_address) {
      .buffer = addr.bo,
      .offset = addr.offset,
   };
}

static void
get_blorp_surf_for_anv_image(const struct anv_device *device,
                             const struct anv_image *image,
                             VkImageAspectFlags aspect,
                             enum isl_aux_usage aux_usage,
                             struct blorp_surf *blorp_surf)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);

   if (aux_usage == ANV_AUX_USAGE_DEFAULT) {
      aux_usage = image->planes[plane].aux_usage;

      /* Blorp copies and blits can't handle HiZ so disable it by default */
      if (aux_usage == ISL_AUX_USAGE_HIZ)
         aux_usage = ISL_AUX_USAGE_NONE;
   }

   const struct anv_surface *surface = &image->planes[plane].surface;
   *blorp_surf = (struct blorp_surf) {
      .surf = &surface->isl,
      .addr = {
         .buffer = image->planes[plane].bo,
         .offset = image->planes[plane].bo_offset + surface->offset,
         .mocs = device->default_mocs,
      },
   };

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      const struct anv_surface *aux_surface = &image->planes[plane].aux_surface;
      blorp_surf->aux_surf = &aux_surface->isl;
      blorp_surf->aux_addr = (struct blorp_address) {
         .buffer = image->planes[plane].bo,
         .offset = image->planes[plane].bo_offset + aux_surface->offset,
         .mocs = device->default_mocs,
      };
      blorp_surf->aux_usage = aux_usage;
   }
}

void anv_CmdCopyImage(
    VkCommandBuffer commandBuffer,
    VkImage srcImage,
    VkImageLayout srcImageLayout,
    VkImage dstImage,
    VkImageLayout dstImageLayout,
    uint32_t regionCount,
    const VkImageCopy* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      VkOffset3D srcOffset =
         anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
      VkOffset3D dstOffset =
         anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
      VkExtent3D extent =
         anv_sanitize_image_extent(src_image->type, pRegions[r].extent);

      unsigned dst_base_layer, layer_count;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         dst_base_layer = pRegions[r].dstOffset.z;
         layer_count = pRegions[r].extent.depth;
      } else {
         dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
         layer_count =
            anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);
      }

      unsigned src_base_layer;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         src_base_layer = pRegions[r].srcOffset.z;
      } else {
         src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
         assert(layer_count ==
                anv_get_layerCount(src_image, &pRegions[r].srcSubresource));
      }

      VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
                         dst_mask = pRegions[r].dstSubresource.aspectMask;

      assert(anv_image_aspects_compatible(src_mask, dst_mask));

      if (_mesa_bitcount(src_mask) > 1) {
         uint32_t aspect_bit;
         anv_foreach_image_aspect_bit(aspect_bit, src_image, src_mask) {
            struct blorp_surf src_surf, dst_surf;
            get_blorp_surf_for_anv_image(cmd_buffer->device,
                                         src_image, 1UL << aspect_bit,
                                         ANV_AUX_USAGE_DEFAULT, &src_surf);
            get_blorp_surf_for_anv_image(cmd_buffer->device,
                                         dst_image, 1UL << aspect_bit,
                                         ANV_AUX_USAGE_DEFAULT, &dst_surf);

            for (unsigned i = 0; i < layer_count; i++) {
               blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
                          src_base_layer + i,
                          &dst_surf, pRegions[r].dstSubresource.mipLevel,
                          dst_base_layer + i,
                          srcOffset.x, srcOffset.y,
                          dstOffset.x, dstOffset.y,
                          extent.width, extent.height);
            }
         }
      } else {
         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(cmd_buffer->device, src_image, src_mask,
                                      ANV_AUX_USAGE_DEFAULT, &src_surf);
         get_blorp_surf_for_anv_image(cmd_buffer->device, dst_image, dst_mask,
                                      ANV_AUX_USAGE_DEFAULT, &dst_surf);

         for (unsigned i = 0; i < layer_count; i++) {
            blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
                       src_base_layer + i,
                       &dst_surf, pRegions[r].dstSubresource.mipLevel,
                       dst_base_layer + i,
                       srcOffset.x, srcOffset.y,
                       dstOffset.x, dstOffset.y,
                       extent.width, extent.height);
         }
      }
   }

   blorp_batch_finish(&batch);
}

static void
copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_buffer *anv_buffer,
                     struct anv_image *anv_image,
                     uint32_t regionCount,
                     const VkBufferImageCopy* pRegions,
                     bool buffer_to_image)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct {
      struct blorp_surf surf;
      uint32_t level;
      VkOffset3D offset;
   } image, buffer, *src, *dst;

   buffer.level = 0;
   buffer.offset = (VkOffset3D) { 0, 0, 0 };

   if (buffer_to_image) {
      src = &buffer;
      dst = &image;
   } else {
      src = &image;
      dst = &buffer;
   }

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;

      get_blorp_surf_for_anv_image(cmd_buffer->device, anv_image, aspect,
                                   ANV_AUX_USAGE_DEFAULT, &image.surf);
      image.offset =
         anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
      image.level = pRegions[r].imageSubresource.mipLevel;

      VkExtent3D extent =
         anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
      if (anv_image->type != VK_IMAGE_TYPE_3D) {
         image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
         extent.depth =
            anv_get_layerCount(anv_image, &pRegions[r].imageSubresource);
      }

      const enum isl_format buffer_format =
         anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
                            aspect, VK_IMAGE_TILING_LINEAR);

      const VkExtent3D bufferImageExtent = {
         .width = pRegions[r].bufferRowLength ?
                  pRegions[r].bufferRowLength : extent.width,
         .height = pRegions[r].bufferImageHeight ?
                   pRegions[r].bufferImageHeight : extent.height,
      };

      const struct isl_format_layout *buffer_fmtl =
         isl_format_get_layout(buffer_format);

      const uint32_t buffer_row_pitch =
         DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
         (buffer_fmtl->bpb / 8);

      const uint32_t buffer_layer_stride =
         DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
         buffer_row_pitch;
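
      /* Worked example (illustration, not in the original source): for
       * VK_FORMAT_R8G8B8A8_UNORM (bpb = 32, bw = bh = 1) with a
       * bufferRowLength of 256, the row pitch is 256 * 4 = 1024 bytes.
       * For BC1 (bpb = 64, bw = bh = 4) with a width of 250, it is
       * DIV_ROUND_UP(250, 4) * 8 = 63 * 8 = 504 bytes per row of blocks.
       */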

      struct isl_surf buffer_isl_surf;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    anv_buffer, pRegions[r].bufferOffset,
                                    extent.width, extent.height,
                                    buffer_row_pitch, buffer_format,
                                    &buffer.surf, &buffer_isl_surf);

      for (unsigned z = 0; z < extent.depth; z++) {
         blorp_copy(&batch, &src->surf, src->level, src->offset.z,
                    &dst->surf, dst->level, dst->offset.z,
                    src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
                    extent.width, extent.height);

         image.offset.z++;
         buffer.surf.addr.offset += buffer_layer_stride;
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdCopyBufferToImage(
    VkCommandBuffer commandBuffer,
    VkBuffer srcBuffer,
    VkImage dstImage,
    VkImageLayout dstImageLayout,
    uint32_t regionCount,
    const VkBufferImageCopy* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   copy_buffer_to_image(cmd_buffer, src_buffer, dst_image,
                        regionCount, pRegions, true);
}

void anv_CmdCopyImageToBuffer(
    VkCommandBuffer commandBuffer,
    VkImage srcImage,
    VkImageLayout srcImageLayout,
    VkBuffer dstBuffer,
    uint32_t regionCount,
    const VkBufferImageCopy* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   copy_buffer_to_image(cmd_buffer, dst_buffer, src_image,
                        regionCount, pRegions, false);
}

static bool
flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
{
   bool flip = false;
   if (*src0 > *src1) {
      unsigned tmp = *src0;
      *src0 = *src1;
      *src1 = tmp;
      flip = !flip;
   }

   if (*dst0 > *dst1) {
      unsigned tmp = *dst0;
      *dst0 = *dst1;
      *dst1 = tmp;
      flip = !flip;
   }

   return flip;
}
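
/* Example (illustration, not in the original source): a mirrored blit with
 * src x-range (10, 2) and dst x-range (5, 7) swaps the source endpoints to
 * (2, 10) and returns true, so the caller passes flip_x = true to
 * blorp_blit().  If both ranges are reversed, the two flips cancel and the
 * function returns false.
 */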

void anv_CmdBlitImage(
    VkCommandBuffer commandBuffer,
    VkImage srcImage,
    VkImageLayout srcImageLayout,
    VkImage dstImage,
    VkImageLayout dstImageLayout,
    uint32_t regionCount,
    const VkImageBlit* pRegions,
    VkFilter filter)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_surf src, dst;

   uint32_t gl_filter;
   switch (filter) {
   case VK_FILTER_NEAREST:
      gl_filter = 0x2600; /* GL_NEAREST */
      break;
   case VK_FILTER_LINEAR:
      gl_filter = 0x2601; /* GL_LINEAR */
      break;
   default:
      unreachable("Invalid filter");
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
      const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;

      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   src_image, src_res->aspectMask,
                                   ANV_AUX_USAGE_DEFAULT, &src);
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   dst_image, dst_res->aspectMask,
                                   ANV_AUX_USAGE_DEFAULT, &dst);

      struct anv_format_plane src_format =
         anv_get_format_plane(&cmd_buffer->device->info, src_image->vk_format,
                              src_res->aspectMask, src_image->tiling);
      struct anv_format_plane dst_format =
         anv_get_format_plane(&cmd_buffer->device->info, dst_image->vk_format,
                              dst_res->aspectMask, dst_image->tiling);

      unsigned dst_start, dst_end;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         assert(dst_res->baseArrayLayer == 0);
         dst_start = pRegions[r].dstOffsets[0].z;
         dst_end = pRegions[r].dstOffsets[1].z;
      } else {
         dst_start = dst_res->baseArrayLayer;
         dst_end = dst_start + anv_get_layerCount(dst_image, dst_res);
      }

      unsigned src_start, src_end;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         assert(src_res->baseArrayLayer == 0);
         src_start = pRegions[r].srcOffsets[0].z;
         src_end = pRegions[r].srcOffsets[1].z;
      } else {
         src_start = src_res->baseArrayLayer;
         src_end = src_start + anv_get_layerCount(src_image, src_res);
      }

      bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
      float src_z_step = (float)(src_end + 1 - src_start) /
                         (float)(dst_end + 1 - dst_start);

      if (flip_z) {
         src_start = src_end;
         src_z_step *= -1;
      }

      unsigned src_x0 = pRegions[r].srcOffsets[0].x;
      unsigned src_x1 = pRegions[r].srcOffsets[1].x;
      unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
      unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
      bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);

      unsigned src_y0 = pRegions[r].srcOffsets[0].y;
      unsigned src_y1 = pRegions[r].srcOffsets[1].y;
      unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
      unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
      bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);

      const unsigned num_layers = dst_end - dst_start;
      for (unsigned i = 0; i < num_layers; i++) {
         unsigned dst_z = dst_start + i;
         unsigned src_z = src_start + i * src_z_step;

         blorp_blit(&batch, &src, src_res->mipLevel, src_z,
                    src_format.isl_format, src_format.swizzle,
                    &dst, dst_res->mipLevel, dst_z,
                    dst_format.isl_format,
                    anv_swizzle_for_render(dst_format.swizzle),
                    src_x0, src_y0, src_x1, src_y1,
                    dst_x0, dst_y0, dst_x1, dst_y1,
                    gl_filter, flip_x, flip_y);
      }
   }

   blorp_batch_finish(&batch);
}

static enum isl_format
isl_format_for_size(unsigned size_B)
{
   switch (size_B) {
   case 4:  return ISL_FORMAT_R32_UINT;
   case 8:  return ISL_FORMAT_R32G32_UINT;
   case 16: return ISL_FORMAT_R32G32B32A32_UINT;
   default:
      unreachable("Not a power-of-two format size");
   }
}

/**
 * Returns the greatest common divisor of a and b that is a power of two.
 */
static uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX in which
    * case, the MIN2() will take the other one.  If both are 0 then we will
    * hit the assert above.
    */
   return 1ull << MIN2(a_log2, b_log2);
}
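
/* Example (illustration, not in the original source): gcd_pow2_u64(24, 16):
 * 24 = 0b11000, so ffsll(24) - 1 = 3; 16 = 0b10000, so ffsll(16) - 1 = 4;
 * MIN2(3, 4) = 3 and the result is 1 << 3 = 8, the largest power of two
 * dividing both.  Note this is the power-of-two GCD, not the full GCD:
 * gcd_pow2_u64(12, 18) is 2 even though gcd(12, 18) = 6.
 */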

/* This is the maximum possible width/height our HW can handle */
#define MAX_SURFACE_DIM (1ull << 14)

void anv_CmdCopyBuffer(
    VkCommandBuffer commandBuffer,
    VkBuffer srcBuffer,
    VkBuffer dstBuffer,
    uint32_t regionCount,
    const VkBufferCopy* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      struct blorp_address src = {
         .buffer = src_buffer->bo,
         .offset = src_buffer->offset + pRegions[r].srcOffset,
         .mocs = cmd_buffer->device->default_mocs,
      };
      struct blorp_address dst = {
         .buffer = dst_buffer->bo,
         .offset = dst_buffer->offset + pRegions[r].dstOffset,
         .mocs = cmd_buffer->device->default_mocs,
      };

      blorp_buffer_copy(&batch, src, dst, pRegions[r].size);
   }

   blorp_batch_finish(&batch);
}

void anv_CmdUpdateBuffer(
    VkCommandBuffer commandBuffer,
    VkBuffer dstBuffer,
    VkDeviceSize dstOffset,
    VkDeviceSize dataSize,
    const void* pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* We can't quite grab a full block because the state stream needs a
    * little data at the top to build its linked list.
    */
   const uint32_t max_update_size =
      cmd_buffer->device->dynamic_state_pool.block_size - 64;

   assert(max_update_size < MAX_SURFACE_DIM * 4);
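
   /* Example (illustration, not in the original source): with a 16 KiB
    * dynamic-state block, max_update_size is 16384 - 64 = 16320 bytes, so a
    * 40000-byte vkCmdUpdateBuffer is staged and copied as three chunks of
    * 16320, 16320, and 7360 bytes by the loop below.
    */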

   /* We're about to read data that was written from the CPU.  Flush the
    * texture cache so we don't get anything stale.
    */
   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;

   while (dataSize) {
      const uint32_t copy_size = MIN2(dataSize, max_update_size);

      struct anv_state tmp_data =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);

      memcpy(tmp_data.map, pData, copy_size);

      anv_state_flush(cmd_buffer->device, tmp_data);

      struct blorp_address src = {
         .buffer = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
         .offset = tmp_data.offset,
         .mocs = cmd_buffer->device->default_mocs,
      };
      struct blorp_address dst = {
         .buffer = dst_buffer->bo,
         .offset = dst_buffer->offset + dstOffset,
         .mocs = cmd_buffer->device->default_mocs,
      };

      blorp_buffer_copy(&batch, src, dst, copy_size);

      dataSize -= copy_size;
      dstOffset += copy_size;
      pData = (void *)pData + copy_size;
   }

   blorp_batch_finish(&batch);
}

void anv_CmdFillBuffer(
    VkCommandBuffer commandBuffer,
    VkBuffer dstBuffer,
    VkDeviceSize dstOffset,
    VkDeviceSize fillSize,
    uint32_t data)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
   struct blorp_surf surf;
   struct isl_surf isl_surf;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   fillSize = anv_buffer_get_range(dst_buffer, dstOffset, fillSize);

   /* From the Vulkan spec:
    *
    *    "size is the number of bytes to fill, and must be either a multiple
    *    of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
    *    the buffer.  If VK_WHOLE_SIZE is used and the remaining size of the
    *    buffer is not a multiple of 4, then the nearest smaller multiple is
    *    used."
    */
   fillSize &= ~3ull;

   /* First, we compute the biggest format that can be used with the
    * given offsets and size.
    */
   int bs = 16;
   bs = gcd_pow2_u64(bs, dstOffset);
   bs = gcd_pow2_u64(bs, fillSize);
   enum isl_format isl_format = isl_format_for_size(bs);
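
   /* Example (illustration, not in the original source): dstOffset = 12 and
    * fillSize = 120 give bs = gcd_pow2(gcd_pow2(16, 12), 120) =
    * gcd_pow2(4, 120) = 4, i.e. ISL_FORMAT_R32_UINT; 16-byte aligned
    * offsets and sizes would instead use R32G32B32A32_UINT and fill four
    * dwords per pixel.
    */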

   union isl_color_value color = {
      .u32 = { data, data, data, data },
   };

   const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
   while (fillSize >= max_fill_size) {
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                  color, NULL);
      fillSize -= max_fill_size;
      dstOffset += max_fill_size;
   }

   uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
   assert(height < MAX_SURFACE_DIM);
   if (height != 0) {
      const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, height,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
                  color, NULL);
      fillSize -= rect_fill_size;
      dstOffset += rect_fill_size;
   }

   if (fillSize != 0) {
      const uint32_t width = fillSize / bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    width, 1,
                                    width * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, width, 1,
                  color, NULL);
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearColorImage(
    VkCommandBuffer commandBuffer,
    VkImage _image,
    VkImageLayout imageLayout,
    const VkClearColorValue* pColor,
    uint32_t rangeCount,
    const VkImageSubresourceRange* pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, _image);

   static const bool color_write_disable[4] = { false, false, false, false };

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      assert(pRanges[r].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);

      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, pRanges[r].aspectMask,
                                   ANV_AUX_USAGE_DEFAULT, &surf);

      struct anv_format_plane src_format =
         anv_get_format_plane(&cmd_buffer->device->info, image->vk_format,
                              VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D) {
            base_layer = 0;
            layer_count = anv_minify(image->extent.depth, level);
         }

         blorp_clear(&batch, &surf,
                     src_format.isl_format, src_format.swizzle,
                     level, base_layer, layer_count,
                     0, 0, level_width, level_height,
                     vk_to_isl_color(*pColor), color_write_disable);
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearDepthStencilImage(
    VkCommandBuffer commandBuffer,
    VkImage image_h,
    VkImageLayout imageLayout,
    const VkClearDepthStencilValue* pDepthStencil,
    uint32_t rangeCount,
    const VkImageSubresourceRange* pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, image_h);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth, stencil;
   if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   ISL_AUX_USAGE_NONE, &depth);
   } else {
      memset(&depth, 0, sizeof(depth));
   }

   if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   ISL_AUX_USAGE_NONE, &stencil);
   } else {
      memset(&stencil, 0, sizeof(stencil));
   }

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
      bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D)
            layer_count = anv_minify(image->extent.depth, level);

         blorp_clear_depth_stencil(&batch, &depth, &stencil,
                                   level, base_layer, layer_count,
                                   0, 0, level_width, level_height,
                                   clear_depth, pDepthStencil->depth,
                                   clear_stencil ? 0xff : 0,
                                   pDepthStencil->stencil);
      }
   }

   blorp_batch_finish(&batch);
}

VkResult
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset,
                                         struct anv_state *bt_state)
{
   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                  state_offset);
   if (bt_state->map == NULL) {
      /* We ran out of space.  Grab a new binding table block. */
      VkResult result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                     state_offset);
      assert(bt_state->map != NULL);
   }

   return VK_SUCCESS;
}

static VkResult
binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_state surface_state,
                                uint32_t *bt_offset)
{
   uint32_t state_offset;
   struct anv_state bt_state;

   VkResult result =
      anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset,
                                               &bt_state);
   if (result != VK_SUCCESS)
      return result;

   uint32_t *bt_map = bt_state.map;
   bt_map[0] = surface_state.offset + state_offset;

   *bt_offset = bt_state.offset;
   return VK_SUCCESS;
}

static void
clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       const VkClearAttachment *attachment,
                       uint32_t rectCount, const VkClearRect *pRects)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t color_att = attachment->colorAttachment;
   const uint32_t att_idx = subpass->color_attachments[color_att].attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att_idx];

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer, att_state->color.state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   union isl_color_value clear_color =
      vk_to_isl_color(attachment->clearValue.color);

   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            blorp_clear_attachments(batch, binding_table,
                                    ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    true, clear_color, false, 0.0f, 0, 0);
         }
      }
      return;
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      assert(pRects[r].layerCount != VK_REMAINING_ARRAY_LAYERS);
      blorp_clear_attachments(batch, binding_table,
                              ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              true, clear_color, false, 0.0f, 0, 0);
   }
}

static void
clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
                               struct blorp_batch *batch,
                               const VkClearAttachment *attachment,
                               uint32_t rectCount, const VkClearRect *pRects)
{
   static const union isl_color_value color_value = { .u32 = { 0, } };
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t att_idx = subpass->depth_stencil_attachment.attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];

   bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
   bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

   enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
   if (clear_depth) {
      depth_format = anv_get_isl_format(&cmd_buffer->device->info,
                                        pass_att->format,
                                        VK_IMAGE_ASPECT_DEPTH_BIT,
                                        VK_IMAGE_TILING_OPTIMAL);
   }

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer,
                                      cmd_buffer->state.null_surface_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
            blorp_clear_attachments(batch, binding_table,
                                    depth_format, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    false, color_value,
                                    clear_depth, value.depth,
                                    clear_stencil ? 0xff : 0, value.stencil);
         }
      }
      return;
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
      assert(pRects[r].layerCount != VK_REMAINING_ARRAY_LAYERS);
      blorp_clear_attachments(batch, binding_table,
                              depth_format, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              false, color_value,
                              clear_depth, value.depth,
                              clear_stencil ? 0xff : 0, value.stencil);
   }
}

void anv_CmdClearAttachments(
    VkCommandBuffer commandBuffer,
    uint32_t attachmentCount,
    const VkClearAttachment* pAttachments,
    uint32_t rectCount,
    const VkClearRect* pRects)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   for (uint32_t a = 0; a < attachmentCount; ++a) {
      if (pAttachments[a].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
         assert(pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
         clear_color_attachment(cmd_buffer, &batch,
                                &pAttachments[a],
                                rectCount, pRects);
      } else {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &pAttachments[a],
                                        rectCount, pRects);
      }
   }

   blorp_batch_finish(&batch);
}

enum subpass_stage {
   SUBPASS_STAGE_LOAD,
   SUBPASS_STAGE_DRAW,
   SUBPASS_STAGE_RESOLVE,
};

static bool
subpass_needs_clear(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;

   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      assert(a < cmd_state->pass->attachment_count);
      if (cmd_state->attachments[a].pending_clear_aspects) {
         return true;
      }
   }

   if (ds != VK_ATTACHMENT_UNUSED) {
      assert(ds < cmd_state->pass->attachment_count);
      if (cmd_state->attachments[ds].pending_clear_aspects)
         return true;
   }

   return false;
}

void
anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   const VkRect2D render_area = cmd_buffer->state.render_area;

   if (!subpass_needs_clear(cmd_buffer))
      return;

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   VkClearRect clear_rect = {
      .rect = cmd_buffer->state.render_area,
      .baseArrayLayer = 0,
      .layerCount = cmd_buffer->state.framebuffer->layers,
   };

   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      const uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      assert(a < cmd_state->pass->attachment_count);
      struct anv_attachment_state *att_state = &cmd_state->attachments[a];

      if (!att_state->pending_clear_aspects)
         continue;

      assert(att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);

      struct anv_image_view *iview = fb->attachments[a];
      const struct anv_image *image = iview->image;
      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_COLOR_BIT,
                                   att_state->aux_usage, &surf);

      if (att_state->fast_clear) {
         surf.clear_color = vk_to_isl_color(att_state->clear_value.color);

         /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
          *
          *    "After Render target fast clear, pipe-control with color cache
          *    write-flush must be issued before sending any DRAW commands on
          *    that render target."
          *
          * This comment is a bit cryptic and doesn't really tell you what's
          * going on or what's really needed.  It appears that fast clear ops
          * are not properly synchronized with other drawing.  This means that
          * we cannot have a fast clear operation in the pipe at the same time
          * as other regular drawing operations.  We need to use a
          * PIPE_CONTROL to ensure that the contents of the previous draw hit
          * the render target before we resolve and then use a second
          * PIPE_CONTROL after the resolve to ensure that it is completed
          * before any additional drawing occurs.
          */
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

         assert(image->n_planes == 1);
         blorp_fast_clear(&batch, &surf, iview->planes[0].isl.format,
                          iview->planes[0].isl.base_level,
                          iview->planes[0].isl.base_array_layer, fb->layers,
                          render_area.offset.x, render_area.offset.y,
                          render_area.offset.x + render_area.extent.width,
                          render_area.offset.y + render_area.extent.height);

         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
      } else {
         assert(image->n_planes == 1);
         blorp_clear(&batch, &surf, iview->planes[0].isl.format,
                     anv_swizzle_for_render(iview->planes[0].isl.swizzle),
                     iview->planes[0].isl.base_level,
                     iview->planes[0].isl.base_array_layer, fb->layers,
                     render_area.offset.x, render_area.offset.y,
                     render_area.offset.x + render_area.extent.width,
                     render_area.offset.y + render_area.extent.height,
                     vk_to_isl_color(att_state->clear_value.color), NULL);
      }

      att_state->pending_clear_aspects = 0;
   }

   const uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;
   assert(ds == VK_ATTACHMENT_UNUSED || ds < cmd_state->pass->attachment_count);

   if (ds != VK_ATTACHMENT_UNUSED &&
       cmd_state->attachments[ds].pending_clear_aspects) {

      VkClearAttachment clear_att = {
         .aspectMask = cmd_state->attachments[ds].pending_clear_aspects,
         .clearValue = cmd_state->attachments[ds].clear_value,
      };

      const uint8_t gen = cmd_buffer->device->info.gen;
      bool clear_with_hiz = gen >= 8 && cmd_state->attachments[ds].aux_usage ==
                            ISL_AUX_USAGE_HIZ;
      const struct anv_image_view *iview = fb->attachments[ds];

      if (clear_with_hiz) {
         const bool clear_depth = clear_att.aspectMask &
                                  VK_IMAGE_ASPECT_DEPTH_BIT;
         const bool clear_stencil = clear_att.aspectMask &
                                    VK_IMAGE_ASPECT_STENCIL_BIT;

         /* Check against restrictions for depth buffer clearing.  A great GPU
          * performance benefit isn't expected when using the HZ sequence for
          * stencil-only clears.  Therefore, we don't emit a HZ op sequence
          * for a stencil clear in addition to using the BLORP-fallback for
          * depth.
          */
         if (clear_depth) {
            if (!blorp_can_hiz_clear_depth(gen, iview->planes[0].isl.format,
                                           iview->image->samples,
                                           render_area.offset.x,
                                           render_area.offset.y,
                                           render_area.offset.x +
                                           render_area.extent.width,
                                           render_area.offset.y +
                                           render_area.extent.height)) {
               clear_with_hiz = false;
            } else if (clear_att.clearValue.depthStencil.depth !=
                       ANV_HZ_FC_VAL) {
               /* Don't enable fast depth clears for any color not equal to
                * ANV_HZ_FC_VAL.
                */
               clear_with_hiz = false;
            } else if (gen == 8 &&
                       anv_can_sample_with_hiz(&cmd_buffer->device->info,
                                               iview->image)) {
               /* Only gen9+ supports returning ANV_HZ_FC_VAL when sampling a
                * fast-cleared portion of a HiZ buffer.  Testing has revealed
                * that Gen8 only supports returning 0.0f.  Gens prior to gen8
                * do not support this feature at all.
                */
               clear_with_hiz = false;
            }
         }

         if (clear_with_hiz) {
            blorp_gen8_hiz_clear_attachments(&batch, iview->image->samples,
                                             render_area.offset.x,
                                             render_area.offset.y,
                                             render_area.offset.x +
                                             render_area.extent.width,
                                             render_area.offset.y +
                                             render_area.extent.height,
                                             clear_depth, clear_stencil,
                                             clear_att.clearValue.depthStencil.stencil);

            /* From the SKL PRM, Depth Buffer Clear:
             *
             *    Depth Buffer Clear Workaround
             *    Depth buffer clear pass using any of the methods (WM_STATE,
             *    3DSTATE_WM or 3DSTATE_WM_HZ_OP) must be followed by a
             *    PIPE_CONTROL command with DEPTH_STALL bit and Depth FLUSH
             *    bits “set” before starting to render.  DepthStall and
             *    DepthFlush are not needed between consecutive depth clear
             *    passes nor is it required if the depth-clear pass was done
             *    with “full_surf_clear” bit set in the 3DSTATE_WM_HZ_OP.
             */
            if (clear_depth) {
               cmd_buffer->state.pending_pipe_bits |=
                  ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | ANV_PIPE_DEPTH_STALL_BIT;
            }
         }
      }

      if (!clear_with_hiz) {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &clear_att, 1, &clear_rect);
      }

      cmd_state->attachments[ds].pending_clear_aspects = 0;
   }

   blorp_batch_finish(&batch);
}

static void
resolve_surface(struct blorp_batch *batch,
                struct blorp_surf *src_surf,
                uint32_t src_level, uint32_t src_layer,
                struct blorp_surf *dst_surf,
                uint32_t dst_level, uint32_t dst_layer,
                uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
                uint32_t width, uint32_t height)
{
   blorp_blit(batch,
              src_surf, src_level, src_layer,
              ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
              dst_surf, dst_level, dst_layer,
              ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
              src_x, src_y, src_x + width, src_y + height,
              dst_x, dst_y, dst_x + width, dst_y + height,
              0x2600 /* GL_NEAREST */, false, false);
}
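
/* Note (added commentary, not in the original source): passing
 * ISL_FORMAT_UNSUPPORTED to blorp_blit() above asks blorp to fall back to
 * each surface's own isl_surf format rather than overriding it, so the
 * resolve is format-preserving.
 */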

static void
resolve_image(struct anv_device *device,
              struct blorp_batch *batch,
              const struct anv_image *src_image,
              uint32_t src_level, uint32_t src_layer,
              const struct anv_image *dst_image,
              uint32_t dst_level, uint32_t dst_layer,
              VkImageAspectFlags aspect_mask,
              uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
              uint32_t width, uint32_t height)
{
   assert(src_image->type == VK_IMAGE_TYPE_2D);
   assert(src_image->samples > 1);
   assert(dst_image->type == VK_IMAGE_TYPE_2D);
   assert(dst_image->samples == 1);
   assert(src_image->n_planes == dst_image->n_planes);

   uint32_t aspect_bit;

   anv_foreach_image_aspect_bit(aspect_bit, src_image, aspect_mask) {
      struct blorp_surf src_surf, dst_surf;
      get_blorp_surf_for_anv_image(device, src_image, 1UL << aspect_bit,
                                   ANV_AUX_USAGE_DEFAULT, &src_surf);
      get_blorp_surf_for_anv_image(device, dst_image, 1UL << aspect_bit,
                                   ANV_AUX_USAGE_DEFAULT, &dst_surf);

      assert(!src_image->format->can_ycbcr);
      assert(!dst_image->format->can_ycbcr);

      resolve_surface(batch,
                      &src_surf, src_level, src_layer,
                      &dst_surf, dst_level, dst_layer,
                      src_x, src_y, dst_x, dst_y, width, height);
   }
}

void anv_CmdResolveImage(
    VkCommandBuffer commandBuffer,
    VkImage srcImage,
    VkImageLayout srcImageLayout,
    VkImage dstImage,
    VkImageLayout dstImageLayout,
    uint32_t regionCount,
    const VkImageResolve* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t r = 0; r < regionCount; r++) {
      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);
      assert(anv_get_layerCount(src_image, &pRegions[r].srcSubresource) ==
             anv_get_layerCount(dst_image, &pRegions[r].dstSubresource));

      const uint32_t layer_count =
         anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);

      VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
                         dst_mask = pRegions[r].dstSubresource.aspectMask;

      assert(anv_image_aspects_compatible(src_mask, dst_mask));

      for (uint32_t layer = 0; layer < layer_count; layer++) {
         resolve_image(cmd_buffer->device, &batch,
                       src_image,
                       pRegions[r].srcSubresource.mipLevel,
                       pRegions[r].srcSubresource.baseArrayLayer + layer,
                       dst_image,
                       pRegions[r].dstSubresource.mipLevel,
                       pRegions[r].dstSubresource.baseArrayLayer + layer,
                       pRegions[r].dstSubresource.aspectMask,
                       pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
                       pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
                       pRegions[r].extent.width, pRegions[r].extent.height);
      }
   }

   blorp_batch_finish(&batch);
}

static enum isl_aux_usage
fast_clear_aux_usage(const struct anv_image *image,
                     VkImageAspectFlagBits aspect)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   if (image->planes[plane].aux_usage == ISL_AUX_USAGE_NONE)
      return ISL_AUX_USAGE_CCS_D;
   else
      return image->planes[plane].aux_usage;
}
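
/* Note (added commentary, not in the original source): planes whose CCS is
 * only used for fast clears report ISL_AUX_USAGE_NONE as their default aux
 * usage, so we substitute ISL_AUX_USAGE_CCS_D here to make fast-clear and
 * resolve operations go through the CCS anyway.
 */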

void
anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;

   if (subpass->has_resolve) {
      struct blorp_batch batch;
      blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

      /* We are about to do some MSAA resolves.  We need to flush so that the
       * result of writes to the MSAA color attachments shows up in the
       * sampler when we blit to the single-sampled resolve target.
       */
      cmd_buffer->state.pending_pipe_bits |=
         ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
         ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;

      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t src_att = subpass->color_attachments[i].attachment;
         uint32_t dst_att = subpass->resolve_attachments[i].attachment;

         if (dst_att == VK_ATTACHMENT_UNUSED)
            continue;

         assert(src_att < cmd_buffer->state.pass->attachment_count);
         assert(dst_att < cmd_buffer->state.pass->attachment_count);

         if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
            /* From the Vulkan 1.0 spec:
             *
             *    If the first use of an attachment in a render pass is as a
             *    resolve attachment, then the loadOp is effectively ignored
             *    as the resolve is guaranteed to overwrite all pixels in the
             *    render area.
             */
            cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
         }

         struct anv_image_view *src_iview = fb->attachments[src_att];
         struct anv_image_view *dst_iview = fb->attachments[dst_att];

         enum isl_aux_usage src_aux_usage =
            cmd_buffer->state.attachments[src_att].aux_usage;
         enum isl_aux_usage dst_aux_usage =
            cmd_buffer->state.attachments[dst_att].aux_usage;

         const VkRect2D render_area = cmd_buffer->state.render_area;

         assert(src_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT &&
                dst_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT);

         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(cmd_buffer->device, src_iview->image,
                                      VK_IMAGE_ASPECT_COLOR_BIT,
                                      src_aux_usage, &src_surf);
         get_blorp_surf_for_anv_image(cmd_buffer->device, dst_iview->image,
                                      VK_IMAGE_ASPECT_COLOR_BIT,
                                      dst_aux_usage, &dst_surf);

         assert(!src_iview->image->format->can_ycbcr);
         assert(!dst_iview->image->format->can_ycbcr);

         resolve_surface(&batch,
                         &src_surf,
                         src_iview->planes[0].isl.base_level,
                         src_iview->planes[0].isl.base_array_layer,
                         &dst_surf,
                         dst_iview->planes[0].isl.base_level,
                         dst_iview->planes[0].isl.base_array_layer,
                         render_area.offset.x, render_area.offset.y,
                         render_area.offset.x, render_area.offset.y,
                         render_area.extent.width, render_area.extent.height);
      }

      blorp_batch_finish(&batch);
   }
}

void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
                         const struct anv_image *image,
                         uint32_t base_level, uint32_t level_count,
                         uint32_t base_layer, uint32_t layer_count)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT && image->n_planes == 1);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device,
                                image, VK_IMAGE_ASPECT_COLOR_BIT,
                                ISL_AUX_USAGE_NONE, &surf);

   struct blorp_surf shadow_surf = {
      .surf = &image->planes[0].shadow_surface.isl,
      .addr = {
         .buffer = image->planes[0].bo,
         .offset = image->planes[0].bo_offset +
                   image->planes[0].shadow_surface.offset,
         .mocs = cmd_buffer->device->default_mocs,
      },
   };

   for (uint32_t l = 0; l < level_count; l++) {
      const uint32_t level = base_level + l;

      const VkExtent3D extent = {
         .width = anv_minify(image->extent.width, level),
         .height = anv_minify(image->extent.height, level),
         .depth = anv_minify(image->extent.depth, level),
      };

      if (image->type == VK_IMAGE_TYPE_3D)
         layer_count = extent.depth;

      for (uint32_t a = 0; a < layer_count; a++) {
         const uint32_t layer = base_layer + a;

         blorp_copy(&batch, &surf, level, layer,
                    &shadow_surf, level, layer,
                    0, 0, 0, 0, extent.width, extent.height);
      }
   }

   blorp_batch_finish(&batch);
}

void
anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        enum blorp_hiz_op op)
{
   assert(image);

   assert(anv_image_aspect_to_plane(image->aspects,
                                    VK_IMAGE_ASPECT_DEPTH_BIT) == 0);

   /* Don't resolve depth buffers without an auxiliary HiZ buffer and
    * don't perform such a resolve on gens that don't support it.
    */
   if (cmd_buffer->device->info.gen < 8 ||
       image->planes[0].aux_usage != ISL_AUX_USAGE_HIZ)
      return;

   assert(op == BLORP_HIZ_OP_HIZ_RESOLVE ||
          op == BLORP_HIZ_OP_DEPTH_RESOLVE);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device,
                                image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                ISL_AUX_USAGE_HIZ, &surf);
   surf.clear_color.f32[0] = ANV_HZ_FC_VAL;

   blorp_hiz_op(&batch, &surf, 0, 0, 1, op);
   blorp_batch_finish(&batch);
}

void
anv_image_mcs_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 VkImageAspectFlagBits aspect,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op mcs_op, bool predicate)
{
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
   assert(image->samples > 1);
   assert(base_layer + layer_count <= anv_image_aux_layers(image, aspect, 0));

   /* Multisampling with multi-planar formats is not supported */
   assert(image->n_planes == 1);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    predicate ? BLORP_BATCH_PREDICATE_ENABLE : 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
                                ANV_AUX_USAGE_DEFAULT, &surf);

   /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
    *
    *    "After Render target fast clear, pipe-control with color cache
    *    write-flush must be issued before sending any DRAW commands on
    *    that render target."
    *
    * This comment is a bit cryptic and doesn't really tell you what's going
    * on or what's really needed.  It appears that fast clear ops are not
    * properly synchronized with other drawing.  This means that we cannot
    * have a fast clear operation in the pipe at the same time as other
    * regular drawing operations.  We need to use a PIPE_CONTROL to ensure
    * that the contents of the previous draw hit the render target before we
    * resolve and then use a second PIPE_CONTROL after the resolve to ensure
    * that it is completed before any additional drawing occurs.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   switch (mcs_op) {
   case ISL_AUX_OP_FAST_CLEAR:
      blorp_fast_clear(&batch, &surf, surf.surf->format,
                       0, base_layer, layer_count,
                       0, 0, image->extent.width, image->extent.height);
      break;
   case ISL_AUX_OP_FULL_RESOLVE:
   case ISL_AUX_OP_PARTIAL_RESOLVE:
   case ISL_AUX_OP_AMBIGUATE:
   default:
      unreachable("Unsupported MCS operation");
   }

   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   blorp_batch_finish(&batch);
}

static enum blorp_fast_clear_op
isl_to_blorp_fast_clear_op(enum isl_aux_op isl_op)
{
   switch (isl_op) {
   case ISL_AUX_OP_FAST_CLEAR:      return BLORP_FAST_CLEAR_OP_CLEAR;
   case ISL_AUX_OP_FULL_RESOLVE:    return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
   case ISL_AUX_OP_PARTIAL_RESOLVE: return BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
   default:
      unreachable("Unsupported CCS/MCS aux op");
   }
}

void
anv_image_ccs_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 VkImageAspectFlagBits aspect, uint32_t level,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op ccs_op, bool predicate)
{
   assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
   assert(image->samples == 1);
   assert(level < anv_image_aux_levels(image, aspect));
   /* Multi-LOD YcBcR is not allowed */
   assert(image->n_planes == 1 || level == 0);
   assert(base_layer + layer_count <=
          anv_image_aux_layers(image, aspect, level));

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   uint32_t width_div = image->format->planes[plane].denominator_scales[0];
   uint32_t height_div = image->format->planes[plane].denominator_scales[1];
   uint32_t level_width = anv_minify(image->extent.width, level) / width_div;
   uint32_t level_height = anv_minify(image->extent.height, level) / height_div;
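
   /* Example (illustration, not in the original source): for the chroma
    * plane of a 4:2:0 YCbCr format, denominator_scales is {2, 2}, so level 0
    * of a 256x256 image covers a 128x128 plane; single-plane color formats
    * have scales of {1, 1} and are unaffected.
    */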

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    predicate ? BLORP_BATCH_PREDICATE_ENABLE : 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
                                fast_clear_aux_usage(image, aspect),
                                &surf);

   if (ccs_op == ISL_AUX_OP_FULL_RESOLVE ||
       ccs_op == ISL_AUX_OP_PARTIAL_RESOLVE) {
      /* If we're doing a resolve operation, then we need the indirect clear
       * color.  The clear and ambiguate operations just stomp the CCS to a
       * particular value and don't care about format or clear value.
       */
      const struct anv_address clear_color_addr =
         anv_image_get_clear_color_addr(cmd_buffer->device, image,
                                        aspect, level);
      surf.clear_color_addr = anv_to_blorp_address(clear_color_addr);
   }

   /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
    *
    *    "After Render target fast clear, pipe-control with color cache
    *    write-flush must be issued before sending any DRAW commands on
    *    that render target."
    *
    * This comment is a bit cryptic and doesn't really tell you what's going
    * on or what's really needed.  It appears that fast clear ops are not
    * properly synchronized with other drawing.  This means that we cannot
    * have a fast clear operation in the pipe at the same time as other
    * regular drawing operations.  We need to use a PIPE_CONTROL to ensure
    * that the contents of the previous draw hit the render target before we
    * resolve and then use a second PIPE_CONTROL after the resolve to ensure
    * that it is completed before any additional drawing occurs.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   switch (ccs_op) {
   case ISL_AUX_OP_FAST_CLEAR:
      blorp_fast_clear(&batch, &surf, surf.surf->format,
                       level, base_layer, layer_count,
                       0, 0, level_width, level_height);
      break;
   case ISL_AUX_OP_FULL_RESOLVE:
   case ISL_AUX_OP_PARTIAL_RESOLVE:
      blorp_ccs_resolve(&batch, &surf, level, base_layer, layer_count,
                        surf.surf->format, isl_to_blorp_fast_clear_op(ccs_op));
      break;
   case ISL_AUX_OP_AMBIGUATE:
   default:
      unreachable("Unsupported CCS operation");
   }

   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   blorp_batch_finish(&batch);
}