anv/blorp: Delete a pointless assert
[mesa.git] / src / intel / vulkan / anv_blorp.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

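/* Callback used by blorp to look up a compiled shader in the device's
 * default pipeline cache.
 */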
static bool
lookup_blorp_shader(struct blorp_batch *batch,
                    const void *key, uint32_t key_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct blorp_context *blorp = batch->blorp;
   struct anv_device *device = blorp->driver_ctx;

   /* The default cache must be a real cache */
   assert(device->default_pipeline_cache.cache);

   struct anv_shader_bin *bin =
      anv_pipeline_cache_search(&device->default_pipeline_cache, key, key_size);
   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

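/* Callback used by blorp to insert a freshly compiled shader into the
 * device's default pipeline cache.
 */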
static bool
upload_blorp_shader(struct blorp_batch *batch,
                    const void *key, uint32_t key_size,
                    const void *kernel, uint32_t kernel_size,
                    const struct brw_stage_prog_data *prog_data,
                    uint32_t prog_data_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct blorp_context *blorp = batch->blorp;
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->default_pipeline_cache.cache);

   struct anv_pipeline_bind_map bind_map = {
      .surface_count = 0,
      .sampler_count = 0,
   };

   struct anv_shader_bin *bin =
      anv_pipeline_cache_upload_kernel(&device->default_pipeline_cache,
                                       key, key_size, kernel, kernel_size,
                                       NULL, 0,
                                       prog_data, prog_data_size,
                                       NULL, &bind_map);

   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

void
anv_device_init_blorp(struct anv_device *device)
{
   blorp_init(&device->blorp, device, &device->isl_dev);
   device->blorp.compiler = device->instance->physicalDevice.compiler;
   device->blorp.lookup_shader = lookup_blorp_shader;
   device->blorp.upload_shader = upload_blorp_shader;
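   /* Dispatch to the generation-specific blorp_exec implementation. */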
   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell) {
         device->blorp.exec = gen75_blorp_exec;
      } else {
         device->blorp.exec = gen7_blorp_exec;
      }
      break;
   case 8:
      device->blorp.exec = gen8_blorp_exec;
      break;
   case 9:
      device->blorp.exec = gen9_blorp_exec;
      break;
   case 10:
      device->blorp.exec = gen10_blorp_exec;
      break;
   case 11:
      device->blorp.exec = gen11_blorp_exec;
      break;
   default:
      unreachable("Unknown hardware generation");
   }
}

void
anv_device_finish_blorp(struct anv_device *device)
{
   blorp_finish(&device->blorp);
}

static void
get_blorp_surf_for_anv_buffer(struct anv_device *device,
                              struct anv_buffer *buffer, uint64_t offset,
                              uint32_t width, uint32_t height,
                              uint32_t row_pitch, enum isl_format format,
                              struct blorp_surf *blorp_surf,
                              struct isl_surf *isl_surf)
{
   const struct isl_format_layout *fmtl =
      isl_format_get_layout(format);
   bool ok UNUSED;

   /* ASTC is the only format which doesn't support linear layouts.
    * Create an equivalently sized surface with ISL to get around this.
    */
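   /* For example, a 64x64 region of an ASTC_8x8 image becomes an 8x8
    * R32G32B32A32_UINT surface: one texel per compressed block, at the
    * same 128 bits per block.
    */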
   if (fmtl->txc == ISL_TXC_ASTC) {
      /* Use an equivalently sized format */
      format = ISL_FORMAT_R32G32B32A32_UINT;
      assert(fmtl->bpb == isl_format_get_layout(format)->bpb);

      /* Shrink the dimensions for the new format */
      width = DIV_ROUND_UP(width, fmtl->bw);
      height = DIV_ROUND_UP(height, fmtl->bh);
   }

   *blorp_surf = (struct blorp_surf) {
      .surf = isl_surf,
      .addr = {
         .buffer = buffer->address.bo,
         .offset = buffer->address.offset + offset,
         .mocs = anv_mocs_for_bo(device, buffer->address.bo),
      },
   };

   ok = isl_surf_init(&device->isl_dev, isl_surf,
                      .dim = ISL_SURF_DIM_2D,
                      .format = format,
                      .width = width,
                      .height = height,
                      .depth = 1,
                      .levels = 1,
                      .array_len = 1,
                      .samples = 1,
                      .row_pitch_B = row_pitch,
                      .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                               ISL_SURF_USAGE_RENDER_TARGET_BIT,
                      .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(ok);
}

/* Pick something high enough that it won't be used in core and low enough it
 * will never map to an extension.
 */
#define ANV_IMAGE_LAYOUT_EXPLICIT_AUX (VkImageLayout)10000000

static struct blorp_address
anv_to_blorp_address(struct anv_address addr)
{
   return (struct blorp_address) {
      .buffer = addr.bo,
      .offset = addr.offset,
   };
}

static void
get_blorp_surf_for_anv_image(const struct anv_device *device,
                             const struct anv_image *image,
                             VkImageAspectFlags aspect,
                             VkImageLayout layout,
                             enum isl_aux_usage aux_usage,
                             struct blorp_surf *blorp_surf)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);

   if (layout != ANV_IMAGE_LAYOUT_EXPLICIT_AUX)
      aux_usage = anv_layout_to_aux_usage(&device->info, image, aspect, layout);

   const struct anv_surface *surface = &image->planes[plane].surface;
   *blorp_surf = (struct blorp_surf) {
      .surf = &surface->isl,
      .addr = {
         .buffer = image->planes[plane].address.bo,
         .offset = image->planes[plane].address.offset + surface->offset,
         .mocs = anv_mocs_for_bo(device, image->planes[plane].address.bo),
      },
   };

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      const struct anv_surface *aux_surface = &image->planes[plane].aux_surface;
      blorp_surf->aux_surf = &aux_surface->isl;
      blorp_surf->aux_addr = (struct blorp_address) {
         .buffer = image->planes[plane].address.bo,
         .offset = image->planes[plane].address.offset + aux_surface->offset,
         .mocs = anv_mocs_for_bo(device, image->planes[plane].address.bo),
      };
      blorp_surf->aux_usage = aux_usage;

      /* If we're doing a partial resolve, then we need the indirect clear
       * color.  If we are doing a fast clear and want to store/update the
       * clear color, we also pass the address to blorp; otherwise it will
       * only stomp the CCS to a particular value and won't care about the
       * format or clear value.
       */
      if (aspect & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
         const struct anv_address clear_color_addr =
            anv_image_get_clear_color_addr(device, image, aspect);
         blorp_surf->clear_color_addr = anv_to_blorp_address(clear_color_addr);
      } else if (aspect & VK_IMAGE_ASPECT_DEPTH_BIT
                 && device->info.gen >= 10) {
         /* Vulkan always clears to 1.0.  On gen < 10, we set that directly
          * in the state packet.  For gen >= 10, we must provide the clear
          * value in a buffer.  We have a single global buffer that stores
          * the 1.0 value.
          */
         const struct anv_address clear_color_addr = (struct anv_address) {
            .bo = (struct anv_bo *)&device->hiz_clear_bo
         };
         blorp_surf->clear_color_addr = anv_to_blorp_address(clear_color_addr);
      }
   }
}

void anv_CmdCopyImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageCopy*                          pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      VkOffset3D srcOffset =
         anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
      VkOffset3D dstOffset =
         anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
      VkExtent3D extent =
         anv_sanitize_image_extent(src_image->type, pRegions[r].extent);

      const uint32_t dst_level = pRegions[r].dstSubresource.mipLevel;
      unsigned dst_base_layer, layer_count;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         dst_base_layer = pRegions[r].dstOffset.z;
         layer_count = pRegions[r].extent.depth;
      } else {
         dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
         layer_count =
            anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);
      }

      const uint32_t src_level = pRegions[r].srcSubresource.mipLevel;
      unsigned src_base_layer;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         src_base_layer = pRegions[r].srcOffset.z;
      } else {
         src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
         assert(layer_count ==
                anv_get_layerCount(src_image, &pRegions[r].srcSubresource));
      }

      VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
                         dst_mask = pRegions[r].dstSubresource.aspectMask;

      assert(anv_image_aspects_compatible(src_mask, dst_mask));

      if (util_bitcount(src_mask) > 1) {
         uint32_t aspect_bit;
         anv_foreach_image_aspect_bit(aspect_bit, src_image, src_mask) {
            struct blorp_surf src_surf, dst_surf;
            get_blorp_surf_for_anv_image(cmd_buffer->device,
                                         src_image, 1UL << aspect_bit,
                                         srcImageLayout, ISL_AUX_USAGE_NONE,
                                         &src_surf);
            get_blorp_surf_for_anv_image(cmd_buffer->device,
                                         dst_image, 1UL << aspect_bit,
                                         dstImageLayout, ISL_AUX_USAGE_NONE,
                                         &dst_surf);
            anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image,
                                              1UL << aspect_bit,
                                              dst_surf.aux_usage, dst_level,
                                              dst_base_layer, layer_count);

            for (unsigned i = 0; i < layer_count; i++) {
               blorp_copy(&batch, &src_surf, src_level, src_base_layer + i,
                          &dst_surf, dst_level, dst_base_layer + i,
                          srcOffset.x, srcOffset.y,
                          dstOffset.x, dstOffset.y,
                          extent.width, extent.height);
            }
         }
      } else {
         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(cmd_buffer->device, src_image, src_mask,
                                      srcImageLayout, ISL_AUX_USAGE_NONE,
                                      &src_surf);
         get_blorp_surf_for_anv_image(cmd_buffer->device, dst_image, dst_mask,
                                      dstImageLayout, ISL_AUX_USAGE_NONE,
                                      &dst_surf);
         anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image, dst_mask,
                                           dst_surf.aux_usage, dst_level,
                                           dst_base_layer, layer_count);

         for (unsigned i = 0; i < layer_count; i++) {
            blorp_copy(&batch, &src_surf, src_level, src_base_layer + i,
                       &dst_surf, dst_level, dst_base_layer + i,
                       srcOffset.x, srcOffset.y,
                       dstOffset.x, dstOffset.y,
                       extent.width, extent.height);
         }
      }
   }

   blorp_batch_finish(&batch);
}

static void
copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_buffer *anv_buffer,
                     struct anv_image *anv_image,
                     VkImageLayout image_layout,
                     uint32_t regionCount,
                     const VkBufferImageCopy* pRegions,
                     bool buffer_to_image)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

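   /* Describe both ends of the copy with the same anonymous struct and point
    * src/dst at them, so a single copy loop below handles both directions of
    * the transfer.
    */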
   struct {
      struct blorp_surf surf;
      uint32_t level;
      VkOffset3D offset;
   } image, buffer, *src, *dst;

   buffer.level = 0;
   buffer.offset = (VkOffset3D) { 0, 0, 0 };

   if (buffer_to_image) {
      src = &buffer;
      dst = &image;
   } else {
      src = &image;
      dst = &buffer;
   }

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;

      get_blorp_surf_for_anv_image(cmd_buffer->device, anv_image, aspect,
                                   image_layout, ISL_AUX_USAGE_NONE,
                                   &image.surf);
      image.offset =
         anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
      image.level = pRegions[r].imageSubresource.mipLevel;

      VkExtent3D extent =
         anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
      if (anv_image->type != VK_IMAGE_TYPE_3D) {
         image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
         extent.depth =
            anv_get_layerCount(anv_image, &pRegions[r].imageSubresource);
      }

      const enum isl_format buffer_format =
         anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
                            aspect, VK_IMAGE_TILING_LINEAR);

      const VkExtent3D bufferImageExtent = {
         .width  = pRegions[r].bufferRowLength ?
                   pRegions[r].bufferRowLength : extent.width,
         .height = pRegions[r].bufferImageHeight ?
                   pRegions[r].bufferImageHeight : extent.height,
      };

      const struct isl_format_layout *buffer_fmtl =
         isl_format_get_layout(buffer_format);

      const uint32_t buffer_row_pitch =
         DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
         (buffer_fmtl->bpb / 8);

      const uint32_t buffer_layer_stride =
         DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
         buffer_row_pitch;

      struct isl_surf buffer_isl_surf;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    anv_buffer, pRegions[r].bufferOffset,
                                    extent.width, extent.height,
                                    buffer_row_pitch, buffer_format,
                                    &buffer.surf, &buffer_isl_surf);

      if (&image == dst) {
         anv_cmd_buffer_mark_image_written(cmd_buffer, anv_image,
                                           aspect, dst->surf.aux_usage,
                                           dst->level,
                                           dst->offset.z, extent.depth);
      }

      for (unsigned z = 0; z < extent.depth; z++) {
         blorp_copy(&batch, &src->surf, src->level, src->offset.z,
                    &dst->surf, dst->level, dst->offset.z,
                    src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
                    extent.width, extent.height);

         image.offset.z++;
         buffer.surf.addr.offset += buffer_layer_stride;
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdCopyBufferToImage(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   copy_buffer_to_image(cmd_buffer, src_buffer, dst_image, dstImageLayout,
                        regionCount, pRegions, true);
}

void anv_CmdCopyImageToBuffer(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   copy_buffer_to_image(cmd_buffer, dst_buffer, src_image, srcImageLayout,
                        regionCount, pRegions, false);

   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_BUFFER_WRITES;
}

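/* Sort each coordinate pair into ascending order, returning whether the
 * combined src/dst swaps leave the blit mirrored along this axis.
 */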
static bool
flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
{
   bool flip = false;
   if (*src0 > *src1) {
      unsigned tmp = *src0;
      *src0 = *src1;
      *src1 = tmp;
      flip = !flip;
   }

   if (*dst0 > *dst1) {
      unsigned tmp = *dst0;
      *dst0 = *dst1;
      *dst1 = tmp;
      flip = !flip;
   }

   return flip;
}

void anv_CmdBlitImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageBlit*                          pRegions,
    VkFilter                                    filter)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_surf src, dst;

   enum blorp_filter blorp_filter;
   switch (filter) {
   case VK_FILTER_NEAREST:
      blorp_filter = BLORP_FILTER_NEAREST;
      break;
   case VK_FILTER_LINEAR:
      blorp_filter = BLORP_FILTER_BILINEAR;
      break;
   default:
      unreachable("Invalid filter");
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
      const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;

      assert(anv_image_aspects_compatible(src_res->aspectMask,
                                          dst_res->aspectMask));

      uint32_t aspect_bit;
      anv_foreach_image_aspect_bit(aspect_bit, src_image, src_res->aspectMask) {
         get_blorp_surf_for_anv_image(cmd_buffer->device,
                                      src_image, 1U << aspect_bit,
                                      srcImageLayout, ISL_AUX_USAGE_NONE, &src);
         get_blorp_surf_for_anv_image(cmd_buffer->device,
                                      dst_image, 1U << aspect_bit,
                                      dstImageLayout, ISL_AUX_USAGE_NONE, &dst);

         struct anv_format_plane src_format =
            anv_get_format_plane(&cmd_buffer->device->info, src_image->vk_format,
                                 1U << aspect_bit, src_image->tiling);
         struct anv_format_plane dst_format =
            anv_get_format_plane(&cmd_buffer->device->info, dst_image->vk_format,
                                 1U << aspect_bit, dst_image->tiling);

         unsigned dst_start, dst_end;
         if (dst_image->type == VK_IMAGE_TYPE_3D) {
            assert(dst_res->baseArrayLayer == 0);
            dst_start = pRegions[r].dstOffsets[0].z;
            dst_end = pRegions[r].dstOffsets[1].z;
         } else {
            dst_start = dst_res->baseArrayLayer;
            dst_end = dst_start + anv_get_layerCount(dst_image, dst_res);
         }

         unsigned src_start, src_end;
         if (src_image->type == VK_IMAGE_TYPE_3D) {
            assert(src_res->baseArrayLayer == 0);
            src_start = pRegions[r].srcOffsets[0].z;
            src_end = pRegions[r].srcOffsets[1].z;
         } else {
            src_start = src_res->baseArrayLayer;
            src_end = src_start + anv_get_layerCount(src_image, src_res);
         }

         bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
         float src_z_step = (float)(src_end + 1 - src_start) /
                            (float)(dst_end + 1 - dst_start);

         if (flip_z) {
            src_start = src_end;
            src_z_step *= -1;
         }

         unsigned src_x0 = pRegions[r].srcOffsets[0].x;
         unsigned src_x1 = pRegions[r].srcOffsets[1].x;
         unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
         unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
         bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);

         unsigned src_y0 = pRegions[r].srcOffsets[0].y;
         unsigned src_y1 = pRegions[r].srcOffsets[1].y;
         unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
         unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
         bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);

         const unsigned num_layers = dst_end - dst_start;
         anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image,
                                           1U << aspect_bit,
                                           dst.aux_usage,
                                           dst_res->mipLevel,
                                           dst_start, num_layers);

         for (unsigned i = 0; i < num_layers; i++) {
            unsigned dst_z = dst_start + i;
            unsigned src_z = src_start + i * src_z_step;

            blorp_blit(&batch, &src, src_res->mipLevel, src_z,
                       src_format.isl_format, src_format.swizzle,
                       &dst, dst_res->mipLevel, dst_z,
                       dst_format.isl_format, dst_format.swizzle,
                       src_x0, src_y0, src_x1, src_y1,
                       dst_x0, dst_y0, dst_x1, dst_y1,
                       blorp_filter, flip_x, flip_y);
         }
      }
   }

   blorp_batch_finish(&batch);
}

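/* Map a power-of-two block size in bytes onto a plain uint format of the
 * same size, so raw buffer memory can be copied or filled through blorp.
 */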
static enum isl_format
isl_format_for_size(unsigned size_B)
{
   switch (size_B) {
   case 4:  return ISL_FORMAT_R32_UINT;
   case 8:  return ISL_FORMAT_R32G32_UINT;
   case 16: return ISL_FORMAT_R32G32B32A32_UINT;
   default:
      unreachable("Not a power-of-two format size");
   }
}

/**
 * Returns the greatest common divisor of a and b that is a power of two.
 */
static uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX in which
    * case the MIN2() will take the other one.  If both are 0 then we will
    * hit the assert above.
    */
   return 1 << MIN2(a_log2, b_log2);
}

/* This is the maximum possible width/height our HW can handle */
#define MAX_SURFACE_DIM (1ull << 14)

void anv_CmdCopyBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferCopy*                         pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      struct blorp_address src = {
         .buffer = src_buffer->address.bo,
         .offset = src_buffer->address.offset + pRegions[r].srcOffset,
         .mocs = anv_mocs_for_bo(cmd_buffer->device, src_buffer->address.bo),
      };
      struct blorp_address dst = {
         .buffer = dst_buffer->address.bo,
         .offset = dst_buffer->address.offset + pRegions[r].dstOffset,
         .mocs = anv_mocs_for_bo(cmd_buffer->device, dst_buffer->address.bo),
      };

      blorp_buffer_copy(&batch, src, dst, pRegions[r].size);
   }

   blorp_batch_finish(&batch);

   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_BUFFER_WRITES;
}

void anv_CmdUpdateBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                dataSize,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* We can't quite grab a full block because the state stream needs a
    * little data at the top to build its linked list.
    */
   const uint32_t max_update_size =
      cmd_buffer->device->dynamic_state_pool.block_size - 64;

   assert(max_update_size < MAX_SURFACE_DIM * 4);

   /* We're about to read data that was written from the CPU.  Flush the
    * texture cache so we don't get anything stale.
    */
   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;

   while (dataSize) {
      const uint32_t copy_size = MIN2(dataSize, max_update_size);

      struct anv_state tmp_data =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);

      memcpy(tmp_data.map, pData, copy_size);

      struct blorp_address src = {
         .buffer = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
         .offset = tmp_data.offset,
         .mocs = cmd_buffer->device->default_mocs,
      };
      struct blorp_address dst = {
         .buffer = dst_buffer->address.bo,
         .offset = dst_buffer->address.offset + dstOffset,
         .mocs = anv_mocs_for_bo(cmd_buffer->device, dst_buffer->address.bo),
      };

      blorp_buffer_copy(&batch, src, dst, copy_size);

      dataSize -= copy_size;
      dstOffset += copy_size;
      pData = (void *)pData + copy_size;
   }

   blorp_batch_finish(&batch);

   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_BUFFER_WRITES;
}

void anv_CmdFillBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                fillSize,
    uint32_t                                    data)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
   struct blorp_surf surf;
   struct isl_surf isl_surf;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   fillSize = anv_buffer_get_range(dst_buffer, dstOffset, fillSize);

   /* From the Vulkan spec:
    *
    *    "size is the number of bytes to fill, and must be either a multiple
    *    of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
    *    the buffer. If VK_WHOLE_SIZE is used and the remaining size of the
    *    buffer is not a multiple of 4, then the nearest smaller multiple is
    *    used."
    */
   fillSize &= ~3ull;

   /* First, we compute the biggest format that can be used with the
    * given offsets and size.
    */
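   /* For example, dstOffset = 12 and fillSize = 24 share no power-of-two
    * factor larger than 4, so bs ends up as 4 and the fill is done with
    * R32_UINT.
    */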
   int bs = 16;
   bs = gcd_pow2_u64(bs, dstOffset);
   bs = gcd_pow2_u64(bs, fillSize);
   enum isl_format isl_format = isl_format_for_size(bs);

   union isl_color_value color = {
      .u32 = { data, data, data, data },
   };

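   /* Fill in three passes: as many full MAX_SURFACE_DIM x MAX_SURFACE_DIM
    * rectangles as possible, then the remaining full rows, then whatever is
    * left as a single partial row.
    */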
   const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
   while (fillSize >= max_fill_size) {
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                  color, NULL);
      fillSize -= max_fill_size;
      dstOffset += max_fill_size;
   }

   uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
   assert(height < MAX_SURFACE_DIM);
   if (height != 0) {
      const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, height,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
                  color, NULL);
      fillSize -= rect_fill_size;
      dstOffset += rect_fill_size;
   }

   if (fillSize != 0) {
      const uint32_t width = fillSize / bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    width, 1,
                                    width * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, width, 1,
                  color, NULL);
   }

   blorp_batch_finish(&batch);

   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_BUFFER_WRITES;
}

void anv_CmdClearColorImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     _image,
    VkImageLayout                               imageLayout,
    const VkClearColorValue*                    pColor,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, _image);

   static const bool color_write_disable[4] = { false, false, false, false };

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      assert(pRanges[r].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);

      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, pRanges[r].aspectMask,
                                   imageLayout, ISL_AUX_USAGE_NONE, &surf);

      struct anv_format_plane src_format =
         anv_get_format_plane(&cmd_buffer->device->info, image->vk_format,
                              VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D) {
            base_layer = 0;
            layer_count = anv_minify(image->extent.depth, level);
         }

         anv_cmd_buffer_mark_image_written(cmd_buffer, image,
                                           pRanges[r].aspectMask,
                                           surf.aux_usage, level,
                                           base_layer, layer_count);

         blorp_clear(&batch, &surf,
                     src_format.isl_format, src_format.swizzle,
                     level, base_layer, layer_count,
                     0, 0, level_width, level_height,
                     vk_to_isl_color(*pColor), color_write_disable);
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearDepthStencilImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     image_h,
    VkImageLayout                               imageLayout,
    const VkClearDepthStencilValue*             pDepthStencil,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, image_h);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth, stencil;
   if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   imageLayout, ISL_AUX_USAGE_NONE, &depth);
   } else {
      memset(&depth, 0, sizeof(depth));
   }

   if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   imageLayout, ISL_AUX_USAGE_NONE, &stencil);
   } else {
      memset(&stencil, 0, sizeof(stencil));
   }

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
      bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D)
            layer_count = anv_minify(image->extent.depth, level);

         blorp_clear_depth_stencil(&batch, &depth, &stencil,
                                   level, base_layer, layer_count,
                                   0, 0, level_width, level_height,
                                   clear_depth, pDepthStencil->depth,
                                   clear_stencil ? 0xff : 0,
                                   pDepthStencil->stencil);
      }
   }

   blorp_batch_finish(&batch);
}

VkResult
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset,
                                         struct anv_state *bt_state)
{
   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                  state_offset);
   if (bt_state->map == NULL) {
      /* We ran out of space.  Grab a new binding table block. */
      VkResult result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                     state_offset);
      assert(bt_state->map != NULL);
   }

   return VK_SUCCESS;
}

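/* Build a single-entry binding table pointing at the given surface state;
 * blorp_clear_attachments() takes a binding table offset rather than a raw
 * surface state.
 */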
static VkResult
binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_state surface_state,
                                uint32_t *bt_offset)
{
   uint32_t state_offset;
   struct anv_state bt_state;

   VkResult result =
      anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset,
                                               &bt_state);
   if (result != VK_SUCCESS)
      return result;

   uint32_t *bt_map = bt_state.map;
   bt_map[0] = surface_state.offset + state_offset;

   *bt_offset = bt_state.offset;
   return VK_SUCCESS;
}

static void
clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       const VkClearAttachment *attachment,
                       uint32_t rectCount, const VkClearRect *pRects)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t color_att = attachment->colorAttachment;
   assert(color_att < subpass->color_count);
   const uint32_t att_idx = subpass->color_attachments[color_att].attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att_idx];

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer, att_state->color.state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   union isl_color_value clear_color =
      vk_to_isl_color(attachment->clearValue.color);

   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            blorp_clear_attachments(batch, binding_table,
                                    ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    true, clear_color, false, 0.0f, 0, 0);
         }
      }
      return;
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      assert(pRects[r].layerCount != VK_REMAINING_ARRAY_LAYERS);
      blorp_clear_attachments(batch, binding_table,
                              ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              true, clear_color, false, 0.0f, 0, 0);
   }
}

static void
clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
                               struct blorp_batch *batch,
                               const VkClearAttachment *attachment,
                               uint32_t rectCount, const VkClearRect *pRects)
{
   static const union isl_color_value color_value = { .u32 = { 0, } };
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t att_idx = subpass->depth_stencil_attachment->attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];

   bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
   bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

   enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
   if (clear_depth) {
      depth_format = anv_get_isl_format(&cmd_buffer->device->info,
                                        pass_att->format,
                                        VK_IMAGE_ASPECT_DEPTH_BIT,
                                        VK_IMAGE_TILING_OPTIMAL);
   }

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer,
                                      cmd_buffer->state.null_surface_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
            blorp_clear_attachments(batch, binding_table,
                                    depth_format, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    false, color_value,
                                    clear_depth, value.depth,
                                    clear_stencil ? 0xff : 0, value.stencil);
         }
      }
      return;
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
      assert(pRects[r].layerCount != VK_REMAINING_ARRAY_LAYERS);
      blorp_clear_attachments(batch, binding_table,
                              depth_format, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              false, color_value,
                              clear_depth, value.depth,
                              clear_stencil ? 0xff : 0, value.stencil);
   }
}

void anv_CmdClearAttachments(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    attachmentCount,
    const VkClearAttachment*                    pAttachments,
    uint32_t                                    rectCount,
    const VkClearRect*                          pRects)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   enum blorp_batch_flags flags = BLORP_BATCH_NO_EMIT_DEPTH_STENCIL;
   if (cmd_buffer->state.conditional_render_enabled) {
      anv_cmd_emit_conditional_render_predicate(cmd_buffer);
      flags |= BLORP_BATCH_PREDICATE_ENABLE;
   }
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, flags);

   for (uint32_t a = 0; a < attachmentCount; ++a) {
      if (pAttachments[a].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
         assert(pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
         clear_color_attachment(cmd_buffer, &batch,
                                &pAttachments[a],
                                rectCount, pRects);
      } else {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &pAttachments[a],
                                        rectCount, pRects);
      }
   }

   blorp_batch_finish(&batch);
}

enum subpass_stage {
   SUBPASS_STAGE_LOAD,
   SUBPASS_STAGE_DRAW,
   SUBPASS_STAGE_RESOLVE,
};

void
anv_image_msaa_resolve(struct anv_cmd_buffer *cmd_buffer,
                       const struct anv_image *src_image,
                       enum isl_aux_usage src_aux_usage,
                       uint32_t src_level, uint32_t src_base_layer,
                       const struct anv_image *dst_image,
                       enum isl_aux_usage dst_aux_usage,
                       uint32_t dst_level, uint32_t dst_base_layer,
                       VkImageAspectFlagBits aspect,
                       uint32_t src_x, uint32_t src_y,
                       uint32_t dst_x, uint32_t dst_y,
                       uint32_t width, uint32_t height,
                       uint32_t layer_count,
                       enum blorp_filter filter)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   assert(src_image->type == VK_IMAGE_TYPE_2D);
   assert(src_image->samples > 1);
   assert(dst_image->type == VK_IMAGE_TYPE_2D);
   assert(dst_image->samples == 1);
   assert(src_image->n_planes == dst_image->n_planes);
   assert(!src_image->format->can_ycbcr);
   assert(!dst_image->format->can_ycbcr);

   struct blorp_surf src_surf, dst_surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device, src_image, aspect,
                                ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                src_aux_usage, &src_surf);
   if (src_aux_usage == ISL_AUX_USAGE_MCS) {
      src_surf.clear_color_addr = anv_to_blorp_address(
         anv_image_get_clear_color_addr(cmd_buffer->device, src_image,
                                        VK_IMAGE_ASPECT_COLOR_BIT));
   }
   get_blorp_surf_for_anv_image(cmd_buffer->device, dst_image, aspect,
                                ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                dst_aux_usage, &dst_surf);
   anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image,
                                     aspect, dst_aux_usage,
                                     dst_level, dst_base_layer, layer_count);

   if (filter == BLORP_FILTER_NONE) {
      /* If no explicit filter is provided, then it's implied by the type of
       * the source image.
       */
      if ((src_surf.surf->usage & ISL_SURF_USAGE_DEPTH_BIT) ||
          (src_surf.surf->usage & ISL_SURF_USAGE_STENCIL_BIT) ||
          isl_format_has_int_channel(src_surf.surf->format)) {
         filter = BLORP_FILTER_SAMPLE_0;
      } else {
         filter = BLORP_FILTER_AVERAGE;
      }
   }

   for (uint32_t l = 0; l < layer_count; l++) {
      blorp_blit(&batch,
                 &src_surf, src_level, src_base_layer + l,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 &dst_surf, dst_level, dst_base_layer + l,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 src_x, src_y, src_x + width, src_y + height,
                 dst_x, dst_y, dst_x + width, dst_y + height,
                 filter, false, false);
   }

   blorp_batch_finish(&batch);
}

void anv_CmdResolveImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageResolve*                       pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   assert(!src_image->format->can_ycbcr);

   for (uint32_t r = 0; r < regionCount; r++) {
      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);
      assert(anv_get_layerCount(src_image, &pRegions[r].srcSubresource) ==
             anv_get_layerCount(dst_image, &pRegions[r].dstSubresource));

      const uint32_t layer_count =
         anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);

      uint32_t aspect_bit;
      anv_foreach_image_aspect_bit(aspect_bit, src_image,
                                   pRegions[r].srcSubresource.aspectMask) {
         enum isl_aux_usage src_aux_usage =
            anv_layout_to_aux_usage(&cmd_buffer->device->info, src_image,
                                    (1 << aspect_bit), srcImageLayout);
         enum isl_aux_usage dst_aux_usage =
            anv_layout_to_aux_usage(&cmd_buffer->device->info, dst_image,
                                    (1 << aspect_bit), dstImageLayout);

         anv_image_msaa_resolve(cmd_buffer,
                                src_image, src_aux_usage,
                                pRegions[r].srcSubresource.mipLevel,
                                pRegions[r].srcSubresource.baseArrayLayer,
                                dst_image, dst_aux_usage,
                                pRegions[r].dstSubresource.mipLevel,
                                pRegions[r].dstSubresource.baseArrayLayer,
                                (1 << aspect_bit),
                                pRegions[r].srcOffset.x,
                                pRegions[r].srcOffset.y,
                                pRegions[r].dstOffset.x,
                                pRegions[r].dstOffset.y,
                                pRegions[r].extent.width,
                                pRegions[r].extent.height,
                                layer_count, BLORP_FILTER_NONE);
      }
   }
}

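/* If the plane has no auxiliary usage of its own, fall back to CCS_D, which
 * supports fast clears only; otherwise use the aux usage the image was
 * created with.
 */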
static enum isl_aux_usage
fast_clear_aux_usage(const struct anv_image *image,
                     VkImageAspectFlagBits aspect)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   if (image->planes[plane].aux_usage == ISL_AUX_USAGE_NONE)
      return ISL_AUX_USAGE_CCS_D;
   else
      return image->planes[plane].aux_usage;
}

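/* Copy the given levels and layers from the image's main surface to its
 * shadow surface.
 */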
void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
                         const struct anv_image *image,
                         uint32_t base_level, uint32_t level_count,
                         uint32_t base_layer, uint32_t layer_count)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT && image->n_planes == 1);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device,
                                image, VK_IMAGE_ASPECT_COLOR_BIT,
                                VK_IMAGE_LAYOUT_GENERAL,
                                ISL_AUX_USAGE_NONE, &surf);
   assert(surf.aux_usage == ISL_AUX_USAGE_NONE);

   struct blorp_surf shadow_surf = {
      .surf = &image->planes[0].shadow_surface.isl,
      .addr = {
         .buffer = image->planes[0].address.bo,
         .offset = image->planes[0].address.offset +
                   image->planes[0].shadow_surface.offset,
         .mocs = anv_mocs_for_bo(cmd_buffer->device,
                                 image->planes[0].address.bo),
      },
   };

   for (uint32_t l = 0; l < level_count; l++) {
      const uint32_t level = base_level + l;

      const VkExtent3D extent = {
         .width = anv_minify(image->extent.width, level),
         .height = anv_minify(image->extent.height, level),
         .depth = anv_minify(image->extent.depth, level),
      };

      if (image->type == VK_IMAGE_TYPE_3D)
         layer_count = extent.depth;

      for (uint32_t a = 0; a < layer_count; a++) {
         const uint32_t layer = base_layer + a;

         blorp_copy(&batch, &surf, level, layer,
                    &shadow_surf, level, layer,
                    0, 0, 0, 0, extent.width, extent.height);
      }
   }

   blorp_batch_finish(&batch);
}

void
anv_image_clear_color(struct anv_cmd_buffer *cmd_buffer,
                      const struct anv_image *image,
                      VkImageAspectFlagBits aspect,
                      enum isl_aux_usage aux_usage,
                      enum isl_format format, struct isl_swizzle swizzle,
                      uint32_t level, uint32_t base_layer, uint32_t layer_count,
                      VkRect2D area, union isl_color_value clear_color)
{
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);

   /* We don't support planar images with multisampling yet */
   assert(image->n_planes == 1);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
                                ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                aux_usage, &surf);
   anv_cmd_buffer_mark_image_written(cmd_buffer, image, aspect, aux_usage,
                                     level, base_layer, layer_count);

   blorp_clear(&batch, &surf, format, anv_swizzle_for_render(swizzle),
               level, base_layer, layer_count,
               area.offset.x, area.offset.y,
               area.offset.x + area.extent.width,
               area.offset.y + area.extent.height,
               clear_color, NULL);

   blorp_batch_finish(&batch);
}

void
anv_image_clear_depth_stencil(struct anv_cmd_buffer *cmd_buffer,
                              const struct anv_image *image,
                              VkImageAspectFlags aspects,
                              enum isl_aux_usage depth_aux_usage,
                              uint32_t level,
                              uint32_t base_layer, uint32_t layer_count,
                              VkRect2D area,
                              float depth_value, uint8_t stencil_value)
{
   assert(image->aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
                            VK_IMAGE_ASPECT_STENCIL_BIT));

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth = {};
   if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                   depth_aux_usage, &depth);
      depth.clear_color.f32[0] = ANV_HZ_FC_VAL;
   }

   struct blorp_surf stencil = {};
   if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                   ISL_AUX_USAGE_NONE, &stencil);
   }

   blorp_clear_depth_stencil(&batch, &depth, &stencil,
                             level, base_layer, layer_count,
                             area.offset.x, area.offset.y,
                             area.offset.x + area.extent.width,
                             area.offset.y + area.extent.height,
                             aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
                             depth_value,
                             (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) ? 0xff : 0,
                             stencil_value);

   blorp_batch_finish(&batch);
}

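/* Run a HiZ operation (e.g. a resolve or an ambiguate) on the depth aspect
 * of the given image.
 */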
void
anv_image_hiz_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 VkImageAspectFlagBits aspect, uint32_t level,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op hiz_op)
{
   assert(aspect == VK_IMAGE_ASPECT_DEPTH_BIT);
   assert(base_layer + layer_count <=
          anv_image_aux_layers(image, aspect, level));
   assert(anv_image_aspect_to_plane(image->aspects,
                                    VK_IMAGE_ASPECT_DEPTH_BIT) == 0);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device,
                                image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                ISL_AUX_USAGE_HIZ, &surf);
   surf.clear_color.f32[0] = ANV_HZ_FC_VAL;

   blorp_hiz_op(&batch, &surf, level, base_layer, layer_count, hiz_op);

   blorp_batch_finish(&batch);
}

void
anv_image_hiz_clear(struct anv_cmd_buffer *cmd_buffer,
                    const struct anv_image *image,
                    VkImageAspectFlags aspects,
                    uint32_t level,
                    uint32_t base_layer, uint32_t layer_count,
                    VkRect2D area, uint8_t stencil_value)
{
   assert(image->aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
                            VK_IMAGE_ASPECT_STENCIL_BIT));

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth = {};
   if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      assert(base_layer + layer_count <=
             anv_image_aux_layers(image, VK_IMAGE_ASPECT_DEPTH_BIT, level));
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                   ISL_AUX_USAGE_HIZ, &depth);
      depth.clear_color.f32[0] = ANV_HZ_FC_VAL;
   }

   struct blorp_surf stencil = {};
   if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                   ISL_AUX_USAGE_NONE, &stencil);
   }

   /* From the Sky Lake PRM Volume 7, "Depth Buffer Clear":
    *
    *    "The following is required when performing a depth buffer clear with
    *    using the WM_STATE or 3DSTATE_WM:
    *
    *       * If other rendering operations have preceded this clear, a
    *         PIPE_CONTROL with depth cache flush enabled, Depth Stall bit
    *         enabled must be issued before the rectangle primitive used for
    *         the depth buffer clear operation.
    *       * [...]"
    *
    * Even though the PRM only says that this is required if using 3DSTATE_WM
    * and a 3DPRIMITIVE, the GPU appears to also need this to avoid occasional
    * hangs when doing a clear with WM_HZ_OP.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | ANV_PIPE_DEPTH_STALL_BIT;

   blorp_hiz_clear_depth_stencil(&batch, &depth, &stencil,
                                 level, base_layer, layer_count,
                                 area.offset.x, area.offset.y,
                                 area.offset.x + area.extent.width,
                                 area.offset.y + area.extent.height,
                                 aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
                                 ANV_HZ_FC_VAL,
                                 aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
                                 stencil_value);

   blorp_batch_finish(&batch);

   /* From the SKL PRM, Depth Buffer Clear:
    *
    *    "Depth Buffer Clear Workaround
    *
    *    Depth buffer clear pass using any of the methods (WM_STATE,
    *    3DSTATE_WM or 3DSTATE_WM_HZ_OP) must be followed by a PIPE_CONTROL
    *    command with DEPTH_STALL bit and Depth FLUSH bits “set” before
    *    starting to render.  DepthStall and DepthFlush are not needed between
    *    consecutive depth clear passes nor is it required if the depth-clear
    *    pass was done with “full_surf_clear” bit set in the
    *    3DSTATE_WM_HZ_OP."
    *
    * Even though the PRM provides a bunch of conditions under which this is
    * supposedly unnecessary, we choose to perform the flush unconditionally
    * just to be safe.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | ANV_PIPE_DEPTH_STALL_BIT;
}

void
anv_image_mcs_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 enum isl_format format,
                 VkImageAspectFlagBits aspect,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op mcs_op, union isl_color_value *clear_value,
                 bool predicate)
{
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
   assert(image->samples > 1);
   assert(base_layer + layer_count <= anv_image_aux_layers(image, aspect, 0));

   /* Multisampling with multi-planar formats is not supported */
   assert(image->n_planes == 1);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    predicate ? BLORP_BATCH_PREDICATE_ENABLE : 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
                                ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                ISL_AUX_USAGE_MCS, &surf);

   /* Blorp will store the clear color for us if we provide the clear color
    * address and we are doing a fast clear.  So we save the clear value into
    * the blorp surface.  However, in some situations we want to do a fast
    * clear without changing the clear value stored in the state buffer.  For
    * those cases, we set the clear color address pointer to NULL, so blorp
    * will not try to store a garbage color.
    */
   if (mcs_op == ISL_AUX_OP_FAST_CLEAR) {
      if (clear_value)
         surf.clear_color = *clear_value;
      else
         surf.clear_color_addr.buffer = NULL;
   }

   /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
    *
    *    "After Render target fast clear, pipe-control with color cache
    *    write-flush must be issued before sending any DRAW commands on
    *    that render target."
    *
    * This comment is a bit cryptic and doesn't really tell you what's going
    * on or what's really needed.  It appears that fast clear ops are not
    * properly synchronized with other drawing.  This means that we cannot
    * have a fast clear operation in the pipe at the same time as other
    * regular drawing operations.  We need to use a PIPE_CONTROL to ensure
    * that the contents of the previous draw hit the render target before we
    * resolve and then use a second PIPE_CONTROL after the resolve to ensure
    * that it is completed before any additional drawing occurs.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   switch (mcs_op) {
   case ISL_AUX_OP_FAST_CLEAR:
      blorp_fast_clear(&batch, &surf, format,
                       0, base_layer, layer_count,
                       0, 0, image->extent.width, image->extent.height);
      break;
   case ISL_AUX_OP_PARTIAL_RESOLVE:
      blorp_mcs_partial_resolve(&batch, &surf, format,
                                base_layer, layer_count);
      break;
   case ISL_AUX_OP_FULL_RESOLVE:
   case ISL_AUX_OP_AMBIGUATE:
   default:
      unreachable("Unsupported MCS operation");
   }

   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   blorp_batch_finish(&batch);
}

void
anv_image_ccs_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 enum isl_format format,
                 VkImageAspectFlagBits aspect, uint32_t level,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op ccs_op, union isl_color_value *clear_value,
                 bool predicate)
{
   assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
   assert(image->samples == 1);
   assert(level < anv_image_aux_levels(image, aspect));
   /* Multi-LOD YCbCr is not allowed */
   assert(image->n_planes == 1 || level == 0);
   assert(base_layer + layer_count <=
          anv_image_aux_layers(image, aspect, level));

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   uint32_t width_div = image->format->planes[plane].denominator_scales[0];
   uint32_t height_div = image->format->planes[plane].denominator_scales[1];
   uint32_t level_width = anv_minify(image->extent.width, level) / width_div;
   uint32_t level_height = anv_minify(image->extent.height, level) / height_div;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    predicate ? BLORP_BATCH_PREDICATE_ENABLE : 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
                                ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
                                fast_clear_aux_usage(image, aspect),
                                &surf);

   /* Blorp will store the clear color for us if we provide the clear color
    * address and we are doing a fast clear.  So we save the clear value into
    * the blorp surface.  However, in some situations we want to do a fast
    * clear without changing the clear value stored in the state buffer.  For
    * those cases, we set the clear color address pointer to NULL, so blorp
    * will not try to store a garbage color.
    */
   if (ccs_op == ISL_AUX_OP_FAST_CLEAR) {
      if (clear_value)
         surf.clear_color = *clear_value;
      else
         surf.clear_color_addr.buffer = NULL;
   }

   /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
    *
    *    "After Render target fast clear, pipe-control with color cache
    *    write-flush must be issued before sending any DRAW commands on
    *    that render target."
    *
    * This comment is a bit cryptic and doesn't really tell you what's going
    * on or what's really needed.  It appears that fast clear ops are not
    * properly synchronized with other drawing.  This means that we cannot
    * have a fast clear operation in the pipe at the same time as other
    * regular drawing operations.  We need to use a PIPE_CONTROL to ensure
    * that the contents of the previous draw hit the render target before we
    * resolve and then use a second PIPE_CONTROL after the resolve to ensure
    * that it is completed before any additional drawing occurs.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   switch (ccs_op) {
   case ISL_AUX_OP_FAST_CLEAR:
      blorp_fast_clear(&batch, &surf, format,
                       level, base_layer, layer_count,
                       0, 0, level_width, level_height);
      break;
   case ISL_AUX_OP_FULL_RESOLVE:
   case ISL_AUX_OP_PARTIAL_RESOLVE:
      blorp_ccs_resolve(&batch, &surf, level, base_layer, layer_count,
                        format, ccs_op);
      break;
   case ISL_AUX_OP_AMBIGUATE:
      for (uint32_t a = 0; a < layer_count; a++) {
         const uint32_t layer = base_layer + a;
         blorp_ccs_ambiguate(&batch, &surf, level, layer);
      }
      break;
   default:
      unreachable("Unsupported CCS operation");
   }

   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   blorp_batch_finish(&batch);
}