2 * Copyright © 2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 #include "anv_private.h"
27 lookup_blorp_shader(struct blorp_context
*blorp
,
28 const void *key
, uint32_t key_size
,
29 uint32_t *kernel_out
, void *prog_data_out
)
31 struct anv_device
*device
= blorp
->driver_ctx
;
33 /* The default cache must be a real cache */
34 assert(device
->default_pipeline_cache
.cache
);
36 struct anv_shader_bin
*bin
=
37 anv_pipeline_cache_search(&device
->default_pipeline_cache
, key
, key_size
);
41 /* The cache already has a reference and it's not going anywhere so there
42 * is no need to hold a second reference.
44 anv_shader_bin_unref(device
, bin
);
46 *kernel_out
= bin
->kernel
.offset
;
47 *(const struct brw_stage_prog_data
**)prog_data_out
= bin
->prog_data
;
53 upload_blorp_shader(struct blorp_context
*blorp
,
54 const void *key
, uint32_t key_size
,
55 const void *kernel
, uint32_t kernel_size
,
56 const struct brw_stage_prog_data
*prog_data
,
57 uint32_t prog_data_size
,
58 uint32_t *kernel_out
, void *prog_data_out
)
60 struct anv_device
*device
= blorp
->driver_ctx
;
62 /* The blorp cache must be a real cache */
63 assert(device
->default_pipeline_cache
.cache
);
65 struct anv_pipeline_bind_map bind_map
= {
70 struct anv_shader_bin
*bin
=
71 anv_pipeline_cache_upload_kernel(&device
->default_pipeline_cache
,
72 key
, key_size
, kernel
, kernel_size
,
74 prog_data
, prog_data_size
, &bind_map
);
79 /* The cache already has a reference and it's not going anywhere so there
80 * is no need to hold a second reference.
82 anv_shader_bin_unref(device
, bin
);
84 *kernel_out
= bin
->kernel
.offset
;
85 *(const struct brw_stage_prog_data
**)prog_data_out
= bin
->prog_data
;
91 anv_device_init_blorp(struct anv_device
*device
)
93 blorp_init(&device
->blorp
, device
, &device
->isl_dev
);
94 device
->blorp
.compiler
= device
->instance
->physicalDevice
.compiler
;
95 device
->blorp
.lookup_shader
= lookup_blorp_shader
;
96 device
->blorp
.upload_shader
= upload_blorp_shader
;
97 switch (device
->info
.gen
) {
99 if (device
->info
.is_haswell
) {
100 device
->blorp
.exec
= gen75_blorp_exec
;
102 device
->blorp
.exec
= gen7_blorp_exec
;
106 device
->blorp
.exec
= gen8_blorp_exec
;
109 device
->blorp
.exec
= gen9_blorp_exec
;
112 device
->blorp
.exec
= gen10_blorp_exec
;
115 device
->blorp
.exec
= gen11_blorp_exec
;
118 unreachable("Unknown hardware generation");
123 anv_device_finish_blorp(struct anv_device
*device
)
125 blorp_finish(&device
->blorp
);
129 get_blorp_surf_for_anv_buffer(struct anv_device
*device
,
130 struct anv_buffer
*buffer
, uint64_t offset
,
131 uint32_t width
, uint32_t height
,
132 uint32_t row_pitch
, enum isl_format format
,
133 struct blorp_surf
*blorp_surf
,
134 struct isl_surf
*isl_surf
)
136 const struct isl_format_layout
*fmtl
=
137 isl_format_get_layout(format
);
140 /* ASTC is the only format which doesn't support linear layouts.
141 * Create an equivalently sized surface with ISL to get around this.
143 if (fmtl
->txc
== ISL_TXC_ASTC
) {
144 /* Use an equivalently sized format */
145 format
= ISL_FORMAT_R32G32B32A32_UINT
;
146 assert(fmtl
->bpb
== isl_format_get_layout(format
)->bpb
);
148 /* Shrink the dimensions for the new format */
149 width
= DIV_ROUND_UP(width
, fmtl
->bw
);
150 height
= DIV_ROUND_UP(height
, fmtl
->bh
);
153 *blorp_surf
= (struct blorp_surf
) {
156 .buffer
= buffer
->address
.bo
,
157 .offset
= buffer
->address
.offset
+ offset
,
158 .mocs
= device
->default_mocs
,
162 ok
= isl_surf_init(&device
->isl_dev
, isl_surf
,
163 .dim
= ISL_SURF_DIM_2D
,
171 .row_pitch
= row_pitch
,
172 .usage
= ISL_SURF_USAGE_TEXTURE_BIT
|
173 ISL_SURF_USAGE_RENDER_TARGET_BIT
,
174 .tiling_flags
= ISL_TILING_LINEAR_BIT
);
178 /* Pick something high enough that it won't be used in core and low enough it
179 * will never map to an extension.
181 #define ANV_IMAGE_LAYOUT_EXPLICIT_AUX (VkImageLayout)10000000
183 static struct blorp_address
184 anv_to_blorp_address(struct anv_address addr
)
186 return (struct blorp_address
) {
188 .offset
= addr
.offset
,
193 get_blorp_surf_for_anv_image(const struct anv_device
*device
,
194 const struct anv_image
*image
,
195 VkImageAspectFlags aspect
,
196 VkImageLayout layout
,
197 enum isl_aux_usage aux_usage
,
198 struct blorp_surf
*blorp_surf
)
200 uint32_t plane
= anv_image_aspect_to_plane(image
->aspects
, aspect
);
202 if (layout
!= ANV_IMAGE_LAYOUT_EXPLICIT_AUX
)
203 aux_usage
= anv_layout_to_aux_usage(&device
->info
, image
, aspect
, layout
);
205 const struct anv_surface
*surface
= &image
->planes
[plane
].surface
;
206 *blorp_surf
= (struct blorp_surf
) {
207 .surf
= &surface
->isl
,
209 .buffer
= image
->planes
[plane
].address
.bo
,
210 .offset
= image
->planes
[plane
].address
.offset
+ surface
->offset
,
211 .mocs
= device
->default_mocs
,
215 if (aux_usage
!= ISL_AUX_USAGE_NONE
) {
216 const struct anv_surface
*aux_surface
= &image
->planes
[plane
].aux_surface
;
217 blorp_surf
->aux_surf
= &aux_surface
->isl
,
218 blorp_surf
->aux_addr
= (struct blorp_address
) {
219 .buffer
= image
->planes
[plane
].address
.bo
,
220 .offset
= image
->planes
[plane
].address
.offset
+ aux_surface
->offset
,
221 .mocs
= device
->default_mocs
,
223 blorp_surf
->aux_usage
= aux_usage
;
225 /* If we're doing a partial resolve, then we need the indirect clear
226 * color. If we are doing a fast clear and want to store/update the
227 * clear color, we also pass the address to blorp, otherwise it will only
228 * stomp the CCS to a particular value and won't care about format or
231 if (aspect
& VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV
) {
232 const struct anv_address clear_color_addr
=
233 anv_image_get_clear_color_addr(device
, image
, aspect
);
234 blorp_surf
->clear_color_addr
= anv_to_blorp_address(clear_color_addr
);
235 } else if (aspect
& VK_IMAGE_ASPECT_DEPTH_BIT
236 && device
->info
.gen
>= 10) {
237 /* Vulkan always clears to 1.0. On gen < 10, we set that directly in
238 * the state packet. For gen >= 10, must provide the clear value in a
239 * buffer. We have a single global buffer that stores the 1.0 value.
241 const struct anv_address clear_color_addr
= (struct anv_address
) {
242 .bo
= (struct anv_bo
*)&device
->hiz_clear_bo
244 blorp_surf
->clear_color_addr
= anv_to_blorp_address(clear_color_addr
);
249 void anv_CmdCopyImage(
250 VkCommandBuffer commandBuffer
,
252 VkImageLayout srcImageLayout
,
254 VkImageLayout dstImageLayout
,
255 uint32_t regionCount
,
256 const VkImageCopy
* pRegions
)
258 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
259 ANV_FROM_HANDLE(anv_image
, src_image
, srcImage
);
260 ANV_FROM_HANDLE(anv_image
, dst_image
, dstImage
);
262 struct blorp_batch batch
;
263 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
, 0);
265 for (unsigned r
= 0; r
< regionCount
; r
++) {
266 VkOffset3D srcOffset
=
267 anv_sanitize_image_offset(src_image
->type
, pRegions
[r
].srcOffset
);
268 VkOffset3D dstOffset
=
269 anv_sanitize_image_offset(dst_image
->type
, pRegions
[r
].dstOffset
);
271 anv_sanitize_image_extent(src_image
->type
, pRegions
[r
].extent
);
273 const uint32_t dst_level
= pRegions
[r
].dstSubresource
.mipLevel
;
274 unsigned dst_base_layer
, layer_count
;
275 if (dst_image
->type
== VK_IMAGE_TYPE_3D
) {
276 dst_base_layer
= pRegions
[r
].dstOffset
.z
;
277 layer_count
= pRegions
[r
].extent
.depth
;
279 dst_base_layer
= pRegions
[r
].dstSubresource
.baseArrayLayer
;
281 anv_get_layerCount(dst_image
, &pRegions
[r
].dstSubresource
);
284 const uint32_t src_level
= pRegions
[r
].srcSubresource
.mipLevel
;
285 unsigned src_base_layer
;
286 if (src_image
->type
== VK_IMAGE_TYPE_3D
) {
287 src_base_layer
= pRegions
[r
].srcOffset
.z
;
289 src_base_layer
= pRegions
[r
].srcSubresource
.baseArrayLayer
;
290 assert(layer_count
==
291 anv_get_layerCount(src_image
, &pRegions
[r
].srcSubresource
));
294 VkImageAspectFlags src_mask
= pRegions
[r
].srcSubresource
.aspectMask
,
295 dst_mask
= pRegions
[r
].dstSubresource
.aspectMask
;
297 assert(anv_image_aspects_compatible(src_mask
, dst_mask
));
299 if (_mesa_bitcount(src_mask
) > 1) {
301 anv_foreach_image_aspect_bit(aspect_bit
, src_image
, src_mask
) {
302 struct blorp_surf src_surf
, dst_surf
;
303 get_blorp_surf_for_anv_image(cmd_buffer
->device
,
304 src_image
, 1UL << aspect_bit
,
305 srcImageLayout
, ISL_AUX_USAGE_NONE
,
307 get_blorp_surf_for_anv_image(cmd_buffer
->device
,
308 dst_image
, 1UL << aspect_bit
,
309 dstImageLayout
, ISL_AUX_USAGE_NONE
,
311 anv_cmd_buffer_mark_image_written(cmd_buffer
, dst_image
,
313 dst_surf
.aux_usage
, dst_level
,
314 dst_base_layer
, layer_count
);
316 for (unsigned i
= 0; i
< layer_count
; i
++) {
317 blorp_copy(&batch
, &src_surf
, src_level
, src_base_layer
+ i
,
318 &dst_surf
, dst_level
, dst_base_layer
+ i
,
319 srcOffset
.x
, srcOffset
.y
,
320 dstOffset
.x
, dstOffset
.y
,
321 extent
.width
, extent
.height
);
325 struct blorp_surf src_surf
, dst_surf
;
326 get_blorp_surf_for_anv_image(cmd_buffer
->device
, src_image
, src_mask
,
327 srcImageLayout
, ISL_AUX_USAGE_NONE
,
329 get_blorp_surf_for_anv_image(cmd_buffer
->device
, dst_image
, dst_mask
,
330 dstImageLayout
, ISL_AUX_USAGE_NONE
,
332 anv_cmd_buffer_mark_image_written(cmd_buffer
, dst_image
, dst_mask
,
333 dst_surf
.aux_usage
, dst_level
,
334 dst_base_layer
, layer_count
);
336 for (unsigned i
= 0; i
< layer_count
; i
++) {
337 blorp_copy(&batch
, &src_surf
, src_level
, src_base_layer
+ i
,
338 &dst_surf
, dst_level
, dst_base_layer
+ i
,
339 srcOffset
.x
, srcOffset
.y
,
340 dstOffset
.x
, dstOffset
.y
,
341 extent
.width
, extent
.height
);
346 blorp_batch_finish(&batch
);
350 copy_buffer_to_image(struct anv_cmd_buffer
*cmd_buffer
,
351 struct anv_buffer
*anv_buffer
,
352 struct anv_image
*anv_image
,
353 VkImageLayout image_layout
,
354 uint32_t regionCount
,
355 const VkBufferImageCopy
* pRegions
,
356 bool buffer_to_image
)
358 struct blorp_batch batch
;
359 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
, 0);
362 struct blorp_surf surf
;
365 } image
, buffer
, *src
, *dst
;
368 buffer
.offset
= (VkOffset3D
) { 0, 0, 0 };
370 if (buffer_to_image
) {
378 for (unsigned r
= 0; r
< regionCount
; r
++) {
379 const VkImageAspectFlags aspect
= pRegions
[r
].imageSubresource
.aspectMask
;
381 get_blorp_surf_for_anv_image(cmd_buffer
->device
, anv_image
, aspect
,
382 image_layout
, ISL_AUX_USAGE_NONE
,
385 anv_sanitize_image_offset(anv_image
->type
, pRegions
[r
].imageOffset
);
386 image
.level
= pRegions
[r
].imageSubresource
.mipLevel
;
389 anv_sanitize_image_extent(anv_image
->type
, pRegions
[r
].imageExtent
);
390 if (anv_image
->type
!= VK_IMAGE_TYPE_3D
) {
391 image
.offset
.z
= pRegions
[r
].imageSubresource
.baseArrayLayer
;
393 anv_get_layerCount(anv_image
, &pRegions
[r
].imageSubresource
);
396 const enum isl_format buffer_format
=
397 anv_get_isl_format(&cmd_buffer
->device
->info
, anv_image
->vk_format
,
398 aspect
, VK_IMAGE_TILING_LINEAR
);
400 const VkExtent3D bufferImageExtent
= {
401 .width
= pRegions
[r
].bufferRowLength
?
402 pRegions
[r
].bufferRowLength
: extent
.width
,
403 .height
= pRegions
[r
].bufferImageHeight
?
404 pRegions
[r
].bufferImageHeight
: extent
.height
,
407 const struct isl_format_layout
*buffer_fmtl
=
408 isl_format_get_layout(buffer_format
);
410 const uint32_t buffer_row_pitch
=
411 DIV_ROUND_UP(bufferImageExtent
.width
, buffer_fmtl
->bw
) *
412 (buffer_fmtl
->bpb
/ 8);
414 const uint32_t buffer_layer_stride
=
415 DIV_ROUND_UP(bufferImageExtent
.height
, buffer_fmtl
->bh
) *
418 struct isl_surf buffer_isl_surf
;
419 get_blorp_surf_for_anv_buffer(cmd_buffer
->device
,
420 anv_buffer
, pRegions
[r
].bufferOffset
,
421 extent
.width
, extent
.height
,
422 buffer_row_pitch
, buffer_format
,
423 &buffer
.surf
, &buffer_isl_surf
);
426 anv_cmd_buffer_mark_image_written(cmd_buffer
, anv_image
,
427 aspect
, dst
->surf
.aux_usage
,
429 dst
->offset
.z
, extent
.depth
);
432 for (unsigned z
= 0; z
< extent
.depth
; z
++) {
433 blorp_copy(&batch
, &src
->surf
, src
->level
, src
->offset
.z
,
434 &dst
->surf
, dst
->level
, dst
->offset
.z
,
435 src
->offset
.x
, src
->offset
.y
, dst
->offset
.x
, dst
->offset
.y
,
436 extent
.width
, extent
.height
);
439 buffer
.surf
.addr
.offset
+= buffer_layer_stride
;
443 blorp_batch_finish(&batch
);
446 void anv_CmdCopyBufferToImage(
447 VkCommandBuffer commandBuffer
,
450 VkImageLayout dstImageLayout
,
451 uint32_t regionCount
,
452 const VkBufferImageCopy
* pRegions
)
454 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
455 ANV_FROM_HANDLE(anv_buffer
, src_buffer
, srcBuffer
);
456 ANV_FROM_HANDLE(anv_image
, dst_image
, dstImage
);
458 copy_buffer_to_image(cmd_buffer
, src_buffer
, dst_image
, dstImageLayout
,
459 regionCount
, pRegions
, true);
462 void anv_CmdCopyImageToBuffer(
463 VkCommandBuffer commandBuffer
,
465 VkImageLayout srcImageLayout
,
467 uint32_t regionCount
,
468 const VkBufferImageCopy
* pRegions
)
470 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
471 ANV_FROM_HANDLE(anv_image
, src_image
, srcImage
);
472 ANV_FROM_HANDLE(anv_buffer
, dst_buffer
, dstBuffer
);
474 copy_buffer_to_image(cmd_buffer
, dst_buffer
, src_image
, srcImageLayout
,
475 regionCount
, pRegions
, false);
/* Normalize a source/destination coordinate pair so that each runs
 * low-to-high, swapping endpoints in place as needed.  Returns true when
 * exactly one of the two ranges was reversed, i.e. the blit must mirror
 * along this axis.
 */
static bool
flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
{
   bool flip = false;
   if (*src0 > *src1) {
      unsigned tmp = *src0;
      *src0 = *src1;
      *src1 = tmp;
      flip = !flip;
   }

   if (*dst0 > *dst1) {
      unsigned tmp = *dst0;
      *dst0 = *dst1;
      *dst1 = tmp;
      flip = !flip;
   }

   return flip;
}
499 void anv_CmdBlitImage(
500 VkCommandBuffer commandBuffer
,
502 VkImageLayout srcImageLayout
,
504 VkImageLayout dstImageLayout
,
505 uint32_t regionCount
,
506 const VkImageBlit
* pRegions
,
510 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
511 ANV_FROM_HANDLE(anv_image
, src_image
, srcImage
);
512 ANV_FROM_HANDLE(anv_image
, dst_image
, dstImage
);
514 struct blorp_surf src
, dst
;
516 enum blorp_filter blorp_filter
;
518 case VK_FILTER_NEAREST
:
519 blorp_filter
= BLORP_FILTER_NEAREST
;
521 case VK_FILTER_LINEAR
:
522 blorp_filter
= BLORP_FILTER_BILINEAR
;
525 unreachable("Invalid filter");
528 struct blorp_batch batch
;
529 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
, 0);
531 for (unsigned r
= 0; r
< regionCount
; r
++) {
532 const VkImageSubresourceLayers
*src_res
= &pRegions
[r
].srcSubresource
;
533 const VkImageSubresourceLayers
*dst_res
= &pRegions
[r
].dstSubresource
;
535 get_blorp_surf_for_anv_image(cmd_buffer
->device
,
536 src_image
, src_res
->aspectMask
,
537 srcImageLayout
, ISL_AUX_USAGE_NONE
, &src
);
538 get_blorp_surf_for_anv_image(cmd_buffer
->device
,
539 dst_image
, dst_res
->aspectMask
,
540 dstImageLayout
, ISL_AUX_USAGE_NONE
, &dst
);
542 struct anv_format_plane src_format
=
543 anv_get_format_plane(&cmd_buffer
->device
->info
, src_image
->vk_format
,
544 src_res
->aspectMask
, src_image
->tiling
);
545 struct anv_format_plane dst_format
=
546 anv_get_format_plane(&cmd_buffer
->device
->info
, dst_image
->vk_format
,
547 dst_res
->aspectMask
, dst_image
->tiling
);
549 unsigned dst_start
, dst_end
;
550 if (dst_image
->type
== VK_IMAGE_TYPE_3D
) {
551 assert(dst_res
->baseArrayLayer
== 0);
552 dst_start
= pRegions
[r
].dstOffsets
[0].z
;
553 dst_end
= pRegions
[r
].dstOffsets
[1].z
;
555 dst_start
= dst_res
->baseArrayLayer
;
556 dst_end
= dst_start
+ anv_get_layerCount(dst_image
, dst_res
);
559 unsigned src_start
, src_end
;
560 if (src_image
->type
== VK_IMAGE_TYPE_3D
) {
561 assert(src_res
->baseArrayLayer
== 0);
562 src_start
= pRegions
[r
].srcOffsets
[0].z
;
563 src_end
= pRegions
[r
].srcOffsets
[1].z
;
565 src_start
= src_res
->baseArrayLayer
;
566 src_end
= src_start
+ anv_get_layerCount(src_image
, src_res
);
569 bool flip_z
= flip_coords(&src_start
, &src_end
, &dst_start
, &dst_end
);
570 float src_z_step
= (float)(src_end
+ 1 - src_start
) /
571 (float)(dst_end
+ 1 - dst_start
);
578 unsigned src_x0
= pRegions
[r
].srcOffsets
[0].x
;
579 unsigned src_x1
= pRegions
[r
].srcOffsets
[1].x
;
580 unsigned dst_x0
= pRegions
[r
].dstOffsets
[0].x
;
581 unsigned dst_x1
= pRegions
[r
].dstOffsets
[1].x
;
582 bool flip_x
= flip_coords(&src_x0
, &src_x1
, &dst_x0
, &dst_x1
);
584 unsigned src_y0
= pRegions
[r
].srcOffsets
[0].y
;
585 unsigned src_y1
= pRegions
[r
].srcOffsets
[1].y
;
586 unsigned dst_y0
= pRegions
[r
].dstOffsets
[0].y
;
587 unsigned dst_y1
= pRegions
[r
].dstOffsets
[1].y
;
588 bool flip_y
= flip_coords(&src_y0
, &src_y1
, &dst_y0
, &dst_y1
);
590 const unsigned num_layers
= dst_end
- dst_start
;
591 anv_cmd_buffer_mark_image_written(cmd_buffer
, dst_image
,
595 dst_start
, num_layers
);
597 for (unsigned i
= 0; i
< num_layers
; i
++) {
598 unsigned dst_z
= dst_start
+ i
;
599 unsigned src_z
= src_start
+ i
* src_z_step
;
601 blorp_blit(&batch
, &src
, src_res
->mipLevel
, src_z
,
602 src_format
.isl_format
, src_format
.swizzle
,
603 &dst
, dst_res
->mipLevel
, dst_z
,
604 dst_format
.isl_format
, dst_format
.swizzle
,
605 src_x0
, src_y0
, src_x1
, src_y1
,
606 dst_x0
, dst_y0
, dst_x1
, dst_y1
,
607 blorp_filter
, flip_x
, flip_y
);
612 blorp_batch_finish(&batch
);
615 static enum isl_format
616 isl_format_for_size(unsigned size_B
)
619 case 4: return ISL_FORMAT_R32_UINT
;
620 case 8: return ISL_FORMAT_R32G32_UINT
;
621 case 16: return ISL_FORMAT_R32G32B32A32_UINT
;
623 unreachable("Not a power-of-two format size");
/*
 * Returns the greatest common divisor of a and b that is a power of two.
 * At least one of the two arguments must be non-zero.
 */
static uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   /* The greatest power-of-two divisor of x is its lowest set bit,
    * x & -x.  The minimum of that over a and b is exactly the lowest set
    * bit of (a | b); if one operand is zero, the other's low bit is taken
    * automatically (if both are zero we hit the assert above).
    *
    * Computing this directly in 64 bits also fixes the previous
    * "1 << shift" form, which shifted a 32-bit int and overflowed (UB)
    * whenever the result was >= 2^31 — possible for large buffer
    * offsets/sizes.
    */
   uint64_t bits = a | b;
   return bits & (0 - bits);
}
645 /* This is maximum possible width/height our HW can handle */
646 #define MAX_SURFACE_DIM (1ull << 14)
648 void anv_CmdCopyBuffer(
649 VkCommandBuffer commandBuffer
,
652 uint32_t regionCount
,
653 const VkBufferCopy
* pRegions
)
655 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
656 ANV_FROM_HANDLE(anv_buffer
, src_buffer
, srcBuffer
);
657 ANV_FROM_HANDLE(anv_buffer
, dst_buffer
, dstBuffer
);
659 struct blorp_batch batch
;
660 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
, 0);
662 for (unsigned r
= 0; r
< regionCount
; r
++) {
663 struct blorp_address src
= {
664 .buffer
= src_buffer
->address
.bo
,
665 .offset
= src_buffer
->address
.offset
+ pRegions
[r
].srcOffset
,
666 .mocs
= cmd_buffer
->device
->default_mocs
,
668 struct blorp_address dst
= {
669 .buffer
= dst_buffer
->address
.bo
,
670 .offset
= dst_buffer
->address
.offset
+ pRegions
[r
].dstOffset
,
671 .mocs
= cmd_buffer
->device
->default_mocs
,
674 blorp_buffer_copy(&batch
, src
, dst
, pRegions
[r
].size
);
677 blorp_batch_finish(&batch
);
680 void anv_CmdUpdateBuffer(
681 VkCommandBuffer commandBuffer
,
683 VkDeviceSize dstOffset
,
684 VkDeviceSize dataSize
,
687 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
688 ANV_FROM_HANDLE(anv_buffer
, dst_buffer
, dstBuffer
);
690 struct blorp_batch batch
;
691 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
, 0);
693 /* We can't quite grab a full block because the state stream needs a
694 * little data at the top to build its linked list.
696 const uint32_t max_update_size
=
697 cmd_buffer
->device
->dynamic_state_pool
.block_size
- 64;
699 assert(max_update_size
< MAX_SURFACE_DIM
* 4);
701 /* We're about to read data that was written from the CPU. Flush the
702 * texture cache so we don't get anything stale.
704 cmd_buffer
->state
.pending_pipe_bits
|= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT
;
707 const uint32_t copy_size
= MIN2(dataSize
, max_update_size
);
709 struct anv_state tmp_data
=
710 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer
, copy_size
, 64);
712 memcpy(tmp_data
.map
, pData
, copy_size
);
714 anv_state_flush(cmd_buffer
->device
, tmp_data
);
716 struct blorp_address src
= {
717 .buffer
= &cmd_buffer
->device
->dynamic_state_pool
.block_pool
.bo
,
718 .offset
= tmp_data
.offset
,
719 .mocs
= cmd_buffer
->device
->default_mocs
,
721 struct blorp_address dst
= {
722 .buffer
= dst_buffer
->address
.bo
,
723 .offset
= dst_buffer
->address
.offset
+ dstOffset
,
724 .mocs
= cmd_buffer
->device
->default_mocs
,
727 blorp_buffer_copy(&batch
, src
, dst
, copy_size
);
729 dataSize
-= copy_size
;
730 dstOffset
+= copy_size
;
731 pData
= (void *)pData
+ copy_size
;
734 blorp_batch_finish(&batch
);
737 void anv_CmdFillBuffer(
738 VkCommandBuffer commandBuffer
,
740 VkDeviceSize dstOffset
,
741 VkDeviceSize fillSize
,
744 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
745 ANV_FROM_HANDLE(anv_buffer
, dst_buffer
, dstBuffer
);
746 struct blorp_surf surf
;
747 struct isl_surf isl_surf
;
749 struct blorp_batch batch
;
750 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
, 0);
752 fillSize
= anv_buffer_get_range(dst_buffer
, dstOffset
, fillSize
);
754 /* From the Vulkan spec:
756 * "size is the number of bytes to fill, and must be either a multiple
757 * of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
758 * the buffer. If VK_WHOLE_SIZE is used and the remaining size of the
759 * buffer is not a multiple of 4, then the nearest smaller multiple is
764 /* First, we compute the biggest format that can be used with the
765 * given offsets and size.
768 bs
= gcd_pow2_u64(bs
, dstOffset
);
769 bs
= gcd_pow2_u64(bs
, fillSize
);
770 enum isl_format isl_format
= isl_format_for_size(bs
);
772 union isl_color_value color
= {
773 .u32
= { data
, data
, data
, data
},
776 const uint64_t max_fill_size
= MAX_SURFACE_DIM
* MAX_SURFACE_DIM
* bs
;
777 while (fillSize
>= max_fill_size
) {
778 get_blorp_surf_for_anv_buffer(cmd_buffer
->device
,
779 dst_buffer
, dstOffset
,
780 MAX_SURFACE_DIM
, MAX_SURFACE_DIM
,
781 MAX_SURFACE_DIM
* bs
, isl_format
,
784 blorp_clear(&batch
, &surf
, isl_format
, ISL_SWIZZLE_IDENTITY
,
785 0, 0, 1, 0, 0, MAX_SURFACE_DIM
, MAX_SURFACE_DIM
,
787 fillSize
-= max_fill_size
;
788 dstOffset
+= max_fill_size
;
791 uint64_t height
= fillSize
/ (MAX_SURFACE_DIM
* bs
);
792 assert(height
< MAX_SURFACE_DIM
);
794 const uint64_t rect_fill_size
= height
* MAX_SURFACE_DIM
* bs
;
795 get_blorp_surf_for_anv_buffer(cmd_buffer
->device
,
796 dst_buffer
, dstOffset
,
797 MAX_SURFACE_DIM
, height
,
798 MAX_SURFACE_DIM
* bs
, isl_format
,
801 blorp_clear(&batch
, &surf
, isl_format
, ISL_SWIZZLE_IDENTITY
,
802 0, 0, 1, 0, 0, MAX_SURFACE_DIM
, height
,
804 fillSize
-= rect_fill_size
;
805 dstOffset
+= rect_fill_size
;
809 const uint32_t width
= fillSize
/ bs
;
810 get_blorp_surf_for_anv_buffer(cmd_buffer
->device
,
811 dst_buffer
, dstOffset
,
813 width
* bs
, isl_format
,
816 blorp_clear(&batch
, &surf
, isl_format
, ISL_SWIZZLE_IDENTITY
,
817 0, 0, 1, 0, 0, width
, 1,
821 blorp_batch_finish(&batch
);
824 void anv_CmdClearColorImage(
825 VkCommandBuffer commandBuffer
,
827 VkImageLayout imageLayout
,
828 const VkClearColorValue
* pColor
,
830 const VkImageSubresourceRange
* pRanges
)
832 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
833 ANV_FROM_HANDLE(anv_image
, image
, _image
);
835 static const bool color_write_disable
[4] = { false, false, false, false };
837 struct blorp_batch batch
;
838 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
, 0);
841 for (unsigned r
= 0; r
< rangeCount
; r
++) {
842 if (pRanges
[r
].aspectMask
== 0)
845 assert(pRanges
[r
].aspectMask
& VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV
);
847 struct blorp_surf surf
;
848 get_blorp_surf_for_anv_image(cmd_buffer
->device
,
849 image
, pRanges
[r
].aspectMask
,
850 imageLayout
, ISL_AUX_USAGE_NONE
, &surf
);
852 struct anv_format_plane src_format
=
853 anv_get_format_plane(&cmd_buffer
->device
->info
, image
->vk_format
,
854 VK_IMAGE_ASPECT_COLOR_BIT
, image
->tiling
);
856 unsigned base_layer
= pRanges
[r
].baseArrayLayer
;
857 unsigned layer_count
= anv_get_layerCount(image
, &pRanges
[r
]);
859 for (unsigned i
= 0; i
< anv_get_levelCount(image
, &pRanges
[r
]); i
++) {
860 const unsigned level
= pRanges
[r
].baseMipLevel
+ i
;
861 const unsigned level_width
= anv_minify(image
->extent
.width
, level
);
862 const unsigned level_height
= anv_minify(image
->extent
.height
, level
);
864 if (image
->type
== VK_IMAGE_TYPE_3D
) {
866 layer_count
= anv_minify(image
->extent
.depth
, level
);
869 anv_cmd_buffer_mark_image_written(cmd_buffer
, image
,
870 pRanges
[r
].aspectMask
,
871 surf
.aux_usage
, level
,
872 base_layer
, layer_count
);
874 blorp_clear(&batch
, &surf
,
875 src_format
.isl_format
, src_format
.swizzle
,
876 level
, base_layer
, layer_count
,
877 0, 0, level_width
, level_height
,
878 vk_to_isl_color(*pColor
), color_write_disable
);
882 blorp_batch_finish(&batch
);
885 void anv_CmdClearDepthStencilImage(
886 VkCommandBuffer commandBuffer
,
888 VkImageLayout imageLayout
,
889 const VkClearDepthStencilValue
* pDepthStencil
,
891 const VkImageSubresourceRange
* pRanges
)
893 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
894 ANV_FROM_HANDLE(anv_image
, image
, image_h
);
896 struct blorp_batch batch
;
897 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
, 0);
899 struct blorp_surf depth
, stencil
;
900 if (image
->aspects
& VK_IMAGE_ASPECT_DEPTH_BIT
) {
901 get_blorp_surf_for_anv_image(cmd_buffer
->device
,
902 image
, VK_IMAGE_ASPECT_DEPTH_BIT
,
903 imageLayout
, ISL_AUX_USAGE_NONE
, &depth
);
905 memset(&depth
, 0, sizeof(depth
));
908 if (image
->aspects
& VK_IMAGE_ASPECT_STENCIL_BIT
) {
909 get_blorp_surf_for_anv_image(cmd_buffer
->device
,
910 image
, VK_IMAGE_ASPECT_STENCIL_BIT
,
911 imageLayout
, ISL_AUX_USAGE_NONE
, &stencil
);
913 memset(&stencil
, 0, sizeof(stencil
));
916 for (unsigned r
= 0; r
< rangeCount
; r
++) {
917 if (pRanges
[r
].aspectMask
== 0)
920 bool clear_depth
= pRanges
[r
].aspectMask
& VK_IMAGE_ASPECT_DEPTH_BIT
;
921 bool clear_stencil
= pRanges
[r
].aspectMask
& VK_IMAGE_ASPECT_STENCIL_BIT
;
923 unsigned base_layer
= pRanges
[r
].baseArrayLayer
;
924 unsigned layer_count
= anv_get_layerCount(image
, &pRanges
[r
]);
926 for (unsigned i
= 0; i
< anv_get_levelCount(image
, &pRanges
[r
]); i
++) {
927 const unsigned level
= pRanges
[r
].baseMipLevel
+ i
;
928 const unsigned level_width
= anv_minify(image
->extent
.width
, level
);
929 const unsigned level_height
= anv_minify(image
->extent
.height
, level
);
931 if (image
->type
== VK_IMAGE_TYPE_3D
)
932 layer_count
= anv_minify(image
->extent
.depth
, level
);
934 blorp_clear_depth_stencil(&batch
, &depth
, &stencil
,
935 level
, base_layer
, layer_count
,
936 0, 0, level_width
, level_height
,
937 clear_depth
, pDepthStencil
->depth
,
938 clear_stencil
? 0xff : 0,
939 pDepthStencil
->stencil
);
943 blorp_batch_finish(&batch
);
947 anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer
*cmd_buffer
,
948 uint32_t num_entries
,
949 uint32_t *state_offset
,
950 struct anv_state
*bt_state
)
952 *bt_state
= anv_cmd_buffer_alloc_binding_table(cmd_buffer
, num_entries
,
954 if (bt_state
->map
== NULL
) {
955 /* We ran out of space. Grab a new binding table block. */
956 VkResult result
= anv_cmd_buffer_new_binding_table_block(cmd_buffer
);
957 if (result
!= VK_SUCCESS
)
960 /* Re-emit state base addresses so we get the new surface state base
961 * address before we start emitting binding tables etc.
963 anv_cmd_buffer_emit_state_base_address(cmd_buffer
);
965 *bt_state
= anv_cmd_buffer_alloc_binding_table(cmd_buffer
, num_entries
,
967 assert(bt_state
->map
!= NULL
);
974 binding_table_for_surface_state(struct anv_cmd_buffer
*cmd_buffer
,
975 struct anv_state surface_state
,
978 uint32_t state_offset
;
979 struct anv_state bt_state
;
982 anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer
, 1, &state_offset
,
984 if (result
!= VK_SUCCESS
)
987 uint32_t *bt_map
= bt_state
.map
;
988 bt_map
[0] = surface_state
.offset
+ state_offset
;
990 *bt_offset
= bt_state
.offset
;
995 clear_color_attachment(struct anv_cmd_buffer
*cmd_buffer
,
996 struct blorp_batch
*batch
,
997 const VkClearAttachment
*attachment
,
998 uint32_t rectCount
, const VkClearRect
*pRects
)
1000 const struct anv_subpass
*subpass
= cmd_buffer
->state
.subpass
;
1001 const uint32_t color_att
= attachment
->colorAttachment
;
1002 const uint32_t att_idx
= subpass
->color_attachments
[color_att
].attachment
;
1004 if (att_idx
== VK_ATTACHMENT_UNUSED
)
1007 struct anv_render_pass_attachment
*pass_att
=
1008 &cmd_buffer
->state
.pass
->attachments
[att_idx
];
1009 struct anv_attachment_state
*att_state
=
1010 &cmd_buffer
->state
.attachments
[att_idx
];
1012 uint32_t binding_table
;
1014 binding_table_for_surface_state(cmd_buffer
, att_state
->color
.state
,
1016 if (result
!= VK_SUCCESS
)
1019 union isl_color_value clear_color
=
1020 vk_to_isl_color(attachment
->clearValue
.color
);
1022 /* If multiview is enabled we ignore baseArrayLayer and layerCount */
1023 if (subpass
->view_mask
) {
1025 for_each_bit(view_idx
, subpass
->view_mask
) {
1026 for (uint32_t r
= 0; r
< rectCount
; ++r
) {
1027 const VkOffset2D offset
= pRects
[r
].rect
.offset
;
1028 const VkExtent2D extent
= pRects
[r
].rect
.extent
;
1029 blorp_clear_attachments(batch
, binding_table
,
1030 ISL_FORMAT_UNSUPPORTED
, pass_att
->samples
,
1033 offset
.x
+ extent
.width
,
1034 offset
.y
+ extent
.height
,
1035 true, clear_color
, false, 0.0f
, 0, 0);
1041 for (uint32_t r
= 0; r
< rectCount
; ++r
) {
1042 const VkOffset2D offset
= pRects
[r
].rect
.offset
;
1043 const VkExtent2D extent
= pRects
[r
].rect
.extent
;
1044 assert(pRects
[r
].layerCount
!= VK_REMAINING_ARRAY_LAYERS
);
1045 blorp_clear_attachments(batch
, binding_table
,
1046 ISL_FORMAT_UNSUPPORTED
, pass_att
->samples
,
1047 pRects
[r
].baseArrayLayer
,
1048 pRects
[r
].layerCount
,
1050 offset
.x
+ extent
.width
, offset
.y
+ extent
.height
,
1051 true, clear_color
, false, 0.0f
, 0, 0);
1056 clear_depth_stencil_attachment(struct anv_cmd_buffer
*cmd_buffer
,
1057 struct blorp_batch
*batch
,
1058 const VkClearAttachment
*attachment
,
1059 uint32_t rectCount
, const VkClearRect
*pRects
)
1061 static const union isl_color_value color_value
= { .u32
= { 0, } };
1062 const struct anv_subpass
*subpass
= cmd_buffer
->state
.subpass
;
1063 const uint32_t att_idx
= subpass
->depth_stencil_attachment
->attachment
;
1065 if (att_idx
== VK_ATTACHMENT_UNUSED
)
1068 struct anv_render_pass_attachment
*pass_att
=
1069 &cmd_buffer
->state
.pass
->attachments
[att_idx
];
1071 bool clear_depth
= attachment
->aspectMask
& VK_IMAGE_ASPECT_DEPTH_BIT
;
1072 bool clear_stencil
= attachment
->aspectMask
& VK_IMAGE_ASPECT_STENCIL_BIT
;
1074 enum isl_format depth_format
= ISL_FORMAT_UNSUPPORTED
;
1076 depth_format
= anv_get_isl_format(&cmd_buffer
->device
->info
,
1078 VK_IMAGE_ASPECT_DEPTH_BIT
,
1079 VK_IMAGE_TILING_OPTIMAL
);
1082 uint32_t binding_table
;
1084 binding_table_for_surface_state(cmd_buffer
,
1085 cmd_buffer
->state
.null_surface_state
,
1087 if (result
!= VK_SUCCESS
)
1090 /* If multiview is enabled we ignore baseArrayLayer and layerCount */
1091 if (subpass
->view_mask
) {
1093 for_each_bit(view_idx
, subpass
->view_mask
) {
1094 for (uint32_t r
= 0; r
< rectCount
; ++r
) {
1095 const VkOffset2D offset
= pRects
[r
].rect
.offset
;
1096 const VkExtent2D extent
= pRects
[r
].rect
.extent
;
1097 VkClearDepthStencilValue value
= attachment
->clearValue
.depthStencil
;
1098 blorp_clear_attachments(batch
, binding_table
,
1099 depth_format
, pass_att
->samples
,
1102 offset
.x
+ extent
.width
,
1103 offset
.y
+ extent
.height
,
1105 clear_depth
, value
.depth
,
1106 clear_stencil
? 0xff : 0, value
.stencil
);
1112 for (uint32_t r
= 0; r
< rectCount
; ++r
) {
1113 const VkOffset2D offset
= pRects
[r
].rect
.offset
;
1114 const VkExtent2D extent
= pRects
[r
].rect
.extent
;
1115 VkClearDepthStencilValue value
= attachment
->clearValue
.depthStencil
;
1116 assert(pRects
[r
].layerCount
!= VK_REMAINING_ARRAY_LAYERS
);
1117 blorp_clear_attachments(batch
, binding_table
,
1118 depth_format
, pass_att
->samples
,
1119 pRects
[r
].baseArrayLayer
,
1120 pRects
[r
].layerCount
,
1122 offset
.x
+ extent
.width
, offset
.y
+ extent
.height
,
1124 clear_depth
, value
.depth
,
1125 clear_stencil
? 0xff : 0, value
.stencil
);
1129 void anv_CmdClearAttachments(
1130 VkCommandBuffer commandBuffer
,
1131 uint32_t attachmentCount
,
1132 const VkClearAttachment
* pAttachments
,
1134 const VkClearRect
* pRects
)
1136 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
1138 /* Because this gets called within a render pass, we tell blorp not to
1139 * trash our depth and stencil buffers.
1141 struct blorp_batch batch
;
1142 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
,
1143 BLORP_BATCH_NO_EMIT_DEPTH_STENCIL
);
1145 for (uint32_t a
= 0; a
< attachmentCount
; ++a
) {
1146 if (pAttachments
[a
].aspectMask
& VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV
) {
1147 assert(pAttachments
[a
].aspectMask
== VK_IMAGE_ASPECT_COLOR_BIT
);
1148 clear_color_attachment(cmd_buffer
, &batch
,
1152 clear_depth_stencil_attachment(cmd_buffer
, &batch
,
1158 blorp_batch_finish(&batch
);
/* Stage within a subpass at which a blorp operation is performed.
 *
 * NOTE(review): the first two enumerators were dropped by the corrupted
 * extraction and have been reconstructed (only SUBPASS_STAGE_RESOLVE was
 * visible); verify against project history.
 */
enum subpass_stage {
   SUBPASS_STAGE_LOAD,
   SUBPASS_STAGE_DRAW,
   SUBPASS_STAGE_RESOLVE,
};
1168 resolve_surface(struct blorp_batch
*batch
,
1169 struct blorp_surf
*src_surf
,
1170 uint32_t src_level
, uint32_t src_layer
,
1171 struct blorp_surf
*dst_surf
,
1172 uint32_t dst_level
, uint32_t dst_layer
,
1173 uint32_t src_x
, uint32_t src_y
, uint32_t dst_x
, uint32_t dst_y
,
1174 uint32_t width
, uint32_t height
,
1175 enum blorp_filter filter
)
1178 src_surf
, src_level
, src_layer
,
1179 ISL_FORMAT_UNSUPPORTED
, ISL_SWIZZLE_IDENTITY
,
1180 dst_surf
, dst_level
, dst_layer
,
1181 ISL_FORMAT_UNSUPPORTED
, ISL_SWIZZLE_IDENTITY
,
1182 src_x
, src_y
, src_x
+ width
, src_y
+ height
,
1183 dst_x
, dst_y
, dst_x
+ width
, dst_y
+ height
,
1184 filter
, false, false);
1188 resolve_image(struct anv_device
*device
,
1189 struct blorp_batch
*batch
,
1190 const struct anv_image
*src_image
,
1191 VkImageLayout src_image_layout
,
1192 uint32_t src_level
, uint32_t src_layer
,
1193 const struct anv_image
*dst_image
,
1194 VkImageLayout dst_image_layout
,
1195 uint32_t dst_level
, uint32_t dst_layer
,
1196 VkImageAspectFlags aspect_mask
,
1197 uint32_t src_x
, uint32_t src_y
, uint32_t dst_x
, uint32_t dst_y
,
1198 uint32_t width
, uint32_t height
)
1200 struct anv_cmd_buffer
*cmd_buffer
= batch
->driver_batch
;
1202 assert(src_image
->type
== VK_IMAGE_TYPE_2D
);
1203 assert(src_image
->samples
> 1);
1204 assert(dst_image
->type
== VK_IMAGE_TYPE_2D
);
1205 assert(dst_image
->samples
== 1);
1206 assert(src_image
->n_planes
== dst_image
->n_planes
);
1208 uint32_t aspect_bit
;
1210 anv_foreach_image_aspect_bit(aspect_bit
, src_image
, aspect_mask
) {
1211 struct blorp_surf src_surf
, dst_surf
;
1212 get_blorp_surf_for_anv_image(device
, src_image
, 1UL << aspect_bit
,
1213 src_image_layout
, ISL_AUX_USAGE_NONE
,
1215 get_blorp_surf_for_anv_image(device
, dst_image
, 1UL << aspect_bit
,
1216 dst_image_layout
, ISL_AUX_USAGE_NONE
,
1218 anv_cmd_buffer_mark_image_written(cmd_buffer
, dst_image
,
1221 dst_level
, dst_layer
, 1);
1223 enum blorp_filter filter
;
1224 if ((src_surf
.surf
->usage
& ISL_SURF_USAGE_DEPTH_BIT
) ||
1225 (src_surf
.surf
->usage
& ISL_SURF_USAGE_STENCIL_BIT
) ||
1226 isl_format_has_int_channel(src_surf
.surf
->format
)) {
1227 filter
= BLORP_FILTER_SAMPLE_0
;
1229 filter
= BLORP_FILTER_AVERAGE
;
1232 assert(!src_image
->format
->can_ycbcr
);
1233 assert(!dst_image
->format
->can_ycbcr
);
1235 resolve_surface(batch
,
1236 &src_surf
, src_level
, src_layer
,
1237 &dst_surf
, dst_level
, dst_layer
,
1238 src_x
, src_y
, dst_x
, dst_y
, width
, height
, filter
);
1242 void anv_CmdResolveImage(
1243 VkCommandBuffer commandBuffer
,
1245 VkImageLayout srcImageLayout
,
1247 VkImageLayout dstImageLayout
,
1248 uint32_t regionCount
,
1249 const VkImageResolve
* pRegions
)
1251 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
1252 ANV_FROM_HANDLE(anv_image
, src_image
, srcImage
);
1253 ANV_FROM_HANDLE(anv_image
, dst_image
, dstImage
);
1255 struct blorp_batch batch
;
1256 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
, 0);
1258 for (uint32_t r
= 0; r
< regionCount
; r
++) {
1259 assert(pRegions
[r
].srcSubresource
.aspectMask
==
1260 pRegions
[r
].dstSubresource
.aspectMask
);
1261 assert(anv_get_layerCount(src_image
, &pRegions
[r
].srcSubresource
) ==
1262 anv_get_layerCount(dst_image
, &pRegions
[r
].dstSubresource
));
1264 const uint32_t layer_count
=
1265 anv_get_layerCount(dst_image
, &pRegions
[r
].dstSubresource
);
1267 VkImageAspectFlags src_mask
= pRegions
[r
].srcSubresource
.aspectMask
,
1268 dst_mask
= pRegions
[r
].dstSubresource
.aspectMask
;
1270 assert(anv_image_aspects_compatible(src_mask
, dst_mask
));
1272 for (uint32_t layer
= 0; layer
< layer_count
; layer
++) {
1273 resolve_image(cmd_buffer
->device
, &batch
,
1274 src_image
, srcImageLayout
,
1275 pRegions
[r
].srcSubresource
.mipLevel
,
1276 pRegions
[r
].srcSubresource
.baseArrayLayer
+ layer
,
1277 dst_image
, dstImageLayout
,
1278 pRegions
[r
].dstSubresource
.mipLevel
,
1279 pRegions
[r
].dstSubresource
.baseArrayLayer
+ layer
,
1280 pRegions
[r
].dstSubresource
.aspectMask
,
1281 pRegions
[r
].srcOffset
.x
, pRegions
[r
].srcOffset
.y
,
1282 pRegions
[r
].dstOffset
.x
, pRegions
[r
].dstOffset
.y
,
1283 pRegions
[r
].extent
.width
, pRegions
[r
].extent
.height
);
1287 blorp_batch_finish(&batch
);
1290 static enum isl_aux_usage
1291 fast_clear_aux_usage(const struct anv_image
*image
,
1292 VkImageAspectFlagBits aspect
)
1294 uint32_t plane
= anv_image_aspect_to_plane(image
->aspects
, aspect
);
1295 if (image
->planes
[plane
].aux_usage
== ISL_AUX_USAGE_NONE
)
1296 return ISL_AUX_USAGE_CCS_D
;
1298 return image
->planes
[plane
].aux_usage
;
1302 anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer
*cmd_buffer
)
1304 struct anv_framebuffer
*fb
= cmd_buffer
->state
.framebuffer
;
1305 struct anv_subpass
*subpass
= cmd_buffer
->state
.subpass
;
1307 if (subpass
->has_resolve
) {
1308 struct blorp_batch batch
;
1309 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
, 0);
1311 /* We are about to do some MSAA resolves. We need to flush so that the
1312 * result of writes to the MSAA color attachments show up in the sampler
1313 * when we blit to the single-sampled resolve target.
1315 cmd_buffer
->state
.pending_pipe_bits
|=
1316 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT
|
1317 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
;
1319 for (uint32_t i
= 0; i
< subpass
->color_count
; ++i
) {
1320 uint32_t src_att
= subpass
->color_attachments
[i
].attachment
;
1321 uint32_t dst_att
= subpass
->resolve_attachments
[i
].attachment
;
1323 if (dst_att
== VK_ATTACHMENT_UNUSED
)
1326 assert(src_att
< cmd_buffer
->state
.pass
->attachment_count
);
1327 assert(dst_att
< cmd_buffer
->state
.pass
->attachment_count
);
1329 if (cmd_buffer
->state
.attachments
[dst_att
].pending_clear_aspects
) {
1330 /* From the Vulkan 1.0 spec:
1332 * If the first use of an attachment in a render pass is as a
1333 * resolve attachment, then the loadOp is effectively ignored
1334 * as the resolve is guaranteed to overwrite all pixels in the
1337 cmd_buffer
->state
.attachments
[dst_att
].pending_clear_aspects
= 0;
1340 struct anv_image_view
*src_iview
= fb
->attachments
[src_att
];
1341 struct anv_image_view
*dst_iview
= fb
->attachments
[dst_att
];
1343 enum isl_aux_usage src_aux_usage
=
1344 cmd_buffer
->state
.attachments
[src_att
].aux_usage
;
1345 enum isl_aux_usage dst_aux_usage
=
1346 cmd_buffer
->state
.attachments
[dst_att
].aux_usage
;
1348 const VkRect2D render_area
= cmd_buffer
->state
.render_area
;
1350 assert(src_iview
->aspect_mask
== VK_IMAGE_ASPECT_COLOR_BIT
&&
1351 dst_iview
->aspect_mask
== VK_IMAGE_ASPECT_COLOR_BIT
);
1353 enum blorp_filter filter
;
1354 if (isl_format_has_int_channel(src_iview
->planes
[0].isl
.format
)) {
1355 filter
= BLORP_FILTER_SAMPLE_0
;
1357 filter
= BLORP_FILTER_AVERAGE
;
1360 struct blorp_surf src_surf
, dst_surf
;
1361 get_blorp_surf_for_anv_image(cmd_buffer
->device
, src_iview
->image
,
1362 VK_IMAGE_ASPECT_COLOR_BIT
,
1363 ANV_IMAGE_LAYOUT_EXPLICIT_AUX
,
1364 src_aux_usage
, &src_surf
);
1365 if (src_aux_usage
== ISL_AUX_USAGE_MCS
) {
1366 src_surf
.clear_color_addr
= anv_to_blorp_address(
1367 anv_image_get_clear_color_addr(cmd_buffer
->device
,
1369 VK_IMAGE_ASPECT_COLOR_BIT
));
1371 get_blorp_surf_for_anv_image(cmd_buffer
->device
, dst_iview
->image
,
1372 VK_IMAGE_ASPECT_COLOR_BIT
,
1373 ANV_IMAGE_LAYOUT_EXPLICIT_AUX
,
1374 dst_aux_usage
, &dst_surf
);
1376 uint32_t base_src_layer
= src_iview
->planes
[0].isl
.base_array_layer
;
1377 uint32_t base_dst_layer
= dst_iview
->planes
[0].isl
.base_array_layer
;
1379 assert(src_iview
->planes
[0].isl
.array_len
>= fb
->layers
);
1380 assert(dst_iview
->planes
[0].isl
.array_len
>= fb
->layers
);
1382 anv_cmd_buffer_mark_image_written(cmd_buffer
, dst_iview
->image
,
1383 VK_IMAGE_ASPECT_COLOR_BIT
,
1385 dst_iview
->planes
[0].isl
.base_level
,
1386 base_dst_layer
, fb
->layers
);
1388 assert(!src_iview
->image
->format
->can_ycbcr
);
1389 assert(!dst_iview
->image
->format
->can_ycbcr
);
1391 for (uint32_t i
= 0; i
< fb
->layers
; i
++) {
1392 resolve_surface(&batch
,
1394 src_iview
->planes
[0].isl
.base_level
,
1397 dst_iview
->planes
[0].isl
.base_level
,
1399 render_area
.offset
.x
, render_area
.offset
.y
,
1400 render_area
.offset
.x
, render_area
.offset
.y
,
1401 render_area
.extent
.width
, render_area
.extent
.height
,
1406 blorp_batch_finish(&batch
);
1411 anv_image_copy_to_shadow(struct anv_cmd_buffer
*cmd_buffer
,
1412 const struct anv_image
*image
,
1413 uint32_t base_level
, uint32_t level_count
,
1414 uint32_t base_layer
, uint32_t layer_count
)
1416 struct blorp_batch batch
;
1417 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
, 0);
1419 assert(image
->aspects
== VK_IMAGE_ASPECT_COLOR_BIT
&& image
->n_planes
== 1);
1421 struct blorp_surf surf
;
1422 get_blorp_surf_for_anv_image(cmd_buffer
->device
,
1423 image
, VK_IMAGE_ASPECT_COLOR_BIT
,
1424 VK_IMAGE_LAYOUT_GENERAL
,
1425 ISL_AUX_USAGE_NONE
, &surf
);
1426 assert(surf
.aux_usage
== ISL_AUX_USAGE_NONE
);
1428 struct blorp_surf shadow_surf
= {
1429 .surf
= &image
->planes
[0].shadow_surface
.isl
,
1431 .buffer
= image
->planes
[0].address
.bo
,
1432 .offset
= image
->planes
[0].address
.offset
+
1433 image
->planes
[0].shadow_surface
.offset
,
1434 .mocs
= cmd_buffer
->device
->default_mocs
,
1438 for (uint32_t l
= 0; l
< level_count
; l
++) {
1439 const uint32_t level
= base_level
+ l
;
1441 const VkExtent3D extent
= {
1442 .width
= anv_minify(image
->extent
.width
, level
),
1443 .height
= anv_minify(image
->extent
.height
, level
),
1444 .depth
= anv_minify(image
->extent
.depth
, level
),
1447 if (image
->type
== VK_IMAGE_TYPE_3D
)
1448 layer_count
= extent
.depth
;
1450 for (uint32_t a
= 0; a
< layer_count
; a
++) {
1451 const uint32_t layer
= base_layer
+ a
;
1453 blorp_copy(&batch
, &surf
, level
, layer
,
1454 &shadow_surf
, level
, layer
,
1455 0, 0, 0, 0, extent
.width
, extent
.height
);
1459 blorp_batch_finish(&batch
);
1463 anv_image_clear_color(struct anv_cmd_buffer
*cmd_buffer
,
1464 const struct anv_image
*image
,
1465 VkImageAspectFlagBits aspect
,
1466 enum isl_aux_usage aux_usage
,
1467 enum isl_format format
, struct isl_swizzle swizzle
,
1468 uint32_t level
, uint32_t base_layer
, uint32_t layer_count
,
1469 VkRect2D area
, union isl_color_value clear_color
)
1471 assert(image
->aspects
== VK_IMAGE_ASPECT_COLOR_BIT
);
1473 /* We don't support planar images with multisampling yet */
1474 assert(image
->n_planes
== 1);
1476 struct blorp_batch batch
;
1477 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
, 0);
1479 struct blorp_surf surf
;
1480 get_blorp_surf_for_anv_image(cmd_buffer
->device
, image
, aspect
,
1481 ANV_IMAGE_LAYOUT_EXPLICIT_AUX
,
1483 anv_cmd_buffer_mark_image_written(cmd_buffer
, image
, aspect
, aux_usage
,
1484 level
, base_layer
, layer_count
);
1486 blorp_clear(&batch
, &surf
, format
, anv_swizzle_for_render(swizzle
),
1487 level
, base_layer
, layer_count
,
1488 area
.offset
.x
, area
.offset
.y
,
1489 area
.offset
.x
+ area
.extent
.width
,
1490 area
.offset
.y
+ area
.extent
.height
,
1493 blorp_batch_finish(&batch
);
1497 anv_image_clear_depth_stencil(struct anv_cmd_buffer
*cmd_buffer
,
1498 const struct anv_image
*image
,
1499 VkImageAspectFlags aspects
,
1500 enum isl_aux_usage depth_aux_usage
,
1502 uint32_t base_layer
, uint32_t layer_count
,
1504 float depth_value
, uint8_t stencil_value
)
1506 assert(image
->aspects
& (VK_IMAGE_ASPECT_DEPTH_BIT
|
1507 VK_IMAGE_ASPECT_STENCIL_BIT
));
1509 struct blorp_batch batch
;
1510 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
, 0);
1512 struct blorp_surf depth
= {};
1513 if (aspects
& VK_IMAGE_ASPECT_DEPTH_BIT
) {
1514 get_blorp_surf_for_anv_image(cmd_buffer
->device
,
1515 image
, VK_IMAGE_ASPECT_DEPTH_BIT
,
1516 ANV_IMAGE_LAYOUT_EXPLICIT_AUX
,
1517 depth_aux_usage
, &depth
);
1518 depth
.clear_color
.f32
[0] = ANV_HZ_FC_VAL
;
1521 struct blorp_surf stencil
= {};
1522 if (aspects
& VK_IMAGE_ASPECT_STENCIL_BIT
) {
1523 get_blorp_surf_for_anv_image(cmd_buffer
->device
,
1524 image
, VK_IMAGE_ASPECT_STENCIL_BIT
,
1525 ANV_IMAGE_LAYOUT_EXPLICIT_AUX
,
1526 ISL_AUX_USAGE_NONE
, &stencil
);
1529 blorp_clear_depth_stencil(&batch
, &depth
, &stencil
,
1530 level
, base_layer
, layer_count
,
1531 area
.offset
.x
, area
.offset
.y
,
1532 area
.offset
.x
+ area
.extent
.width
,
1533 area
.offset
.y
+ area
.extent
.height
,
1534 aspects
& VK_IMAGE_ASPECT_DEPTH_BIT
,
1536 (aspects
& VK_IMAGE_ASPECT_STENCIL_BIT
) ? 0xff : 0,
1539 blorp_batch_finish(&batch
);
1543 anv_image_hiz_op(struct anv_cmd_buffer
*cmd_buffer
,
1544 const struct anv_image
*image
,
1545 VkImageAspectFlagBits aspect
, uint32_t level
,
1546 uint32_t base_layer
, uint32_t layer_count
,
1547 enum isl_aux_op hiz_op
)
1549 assert(aspect
== VK_IMAGE_ASPECT_DEPTH_BIT
);
1550 assert(base_layer
+ layer_count
<= anv_image_aux_layers(image
, aspect
, level
));
1551 assert(anv_image_aspect_to_plane(image
->aspects
,
1552 VK_IMAGE_ASPECT_DEPTH_BIT
) == 0);
1554 struct blorp_batch batch
;
1555 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
, 0);
1557 struct blorp_surf surf
;
1558 get_blorp_surf_for_anv_image(cmd_buffer
->device
,
1559 image
, VK_IMAGE_ASPECT_DEPTH_BIT
,
1560 ANV_IMAGE_LAYOUT_EXPLICIT_AUX
,
1561 ISL_AUX_USAGE_HIZ
, &surf
);
1562 surf
.clear_color
.f32
[0] = ANV_HZ_FC_VAL
;
1564 blorp_hiz_op(&batch
, &surf
, level
, base_layer
, layer_count
, hiz_op
);
1566 blorp_batch_finish(&batch
);
1570 anv_image_hiz_clear(struct anv_cmd_buffer
*cmd_buffer
,
1571 const struct anv_image
*image
,
1572 VkImageAspectFlags aspects
,
1574 uint32_t base_layer
, uint32_t layer_count
,
1575 VkRect2D area
, uint8_t stencil_value
)
1577 assert(image
->aspects
& (VK_IMAGE_ASPECT_DEPTH_BIT
|
1578 VK_IMAGE_ASPECT_STENCIL_BIT
));
1580 struct blorp_batch batch
;
1581 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
, 0);
1583 struct blorp_surf depth
= {};
1584 if (aspects
& VK_IMAGE_ASPECT_DEPTH_BIT
) {
1585 assert(base_layer
+ layer_count
<=
1586 anv_image_aux_layers(image
, VK_IMAGE_ASPECT_DEPTH_BIT
, level
));
1587 get_blorp_surf_for_anv_image(cmd_buffer
->device
,
1588 image
, VK_IMAGE_ASPECT_DEPTH_BIT
,
1589 ANV_IMAGE_LAYOUT_EXPLICIT_AUX
,
1590 ISL_AUX_USAGE_HIZ
, &depth
);
1591 depth
.clear_color
.f32
[0] = ANV_HZ_FC_VAL
;
1594 struct blorp_surf stencil
= {};
1595 if (aspects
& VK_IMAGE_ASPECT_STENCIL_BIT
) {
1596 get_blorp_surf_for_anv_image(cmd_buffer
->device
,
1597 image
, VK_IMAGE_ASPECT_STENCIL_BIT
,
1598 ANV_IMAGE_LAYOUT_EXPLICIT_AUX
,
1599 ISL_AUX_USAGE_NONE
, &stencil
);
1602 blorp_hiz_clear_depth_stencil(&batch
, &depth
, &stencil
,
1603 level
, base_layer
, layer_count
,
1604 area
.offset
.x
, area
.offset
.y
,
1605 area
.offset
.x
+ area
.extent
.width
,
1606 area
.offset
.y
+ area
.extent
.height
,
1607 aspects
& VK_IMAGE_ASPECT_DEPTH_BIT
,
1609 aspects
& VK_IMAGE_ASPECT_STENCIL_BIT
,
1612 blorp_batch_finish(&batch
);
1614 /* From the SKL PRM, Depth Buffer Clear:
1616 * Depth Buffer Clear Workaround
1617 * Depth buffer clear pass using any of the methods (WM_STATE, 3DSTATE_WM
1618 * or 3DSTATE_WM_HZ_OP) must be followed by a PIPE_CONTROL command with
1619 * DEPTH_STALL bit and Depth FLUSH bits “set” before starting to render.
1620 * DepthStall and DepthFlush are not needed between consecutive depth clear
1621 * passes nor is it required if the depth-clear pass was done with
1622 * “full_surf_clear” bit set in the 3DSTATE_WM_HZ_OP.
1624 if (aspects
& VK_IMAGE_ASPECT_DEPTH_BIT
) {
1625 cmd_buffer
->state
.pending_pipe_bits
|=
1626 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT
| ANV_PIPE_DEPTH_STALL_BIT
;
1631 anv_image_mcs_op(struct anv_cmd_buffer
*cmd_buffer
,
1632 const struct anv_image
*image
,
1633 VkImageAspectFlagBits aspect
,
1634 uint32_t base_layer
, uint32_t layer_count
,
1635 enum isl_aux_op mcs_op
, union isl_color_value
*clear_value
,
1638 assert(image
->aspects
== VK_IMAGE_ASPECT_COLOR_BIT
);
1639 assert(image
->samples
> 1);
1640 assert(base_layer
+ layer_count
<= anv_image_aux_layers(image
, aspect
, 0));
1642 /* Multisampling with multi-planar formats is not supported */
1643 assert(image
->n_planes
== 1);
1645 struct blorp_batch batch
;
1646 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
,
1647 predicate
? BLORP_BATCH_PREDICATE_ENABLE
: 0);
1649 struct blorp_surf surf
;
1650 get_blorp_surf_for_anv_image(cmd_buffer
->device
, image
, aspect
,
1651 ANV_IMAGE_LAYOUT_EXPLICIT_AUX
,
1652 ISL_AUX_USAGE_MCS
, &surf
);
1654 /* Blorp will store the clear color for us if we provide the clear color
1655 * address and we are doing a fast clear. So we save the clear value into
1656 * the blorp surface. However, in some situations we want to do a fast clear
1657 * without changing the clear value stored in the state buffer. For those
1658 * cases, we set the clear color address pointer to NULL, so blorp will not
1659 * try to store a garbage color.
1661 if (mcs_op
== ISL_AUX_OP_FAST_CLEAR
) {
1663 surf
.clear_color
= *clear_value
;
1665 surf
.clear_color_addr
.buffer
= NULL
;
1668 /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
1670 * "After Render target fast clear, pipe-control with color cache
1671 * write-flush must be issued before sending any DRAW commands on
1672 * that render target."
1674 * This comment is a bit cryptic and doesn't really tell you what's going
1675 * or what's really needed. It appears that fast clear ops are not
1676 * properly synchronized with other drawing. This means that we cannot
1677 * have a fast clear operation in the pipe at the same time as other
1678 * regular drawing operations. We need to use a PIPE_CONTROL to ensure
1679 * that the contents of the previous draw hit the render target before we
1680 * resolve and then use a second PIPE_CONTROL after the resolve to ensure
1681 * that it is completed before any additional drawing occurs.
1683 cmd_buffer
->state
.pending_pipe_bits
|=
1684 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
| ANV_PIPE_CS_STALL_BIT
;
1687 case ISL_AUX_OP_FAST_CLEAR
:
1688 blorp_fast_clear(&batch
, &surf
, surf
.surf
->format
,
1689 0, base_layer
, layer_count
,
1690 0, 0, image
->extent
.width
, image
->extent
.height
);
1692 case ISL_AUX_OP_PARTIAL_RESOLVE
:
1693 blorp_mcs_partial_resolve(&batch
, &surf
, surf
.surf
->format
,
1694 base_layer
, layer_count
);
1696 case ISL_AUX_OP_FULL_RESOLVE
:
1697 case ISL_AUX_OP_AMBIGUATE
:
1699 unreachable("Unsupported MCS operation");
1702 cmd_buffer
->state
.pending_pipe_bits
|=
1703 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
| ANV_PIPE_CS_STALL_BIT
;
1705 blorp_batch_finish(&batch
);
1709 anv_image_ccs_op(struct anv_cmd_buffer
*cmd_buffer
,
1710 const struct anv_image
*image
,
1711 VkImageAspectFlagBits aspect
, uint32_t level
,
1712 uint32_t base_layer
, uint32_t layer_count
,
1713 enum isl_aux_op ccs_op
, union isl_color_value
*clear_value
,
1716 assert(image
->aspects
& VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV
);
1717 assert(image
->samples
== 1);
1718 assert(level
< anv_image_aux_levels(image
, aspect
));
1719 /* Multi-LOD YcBcR is not allowed */
1720 assert(image
->n_planes
== 1 || level
== 0);
1721 assert(base_layer
+ layer_count
<=
1722 anv_image_aux_layers(image
, aspect
, level
));
1724 uint32_t plane
= anv_image_aspect_to_plane(image
->aspects
, aspect
);
1725 uint32_t width_div
= image
->format
->planes
[plane
].denominator_scales
[0];
1726 uint32_t height_div
= image
->format
->planes
[plane
].denominator_scales
[1];
1727 uint32_t level_width
= anv_minify(image
->extent
.width
, level
) / width_div
;
1728 uint32_t level_height
= anv_minify(image
->extent
.height
, level
) / height_div
;
1730 struct blorp_batch batch
;
1731 blorp_batch_init(&cmd_buffer
->device
->blorp
, &batch
, cmd_buffer
,
1732 predicate
? BLORP_BATCH_PREDICATE_ENABLE
: 0);
1734 struct blorp_surf surf
;
1735 get_blorp_surf_for_anv_image(cmd_buffer
->device
, image
, aspect
,
1736 ANV_IMAGE_LAYOUT_EXPLICIT_AUX
,
1737 fast_clear_aux_usage(image
, aspect
),
1740 /* Blorp will store the clear color for us if we provide the clear color
1741 * address and we are doing a fast clear. So we save the clear value into
1742 * the blorp surface. However, in some situations we want to do a fast clear
1743 * without changing the clear value stored in the state buffer. For those
1744 * cases, we set the clear color address pointer to NULL, so blorp will not
1745 * try to store a garbage color.
1747 if (ccs_op
== ISL_AUX_OP_FAST_CLEAR
) {
1749 surf
.clear_color
= *clear_value
;
1751 surf
.clear_color_addr
.buffer
= NULL
;
1754 /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
1756 * "After Render target fast clear, pipe-control with color cache
1757 * write-flush must be issued before sending any DRAW commands on
1758 * that render target."
1760 * This comment is a bit cryptic and doesn't really tell you what's going
1761 * or what's really needed. It appears that fast clear ops are not
1762 * properly synchronized with other drawing. This means that we cannot
1763 * have a fast clear operation in the pipe at the same time as other
1764 * regular drawing operations. We need to use a PIPE_CONTROL to ensure
1765 * that the contents of the previous draw hit the render target before we
1766 * resolve and then use a second PIPE_CONTROL after the resolve to ensure
1767 * that it is completed before any additional drawing occurs.
1769 cmd_buffer
->state
.pending_pipe_bits
|=
1770 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
| ANV_PIPE_CS_STALL_BIT
;
1773 case ISL_AUX_OP_FAST_CLEAR
:
1774 blorp_fast_clear(&batch
, &surf
, surf
.surf
->format
,
1775 level
, base_layer
, layer_count
,
1776 0, 0, level_width
, level_height
);
1778 case ISL_AUX_OP_FULL_RESOLVE
:
1779 case ISL_AUX_OP_PARTIAL_RESOLVE
:
1780 blorp_ccs_resolve(&batch
, &surf
, level
, base_layer
, layer_count
,
1781 surf
.surf
->format
, ccs_op
);
1783 case ISL_AUX_OP_AMBIGUATE
:
1784 for (uint32_t a
= 0; a
< layer_count
; a
++) {
1785 const uint32_t layer
= base_layer
+ a
;
1786 blorp_ccs_ambiguate(&batch
, &surf
, level
, layer
);
1790 unreachable("Unsupported CCS operation");
1793 cmd_buffer
->state
.pending_pipe_bits
|=
1794 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
| ANV_PIPE_CS_STALL_BIT
;
1796 blorp_batch_finish(&batch
);