/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"
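
/* blorp compiles its blit and clear shaders at draw time and calls back into
 * the driver to cache them.  The two callbacks below back that cache with the
 * device's blorp_shader_cache so kernels are reused across command buffers.
 */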
static bool
lookup_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_shader_bin *bin =
      anv_pipeline_cache_search(&device->blorp_shader_cache, key, key_size);
   if (bin == NULL)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}
static void
upload_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    const void *kernel, uint32_t kernel_size,
                    const struct brw_stage_prog_data *prog_data,
                    uint32_t prog_data_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_pipeline_bind_map bind_map = {
      .surface_count = 0,
      .sampler_count = 0,
   };

   struct anv_shader_bin *bin =
      anv_pipeline_cache_upload_kernel(&device->blorp_shader_cache,
                                       key, key_size, kernel, kernel_size,
                                       prog_data, prog_data_size, &bind_map);

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;
}
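
/* Wire blorp into the device: the shader-cache callbacks above, MOCS values
 * for texture/render-target/vertex-buffer accesses, and the gen-specific
 * batch-emission entrypoint.
 */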
void
anv_device_init_blorp(struct anv_device *device)
{
   anv_pipeline_cache_init(&device->blorp_shader_cache, device, true);
   blorp_init(&device->blorp, device, &device->isl_dev);
   device->blorp.compiler = device->instance->physicalDevice.compiler;
   device->blorp.mocs.tex = device->default_mocs;
   device->blorp.mocs.rb = device->default_mocs;
   device->blorp.mocs.vb = device->default_mocs;
   device->blorp.lookup_shader = lookup_blorp_shader;
   device->blorp.upload_shader = upload_blorp_shader;
   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell) {
         device->blorp.exec = gen75_blorp_exec;
      } else {
         device->blorp.exec = gen7_blorp_exec;
      }
      break;
   case 8:
      device->blorp.exec = gen8_blorp_exec;
      break;
   case 9:
      device->blorp.exec = gen9_blorp_exec;
      break;
   default:
      unreachable("Unknown hardware generation");
   }
}
void
anv_device_finish_blorp(struct anv_device *device)
{
   blorp_finish(&device->blorp);
   anv_pipeline_cache_finish(&device->blorp_shader_cache);
}
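
/* Wraps a range of a VkBuffer as a one-level linear 2D surface so blorp can
 * treat it as a blit source or destination.  The caller chooses dimensions,
 * row pitch, and format; isl_surf provides backing storage for the surface
 * description.
 */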
static void
get_blorp_surf_for_anv_buffer(struct anv_device *device,
                              struct anv_buffer *buffer, uint64_t offset,
                              uint32_t width, uint32_t height,
                              uint32_t row_pitch, enum isl_format format,
                              struct blorp_surf *blorp_surf,
                              struct isl_surf *isl_surf)
{
   const struct isl_format_layout *fmtl =
      isl_format_get_layout(format);

   /* ASTC is the only format which doesn't support linear layouts.
    * Create an equivalently sized surface with ISL to get around this.
    */
   if (fmtl->txc == ISL_TXC_ASTC) {
      /* Use an equivalently sized format */
      format = ISL_FORMAT_R32G32B32A32_UINT;
      assert(fmtl->bpb == isl_format_get_layout(format)->bpb);

      /* Shrink the dimensions for the new format */
      width = DIV_ROUND_UP(width, fmtl->bw);
      height = DIV_ROUND_UP(height, fmtl->bh);
   }

   *blorp_surf = (struct blorp_surf) {
      .surf = isl_surf,
      .addr = {
         .buffer = buffer->bo,
         .offset = buffer->offset + offset,
      },
   };

   isl_surf_init(&device->isl_dev, isl_surf,
                 .dim = ISL_SURF_DIM_2D,
                 .format = format,
                 .width = width,
                 .height = height,
                 .depth = 1,
                 .levels = 1,
                 .array_len = 1,
                 .samples = 1,
                 .min_pitch = row_pitch,
                 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                          ISL_SURF_USAGE_RENDER_TARGET_BIT,
                 .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(isl_surf->row_pitch == row_pitch);
}
static void
get_blorp_surf_for_anv_image(const struct anv_image *image,
                             VkImageAspectFlags aspect,
                             enum isl_aux_usage aux_usage,
                             struct blorp_surf *blorp_surf)
{
   if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT ||
       aux_usage == ISL_AUX_USAGE_HIZ)
      aux_usage = ISL_AUX_USAGE_NONE;

   const struct anv_surface *surface =
      anv_image_get_surface_for_aspect_mask(image, aspect);

   *blorp_surf = (struct blorp_surf) {
      .surf = &surface->isl,
      .addr = {
         .buffer = image->bo,
         .offset = image->offset + surface->offset,
      },
   };

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      blorp_surf->aux_surf = &image->aux_surface.isl,
      blorp_surf->aux_addr = (struct blorp_address) {
         .buffer = image->bo,
         .offset = image->offset + image->aux_surface.offset,
      };
      blorp_surf->aux_usage = aux_usage;
   }
}
void anv_CmdCopyImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageCopy*                          pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      VkOffset3D srcOffset =
         anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
      VkOffset3D dstOffset =
         anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
      VkExtent3D extent =
         anv_sanitize_image_extent(src_image->type, pRegions[r].extent);

      unsigned dst_base_layer, layer_count;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         dst_base_layer = pRegions[r].dstOffset.z;
         layer_count = pRegions[r].extent.depth;
      } else {
         dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
         layer_count = pRegions[r].dstSubresource.layerCount;
      }

      unsigned src_base_layer;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         src_base_layer = pRegions[r].srcOffset.z;
      } else {
         src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
         assert(pRegions[r].srcSubresource.layerCount == layer_count);
      }

      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);

      uint32_t a;
      for_each_bit(a, pRegions[r].dstSubresource.aspectMask) {
         VkImageAspectFlagBits aspect = (1 << a);

         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(src_image, aspect, src_image->aux_usage,
                                      &src_surf);
         get_blorp_surf_for_anv_image(dst_image, aspect, dst_image->aux_usage,
                                      &dst_surf);

         for (unsigned i = 0; i < layer_count; i++) {
            blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
                       src_base_layer + i,
                       &dst_surf, pRegions[r].dstSubresource.mipLevel,
                       dst_base_layer + i,
                       srcOffset.x, srcOffset.y,
                       dstOffset.x, dstOffset.y,
                       extent.width, extent.height);
         }
      }
   }

   blorp_batch_finish(&batch);
}
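
/* Shared implementation of vkCmdCopyBufferToImage and vkCmdCopyImageToBuffer.
 * Both directions build the same image and buffer surfaces; buffer_to_image
 * only selects which of the two blorp treats as the source and which as the
 * destination.
 */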
static void
copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_buffer *anv_buffer,
                     struct anv_image *anv_image,
                     uint32_t regionCount,
                     const VkBufferImageCopy* pRegions,
                     bool buffer_to_image)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct {
      struct blorp_surf surf;
      uint32_t level;
      VkOffset3D offset;
   } image, buffer, *src, *dst;

   buffer.level = 0;
   buffer.offset = (VkOffset3D) { 0, 0, 0 };

   if (buffer_to_image) {
      src = &buffer;
      dst = &image;
   } else {
      src = &image;
      dst = &buffer;
   }

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;

      get_blorp_surf_for_anv_image(anv_image, aspect, anv_image->aux_usage,
                                   &image.surf);
      image.offset =
         anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
      image.level = pRegions[r].imageSubresource.mipLevel;

      VkExtent3D extent =
         anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
      if (anv_image->type != VK_IMAGE_TYPE_3D) {
         image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
         extent.depth = pRegions[r].imageSubresource.layerCount;
      }

      const enum isl_format buffer_format =
         anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
                            aspect, VK_IMAGE_TILING_LINEAR);

      const VkExtent3D bufferImageExtent = {
         .width  = pRegions[r].bufferRowLength ?
                   pRegions[r].bufferRowLength : extent.width,
         .height = pRegions[r].bufferImageHeight ?
                   pRegions[r].bufferImageHeight : extent.height,
      };

      const struct isl_format_layout *buffer_fmtl =
         isl_format_get_layout(buffer_format);

      const uint32_t buffer_row_pitch =
         DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
         (buffer_fmtl->bpb / 8);

      const uint32_t buffer_layer_stride =
         DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
         buffer_row_pitch;

      struct isl_surf buffer_isl_surf;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    anv_buffer, pRegions[r].bufferOffset,
                                    extent.width, extent.height,
                                    buffer_row_pitch, buffer_format,
                                    &buffer.surf, &buffer_isl_surf);

      for (unsigned z = 0; z < extent.depth; z++) {
         blorp_copy(&batch, &src->surf, src->level, src->offset.z,
                    &dst->surf, dst->level, dst->offset.z,
                    src->offset.x, src->offset.y,
                    dst->offset.x, dst->offset.y,
                    extent.width, extent.height);

         image.offset.z++;
         buffer.surf.addr.offset += buffer_layer_stride;
      }
   }

   blorp_batch_finish(&batch);
}
void anv_CmdCopyBufferToImage(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   copy_buffer_to_image(cmd_buffer, src_buffer, dst_image,
                        regionCount, pRegions, true);
}
void anv_CmdCopyImageToBuffer(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   copy_buffer_to_image(cmd_buffer, dst_buffer, src_image,
                        regionCount, pRegions, false);
}
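
/* vkCmdBlitImage expresses mirroring with reversed regions (offsets[1] <
 * offsets[0]).  flip_coords normalizes each axis to an increasing range and
 * returns whether source and destination disagree, in which case the blit is
 * performed mirrored along that axis.
 */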
static bool
flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
{
   bool flip = false;
   if (*src0 > *src1) {
      unsigned tmp = *src0;
      *src0 = *src1;
      *src1 = tmp;
      flip = !flip;
   }

   if (*dst0 > *dst1) {
      unsigned tmp = *dst0;
      *dst0 = *dst1;
      *dst1 = tmp;
      flip = !flip;
   }

   return flip;
}
void anv_CmdBlitImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageBlit*                          pRegions,
    VkFilter                                    filter)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_surf src, dst;

   uint32_t gl_filter;
   switch (filter) {
   case VK_FILTER_NEAREST:
      gl_filter = 0x2600; /* GL_NEAREST */
      break;
   case VK_FILTER_LINEAR:
      gl_filter = 0x2601; /* GL_LINEAR */
      break;
   default:
      unreachable("Invalid filter");
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
      const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;

      get_blorp_surf_for_anv_image(src_image, src_res->aspectMask,
                                   src_image->aux_usage, &src);
      get_blorp_surf_for_anv_image(dst_image, dst_res->aspectMask,
                                   dst_image->aux_usage, &dst);

      struct anv_format src_format =
         anv_get_format(&cmd_buffer->device->info, src_image->vk_format,
                        src_res->aspectMask, src_image->tiling);
      struct anv_format dst_format =
         anv_get_format(&cmd_buffer->device->info, dst_image->vk_format,
                        dst_res->aspectMask, dst_image->tiling);

      unsigned dst_start, dst_end;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         assert(dst_res->baseArrayLayer == 0);
         dst_start = pRegions[r].dstOffsets[0].z;
         dst_end = pRegions[r].dstOffsets[1].z;
      } else {
         dst_start = dst_res->baseArrayLayer;
         dst_end = dst_start + dst_res->layerCount;
      }

      unsigned src_start, src_end;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         assert(src_res->baseArrayLayer == 0);
         src_start = pRegions[r].srcOffsets[0].z;
         src_end = pRegions[r].srcOffsets[1].z;
      } else {
         src_start = src_res->baseArrayLayer;
         src_end = src_start + src_res->layerCount;
      }

      bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
      float src_z_step = (float)(src_end + 1 - src_start) /
                         (float)(dst_end + 1 - dst_start);

      if (flip_z) {
         src_start = src_end;
         src_z_step *= -1;
      }

      unsigned src_x0 = pRegions[r].srcOffsets[0].x;
      unsigned src_x1 = pRegions[r].srcOffsets[1].x;
      unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
      unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
      bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);

      unsigned src_y0 = pRegions[r].srcOffsets[0].y;
      unsigned src_y1 = pRegions[r].srcOffsets[1].y;
      unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
      unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
      bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);

      const unsigned num_layers = dst_end - dst_start;
      for (unsigned i = 0; i < num_layers; i++) {
         unsigned dst_z = dst_start + i;
         unsigned src_z = src_start + i * src_z_step;

         blorp_blit(&batch, &src, src_res->mipLevel, src_z,
                    src_format.isl_format, src_format.swizzle,
                    &dst, dst_res->mipLevel, dst_z,
                    dst_format.isl_format,
                    anv_swizzle_for_render(dst_format.swizzle),
                    src_x0, src_y0, src_x1, src_y1,
                    dst_x0, dst_y0, dst_x1, dst_y1,
                    gl_filter, flip_x, flip_y);
      }
   }

   blorp_batch_finish(&batch);
}
static enum isl_format
isl_format_for_size(unsigned size_B)
{
   switch (size_B) {
   case 1:  return ISL_FORMAT_R8_UINT;
   case 2:  return ISL_FORMAT_R8G8_UINT;
   case 4:  return ISL_FORMAT_R8G8B8A8_UINT;
   case 8:  return ISL_FORMAT_R16G16B16A16_UINT;
   case 16: return ISL_FORMAT_R32G32B32A32_UINT;
   default:
      unreachable("Not a power-of-two format size");
   }
}
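
/* do_buffer_copy treats a raw buffer-to-buffer copy as a width x height
 * rectangle of block_size-byte texels.  For example, a 64 KiB copy whose
 * offsets are 16-byte aligned becomes a 4096 x 1 copy of
 * R32G32B32A32_UINT texels.
 */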
static void
do_buffer_copy(struct blorp_batch *batch,
               struct anv_bo *src, uint64_t src_offset,
               struct anv_bo *dst, uint64_t dst_offset,
               int width, int height, int block_size)
{
   struct anv_device *device = batch->blorp->driver_ctx;

   /* The actual format we pick doesn't matter as blorp will throw it away.
    * The only thing that actually matters is the size.
    */
   enum isl_format format = isl_format_for_size(block_size);

   struct isl_surf surf;
   isl_surf_init(&device->isl_dev, &surf,
                 .dim = ISL_SURF_DIM_2D,
                 .format = format,
                 .width = width,
                 .height = height,
                 .depth = 1,
                 .levels = 1,
                 .array_len = 1,
                 .samples = 1,
                 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                          ISL_SURF_USAGE_RENDER_TARGET_BIT,
                 .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(surf.row_pitch == width * block_size);

   struct blorp_surf src_blorp_surf = {
      .surf = &surf,
      .addr = {
         .buffer = src,
         .offset = src_offset,
      },
   };

   struct blorp_surf dst_blorp_surf = {
      .surf = &surf,
      .addr = {
         .buffer = dst,
         .offset = dst_offset,
      },
   };

   blorp_copy(batch, &src_blorp_surf, 0, 0, &dst_blorp_surf, 0, 0,
              0, 0, 0, 0, width, height);
}
/**
 * Returns the greatest common divisor of a and b that is a power of two.
 */
static inline uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX in which
    * case, the MIN2() will take the other one.  If both are 0 then we will
    * hit the assert above.
    */
   return 1 << MIN2(a_log2, b_log2);
}
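
/* Worked example: gcd_pow2_u64(12, 8) computes a_log2 = ffsll(12) - 1 = 2
 * and b_log2 = ffsll(8) - 1 = 3 and returns 1 << MIN2(2, 3) = 4, the largest
 * power of two dividing both.  The copy and fill paths below use it to pick
 * the widest texel size that keeps every offset and size texel-aligned.
 */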
/* This is the maximum possible width/height our HW can handle */
#define MAX_SURFACE_DIM (1ull << 14)
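
/* Buffer copies and fills are decomposed into at most
 * MAX_SURFACE_DIM x MAX_SURFACE_DIM rectangles: as many full-size rectangles
 * as fit, then one max-width rectangle covering the remaining whole rows,
 * then a final one-row pass for the tail.
 */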
void anv_CmdCopyBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferCopy*                         pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      uint64_t src_offset = src_buffer->offset + pRegions[r].srcOffset;
      uint64_t dst_offset = dst_buffer->offset + pRegions[r].dstOffset;
      uint64_t copy_size = pRegions[r].size;

      /* First, we compute the biggest format that can be used with the
       * given offsets and size.
       */
      int bs = 16;
      bs = gcd_pow2_u64(bs, src_offset);
      bs = gcd_pow2_u64(bs, dst_offset);
      bs = gcd_pow2_u64(bs, pRegions[r].size);

      /* First, we make a bunch of max-sized copies */
      uint64_t max_copy_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
      while (copy_size >= max_copy_size) {
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        MAX_SURFACE_DIM, MAX_SURFACE_DIM, bs);
         copy_size -= max_copy_size;
         src_offset += max_copy_size;
         dst_offset += max_copy_size;
      }

      /* Now make a max-width copy */
      uint64_t height = copy_size / (MAX_SURFACE_DIM * bs);
      assert(height < MAX_SURFACE_DIM);
      if (height != 0) {
         uint64_t rect_copy_size = height * MAX_SURFACE_DIM * bs;
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        MAX_SURFACE_DIM, height, bs);
         copy_size -= rect_copy_size;
         src_offset += rect_copy_size;
         dst_offset += rect_copy_size;
      }

      /* Finally, make a small copy to finish it off */
      if (copy_size != 0) {
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        copy_size / bs, 1, bs);
      }
   }

   blorp_batch_finish(&batch);
}
void anv_CmdUpdateBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                dataSize,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* We can't quite grab a full block because the state stream needs a
    * little data at the top to build its linked list.
    */
   const uint32_t max_update_size =
      cmd_buffer->device->dynamic_state_block_pool.block_size - 64;

   assert(max_update_size < MAX_SURFACE_DIM * 4);

   while (dataSize) {
      const uint32_t copy_size = MIN2(dataSize, max_update_size);

      struct anv_state tmp_data =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);

      memcpy(tmp_data.map, pData, copy_size);

      int bs = 16;
      bs = gcd_pow2_u64(bs, dstOffset);
      bs = gcd_pow2_u64(bs, copy_size);

      do_buffer_copy(&batch,
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     tmp_data.offset,
                     dst_buffer->bo, dst_buffer->offset + dstOffset,
                     copy_size / bs, 1, bs);

      dataSize -= copy_size;
      dstOffset += copy_size;
      pData = (void *)pData + copy_size;
   }

   blorp_batch_finish(&batch);
}
void anv_CmdFillBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                fillSize,
    uint32_t                                    data)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
   struct blorp_surf surf;
   struct isl_surf isl_surf;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   fillSize = anv_buffer_get_range(dst_buffer, dstOffset, fillSize);

   /* From the Vulkan spec:
    *
    *    "size is the number of bytes to fill, and must be either a multiple
    *    of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
    *    the buffer. If VK_WHOLE_SIZE is used and the remaining size of the
    *    buffer is not a multiple of 4, then the nearest smaller multiple is
    *    used."
    */
   fillSize &= ~3ull;

   /* First, we compute the biggest format that can be used with the
    * given offsets and size.
    */
   int bs = 4;
   bs = gcd_pow2_u64(bs, dstOffset);
   bs = gcd_pow2_u64(bs, fillSize);
   enum isl_format isl_format = isl_format_for_size(bs);

   union isl_color_value color = {
      .u32 = { data, data, data, data },
   };

   const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
   while (fillSize >= max_fill_size) {
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                  color, NULL);
      fillSize -= max_fill_size;
      dstOffset += max_fill_size;
   }

   uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
   assert(height < MAX_SURFACE_DIM);
   if (height != 0) {
      const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, height,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
                  color, NULL);
      fillSize -= rect_fill_size;
      dstOffset += rect_fill_size;
   }

   if (fillSize != 0) {
      const uint32_t width = fillSize / bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    width, 1,
                                    width * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, width, 1,
                  color, NULL);
   }

   blorp_batch_finish(&batch);
}
void anv_CmdClearColorImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     _image,
    VkImageLayout                               imageLayout,
    const VkClearColorValue*                    pColor,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, _image);

   static const bool color_write_disable[4] = { false, false, false, false };

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                image->aux_usage, &surf);

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      assert(pRanges[r].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);

      struct anv_format src_format =
         anv_get_format(&cmd_buffer->device->info, image->vk_format,
                        VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = pRanges[r].layerCount;

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D) {
            base_layer = 0;
            layer_count = anv_minify(image->extent.depth, level);
         }

         blorp_clear(&batch, &surf,
                     src_format.isl_format, src_format.swizzle,
                     level, base_layer, layer_count,
                     0, 0, level_width, level_height,
                     vk_to_isl_color(*pColor), color_write_disable);
      }
   }

   blorp_batch_finish(&batch);
}
void anv_CmdClearDepthStencilImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     image_h,
    VkImageLayout                               imageLayout,
    const VkClearDepthStencilValue*             pDepthStencil,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, image_h);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth, stencil;
   if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   ISL_AUX_USAGE_NONE, &depth);
   } else {
      memset(&depth, 0, sizeof(depth));
   }

   if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   ISL_AUX_USAGE_NONE, &stencil);
   } else {
      memset(&stencil, 0, sizeof(stencil));
   }

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
      bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = pRanges[r].layerCount;

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D)
            layer_count = anv_minify(image->extent.depth, level);

         blorp_clear_depth_stencil(&batch, &depth, &stencil,
                                   level, base_layer, layer_count,
                                   0, 0, level_width, level_height,
                                   clear_depth, pDepthStencil->depth,
                                   clear_stencil ? 0xff : 0,
                                   pDepthStencil->stencil);
      }
   }

   blorp_batch_finish(&batch);
}
static struct anv_state
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset)
{
   struct anv_state bt_state =
      anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                         state_offset);
   if (bt_state.map == NULL) {
      /* We ran out of space.  Grab a new binding table block. */
      MAYBE_UNUSED VkResult result =
         anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                    state_offset);
      assert(bt_state.map != NULL);
   }

   return bt_state;
}
static uint32_t
binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_state surface_state)
{
   uint32_t state_offset;
   struct anv_state bt_state =
      anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset);

   uint32_t *bt_map = bt_state.map;
   bt_map[0] = surface_state.offset + state_offset;

   return bt_state.offset;
}
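
/* vkCmdClearAttachments runs inside a render pass, so rather than building
 * new surfaces, the helpers below reuse the surface state already emitted for
 * the current subpass and hand blorp a one-entry binding table pointing at
 * it.
 */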
static void
clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       const VkClearAttachment *attachment,
                       uint32_t rectCount, const VkClearRect *pRects)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t color_att = attachment->colorAttachment;
   const uint32_t att_idx = subpass->color_attachments[color_att].attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att_idx];

   uint32_t binding_table =
      binding_table_for_surface_state(cmd_buffer, att_state->color_rt_state);

   union isl_color_value clear_color =
      vk_to_isl_color(attachment->clearValue.color);

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      blorp_clear_attachments(batch, binding_table,
                              ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              true, clear_color, false, 0.0f, 0, 0);
   }
}
static void
clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
                               struct blorp_batch *batch,
                               const VkClearAttachment *attachment,
                               uint32_t rectCount, const VkClearRect *pRects)
{
   static const union isl_color_value color_value = { .u32 = { 0, } };
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t att_idx = subpass->depth_stencil_attachment.attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];

   bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
   bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

   enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
   if (clear_depth) {
      depth_format = anv_get_isl_format(&cmd_buffer->device->info,
                                        pass_att->format,
                                        VK_IMAGE_ASPECT_DEPTH_BIT,
                                        VK_IMAGE_TILING_OPTIMAL);
   }

   uint32_t binding_table =
      binding_table_for_surface_state(cmd_buffer,
                                      cmd_buffer->state.null_surface_state);

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
      blorp_clear_attachments(batch, binding_table,
                              depth_format, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              false, color_value,
                              clear_depth, value.depth,
                              clear_stencil ? 0xff : 0, value.stencil);
   }
}
void anv_CmdClearAttachments(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    attachmentCount,
    const VkClearAttachment*                    pAttachments,
    uint32_t                                    rectCount,
    const VkClearRect*                          pRects)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   for (uint32_t a = 0; a < attachmentCount; ++a) {
      if (pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
         clear_color_attachment(cmd_buffer, &batch,
                                &pAttachments[a],
                                rectCount, pRects);
      } else {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &pAttachments[a],
                                        rectCount, pRects);
      }
   }

   blorp_batch_finish(&batch);
}
enum subpass_stage {
   SUBPASS_STAGE_LOAD,
   SUBPASS_STAGE_DRAW,
   SUBPASS_STAGE_RESOLVE,
};
static bool
attachment_needs_flush(struct anv_cmd_buffer *cmd_buffer,
                       struct anv_render_pass_attachment *att,
                       enum subpass_stage stage)
{
   struct anv_render_pass *pass = cmd_buffer->state.pass;
   const uint32_t subpass_idx = anv_get_subpass_id(&cmd_buffer->state);

   /* We handle this subpass specially based on the current stage */
   enum anv_subpass_usage usage = att->subpass_usage[subpass_idx];
   switch (stage) {
   case SUBPASS_STAGE_LOAD:
      if (usage & (ANV_SUBPASS_USAGE_INPUT | ANV_SUBPASS_USAGE_RESOLVE_SRC))
         return true;
      break;

   case SUBPASS_STAGE_DRAW:
      if (usage & ANV_SUBPASS_USAGE_RESOLVE_SRC)
         return true;
      break;

   default:
      break;
   }

   for (uint32_t s = subpass_idx + 1; s < pass->subpass_count; s++) {
      usage = att->subpass_usage[s];

      /* If this attachment is going to be used as an input in this or any
       * future subpass, then we need to flush its cache and invalidate the
       * texture cache.
       */
      if (att->subpass_usage[s] & ANV_SUBPASS_USAGE_INPUT)
         return true;

      if (usage & (ANV_SUBPASS_USAGE_DRAW | ANV_SUBPASS_USAGE_RESOLVE_DST)) {
         /* We found another subpass that draws to this attachment.  We'll
          * wait to resolve until then.
          */
         return false;
      }
   }

   return false;
}
static void
anv_cmd_buffer_flush_attachments(struct anv_cmd_buffer *cmd_buffer,
                                 enum subpass_stage stage)
{
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_render_pass *pass = cmd_buffer->state.pass;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      uint32_t att = subpass->color_attachments[i].attachment;
      assert(att < pass->attachment_count);
      if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
      }
   }

   if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
      uint32_t att = subpass->depth_stencil_attachment.attachment;
      assert(att < pass->attachment_count);
      if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
            ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
      }
   }
}
static bool
subpass_needs_clear(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;

   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      if (cmd_state->attachments[a].pending_clear_aspects) {
         return true;
      }
   }

   if (ds != VK_ATTACHMENT_UNUSED &&
       cmd_state->attachments[ds].pending_clear_aspects) {
      return true;
   }

   return false;
}
void
anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   const VkRect2D render_area = cmd_buffer->state.render_area;

   if (!subpass_needs_clear(cmd_buffer))
      return;

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   VkClearRect clear_rect = {
      .rect = cmd_buffer->state.render_area,
      .baseArrayLayer = 0,
      .layerCount = cmd_buffer->state.framebuffer->layers,
   };

   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      const uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      struct anv_attachment_state *att_state = &cmd_state->attachments[a];

      if (!att_state->pending_clear_aspects)
         continue;

      assert(att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);

      struct anv_image_view *iview = fb->attachments[a];
      const struct anv_image *image = iview->image;
      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                   att_state->aux_usage, &surf);

      if (att_state->fast_clear) {
         surf.clear_color = vk_to_isl_color(att_state->clear_value.color);

         /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
          *
          *    "After Render target fast clear, pipe-control with color cache
          *    write-flush must be issued before sending any DRAW commands on
          *    that render target."
          *
          * This comment is a bit cryptic and doesn't really tell you what's
          * going on or what's really needed.  It appears that fast clear ops
          * are not properly synchronized with other drawing.  This means that
          * we cannot have a fast clear operation in the pipe at the same time
          * as other regular drawing operations.  We need to use a PIPE_CONTROL
          * to ensure that the contents of the previous draw hit the render
          * target before we resolve and then use a second PIPE_CONTROL after
          * the resolve to ensure that it is completed before any additional
          * drawing occurs.
          */
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

         blorp_fast_clear(&batch, &surf, iview->isl.format,
                          iview->isl.base_level,
                          iview->isl.base_array_layer, fb->layers,
                          render_area.offset.x, render_area.offset.y,
                          render_area.offset.x + render_area.extent.width,
                          render_area.offset.y + render_area.extent.height);

         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
      } else {
         blorp_clear(&batch, &surf, iview->isl.format,
                     anv_swizzle_for_render(iview->isl.swizzle),
                     iview->isl.base_level,
                     iview->isl.base_array_layer, fb->layers,
                     render_area.offset.x, render_area.offset.y,
                     render_area.offset.x + render_area.extent.width,
                     render_area.offset.y + render_area.extent.height,
                     vk_to_isl_color(att_state->clear_value.color), NULL);
      }

      att_state->pending_clear_aspects = 0;
   }

   const uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;

   if (ds != VK_ATTACHMENT_UNUSED &&
       cmd_state->attachments[ds].pending_clear_aspects) {

      VkClearAttachment clear_att = {
         .aspectMask = cmd_state->attachments[ds].pending_clear_aspects,
         .clearValue = cmd_state->attachments[ds].clear_value,
      };

      const uint8_t gen = cmd_buffer->device->info.gen;
      bool clear_with_hiz = gen >= 8 && cmd_state->attachments[ds].aux_usage ==
                            ISL_AUX_USAGE_HIZ;
      const struct anv_image_view *iview = fb->attachments[ds];

      if (clear_with_hiz) {
         const bool clear_depth = clear_att.aspectMask &
                                  VK_IMAGE_ASPECT_DEPTH_BIT;
         const bool clear_stencil = clear_att.aspectMask &
                                    VK_IMAGE_ASPECT_STENCIL_BIT;

         /* Check against restrictions for depth buffer clearing. A great GPU
          * performance benefit isn't expected when using the HZ sequence for
          * stencil-only clears. Therefore, we don't emit a HZ op sequence for
          * a stencil clear in addition to using the BLORP-fallback for depth.
          */
         if (clear_depth) {
            if (!blorp_can_hiz_clear_depth(gen, iview->isl.format,
                                           iview->image->samples,
                                           render_area.offset.x,
                                           render_area.offset.y,
                                           render_area.offset.x +
                                           render_area.extent.width,
                                           render_area.offset.y +
                                           render_area.extent.height)) {
               clear_with_hiz = false;
            } else if (clear_att.clearValue.depthStencil.depth !=
                       ANV_HZ_FC_VAL) {
               /* Don't enable fast depth clears for any color not equal to
                * ANV_HZ_FC_VAL.
                */
               clear_with_hiz = false;
            } else if (gen == 8 &&
                       anv_can_sample_with_hiz(&cmd_buffer->device->info,
                                               iview->aspect_mask,
                                               iview->image->samples)) {
               /* Only gen9+ supports returning ANV_HZ_FC_VAL when sampling a
                * fast-cleared portion of a HiZ buffer. Testing has revealed
                * that Gen8 only supports returning 0.0f. Gens prior to gen8 do
                * not support this feature at all.
                */
               clear_with_hiz = false;
            }
         }

         if (clear_with_hiz) {
            blorp_gen8_hiz_clear_attachments(&batch, iview->image->samples,
                                             render_area.offset.x,
                                             render_area.offset.y,
                                             render_area.offset.x +
                                             render_area.extent.width,
                                             render_area.offset.y +
                                             render_area.extent.height,
                                             clear_depth, clear_stencil,
                                             clear_att.clearValue.
                                                depthStencil.stencil);
         }
      }

      if (!clear_with_hiz) {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &clear_att, 1, &clear_rect);
      }

      cmd_state->attachments[ds].pending_clear_aspects = 0;
   }

   blorp_batch_finish(&batch);

   anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_LOAD);
}
static void
resolve_image(struct blorp_batch *batch,
              const struct anv_image *src_image,
              uint32_t src_level, uint32_t src_layer,
              const struct anv_image *dst_image,
              uint32_t dst_level, uint32_t dst_layer,
              VkImageAspectFlags aspect_mask,
              uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
              uint32_t width, uint32_t height)
{
   assert(src_image->type == VK_IMAGE_TYPE_2D);
   assert(src_image->samples > 1);
   assert(dst_image->type == VK_IMAGE_TYPE_2D);
   assert(dst_image->samples == 1);

   uint32_t a;
   for_each_bit(a, aspect_mask) {
      VkImageAspectFlagBits aspect = 1 << a;

      struct blorp_surf src_surf, dst_surf;
      get_blorp_surf_for_anv_image(src_image, aspect,
                                   src_image->aux_usage, &src_surf);
      get_blorp_surf_for_anv_image(dst_image, aspect,
                                   dst_image->aux_usage, &dst_surf);

      blorp_blit(batch,
                 &src_surf, src_level, src_layer,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 &dst_surf, dst_level, dst_layer,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 src_x, src_y, src_x + width, src_y + height,
                 dst_x, dst_y, dst_x + width, dst_y + height,
                 0x2600 /* GL_NEAREST */, false, false);
   }
}
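
/* A Vulkan multisample resolve maps onto an equal-sized blorp_blit from the
 * multisampled source to the single-sampled destination; passing
 * ISL_FORMAT_UNSUPPORTED tells blorp to use each surface's own format.
 */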
void anv_CmdResolveImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageResolve*                       pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t r = 0; r < regionCount; r++) {
      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);
      assert(pRegions[r].srcSubresource.layerCount ==
             pRegions[r].dstSubresource.layerCount);

      const uint32_t layer_count = pRegions[r].dstSubresource.layerCount;

      for (uint32_t layer = 0; layer < layer_count; layer++) {
         resolve_image(&batch,
                       src_image, pRegions[r].srcSubresource.mipLevel,
                       pRegions[r].srcSubresource.baseArrayLayer + layer,
                       dst_image, pRegions[r].dstSubresource.mipLevel,
                       pRegions[r].dstSubresource.baseArrayLayer + layer,
                       pRegions[r].dstSubresource.aspectMask,
                       pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
                       pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
                       pRegions[r].extent.width, pRegions[r].extent.height);
      }
   }

   blorp_batch_finish(&batch);
}
static void
ccs_resolve_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       uint32_t att)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att];

   if (att_state->aux_usage == ISL_AUX_USAGE_NONE ||
       att_state->aux_usage == ISL_AUX_USAGE_MCS)
      return; /* Nothing to resolve */

   assert(att_state->aux_usage == ISL_AUX_USAGE_CCS_E ||
          att_state->aux_usage == ISL_AUX_USAGE_CCS_D);

   struct anv_render_pass *pass = cmd_buffer->state.pass;
   const uint32_t subpass_idx = anv_get_subpass_id(&cmd_buffer->state);

   /* Scan forward to see what all ways this attachment will be used.
    * Ideally, we would like to resolve in the same subpass as the last write
    * of a particular attachment.  That way we only resolve once but it's
    * still hot in the cache.
    */
   bool found_draw = false;
   enum anv_subpass_usage usage = 0;
   for (uint32_t s = subpass_idx + 1; s < pass->subpass_count; s++) {
      usage |= pass->attachments[att].subpass_usage[s];

      if (usage & (ANV_SUBPASS_USAGE_DRAW | ANV_SUBPASS_USAGE_RESOLVE_DST)) {
         /* We found another subpass that draws to this attachment.  We'll
          * wait to resolve until then.
          */
         found_draw = true;
         break;
      }
   }

   struct anv_image_view *iview = fb->attachments[att];
   const struct anv_image *image = iview->image;
   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);

   enum blorp_fast_clear_op resolve_op = BLORP_FAST_CLEAR_OP_NONE;
   if (!found_draw) {
      /* This is the last subpass that writes to this attachment so we need to
       * resolve here.  Ideally, we would like to only resolve if the storeOp
       * is set to VK_ATTACHMENT_STORE_OP_STORE.  However, we need to ensure
       * that the CCS bits are set to "resolved" because there may be copy or
       * blit operations (which may ignore CCS) between now and the next time
       * we render and we need to ensure that anything they write will be
       * respected in the next render.  Unfortunately, the hardware does not
       * provide us with any sort of "invalidate" pass that sets the CCS to
       * "resolved" without writing to the render target.
       */
      if (iview->image->aux_usage != ISL_AUX_USAGE_CCS_E) {
         /* The image destination surface doesn't support compression outside
          * the render pass.  We need a full resolve.
          */
         resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
      } else if (att_state->fast_clear) {
         /* We don't know what to do with clear colors outside the render
          * pass.  We need a partial resolve.  Only transparent black is
          * built into the surface state object and thus no resolve is
          * required for this case.
          */
         if (att_state->clear_value.color.uint32[0] ||
             att_state->clear_value.color.uint32[1] ||
             att_state->clear_value.color.uint32[2] ||
             att_state->clear_value.color.uint32[3])
            resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
      }

      /* The image "natively" supports all the compression we care about
       * and we don't need to resolve at all.  If this is the case, we also
       * don't need to resolve for any of the input attachment cases below.
       */
   } else if (usage & ANV_SUBPASS_USAGE_INPUT) {
      /* Input attachments are clear-color aware so, at least on Sky Lake, we
       * can frequently sample from them with no resolves at all.
       */
      if (att_state->aux_usage != att_state->input_aux_usage) {
         assert(att_state->input_aux_usage == ISL_AUX_USAGE_NONE);
         resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
      } else if (!att_state->clear_color_is_zero_one) {
         /* Sky Lake PRM, Vol. 2d, RENDER_SURFACE_STATE::Red Clear Color:
          *
          *    "If Number of Multisamples is MULTISAMPLECOUNT_1 AND if this RT
          *    is fast cleared with non-0/1 clear value, this RT must be
          *    partially resolved (refer to Partial Resolve operation) before
          *    binding this surface to Sampler."
          */
         resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
      }
   }

   if (resolve_op == BLORP_FAST_CLEAR_OP_NONE)
      return;

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                att_state->aux_usage, &surf);
   if (att_state->fast_clear)
      surf.clear_color = vk_to_isl_color(att_state->clear_value.color);

   /* From the Sky Lake PRM Vol. 7, "Render Target Resolve":
    *
    *    "When performing a render target resolve, PIPE_CONTROL with end of
    *    pipe sync must be delivered."
    *
    * This comment is a bit cryptic and doesn't really tell you what's going
    * on or what's really needed.  It appears that fast clear ops are not
    * properly synchronized with other drawing.  We need to use a PIPE_CONTROL
    * to ensure that the contents of the previous draw hit the render target
    * before we resolve and then use a second PIPE_CONTROL after the resolve
    * to ensure that it is completed before any additional drawing occurs.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   for (uint32_t layer = 0; layer < fb->layers; layer++) {
      blorp_ccs_resolve(batch, &surf,
                        iview->isl.base_level,
                        iview->isl.base_array_layer + layer,
                        iview->isl.format, resolve_op);
   }

   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   /* Once we've done any sort of resolve, we're no longer fast-cleared */
   att_state->fast_clear = false;
   if (att_state->aux_usage == ISL_AUX_USAGE_CCS_D)
      att_state->aux_usage = ISL_AUX_USAGE_NONE;
}
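
/* Called at the end of each subpass: resolves CCS for the color attachments
 * written in this subpass and then performs any multisample resolve
 * attachments the subpass declares.
 */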
void
anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      ccs_resolve_attachment(cmd_buffer, &batch,
                             subpass->color_attachments[i].attachment);
   }

   anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_DRAW);

   if (subpass->has_resolve) {
      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t src_att = subpass->color_attachments[i].attachment;
         uint32_t dst_att = subpass->resolve_attachments[i].attachment;

         if (dst_att == VK_ATTACHMENT_UNUSED)
            continue;

         if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
            /* From the Vulkan 1.0 spec:
             *
             *    If the first use of an attachment in a render pass is as a
             *    resolve attachment, then the loadOp is effectively ignored
             *    as the resolve is guaranteed to overwrite all pixels in the
             *    render area.
             */
            cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
         }

         struct anv_image_view *src_iview = fb->attachments[src_att];
         struct anv_image_view *dst_iview = fb->attachments[dst_att];

         const VkRect2D render_area = cmd_buffer->state.render_area;

         assert(src_iview->aspect_mask == dst_iview->aspect_mask);
         resolve_image(&batch, src_iview->image,
                       src_iview->isl.base_level,
                       src_iview->isl.base_array_layer,
                       dst_iview->image,
                       dst_iview->isl.base_level,
                       dst_iview->isl.base_array_layer,
                       src_iview->aspect_mask,
                       render_area.offset.x, render_area.offset.y,
                       render_area.offset.x, render_area.offset.y,
                       render_area.extent.width, render_area.extent.height);

         ccs_resolve_attachment(cmd_buffer, &batch, dst_att);
      }

      anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_RESOLVE);
   }

   blorp_batch_finish(&batch);
}
void
anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        enum blorp_hiz_op op)
{
   assert(image);

   /* Don't resolve depth buffers without an auxiliary HiZ buffer and
    * don't perform such a resolve on gens that don't support it.
    */
   if (cmd_buffer->device->info.gen < 8 ||
       image->aux_usage != ISL_AUX_USAGE_HIZ)
      return;

   assert(op == BLORP_HIZ_OP_HIZ_RESOLVE ||
          op == BLORP_HIZ_OP_DEPTH_RESOLVE);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                ISL_AUX_USAGE_NONE, &surf);

   /* Manually add the aux HiZ surf */
   surf.aux_surf = &image->aux_surface.isl,
   surf.aux_addr = (struct blorp_address) {
      .buffer = image->bo,
      .offset = image->offset + image->aux_surface.offset,
   };
   surf.aux_usage = ISL_AUX_USAGE_HIZ;

   surf.clear_color.u32[0] = (uint32_t) ANV_HZ_FC_VAL;

   blorp_gen6_hiz_op(&batch, &surf, 0, 0, op);
   blorp_batch_finish(&batch);
}