/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "radv_debug.h"
#include "radv_private.h"
#include "vk_format.h"
#include "vk_util.h"
#include "radv_radeon_winsys.h"
#include "sid.h"
#include "gfx9d.h"
#include "util/debug.h"
#include "util/u_atomic.h"
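
/* Select the tiling (surface mode) for a new image. Linear is only used
 * when explicitly requested, or for 1D and very thin 2D color images on
 * pre-GFX9 parts; everything else, including all MSAA resources, is 2D
 * tiled.
 */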
static unsigned
radv_choose_tiling(struct radv_device *device,
		   const struct radv_image_create_info *create_info)
{
	const VkImageCreateInfo *pCreateInfo = create_info->vk_info;

	if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR) {
		assert(pCreateInfo->samples <= 1);
		return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	if (!vk_format_is_compressed(pCreateInfo->format) &&
	    !vk_format_is_depth_or_stencil(pCreateInfo->format) &&
	    device->physical_device->rad_info.chip_class <= VI) {
		/* Only do this on pre-GFX9; it causes hangs in some VK CTS tests on GFX9. */
		/* Textures with a very small height are recommended to be linear. */
		if (pCreateInfo->imageType == VK_IMAGE_TYPE_1D ||
		    /* Only very thin and long 2D textures should benefit from
		     * linear_aligned. */
		    (pCreateInfo->extent.width > 8 && pCreateInfo->extent.height <= 2))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* MSAA resources must be 2D tiled. */
	if (pCreateInfo->samples > 1)
		return RADEON_SURF_MODE_2D;

	return RADEON_SURF_MODE_2D;
}
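
/* Decide whether the image can use TC-compatible HTILE, i.e. whether the
 * texture unit can read the depth surface while it stays HTILE-compressed.
 */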
static bool
radv_use_tc_compat_htile_for_image(struct radv_device *device,
				   const VkImageCreateInfo *pCreateInfo)
{
	/* TC-compat HTILE is only available for GFX8+. */
	if (device->physical_device->rad_info.chip_class < VI)
		return false;

	if (pCreateInfo->usage & VK_IMAGE_USAGE_STORAGE_BIT)
		return false;

	if (pCreateInfo->flags & (VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT |
				  VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR))
		return false;

	if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR)
		return false;

	if (pCreateInfo->mipLevels > 1)
		return false;

	/* FIXME: for some reason TC compat with 2/4/8 samples breaks some CTS
	 * tests - disable for now. */
	if (pCreateInfo->samples >= 2 &&
	    pCreateInfo->format == VK_FORMAT_D32_SFLOAT_S8_UINT)
		return false;

	/* GFX9 supports both 32-bit and 16-bit depth surfaces, while GFX8 only
	 * supports 32-bit. Though, it's possible to enable TC-compat for
	 * 16-bit depth surfaces if no Z planes are compressed.
	 */
	if (pCreateInfo->format != VK_FORMAT_D32_SFLOAT_S8_UINT &&
	    pCreateInfo->format != VK_FORMAT_D32_SFLOAT &&
	    pCreateInfo->format != VK_FORMAT_D16_UNORM)
		return false;

	return true;
}
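
/* Decide whether DCC (Delta Color Compression) can be enabled for the
 * image. All view formats must be DCC-compatible with the image format,
 * which is why the VkImageFormatListCreateInfoKHR chain is inspected for
 * mutable-format images.
 */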
static bool
radv_use_dcc_for_image(struct radv_device *device,
		       const struct radv_image_create_info *create_info,
		       const VkImageCreateInfo *pCreateInfo)
{
	bool dcc_compatible_formats;
	bool blendable;

	/* DCC (Delta Color Compression) is only available for GFX8+. */
	if (device->physical_device->rad_info.chip_class < VI)
		return false;

	if (device->instance->debug_flags & RADV_DEBUG_NO_DCC)
		return false;

	/* TODO: Enable DCC for storage images. */
	if ((pCreateInfo->usage & VK_IMAGE_USAGE_STORAGE_BIT) ||
	    (pCreateInfo->flags & VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR))
		return false;

	if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR)
		return false;

	/* TODO: Enable DCC for mipmaps and array layers. */
	if (pCreateInfo->mipLevels > 1 || pCreateInfo->arrayLayers > 1)
		return false;

	if (create_info->scanout)
		return false;

	/* FIXME: DCC for MSAA with 4x and 8x samples doesn't work yet, while
	 * 2x can be enabled with an option.
	 */
	if (pCreateInfo->samples > 2 ||
	    (pCreateInfo->samples == 2 &&
	     !device->physical_device->dcc_msaa_allowed))
		return false;

	/* Determine if the formats are DCC compatible. */
	dcc_compatible_formats =
		radv_is_colorbuffer_format_supported(pCreateInfo->format,
						     &blendable);

	if (pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
		const struct VkImageFormatListCreateInfoKHR *format_list =
			(const struct VkImageFormatListCreateInfoKHR *)
				vk_find_struct_const(pCreateInfo->pNext,
						     IMAGE_FORMAT_LIST_CREATE_INFO_KHR);

		/* We have to ignore the existence of the list if viewFormatCount = 0 */
		if (format_list && format_list->viewFormatCount) {
			/* compatibility is transitive, so we only need to check
			 * one format with everything else. */
			for (unsigned i = 0; i < format_list->viewFormatCount; ++i) {
				if (!radv_dcc_formats_compatible(pCreateInfo->format,
								 format_list->pViewFormats[i]))
					dcc_compatible_formats = false;
			}
		} else {
			dcc_compatible_formats = false;
		}
	}

	if (!dcc_compatible_formats)
		return false;

	return true;
}
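
/* Translate the VkImageCreateInfo into the radeon_surf flags consumed by
 * the winsys surface allocator: tiling mode, surface type, Z/S buffers,
 * TC-compat HTILE, DCC and scanout.
 */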
static int
radv_init_surface(struct radv_device *device,
		  struct radeon_surf *surface,
		  const struct radv_image_create_info *create_info)
{
	const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
	unsigned array_mode = radv_choose_tiling(device, create_info);
	const struct vk_format_description *desc =
		vk_format_description(pCreateInfo->format);
	bool is_depth, is_stencil;

	is_depth = vk_format_has_depth(desc);
	is_stencil = vk_format_has_stencil(desc);

	surface->blk_w = vk_format_get_blockwidth(pCreateInfo->format);
	surface->blk_h = vk_format_get_blockheight(pCreateInfo->format);

	surface->bpe = vk_format_get_blocksize(vk_format_depth_only(pCreateInfo->format));
	/* align byte per element on dword */
	if (surface->bpe == 3) {
		surface->bpe = 4;
	}

	surface->flags = RADEON_SURF_SET(array_mode, MODE);

	switch (pCreateInfo->imageType) {
	case VK_IMAGE_TYPE_1D:
		if (pCreateInfo->arrayLayers > 1)
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
		else
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
		break;
	case VK_IMAGE_TYPE_2D:
		if (pCreateInfo->arrayLayers > 1)
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
		else
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
		break;
	case VK_IMAGE_TYPE_3D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
		break;
	default:
		unreachable("unhandled image type");
	}

	if (is_depth) {
		surface->flags |= RADEON_SURF_ZBUFFER;
		if (radv_use_tc_compat_htile_for_image(device, pCreateInfo))
			surface->flags |= RADEON_SURF_TC_COMPATIBLE_HTILE;
	}

	if (is_stencil)
		surface->flags |= RADEON_SURF_SBUFFER;

	surface->flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE;

	if (!radv_use_dcc_for_image(device, create_info, pCreateInfo))
		surface->flags |= RADEON_SURF_DISABLE_DCC;

	if (create_info->scanout)
		surface->flags |= RADEON_SURF_SCANOUT;

	return 0;
}
static uint32_t si_get_bo_metadata_word1(struct radv_device *device)
{
	return (ATI_VENDOR_ID << 16) | device->physical_device->rad_info.pci_id;
}
static inline unsigned
si_tile_mode_index(const struct radv_image *image, unsigned level, bool stencil)
{
	if (stencil)
		return image->surface.u.legacy.stencil_tiling_index[level];
	else
		return image->surface.u.legacy.tiling_index[level];
}
static unsigned radv_map_swizzle(unsigned swizzle)
{
	switch (swizzle) {
	case VK_SWIZZLE_Y:
		return V_008F0C_SQ_SEL_Y;
	case VK_SWIZZLE_Z:
		return V_008F0C_SQ_SEL_Z;
	case VK_SWIZZLE_W:
		return V_008F0C_SQ_SEL_W;
	case VK_SWIZZLE_0:
		return V_008F0C_SQ_SEL_0;
	case VK_SWIZZLE_1:
		return V_008F0C_SQ_SEL_1;
	default: /* VK_SWIZZLE_X */
		return V_008F0C_SQ_SEL_X;
	}
}
static void
radv_make_buffer_descriptor(struct radv_device *device,
			    struct radv_buffer *buffer,
			    VkFormat vk_format,
			    unsigned offset,
			    unsigned range,
			    uint32_t *state)
{
	const struct vk_format_description *desc;
	unsigned stride;
	uint64_t gpu_address = radv_buffer_get_va(buffer->bo);
	uint64_t va = gpu_address + buffer->offset;
	unsigned num_format, data_format;
	int first_non_void;

	desc = vk_format_description(vk_format);
	first_non_void = vk_format_get_first_non_void_channel(vk_format);
	stride = desc->block.bits / 8;

	num_format = radv_translate_buffer_numformat(desc, first_non_void);
	data_format = radv_translate_buffer_dataformat(desc, first_non_void);

	va += offset;
	state[0] = va;
	state[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
		   S_008F04_STRIDE(stride);

	if (device->physical_device->rad_info.chip_class != VI && stride) {
		range /= stride;
	}

	state[2] = range;
	state[3] = S_008F0C_DST_SEL_X(radv_map_swizzle(desc->swizzle[0])) |
		   S_008F0C_DST_SEL_Y(radv_map_swizzle(desc->swizzle[1])) |
		   S_008F0C_DST_SEL_Z(radv_map_swizzle(desc->swizzle[2])) |
		   S_008F0C_DST_SEL_W(radv_map_swizzle(desc->swizzle[3])) |
		   S_008F0C_NUM_FORMAT(num_format) |
		   S_008F0C_DATA_FORMAT(data_format);
}
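
/* Patch the fields of an image descriptor that depend on where the image
 * is currently bound: base address, tile swizzle, swizzle/tiling mode,
 * pitch, and the DCC/HTILE metadata address used for compressed reads.
 */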
static void
si_set_mutable_tex_desc_fields(struct radv_device *device,
			       struct radv_image *image,
			       const struct legacy_surf_level *base_level_info,
			       unsigned base_level, unsigned first_level,
			       unsigned block_width, bool is_stencil,
			       bool is_storage_image, uint32_t *state)
{
	uint64_t gpu_address = image->bo ? radv_buffer_get_va(image->bo) + image->offset : 0;
	uint64_t va = gpu_address;
	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
	uint64_t meta_va = 0;

	if (chip_class >= GFX9) {
		if (is_stencil)
			va += image->surface.u.gfx9.stencil_offset;
		else
			va += image->surface.u.gfx9.surf_offset;
	} else
		va += base_level_info->offset;

	state[0] = va >> 8;
	if (chip_class >= GFX9 ||
	    base_level_info->mode == RADEON_SURF_MODE_2D)
		state[0] |= image->surface.tile_swizzle;
	state[1] &= C_008F14_BASE_ADDRESS_HI;
	state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);

	if (chip_class >= VI) {
		state[6] &= C_008F28_COMPRESSION_EN;
		state[7] = 0;

		if (!is_storage_image && radv_dcc_enabled(image, first_level)) {
			meta_va = gpu_address + image->dcc_offset;
			if (chip_class <= VI)
				meta_va += base_level_info->dcc_offset;
		} else if (!is_storage_image &&
			   radv_image_is_tc_compat_htile(image)) {
			meta_va = gpu_address + image->htile_offset;
		}

		if (meta_va) {
			state[6] |= S_008F28_COMPRESSION_EN(1);
			state[7] = meta_va >> 8;
			state[7] |= image->surface.tile_swizzle;
		}
	}

	if (chip_class >= GFX9) {
		state[3] &= C_008F1C_SW_MODE;
		state[4] &= C_008F20_PITCH_GFX9;

		if (is_stencil) {
			state[3] |= S_008F1C_SW_MODE(image->surface.u.gfx9.stencil.swizzle_mode);
			state[4] |= S_008F20_PITCH_GFX9(image->surface.u.gfx9.stencil.epitch);
		} else {
			state[3] |= S_008F1C_SW_MODE(image->surface.u.gfx9.surf.swizzle_mode);
			state[4] |= S_008F20_PITCH_GFX9(image->surface.u.gfx9.surf.epitch);
		}

		state[5] &= C_008F24_META_DATA_ADDRESS &
			    C_008F24_META_PIPE_ALIGNED &
			    C_008F24_META_RB_ALIGNED;
		if (meta_va) {
			struct gfx9_surf_meta_flags meta;

			if (image->dcc_offset)
				meta = image->surface.u.gfx9.dcc;
			else
				meta = image->surface.u.gfx9.htile;

			state[5] |= S_008F24_META_DATA_ADDRESS(meta_va >> 40) |
				    S_008F24_META_PIPE_ALIGNED(meta.pipe_aligned) |
				    S_008F24_META_RB_ALIGNED(meta.rb_aligned);
		}
	} else {
		/* SI-CI-VI */
		unsigned pitch = base_level_info->nblk_x * block_width;
		unsigned index = si_tile_mode_index(image, base_level, is_stencil);

		state[3] &= C_008F1C_TILING_INDEX;
		state[3] |= S_008F1C_TILING_INDEX(index);
		state[4] &= C_008F20_PITCH_GFX6;
		state[4] |= S_008F20_PITCH_GFX6(pitch - 1);
	}
}
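
/* Map a Vulkan image/view type to the hardware SQ_RSRC_IMG_* resource
 * dimension. Cube views of storage images are accessed as 2D arrays, and
 * GFX9 allocates 1D textures as 2D.
 */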
static unsigned radv_tex_dim(VkImageType image_type, VkImageViewType view_type,
			     unsigned nr_layers, unsigned nr_samples, bool is_storage_image, bool gfx9)
{
	if (view_type == VK_IMAGE_VIEW_TYPE_CUBE || view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
		return is_storage_image ? V_008F1C_SQ_RSRC_IMG_2D_ARRAY : V_008F1C_SQ_RSRC_IMG_CUBE;

	/* GFX9 allocates 1D textures as 2D. */
	if (gfx9 && image_type == VK_IMAGE_TYPE_1D)
		image_type = VK_IMAGE_TYPE_2D;

	switch (image_type) {
	case VK_IMAGE_TYPE_1D:
		return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_1D_ARRAY : V_008F1C_SQ_RSRC_IMG_1D;
	case VK_IMAGE_TYPE_2D:
		if (nr_samples > 1)
			return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY : V_008F1C_SQ_RSRC_IMG_2D_MSAA;
		else
			return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_2D_ARRAY : V_008F1C_SQ_RSRC_IMG_2D;
	case VK_IMAGE_TYPE_3D:
		if (view_type == VK_IMAGE_VIEW_TYPE_3D)
			return V_008F1C_SQ_RSRC_IMG_3D;
		else
			return V_008F1C_SQ_RSRC_IMG_2D_ARRAY;
	default:
		unreachable("illegal image type");
	}
}
static unsigned gfx9_border_color_swizzle(const enum vk_swizzle swizzle[4])
{
	unsigned bc_swizzle = V_008F20_BC_SWIZZLE_XYZW;

	if (swizzle[3] == VK_SWIZZLE_X) {
		/* For the pre-defined border color values (white, opaque
		 * black, transparent black), the only thing that matters is
		 * that the alpha channel winds up in the correct place
		 * (because the RGB channels are all the same) so either of
		 * these enumerations will work.
		 */
		if (swizzle[2] == VK_SWIZZLE_Y)
			bc_swizzle = V_008F20_BC_SWIZZLE_WZYX;
		else
			bc_swizzle = V_008F20_BC_SWIZZLE_WXYZ;
	} else if (swizzle[0] == VK_SWIZZLE_X) {
		if (swizzle[1] == VK_SWIZZLE_Y)
			bc_swizzle = V_008F20_BC_SWIZZLE_XYZW;
		else
			bc_swizzle = V_008F20_BC_SWIZZLE_XWYZ;
	} else if (swizzle[1] == VK_SWIZZLE_X) {
		bc_swizzle = V_008F20_BC_SWIZZLE_YXWZ;
	} else if (swizzle[2] == VK_SWIZZLE_X) {
		bc_swizzle = V_008F20_BC_SWIZZLE_ZYXW;
	}

	return bc_swizzle;
}
/**
 * Build the sampler view descriptor for a texture.
 */
static void
si_make_texture_descriptor(struct radv_device *device,
			   struct radv_image *image,
			   bool is_storage_image,
			   VkImageViewType view_type,
			   VkFormat vk_format,
			   const VkComponentMapping *mapping,
			   unsigned first_level, unsigned last_level,
			   unsigned first_layer, unsigned last_layer,
			   unsigned width, unsigned height, unsigned depth,
			   uint32_t *state,
			   uint32_t *fmask_state)
{
	const struct vk_format_description *desc;
	enum vk_swizzle swizzle[4];
	int first_non_void;
	unsigned num_format, data_format, type;

	desc = vk_format_description(vk_format);

	if (desc->colorspace == VK_FORMAT_COLORSPACE_ZS) {
		const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0};
		vk_format_compose_swizzles(mapping, swizzle_xxxx, swizzle);
	} else {
		vk_format_compose_swizzles(mapping, desc->swizzle, swizzle);
	}

	first_non_void = vk_format_get_first_non_void_channel(vk_format);

	num_format = radv_translate_tex_numformat(vk_format, desc, first_non_void);
	if (num_format == ~0) {
		num_format = 0;
	}

	data_format = radv_translate_tex_dataformat(vk_format, desc, first_non_void);
	if (data_format == ~0) {
		data_format = 0;
	}

	/* S8 with either Z16 or Z32 HTILE need a special format. */
	if (device->physical_device->rad_info.chip_class >= GFX9 &&
	    vk_format == VK_FORMAT_S8_UINT &&
	    radv_image_is_tc_compat_htile(image)) {
		if (image->vk_format == VK_FORMAT_D32_SFLOAT_S8_UINT)
			data_format = V_008F14_IMG_DATA_FORMAT_S8_32;
		else if (image->vk_format == VK_FORMAT_D16_UNORM_S8_UINT)
			data_format = V_008F14_IMG_DATA_FORMAT_S8_16;
	}

	type = radv_tex_dim(image->type, view_type, image->info.array_size, image->info.samples,
			    is_storage_image, device->physical_device->rad_info.chip_class >= GFX9);
	if (type == V_008F1C_SQ_RSRC_IMG_1D_ARRAY) {
		height = 1;
		depth = image->info.array_size;
	} else if (type == V_008F1C_SQ_RSRC_IMG_2D_ARRAY ||
		   type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) {
		if (view_type != VK_IMAGE_VIEW_TYPE_3D)
			depth = image->info.array_size;
	} else if (type == V_008F1C_SQ_RSRC_IMG_CUBE)
		depth = image->info.array_size / 6;

	state[0] = 0;
	state[1] = (S_008F14_DATA_FORMAT_GFX6(data_format) |
		    S_008F14_NUM_FORMAT_GFX6(num_format));
	state[2] = (S_008F18_WIDTH(width - 1) |
		    S_008F18_HEIGHT(height - 1) |
		    S_008F18_PERF_MOD(4));
	state[3] = (S_008F1C_DST_SEL_X(radv_map_swizzle(swizzle[0])) |
		    S_008F1C_DST_SEL_Y(radv_map_swizzle(swizzle[1])) |
		    S_008F1C_DST_SEL_Z(radv_map_swizzle(swizzle[2])) |
		    S_008F1C_DST_SEL_W(radv_map_swizzle(swizzle[3])) |
		    S_008F1C_BASE_LEVEL(image->info.samples > 1 ?
					0 : first_level) |
		    S_008F1C_LAST_LEVEL(image->info.samples > 1 ?
					util_logbase2(image->info.samples) :
					last_level) |
		    S_008F1C_TYPE(type));
	state[4] = 0;
	state[5] = S_008F24_BASE_ARRAY(first_layer);
	state[6] = 0;
	state[7] = 0;

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		unsigned bc_swizzle = gfx9_border_color_swizzle(swizzle);

		/* Depth is the last accessible layer on Gfx9.
		 * The hw doesn't need to know the total number of layers.
		 */
		if (type == V_008F1C_SQ_RSRC_IMG_3D)
			state[4] |= S_008F20_DEPTH(depth - 1);
		else
			state[4] |= S_008F20_DEPTH(last_layer);

		state[4] |= S_008F20_BC_SWIZZLE(bc_swizzle);
		state[5] |= S_008F24_MAX_MIP(image->info.samples > 1 ?
					     util_logbase2(image->info.samples) :
					     image->info.levels - 1);
	} else {
		state[3] |= S_008F1C_POW2_PAD(image->info.levels > 1);
		state[4] |= S_008F20_DEPTH(depth - 1);
		state[5] |= S_008F24_LAST_ARRAY(last_layer);
	}

	if (image->dcc_offset) {
		unsigned swap = radv_translate_colorswap(vk_format, FALSE);

		state[6] = S_008F28_ALPHA_IS_ON_MSB(swap <= 1);
	} else {
		/* The last dword is unused by hw. The shader uses it to clear
		 * bits in the first dword of sampler state.
		 */
		if (device->physical_device->rad_info.chip_class <= CIK && image->info.samples <= 1) {
			if (first_level == last_level)
				state[7] = C_008F30_MAX_ANISO_RATIO;
			else
				state[7] = 0xffffffff;
		}
	}

	/* Initialize the sampler view for FMASK. */
	if (radv_image_has_fmask(image)) {
		uint32_t fmask_format, num_format;
		uint64_t gpu_address = radv_buffer_get_va(image->bo);
		uint64_t va;

		va = gpu_address + image->offset + image->fmask.offset;

		if (device->physical_device->rad_info.chip_class >= GFX9) {
			fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK;
			switch (image->info.samples) {
			case 2:
				num_format = V_008F14_IMG_FMASK_8_2_2;
				break;
			case 4:
				num_format = V_008F14_IMG_FMASK_8_4_4;
				break;
			case 8:
				num_format = V_008F14_IMG_FMASK_32_8_8;
				break;
			default:
				unreachable("invalid nr_samples");
			}
		} else {
			switch (image->info.samples) {
			case 2:
				fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK8_S2_F2;
				break;
			case 4:
				fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK8_S4_F4;
				break;
			case 8:
				fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK32_S8_F8;
				break;
			default:
				assert(0);
				fmask_format = V_008F14_IMG_DATA_FORMAT_INVALID;
			}
			num_format = V_008F14_IMG_NUM_FORMAT_UINT;
		}

		fmask_state[0] = va >> 8;
		fmask_state[0] |= image->fmask.tile_swizzle;
		fmask_state[1] = S_008F14_BASE_ADDRESS_HI(va >> 40) |
			S_008F14_DATA_FORMAT_GFX6(fmask_format) |
			S_008F14_NUM_FORMAT_GFX6(num_format);
		fmask_state[2] = S_008F18_WIDTH(width - 1) |
			S_008F18_HEIGHT(height - 1);
		fmask_state[3] = S_008F1C_DST_SEL_X(V_008F1C_SQ_SEL_X) |
			S_008F1C_DST_SEL_Y(V_008F1C_SQ_SEL_X) |
			S_008F1C_DST_SEL_Z(V_008F1C_SQ_SEL_X) |
			S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_X) |
			S_008F1C_TYPE(radv_tex_dim(image->type, view_type, 1, 0, false, false));
		fmask_state[4] = 0;
		fmask_state[5] = S_008F24_BASE_ARRAY(first_layer);
		fmask_state[6] = 0;
		fmask_state[7] = 0;

		if (device->physical_device->rad_info.chip_class >= GFX9) {
			fmask_state[3] |= S_008F1C_SW_MODE(image->surface.u.gfx9.fmask.swizzle_mode);
			fmask_state[4] |= S_008F20_DEPTH(last_layer) |
					  S_008F20_PITCH_GFX9(image->surface.u.gfx9.fmask.epitch);
			fmask_state[5] |= S_008F24_META_PIPE_ALIGNED(image->surface.u.gfx9.cmask.pipe_aligned) |
					  S_008F24_META_RB_ALIGNED(image->surface.u.gfx9.cmask.rb_aligned);
		} else {
			fmask_state[3] |= S_008F1C_TILING_INDEX(image->fmask.tile_mode_index);
			fmask_state[4] |= S_008F20_DEPTH(depth - 1) |
				S_008F20_PITCH_GFX6(image->fmask.pitch_in_pixels - 1);
			fmask_state[5] |= S_008F24_LAST_ARRAY(last_layer);
		}
	} else if (fmask_state)
		memset(fmask_state, 0, 8 * 4);
}
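
/* Fill the opaque driver-specific part of radeon_bo_metadata so that other
 * processes (or other drivers) importing the BO can reconstruct a matching
 * image descriptor.
 */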
static void
radv_query_opaque_metadata(struct radv_device *device,
			   struct radv_image *image,
			   struct radeon_bo_metadata *md)
{
	static const VkComponentMapping fixedmapping;
	uint32_t desc[8], i;

	/* Metadata image format version 1:
	 * [0] = 1 (metadata format identifier)
	 * [1] = (VENDOR_ID << 16) | PCI_ID
	 * [2:9] = image descriptor for the whole resource
	 *         [2] is always 0, because the base address is cleared
	 *         [9] is the DCC offset bits [39:8] from the beginning of
	 *             the buffer
	 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
	 */
	md->metadata[0] = 1; /* metadata image format version 1 */

	/* TILE_MODE_INDEX is ambiguous without a PCI ID. */
	md->metadata[1] = si_get_bo_metadata_word1(device);

	si_make_texture_descriptor(device, image, false,
				   (VkImageViewType)image->type, image->vk_format,
				   &fixedmapping, 0, image->info.levels - 1, 0,
				   image->info.array_size,
				   image->info.width, image->info.height,
				   image->info.depth,
				   desc, NULL);

	si_set_mutable_tex_desc_fields(device, image, &image->surface.u.legacy.level[0], 0, 0,
				       image->surface.blk_w, false, false, desc);

	/* Clear the base address and set the relative DCC offset. */
	desc[0] = 0;
	desc[1] &= C_008F14_BASE_ADDRESS_HI;
	desc[7] = image->dcc_offset >> 8;

	/* Dwords [2:9] contain the image descriptor. */
	memcpy(&md->metadata[2], desc, sizeof(desc));

	/* Dwords [10:..] contain the mipmap level offsets. */
	if (device->physical_device->rad_info.chip_class <= VI) {
		for (i = 0; i <= image->info.levels - 1; i++)
			md->metadata[10+i] = image->surface.u.legacy.level[i].offset >> 8;
		md->size_metadata = (11 + image->info.levels - 1) * 4;
	}
}
void
radv_init_metadata(struct radv_device *device,
		   struct radv_image *image,
		   struct radeon_bo_metadata *metadata)
{
	struct radeon_surf *surface = &image->surface;

	memset(metadata, 0, sizeof(*metadata));

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		metadata->u.gfx9.swizzle_mode = surface->u.gfx9.surf.swizzle_mode;
	} else {
		metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
			RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
			RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config;
		metadata->u.legacy.bankw = surface->u.legacy.bankw;
		metadata->u.legacy.bankh = surface->u.legacy.bankh;
		metadata->u.legacy.tile_split = surface->u.legacy.tile_split;
		metadata->u.legacy.mtilea = surface->u.legacy.mtilea;
		metadata->u.legacy.num_banks = surface->u.legacy.num_banks;
		metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
		metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
	}
	radv_query_opaque_metadata(device, image, metadata);
}
/* The number of samples can be specified independently of the texture. */
static void
radv_image_get_fmask_info(struct radv_device *device,
			  struct radv_image *image,
			  unsigned nr_samples,
			  struct radv_fmask_info *out)
{
	/* FMASK is allocated like an ordinary texture. */
	struct radeon_surf fmask = {};
	struct ac_surf_info info = image->info;
	memset(out, 0, sizeof(*out));

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		out->alignment = image->surface.u.gfx9.fmask_alignment;
		out->size = image->surface.u.gfx9.fmask_size;
		return;
	}

	fmask.blk_w = image->surface.blk_w;
	fmask.blk_h = image->surface.blk_h;
	info.samples = 1;
	fmask.flags = image->surface.flags | RADEON_SURF_FMASK;

	if (!image->shareable) {
		info.fmask_surf_index = &device->fmask_mrt_offset_counter;
		info.surf_index = &device->fmask_mrt_offset_counter;
	}

	/* Force 2D tiling if it wasn't set. This may occur when creating
	 * FMASK for MSAA resolve on R6xx. On R6xx, the single-sample
	 * destination buffer must have an FMASK too. */
	fmask.flags = RADEON_SURF_CLR(fmask.flags, MODE);
	fmask.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);

	switch (nr_samples) {
	case 2:
	case 4:
		fmask.bpe = 1;
		break;
	case 8:
		fmask.bpe = 4;
		break;
	default:
		return;
	}

	device->ws->surface_init(device->ws, &info, &fmask);
	assert(fmask.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);

	out->slice_tile_max = (fmask.u.legacy.level[0].nblk_x * fmask.u.legacy.level[0].nblk_y) / 64;
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->tile_mode_index = fmask.u.legacy.tiling_index[0];
	out->pitch_in_pixels = fmask.u.legacy.level[0].nblk_x;
	out->bank_height = fmask.u.legacy.bankh;
	out->tile_swizzle = fmask.tile_swizzle;
	out->alignment = MAX2(256, fmask.surf_alignment);
	out->size = fmask.surf_size;

	assert(!out->tile_swizzle || !image->shareable);
}
static void
radv_image_alloc_fmask(struct radv_device *device,
		       struct radv_image *image)
{
	radv_image_get_fmask_info(device, image, image->info.samples, &image->fmask);

	image->fmask.offset = align64(image->size, image->fmask.alignment);
	image->size = image->fmask.offset + image->fmask.size;
	image->alignment = MAX2(image->alignment, image->fmask.alignment);
}
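
/* Compute the CMASK layout used for fast color clears. On GFX9 the sizes
 * come straight from the surface allocator; on older parts they are derived
 * from the cache-line dimensions implied by the number of pipes.
 */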
static void
radv_image_get_cmask_info(struct radv_device *device,
			  struct radv_image *image,
			  struct radv_cmask_info *out)
{
	unsigned pipe_interleave_bytes = device->physical_device->rad_info.pipe_interleave_bytes;
	unsigned num_pipes = device->physical_device->rad_info.num_tile_pipes;
	unsigned cl_width, cl_height;

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		out->alignment = image->surface.u.gfx9.cmask_alignment;
		out->size = image->surface.u.gfx9.cmask_size;
		return;
	}

	switch (num_pipes) {
	case 2:
		cl_width = 32;
		cl_height = 16;
		break;
	case 4:
		cl_width = 32;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 32;
		break;
	case 16: /* Hawaii */
		cl_width = 64;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	unsigned base_align = num_pipes * pipe_interleave_bytes;

	unsigned width = align(image->info.width, cl_width*8);
	unsigned height = align(image->info.height, cl_height*8);
	unsigned slice_elements = (width * height) / (8*8);

	/* Each element of CMASK is a nibble. */
	unsigned slice_bytes = slice_elements / 2;

	out->slice_tile_max = (width * height) / (128*128);
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->alignment = MAX2(256, base_align);
	out->size = (image->type == VK_IMAGE_TYPE_3D ? image->info.depth : image->info.array_size) *
		    align(slice_bytes, base_align);
}
static void
radv_image_alloc_cmask(struct radv_device *device,
		       struct radv_image *image)
{
	uint32_t clear_value_size = 0;
	radv_image_get_cmask_info(device, image, &image->cmask);

	image->cmask.offset = align64(image->size, image->cmask.alignment);
	/* + 8 for storing the clear values */
	if (!image->clear_value_offset) {
		image->clear_value_offset = image->cmask.offset + image->cmask.size;
		clear_value_size = 8;
	}
	image->size = image->cmask.offset + image->cmask.size + clear_value_size;
	image->alignment = MAX2(image->alignment, image->cmask.alignment);
}
static void
radv_image_alloc_dcc(struct radv_image *image)
{
	image->dcc_offset = align64(image->size, image->surface.dcc_alignment);
	/* + 16 for storing the clear values + dcc pred */
	image->clear_value_offset = image->dcc_offset + image->surface.dcc_size;
	image->dcc_pred_offset = image->clear_value_offset + 8;
	image->size = image->dcc_offset + image->surface.dcc_size + 16;
	image->alignment = MAX2(image->alignment, image->surface.dcc_alignment);
}
static void
radv_image_alloc_htile(struct radv_image *image)
{
	image->htile_offset = align64(image->size, image->surface.htile_alignment);

	/* + 8 for storing the clear values */
	image->clear_value_offset = image->htile_offset + image->surface.htile_size;
	image->size = image->clear_value_offset + 8;
	image->alignment = align64(image->alignment, image->surface.htile_alignment);
}
static inline bool
radv_image_can_enable_dcc_or_cmask(struct radv_image *image)
{
	if (image->info.samples <= 1 &&
	    image->info.width * image->info.height <= 512 * 512) {
		/* Do not enable CMASK or DCC for small surfaces where the cost
		 * of the eliminate pass can be higher than the benefit of fast
		 * clear. RadeonSI does this, but the image threshold is
		 * different.
		 */
		return false;
	}

	return image->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT &&
	       (image->exclusive || image->queue_family_mask == 1);
}
static inline bool
radv_image_can_enable_dcc(struct radv_image *image)
{
	return radv_image_can_enable_dcc_or_cmask(image) &&
	       radv_image_has_dcc(image);
}
static inline bool
radv_image_can_enable_cmask(struct radv_image *image)
{
	if (image->surface.bpe > 8 && image->info.samples == 1) {
		/* Do not enable CMASK for non-MSAA images (fast color clear)
		 * because 128 bit formats are not supported, but FMASK might
		 * still be used.
		 */
		return false;
	}

	return radv_image_can_enable_dcc_or_cmask(image) &&
	       image->info.levels == 1 &&
	       image->info.depth == 1 &&
	       !image->surface.is_linear;
}
static inline bool
radv_image_can_enable_fmask(struct radv_image *image)
{
	return image->info.samples > 1 && vk_format_is_color(image->vk_format);
}
static inline bool
radv_image_can_enable_htile(struct radv_image *image)
{
	return image->info.levels == 1 && vk_format_is_depth(image->vk_format);
}
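
/* Create a radv_image: fill ac_surf_info from the VkImageCreateInfo, let
 * the winsys lay out the main surface, then append the optional metadata
 * surfaces (DCC, CMASK, FMASK or HTILE) to the image size.
 */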
VkResult
radv_image_create(VkDevice _device,
		  const struct radv_image_create_info *create_info,
		  const VkAllocationCallbacks* alloc,
		  VkImage *pImage)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
	struct radv_image *image = NULL;
	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);

	radv_assert(pCreateInfo->mipLevels > 0);
	radv_assert(pCreateInfo->arrayLayers > 0);
	radv_assert(pCreateInfo->samples > 0);
	radv_assert(pCreateInfo->extent.width > 0);
	radv_assert(pCreateInfo->extent.height > 0);
	radv_assert(pCreateInfo->extent.depth > 0);

	image = vk_zalloc2(&device->alloc, alloc, sizeof(*image), 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!image)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	image->type = pCreateInfo->imageType;
	image->info.width = pCreateInfo->extent.width;
	image->info.height = pCreateInfo->extent.height;
	image->info.depth = pCreateInfo->extent.depth;
	image->info.samples = pCreateInfo->samples;
	image->info.array_size = pCreateInfo->arrayLayers;
	image->info.levels = pCreateInfo->mipLevels;
	image->info.num_channels = vk_format_get_nr_components(pCreateInfo->format);

	image->vk_format = pCreateInfo->format;
	image->tiling = pCreateInfo->tiling;
	image->usage = pCreateInfo->usage;
	image->flags = pCreateInfo->flags;

	image->exclusive = pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE;
	if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) {
		for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; ++i)
			if (pCreateInfo->pQueueFamilyIndices[i] == VK_QUEUE_FAMILY_EXTERNAL_KHR)
				image->queue_family_mask |= (1u << RADV_MAX_QUEUE_FAMILIES) - 1u;
			else
				image->queue_family_mask |= 1u << pCreateInfo->pQueueFamilyIndices[i];
	}

	image->shareable = vk_find_struct_const(pCreateInfo->pNext,
	                                        EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR) != NULL;
	if (!vk_format_is_depth(pCreateInfo->format) && !create_info->scanout && !image->shareable) {
		image->info.surf_index = &device->image_mrt_offset_counter;
	}

	radv_init_surface(device, &image->surface, create_info);

	device->ws->surface_init(device->ws, &image->info, &image->surface);

	image->size = image->surface.surf_size;
	image->alignment = image->surface.surf_alignment;

	if (!create_info->no_metadata_planes) {
		/* Try to enable DCC first. */
		if (radv_image_can_enable_dcc(image)) {
			radv_image_alloc_dcc(image);
			if (image->info.samples > 1) {
				/* CMASK should be enabled because DCC fast
				 * clear with MSAA needs it.
				 */
				assert(radv_image_can_enable_cmask(image));
				radv_image_alloc_cmask(device, image);
			}
		} else {
			/* When DCC cannot be enabled, try CMASK. */
			image->surface.dcc_size = 0;
			if (radv_image_can_enable_cmask(image)) {
				radv_image_alloc_cmask(device, image);
			}
		}

		/* Try to enable FMASK for multisampled images. */
		if (radv_image_can_enable_fmask(image)) {
			radv_image_alloc_fmask(device, image);
		} else {
			/* Otherwise, try to enable HTILE for depth surfaces. */
			if (radv_image_can_enable_htile(image) &&
			    !(device->instance->debug_flags & RADV_DEBUG_NO_HIZ)) {
				radv_image_alloc_htile(image);
				image->tc_compatible_htile = image->surface.flags & RADEON_SURF_TC_COMPATIBLE_HTILE;
			} else {
				image->surface.htile_size = 0;
			}
		}
	} else {
		image->surface.dcc_size = 0;
		image->surface.htile_size = 0;
	}

	if (pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) {
		image->alignment = MAX2(image->alignment, 4096);
		image->size = align64(image->size, image->alignment);
		image->offset = 0;

		image->bo = device->ws->buffer_create(device->ws, image->size, image->alignment,
		                                      0, RADEON_FLAG_VIRTUAL);
		if (!image->bo) {
			vk_free2(&device->alloc, alloc, image);
			return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
		}
	}

	*pImage = radv_image_to_handle(image);

	return VK_SUCCESS;
}
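
/* Build the sampler or storage image descriptor for an image view,
 * including the mutable fields that depend on where the image is bound.
 */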
static void
radv_image_view_make_descriptor(struct radv_image_view *iview,
				struct radv_device *device,
				const VkComponentMapping *components,
				bool is_storage_image)
{
	struct radv_image *image = iview->image;
	bool is_stencil = iview->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT;
	uint32_t blk_w;
	uint32_t *descriptor;
	uint32_t hw_level = 0;

	if (is_storage_image) {
		descriptor = iview->storage_descriptor;
	} else {
		descriptor = iview->descriptor;
	}

	assert(image->surface.blk_w % vk_format_get_blockwidth(image->vk_format) == 0);
	blk_w = image->surface.blk_w / vk_format_get_blockwidth(image->vk_format) * vk_format_get_blockwidth(iview->vk_format);

	if (device->physical_device->rad_info.chip_class >= GFX9)
		hw_level = iview->base_mip;
	si_make_texture_descriptor(device, image, is_storage_image,
				   iview->type,
				   iview->vk_format,
				   components,
				   hw_level, hw_level + iview->level_count - 1,
				   iview->base_layer,
				   iview->base_layer + iview->layer_count - 1,
				   iview->extent.width,
				   iview->extent.height,
				   iview->extent.depth,
				   descriptor,
				   descriptor + 8);

	const struct legacy_surf_level *base_level_info = NULL;
	if (device->physical_device->rad_info.chip_class <= GFX9) {
		if (is_stencil)
			base_level_info = &image->surface.u.legacy.stencil_level[iview->base_mip];
		else
			base_level_info = &image->surface.u.legacy.level[iview->base_mip];
	}
	si_set_mutable_tex_desc_fields(device, image,
				       base_level_info,
				       iview->base_mip,
				       iview->base_mip,
				       blk_w, is_stencil, is_storage_image, descriptor);
}
void
radv_image_view_init(struct radv_image_view *iview,
		     struct radv_device *device,
		     const VkImageViewCreateInfo* pCreateInfo)
{
	RADV_FROM_HANDLE(radv_image, image, pCreateInfo->image);
	const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;

	switch (image->type) {
	case VK_IMAGE_TYPE_1D:
	case VK_IMAGE_TYPE_2D:
		assert(range->baseArrayLayer + radv_get_layerCount(image, range) - 1 <= image->info.array_size);
		break;
	case VK_IMAGE_TYPE_3D:
		assert(range->baseArrayLayer + radv_get_layerCount(image, range) - 1
		       <= radv_minify(image->info.depth, range->baseMipLevel));
		break;
	default:
		unreachable("bad VkImageType");
	}
	iview->image = image;
	iview->bo = image->bo;
	iview->type = pCreateInfo->viewType;
	iview->vk_format = pCreateInfo->format;
	iview->aspect_mask = pCreateInfo->subresourceRange.aspectMask;

	if (iview->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
		iview->vk_format = vk_format_stencil_only(iview->vk_format);
	} else if (iview->aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
		iview->vk_format = vk_format_depth_only(iview->vk_format);
	}

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		iview->extent = (VkExtent3D) {
			.width = image->info.width,
			.height = image->info.height,
			.depth = image->info.depth,
		};
	} else {
		iview->extent = (VkExtent3D) {
			.width  = radv_minify(image->info.width , range->baseMipLevel),
			.height = radv_minify(image->info.height, range->baseMipLevel),
			.depth  = radv_minify(image->info.depth , range->baseMipLevel),
		};
	}

	if (iview->vk_format != image->vk_format) {
		unsigned view_bw = vk_format_get_blockwidth(iview->vk_format);
		unsigned view_bh = vk_format_get_blockheight(iview->vk_format);
		unsigned img_bw = vk_format_get_blockwidth(image->vk_format);
		unsigned img_bh = vk_format_get_blockheight(image->vk_format);

		iview->extent.width = round_up_u32(iview->extent.width * view_bw, img_bw);
		iview->extent.height = round_up_u32(iview->extent.height * view_bh, img_bh);

		/* Comment ported from amdvlk -
		 * If we have the following image:
		 *              Uncompressed pixels   Compressed block sizes (4x4)
		 *      mip0:       22 x 22                   6 x 6
		 *      mip1:       11 x 11                   3 x 3
		 *      mip2:        5 x  5                   2 x 2
		 *      mip3:        2 x  2                   1 x 1
		 *      mip4:        1 x  1                   1 x 1
		 *
		 * On GFX9 the descriptor is always programmed with the WIDTH and HEIGHT of the base level and the HW is
		 * calculating the degradation of the block sizes down the mip-chain as follows (straight-up
		 * divide-by-two integer math):
		 *      mip0:  6x6
		 *      mip1:  3x3
		 *      mip2:  1x1
		 *      mip3:  1x1
		 *
		 * This means that mip2 will be missing texels.
		 *
		 * Fix this by calculating the base mip's width and height, then convert that, and round it
		 * back up to get the level 0 size.
		 * Clamp the converted size between the original values, and next power of two, which
		 * means we don't oversize the image.
		 */
		if (device->physical_device->rad_info.chip_class >= GFX9 &&
		    vk_format_is_compressed(image->vk_format) &&
		    !vk_format_is_compressed(iview->vk_format)) {
			unsigned rounded_img_w = util_next_power_of_two(iview->extent.width);
			unsigned rounded_img_h = util_next_power_of_two(iview->extent.height);
			unsigned lvl_width  = radv_minify(image->info.width , range->baseMipLevel);
			unsigned lvl_height = radv_minify(image->info.height, range->baseMipLevel);

			lvl_width = round_up_u32(lvl_width * view_bw, img_bw);
			lvl_height = round_up_u32(lvl_height * view_bh, img_bh);

			lvl_width <<= range->baseMipLevel;
			lvl_height <<= range->baseMipLevel;

			iview->extent.width = CLAMP(lvl_width, iview->extent.width, rounded_img_w);
			iview->extent.height = CLAMP(lvl_height, iview->extent.height, rounded_img_h);
		}
	}

	iview->base_layer = range->baseArrayLayer;
	iview->layer_count = radv_get_layerCount(image, range);
	iview->base_mip = range->baseMipLevel;
	iview->level_count = radv_get_levelCount(image, range);

	radv_image_view_make_descriptor(iview, device, &pCreateInfo->components, false);
	radv_image_view_make_descriptor(iview, device, &pCreateInfo->components, true);
}
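
/* Layout queries: the command buffer code uses these helpers to decide when
 * HTILE/DCC metadata is valid and when fast clears are allowed for a given
 * VkImageLayout and set of queues.
 */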
bool radv_layout_has_htile(const struct radv_image *image,
                           VkImageLayout layout,
                           unsigned queue_mask)
{
	if (radv_image_is_tc_compat_htile(image))
		return layout != VK_IMAGE_LAYOUT_GENERAL;

	return radv_image_has_htile(image) &&
	       (layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
	        layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) &&
	       queue_mask == (1u << RADV_QUEUE_GENERAL);
}
bool radv_layout_is_htile_compressed(const struct radv_image *image,
                                     VkImageLayout layout,
                                     unsigned queue_mask)
{
	if (radv_image_is_tc_compat_htile(image))
		return layout != VK_IMAGE_LAYOUT_GENERAL;

	return radv_image_has_htile(image) &&
	       (layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
	        layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) &&
	       queue_mask == (1u << RADV_QUEUE_GENERAL);
}
bool radv_layout_can_fast_clear(const struct radv_image *image,
			        VkImageLayout layout,
			        unsigned queue_mask)
{
	return layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL &&
	       queue_mask == (1u << RADV_QUEUE_GENERAL);
}
bool radv_layout_dcc_compressed(const struct radv_image *image,
			        VkImageLayout layout,
			        unsigned queue_mask)
{
	/* Don't compress compute transfer dst, as image stores are not supported. */
	if (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
	    (queue_mask & (1u << RADV_QUEUE_COMPUTE)))
		return false;

	return radv_image_has_dcc(image) && layout != VK_IMAGE_LAYOUT_GENERAL;
}
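
/* Return the mask of queue families that may access the image without an
 * ownership transfer, given the family named in the barrier and the queue
 * family of the command buffer.
 */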
unsigned radv_image_queue_family_mask(const struct radv_image *image, uint32_t family, uint32_t queue_family)
{
	if (!image->exclusive)
		return image->queue_family_mask;
	if (family == VK_QUEUE_FAMILY_EXTERNAL_KHR)
		return (1u << RADV_MAX_QUEUE_FAMILIES) - 1u;
	if (family == VK_QUEUE_FAMILY_IGNORED)
		return 1u << queue_family;
	return 1u << family;
}
VkResult
radv_CreateImage(VkDevice device,
		 const VkImageCreateInfo *pCreateInfo,
		 const VkAllocationCallbacks *pAllocator,
		 VkImage *pImage)
{
#ifdef ANDROID
	const VkNativeBufferANDROID *gralloc_info =
		vk_find_struct_const(pCreateInfo->pNext, NATIVE_BUFFER_ANDROID);

	if (gralloc_info)
		return radv_image_from_gralloc(device, pCreateInfo, gralloc_info,
		                               pAllocator, pImage);
#endif

	const struct wsi_image_create_info *wsi_info =
		vk_find_struct_const(pCreateInfo->pNext, WSI_IMAGE_CREATE_INFO_MESA);
	bool scanout = wsi_info && wsi_info->scanout;

	return radv_image_create(device,
				 &(struct radv_image_create_info) {
					 .vk_info = pCreateInfo,
					 .scanout = scanout,
				 },
				 pAllocator,
				 pImage);
}
void
radv_DestroyImage(VkDevice _device, VkImage _image,
		  const VkAllocationCallbacks *pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_image, image, _image);

	if (!image)
		return;

	if (image->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)
		device->ws->buffer_destroy(image->bo);

	if (image->owned_memory != VK_NULL_HANDLE)
		radv_FreeMemory(_device, image->owned_memory, pAllocator);

	vk_free2(&device->alloc, pAllocator, image);
}
void radv_GetImageSubresourceLayout(
	VkDevice                                    _device,
	VkImage                                     _image,
	const VkImageSubresource*                   pSubresource,
	VkSubresourceLayout*                        pLayout)
{
	RADV_FROM_HANDLE(radv_image, image, _image);
	RADV_FROM_HANDLE(radv_device, device, _device);
	int level = pSubresource->mipLevel;
	int layer = pSubresource->arrayLayer;
	struct radeon_surf *surface = &image->surface;

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		pLayout->offset = surface->u.gfx9.offset[level] + surface->u.gfx9.surf_slice_size * layer;
		pLayout->rowPitch = surface->u.gfx9.surf_pitch * surface->bpe;
		pLayout->arrayPitch = surface->u.gfx9.surf_slice_size;
		pLayout->depthPitch = surface->u.gfx9.surf_slice_size;
		pLayout->size = surface->u.gfx9.surf_slice_size;
		if (image->type == VK_IMAGE_TYPE_3D)
			pLayout->size *= u_minify(image->info.depth, level);
	} else {
		pLayout->offset = surface->u.legacy.level[level].offset + (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4 * layer;
		pLayout->rowPitch = surface->u.legacy.level[level].nblk_x * surface->bpe;
		pLayout->arrayPitch = (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4;
		pLayout->depthPitch = (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4;
		pLayout->size = (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4;
		if (image->type == VK_IMAGE_TYPE_3D)
			pLayout->size *= u_minify(image->info.depth, level);
	}
}
VkResult
radv_CreateImageView(VkDevice _device,
		     const VkImageViewCreateInfo *pCreateInfo,
		     const VkAllocationCallbacks *pAllocator,
		     VkImageView *pView)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_image_view *view;

	view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (view == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	radv_image_view_init(view, device, pCreateInfo);

	*pView = radv_image_view_to_handle(view);

	return VK_SUCCESS;
}
, VkImageView _iview
,
1374 const VkAllocationCallbacks
*pAllocator
)
1376 RADV_FROM_HANDLE(radv_device
, device
, _device
);
1377 RADV_FROM_HANDLE(radv_image_view
, iview
, _iview
);
1381 vk_free2(&device
->alloc
, pAllocator
, iview
);
void radv_buffer_view_init(struct radv_buffer_view *view,
			   struct radv_device *device,
			   const VkBufferViewCreateInfo* pCreateInfo)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, pCreateInfo->buffer);

	view->bo = buffer->bo;
	view->range = pCreateInfo->range == VK_WHOLE_SIZE ?
		buffer->size - pCreateInfo->offset : pCreateInfo->range;
	view->vk_format = pCreateInfo->format;

	radv_make_buffer_descriptor(device, buffer, view->vk_format,
				    pCreateInfo->offset, view->range, view->state);
}
VkResult
radv_CreateBufferView(VkDevice _device,
		      const VkBufferViewCreateInfo *pCreateInfo,
		      const VkAllocationCallbacks *pAllocator,
		      VkBufferView *pView)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_buffer_view *view;

	view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!view)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	radv_buffer_view_init(view, device, pCreateInfo);

	*pView = radv_buffer_view_to_handle(view);

	return VK_SUCCESS;
}
void
radv_DestroyBufferView(VkDevice _device, VkBufferView bufferView,
		       const VkAllocationCallbacks *pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_buffer_view, view, bufferView);

	if (!view)
		return;

	vk_free2(&device->alloc, pAllocator, view);
}