/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
28 #include "radv_debug.h"
29 #include "radv_private.h"
30 #include "vk_format.h"
32 #include "radv_radeon_winsys.h"
35 #include "util/debug.h"
36 #include "util/u_atomic.h"
38 radv_choose_tiling(struct radv_device
*device
,
39 const struct radv_image_create_info
*create_info
)
41 const VkImageCreateInfo
*pCreateInfo
= create_info
->vk_info
;
43 if (pCreateInfo
->tiling
== VK_IMAGE_TILING_LINEAR
) {
44 assert(pCreateInfo
->samples
<= 1);
45 return RADEON_SURF_MODE_LINEAR_ALIGNED
;
48 if (!vk_format_is_compressed(pCreateInfo
->format
) &&
49 !vk_format_is_depth_or_stencil(pCreateInfo
->format
)
50 && device
->physical_device
->rad_info
.chip_class
<= VI
) {
51 /* this causes hangs in some VK CTS tests on GFX9. */
52 /* Textures with a very small height are recommended to be linear. */
53 if (pCreateInfo
->imageType
== VK_IMAGE_TYPE_1D
||
54 /* Only very thin and long 2D textures should benefit from
56 (pCreateInfo
->extent
.width
> 8 && pCreateInfo
->extent
.height
<= 2))
57 return RADEON_SURF_MODE_LINEAR_ALIGNED
;
60 /* MSAA resources must be 2D tiled. */
61 if (pCreateInfo
->samples
> 1)
62 return RADEON_SURF_MODE_2D
;
64 return RADEON_SURF_MODE_2D
;
68 radv_use_tc_compat_htile_for_image(struct radv_device
*device
,
69 const VkImageCreateInfo
*pCreateInfo
)
71 /* TC-compat HTILE is only available for GFX8+. */
72 if (device
->physical_device
->rad_info
.chip_class
< VI
)
75 if (pCreateInfo
->usage
& VK_IMAGE_USAGE_STORAGE_BIT
)
78 if (pCreateInfo
->flags
& (VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT
|
79 VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR
))
82 if (pCreateInfo
->tiling
== VK_IMAGE_TILING_LINEAR
)
85 if (pCreateInfo
->mipLevels
> 1)
88 /* FIXME: for some reason TC compat with 2/4/8 samples breaks some cts
89 * tests - disable for now */
90 if (pCreateInfo
->samples
>= 2 &&
91 pCreateInfo
->format
== VK_FORMAT_D32_SFLOAT_S8_UINT
)
94 /* GFX9 supports both 32-bit and 16-bit depth surfaces, while GFX8 only
95 * supports 32-bit. Though, it's possible to enable TC-compat for
96 * 16-bit depth surfaces if no Z planes are compressed.
98 if (pCreateInfo
->format
!= VK_FORMAT_D32_SFLOAT_S8_UINT
&&
99 pCreateInfo
->format
!= VK_FORMAT_D32_SFLOAT
&&
100 pCreateInfo
->format
!= VK_FORMAT_D16_UNORM
)
107 radv_use_dcc_for_image(struct radv_device
*device
,
108 const struct radv_image_create_info
*create_info
,
109 const VkImageCreateInfo
*pCreateInfo
)
111 bool dcc_compatible_formats
;
114 /* DCC (Delta Color Compression) is only available for GFX8+. */
115 if (device
->physical_device
->rad_info
.chip_class
< VI
)
118 if (device
->instance
->debug_flags
& RADV_DEBUG_NO_DCC
)
121 /* TODO: Enable DCC for storage images. */
122 if ((pCreateInfo
->usage
& VK_IMAGE_USAGE_STORAGE_BIT
) ||
123 (pCreateInfo
->flags
& VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR
))
126 if (pCreateInfo
->tiling
== VK_IMAGE_TILING_LINEAR
)
129 /* TODO: Enable DCC for mipmaps and array layers. */
130 if (pCreateInfo
->mipLevels
> 1 || pCreateInfo
->arrayLayers
> 1)
133 if (create_info
->scanout
)
136 /* FIXME: DCC for MSAA with 4x and 8x samples doesn't work yet, while
137 * 2x can be enabled with an option.
139 if (pCreateInfo
->samples
> 2 ||
140 (pCreateInfo
->samples
== 2 &&
141 !device
->physical_device
->dcc_msaa_allowed
))
144 /* Determine if the formats are DCC compatible. */
145 dcc_compatible_formats
=
146 radv_is_colorbuffer_format_supported(pCreateInfo
->format
,
149 if (pCreateInfo
->flags
& VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT
) {
150 const struct VkImageFormatListCreateInfoKHR
*format_list
=
151 (const struct VkImageFormatListCreateInfoKHR
*)
152 vk_find_struct_const(pCreateInfo
->pNext
,
153 IMAGE_FORMAT_LIST_CREATE_INFO_KHR
);
155 /* We have to ignore the existence of the list if viewFormatCount = 0 */
156 if (format_list
&& format_list
->viewFormatCount
) {
157 /* compatibility is transitive, so we only need to check
158 * one format with everything else. */
159 for (unsigned i
= 0; i
< format_list
->viewFormatCount
; ++i
) {
160 if (!radv_dcc_formats_compatible(pCreateInfo
->format
,
161 format_list
->pViewFormats
[i
]))
162 dcc_compatible_formats
= false;
165 dcc_compatible_formats
= false;
169 if (!dcc_compatible_formats
)
176 radv_init_surface(struct radv_device
*device
,
177 struct radeon_surf
*surface
,
178 const struct radv_image_create_info
*create_info
)
180 const VkImageCreateInfo
*pCreateInfo
= create_info
->vk_info
;
181 unsigned array_mode
= radv_choose_tiling(device
, create_info
);
182 const struct vk_format_description
*desc
=
183 vk_format_description(pCreateInfo
->format
);
184 bool is_depth
, is_stencil
;
186 is_depth
= vk_format_has_depth(desc
);
187 is_stencil
= vk_format_has_stencil(desc
);
189 surface
->blk_w
= vk_format_get_blockwidth(pCreateInfo
->format
);
190 surface
->blk_h
= vk_format_get_blockheight(pCreateInfo
->format
);
192 surface
->bpe
= vk_format_get_blocksize(vk_format_depth_only(pCreateInfo
->format
));
193 /* align byte per element on dword */
194 if (surface
->bpe
== 3) {
197 surface
->flags
= RADEON_SURF_SET(array_mode
, MODE
);
199 switch (pCreateInfo
->imageType
){
200 case VK_IMAGE_TYPE_1D
:
201 if (pCreateInfo
->arrayLayers
> 1)
202 surface
->flags
|= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY
, TYPE
);
204 surface
->flags
|= RADEON_SURF_SET(RADEON_SURF_TYPE_1D
, TYPE
);
206 case VK_IMAGE_TYPE_2D
:
207 if (pCreateInfo
->arrayLayers
> 1)
208 surface
->flags
|= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY
, TYPE
);
210 surface
->flags
|= RADEON_SURF_SET(RADEON_SURF_TYPE_2D
, TYPE
);
212 case VK_IMAGE_TYPE_3D
:
213 surface
->flags
|= RADEON_SURF_SET(RADEON_SURF_TYPE_3D
, TYPE
);
216 unreachable("unhandled image type");
220 surface
->flags
|= RADEON_SURF_ZBUFFER
;
221 if (radv_use_tc_compat_htile_for_image(device
, pCreateInfo
))
222 surface
->flags
|= RADEON_SURF_TC_COMPATIBLE_HTILE
;
226 surface
->flags
|= RADEON_SURF_SBUFFER
;
228 surface
->flags
|= RADEON_SURF_OPTIMIZE_FOR_SPACE
;
230 if (!radv_use_dcc_for_image(device
, create_info
, pCreateInfo
))
231 surface
->flags
|= RADEON_SURF_DISABLE_DCC
;
233 if (create_info
->scanout
)
234 surface
->flags
|= RADEON_SURF_SCANOUT
;
238 static uint32_t si_get_bo_metadata_word1(struct radv_device
*device
)
240 return (ATI_VENDOR_ID
<< 16) | device
->physical_device
->rad_info
.pci_id
;
243 static inline unsigned
244 si_tile_mode_index(const struct radv_image
*image
, unsigned level
, bool stencil
)
247 return image
->surface
.u
.legacy
.stencil_tiling_index
[level
];
249 return image
->surface
.u
.legacy
.tiling_index
[level
];
252 static unsigned radv_map_swizzle(unsigned swizzle
)
256 return V_008F0C_SQ_SEL_Y
;
258 return V_008F0C_SQ_SEL_Z
;
260 return V_008F0C_SQ_SEL_W
;
262 return V_008F0C_SQ_SEL_0
;
264 return V_008F0C_SQ_SEL_1
;
265 default: /* VK_SWIZZLE_X */
266 return V_008F0C_SQ_SEL_X
;
271 radv_make_buffer_descriptor(struct radv_device
*device
,
272 struct radv_buffer
*buffer
,
278 const struct vk_format_description
*desc
;
280 uint64_t gpu_address
= radv_buffer_get_va(buffer
->bo
);
281 uint64_t va
= gpu_address
+ buffer
->offset
;
282 unsigned num_format
, data_format
;
284 desc
= vk_format_description(vk_format
);
285 first_non_void
= vk_format_get_first_non_void_channel(vk_format
);
286 stride
= desc
->block
.bits
/ 8;
288 num_format
= radv_translate_buffer_numformat(desc
, first_non_void
);
289 data_format
= radv_translate_buffer_dataformat(desc
, first_non_void
);
293 state
[1] = S_008F04_BASE_ADDRESS_HI(va
>> 32) |
294 S_008F04_STRIDE(stride
);
296 if (device
->physical_device
->rad_info
.chip_class
!= VI
&& stride
) {
301 state
[3] = S_008F0C_DST_SEL_X(radv_map_swizzle(desc
->swizzle
[0])) |
302 S_008F0C_DST_SEL_Y(radv_map_swizzle(desc
->swizzle
[1])) |
303 S_008F0C_DST_SEL_Z(radv_map_swizzle(desc
->swizzle
[2])) |
304 S_008F0C_DST_SEL_W(radv_map_swizzle(desc
->swizzle
[3])) |
305 S_008F0C_NUM_FORMAT(num_format
) |
306 S_008F0C_DATA_FORMAT(data_format
);
310 si_set_mutable_tex_desc_fields(struct radv_device
*device
,
311 struct radv_image
*image
,
312 const struct legacy_surf_level
*base_level_info
,
313 unsigned base_level
, unsigned first_level
,
314 unsigned block_width
, bool is_stencil
,
315 bool is_storage_image
, uint32_t *state
)
317 uint64_t gpu_address
= image
->bo
? radv_buffer_get_va(image
->bo
) + image
->offset
: 0;
318 uint64_t va
= gpu_address
;
319 enum chip_class chip_class
= device
->physical_device
->rad_info
.chip_class
;
320 uint64_t meta_va
= 0;
321 if (chip_class
>= GFX9
) {
323 va
+= image
->surface
.u
.gfx9
.stencil_offset
;
325 va
+= image
->surface
.u
.gfx9
.surf_offset
;
327 va
+= base_level_info
->offset
;
330 if (chip_class
>= GFX9
||
331 base_level_info
->mode
== RADEON_SURF_MODE_2D
)
332 state
[0] |= image
->surface
.tile_swizzle
;
333 state
[1] &= C_008F14_BASE_ADDRESS_HI
;
334 state
[1] |= S_008F14_BASE_ADDRESS_HI(va
>> 40);
336 if (chip_class
>= VI
) {
337 state
[6] &= C_008F28_COMPRESSION_EN
;
339 if (!is_storage_image
&& radv_dcc_enabled(image
, first_level
)) {
340 meta_va
= gpu_address
+ image
->dcc_offset
;
341 if (chip_class
<= VI
)
342 meta_va
+= base_level_info
->dcc_offset
;
343 } else if (!is_storage_image
&&
344 radv_image_is_tc_compat_htile(image
)) {
345 meta_va
= gpu_address
+ image
->htile_offset
;
349 state
[6] |= S_008F28_COMPRESSION_EN(1);
350 state
[7] = meta_va
>> 8;
351 state
[7] |= image
->surface
.tile_swizzle
;
355 if (chip_class
>= GFX9
) {
356 state
[3] &= C_008F1C_SW_MODE
;
357 state
[4] &= C_008F20_PITCH_GFX9
;
360 state
[3] |= S_008F1C_SW_MODE(image
->surface
.u
.gfx9
.stencil
.swizzle_mode
);
361 state
[4] |= S_008F20_PITCH_GFX9(image
->surface
.u
.gfx9
.stencil
.epitch
);
363 state
[3] |= S_008F1C_SW_MODE(image
->surface
.u
.gfx9
.surf
.swizzle_mode
);
364 state
[4] |= S_008F20_PITCH_GFX9(image
->surface
.u
.gfx9
.surf
.epitch
);
367 state
[5] &= C_008F24_META_DATA_ADDRESS
&
368 C_008F24_META_PIPE_ALIGNED
&
369 C_008F24_META_RB_ALIGNED
;
371 struct gfx9_surf_meta_flags meta
;
373 if (image
->dcc_offset
)
374 meta
= image
->surface
.u
.gfx9
.dcc
;
376 meta
= image
->surface
.u
.gfx9
.htile
;
378 state
[5] |= S_008F24_META_DATA_ADDRESS(meta_va
>> 40) |
379 S_008F24_META_PIPE_ALIGNED(meta
.pipe_aligned
) |
380 S_008F24_META_RB_ALIGNED(meta
.rb_aligned
);
384 unsigned pitch
= base_level_info
->nblk_x
* block_width
;
385 unsigned index
= si_tile_mode_index(image
, base_level
, is_stencil
);
387 state
[3] &= C_008F1C_TILING_INDEX
;
388 state
[3] |= S_008F1C_TILING_INDEX(index
);
389 state
[4] &= C_008F20_PITCH_GFX6
;
390 state
[4] |= S_008F20_PITCH_GFX6(pitch
- 1);
394 static unsigned radv_tex_dim(VkImageType image_type
, VkImageViewType view_type
,
395 unsigned nr_layers
, unsigned nr_samples
, bool is_storage_image
, bool gfx9
)
397 if (view_type
== VK_IMAGE_VIEW_TYPE_CUBE
|| view_type
== VK_IMAGE_VIEW_TYPE_CUBE_ARRAY
)
398 return is_storage_image
? V_008F1C_SQ_RSRC_IMG_2D_ARRAY
: V_008F1C_SQ_RSRC_IMG_CUBE
;
400 /* GFX9 allocates 1D textures as 2D. */
401 if (gfx9
&& image_type
== VK_IMAGE_TYPE_1D
)
402 image_type
= VK_IMAGE_TYPE_2D
;
403 switch (image_type
) {
404 case VK_IMAGE_TYPE_1D
:
405 return nr_layers
> 1 ? V_008F1C_SQ_RSRC_IMG_1D_ARRAY
: V_008F1C_SQ_RSRC_IMG_1D
;
406 case VK_IMAGE_TYPE_2D
:
408 return nr_layers
> 1 ? V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY
: V_008F1C_SQ_RSRC_IMG_2D_MSAA
;
410 return nr_layers
> 1 ? V_008F1C_SQ_RSRC_IMG_2D_ARRAY
: V_008F1C_SQ_RSRC_IMG_2D
;
411 case VK_IMAGE_TYPE_3D
:
412 if (view_type
== VK_IMAGE_VIEW_TYPE_3D
)
413 return V_008F1C_SQ_RSRC_IMG_3D
;
415 return V_008F1C_SQ_RSRC_IMG_2D_ARRAY
;
417 unreachable("illegale image type");
421 static unsigned gfx9_border_color_swizzle(const enum vk_swizzle swizzle
[4])
423 unsigned bc_swizzle
= V_008F20_BC_SWIZZLE_XYZW
;
425 if (swizzle
[3] == VK_SWIZZLE_X
) {
426 /* For the pre-defined border color values (white, opaque
427 * black, transparent black), the only thing that matters is
428 * that the alpha channel winds up in the correct place
429 * (because the RGB channels are all the same) so either of
430 * these enumerations will work.
432 if (swizzle
[2] == VK_SWIZZLE_Y
)
433 bc_swizzle
= V_008F20_BC_SWIZZLE_WZYX
;
435 bc_swizzle
= V_008F20_BC_SWIZZLE_WXYZ
;
436 } else if (swizzle
[0] == VK_SWIZZLE_X
) {
437 if (swizzle
[1] == VK_SWIZZLE_Y
)
438 bc_swizzle
= V_008F20_BC_SWIZZLE_XYZW
;
440 bc_swizzle
= V_008F20_BC_SWIZZLE_XWYZ
;
441 } else if (swizzle
[1] == VK_SWIZZLE_X
) {
442 bc_swizzle
= V_008F20_BC_SWIZZLE_YXWZ
;
443 } else if (swizzle
[2] == VK_SWIZZLE_X
) {
444 bc_swizzle
= V_008F20_BC_SWIZZLE_ZYXW
;
451 * Build the sampler view descriptor for a texture.
454 si_make_texture_descriptor(struct radv_device
*device
,
455 struct radv_image
*image
,
456 bool is_storage_image
,
457 VkImageViewType view_type
,
459 const VkComponentMapping
*mapping
,
460 unsigned first_level
, unsigned last_level
,
461 unsigned first_layer
, unsigned last_layer
,
462 unsigned width
, unsigned height
, unsigned depth
,
464 uint32_t *fmask_state
)
466 const struct vk_format_description
*desc
;
467 enum vk_swizzle swizzle
[4];
469 unsigned num_format
, data_format
, type
;
471 desc
= vk_format_description(vk_format
);
473 if (desc
->colorspace
== VK_FORMAT_COLORSPACE_ZS
) {
474 const unsigned char swizzle_xxxx
[4] = {0, 0, 0, 0};
475 vk_format_compose_swizzles(mapping
, swizzle_xxxx
, swizzle
);
477 vk_format_compose_swizzles(mapping
, desc
->swizzle
, swizzle
);
480 first_non_void
= vk_format_get_first_non_void_channel(vk_format
);
482 num_format
= radv_translate_tex_numformat(vk_format
, desc
, first_non_void
);
483 if (num_format
== ~0) {
487 data_format
= radv_translate_tex_dataformat(vk_format
, desc
, first_non_void
);
488 if (data_format
== ~0) {
492 /* S8 with either Z16 or Z32 HTILE need a special format. */
493 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
&&
494 vk_format
== VK_FORMAT_S8_UINT
&&
495 radv_image_is_tc_compat_htile(image
)) {
496 if (image
->vk_format
== VK_FORMAT_D32_SFLOAT_S8_UINT
)
497 data_format
= V_008F14_IMG_DATA_FORMAT_S8_32
;
498 else if (image
->vk_format
== VK_FORMAT_D16_UNORM_S8_UINT
)
499 data_format
= V_008F14_IMG_DATA_FORMAT_S8_16
;
501 type
= radv_tex_dim(image
->type
, view_type
, image
->info
.array_size
, image
->info
.samples
,
502 is_storage_image
, device
->physical_device
->rad_info
.chip_class
>= GFX9
);
503 if (type
== V_008F1C_SQ_RSRC_IMG_1D_ARRAY
) {
505 depth
= image
->info
.array_size
;
506 } else if (type
== V_008F1C_SQ_RSRC_IMG_2D_ARRAY
||
507 type
== V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY
) {
508 if (view_type
!= VK_IMAGE_VIEW_TYPE_3D
)
509 depth
= image
->info
.array_size
;
510 } else if (type
== V_008F1C_SQ_RSRC_IMG_CUBE
)
511 depth
= image
->info
.array_size
/ 6;
514 state
[1] = (S_008F14_DATA_FORMAT_GFX6(data_format
) |
515 S_008F14_NUM_FORMAT_GFX6(num_format
));
516 state
[2] = (S_008F18_WIDTH(width
- 1) |
517 S_008F18_HEIGHT(height
- 1) |
518 S_008F18_PERF_MOD(4));
519 state
[3] = (S_008F1C_DST_SEL_X(radv_map_swizzle(swizzle
[0])) |
520 S_008F1C_DST_SEL_Y(radv_map_swizzle(swizzle
[1])) |
521 S_008F1C_DST_SEL_Z(radv_map_swizzle(swizzle
[2])) |
522 S_008F1C_DST_SEL_W(radv_map_swizzle(swizzle
[3])) |
523 S_008F1C_BASE_LEVEL(image
->info
.samples
> 1 ?
525 S_008F1C_LAST_LEVEL(image
->info
.samples
> 1 ?
526 util_logbase2(image
->info
.samples
) :
528 S_008F1C_TYPE(type
));
530 state
[5] = S_008F24_BASE_ARRAY(first_layer
);
534 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
) {
535 unsigned bc_swizzle
= gfx9_border_color_swizzle(swizzle
);
537 /* Depth is the the last accessible layer on Gfx9.
538 * The hw doesn't need to know the total number of layers.
540 if (type
== V_008F1C_SQ_RSRC_IMG_3D
)
541 state
[4] |= S_008F20_DEPTH(depth
- 1);
543 state
[4] |= S_008F20_DEPTH(last_layer
);
545 state
[4] |= S_008F20_BC_SWIZZLE(bc_swizzle
);
546 state
[5] |= S_008F24_MAX_MIP(image
->info
.samples
> 1 ?
547 util_logbase2(image
->info
.samples
) :
548 image
->info
.levels
- 1);
550 state
[3] |= S_008F1C_POW2_PAD(image
->info
.levels
> 1);
551 state
[4] |= S_008F20_DEPTH(depth
- 1);
552 state
[5] |= S_008F24_LAST_ARRAY(last_layer
);
554 if (image
->dcc_offset
) {
555 unsigned swap
= radv_translate_colorswap(vk_format
, FALSE
);
557 state
[6] = S_008F28_ALPHA_IS_ON_MSB(swap
<= 1);
559 /* The last dword is unused by hw. The shader uses it to clear
560 * bits in the first dword of sampler state.
562 if (device
->physical_device
->rad_info
.chip_class
<= CIK
&& image
->info
.samples
<= 1) {
563 if (first_level
== last_level
)
564 state
[7] = C_008F30_MAX_ANISO_RATIO
;
566 state
[7] = 0xffffffff;
570 /* Initialize the sampler view for FMASK. */
571 if (radv_image_has_fmask(image
)) {
572 uint32_t fmask_format
, num_format
;
573 uint64_t gpu_address
= radv_buffer_get_va(image
->bo
);
576 va
= gpu_address
+ image
->offset
+ image
->fmask
.offset
;
578 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
) {
579 fmask_format
= V_008F14_IMG_DATA_FORMAT_FMASK
;
580 switch (image
->info
.samples
) {
582 num_format
= V_008F14_IMG_FMASK_8_2_2
;
585 num_format
= V_008F14_IMG_FMASK_8_4_4
;
588 num_format
= V_008F14_IMG_FMASK_32_8_8
;
591 unreachable("invalid nr_samples");
594 switch (image
->info
.samples
) {
596 fmask_format
= V_008F14_IMG_DATA_FORMAT_FMASK8_S2_F2
;
599 fmask_format
= V_008F14_IMG_DATA_FORMAT_FMASK8_S4_F4
;
602 fmask_format
= V_008F14_IMG_DATA_FORMAT_FMASK32_S8_F8
;
606 fmask_format
= V_008F14_IMG_DATA_FORMAT_INVALID
;
608 num_format
= V_008F14_IMG_NUM_FORMAT_UINT
;
611 fmask_state
[0] = va
>> 8;
612 fmask_state
[0] |= image
->fmask
.tile_swizzle
;
613 fmask_state
[1] = S_008F14_BASE_ADDRESS_HI(va
>> 40) |
614 S_008F14_DATA_FORMAT_GFX6(fmask_format
) |
615 S_008F14_NUM_FORMAT_GFX6(num_format
);
616 fmask_state
[2] = S_008F18_WIDTH(width
- 1) |
617 S_008F18_HEIGHT(height
- 1);
618 fmask_state
[3] = S_008F1C_DST_SEL_X(V_008F1C_SQ_SEL_X
) |
619 S_008F1C_DST_SEL_Y(V_008F1C_SQ_SEL_X
) |
620 S_008F1C_DST_SEL_Z(V_008F1C_SQ_SEL_X
) |
621 S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_X
) |
622 S_008F1C_TYPE(radv_tex_dim(image
->type
, view_type
, 1, 0, false, false));
624 fmask_state
[5] = S_008F24_BASE_ARRAY(first_layer
);
628 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
) {
629 fmask_state
[3] |= S_008F1C_SW_MODE(image
->surface
.u
.gfx9
.fmask
.swizzle_mode
);
630 fmask_state
[4] |= S_008F20_DEPTH(last_layer
) |
631 S_008F20_PITCH_GFX9(image
->surface
.u
.gfx9
.fmask
.epitch
);
632 fmask_state
[5] |= S_008F24_META_PIPE_ALIGNED(image
->surface
.u
.gfx9
.cmask
.pipe_aligned
) |
633 S_008F24_META_RB_ALIGNED(image
->surface
.u
.gfx9
.cmask
.rb_aligned
);
635 fmask_state
[3] |= S_008F1C_TILING_INDEX(image
->fmask
.tile_mode_index
);
636 fmask_state
[4] |= S_008F20_DEPTH(depth
- 1) |
637 S_008F20_PITCH_GFX6(image
->fmask
.pitch_in_pixels
- 1);
638 fmask_state
[5] |= S_008F24_LAST_ARRAY(last_layer
);
640 } else if (fmask_state
)
641 memset(fmask_state
, 0, 8 * 4);
645 radv_query_opaque_metadata(struct radv_device
*device
,
646 struct radv_image
*image
,
647 struct radeon_bo_metadata
*md
)
649 static const VkComponentMapping fixedmapping
;
652 /* Metadata image format format version 1:
653 * [0] = 1 (metadata format identifier)
654 * [1] = (VENDOR_ID << 16) | PCI_ID
655 * [2:9] = image descriptor for the whole resource
656 * [2] is always 0, because the base address is cleared
657 * [9] is the DCC offset bits [39:8] from the beginning of
659 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
661 md
->metadata
[0] = 1; /* metadata image format version 1 */
663 /* TILE_MODE_INDEX is ambiguous without a PCI ID. */
664 md
->metadata
[1] = si_get_bo_metadata_word1(device
);
667 si_make_texture_descriptor(device
, image
, false,
668 (VkImageViewType
)image
->type
, image
->vk_format
,
669 &fixedmapping
, 0, image
->info
.levels
- 1, 0,
670 image
->info
.array_size
,
671 image
->info
.width
, image
->info
.height
,
675 si_set_mutable_tex_desc_fields(device
, image
, &image
->surface
.u
.legacy
.level
[0], 0, 0,
676 image
->surface
.blk_w
, false, false, desc
);
678 /* Clear the base address and set the relative DCC offset. */
680 desc
[1] &= C_008F14_BASE_ADDRESS_HI
;
681 desc
[7] = image
->dcc_offset
>> 8;
683 /* Dwords [2:9] contain the image descriptor. */
684 memcpy(&md
->metadata
[2], desc
, sizeof(desc
));
686 /* Dwords [10:..] contain the mipmap level offsets. */
687 if (device
->physical_device
->rad_info
.chip_class
<= VI
) {
688 for (i
= 0; i
<= image
->info
.levels
- 1; i
++)
689 md
->metadata
[10+i
] = image
->surface
.u
.legacy
.level
[i
].offset
>> 8;
690 md
->size_metadata
= (11 + image
->info
.levels
- 1) * 4;
695 radv_init_metadata(struct radv_device
*device
,
696 struct radv_image
*image
,
697 struct radeon_bo_metadata
*metadata
)
699 struct radeon_surf
*surface
= &image
->surface
;
701 memset(metadata
, 0, sizeof(*metadata
));
703 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
) {
704 metadata
->u
.gfx9
.swizzle_mode
= surface
->u
.gfx9
.surf
.swizzle_mode
;
706 metadata
->u
.legacy
.microtile
= surface
->u
.legacy
.level
[0].mode
>= RADEON_SURF_MODE_1D
?
707 RADEON_LAYOUT_TILED
: RADEON_LAYOUT_LINEAR
;
708 metadata
->u
.legacy
.macrotile
= surface
->u
.legacy
.level
[0].mode
>= RADEON_SURF_MODE_2D
?
709 RADEON_LAYOUT_TILED
: RADEON_LAYOUT_LINEAR
;
710 metadata
->u
.legacy
.pipe_config
= surface
->u
.legacy
.pipe_config
;
711 metadata
->u
.legacy
.bankw
= surface
->u
.legacy
.bankw
;
712 metadata
->u
.legacy
.bankh
= surface
->u
.legacy
.bankh
;
713 metadata
->u
.legacy
.tile_split
= surface
->u
.legacy
.tile_split
;
714 metadata
->u
.legacy
.mtilea
= surface
->u
.legacy
.mtilea
;
715 metadata
->u
.legacy
.num_banks
= surface
->u
.legacy
.num_banks
;
716 metadata
->u
.legacy
.stride
= surface
->u
.legacy
.level
[0].nblk_x
* surface
->bpe
;
717 metadata
->u
.legacy
.scanout
= (surface
->flags
& RADEON_SURF_SCANOUT
) != 0;
719 radv_query_opaque_metadata(device
, image
, metadata
);
722 /* The number of samples can be specified independently of the texture. */
724 radv_image_get_fmask_info(struct radv_device
*device
,
725 struct radv_image
*image
,
727 struct radv_fmask_info
*out
)
729 /* FMASK is allocated like an ordinary texture. */
730 struct radeon_surf fmask
= {};
731 struct ac_surf_info info
= image
->info
;
732 memset(out
, 0, sizeof(*out
));
734 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
) {
735 out
->alignment
= image
->surface
.u
.gfx9
.fmask_alignment
;
736 out
->size
= image
->surface
.u
.gfx9
.fmask_size
;
740 fmask
.blk_w
= image
->surface
.blk_w
;
741 fmask
.blk_h
= image
->surface
.blk_h
;
743 fmask
.flags
= image
->surface
.flags
| RADEON_SURF_FMASK
;
745 if (!image
->shareable
)
746 info
.surf_index
= &device
->fmask_mrt_offset_counter
;
748 /* Force 2D tiling if it wasn't set. This may occur when creating
749 * FMASK for MSAA resolve on R6xx. On R6xx, the single-sample
750 * destination buffer must have an FMASK too. */
751 fmask
.flags
= RADEON_SURF_CLR(fmask
.flags
, MODE
);
752 fmask
.flags
|= RADEON_SURF_SET(RADEON_SURF_MODE_2D
, MODE
);
754 switch (nr_samples
) {
766 device
->ws
->surface_init(device
->ws
, &info
, &fmask
);
767 assert(fmask
.u
.legacy
.level
[0].mode
== RADEON_SURF_MODE_2D
);
769 out
->slice_tile_max
= (fmask
.u
.legacy
.level
[0].nblk_x
* fmask
.u
.legacy
.level
[0].nblk_y
) / 64;
770 if (out
->slice_tile_max
)
771 out
->slice_tile_max
-= 1;
773 out
->tile_mode_index
= fmask
.u
.legacy
.tiling_index
[0];
774 out
->pitch_in_pixels
= fmask
.u
.legacy
.level
[0].nblk_x
;
775 out
->bank_height
= fmask
.u
.legacy
.bankh
;
776 out
->tile_swizzle
= fmask
.tile_swizzle
;
777 out
->alignment
= MAX2(256, fmask
.surf_alignment
);
778 out
->size
= fmask
.surf_size
;
780 assert(!out
->tile_swizzle
|| !image
->shareable
);
784 radv_image_alloc_fmask(struct radv_device
*device
,
785 struct radv_image
*image
)
787 radv_image_get_fmask_info(device
, image
, image
->info
.samples
, &image
->fmask
);
789 image
->fmask
.offset
= align64(image
->size
, image
->fmask
.alignment
);
790 image
->size
= image
->fmask
.offset
+ image
->fmask
.size
;
791 image
->alignment
= MAX2(image
->alignment
, image
->fmask
.alignment
);
795 radv_image_get_cmask_info(struct radv_device
*device
,
796 struct radv_image
*image
,
797 struct radv_cmask_info
*out
)
799 unsigned pipe_interleave_bytes
= device
->physical_device
->rad_info
.pipe_interleave_bytes
;
800 unsigned num_pipes
= device
->physical_device
->rad_info
.num_tile_pipes
;
801 unsigned cl_width
, cl_height
;
803 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
) {
804 out
->alignment
= image
->surface
.u
.gfx9
.cmask_alignment
;
805 out
->size
= image
->surface
.u
.gfx9
.cmask_size
;
822 case 16: /* Hawaii */
831 unsigned base_align
= num_pipes
* pipe_interleave_bytes
;
833 unsigned width
= align(image
->info
.width
, cl_width
*8);
834 unsigned height
= align(image
->info
.height
, cl_height
*8);
835 unsigned slice_elements
= (width
* height
) / (8*8);
837 /* Each element of CMASK is a nibble. */
838 unsigned slice_bytes
= slice_elements
/ 2;
840 out
->slice_tile_max
= (width
* height
) / (128*128);
841 if (out
->slice_tile_max
)
842 out
->slice_tile_max
-= 1;
844 out
->alignment
= MAX2(256, base_align
);
845 out
->size
= (image
->type
== VK_IMAGE_TYPE_3D
? image
->info
.depth
: image
->info
.array_size
) *
846 align(slice_bytes
, base_align
);
850 radv_image_alloc_cmask(struct radv_device
*device
,
851 struct radv_image
*image
)
853 uint32_t clear_value_size
= 0;
854 radv_image_get_cmask_info(device
, image
, &image
->cmask
);
856 image
->cmask
.offset
= align64(image
->size
, image
->cmask
.alignment
);
857 /* + 8 for storing the clear values */
858 if (!image
->clear_value_offset
) {
859 image
->clear_value_offset
= image
->cmask
.offset
+ image
->cmask
.size
;
860 clear_value_size
= 8;
862 image
->size
= image
->cmask
.offset
+ image
->cmask
.size
+ clear_value_size
;
863 image
->alignment
= MAX2(image
->alignment
, image
->cmask
.alignment
);
867 radv_image_alloc_dcc(struct radv_image
*image
)
869 image
->dcc_offset
= align64(image
->size
, image
->surface
.dcc_alignment
);
870 /* + 16 for storing the clear values + dcc pred */
871 image
->clear_value_offset
= image
->dcc_offset
+ image
->surface
.dcc_size
;
872 image
->dcc_pred_offset
= image
->clear_value_offset
+ 8;
873 image
->size
= image
->dcc_offset
+ image
->surface
.dcc_size
+ 16;
874 image
->alignment
= MAX2(image
->alignment
, image
->surface
.dcc_alignment
);
878 radv_image_alloc_htile(struct radv_image
*image
)
880 image
->htile_offset
= align64(image
->size
, image
->surface
.htile_alignment
);
882 /* + 8 for storing the clear values */
883 image
->clear_value_offset
= image
->htile_offset
+ image
->surface
.htile_size
;
884 image
->size
= image
->clear_value_offset
+ 8;
885 image
->alignment
= align64(image
->alignment
, image
->surface
.htile_alignment
);
889 radv_image_can_enable_dcc_or_cmask(struct radv_image
*image
)
891 if (image
->info
.samples
<= 1 &&
892 image
->info
.width
* image
->info
.height
<= 512 * 512) {
893 /* Do not enable CMASK or DCC for small surfaces where the cost
894 * of the eliminate pass can be higher than the benefit of fast
895 * clear. RadeonSI does this, but the image threshold is
901 return image
->usage
& VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
&&
902 (image
->exclusive
|| image
->queue_family_mask
== 1);
906 radv_image_can_enable_dcc(struct radv_image
*image
)
908 return radv_image_can_enable_dcc_or_cmask(image
) &&
909 radv_image_has_dcc(image
);
913 radv_image_can_enable_cmask(struct radv_image
*image
)
915 if (image
->surface
.bpe
> 8 && image
->info
.samples
== 1) {
916 /* Do not enable CMASK for non-MSAA images (fast color clear)
917 * because 128 bit formats are not supported, but FMASK might
923 return radv_image_can_enable_dcc_or_cmask(image
) &&
924 image
->info
.levels
== 1 &&
925 image
->info
.depth
== 1 &&
926 !image
->surface
.is_linear
;
930 radv_image_can_enable_fmask(struct radv_image
*image
)
932 return image
->info
.samples
> 1 && vk_format_is_color(image
->vk_format
);
936 radv_image_can_enable_htile(struct radv_image
*image
)
938 return image
->info
.levels
== 1 && vk_format_is_depth(image
->vk_format
);
942 radv_image_create(VkDevice _device
,
943 const struct radv_image_create_info
*create_info
,
944 const VkAllocationCallbacks
* alloc
,
947 RADV_FROM_HANDLE(radv_device
, device
, _device
);
948 const VkImageCreateInfo
*pCreateInfo
= create_info
->vk_info
;
949 struct radv_image
*image
= NULL
;
950 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO
);
952 radv_assert(pCreateInfo
->mipLevels
> 0);
953 radv_assert(pCreateInfo
->arrayLayers
> 0);
954 radv_assert(pCreateInfo
->samples
> 0);
955 radv_assert(pCreateInfo
->extent
.width
> 0);
956 radv_assert(pCreateInfo
->extent
.height
> 0);
957 radv_assert(pCreateInfo
->extent
.depth
> 0);
959 image
= vk_zalloc2(&device
->alloc
, alloc
, sizeof(*image
), 8,
960 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
962 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
964 image
->type
= pCreateInfo
->imageType
;
965 image
->info
.width
= pCreateInfo
->extent
.width
;
966 image
->info
.height
= pCreateInfo
->extent
.height
;
967 image
->info
.depth
= pCreateInfo
->extent
.depth
;
968 image
->info
.samples
= pCreateInfo
->samples
;
969 image
->info
.array_size
= pCreateInfo
->arrayLayers
;
970 image
->info
.levels
= pCreateInfo
->mipLevels
;
971 image
->info
.num_channels
= 4; /* TODO: set this correctly */
973 image
->vk_format
= pCreateInfo
->format
;
974 image
->tiling
= pCreateInfo
->tiling
;
975 image
->usage
= pCreateInfo
->usage
;
976 image
->flags
= pCreateInfo
->flags
;
978 image
->exclusive
= pCreateInfo
->sharingMode
== VK_SHARING_MODE_EXCLUSIVE
;
979 if (pCreateInfo
->sharingMode
== VK_SHARING_MODE_CONCURRENT
) {
980 for (uint32_t i
= 0; i
< pCreateInfo
->queueFamilyIndexCount
; ++i
)
981 if (pCreateInfo
->pQueueFamilyIndices
[i
] == VK_QUEUE_FAMILY_EXTERNAL_KHR
)
982 image
->queue_family_mask
|= (1u << RADV_MAX_QUEUE_FAMILIES
) - 1u;
984 image
->queue_family_mask
|= 1u << pCreateInfo
->pQueueFamilyIndices
[i
];
987 image
->shareable
= vk_find_struct_const(pCreateInfo
->pNext
,
988 EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR
) != NULL
;
989 if (!vk_format_is_depth(pCreateInfo
->format
) && !create_info
->scanout
&& !image
->shareable
) {
990 image
->info
.surf_index
= &device
->image_mrt_offset_counter
;
993 radv_init_surface(device
, &image
->surface
, create_info
);
995 device
->ws
->surface_init(device
->ws
, &image
->info
, &image
->surface
);
997 image
->size
= image
->surface
.surf_size
;
998 image
->alignment
= image
->surface
.surf_alignment
;
1000 if (!create_info
->no_metadata_planes
) {
1001 /* Try to enable DCC first. */
1002 if (radv_image_can_enable_dcc(image
)) {
1003 radv_image_alloc_dcc(image
);
1004 if (image
->info
.samples
> 1) {
1005 /* CMASK should be enabled because DCC fast
1006 * clear with MSAA needs it.
1008 assert(radv_image_can_enable_cmask(image
));
1009 radv_image_alloc_cmask(device
, image
);
1012 /* When DCC cannot be enabled, try CMASK. */
1013 image
->surface
.dcc_size
= 0;
1014 if (radv_image_can_enable_cmask(image
)) {
1015 radv_image_alloc_cmask(device
, image
);
1019 /* Try to enable FMASK for multisampled images. */
1020 if (radv_image_can_enable_fmask(image
)) {
1021 radv_image_alloc_fmask(device
, image
);
1023 /* Otherwise, try to enable HTILE for depth surfaces. */
1024 if (radv_image_can_enable_htile(image
) &&
1025 !(device
->instance
->debug_flags
& RADV_DEBUG_NO_HIZ
)) {
1026 radv_image_alloc_htile(image
);
1027 image
->tc_compatible_htile
= image
->surface
.flags
& RADEON_SURF_TC_COMPATIBLE_HTILE
;
1029 image
->surface
.htile_size
= 0;
1033 image
->surface
.dcc_size
= 0;
1034 image
->surface
.htile_size
= 0;
1037 if (pCreateInfo
->flags
& VK_IMAGE_CREATE_SPARSE_BINDING_BIT
) {
1038 image
->alignment
= MAX2(image
->alignment
, 4096);
1039 image
->size
= align64(image
->size
, image
->alignment
);
1042 image
->bo
= device
->ws
->buffer_create(device
->ws
, image
->size
, image
->alignment
,
1043 0, RADEON_FLAG_VIRTUAL
);
1045 vk_free2(&device
->alloc
, alloc
, image
);
1046 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY
);
1050 *pImage
= radv_image_to_handle(image
);
1056 radv_image_view_make_descriptor(struct radv_image_view
*iview
,
1057 struct radv_device
*device
,
1058 const VkComponentMapping
*components
,
1059 bool is_storage_image
)
1061 struct radv_image
*image
= iview
->image
;
1062 bool is_stencil
= iview
->aspect_mask
== VK_IMAGE_ASPECT_STENCIL_BIT
;
1064 uint32_t *descriptor
;
1065 uint32_t hw_level
= 0;
1067 if (is_storage_image
) {
1068 descriptor
= iview
->storage_descriptor
;
1070 descriptor
= iview
->descriptor
;
1073 assert(image
->surface
.blk_w
% vk_format_get_blockwidth(image
->vk_format
) == 0);
1074 blk_w
= image
->surface
.blk_w
/ vk_format_get_blockwidth(image
->vk_format
) * vk_format_get_blockwidth(iview
->vk_format
);
1076 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
)
1077 hw_level
= iview
->base_mip
;
1078 si_make_texture_descriptor(device
, image
, is_storage_image
,
1082 hw_level
, hw_level
+ iview
->level_count
- 1,
1084 iview
->base_layer
+ iview
->layer_count
- 1,
1085 iview
->extent
.width
,
1086 iview
->extent
.height
,
1087 iview
->extent
.depth
,
1091 const struct legacy_surf_level
*base_level_info
= NULL
;
1092 if (device
->physical_device
->rad_info
.chip_class
<= GFX9
) {
1094 base_level_info
= &image
->surface
.u
.legacy
.stencil_level
[iview
->base_mip
];
1096 base_level_info
= &image
->surface
.u
.legacy
.level
[iview
->base_mip
];
1098 si_set_mutable_tex_desc_fields(device
, image
,
1102 blk_w
, is_stencil
, is_storage_image
, descriptor
);
1106 radv_image_view_init(struct radv_image_view
*iview
,
1107 struct radv_device
*device
,
1108 const VkImageViewCreateInfo
* pCreateInfo
)
1110 RADV_FROM_HANDLE(radv_image
, image
, pCreateInfo
->image
);
1111 const VkImageSubresourceRange
*range
= &pCreateInfo
->subresourceRange
;
1113 switch (image
->type
) {
1114 case VK_IMAGE_TYPE_1D
:
1115 case VK_IMAGE_TYPE_2D
:
1116 assert(range
->baseArrayLayer
+ radv_get_layerCount(image
, range
) - 1 <= image
->info
.array_size
);
1118 case VK_IMAGE_TYPE_3D
:
1119 assert(range
->baseArrayLayer
+ radv_get_layerCount(image
, range
) - 1
1120 <= radv_minify(image
->info
.depth
, range
->baseMipLevel
));
1123 unreachable("bad VkImageType");
1125 iview
->image
= image
;
1126 iview
->bo
= image
->bo
;
1127 iview
->type
= pCreateInfo
->viewType
;
1128 iview
->vk_format
= pCreateInfo
->format
;
1129 iview
->aspect_mask
= pCreateInfo
->subresourceRange
.aspectMask
;
1131 if (iview
->aspect_mask
== VK_IMAGE_ASPECT_STENCIL_BIT
) {
1132 iview
->vk_format
= vk_format_stencil_only(iview
->vk_format
);
1133 } else if (iview
->aspect_mask
== VK_IMAGE_ASPECT_DEPTH_BIT
) {
1134 iview
->vk_format
= vk_format_depth_only(iview
->vk_format
);
1137 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
) {
1138 iview
->extent
= (VkExtent3D
) {
1139 .width
= image
->info
.width
,
1140 .height
= image
->info
.height
,
1141 .depth
= image
->info
.depth
,
1144 iview
->extent
= (VkExtent3D
) {
1145 .width
= radv_minify(image
->info
.width
, range
->baseMipLevel
),
1146 .height
= radv_minify(image
->info
.height
, range
->baseMipLevel
),
1147 .depth
= radv_minify(image
->info
.depth
, range
->baseMipLevel
),
1151 if (iview
->vk_format
!= image
->vk_format
) {
1152 unsigned view_bw
= vk_format_get_blockwidth(iview
->vk_format
);
1153 unsigned view_bh
= vk_format_get_blockheight(iview
->vk_format
);
1154 unsigned img_bw
= vk_format_get_blockwidth(image
->vk_format
);
1155 unsigned img_bh
= vk_format_get_blockheight(image
->vk_format
);
1157 iview
->extent
.width
= round_up_u32(iview
->extent
.width
* view_bw
, img_bw
);
1158 iview
->extent
.height
= round_up_u32(iview
->extent
.height
* view_bh
, img_bh
);
1160 /* Comment ported from amdvlk -
1161 * If we have the following image:
1162 * Uncompressed pixels Compressed block sizes (4x4)
1163 * mip0: 22 x 22 6 x 6
1164 * mip1: 11 x 11 3 x 3
1169 * On GFX9 the descriptor is always programmed with the WIDTH and HEIGHT of the base level and the HW is
1170 * calculating the degradation of the block sizes down the mip-chain as follows (straight-up
1171 * divide-by-two integer math):
1177 * This means that mip2 will be missing texels.
1179 * Fix this by calculating the base mip's width and height, then convert that, and round it
1180 * back up to get the level 0 size.
1181 * Clamp the converted size between the original values, and next power of two, which
1182 * means we don't oversize the image.
1184 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
&&
1185 vk_format_is_compressed(image
->vk_format
) &&
1186 !vk_format_is_compressed(iview
->vk_format
)) {
1187 unsigned rounded_img_w
= util_next_power_of_two(iview
->extent
.width
);
1188 unsigned rounded_img_h
= util_next_power_of_two(iview
->extent
.height
);
1189 unsigned lvl_width
= radv_minify(image
->info
.width
, range
->baseMipLevel
);
1190 unsigned lvl_height
= radv_minify(image
->info
.height
, range
->baseMipLevel
);
1192 lvl_width
= round_up_u32(lvl_width
* view_bw
, img_bw
);
1193 lvl_height
= round_up_u32(lvl_height
* view_bh
, img_bh
);
1195 lvl_width
<<= range
->baseMipLevel
;
1196 lvl_height
<<= range
->baseMipLevel
;
1198 iview
->extent
.width
= CLAMP(lvl_width
, iview
->extent
.width
, rounded_img_w
);
1199 iview
->extent
.height
= CLAMP(lvl_height
, iview
->extent
.height
, rounded_img_h
);
1203 iview
->base_layer
= range
->baseArrayLayer
;
1204 iview
->layer_count
= radv_get_layerCount(image
, range
);
1205 iview
->base_mip
= range
->baseMipLevel
;
1206 iview
->level_count
= radv_get_levelCount(image
, range
);
1208 radv_image_view_make_descriptor(iview
, device
, &pCreateInfo
->components
, false);
1209 radv_image_view_make_descriptor(iview
, device
, &pCreateInfo
->components
, true);
1212 bool radv_layout_has_htile(const struct radv_image
*image
,
1213 VkImageLayout layout
,
1214 unsigned queue_mask
)
1216 if (radv_image_is_tc_compat_htile(image
))
1217 return layout
!= VK_IMAGE_LAYOUT_GENERAL
;
1219 return radv_image_has_htile(image
) &&
1220 (layout
== VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
||
1221 layout
== VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
) &&
1222 queue_mask
== (1u << RADV_QUEUE_GENERAL
);
1225 bool radv_layout_is_htile_compressed(const struct radv_image
*image
,
1226 VkImageLayout layout
,
1227 unsigned queue_mask
)
1229 if (radv_image_is_tc_compat_htile(image
))
1230 return layout
!= VK_IMAGE_LAYOUT_GENERAL
;
1232 return radv_image_has_htile(image
) &&
1233 (layout
== VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
||
1234 layout
== VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
) &&
1235 queue_mask
== (1u << RADV_QUEUE_GENERAL
);
1238 bool radv_layout_can_fast_clear(const struct radv_image
*image
,
1239 VkImageLayout layout
,
1240 unsigned queue_mask
)
1242 return layout
== VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
&&
1243 queue_mask
== (1u << RADV_QUEUE_GENERAL
);
1246 bool radv_layout_dcc_compressed(const struct radv_image
*image
,
1247 VkImageLayout layout
,
1248 unsigned queue_mask
)
1250 /* Don't compress compute transfer dst, as image stores are not supported. */
1251 if (layout
== VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
&&
1252 (queue_mask
& (1u << RADV_QUEUE_COMPUTE
)))
1255 return radv_image_has_dcc(image
) && layout
!= VK_IMAGE_LAYOUT_GENERAL
;
1259 unsigned radv_image_queue_family_mask(const struct radv_image
*image
, uint32_t family
, uint32_t queue_family
)
1261 if (!image
->exclusive
)
1262 return image
->queue_family_mask
;
1263 if (family
== VK_QUEUE_FAMILY_EXTERNAL_KHR
)
1264 return (1u << RADV_MAX_QUEUE_FAMILIES
) - 1u;
1265 if (family
== VK_QUEUE_FAMILY_IGNORED
)
1266 return 1u << queue_family
;
1267 return 1u << family
;
1271 radv_CreateImage(VkDevice device
,
1272 const VkImageCreateInfo
*pCreateInfo
,
1273 const VkAllocationCallbacks
*pAllocator
,
1277 const VkNativeBufferANDROID
*gralloc_info
=
1278 vk_find_struct_const(pCreateInfo
->pNext
, NATIVE_BUFFER_ANDROID
);
1281 return radv_image_from_gralloc(device
, pCreateInfo
, gralloc_info
,
1282 pAllocator
, pImage
);
1285 const struct wsi_image_create_info
*wsi_info
=
1286 vk_find_struct_const(pCreateInfo
->pNext
, WSI_IMAGE_CREATE_INFO_MESA
);
1287 bool scanout
= wsi_info
&& wsi_info
->scanout
;
1289 return radv_image_create(device
,
1290 &(struct radv_image_create_info
) {
1291 .vk_info
= pCreateInfo
,
1299 radv_DestroyImage(VkDevice _device
, VkImage _image
,
1300 const VkAllocationCallbacks
*pAllocator
)
1302 RADV_FROM_HANDLE(radv_device
, device
, _device
);
1303 RADV_FROM_HANDLE(radv_image
, image
, _image
);
1308 if (image
->flags
& VK_IMAGE_CREATE_SPARSE_BINDING_BIT
)
1309 device
->ws
->buffer_destroy(image
->bo
);
1311 if (image
->owned_memory
!= VK_NULL_HANDLE
)
1312 radv_FreeMemory(_device
, image
->owned_memory
, pAllocator
);
1314 vk_free2(&device
->alloc
, pAllocator
, image
);
1317 void radv_GetImageSubresourceLayout(
1320 const VkImageSubresource
* pSubresource
,
1321 VkSubresourceLayout
* pLayout
)
1323 RADV_FROM_HANDLE(radv_image
, image
, _image
);
1324 RADV_FROM_HANDLE(radv_device
, device
, _device
);
1325 int level
= pSubresource
->mipLevel
;
1326 int layer
= pSubresource
->arrayLayer
;
1327 struct radeon_surf
*surface
= &image
->surface
;
1329 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
) {
1330 pLayout
->offset
= surface
->u
.gfx9
.offset
[level
] + surface
->u
.gfx9
.surf_slice_size
* layer
;
1331 pLayout
->rowPitch
= surface
->u
.gfx9
.surf_pitch
* surface
->bpe
;
1332 pLayout
->arrayPitch
= surface
->u
.gfx9
.surf_slice_size
;
1333 pLayout
->depthPitch
= surface
->u
.gfx9
.surf_slice_size
;
1334 pLayout
->size
= surface
->u
.gfx9
.surf_slice_size
;
1335 if (image
->type
== VK_IMAGE_TYPE_3D
)
1336 pLayout
->size
*= u_minify(image
->info
.depth
, level
);
1338 pLayout
->offset
= surface
->u
.legacy
.level
[level
].offset
+ (uint64_t)surface
->u
.legacy
.level
[level
].slice_size_dw
* 4 * layer
;
1339 pLayout
->rowPitch
= surface
->u
.legacy
.level
[level
].nblk_x
* surface
->bpe
;
1340 pLayout
->arrayPitch
= (uint64_t)surface
->u
.legacy
.level
[level
].slice_size_dw
* 4;
1341 pLayout
->depthPitch
= (uint64_t)surface
->u
.legacy
.level
[level
].slice_size_dw
* 4;
1342 pLayout
->size
= (uint64_t)surface
->u
.legacy
.level
[level
].slice_size_dw
* 4;
1343 if (image
->type
== VK_IMAGE_TYPE_3D
)
1344 pLayout
->size
*= u_minify(image
->info
.depth
, level
);
1350 radv_CreateImageView(VkDevice _device
,
1351 const VkImageViewCreateInfo
*pCreateInfo
,
1352 const VkAllocationCallbacks
*pAllocator
,
1355 RADV_FROM_HANDLE(radv_device
, device
, _device
);
1356 struct radv_image_view
*view
;
1358 view
= vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*view
), 8,
1359 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1361 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1363 radv_image_view_init(view
, device
, pCreateInfo
);
1365 *pView
= radv_image_view_to_handle(view
);
1371 radv_DestroyImageView(VkDevice _device
, VkImageView _iview
,
1372 const VkAllocationCallbacks
*pAllocator
)
1374 RADV_FROM_HANDLE(radv_device
, device
, _device
);
1375 RADV_FROM_HANDLE(radv_image_view
, iview
, _iview
);
1379 vk_free2(&device
->alloc
, pAllocator
, iview
);
1382 void radv_buffer_view_init(struct radv_buffer_view
*view
,
1383 struct radv_device
*device
,
1384 const VkBufferViewCreateInfo
* pCreateInfo
)
1386 RADV_FROM_HANDLE(radv_buffer
, buffer
, pCreateInfo
->buffer
);
1388 view
->bo
= buffer
->bo
;
1389 view
->range
= pCreateInfo
->range
== VK_WHOLE_SIZE
?
1390 buffer
->size
- pCreateInfo
->offset
: pCreateInfo
->range
;
1391 view
->vk_format
= pCreateInfo
->format
;
1393 radv_make_buffer_descriptor(device
, buffer
, view
->vk_format
,
1394 pCreateInfo
->offset
, view
->range
, view
->state
);
1398 radv_CreateBufferView(VkDevice _device
,
1399 const VkBufferViewCreateInfo
*pCreateInfo
,
1400 const VkAllocationCallbacks
*pAllocator
,
1401 VkBufferView
*pView
)
1403 RADV_FROM_HANDLE(radv_device
, device
, _device
);
1404 struct radv_buffer_view
*view
;
1406 view
= vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*view
), 8,
1407 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1409 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1411 radv_buffer_view_init(view
, device
, pCreateInfo
);
1413 *pView
= radv_buffer_view_to_handle(view
);
1419 radv_DestroyBufferView(VkDevice _device
, VkBufferView bufferView
,
1420 const VkAllocationCallbacks
*pAllocator
)
1422 RADV_FROM_HANDLE(radv_device
, device
, _device
);
1423 RADV_FROM_HANDLE(radv_buffer_view
, view
, bufferView
);
1428 vk_free2(&device
->alloc
, pAllocator
, view
);