/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_private.h"
#include "vk_format.h"
#include "radv_radeon_winsys.h"
#include "util/debug.h"
#include "util/u_atomic.h"

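/* Pick the surface tiling mode for a new image: linear when the app asks for
 * VK_IMAGE_TILING_LINEAR (or when the shape makes tiling pointless), 2D tiled
 * otherwise. */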
static unsigned
radv_choose_tiling(struct radv_device *device,
		   const struct radv_image_create_info *create_info)
{
	const VkImageCreateInfo *pCreateInfo = create_info->vk_info;

	if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR) {
		assert(pCreateInfo->samples <= 1);
		return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Textures with a very small height are recommended to be linear. */
	if (pCreateInfo->imageType == VK_IMAGE_TYPE_1D ||
	    /* Only very thin and long 2D textures should benefit from
	     * linear_aligned. */
	    (pCreateInfo->extent.width > 8 && pCreateInfo->extent.height <= 2))
		return RADEON_SURF_MODE_LINEAR_ALIGNED;

	/* MSAA resources must be 2D tiled. */
	if (pCreateInfo->samples > 1)
		return RADEON_SURF_MODE_2D;

	return RADEON_SURF_MODE_2D;
}

static int
radv_init_surface(struct radv_device *device,
		  struct radeon_surf *surface,
		  const struct radv_image_create_info *create_info)
{
	const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
	unsigned array_mode = radv_choose_tiling(device, create_info);
	const struct vk_format_description *desc =
		vk_format_description(pCreateInfo->format);
	bool is_depth, is_stencil, blendable;

	is_depth = vk_format_has_depth(desc);
	is_stencil = vk_format_has_stencil(desc);

	surface->blk_w = vk_format_get_blockwidth(pCreateInfo->format);
	surface->blk_h = vk_format_get_blockheight(pCreateInfo->format);

	surface->bpe = vk_format_get_blocksize(vk_format_depth_only(pCreateInfo->format));
	/* align byte per element on dword */
	if (surface->bpe == 3) {
		surface->bpe = 4;
	}

	surface->flags = RADEON_SURF_SET(array_mode, MODE);

	switch (pCreateInfo->imageType){
	case VK_IMAGE_TYPE_1D:
		if (pCreateInfo->arrayLayers > 1)
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
		else
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
		break;
	case VK_IMAGE_TYPE_2D:
		if (pCreateInfo->arrayLayers > 1)
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
		else
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
		break;
	case VK_IMAGE_TYPE_3D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
		break;
	default:
		unreachable("unhandled image type");
	}

	if (is_depth)
		surface->flags |= RADEON_SURF_ZBUFFER;

	if (is_stencil)
		surface->flags |= RADEON_SURF_SBUFFER;

	surface->flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE;

	if ((pCreateInfo->usage & (VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
	                           VK_IMAGE_USAGE_STORAGE_BIT)) ||
	    (pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) ||
	    (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR) ||
	    device->physical_device->rad_info.chip_class < VI ||
	    create_info->scanout || (device->debug_flags & RADV_DEBUG_NO_DCC) ||
	    !radv_is_colorbuffer_format_supported(pCreateInfo->format, &blendable))
		surface->flags |= RADEON_SURF_DISABLE_DCC;
	if (create_info->scanout)
		surface->flags |= RADEON_SURF_SCANOUT;
	return 0;
}

#define ATI_VENDOR_ID 0x1002
static uint32_t si_get_bo_metadata_word1(struct radv_device *device)
{
	return (ATI_VENDOR_ID << 16) | device->physical_device->rad_info.pci_id;
}

static inline unsigned
si_tile_mode_index(const struct radv_image *image, unsigned level, bool stencil)
{
	if (stencil)
		return image->surface.u.legacy.stencil_tiling_index[level];
	else
		return image->surface.u.legacy.tiling_index[level];
}

static unsigned radv_map_swizzle(unsigned swizzle)
{
	switch (swizzle) {
	case VK_SWIZZLE_Y:
		return V_008F0C_SQ_SEL_Y;
	case VK_SWIZZLE_Z:
		return V_008F0C_SQ_SEL_Z;
	case VK_SWIZZLE_W:
		return V_008F0C_SQ_SEL_W;
	case VK_SWIZZLE_0:
		return V_008F0C_SQ_SEL_0;
	case VK_SWIZZLE_1:
		return V_008F0C_SQ_SEL_1;
	default: /* VK_SWIZZLE_X */
		return V_008F0C_SQ_SEL_X;
	}
}

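/* Fill the 4-dword typed buffer descriptor: base address + stride, range and
 * the dst_sel/num_format/data_format word. */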
static void
radv_make_buffer_descriptor(struct radv_device *device,
			    struct radv_buffer *buffer,
			    VkFormat vk_format,
			    unsigned offset,
			    unsigned range,
			    uint32_t *state)
{
	const struct vk_format_description *desc;
	unsigned stride;
	uint64_t gpu_address = device->ws->buffer_get_va(buffer->bo);
	uint64_t va = gpu_address + buffer->offset;
	unsigned num_format, data_format;
	int first_non_void;
	desc = vk_format_description(vk_format);
	first_non_void = vk_format_get_first_non_void_channel(vk_format);
	stride = desc->block.bits / 8;

	num_format = radv_translate_buffer_numformat(desc, first_non_void);
	data_format = radv_translate_buffer_dataformat(desc, first_non_void);

	va += offset;
	state[0] = va;
	state[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
		S_008F04_STRIDE(stride);

	if (device->physical_device->rad_info.chip_class < VI && stride) {
		range /= stride;
	}

	state[2] = range;
	state[3] = S_008F0C_DST_SEL_X(radv_map_swizzle(desc->swizzle[0])) |
		   S_008F0C_DST_SEL_Y(radv_map_swizzle(desc->swizzle[1])) |
		   S_008F0C_DST_SEL_Z(radv_map_swizzle(desc->swizzle[2])) |
		   S_008F0C_DST_SEL_W(radv_map_swizzle(desc->swizzle[3])) |
		   S_008F0C_NUM_FORMAT(num_format) |
		   S_008F0C_DATA_FORMAT(data_format);
}

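/* Patch the fields of an image descriptor that depend on the bound memory and
 * mip level: base address, tiling/swizzle mode, pitch and the DCC/HTILE
 * metadata address. */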
static void
si_set_mutable_tex_desc_fields(struct radv_device *device,
			       struct radv_image *image,
			       const struct legacy_surf_level *base_level_info,
			       unsigned base_level, unsigned first_level,
			       unsigned block_width, bool is_stencil,
			       uint32_t *state)
{
	uint64_t gpu_address = image->bo ? device->ws->buffer_get_va(image->bo) + image->offset : 0;
	uint64_t va = gpu_address;
	unsigned pitch = base_level_info->nblk_x * block_width;
	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
	uint64_t meta_va = 0;
	if (chip_class >= GFX9) {
		if (is_stencil)
			va += image->surface.u.gfx9.stencil_offset;
		else
			va += image->surface.u.gfx9.surf_offset;
	} else
		va += base_level_info->offset;

	state[0] = va >> 8;
	if (chip_class < GFX9)
		state[0] |= image->surface.tile_swizzle;
	state[1] &= C_008F14_BASE_ADDRESS_HI;
	state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);
	state[3] |= S_008F1C_TILING_INDEX(si_tile_mode_index(image, base_level,
							     is_stencil));
	state[4] |= S_008F20_PITCH_GFX6(pitch - 1);

	if (chip_class >= VI) {
		state[6] &= C_008F28_COMPRESSION_EN;
		state[7] = 0;
		if (image->surface.dcc_size && first_level < image->surface.num_dcc_levels) {
			meta_va = gpu_address + image->dcc_offset;
			if (chip_class <= VI)
				meta_va += base_level_info->dcc_offset;
			state[6] |= S_008F28_COMPRESSION_EN(1);
			state[7] = meta_va >> 8;
			if (chip_class < GFX9)
				state[7] |= image->surface.tile_swizzle;
		}
	}

	if (chip_class >= GFX9) {
		state[3] &= C_008F1C_SW_MODE;
		state[4] &= C_008F20_PITCH_GFX9;

		if (is_stencil) {
			state[3] |= S_008F1C_SW_MODE(image->surface.u.gfx9.stencil.swizzle_mode);
			state[4] |= S_008F20_PITCH_GFX9(image->surface.u.gfx9.stencil.epitch);
		} else {
			state[3] |= S_008F1C_SW_MODE(image->surface.u.gfx9.surf.swizzle_mode);
			state[4] |= S_008F20_PITCH_GFX9(image->surface.u.gfx9.surf.epitch);
		}

		state[5] &= C_008F24_META_DATA_ADDRESS &
			    C_008F24_META_PIPE_ALIGNED &
			    C_008F24_META_RB_ALIGNED;
		if (meta_va) {
			struct gfx9_surf_meta_flags meta;

			if (image->dcc_offset)
				meta = image->surface.u.gfx9.dcc;
			else
				meta = image->surface.u.gfx9.htile;

			state[5] |= S_008F24_META_DATA_ADDRESS(meta_va >> 40) |
				    S_008F24_META_PIPE_ALIGNED(meta.pipe_aligned) |
				    S_008F24_META_RB_ALIGNED(meta.rb_aligned);
		}
	} else {
		unsigned pitch = base_level_info->nblk_x * block_width;
		unsigned index = si_tile_mode_index(image, base_level, is_stencil);

		state[3] &= C_008F1C_TILING_INDEX;
		state[3] |= S_008F1C_TILING_INDEX(index);
		state[4] &= C_008F20_PITCH_GFX6;
		state[4] |= S_008F20_PITCH_GFX6(pitch - 1);
	}
}

static unsigned radv_tex_dim(VkImageType image_type, VkImageViewType view_type,
			     unsigned nr_layers, unsigned nr_samples, bool is_storage_image)
{
	if (view_type == VK_IMAGE_VIEW_TYPE_CUBE || view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
		return is_storage_image ? V_008F1C_SQ_RSRC_IMG_2D_ARRAY : V_008F1C_SQ_RSRC_IMG_CUBE;
	switch (image_type) {
	case VK_IMAGE_TYPE_1D:
		return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_1D_ARRAY : V_008F1C_SQ_RSRC_IMG_1D;
	case VK_IMAGE_TYPE_2D:
		if (nr_samples > 1)
			return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY : V_008F1C_SQ_RSRC_IMG_2D_MSAA;
		else
			return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_2D_ARRAY : V_008F1C_SQ_RSRC_IMG_2D;
	case VK_IMAGE_TYPE_3D:
		if (view_type == VK_IMAGE_VIEW_TYPE_3D)
			return V_008F1C_SQ_RSRC_IMG_3D;
		else
			return V_008F1C_SQ_RSRC_IMG_2D_ARRAY;
	default:
		unreachable("illegal image type");
	}
}

static unsigned gfx9_border_color_swizzle(const unsigned char swizzle[4])
{
	unsigned bc_swizzle = V_008F20_BC_SWIZZLE_XYZW;

	if (swizzle[3] == VK_SWIZZLE_X) {
		/* For the pre-defined border color values (white, opaque
		 * black, transparent black), the only thing that matters is
		 * that the alpha channel winds up in the correct place
		 * (because the RGB channels are all the same) so either of
		 * these enumerations will work.
		 */
		if (swizzle[2] == VK_SWIZZLE_Y)
			bc_swizzle = V_008F20_BC_SWIZZLE_WZYX;
		else
			bc_swizzle = V_008F20_BC_SWIZZLE_WXYZ;
	} else if (swizzle[0] == VK_SWIZZLE_X) {
		if (swizzle[1] == VK_SWIZZLE_Y)
			bc_swizzle = V_008F20_BC_SWIZZLE_XYZW;
		else
			bc_swizzle = V_008F20_BC_SWIZZLE_XWYZ;
	} else if (swizzle[1] == VK_SWIZZLE_X) {
		bc_swizzle = V_008F20_BC_SWIZZLE_YXWZ;
	} else if (swizzle[2] == VK_SWIZZLE_X) {
		bc_swizzle = V_008F20_BC_SWIZZLE_ZYXW;
	}

	return bc_swizzle;
}

/**
 * Build the sampler view descriptor for a texture.
 */
static void
si_make_texture_descriptor(struct radv_device *device,
			   struct radv_image *image,
			   bool is_storage_image,
			   VkImageViewType view_type,
			   VkFormat vk_format,
			   const VkComponentMapping *mapping,
			   unsigned first_level, unsigned last_level,
			   unsigned first_layer, unsigned last_layer,
			   unsigned width, unsigned height, unsigned depth,
			   uint32_t *state,
			   uint32_t *fmask_state)
{
	const struct vk_format_description *desc;
	enum vk_swizzle swizzle[4];
	int first_non_void;
	unsigned num_format, data_format, type;

	desc = vk_format_description(vk_format);

	if (desc->colorspace == VK_FORMAT_COLORSPACE_ZS) {
		const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0};
		vk_format_compose_swizzles(mapping, swizzle_xxxx, swizzle);
	} else {
		vk_format_compose_swizzles(mapping, desc->swizzle, swizzle);
	}

	first_non_void = vk_format_get_first_non_void_channel(vk_format);

	num_format = radv_translate_tex_numformat(vk_format, desc, first_non_void);
	if (num_format == ~0) {
		num_format = 0;
	}

	data_format = radv_translate_tex_dataformat(vk_format, desc, first_non_void);
	if (data_format == ~0) {
		data_format = 0;
	}

	type = radv_tex_dim(image->type, view_type, image->info.array_size, image->info.samples,
			    is_storage_image);
	if (type == V_008F1C_SQ_RSRC_IMG_1D_ARRAY) {
		height = 1;
		depth = image->info.array_size;
	} else if (type == V_008F1C_SQ_RSRC_IMG_2D_ARRAY ||
		   type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) {
		if (view_type != VK_IMAGE_VIEW_TYPE_3D)
			depth = image->info.array_size;
	} else if (type == V_008F1C_SQ_RSRC_IMG_CUBE)
		depth = image->info.array_size / 6;

	state[0] = 0;
	state[1] = (S_008F14_DATA_FORMAT_GFX6(data_format) |
		    S_008F14_NUM_FORMAT_GFX6(num_format));
	state[2] = (S_008F18_WIDTH(width - 1) |
		    S_008F18_HEIGHT(height - 1) |
		    S_008F18_PERF_MOD(4));
	state[3] = (S_008F1C_DST_SEL_X(radv_map_swizzle(swizzle[0])) |
		    S_008F1C_DST_SEL_Y(radv_map_swizzle(swizzle[1])) |
		    S_008F1C_DST_SEL_Z(radv_map_swizzle(swizzle[2])) |
		    S_008F1C_DST_SEL_W(radv_map_swizzle(swizzle[3])) |
		    S_008F1C_BASE_LEVEL(image->info.samples > 1 ?
					0 : first_level) |
		    S_008F1C_LAST_LEVEL(image->info.samples > 1 ?
					util_logbase2(image->info.samples) :
					last_level) |
		    S_008F1C_TYPE(type));
	state[4] = 0;
	state[5] = S_008F24_BASE_ARRAY(first_layer);
	state[6] = 0;
	state[7] = 0;

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		unsigned bc_swizzle = gfx9_border_color_swizzle(desc->swizzle);

		/* Depth is the last accessible layer on Gfx9.
		 * The hw doesn't need to know the total number of layers.
		 */
		if (type == V_008F1C_SQ_RSRC_IMG_3D)
			state[4] |= S_008F20_DEPTH(depth - 1);
		else
			state[4] |= S_008F20_DEPTH(last_layer);

		state[4] |= S_008F20_BC_SWIZZLE(bc_swizzle);
		state[5] |= S_008F24_MAX_MIP(image->info.samples > 1 ?
					     util_logbase2(image->info.samples) :
					     image->info.levels - 1);
	} else {
		state[3] |= S_008F1C_POW2_PAD(image->info.levels > 1);
		state[4] |= S_008F20_DEPTH(depth - 1);
		state[5] |= S_008F24_LAST_ARRAY(last_layer);
	}
	if (image->dcc_offset) {
		unsigned swap = radv_translate_colorswap(vk_format, FALSE);

		state[6] = S_008F28_ALPHA_IS_ON_MSB(swap <= 1);
	} else {
		/* The last dword is unused by hw. The shader uses it to clear
		 * bits in the first dword of sampler state.
		 */
		if (device->physical_device->rad_info.chip_class <= CIK && image->info.samples <= 1) {
			if (first_level == last_level)
				state[7] = C_008F30_MAX_ANISO_RATIO;
			else
				state[7] = 0xffffffff;
		}
	}

	/* Initialize the sampler view for FMASK. */
	if (image->fmask.size) {
		uint32_t fmask_format, num_format;
		uint64_t gpu_address = device->ws->buffer_get_va(image->bo);
		uint64_t va;

		va = gpu_address + image->offset + image->fmask.offset;

		if (device->physical_device->rad_info.chip_class >= GFX9) {
			fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK;
			switch (image->info.samples) {
			case 2:
				num_format = V_008F14_IMG_FMASK_8_2_2;
				break;
			case 4:
				num_format = V_008F14_IMG_FMASK_8_4_4;
				break;
			case 8:
				num_format = V_008F14_IMG_FMASK_32_8_8;
				break;
			default:
				unreachable("invalid nr_samples");
			}
		} else {
			switch (image->info.samples) {
			case 2:
				fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK8_S2_F2;
				break;
			case 4:
				fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK8_S4_F4;
				break;
			case 8:
				fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK32_S8_F8;
				break;
			default:
				assert(0);
				fmask_format = V_008F14_IMG_DATA_FORMAT_INVALID;
			}
			num_format = V_008F14_IMG_NUM_FORMAT_UINT;
		}

		fmask_state[0] = va >> 8;
		if (device->physical_device->rad_info.chip_class < GFX9)
			fmask_state[0] |= image->surface.tile_swizzle;
		fmask_state[1] = S_008F14_BASE_ADDRESS_HI(va >> 40) |
			S_008F14_DATA_FORMAT_GFX6(fmask_format) |
			S_008F14_NUM_FORMAT_GFX6(num_format);
		fmask_state[2] = S_008F18_WIDTH(width - 1) |
			S_008F18_HEIGHT(height - 1);
		fmask_state[3] = S_008F1C_DST_SEL_X(V_008F1C_SQ_SEL_X) |
			S_008F1C_DST_SEL_Y(V_008F1C_SQ_SEL_X) |
			S_008F1C_DST_SEL_Z(V_008F1C_SQ_SEL_X) |
			S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_X) |
			S_008F1C_TYPE(radv_tex_dim(image->type, view_type, 1, 0, false));
		fmask_state[4] = 0;
		fmask_state[5] = S_008F24_BASE_ARRAY(first_layer);
		fmask_state[6] = 0;
		fmask_state[7] = 0;

		if (device->physical_device->rad_info.chip_class >= GFX9) {
			fmask_state[3] |= S_008F1C_SW_MODE(image->surface.u.gfx9.fmask.swizzle_mode);
			fmask_state[4] |= S_008F20_DEPTH(last_layer) |
					  S_008F20_PITCH_GFX9(image->surface.u.gfx9.fmask.epitch);
			fmask_state[5] |= S_008F24_META_PIPE_ALIGNED(image->surface.u.gfx9.cmask.pipe_aligned) |
					  S_008F24_META_RB_ALIGNED(image->surface.u.gfx9.cmask.rb_aligned);
		} else {
			fmask_state[3] |= S_008F1C_TILING_INDEX(image->fmask.tile_mode_index);
			fmask_state[4] |= S_008F20_DEPTH(depth - 1) |
					  S_008F20_PITCH_GFX6(image->fmask.pitch_in_pixels - 1);
			fmask_state[5] |= S_008F24_LAST_ARRAY(last_layer);
		}
	} else if (fmask_state)
		memset(fmask_state, 0, 8 * 4);
}

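/* Fill the opaque metadata words (format version, PCI ID, an image descriptor
 * and per-level offsets) that accompany an exported/shared image. */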
static void
radv_query_opaque_metadata(struct radv_device *device,
			   struct radv_image *image,
			   struct radeon_bo_metadata *md)
{
	static const VkComponentMapping fixedmapping;
	uint32_t desc[8], i;

	/* Metadata image format version 1:
	 * [0] = 1 (metadata format identifier)
	 * [1] = (VENDOR_ID << 16) | PCI_ID
	 * [2:9] = image descriptor for the whole resource
	 *         [2] is always 0, because the base address is cleared
	 *         [9] is the DCC offset bits [39:8] from the beginning of
	 *             the buffer
	 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
	 */

	md->metadata[0] = 1; /* metadata image format version 1 */

	/* TILE_MODE_INDEX is ambiguous without a PCI ID. */
	md->metadata[1] = si_get_bo_metadata_word1(device);

	si_make_texture_descriptor(device, image, false,
				   (VkImageViewType)image->type, image->vk_format,
				   &fixedmapping, 0, image->info.levels - 1, 0,
				   image->info.array_size,
				   image->info.width, image->info.height,
				   image->info.depth,
				   desc, NULL);

	si_set_mutable_tex_desc_fields(device, image, &image->surface.u.legacy.level[0], 0, 0,
				       image->surface.blk_w, false, desc);

	/* Clear the base address and set the relative DCC offset. */
	desc[0] = 0;
	desc[1] &= C_008F14_BASE_ADDRESS_HI;
	desc[7] = image->dcc_offset >> 8;

	/* Dwords [2:9] contain the image descriptor. */
	memcpy(&md->metadata[2], desc, sizeof(desc));

	/* Dwords [10:..] contain the mipmap level offsets. */
	for (i = 0; i <= image->info.levels - 1; i++)
		md->metadata[10+i] = image->surface.u.legacy.level[i].offset >> 8;

	md->size_metadata = (11 + image->info.levels - 1) * 4;
}

void
radv_init_metadata(struct radv_device *device,
		   struct radv_image *image,
		   struct radeon_bo_metadata *metadata)
{
	struct radeon_surf *surface = &image->surface;

	memset(metadata, 0, sizeof(*metadata));

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		metadata->u.gfx9.swizzle_mode = surface->u.gfx9.surf.swizzle_mode;
	} else {
		metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
			RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
			RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config;
		metadata->u.legacy.bankw = surface->u.legacy.bankw;
		metadata->u.legacy.bankh = surface->u.legacy.bankh;
		metadata->u.legacy.tile_split = surface->u.legacy.tile_split;
		metadata->u.legacy.mtilea = surface->u.legacy.mtilea;
		metadata->u.legacy.num_banks = surface->u.legacy.num_banks;
		metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
		metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
	}
	radv_query_opaque_metadata(device, image, metadata);
}

/* The number of samples can be specified independently of the texture. */
static void
radv_image_get_fmask_info(struct radv_device *device,
			  struct radv_image *image,
			  unsigned nr_samples,
			  struct radv_fmask_info *out)
{
	/* FMASK is allocated like an ordinary texture. */
	struct radeon_surf fmask = {};
	struct ac_surf_info info = image->info;
	memset(out, 0, sizeof(*out));

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		out->alignment = image->surface.u.gfx9.fmask_alignment;
		out->size = image->surface.u.gfx9.fmask_size;
		return;
	}

	fmask.blk_w = image->surface.blk_w;
	fmask.blk_h = image->surface.blk_h;
	info.samples = 1;
	fmask.flags = image->surface.flags | RADEON_SURF_FMASK;

	/* Force 2D tiling if it wasn't set. This may occur when creating
	 * FMASK for MSAA resolve on R6xx. On R6xx, the single-sample
	 * destination buffer must have an FMASK too. */
	fmask.flags = RADEON_SURF_CLR(fmask.flags, MODE);
	fmask.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);

	switch (nr_samples) {
	case 2:
	case 4:
		fmask.bpe = 1;
		break;
	case 8:
		fmask.bpe = 4;
		break;
	default:
		return;
	}

	device->ws->surface_init(device->ws, &info, &fmask);
	assert(fmask.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);

	out->slice_tile_max = (fmask.u.legacy.level[0].nblk_x * fmask.u.legacy.level[0].nblk_y) / 64;
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->tile_mode_index = fmask.u.legacy.tiling_index[0];
	out->pitch_in_pixels = fmask.u.legacy.level[0].nblk_x;
	out->bank_height = fmask.u.legacy.bankh;
	out->alignment = MAX2(256, fmask.surf_alignment);
	out->size = fmask.surf_size;
}

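/* Append FMASK (the MSAA fragment mask metadata) to the image and grow its
 * size/alignment accordingly. */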
static void
radv_image_alloc_fmask(struct radv_device *device,
		       struct radv_image *image)
{
	radv_image_get_fmask_info(device, image, image->info.samples, &image->fmask);

	image->fmask.offset = align64(image->size, image->fmask.alignment);
	image->size = image->fmask.offset + image->fmask.size;
	image->alignment = MAX2(image->alignment, image->fmask.alignment);
}

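/* Compute CMASK (fast-clear/compression metadata for color surfaces) size and
 * alignment; each 8x8 block of the surface uses one nibble. */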
static void
radv_image_get_cmask_info(struct radv_device *device,
			  struct radv_image *image,
			  struct radv_cmask_info *out)
{
	unsigned pipe_interleave_bytes = device->physical_device->rad_info.pipe_interleave_bytes;
	unsigned num_pipes = device->physical_device->rad_info.num_tile_pipes;
	unsigned cl_width, cl_height;

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		out->alignment = image->surface.u.gfx9.cmask_alignment;
		out->size = image->surface.u.gfx9.cmask_size;
		return;
	}

	switch (num_pipes) {
	case 2:
		cl_width = 32;
		cl_height = 16;
		break;
	case 4:
		cl_width = 32;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 32;
		break;
	case 16: /* Hawaii */
		cl_width = 64;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	unsigned base_align = num_pipes * pipe_interleave_bytes;

	unsigned width = align(image->info.width, cl_width*8);
	unsigned height = align(image->info.height, cl_height*8);
	unsigned slice_elements = (width * height) / (8*8);

	/* Each element of CMASK is a nibble. */
	unsigned slice_bytes = slice_elements / 2;

	out->slice_tile_max = (width * height) / (128*128);
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->alignment = MAX2(256, base_align);
	out->size = (image->type == VK_IMAGE_TYPE_3D ? image->info.depth : image->info.array_size) *
		    align(slice_bytes, base_align);
}

static void
radv_image_alloc_cmask(struct radv_device *device,
		       struct radv_image *image)
{
	uint32_t clear_value_size = 0;
	radv_image_get_cmask_info(device, image, &image->cmask);

	image->cmask.offset = align64(image->size, image->cmask.alignment);
	/* + 8 for storing the clear values */
	if (!image->clear_value_offset) {
		image->clear_value_offset = image->cmask.offset + image->cmask.size;
		clear_value_size = 8;
	}
	image->size = image->cmask.offset + image->cmask.size + clear_value_size;
	image->alignment = MAX2(image->alignment, image->cmask.alignment);
}

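/* Reserve space after the main surface for DCC, the fast-clear values and the
 * DCC predication word. */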
static void
radv_image_alloc_dcc(struct radv_device *device,
		     struct radv_image *image)
{
	image->dcc_offset = align64(image->size, image->surface.dcc_alignment);
	/* + 16 for storing the clear values + dcc pred */
	image->clear_value_offset = image->dcc_offset + image->surface.dcc_size;
	image->dcc_pred_offset = image->clear_value_offset + 8;
	image->size = image->dcc_offset + image->surface.dcc_size + 16;
	image->alignment = MAX2(image->alignment, image->surface.dcc_alignment);
}

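/* Reserve space for HTILE (depth/stencil metadata used for HiZ and fast depth
 * clears); skipped for mipmapped images or when HiZ is disabled. */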
static void
radv_image_alloc_htile(struct radv_device *device,
		       struct radv_image *image)
{
	if ((device->debug_flags & RADV_DEBUG_NO_HIZ) || image->info.levels > 1) {
		image->surface.htile_size = 0;
		return;
	}

	image->htile_offset = align64(image->size, image->surface.htile_alignment);

	/* + 8 for storing the clear values */
	image->clear_value_offset = image->htile_offset + image->surface.htile_size;
	image->size = image->clear_value_offset + 8;
	image->alignment = align64(image->alignment, image->surface.htile_alignment);
}

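/* Common image creation path: allocates the radv_image, computes the surface
 * layout and appends DCC/CMASK/FMASK/HTILE metadata as applicable. */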
VkResult
radv_image_create(VkDevice _device,
		  const struct radv_image_create_info *create_info,
		  const VkAllocationCallbacks* alloc,
		  VkImage *pImage)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
	struct radv_image *image = NULL;
	bool can_cmask_dcc = false;
	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);

	radv_assert(pCreateInfo->mipLevels > 0);
	radv_assert(pCreateInfo->arrayLayers > 0);
	radv_assert(pCreateInfo->samples > 0);
	radv_assert(pCreateInfo->extent.width > 0);
	radv_assert(pCreateInfo->extent.height > 0);
	radv_assert(pCreateInfo->extent.depth > 0);

	image = vk_alloc2(&device->alloc, alloc, sizeof(*image), 8,
			  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!image)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(image, 0, sizeof(*image));
	image->type = pCreateInfo->imageType;
	image->info.width = pCreateInfo->extent.width;
	image->info.height = pCreateInfo->extent.height;
	image->info.depth = pCreateInfo->extent.depth;
	image->info.samples = pCreateInfo->samples;
	image->info.array_size = pCreateInfo->arrayLayers;
	image->info.levels = pCreateInfo->mipLevels;

	image->vk_format = pCreateInfo->format;
	image->tiling = pCreateInfo->tiling;
	image->usage = pCreateInfo->usage;
	image->flags = pCreateInfo->flags;

	image->exclusive = pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE;
	if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) {
		for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; ++i)
			if (pCreateInfo->pQueueFamilyIndices[i] == VK_QUEUE_FAMILY_EXTERNAL_KHR)
				image->queue_family_mask |= (1u << RADV_MAX_QUEUE_FAMILIES) - 1u;
			else
				image->queue_family_mask |= 1u << pCreateInfo->pQueueFamilyIndices[i];
	}

	image->shareable = vk_find_struct_const(pCreateInfo->pNext,
						EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR) != NULL;
	if (!vk_format_is_depth(pCreateInfo->format) && !create_info->scanout && !image->shareable) {
		image->info.surf_index = p_atomic_inc_return(&device->image_mrt_offset_counter) - 1;
	}

	radv_init_surface(device, &image->surface, create_info);

	device->ws->surface_init(device->ws, &image->info, &image->surface);

	image->size = image->surface.surf_size;
	image->alignment = image->surface.surf_alignment;

	if (image->exclusive || image->queue_family_mask == 1)
		can_cmask_dcc = true;

	if ((pCreateInfo->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) &&
	    image->surface.dcc_size && can_cmask_dcc)
		radv_image_alloc_dcc(device, image);
	else
		image->surface.dcc_size = 0;

	if ((pCreateInfo->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) &&
	    pCreateInfo->mipLevels == 1 &&
	    !image->surface.dcc_size && image->info.depth == 1 && can_cmask_dcc)
		radv_image_alloc_cmask(device, image);
	if (image->info.samples > 1 && vk_format_is_color(pCreateInfo->format)) {
		radv_image_alloc_fmask(device, image);
	} else if (vk_format_is_depth(pCreateInfo->format)) {

		radv_image_alloc_htile(device, image);
	}

	if (pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) {
		image->alignment = MAX2(image->alignment, 4096);
		image->size = align64(image->size, image->alignment);
		image->offset = 0;

		image->bo = device->ws->buffer_create(device->ws, image->size, image->alignment,
		                                      0, RADEON_FLAG_VIRTUAL);
		if (!image->bo) {
			vk_free2(&device->alloc, alloc, image);
			return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
		}
	}

	*pImage = radv_image_to_handle(image);

	return VK_SUCCESS;
}

static void
radv_image_view_make_descriptor(struct radv_image_view *iview,
				struct radv_device *device,
				const VkImageViewCreateInfo* pCreateInfo,
				bool is_storage_image)
{
	RADV_FROM_HANDLE(radv_image, image, pCreateInfo->image);
	const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;
	bool is_stencil = iview->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT;
	uint32_t blk_w;
	uint32_t *descriptor;
	uint32_t *fmask_descriptor;

	if (is_storage_image) {
		descriptor = iview->storage_descriptor;
		fmask_descriptor = iview->storage_fmask_descriptor;
	} else {
		descriptor = iview->descriptor;
		fmask_descriptor = iview->fmask_descriptor;
	}

	assert(image->surface.blk_w % vk_format_get_blockwidth(image->vk_format) == 0);
	blk_w = image->surface.blk_w / vk_format_get_blockwidth(image->vk_format) * vk_format_get_blockwidth(iview->vk_format);

	si_make_texture_descriptor(device, image, is_storage_image,
				   iview->type,
				   iview->vk_format,
				   &pCreateInfo->components,
				   0, radv_get_levelCount(image, range) - 1,
				   range->baseArrayLayer,
				   range->baseArrayLayer + radv_get_layerCount(image, range) - 1,
				   iview->extent.width,
				   iview->extent.height,
				   iview->extent.depth,
				   descriptor,
				   fmask_descriptor);
	si_set_mutable_tex_desc_fields(device, image,
				       is_stencil ? &image->surface.u.legacy.stencil_level[range->baseMipLevel]
						  : &image->surface.u.legacy.level[range->baseMipLevel],
				       range->baseMipLevel,
				       range->baseMipLevel,
				       blk_w, is_stencil, descriptor);
}

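/* Initialize an image view: resolve format/extent per aspect and mip level and
 * build both the sampled-image and storage-image descriptors. */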
void
radv_image_view_init(struct radv_image_view *iview,
		     struct radv_device *device,
		     const VkImageViewCreateInfo* pCreateInfo)
{
	RADV_FROM_HANDLE(radv_image, image, pCreateInfo->image);
	const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;

	switch (image->type) {
	case VK_IMAGE_TYPE_1D:
	case VK_IMAGE_TYPE_2D:
		assert(range->baseArrayLayer + radv_get_layerCount(image, range) - 1 <= image->info.array_size);
		break;
	case VK_IMAGE_TYPE_3D:
		assert(range->baseArrayLayer + radv_get_layerCount(image, range) - 1
		       <= radv_minify(image->info.depth, range->baseMipLevel));
		break;
	default:
		unreachable("bad VkImageType");
	}
	iview->image = image;
	iview->bo = image->bo;
	iview->type = pCreateInfo->viewType;
	iview->vk_format = pCreateInfo->format;
	iview->aspect_mask = pCreateInfo->subresourceRange.aspectMask;

	if (iview->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
		iview->vk_format = vk_format_stencil_only(iview->vk_format);
	} else if (iview->aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
		iview->vk_format = vk_format_depth_only(iview->vk_format);
	}

	iview->extent = (VkExtent3D) {
		.width  = radv_minify(image->info.width , range->baseMipLevel),
		.height = radv_minify(image->info.height, range->baseMipLevel),
		.depth  = radv_minify(image->info.depth , range->baseMipLevel),
	};

	iview->extent.width = round_up_u32(iview->extent.width * vk_format_get_blockwidth(iview->vk_format),
					   vk_format_get_blockwidth(image->vk_format));
	iview->extent.height = round_up_u32(iview->extent.height * vk_format_get_blockheight(iview->vk_format),
					    vk_format_get_blockheight(image->vk_format));

	iview->base_layer = range->baseArrayLayer;
	iview->layer_count = radv_get_layerCount(image, range);
	iview->base_mip = range->baseMipLevel;

	radv_image_view_make_descriptor(iview, device, pCreateInfo, false);
	radv_image_view_make_descriptor(iview, device, pCreateInfo, true);
}

bool radv_layout_has_htile(const struct radv_image *image,
			   VkImageLayout layout,
			   unsigned queue_mask)
{
	return image->surface.htile_size &&
	       (layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
		layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) &&
	       queue_mask == (1u << RADV_QUEUE_GENERAL);
}

bool radv_layout_is_htile_compressed(const struct radv_image *image,
				     VkImageLayout layout,
				     unsigned queue_mask)
{
	return image->surface.htile_size &&
	       (layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
		layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) &&
	       queue_mask == (1u << RADV_QUEUE_GENERAL);
}

bool radv_layout_can_fast_clear(const struct radv_image *image,
			        VkImageLayout layout,
			        unsigned queue_mask)
{
	return layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL &&
	       queue_mask == (1u << RADV_QUEUE_GENERAL);
}

unsigned radv_image_queue_family_mask(const struct radv_image *image, uint32_t family, uint32_t queue_family)
{
	if (!image->exclusive)
		return image->queue_family_mask;
	if (family == VK_QUEUE_FAMILY_EXTERNAL_KHR)
		return (1u << RADV_MAX_QUEUE_FAMILIES) - 1u;
	if (family == VK_QUEUE_FAMILY_IGNORED)
		return 1u << queue_family;
	return 1u << family;
}

VkResult
radv_CreateImage(VkDevice device,
		 const VkImageCreateInfo *pCreateInfo,
		 const VkAllocationCallbacks *pAllocator,
		 VkImage *pImage)
{
	return radv_image_create(device,
				 &(struct radv_image_create_info) {
					 .vk_info = pCreateInfo,
					 .scanout = false,
				 },
				 pAllocator,
				 pImage);
}

void
radv_DestroyImage(VkDevice _device, VkImage _image,
		  const VkAllocationCallbacks *pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_image, image, _image);

	if (!image)
		return;

	if (image->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)
		device->ws->buffer_destroy(image->bo);

	vk_free2(&device->alloc, pAllocator, image);
}

void radv_GetImageSubresourceLayout(
	VkDevice                                    _device,
	VkImage                                     _image,
	const VkImageSubresource*                   pSubresource,
	VkSubresourceLayout*                        pLayout)
{
	RADV_FROM_HANDLE(radv_image, image, _image);
	int level = pSubresource->mipLevel;
	int layer = pSubresource->arrayLayer;
	struct radeon_surf *surface = &image->surface;

	pLayout->offset = surface->u.legacy.level[level].offset + surface->u.legacy.level[level].slice_size * layer;
	pLayout->rowPitch = surface->u.legacy.level[level].nblk_x * surface->bpe;
	pLayout->arrayPitch = surface->u.legacy.level[level].slice_size;
	pLayout->depthPitch = surface->u.legacy.level[level].slice_size;
	pLayout->size = surface->u.legacy.level[level].slice_size;
	if (image->type == VK_IMAGE_TYPE_3D)
		pLayout->size *= u_minify(image->info.depth, level);
}

VkResult
radv_CreateImageView(VkDevice _device,
		     const VkImageViewCreateInfo *pCreateInfo,
		     const VkAllocationCallbacks *pAllocator,
		     VkImageView *pView)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_image_view *view;

	view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (view == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	radv_image_view_init(view, device, pCreateInfo);

	*pView = radv_image_view_to_handle(view);

	return VK_SUCCESS;
}

void
radv_DestroyImageView(VkDevice _device, VkImageView _iview,
		      const VkAllocationCallbacks *pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_image_view, iview, _iview);

	if (!iview)
		return;
	vk_free2(&device->alloc, pAllocator, iview);
}

void radv_buffer_view_init(struct radv_buffer_view *view,
			   struct radv_device *device,
			   const VkBufferViewCreateInfo* pCreateInfo,
			   struct radv_cmd_buffer *cmd_buffer)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, pCreateInfo->buffer);

	view->bo = buffer->bo;
	view->range = pCreateInfo->range == VK_WHOLE_SIZE ?
		buffer->size - pCreateInfo->offset : pCreateInfo->range;
	view->vk_format = pCreateInfo->format;

	radv_make_buffer_descriptor(device, buffer, view->vk_format,
				    pCreateInfo->offset, view->range, view->state);
}

VkResult
radv_CreateBufferView(VkDevice _device,
		      const VkBufferViewCreateInfo *pCreateInfo,
		      const VkAllocationCallbacks *pAllocator,
		      VkBufferView *pView)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_buffer_view *view;

	view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!view)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	radv_buffer_view_init(view, device, pCreateInfo, NULL);

	*pView = radv_buffer_view_to_handle(view);

	return VK_SUCCESS;
}

void
radv_DestroyBufferView(VkDevice _device, VkBufferView bufferView,
		       const VkAllocationCallbacks *pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_buffer_view, view, bufferView);

	if (!view)
		return;

	vk_free2(&device->alloc, pAllocator, view);
}