2 * Copyright © 2011 Red Hat All Rights Reserved.
3 * Copyright © 2017 Advanced Micro Devices, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
28 #include "ac_surface.h"
29 #include "amd_family.h"
30 #include "addrlib/src/amdgpu_asic_addr.h"
31 #include "ac_gpu_info.h"
32 #include "util/hash_table.h"
33 #include "util/macros.h"
34 #include "util/simple_mtx.h"
35 #include "util/u_atomic.h"
36 #include "util/u_math.h"
37 #include "util/u_memory.h"
44 #include "drm-uapi/amdgpu_drm.h"
46 #include "addrlib/inc/addrinterface.h"
48 #ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
49 #define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
52 #ifndef CIASICIDGFXENGINE_ARCTICISLAND
53 #define CIASICIDGFXENGINE_ARCTICISLAND 0x0000000D
59 /* The cache of DCC retile maps for reuse when allocating images of
62 simple_mtx_t dcc_retile_map_lock
;
63 struct hash_table
*dcc_retile_maps
;
66 struct dcc_retile_map_key
{
67 enum radeon_family family
;
68 unsigned retile_width
;
69 unsigned retile_height
;
72 unsigned dcc_retile_num_elements
;
73 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT input
;
76 static uint32_t dcc_retile_map_hash_key(const void *key
)
78 return _mesa_hash_data(key
, sizeof(struct dcc_retile_map_key
));
81 static bool dcc_retile_map_keys_equal(const void *a
, const void *b
)
83 return memcmp(a
, b
, sizeof(struct dcc_retile_map_key
)) == 0;
86 static void dcc_retile_map_free(struct hash_entry
*entry
)
88 free((void*)entry
->key
);
92 static uint32_t *ac_compute_dcc_retile_map(struct ac_addrlib
*addrlib
,
93 const struct radeon_info
*info
,
94 unsigned retile_width
, unsigned retile_height
,
95 bool rb_aligned
, bool pipe_aligned
, bool use_uint16
,
96 unsigned dcc_retile_num_elements
,
97 const ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT
*in
)
99 unsigned dcc_retile_map_size
= dcc_retile_num_elements
* (use_uint16
? 2 : 4);
100 struct dcc_retile_map_key key
;
102 assert(in
->numFrags
== 1 && in
->numSlices
== 1 && in
->numMipLevels
== 1);
104 memset(&key
, 0, sizeof(key
));
105 key
.family
= info
->family
;
106 key
.retile_width
= retile_width
;
107 key
.retile_height
= retile_height
;
108 key
.rb_aligned
= rb_aligned
;
109 key
.pipe_aligned
= pipe_aligned
;
110 key
.dcc_retile_num_elements
= dcc_retile_num_elements
;
111 memcpy(&key
.input
, in
, sizeof(*in
));
113 simple_mtx_lock(&addrlib
->dcc_retile_map_lock
);
115 /* If we have already computed this retile map, get it from the hash table. */
116 struct hash_entry
*entry
= _mesa_hash_table_search(addrlib
->dcc_retile_maps
, &key
);
118 uint32_t *map
= entry
->data
;
119 simple_mtx_unlock(&addrlib
->dcc_retile_map_lock
);
123 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin
;
124 memcpy(&addrin
, in
, sizeof(*in
));
126 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_OUTPUT addrout
= {};
127 addrout
.size
= sizeof(addrout
);
129 void *dcc_retile_map
= malloc(dcc_retile_map_size
);
130 if (!dcc_retile_map
) {
131 simple_mtx_unlock(&addrlib
->dcc_retile_map_lock
);
137 for (unsigned y
= 0; y
< retile_height
; y
+= in
->compressBlkHeight
) {
140 for (unsigned x
= 0; x
< retile_width
; x
+= in
->compressBlkWidth
) {
143 /* Compute src DCC address */
144 addrin
.dccKeyFlags
.pipeAligned
= pipe_aligned
;
145 addrin
.dccKeyFlags
.rbAligned
= rb_aligned
;
148 if (Addr2ComputeDccAddrFromCoord(addrlib
->handle
, &addrin
, &addrout
) != ADDR_OK
) {
149 simple_mtx_unlock(&addrlib
->dcc_retile_map_lock
);
154 ((uint16_t*)dcc_retile_map
)[index
* 2] = addrout
.addr
;
156 ((uint32_t*)dcc_retile_map
)[index
* 2] = addrout
.addr
;
158 /* Compute dst DCC address */
159 addrin
.dccKeyFlags
.pipeAligned
= 0;
160 addrin
.dccKeyFlags
.rbAligned
= 0;
163 if (Addr2ComputeDccAddrFromCoord(addrlib
->handle
, &addrin
, &addrout
) != ADDR_OK
) {
164 simple_mtx_unlock(&addrlib
->dcc_retile_map_lock
);
169 ((uint16_t*)dcc_retile_map
)[index
* 2 + 1] = addrout
.addr
;
171 ((uint32_t*)dcc_retile_map
)[index
* 2 + 1] = addrout
.addr
;
173 assert(index
* 2 + 1 < dcc_retile_num_elements
);
177 /* Fill the remaining pairs with the last one (for the compute shader). */
178 for (unsigned i
= index
* 2; i
< dcc_retile_num_elements
; i
++) {
180 ((uint16_t*)dcc_retile_map
)[i
] = ((uint16_t*)dcc_retile_map
)[i
- 2];
182 ((uint32_t*)dcc_retile_map
)[i
] = ((uint32_t*)dcc_retile_map
)[i
- 2];
185 /* Insert the retile map into the hash table, so that it can be reused and
186 * the computation can be skipped for similar image sizes.
188 _mesa_hash_table_insert(addrlib
->dcc_retile_maps
,
189 mem_dup(&key
, sizeof(key
)), dcc_retile_map
);
191 simple_mtx_unlock(&addrlib
->dcc_retile_map_lock
);
192 return dcc_retile_map
;
195 static void *ADDR_API
allocSysMem(const ADDR_ALLOCSYSMEM_INPUT
* pInput
)
197 return malloc(pInput
->sizeInBytes
);
200 static ADDR_E_RETURNCODE ADDR_API
freeSysMem(const ADDR_FREESYSMEM_INPUT
* pInput
)
202 free(pInput
->pVirtAddr
);
206 struct ac_addrlib
*ac_addrlib_create(const struct radeon_info
*info
,
207 const struct amdgpu_gpu_info
*amdinfo
,
208 uint64_t *max_alignment
)
210 ADDR_CREATE_INPUT addrCreateInput
= {0};
211 ADDR_CREATE_OUTPUT addrCreateOutput
= {0};
212 ADDR_REGISTER_VALUE regValue
= {0};
213 ADDR_CREATE_FLAGS createFlags
= {{0}};
214 ADDR_GET_MAX_ALIGNMENTS_OUTPUT addrGetMaxAlignmentsOutput
= {0};
215 ADDR_E_RETURNCODE addrRet
;
217 addrCreateInput
.size
= sizeof(ADDR_CREATE_INPUT
);
218 addrCreateOutput
.size
= sizeof(ADDR_CREATE_OUTPUT
);
220 regValue
.gbAddrConfig
= amdinfo
->gb_addr_cfg
;
221 createFlags
.value
= 0;
223 addrCreateInput
.chipFamily
= info
->family_id
;
224 addrCreateInput
.chipRevision
= info
->chip_external_rev
;
226 if (addrCreateInput
.chipFamily
== FAMILY_UNKNOWN
)
229 if (addrCreateInput
.chipFamily
>= FAMILY_AI
) {
230 addrCreateInput
.chipEngine
= CIASICIDGFXENGINE_ARCTICISLAND
;
232 regValue
.noOfBanks
= amdinfo
->mc_arb_ramcfg
& 0x3;
233 regValue
.noOfRanks
= (amdinfo
->mc_arb_ramcfg
& 0x4) >> 2;
235 regValue
.backendDisables
= amdinfo
->enabled_rb_pipes_mask
;
236 regValue
.pTileConfig
= amdinfo
->gb_tile_mode
;
237 regValue
.noOfEntries
= ARRAY_SIZE(amdinfo
->gb_tile_mode
);
238 if (addrCreateInput
.chipFamily
== FAMILY_SI
) {
239 regValue
.pMacroTileConfig
= NULL
;
240 regValue
.noOfMacroEntries
= 0;
242 regValue
.pMacroTileConfig
= amdinfo
->gb_macro_tile_mode
;
243 regValue
.noOfMacroEntries
= ARRAY_SIZE(amdinfo
->gb_macro_tile_mode
);
246 createFlags
.useTileIndex
= 1;
247 createFlags
.useHtileSliceAlign
= 1;
249 addrCreateInput
.chipEngine
= CIASICIDGFXENGINE_SOUTHERNISLAND
;
252 addrCreateInput
.callbacks
.allocSysMem
= allocSysMem
;
253 addrCreateInput
.callbacks
.freeSysMem
= freeSysMem
;
254 addrCreateInput
.callbacks
.debugPrint
= 0;
255 addrCreateInput
.createFlags
= createFlags
;
256 addrCreateInput
.regValue
= regValue
;
258 addrRet
= AddrCreate(&addrCreateInput
, &addrCreateOutput
);
259 if (addrRet
!= ADDR_OK
)
263 addrRet
= AddrGetMaxAlignments(addrCreateOutput
.hLib
, &addrGetMaxAlignmentsOutput
);
264 if (addrRet
== ADDR_OK
){
265 *max_alignment
= addrGetMaxAlignmentsOutput
.baseAlign
;
269 struct ac_addrlib
*addrlib
= calloc(1, sizeof(struct ac_addrlib
));
271 AddrDestroy(addrCreateOutput
.hLib
);
275 addrlib
->handle
= addrCreateOutput
.hLib
;
276 simple_mtx_init(&addrlib
->dcc_retile_map_lock
, mtx_plain
);
277 addrlib
->dcc_retile_maps
= _mesa_hash_table_create(NULL
, dcc_retile_map_hash_key
,
278 dcc_retile_map_keys_equal
);
282 void ac_addrlib_destroy(struct ac_addrlib
*addrlib
)
284 AddrDestroy(addrlib
->handle
);
285 simple_mtx_destroy(&addrlib
->dcc_retile_map_lock
);
286 _mesa_hash_table_destroy(addrlib
->dcc_retile_maps
, dcc_retile_map_free
);
290 static int surf_config_sanity(const struct ac_surf_config
*config
,
293 /* FMASK is allocated together with the color surface and can't be
294 * allocated separately.
296 assert(!(flags
& RADEON_SURF_FMASK
));
297 if (flags
& RADEON_SURF_FMASK
)
300 /* all dimension must be at least 1 ! */
301 if (!config
->info
.width
|| !config
->info
.height
|| !config
->info
.depth
||
302 !config
->info
.array_size
|| !config
->info
.levels
)
305 switch (config
->info
.samples
) {
313 if (flags
& RADEON_SURF_Z_OR_SBUFFER
)
320 if (!(flags
& RADEON_SURF_Z_OR_SBUFFER
)) {
321 switch (config
->info
.storage_samples
) {
333 if (config
->is_3d
&& config
->info
.array_size
> 1)
335 if (config
->is_cube
&& config
->info
.depth
> 1)
341 static int gfx6_compute_level(ADDR_HANDLE addrlib
,
342 const struct ac_surf_config
*config
,
343 struct radeon_surf
*surf
, bool is_stencil
,
344 unsigned level
, bool compressed
,
345 ADDR_COMPUTE_SURFACE_INFO_INPUT
*AddrSurfInfoIn
,
346 ADDR_COMPUTE_SURFACE_INFO_OUTPUT
*AddrSurfInfoOut
,
347 ADDR_COMPUTE_DCCINFO_INPUT
*AddrDccIn
,
348 ADDR_COMPUTE_DCCINFO_OUTPUT
*AddrDccOut
,
349 ADDR_COMPUTE_HTILE_INFO_INPUT
*AddrHtileIn
,
350 ADDR_COMPUTE_HTILE_INFO_OUTPUT
*AddrHtileOut
)
352 struct legacy_surf_level
*surf_level
;
353 ADDR_E_RETURNCODE ret
;
355 AddrSurfInfoIn
->mipLevel
= level
;
356 AddrSurfInfoIn
->width
= u_minify(config
->info
.width
, level
);
357 AddrSurfInfoIn
->height
= u_minify(config
->info
.height
, level
);
359 /* Make GFX6 linear surfaces compatible with GFX9 for hybrid graphics,
360 * because GFX9 needs linear alignment of 256 bytes.
362 if (config
->info
.levels
== 1 &&
363 AddrSurfInfoIn
->tileMode
== ADDR_TM_LINEAR_ALIGNED
&&
364 AddrSurfInfoIn
->bpp
&&
365 util_is_power_of_two_or_zero(AddrSurfInfoIn
->bpp
)) {
366 unsigned alignment
= 256 / (AddrSurfInfoIn
->bpp
/ 8);
368 AddrSurfInfoIn
->width
= align(AddrSurfInfoIn
->width
, alignment
);
371 /* addrlib assumes the bytes/pixel is a divisor of 64, which is not
372 * true for r32g32b32 formats. */
373 if (AddrSurfInfoIn
->bpp
== 96) {
374 assert(config
->info
.levels
== 1);
375 assert(AddrSurfInfoIn
->tileMode
== ADDR_TM_LINEAR_ALIGNED
);
377 /* The least common multiple of 64 bytes and 12 bytes/pixel is
378 * 192 bytes, or 16 pixels. */
379 AddrSurfInfoIn
->width
= align(AddrSurfInfoIn
->width
, 16);
383 AddrSurfInfoIn
->numSlices
= u_minify(config
->info
.depth
, level
);
384 else if (config
->is_cube
)
385 AddrSurfInfoIn
->numSlices
= 6;
387 AddrSurfInfoIn
->numSlices
= config
->info
.array_size
;
390 /* Set the base level pitch. This is needed for calculation
391 * of non-zero levels. */
393 AddrSurfInfoIn
->basePitch
= surf
->u
.legacy
.stencil_level
[0].nblk_x
;
395 AddrSurfInfoIn
->basePitch
= surf
->u
.legacy
.level
[0].nblk_x
;
397 /* Convert blocks to pixels for compressed formats. */
399 AddrSurfInfoIn
->basePitch
*= surf
->blk_w
;
402 ret
= AddrComputeSurfaceInfo(addrlib
,
405 if (ret
!= ADDR_OK
) {
409 surf_level
= is_stencil
? &surf
->u
.legacy
.stencil_level
[level
] : &surf
->u
.legacy
.level
[level
];
410 surf_level
->offset
= align64(surf
->surf_size
, AddrSurfInfoOut
->baseAlign
);
411 surf_level
->slice_size_dw
= AddrSurfInfoOut
->sliceSize
/ 4;
412 surf_level
->nblk_x
= AddrSurfInfoOut
->pitch
;
413 surf_level
->nblk_y
= AddrSurfInfoOut
->height
;
415 switch (AddrSurfInfoOut
->tileMode
) {
416 case ADDR_TM_LINEAR_ALIGNED
:
417 surf_level
->mode
= RADEON_SURF_MODE_LINEAR_ALIGNED
;
419 case ADDR_TM_1D_TILED_THIN1
:
420 surf_level
->mode
= RADEON_SURF_MODE_1D
;
422 case ADDR_TM_2D_TILED_THIN1
:
423 surf_level
->mode
= RADEON_SURF_MODE_2D
;
430 surf
->u
.legacy
.stencil_tiling_index
[level
] = AddrSurfInfoOut
->tileIndex
;
432 surf
->u
.legacy
.tiling_index
[level
] = AddrSurfInfoOut
->tileIndex
;
434 surf
->surf_size
= surf_level
->offset
+ AddrSurfInfoOut
->surfSize
;
436 /* Clear DCC fields at the beginning. */
437 surf_level
->dcc_offset
= 0;
439 /* The previous level's flag tells us if we can use DCC for this level. */
440 if (AddrSurfInfoIn
->flags
.dccCompatible
&&
441 (level
== 0 || AddrDccOut
->subLvlCompressible
)) {
442 bool prev_level_clearable
= level
== 0 ||
443 AddrDccOut
->dccRamSizeAligned
;
445 AddrDccIn
->colorSurfSize
= AddrSurfInfoOut
->surfSize
;
446 AddrDccIn
->tileMode
= AddrSurfInfoOut
->tileMode
;
447 AddrDccIn
->tileInfo
= *AddrSurfInfoOut
->pTileInfo
;
448 AddrDccIn
->tileIndex
= AddrSurfInfoOut
->tileIndex
;
449 AddrDccIn
->macroModeIndex
= AddrSurfInfoOut
->macroModeIndex
;
451 ret
= AddrComputeDccInfo(addrlib
,
455 if (ret
== ADDR_OK
) {
456 surf_level
->dcc_offset
= surf
->dcc_size
;
457 surf
->num_dcc_levels
= level
+ 1;
458 surf
->dcc_size
= surf_level
->dcc_offset
+ AddrDccOut
->dccRamSize
;
459 surf
->dcc_alignment
= MAX2(surf
->dcc_alignment
, AddrDccOut
->dccRamBaseAlign
);
461 /* If the DCC size of a subresource (1 mip level or 1 slice)
462 * is not aligned, the DCC memory layout is not contiguous for
463 * that subresource, which means we can't use fast clear.
465 * We only do fast clears for whole mipmap levels. If we did
466 * per-slice fast clears, the same restriction would apply.
467 * (i.e. only compute the slice size and see if it's aligned)
469 * The last level can be non-contiguous and still be clearable
470 * if it's interleaved with the next level that doesn't exist.
472 if (AddrDccOut
->dccRamSizeAligned
||
473 (prev_level_clearable
&& level
== config
->info
.levels
- 1))
474 surf_level
->dcc_fast_clear_size
= AddrDccOut
->dccFastClearSize
;
476 surf_level
->dcc_fast_clear_size
= 0;
478 /* Compute the DCC slice size because addrlib doesn't
479 * provide this info. As DCC memory is linear (each
480 * slice is the same size) it's easy to compute.
482 surf
->dcc_slice_size
= AddrDccOut
->dccRamSize
/ config
->info
.array_size
;
484 /* For arrays, we have to compute the DCC info again
485 * with one slice size to get a correct fast clear
488 if (config
->info
.array_size
> 1) {
489 AddrDccIn
->colorSurfSize
= AddrSurfInfoOut
->sliceSize
;
490 AddrDccIn
->tileMode
= AddrSurfInfoOut
->tileMode
;
491 AddrDccIn
->tileInfo
= *AddrSurfInfoOut
->pTileInfo
;
492 AddrDccIn
->tileIndex
= AddrSurfInfoOut
->tileIndex
;
493 AddrDccIn
->macroModeIndex
= AddrSurfInfoOut
->macroModeIndex
;
495 ret
= AddrComputeDccInfo(addrlib
,
496 AddrDccIn
, AddrDccOut
);
497 if (ret
== ADDR_OK
) {
498 /* If the DCC memory isn't properly
499 * aligned, the data are interleaved
502 if (AddrDccOut
->dccRamSizeAligned
)
503 surf_level
->dcc_slice_fast_clear_size
= AddrDccOut
->dccFastClearSize
;
505 surf_level
->dcc_slice_fast_clear_size
= 0;
508 if (surf
->flags
& RADEON_SURF_CONTIGUOUS_DCC_LAYERS
&&
509 surf
->dcc_slice_size
!= surf_level
->dcc_slice_fast_clear_size
) {
511 surf
->num_dcc_levels
= 0;
512 AddrDccOut
->subLvlCompressible
= false;
515 surf_level
->dcc_slice_fast_clear_size
= surf_level
->dcc_fast_clear_size
;
522 AddrSurfInfoIn
->flags
.depth
&&
523 surf_level
->mode
== RADEON_SURF_MODE_2D
&&
525 !(surf
->flags
& RADEON_SURF_NO_HTILE
)) {
526 AddrHtileIn
->flags
.tcCompatible
= AddrSurfInfoOut
->tcCompatible
;
527 AddrHtileIn
->pitch
= AddrSurfInfoOut
->pitch
;
528 AddrHtileIn
->height
= AddrSurfInfoOut
->height
;
529 AddrHtileIn
->numSlices
= AddrSurfInfoOut
->depth
;
530 AddrHtileIn
->blockWidth
= ADDR_HTILE_BLOCKSIZE_8
;
531 AddrHtileIn
->blockHeight
= ADDR_HTILE_BLOCKSIZE_8
;
532 AddrHtileIn
->pTileInfo
= AddrSurfInfoOut
->pTileInfo
;
533 AddrHtileIn
->tileIndex
= AddrSurfInfoOut
->tileIndex
;
534 AddrHtileIn
->macroModeIndex
= AddrSurfInfoOut
->macroModeIndex
;
536 ret
= AddrComputeHtileInfo(addrlib
,
540 if (ret
== ADDR_OK
) {
541 surf
->htile_size
= AddrHtileOut
->htileBytes
;
542 surf
->htile_slice_size
= AddrHtileOut
->sliceSize
;
543 surf
->htile_alignment
= AddrHtileOut
->baseAlign
;
550 static void gfx6_set_micro_tile_mode(struct radeon_surf
*surf
,
551 const struct radeon_info
*info
)
553 uint32_t tile_mode
= info
->si_tile_mode_array
[surf
->u
.legacy
.tiling_index
[0]];
555 if (info
->chip_class
>= GFX7
)
556 surf
->micro_tile_mode
= G_009910_MICRO_TILE_MODE_NEW(tile_mode
);
558 surf
->micro_tile_mode
= G_009910_MICRO_TILE_MODE(tile_mode
);
561 static unsigned cik_get_macro_tile_index(struct radeon_surf
*surf
)
563 unsigned index
, tileb
;
565 tileb
= 8 * 8 * surf
->bpe
;
566 tileb
= MIN2(surf
->u
.legacy
.tile_split
, tileb
);
568 for (index
= 0; tileb
> 64; index
++)
575 static bool get_display_flag(const struct ac_surf_config
*config
,
576 const struct radeon_surf
*surf
)
578 unsigned num_channels
= config
->info
.num_channels
;
579 unsigned bpe
= surf
->bpe
;
581 if (!config
->is_3d
&&
583 !(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
) &&
584 surf
->flags
& RADEON_SURF_SCANOUT
&&
585 config
->info
.samples
<= 1 &&
586 surf
->blk_w
<= 2 && surf
->blk_h
== 1) {
588 if (surf
->blk_w
== 2 && surf
->blk_h
== 1)
591 if (/* RGBA8 or RGBA16F */
592 (bpe
>= 4 && bpe
<= 8 && num_channels
== 4) ||
593 /* R5G6B5 or R5G5B5A1 */
594 (bpe
== 2 && num_channels
>= 3) ||
596 (bpe
== 1 && num_channels
== 1))
603 * This must be called after the first level is computed.
605 * Copy surface-global settings like pipe/bank config from level 0 surface
606 * computation, and compute tile swizzle.
608 static int gfx6_surface_settings(ADDR_HANDLE addrlib
,
609 const struct radeon_info
*info
,
610 const struct ac_surf_config
*config
,
611 ADDR_COMPUTE_SURFACE_INFO_OUTPUT
* csio
,
612 struct radeon_surf
*surf
)
614 surf
->surf_alignment
= csio
->baseAlign
;
615 surf
->u
.legacy
.pipe_config
= csio
->pTileInfo
->pipeConfig
- 1;
616 gfx6_set_micro_tile_mode(surf
, info
);
618 /* For 2D modes only. */
619 if (csio
->tileMode
>= ADDR_TM_2D_TILED_THIN1
) {
620 surf
->u
.legacy
.bankw
= csio
->pTileInfo
->bankWidth
;
621 surf
->u
.legacy
.bankh
= csio
->pTileInfo
->bankHeight
;
622 surf
->u
.legacy
.mtilea
= csio
->pTileInfo
->macroAspectRatio
;
623 surf
->u
.legacy
.tile_split
= csio
->pTileInfo
->tileSplitBytes
;
624 surf
->u
.legacy
.num_banks
= csio
->pTileInfo
->banks
;
625 surf
->u
.legacy
.macro_tile_index
= csio
->macroModeIndex
;
627 surf
->u
.legacy
.macro_tile_index
= 0;
630 /* Compute tile swizzle. */
631 /* TODO: fix tile swizzle with mipmapping for GFX6 */
632 if ((info
->chip_class
>= GFX7
|| config
->info
.levels
== 1) &&
633 config
->info
.surf_index
&&
634 surf
->u
.legacy
.level
[0].mode
== RADEON_SURF_MODE_2D
&&
635 !(surf
->flags
& (RADEON_SURF_Z_OR_SBUFFER
| RADEON_SURF_SHAREABLE
)) &&
636 !get_display_flag(config
, surf
)) {
637 ADDR_COMPUTE_BASE_SWIZZLE_INPUT AddrBaseSwizzleIn
= {0};
638 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT AddrBaseSwizzleOut
= {0};
640 AddrBaseSwizzleIn
.size
= sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT
);
641 AddrBaseSwizzleOut
.size
= sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT
);
643 AddrBaseSwizzleIn
.surfIndex
= p_atomic_inc_return(config
->info
.surf_index
) - 1;
644 AddrBaseSwizzleIn
.tileIndex
= csio
->tileIndex
;
645 AddrBaseSwizzleIn
.macroModeIndex
= csio
->macroModeIndex
;
646 AddrBaseSwizzleIn
.pTileInfo
= csio
->pTileInfo
;
647 AddrBaseSwizzleIn
.tileMode
= csio
->tileMode
;
649 int r
= AddrComputeBaseSwizzle(addrlib
, &AddrBaseSwizzleIn
,
650 &AddrBaseSwizzleOut
);
654 assert(AddrBaseSwizzleOut
.tileSwizzle
<=
655 u_bit_consecutive(0, sizeof(surf
->tile_swizzle
) * 8));
656 surf
->tile_swizzle
= AddrBaseSwizzleOut
.tileSwizzle
;
661 static void ac_compute_cmask(const struct radeon_info
*info
,
662 const struct ac_surf_config
*config
,
663 struct radeon_surf
*surf
)
665 unsigned pipe_interleave_bytes
= info
->pipe_interleave_bytes
;
666 unsigned num_pipes
= info
->num_tile_pipes
;
667 unsigned cl_width
, cl_height
;
669 if (surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
|| surf
->is_linear
||
670 (config
->info
.samples
>= 2 && !surf
->fmask_size
))
673 assert(info
->chip_class
<= GFX8
);
688 case 16: /* Hawaii */
697 unsigned base_align
= num_pipes
* pipe_interleave_bytes
;
699 unsigned width
= align(surf
->u
.legacy
.level
[0].nblk_x
, cl_width
*8);
700 unsigned height
= align(surf
->u
.legacy
.level
[0].nblk_y
, cl_height
*8);
701 unsigned slice_elements
= (width
* height
) / (8*8);
703 /* Each element of CMASK is a nibble. */
704 unsigned slice_bytes
= slice_elements
/ 2;
706 surf
->u
.legacy
.cmask_slice_tile_max
= (width
* height
) / (128*128);
707 if (surf
->u
.legacy
.cmask_slice_tile_max
)
708 surf
->u
.legacy
.cmask_slice_tile_max
-= 1;
712 num_layers
= config
->info
.depth
;
713 else if (config
->is_cube
)
716 num_layers
= config
->info
.array_size
;
718 surf
->cmask_alignment
= MAX2(256, base_align
);
719 surf
->cmask_slice_size
= align(slice_bytes
, base_align
);
720 surf
->cmask_size
= surf
->cmask_slice_size
* num_layers
;
724 * Fill in the tiling information in \p surf based on the given surface config.
726 * The following fields of \p surf must be initialized by the caller:
727 * blk_w, blk_h, bpe, flags.
729 static int gfx6_compute_surface(ADDR_HANDLE addrlib
,
730 const struct radeon_info
*info
,
731 const struct ac_surf_config
*config
,
732 enum radeon_surf_mode mode
,
733 struct radeon_surf
*surf
)
737 ADDR_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn
= {0};
738 ADDR_COMPUTE_SURFACE_INFO_OUTPUT AddrSurfInfoOut
= {0};
739 ADDR_COMPUTE_DCCINFO_INPUT AddrDccIn
= {0};
740 ADDR_COMPUTE_DCCINFO_OUTPUT AddrDccOut
= {0};
741 ADDR_COMPUTE_HTILE_INFO_INPUT AddrHtileIn
= {0};
742 ADDR_COMPUTE_HTILE_INFO_OUTPUT AddrHtileOut
= {0};
743 ADDR_TILEINFO AddrTileInfoIn
= {0};
744 ADDR_TILEINFO AddrTileInfoOut
= {0};
747 AddrSurfInfoIn
.size
= sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT
);
748 AddrSurfInfoOut
.size
= sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT
);
749 AddrDccIn
.size
= sizeof(ADDR_COMPUTE_DCCINFO_INPUT
);
750 AddrDccOut
.size
= sizeof(ADDR_COMPUTE_DCCINFO_OUTPUT
);
751 AddrHtileIn
.size
= sizeof(ADDR_COMPUTE_HTILE_INFO_INPUT
);
752 AddrHtileOut
.size
= sizeof(ADDR_COMPUTE_HTILE_INFO_OUTPUT
);
753 AddrSurfInfoOut
.pTileInfo
= &AddrTileInfoOut
;
755 compressed
= surf
->blk_w
== 4 && surf
->blk_h
== 4;
757 /* MSAA requires 2D tiling. */
758 if (config
->info
.samples
> 1)
759 mode
= RADEON_SURF_MODE_2D
;
761 /* DB doesn't support linear layouts. */
762 if (surf
->flags
& (RADEON_SURF_Z_OR_SBUFFER
) &&
763 mode
< RADEON_SURF_MODE_1D
)
764 mode
= RADEON_SURF_MODE_1D
;
766 /* Set the requested tiling mode. */
768 case RADEON_SURF_MODE_LINEAR_ALIGNED
:
769 AddrSurfInfoIn
.tileMode
= ADDR_TM_LINEAR_ALIGNED
;
771 case RADEON_SURF_MODE_1D
:
772 AddrSurfInfoIn
.tileMode
= ADDR_TM_1D_TILED_THIN1
;
774 case RADEON_SURF_MODE_2D
:
775 AddrSurfInfoIn
.tileMode
= ADDR_TM_2D_TILED_THIN1
;
781 /* The format must be set correctly for the allocation of compressed
782 * textures to work. In other cases, setting the bpp is sufficient.
787 AddrSurfInfoIn
.format
= ADDR_FMT_BC1
;
790 AddrSurfInfoIn
.format
= ADDR_FMT_BC3
;
797 AddrDccIn
.bpp
= AddrSurfInfoIn
.bpp
= surf
->bpe
* 8;
800 AddrDccIn
.numSamples
= AddrSurfInfoIn
.numSamples
=
801 MAX2(1, config
->info
.samples
);
802 AddrSurfInfoIn
.tileIndex
= -1;
804 if (!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
)) {
805 AddrDccIn
.numSamples
= AddrSurfInfoIn
.numFrags
=
806 MAX2(1, config
->info
.storage_samples
);
809 /* Set the micro tile type. */
810 if (surf
->flags
& RADEON_SURF_SCANOUT
)
811 AddrSurfInfoIn
.tileType
= ADDR_DISPLAYABLE
;
812 else if (surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
)
813 AddrSurfInfoIn
.tileType
= ADDR_DEPTH_SAMPLE_ORDER
;
815 AddrSurfInfoIn
.tileType
= ADDR_NON_DISPLAYABLE
;
817 AddrSurfInfoIn
.flags
.color
= !(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
);
818 AddrSurfInfoIn
.flags
.depth
= (surf
->flags
& RADEON_SURF_ZBUFFER
) != 0;
819 AddrSurfInfoIn
.flags
.cube
= config
->is_cube
;
820 AddrSurfInfoIn
.flags
.display
= get_display_flag(config
, surf
);
821 AddrSurfInfoIn
.flags
.pow2Pad
= config
->info
.levels
> 1;
822 AddrSurfInfoIn
.flags
.tcCompatible
= (surf
->flags
& RADEON_SURF_TC_COMPATIBLE_HTILE
) != 0;
824 /* Only degrade the tile mode for space if TC-compatible HTILE hasn't been
825 * requested, because TC-compatible HTILE requires 2D tiling.
827 AddrSurfInfoIn
.flags
.opt4Space
= !AddrSurfInfoIn
.flags
.tcCompatible
&&
828 !AddrSurfInfoIn
.flags
.fmask
&&
829 config
->info
.samples
<= 1 &&
830 !(surf
->flags
& RADEON_SURF_FORCE_SWIZZLE_MODE
);
833 * - If we add MSAA support, keep in mind that CB can't decompress 8bpp
835 * - Mipmapped array textures have low performance (discovered by a closed
838 AddrSurfInfoIn
.flags
.dccCompatible
=
839 info
->chip_class
>= GFX8
&&
840 info
->has_graphics
&& /* disable DCC on compute-only chips */
841 !(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
) &&
842 !(surf
->flags
& RADEON_SURF_DISABLE_DCC
) &&
844 ((config
->info
.array_size
== 1 && config
->info
.depth
== 1) ||
845 config
->info
.levels
== 1);
847 AddrSurfInfoIn
.flags
.noStencil
= (surf
->flags
& RADEON_SURF_SBUFFER
) == 0;
848 AddrSurfInfoIn
.flags
.compressZ
= !!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
);
850 /* On GFX7-GFX8, the DB uses the same pitch and tile mode (except tilesplit)
851 * for Z and stencil. This can cause a number of problems which we work
854 * - a depth part that is incompatible with mipmapped texturing
855 * - at least on Stoney, entirely incompatible Z/S aspects (e.g.
856 * incorrect tiling applied to the stencil part, stencil buffer
857 * memory accesses that go out of bounds) even without mipmapping
859 * Some piglit tests that are prone to different types of related
861 * ./bin/ext_framebuffer_multisample-upsample 2 stencil
862 * ./bin/framebuffer-blit-levels {draw,read} stencil
863 * ./bin/ext_framebuffer_multisample-unaligned-blit N {depth,stencil} {msaa,upsample,downsample}
864 * ./bin/fbo-depth-array fs-writes-{depth,stencil} / {depth,stencil}-{clear,layered-clear,draw}
865 * ./bin/depthstencil-render-miplevels 1024 d=s=z24_s8
867 int stencil_tile_idx
= -1;
869 if (AddrSurfInfoIn
.flags
.depth
&& !AddrSurfInfoIn
.flags
.noStencil
&&
870 (config
->info
.levels
> 1 || info
->family
== CHIP_STONEY
)) {
871 /* Compute stencilTileIdx that is compatible with the (depth)
872 * tileIdx. This degrades the depth surface if necessary to
873 * ensure that a matching stencilTileIdx exists. */
874 AddrSurfInfoIn
.flags
.matchStencilTileCfg
= 1;
876 /* Keep the depth mip-tail compatible with texturing. */
877 AddrSurfInfoIn
.flags
.noStencil
= 1;
880 /* Set preferred macrotile parameters. This is usually required
881 * for shared resources. This is for 2D tiling only. */
882 if (AddrSurfInfoIn
.tileMode
>= ADDR_TM_2D_TILED_THIN1
&&
883 surf
->u
.legacy
.bankw
&& surf
->u
.legacy
.bankh
&&
884 surf
->u
.legacy
.mtilea
&& surf
->u
.legacy
.tile_split
) {
885 /* If any of these parameters are incorrect, the calculation
887 AddrTileInfoIn
.banks
= surf
->u
.legacy
.num_banks
;
888 AddrTileInfoIn
.bankWidth
= surf
->u
.legacy
.bankw
;
889 AddrTileInfoIn
.bankHeight
= surf
->u
.legacy
.bankh
;
890 AddrTileInfoIn
.macroAspectRatio
= surf
->u
.legacy
.mtilea
;
891 AddrTileInfoIn
.tileSplitBytes
= surf
->u
.legacy
.tile_split
;
892 AddrTileInfoIn
.pipeConfig
= surf
->u
.legacy
.pipe_config
+ 1; /* +1 compared to GB_TILE_MODE */
893 AddrSurfInfoIn
.flags
.opt4Space
= 0;
894 AddrSurfInfoIn
.pTileInfo
= &AddrTileInfoIn
;
896 /* If AddrSurfInfoIn.pTileInfo is set, Addrlib doesn't set
897 * the tile index, because we are expected to know it if
898 * we know the other parameters.
900 * This is something that can easily be fixed in Addrlib.
901 * For now, just figure it out here.
902 * Note that only 2D_TILE_THIN1 is handled here.
904 assert(!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
));
905 assert(AddrSurfInfoIn
.tileMode
== ADDR_TM_2D_TILED_THIN1
);
907 if (info
->chip_class
== GFX6
) {
908 if (AddrSurfInfoIn
.tileType
== ADDR_DISPLAYABLE
) {
910 AddrSurfInfoIn
.tileIndex
= 11; /* 16bpp */
912 AddrSurfInfoIn
.tileIndex
= 12; /* 32bpp */
915 AddrSurfInfoIn
.tileIndex
= 14; /* 8bpp */
916 else if (surf
->bpe
== 2)
917 AddrSurfInfoIn
.tileIndex
= 15; /* 16bpp */
918 else if (surf
->bpe
== 4)
919 AddrSurfInfoIn
.tileIndex
= 16; /* 32bpp */
921 AddrSurfInfoIn
.tileIndex
= 17; /* 64bpp (and 128bpp) */
925 if (AddrSurfInfoIn
.tileType
== ADDR_DISPLAYABLE
)
926 AddrSurfInfoIn
.tileIndex
= 10; /* 2D displayable */
928 AddrSurfInfoIn
.tileIndex
= 14; /* 2D non-displayable */
930 /* Addrlib doesn't set this if tileIndex is forced like above. */
931 AddrSurfInfoOut
.macroModeIndex
= cik_get_macro_tile_index(surf
);
935 surf
->has_stencil
= !!(surf
->flags
& RADEON_SURF_SBUFFER
);
936 surf
->num_dcc_levels
= 0;
939 surf
->dcc_alignment
= 1;
940 surf
->htile_size
= 0;
941 surf
->htile_slice_size
= 0;
942 surf
->htile_alignment
= 1;
944 const bool only_stencil
= (surf
->flags
& RADEON_SURF_SBUFFER
) &&
945 !(surf
->flags
& RADEON_SURF_ZBUFFER
);
947 /* Calculate texture layout information. */
949 for (level
= 0; level
< config
->info
.levels
; level
++) {
950 r
= gfx6_compute_level(addrlib
, config
, surf
, false, level
, compressed
,
951 &AddrSurfInfoIn
, &AddrSurfInfoOut
,
952 &AddrDccIn
, &AddrDccOut
, &AddrHtileIn
, &AddrHtileOut
);
959 if (!AddrSurfInfoOut
.tcCompatible
) {
960 AddrSurfInfoIn
.flags
.tcCompatible
= 0;
961 surf
->flags
&= ~RADEON_SURF_TC_COMPATIBLE_HTILE
;
964 if (AddrSurfInfoIn
.flags
.matchStencilTileCfg
) {
965 AddrSurfInfoIn
.flags
.matchStencilTileCfg
= 0;
966 AddrSurfInfoIn
.tileIndex
= AddrSurfInfoOut
.tileIndex
;
967 stencil_tile_idx
= AddrSurfInfoOut
.stencilTileIdx
;
969 assert(stencil_tile_idx
>= 0);
972 r
= gfx6_surface_settings(addrlib
, info
, config
,
973 &AddrSurfInfoOut
, surf
);
979 /* Calculate texture layout information for stencil. */
980 if (surf
->flags
& RADEON_SURF_SBUFFER
) {
981 AddrSurfInfoIn
.tileIndex
= stencil_tile_idx
;
982 AddrSurfInfoIn
.bpp
= 8;
983 AddrSurfInfoIn
.flags
.depth
= 0;
984 AddrSurfInfoIn
.flags
.stencil
= 1;
985 AddrSurfInfoIn
.flags
.tcCompatible
= 0;
986 /* This will be ignored if AddrSurfInfoIn.pTileInfo is NULL. */
987 AddrTileInfoIn
.tileSplitBytes
= surf
->u
.legacy
.stencil_tile_split
;
989 for (level
= 0; level
< config
->info
.levels
; level
++) {
990 r
= gfx6_compute_level(addrlib
, config
, surf
, true, level
, compressed
,
991 &AddrSurfInfoIn
, &AddrSurfInfoOut
,
992 &AddrDccIn
, &AddrDccOut
,
997 /* DB uses the depth pitch for both stencil and depth. */
999 if (surf
->u
.legacy
.stencil_level
[level
].nblk_x
!=
1000 surf
->u
.legacy
.level
[level
].nblk_x
)
1001 surf
->u
.legacy
.stencil_adjusted
= true;
1003 surf
->u
.legacy
.level
[level
].nblk_x
=
1004 surf
->u
.legacy
.stencil_level
[level
].nblk_x
;
1009 r
= gfx6_surface_settings(addrlib
, info
, config
,
1010 &AddrSurfInfoOut
, surf
);
1015 /* For 2D modes only. */
1016 if (AddrSurfInfoOut
.tileMode
>= ADDR_TM_2D_TILED_THIN1
) {
1017 surf
->u
.legacy
.stencil_tile_split
=
1018 AddrSurfInfoOut
.pTileInfo
->tileSplitBytes
;
1024 /* Compute FMASK. */
1025 if (config
->info
.samples
>= 2 && AddrSurfInfoIn
.flags
.color
&&
1026 info
->has_graphics
&& !(surf
->flags
& RADEON_SURF_NO_FMASK
)) {
1027 ADDR_COMPUTE_FMASK_INFO_INPUT fin
= {0};
1028 ADDR_COMPUTE_FMASK_INFO_OUTPUT fout
= {0};
1029 ADDR_TILEINFO fmask_tile_info
= {};
1031 fin
.size
= sizeof(fin
);
1032 fout
.size
= sizeof(fout
);
1034 fin
.tileMode
= AddrSurfInfoOut
.tileMode
;
1035 fin
.pitch
= AddrSurfInfoOut
.pitch
;
1036 fin
.height
= config
->info
.height
;
1037 fin
.numSlices
= AddrSurfInfoIn
.numSlices
;
1038 fin
.numSamples
= AddrSurfInfoIn
.numSamples
;
1039 fin
.numFrags
= AddrSurfInfoIn
.numFrags
;
1041 fout
.pTileInfo
= &fmask_tile_info
;
1043 r
= AddrComputeFmaskInfo(addrlib
, &fin
, &fout
);
1047 surf
->fmask_size
= fout
.fmaskBytes
;
1048 surf
->fmask_alignment
= fout
.baseAlign
;
1049 surf
->fmask_tile_swizzle
= 0;
1051 surf
->u
.legacy
.fmask
.slice_tile_max
=
1052 (fout
.pitch
* fout
.height
) / 64;
1053 if (surf
->u
.legacy
.fmask
.slice_tile_max
)
1054 surf
->u
.legacy
.fmask
.slice_tile_max
-= 1;
1056 surf
->u
.legacy
.fmask
.tiling_index
= fout
.tileIndex
;
1057 surf
->u
.legacy
.fmask
.bankh
= fout
.pTileInfo
->bankHeight
;
1058 surf
->u
.legacy
.fmask
.pitch_in_pixels
= fout
.pitch
;
1059 surf
->u
.legacy
.fmask
.slice_size
= fout
.sliceSize
;
1061 /* Compute tile swizzle for FMASK. */
1062 if (config
->info
.fmask_surf_index
&&
1063 !(surf
->flags
& RADEON_SURF_SHAREABLE
)) {
1064 ADDR_COMPUTE_BASE_SWIZZLE_INPUT xin
= {0};
1065 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT xout
= {0};
1067 xin
.size
= sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT
);
1068 xout
.size
= sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT
);
1070 /* This counter starts from 1 instead of 0. */
1071 xin
.surfIndex
= p_atomic_inc_return(config
->info
.fmask_surf_index
);
1072 xin
.tileIndex
= fout
.tileIndex
;
1073 xin
.macroModeIndex
= fout
.macroModeIndex
;
1074 xin
.pTileInfo
= fout
.pTileInfo
;
1075 xin
.tileMode
= fin
.tileMode
;
1077 int r
= AddrComputeBaseSwizzle(addrlib
, &xin
, &xout
);
1081 assert(xout
.tileSwizzle
<=
1082 u_bit_consecutive(0, sizeof(surf
->tile_swizzle
) * 8));
1083 surf
->fmask_tile_swizzle
= xout
.tileSwizzle
;
1087 /* Recalculate the whole DCC miptree size including disabled levels.
1088 * This is what addrlib does, but calling addrlib would be a lot more
1091 if (surf
->dcc_size
&& config
->info
.levels
> 1) {
1092 /* The smallest miplevels that are never compressed by DCC
1093 * still read the DCC buffer via TC if the base level uses DCC,
1094 * and for some reason the DCC buffer needs to be larger if
1095 * the miptree uses non-zero tile_swizzle. Otherwise there are
1098 * "dcc_alignment * 4" was determined by trial and error.
1100 surf
->dcc_size
= align64(surf
->surf_size
>> 8,
1101 surf
->dcc_alignment
* 4);
1104 /* Make sure HTILE covers the whole miptree, because the shader reads
1105 * TC-compatible HTILE even for levels where it's disabled by DB.
1107 if (surf
->htile_size
&& config
->info
.levels
> 1 &&
1108 surf
->flags
& RADEON_SURF_TC_COMPATIBLE_HTILE
) {
1109 /* MSAA can't occur with levels > 1, so ignore the sample count. */
1110 const unsigned total_pixels
= surf
->surf_size
/ surf
->bpe
;
1111 const unsigned htile_block_size
= 8 * 8;
1112 const unsigned htile_element_size
= 4;
1114 surf
->htile_size
= (total_pixels
/ htile_block_size
) *
1116 surf
->htile_size
= align(surf
->htile_size
, surf
->htile_alignment
);
1117 } else if (!surf
->htile_size
) {
1118 /* Unset this if HTILE is not present. */
1119 surf
->flags
&= ~RADEON_SURF_TC_COMPATIBLE_HTILE
;
1122 surf
->is_linear
= surf
->u
.legacy
.level
[0].mode
== RADEON_SURF_MODE_LINEAR_ALIGNED
;
1123 surf
->is_displayable
= (surf
->is_linear
||
1124 surf
->micro_tile_mode
== RADEON_MICRO_MODE_DISPLAY
||
1125 surf
->micro_tile_mode
== RADEON_MICRO_MODE_RENDER
/* rotated */) &&
1128 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
1129 * used at the same time. This case is not currently expected to occur
1130 * because we don't use rotated. Enforce this restriction on all chips
1131 * to facilitate testing.
1133 if (surf
->micro_tile_mode
== RADEON_MICRO_MODE_RENDER
) {
1134 assert(!"rotate micro tile mode is unsupported");
1138 ac_compute_cmask(info
, config
, surf
);
1142 /* This is only called when expecting a tiled layout. */
1144 gfx9_get_preferred_swizzle_mode(ADDR_HANDLE addrlib
,
1145 struct radeon_surf
*surf
,
1146 ADDR2_COMPUTE_SURFACE_INFO_INPUT
*in
,
1147 bool is_fmask
, AddrSwizzleMode
*swizzle_mode
)
1149 ADDR_E_RETURNCODE ret
;
1150 ADDR2_GET_PREFERRED_SURF_SETTING_INPUT sin
= {0};
1151 ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT sout
= {0};
1153 sin
.size
= sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_INPUT
);
1154 sout
.size
= sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT
);
1156 sin
.flags
= in
->flags
;
1157 sin
.resourceType
= in
->resourceType
;
1158 sin
.format
= in
->format
;
1159 sin
.resourceLoction
= ADDR_RSRC_LOC_INVIS
;
1160 /* TODO: We could allow some of these: */
1161 sin
.forbiddenBlock
.micro
= 1; /* don't allow the 256B swizzle modes */
1162 sin
.forbiddenBlock
.var
= 1; /* don't allow the variable-sized swizzle modes */
1164 sin
.width
= in
->width
;
1165 sin
.height
= in
->height
;
1166 sin
.numSlices
= in
->numSlices
;
1167 sin
.numMipLevels
= in
->numMipLevels
;
1168 sin
.numSamples
= in
->numSamples
;
1169 sin
.numFrags
= in
->numFrags
;
1172 sin
.flags
.display
= 0;
1173 sin
.flags
.color
= 0;
1174 sin
.flags
.fmask
= 1;
1177 if (surf
->flags
& RADEON_SURF_FORCE_MICRO_TILE_MODE
) {
1178 sin
.forbiddenBlock
.linear
= 1;
1180 if (surf
->micro_tile_mode
== RADEON_MICRO_MODE_DISPLAY
)
1181 sin
.preferredSwSet
.sw_D
= 1;
1182 else if (surf
->micro_tile_mode
== RADEON_MICRO_MODE_STANDARD
)
1183 sin
.preferredSwSet
.sw_S
= 1;
1184 else if (surf
->micro_tile_mode
== RADEON_MICRO_MODE_DEPTH
)
1185 sin
.preferredSwSet
.sw_Z
= 1;
1186 else if (surf
->micro_tile_mode
== RADEON_MICRO_MODE_RENDER
)
1187 sin
.preferredSwSet
.sw_R
= 1;
1190 ret
= Addr2GetPreferredSurfaceSetting(addrlib
, &sin
, &sout
);
1194 *swizzle_mode
= sout
.swizzleMode
;
1198 static bool is_dcc_supported_by_CB(const struct radeon_info
*info
, unsigned sw_mode
)
1200 if (info
->chip_class
>= GFX10
)
1201 return sw_mode
== ADDR_SW_64KB_Z_X
|| sw_mode
== ADDR_SW_64KB_R_X
;
1203 return sw_mode
!= ADDR_SW_LINEAR
;
1206 ASSERTED
static bool is_dcc_supported_by_L2(const struct radeon_info
*info
,
1207 const struct radeon_surf
*surf
)
1209 if (info
->chip_class
<= GFX9
) {
1210 /* Only independent 64B blocks are supported. */
1211 return surf
->u
.gfx9
.dcc
.independent_64B_blocks
&&
1212 !surf
->u
.gfx9
.dcc
.independent_128B_blocks
&&
1213 surf
->u
.gfx9
.dcc
.max_compressed_block_size
== V_028C78_MAX_BLOCK_SIZE_64B
;
1216 if (info
->family
== CHIP_NAVI10
) {
1217 /* Only independent 128B blocks are supported. */
1218 return !surf
->u
.gfx9
.dcc
.independent_64B_blocks
&&
1219 surf
->u
.gfx9
.dcc
.independent_128B_blocks
&&
1220 surf
->u
.gfx9
.dcc
.max_compressed_block_size
<= V_028C78_MAX_BLOCK_SIZE_128B
;
1223 if (info
->family
== CHIP_NAVI12
||
1224 info
->family
== CHIP_NAVI14
) {
1225 /* Either 64B or 128B can be used, but not both.
1226 * If 64B is used, DCC image stores are unsupported.
1228 return surf
->u
.gfx9
.dcc
.independent_64B_blocks
!=
1229 surf
->u
.gfx9
.dcc
.independent_128B_blocks
&&
1230 (!surf
->u
.gfx9
.dcc
.independent_64B_blocks
||
1231 surf
->u
.gfx9
.dcc
.max_compressed_block_size
== V_028C78_MAX_BLOCK_SIZE_64B
) &&
1232 (!surf
->u
.gfx9
.dcc
.independent_128B_blocks
||
1233 surf
->u
.gfx9
.dcc
.max_compressed_block_size
<= V_028C78_MAX_BLOCK_SIZE_128B
);
1236 /* 128B is recommended, but 64B can be set too if needed for 4K by DCN.
1237 * Since there is no reason to ever disable 128B, require it.
1238 * DCC image stores are always supported.
1240 return surf
->u
.gfx9
.dcc
.independent_128B_blocks
&&
1241 surf
->u
.gfx9
.dcc
.max_compressed_block_size
<= V_028C78_MAX_BLOCK_SIZE_128B
;
1244 static bool is_dcc_supported_by_DCN(const struct radeon_info
*info
,
1245 const struct ac_surf_config
*config
,
1246 const struct radeon_surf
*surf
,
1247 bool rb_aligned
, bool pipe_aligned
)
1249 if (!info
->use_display_dcc_unaligned
&&
1250 !info
->use_display_dcc_with_retile_blit
)
1253 /* 16bpp and 64bpp are more complicated, so they are disallowed for now. */
1257 /* Handle unaligned DCC. */
1258 if (info
->use_display_dcc_unaligned
&&
1259 (rb_aligned
|| pipe_aligned
))
1262 switch (info
->chip_class
) {
1264 /* There are more constraints, but we always set
1265 * INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B,
1266 * which always works.
1268 assert(surf
->u
.gfx9
.dcc
.independent_64B_blocks
&&
1269 surf
->u
.gfx9
.dcc
.max_compressed_block_size
== V_028C78_MAX_BLOCK_SIZE_64B
);
1273 /* DCN requires INDEPENDENT_128B_BLOCKS = 0 only on Navi1x. */
1274 if (info
->chip_class
== GFX10
&&
1275 surf
->u
.gfx9
.dcc
.independent_128B_blocks
)
1278 /* For 4K, DCN requires INDEPENDENT_64B_BLOCKS = 1. */
1279 return ((config
->info
.width
<= 2560 &&
1280 config
->info
.height
<= 2560) ||
1281 (surf
->u
.gfx9
.dcc
.independent_64B_blocks
&&
1282 surf
->u
.gfx9
.dcc
.max_compressed_block_size
== V_028C78_MAX_BLOCK_SIZE_64B
));
1284 unreachable("unhandled chip");
1289 static int gfx9_compute_miptree(struct ac_addrlib
*addrlib
,
1290 const struct radeon_info
*info
,
1291 const struct ac_surf_config
*config
,
1292 struct radeon_surf
*surf
, bool compressed
,
1293 ADDR2_COMPUTE_SURFACE_INFO_INPUT
*in
)
1295 ADDR2_MIP_INFO mip_info
[RADEON_SURF_MAX_LEVELS
] = {};
1296 ADDR2_COMPUTE_SURFACE_INFO_OUTPUT out
= {0};
1297 ADDR_E_RETURNCODE ret
;
1299 out
.size
= sizeof(ADDR2_COMPUTE_SURFACE_INFO_OUTPUT
);
1300 out
.pMipInfo
= mip_info
;
1302 ret
= Addr2ComputeSurfaceInfo(addrlib
->handle
, in
, &out
);
1306 if (in
->flags
.stencil
) {
1307 surf
->u
.gfx9
.stencil
.swizzle_mode
= in
->swizzleMode
;
1308 surf
->u
.gfx9
.stencil
.epitch
= out
.epitchIsHeight
? out
.mipChainHeight
- 1 :
1309 out
.mipChainPitch
- 1;
1310 surf
->surf_alignment
= MAX2(surf
->surf_alignment
, out
.baseAlign
);
1311 surf
->u
.gfx9
.stencil_offset
= align(surf
->surf_size
, out
.baseAlign
);
1312 surf
->surf_size
= surf
->u
.gfx9
.stencil_offset
+ out
.surfSize
;
1316 surf
->u
.gfx9
.surf
.swizzle_mode
= in
->swizzleMode
;
1317 surf
->u
.gfx9
.surf
.epitch
= out
.epitchIsHeight
? out
.mipChainHeight
- 1 :
1318 out
.mipChainPitch
- 1;
1320 /* CMASK fast clear uses these even if FMASK isn't allocated.
1321 * FMASK only supports the Z swizzle modes, whose numbers are multiples of 4.
1323 surf
->u
.gfx9
.fmask
.swizzle_mode
= surf
->u
.gfx9
.surf
.swizzle_mode
& ~0x3;
1324 surf
->u
.gfx9
.fmask
.epitch
= surf
->u
.gfx9
.surf
.epitch
;
1326 surf
->u
.gfx9
.surf_slice_size
= out
.sliceSize
;
1327 surf
->u
.gfx9
.surf_pitch
= out
.pitch
;
1328 if (!compressed
&& surf
->blk_w
> 1 && out
.pitch
== out
.pixelPitch
&&
1329 surf
->u
.gfx9
.surf
.swizzle_mode
== ADDR_SW_LINEAR
) {
1330 /* Adjust surf_pitch to be in elements units,
1332 surf
->u
.gfx9
.surf_pitch
=
1333 align(surf
->u
.gfx9
.surf_pitch
/ surf
->blk_w
, 256 / surf
->bpe
);
1334 surf
->u
.gfx9
.surf
.epitch
= MAX2(surf
->u
.gfx9
.surf
.epitch
,
1335 surf
->u
.gfx9
.surf_pitch
* surf
->blk_w
- 1);
1337 surf
->u
.gfx9
.surf_height
= out
.height
;
1338 surf
->surf_size
= out
.surfSize
;
1339 surf
->surf_alignment
= out
.baseAlign
;
1341 if (in
->swizzleMode
== ADDR_SW_LINEAR
) {
1342 for (unsigned i
= 0; i
< in
->numMipLevels
; i
++) {
1343 surf
->u
.gfx9
.offset
[i
] = mip_info
[i
].offset
;
1344 surf
->u
.gfx9
.pitch
[i
] = mip_info
[i
].pitch
;
1348 if (in
->flags
.depth
) {
1349 assert(in
->swizzleMode
!= ADDR_SW_LINEAR
);
1351 if (surf
->flags
& RADEON_SURF_NO_HTILE
)
1355 ADDR2_COMPUTE_HTILE_INFO_INPUT hin
= {0};
1356 ADDR2_COMPUTE_HTILE_INFO_OUTPUT hout
= {0};
1358 hin
.size
= sizeof(ADDR2_COMPUTE_HTILE_INFO_INPUT
);
1359 hout
.size
= sizeof(ADDR2_COMPUTE_HTILE_INFO_OUTPUT
);
1361 assert(in
->flags
.metaPipeUnaligned
== 0);
1362 assert(in
->flags
.metaRbUnaligned
== 0);
1364 hin
.hTileFlags
.pipeAligned
= 1;
1365 hin
.hTileFlags
.rbAligned
= 1;
1366 hin
.depthFlags
= in
->flags
;
1367 hin
.swizzleMode
= in
->swizzleMode
;
1368 hin
.unalignedWidth
= in
->width
;
1369 hin
.unalignedHeight
= in
->height
;
1370 hin
.numSlices
= in
->numSlices
;
1371 hin
.numMipLevels
= in
->numMipLevels
;
1372 hin
.firstMipIdInTail
= out
.firstMipIdInTail
;
1374 ret
= Addr2ComputeHtileInfo(addrlib
->handle
, &hin
, &hout
);
1378 surf
->htile_size
= hout
.htileBytes
;
1379 surf
->htile_slice_size
= hout
.sliceSize
;
1380 surf
->htile_alignment
= hout
.baseAlign
;
1385 /* Compute tile swizzle for the color surface.
1386 * All *_X and *_T modes can use the swizzle.
1388 if (config
->info
.surf_index
&&
1389 in
->swizzleMode
>= ADDR_SW_64KB_Z_T
&&
1390 !out
.mipChainInTail
&&
1391 !(surf
->flags
& RADEON_SURF_SHAREABLE
) &&
1392 !in
->flags
.display
) {
1393 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin
= {0};
1394 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout
= {0};
1396 xin
.size
= sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT
);
1397 xout
.size
= sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT
);
1399 xin
.surfIndex
= p_atomic_inc_return(config
->info
.surf_index
) - 1;
1400 xin
.flags
= in
->flags
;
1401 xin
.swizzleMode
= in
->swizzleMode
;
1402 xin
.resourceType
= in
->resourceType
;
1403 xin
.format
= in
->format
;
1404 xin
.numSamples
= in
->numSamples
;
1405 xin
.numFrags
= in
->numFrags
;
1407 ret
= Addr2ComputePipeBankXor(addrlib
->handle
, &xin
, &xout
);
1411 assert(xout
.pipeBankXor
<=
1412 u_bit_consecutive(0, sizeof(surf
->tile_swizzle
) * 8));
1413 surf
->tile_swizzle
= xout
.pipeBankXor
;
1417 if (info
->has_graphics
&&
1418 !(surf
->flags
& RADEON_SURF_DISABLE_DCC
) &&
1420 is_dcc_supported_by_CB(info
, in
->swizzleMode
) &&
1421 (!in
->flags
.display
||
1422 is_dcc_supported_by_DCN(info
, config
, surf
,
1423 !in
->flags
.metaRbUnaligned
,
1424 !in
->flags
.metaPipeUnaligned
))) {
1425 ADDR2_COMPUTE_DCCINFO_INPUT din
= {0};
1426 ADDR2_COMPUTE_DCCINFO_OUTPUT dout
= {0};
1427 ADDR2_META_MIP_INFO meta_mip_info
[RADEON_SURF_MAX_LEVELS
] = {};
1429 din
.size
= sizeof(ADDR2_COMPUTE_DCCINFO_INPUT
);
1430 dout
.size
= sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT
);
1431 dout
.pMipInfo
= meta_mip_info
;
1433 din
.dccKeyFlags
.pipeAligned
= !in
->flags
.metaPipeUnaligned
;
1434 din
.dccKeyFlags
.rbAligned
= !in
->flags
.metaRbUnaligned
;
1435 din
.colorFlags
= in
->flags
;
1436 din
.resourceType
= in
->resourceType
;
1437 din
.swizzleMode
= in
->swizzleMode
;
1439 din
.unalignedWidth
= in
->width
;
1440 din
.unalignedHeight
= in
->height
;
1441 din
.numSlices
= in
->numSlices
;
1442 din
.numFrags
= in
->numFrags
;
1443 din
.numMipLevels
= in
->numMipLevels
;
1444 din
.dataSurfaceSize
= out
.surfSize
;
1445 din
.firstMipIdInTail
= out
.firstMipIdInTail
;
1447 ret
= Addr2ComputeDccInfo(addrlib
->handle
, &din
, &dout
);
1451 surf
->u
.gfx9
.dcc
.rb_aligned
= din
.dccKeyFlags
.rbAligned
;
1452 surf
->u
.gfx9
.dcc
.pipe_aligned
= din
.dccKeyFlags
.pipeAligned
;
1453 surf
->u
.gfx9
.dcc_block_width
= dout
.compressBlkWidth
;
1454 surf
->u
.gfx9
.dcc_block_height
= dout
.compressBlkHeight
;
1455 surf
->u
.gfx9
.dcc_block_depth
= dout
.compressBlkDepth
;
1456 surf
->dcc_size
= dout
.dccRamSize
;
1457 surf
->dcc_alignment
= dout
.dccRamBaseAlign
;
1458 surf
->num_dcc_levels
= in
->numMipLevels
;
1460 /* Disable DCC for levels that are in the mip tail.
1462 * There are two issues that this is intended to
1465 * 1. Multiple mip levels may share a cache line. This
1466 * can lead to corruption when switching between
1467 * rendering to different mip levels because the
1468 * RBs don't maintain coherency.
1470 * 2. Texturing with metadata after rendering sometimes
1471 * fails with corruption, probably for a similar
1474 * Working around these issues for all levels in the
1475 * mip tail may be overly conservative, but it's what
1478 * Alternative solutions that also work but are worse:
1479 * - Disable DCC entirely.
1480 * - Flush TC L2 after rendering.
1482 for (unsigned i
= 0; i
< in
->numMipLevels
; i
++) {
1483 if (meta_mip_info
[i
].inMiptail
) {
1484 /* GFX10 can only compress the first level
1487 * TODO: Try to do the same thing for gfx9
1488 * if there are no regressions.
1490 if (info
->chip_class
>= GFX10
)
1491 surf
->num_dcc_levels
= i
+ 1;
1493 surf
->num_dcc_levels
= i
;
1498 if (!surf
->num_dcc_levels
)
1501 surf
->u
.gfx9
.display_dcc_size
= surf
->dcc_size
;
1502 surf
->u
.gfx9
.display_dcc_alignment
= surf
->dcc_alignment
;
1503 surf
->u
.gfx9
.display_dcc_pitch_max
= dout
.pitch
- 1;
1505 /* Compute displayable DCC. */
1506 if (in
->flags
.display
&&
1507 surf
->num_dcc_levels
&&
1508 info
->use_display_dcc_with_retile_blit
) {
1509 /* Compute displayable DCC info. */
1510 din
.dccKeyFlags
.pipeAligned
= 0;
1511 din
.dccKeyFlags
.rbAligned
= 0;
1513 assert(din
.numSlices
== 1);
1514 assert(din
.numMipLevels
== 1);
1515 assert(din
.numFrags
== 1);
1516 assert(surf
->tile_swizzle
== 0);
1517 assert(surf
->u
.gfx9
.dcc
.pipe_aligned
||
1518 surf
->u
.gfx9
.dcc
.rb_aligned
);
1520 ret
= Addr2ComputeDccInfo(addrlib
->handle
, &din
, &dout
);
1524 surf
->u
.gfx9
.display_dcc_size
= dout
.dccRamSize
;
1525 surf
->u
.gfx9
.display_dcc_alignment
= dout
.dccRamBaseAlign
;
1526 surf
->u
.gfx9
.display_dcc_pitch_max
= dout
.pitch
- 1;
1527 assert(surf
->u
.gfx9
.display_dcc_size
<= surf
->dcc_size
);
1529 surf
->u
.gfx9
.dcc_retile_use_uint16
=
1530 surf
->u
.gfx9
.display_dcc_size
<= UINT16_MAX
+ 1 &&
1531 surf
->dcc_size
<= UINT16_MAX
+ 1;
1533 /* Align the retile map size to get more hash table hits and
1534 * decrease the maximum memory footprint when all retile maps
1535 * are cached in the hash table.
1537 unsigned retile_dim
[2] = {in
->width
, in
->height
};
1539 for (unsigned i
= 0; i
< 2; i
++) {
1540 /* Increase the alignment as the size increases.
1541 * Greater alignment increases retile compute work,
1542 * but decreases maximum memory footprint for the cache.
1544 * With this alignment, the worst case memory footprint of
1550 * The worst case size in MB can be computed in Haskell as follows:
1551 * (sum (map get_retile_size (map get_dcc_size (deduplicate (map align_pair
1552 * [(i*16,j*16) | i <- [1..maxwidth`div`16], j <- [1..maxheight`div`16]]))))) `div` 1024^2
1554 * alignment x = if x <= 512 then 16 else if x <= 1024 then 32 else if x <= 2048 then 64 else 128
1555 * align x = (x + (alignment x) - 1) `div` (alignment x) * (alignment x)
1556 * align_pair e = (align (fst e), align (snd e))
1557 * deduplicate = map head . groupBy (\ a b -> ((fst a) == (fst b)) && ((snd a) == (snd b))) . sortBy compare
1558 * get_dcc_size e = ((fst e) * (snd e) * bpp) `div` 256
1559 * get_retile_size dcc_size = dcc_size * 2 * (if dcc_size <= 2^16 then 2 else 4)
1560 * bpp = 4; maxwidth = 3840; maxheight = 2160
1562 if (retile_dim
[i
] <= 512)
1563 retile_dim
[i
] = align(retile_dim
[i
], 16);
1564 else if (retile_dim
[i
] <= 1024)
1565 retile_dim
[i
] = align(retile_dim
[i
], 32);
1566 else if (retile_dim
[i
] <= 2048)
1567 retile_dim
[i
] = align(retile_dim
[i
], 64);
1569 retile_dim
[i
] = align(retile_dim
[i
], 128);
1571 /* Don't align more than the DCC pixel alignment. */
1572 assert(dout
.metaBlkWidth
>= 128 && dout
.metaBlkHeight
>= 128);
1575 surf
->u
.gfx9
.dcc_retile_num_elements
=
1576 DIV_ROUND_UP(retile_dim
[0], dout
.compressBlkWidth
) *
1577 DIV_ROUND_UP(retile_dim
[1], dout
.compressBlkHeight
) * 2;
1578 /* Align the size to 4 (for the compute shader). */
1579 surf
->u
.gfx9
.dcc_retile_num_elements
=
1580 align(surf
->u
.gfx9
.dcc_retile_num_elements
, 4);
1582 if (!(surf
->flags
& RADEON_SURF_IMPORTED
)) {
1583 /* Compute address mapping from non-displayable to displayable DCC. */
1584 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin
;
1585 memset(&addrin
, 0, sizeof(addrin
));
1586 addrin
.size
= sizeof(addrin
);
1587 addrin
.swizzleMode
= din
.swizzleMode
;
1588 addrin
.resourceType
= din
.resourceType
;
1589 addrin
.bpp
= din
.bpp
;
1590 addrin
.numSlices
= 1;
1591 addrin
.numMipLevels
= 1;
1592 addrin
.numFrags
= 1;
1593 addrin
.pitch
= dout
.pitch
;
1594 addrin
.height
= dout
.height
;
1595 addrin
.compressBlkWidth
= dout
.compressBlkWidth
;
1596 addrin
.compressBlkHeight
= dout
.compressBlkHeight
;
1597 addrin
.compressBlkDepth
= dout
.compressBlkDepth
;
1598 addrin
.metaBlkWidth
= dout
.metaBlkWidth
;
1599 addrin
.metaBlkHeight
= dout
.metaBlkHeight
;
1600 addrin
.metaBlkDepth
= dout
.metaBlkDepth
;
1601 addrin
.dccRamSliceSize
= 0; /* Don't care for non-layered images. */
1603 surf
->u
.gfx9
.dcc_retile_map
=
1604 ac_compute_dcc_retile_map(addrlib
, info
,
1605 retile_dim
[0], retile_dim
[1],
1606 surf
->u
.gfx9
.dcc
.rb_aligned
,
1607 surf
->u
.gfx9
.dcc
.pipe_aligned
,
1608 surf
->u
.gfx9
.dcc_retile_use_uint16
,
1609 surf
->u
.gfx9
.dcc_retile_num_elements
,
1611 if (!surf
->u
.gfx9
.dcc_retile_map
)
1612 return ADDR_OUTOFMEMORY
;
1618 if (in
->numSamples
> 1 && info
->has_graphics
&&
1619 !(surf
->flags
& RADEON_SURF_NO_FMASK
)) {
1620 ADDR2_COMPUTE_FMASK_INFO_INPUT fin
= {0};
1621 ADDR2_COMPUTE_FMASK_INFO_OUTPUT fout
= {0};
1623 fin
.size
= sizeof(ADDR2_COMPUTE_FMASK_INFO_INPUT
);
1624 fout
.size
= sizeof(ADDR2_COMPUTE_FMASK_INFO_OUTPUT
);
1626 ret
= gfx9_get_preferred_swizzle_mode(addrlib
->handle
, surf
, in
,
1627 true, &fin
.swizzleMode
);
1631 fin
.unalignedWidth
= in
->width
;
1632 fin
.unalignedHeight
= in
->height
;
1633 fin
.numSlices
= in
->numSlices
;
1634 fin
.numSamples
= in
->numSamples
;
1635 fin
.numFrags
= in
->numFrags
;
1637 ret
= Addr2ComputeFmaskInfo(addrlib
->handle
, &fin
, &fout
);
1641 surf
->u
.gfx9
.fmask
.swizzle_mode
= fin
.swizzleMode
;
1642 surf
->u
.gfx9
.fmask
.epitch
= fout
.pitch
- 1;
1643 surf
->fmask_size
= fout
.fmaskBytes
;
1644 surf
->fmask_alignment
= fout
.baseAlign
;
1646 /* Compute tile swizzle for the FMASK surface. */
1647 if (config
->info
.fmask_surf_index
&&
1648 fin
.swizzleMode
>= ADDR_SW_64KB_Z_T
&&
1649 !(surf
->flags
& RADEON_SURF_SHAREABLE
)) {
1650 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin
= {0};
1651 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout
= {0};
1653 xin
.size
= sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT
);
1654 xout
.size
= sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT
);
1656 /* This counter starts from 1 instead of 0. */
1657 xin
.surfIndex
= p_atomic_inc_return(config
->info
.fmask_surf_index
);
1658 xin
.flags
= in
->flags
;
1659 xin
.swizzleMode
= fin
.swizzleMode
;
1660 xin
.resourceType
= in
->resourceType
;
1661 xin
.format
= in
->format
;
1662 xin
.numSamples
= in
->numSamples
;
1663 xin
.numFrags
= in
->numFrags
;
1665 ret
= Addr2ComputePipeBankXor(addrlib
->handle
, &xin
, &xout
);
1669 assert(xout
.pipeBankXor
<=
1670 u_bit_consecutive(0, sizeof(surf
->fmask_tile_swizzle
) * 8));
1671 surf
->fmask_tile_swizzle
= xout
.pipeBankXor
;
1675 /* CMASK -- on GFX10 only for FMASK */
1676 if (in
->swizzleMode
!= ADDR_SW_LINEAR
&&
1677 in
->resourceType
== ADDR_RSRC_TEX_2D
&&
1678 ((info
->chip_class
<= GFX9
&&
1679 in
->numSamples
== 1 &&
1680 in
->flags
.metaPipeUnaligned
== 0 &&
1681 in
->flags
.metaRbUnaligned
== 0) ||
1682 (surf
->fmask_size
&& in
->numSamples
>= 2))) {
1683 ADDR2_COMPUTE_CMASK_INFO_INPUT cin
= {0};
1684 ADDR2_COMPUTE_CMASK_INFO_OUTPUT cout
= {0};
1686 cin
.size
= sizeof(ADDR2_COMPUTE_CMASK_INFO_INPUT
);
1687 cout
.size
= sizeof(ADDR2_COMPUTE_CMASK_INFO_OUTPUT
);
1689 assert(in
->flags
.metaPipeUnaligned
== 0);
1690 assert(in
->flags
.metaRbUnaligned
== 0);
1692 cin
.cMaskFlags
.pipeAligned
= 1;
1693 cin
.cMaskFlags
.rbAligned
= 1;
1694 cin
.colorFlags
= in
->flags
;
1695 cin
.resourceType
= in
->resourceType
;
1696 cin
.unalignedWidth
= in
->width
;
1697 cin
.unalignedHeight
= in
->height
;
1698 cin
.numSlices
= in
->numSlices
;
1700 if (in
->numSamples
> 1)
1701 cin
.swizzleMode
= surf
->u
.gfx9
.fmask
.swizzle_mode
;
1703 cin
.swizzleMode
= in
->swizzleMode
;
1705 ret
= Addr2ComputeCmaskInfo(addrlib
->handle
, &cin
, &cout
);
1709 surf
->cmask_size
= cout
.cmaskBytes
;
1710 surf
->cmask_alignment
= cout
.baseAlign
;
1717 static int gfx9_compute_surface(struct ac_addrlib
*addrlib
,
1718 const struct radeon_info
*info
,
1719 const struct ac_surf_config
*config
,
1720 enum radeon_surf_mode mode
,
1721 struct radeon_surf
*surf
)
1724 ADDR2_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn
= {0};
1727 AddrSurfInfoIn
.size
= sizeof(ADDR2_COMPUTE_SURFACE_INFO_INPUT
);
1729 compressed
= surf
->blk_w
== 4 && surf
->blk_h
== 4;
1731 /* The format must be set correctly for the allocation of compressed
1732 * textures to work. In other cases, setting the bpp is sufficient. */
1734 switch (surf
->bpe
) {
1736 AddrSurfInfoIn
.format
= ADDR_FMT_BC1
;
1739 AddrSurfInfoIn
.format
= ADDR_FMT_BC3
;
1745 switch (surf
->bpe
) {
1747 assert(!(surf
->flags
& RADEON_SURF_ZBUFFER
));
1748 AddrSurfInfoIn
.format
= ADDR_FMT_8
;
1751 assert(surf
->flags
& RADEON_SURF_ZBUFFER
||
1752 !(surf
->flags
& RADEON_SURF_SBUFFER
));
1753 AddrSurfInfoIn
.format
= ADDR_FMT_16
;
1756 assert(surf
->flags
& RADEON_SURF_ZBUFFER
||
1757 !(surf
->flags
& RADEON_SURF_SBUFFER
));
1758 AddrSurfInfoIn
.format
= ADDR_FMT_32
;
1761 assert(!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
));
1762 AddrSurfInfoIn
.format
= ADDR_FMT_32_32
;
1765 assert(!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
));
1766 AddrSurfInfoIn
.format
= ADDR_FMT_32_32_32
;
1769 assert(!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
));
1770 AddrSurfInfoIn
.format
= ADDR_FMT_32_32_32_32
;
1775 AddrSurfInfoIn
.bpp
= surf
->bpe
* 8;
1778 bool is_color_surface
= !(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
);
1779 AddrSurfInfoIn
.flags
.color
= is_color_surface
&&
1780 !(surf
->flags
& RADEON_SURF_NO_RENDER_TARGET
);
1781 AddrSurfInfoIn
.flags
.depth
= (surf
->flags
& RADEON_SURF_ZBUFFER
) != 0;
1782 AddrSurfInfoIn
.flags
.display
= get_display_flag(config
, surf
);
1783 /* flags.texture currently refers to TC-compatible HTILE */
1784 AddrSurfInfoIn
.flags
.texture
= is_color_surface
||
1785 surf
->flags
& RADEON_SURF_TC_COMPATIBLE_HTILE
;
1786 AddrSurfInfoIn
.flags
.opt4space
= 1;
1788 AddrSurfInfoIn
.numMipLevels
= config
->info
.levels
;
1789 AddrSurfInfoIn
.numSamples
= MAX2(1, config
->info
.samples
);
1790 AddrSurfInfoIn
.numFrags
= AddrSurfInfoIn
.numSamples
;
1792 if (!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
))
1793 AddrSurfInfoIn
.numFrags
= MAX2(1, config
->info
.storage_samples
);
1795 /* GFX9 doesn't support 1D depth textures, so allocate all 1D textures
1796 * as 2D to avoid having shader variants for 1D vs 2D, so all shaders
1797 * must sample 1D textures as 2D. */
1799 AddrSurfInfoIn
.resourceType
= ADDR_RSRC_TEX_3D
;
1800 else if (info
->chip_class
!= GFX9
&& config
->is_1d
)
1801 AddrSurfInfoIn
.resourceType
= ADDR_RSRC_TEX_1D
;
1803 AddrSurfInfoIn
.resourceType
= ADDR_RSRC_TEX_2D
;
1805 AddrSurfInfoIn
.width
= config
->info
.width
;
1806 AddrSurfInfoIn
.height
= config
->info
.height
;
1809 AddrSurfInfoIn
.numSlices
= config
->info
.depth
;
1810 else if (config
->is_cube
)
1811 AddrSurfInfoIn
.numSlices
= 6;
1813 AddrSurfInfoIn
.numSlices
= config
->info
.array_size
;
1815 /* This is propagated to DCC. It must be 0 for HTILE and CMASK. */
1816 AddrSurfInfoIn
.flags
.metaPipeUnaligned
= 0;
1817 AddrSurfInfoIn
.flags
.metaRbUnaligned
= 0;
1819 /* Optimal values for the L2 cache. */
1820 if (info
->chip_class
== GFX9
) {
1821 surf
->u
.gfx9
.dcc
.independent_64B_blocks
= 1;
1822 surf
->u
.gfx9
.dcc
.independent_128B_blocks
= 0;
1823 surf
->u
.gfx9
.dcc
.max_compressed_block_size
= V_028C78_MAX_BLOCK_SIZE_64B
;
1824 } else if (info
->chip_class
>= GFX10
) {
1825 surf
->u
.gfx9
.dcc
.independent_64B_blocks
= 0;
1826 surf
->u
.gfx9
.dcc
.independent_128B_blocks
= 1;
1827 surf
->u
.gfx9
.dcc
.max_compressed_block_size
= V_028C78_MAX_BLOCK_SIZE_128B
;
1830 if (AddrSurfInfoIn
.flags
.display
) {
1831 /* The display hardware can only read DCC with RB_ALIGNED=0 and
1832 * PIPE_ALIGNED=0. PIPE_ALIGNED really means L2CACHE_ALIGNED.
1834 * The CB block requires RB_ALIGNED=1 except 1 RB chips.
1835 * PIPE_ALIGNED is optional, but PIPE_ALIGNED=0 requires L2 flushes
1836 * after rendering, so PIPE_ALIGNED=1 is recommended.
1838 if (info
->use_display_dcc_unaligned
) {
1839 AddrSurfInfoIn
.flags
.metaPipeUnaligned
= 1;
1840 AddrSurfInfoIn
.flags
.metaRbUnaligned
= 1;
1843 /* Adjust DCC settings to meet DCN requirements. */
1844 if (info
->use_display_dcc_unaligned
||
1845 info
->use_display_dcc_with_retile_blit
) {
1846 /* Only Navi12/14 support independent 64B blocks in L2,
1847 * but without DCC image stores.
1849 if (info
->family
== CHIP_NAVI12
||
1850 info
->family
== CHIP_NAVI14
) {
1851 surf
->u
.gfx9
.dcc
.independent_64B_blocks
= 1;
1852 surf
->u
.gfx9
.dcc
.independent_128B_blocks
= 0;
1853 surf
->u
.gfx9
.dcc
.max_compressed_block_size
= V_028C78_MAX_BLOCK_SIZE_64B
;
1856 if (info
->chip_class
>= GFX10_3
) {
1857 surf
->u
.gfx9
.dcc
.independent_64B_blocks
= 1;
1858 surf
->u
.gfx9
.dcc
.independent_128B_blocks
= 1;
1859 surf
->u
.gfx9
.dcc
.max_compressed_block_size
= V_028C78_MAX_BLOCK_SIZE_64B
;
1865 case RADEON_SURF_MODE_LINEAR_ALIGNED
:
1866 assert(config
->info
.samples
<= 1);
1867 assert(!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
));
1868 AddrSurfInfoIn
.swizzleMode
= ADDR_SW_LINEAR
;
1871 case RADEON_SURF_MODE_1D
:
1872 case RADEON_SURF_MODE_2D
:
1873 if (surf
->flags
& RADEON_SURF_IMPORTED
||
1874 (info
->chip_class
>= GFX10
&&
1875 surf
->flags
& RADEON_SURF_FORCE_SWIZZLE_MODE
)) {
1876 AddrSurfInfoIn
.swizzleMode
= surf
->u
.gfx9
.surf
.swizzle_mode
;
1880 r
= gfx9_get_preferred_swizzle_mode(addrlib
->handle
, surf
, &AddrSurfInfoIn
,
1881 false, &AddrSurfInfoIn
.swizzleMode
);
1890 surf
->u
.gfx9
.resource_type
= AddrSurfInfoIn
.resourceType
;
1891 surf
->has_stencil
= !!(surf
->flags
& RADEON_SURF_SBUFFER
);
1893 surf
->num_dcc_levels
= 0;
1894 surf
->surf_size
= 0;
1895 surf
->fmask_size
= 0;
1897 surf
->htile_size
= 0;
1898 surf
->htile_slice_size
= 0;
1899 surf
->u
.gfx9
.surf_offset
= 0;
1900 surf
->u
.gfx9
.stencil_offset
= 0;
1901 surf
->cmask_size
= 0;
1902 surf
->u
.gfx9
.dcc_retile_use_uint16
= false;
1903 surf
->u
.gfx9
.dcc_retile_num_elements
= 0;
1904 surf
->u
.gfx9
.dcc_retile_map
= NULL
;
1906 /* Calculate texture layout information. */
1907 r
= gfx9_compute_miptree(addrlib
, info
, config
, surf
, compressed
,
1912 /* Calculate texture layout information for stencil. */
1913 if (surf
->flags
& RADEON_SURF_SBUFFER
) {
1914 AddrSurfInfoIn
.flags
.stencil
= 1;
1915 AddrSurfInfoIn
.bpp
= 8;
1916 AddrSurfInfoIn
.format
= ADDR_FMT_8
;
1918 if (!AddrSurfInfoIn
.flags
.depth
) {
1919 r
= gfx9_get_preferred_swizzle_mode(addrlib
->handle
, surf
, &AddrSurfInfoIn
,
1920 false, &AddrSurfInfoIn
.swizzleMode
);
1924 AddrSurfInfoIn
.flags
.depth
= 0;
1926 r
= gfx9_compute_miptree(addrlib
, info
, config
, surf
, compressed
,
1932 surf
->is_linear
= surf
->u
.gfx9
.surf
.swizzle_mode
== ADDR_SW_LINEAR
;
1934 /* Query whether the surface is displayable. */
1935 /* This is only useful for surfaces that are allocated without SCANOUT. */
1936 bool displayable
= false;
1937 if (!config
->is_3d
&& !config
->is_cube
) {
1938 r
= Addr2IsValidDisplaySwizzleMode(addrlib
->handle
, surf
->u
.gfx9
.surf
.swizzle_mode
,
1939 surf
->bpe
* 8, &displayable
);
1943 /* Display needs unaligned DCC. */
1944 if (surf
->num_dcc_levels
&&
1945 (!is_dcc_supported_by_DCN(info
, config
, surf
,
1946 surf
->u
.gfx9
.dcc
.rb_aligned
,
1947 surf
->u
.gfx9
.dcc
.pipe_aligned
) ||
1948 /* Don't set is_displayable if displayable DCC is missing. */
1949 (info
->use_display_dcc_with_retile_blit
&&
1950 !surf
->u
.gfx9
.dcc_retile_num_elements
)))
1951 displayable
= false;
1953 surf
->is_displayable
= displayable
;
1955 /* Validate that we allocated a displayable surface if requested. */
1956 assert(!AddrSurfInfoIn
.flags
.display
|| surf
->is_displayable
);
1958 /* Validate that DCC is set up correctly. */
1959 if (surf
->num_dcc_levels
) {
1960 assert(is_dcc_supported_by_L2(info
, surf
));
1961 if (AddrSurfInfoIn
.flags
.color
)
1962 assert(is_dcc_supported_by_CB(info
, surf
->u
.gfx9
.surf
.swizzle_mode
));
1963 if (AddrSurfInfoIn
.flags
.display
) {
1964 assert(is_dcc_supported_by_DCN(info
, config
, surf
,
1965 surf
->u
.gfx9
.dcc
.rb_aligned
,
1966 surf
->u
.gfx9
.dcc
.pipe_aligned
));
1970 if (info
->has_graphics
&&
1973 config
->info
.levels
== 1 &&
1974 AddrSurfInfoIn
.flags
.color
&&
1976 surf
->surf_alignment
>= 64 * 1024 && /* 64KB tiling */
1977 !(surf
->flags
& (RADEON_SURF_DISABLE_DCC
|
1978 RADEON_SURF_FORCE_SWIZZLE_MODE
|
1979 RADEON_SURF_FORCE_MICRO_TILE_MODE
))) {
1980 /* Validate that DCC is enabled if DCN can do it. */
1981 if ((info
->use_display_dcc_unaligned
||
1982 info
->use_display_dcc_with_retile_blit
) &&
1983 AddrSurfInfoIn
.flags
.display
&&
1985 assert(surf
->num_dcc_levels
);
1988 /* Validate that non-scanout DCC is always enabled. */
1989 if (!AddrSurfInfoIn
.flags
.display
)
1990 assert(surf
->num_dcc_levels
);
1993 if (!surf
->htile_size
) {
1994 /* Unset this if HTILE is not present. */
1995 surf
->flags
&= ~RADEON_SURF_TC_COMPATIBLE_HTILE
;
1998 switch (surf
->u
.gfx9
.surf
.swizzle_mode
) {
2000 case ADDR_SW_256B_S
:
2002 case ADDR_SW_64KB_S
:
2003 case ADDR_SW_64KB_S_T
:
2004 case ADDR_SW_4KB_S_X
:
2005 case ADDR_SW_64KB_S_X
:
2006 surf
->micro_tile_mode
= RADEON_MICRO_MODE_STANDARD
;
2010 case ADDR_SW_LINEAR
:
2011 case ADDR_SW_256B_D
:
2013 case ADDR_SW_64KB_D
:
2014 case ADDR_SW_64KB_D_T
:
2015 case ADDR_SW_4KB_D_X
:
2016 case ADDR_SW_64KB_D_X
:
2017 surf
->micro_tile_mode
= RADEON_MICRO_MODE_DISPLAY
;
2020 /* R = rotated (gfx9), render target (gfx10). */
2021 case ADDR_SW_256B_R
:
2023 case ADDR_SW_64KB_R
:
2024 case ADDR_SW_64KB_R_T
:
2025 case ADDR_SW_4KB_R_X
:
2026 case ADDR_SW_64KB_R_X
:
2027 case ADDR_SW_VAR_R_X
:
2028 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
2029 * used at the same time. We currently do not use rotated
2032 assert(info
->chip_class
>= GFX10
||
2033 !"rotate micro tile mode is unsupported");
2034 surf
->micro_tile_mode
= RADEON_MICRO_MODE_RENDER
;
2039 case ADDR_SW_64KB_Z
:
2040 case ADDR_SW_64KB_Z_T
:
2041 case ADDR_SW_4KB_Z_X
:
2042 case ADDR_SW_64KB_Z_X
:
2043 case ADDR_SW_VAR_Z_X
:
2044 surf
->micro_tile_mode
= RADEON_MICRO_MODE_DEPTH
;
2054 free(surf
->u
.gfx9
.dcc_retile_map
);
2055 surf
->u
.gfx9
.dcc_retile_map
= NULL
;
2059 int ac_compute_surface(struct ac_addrlib
*addrlib
, const struct radeon_info
*info
,
2060 const struct ac_surf_config
*config
,
2061 enum radeon_surf_mode mode
,
2062 struct radeon_surf
*surf
)
2066 r
= surf_config_sanity(config
, surf
->flags
);
2070 if (info
->chip_class
>= GFX9
)
2071 r
= gfx9_compute_surface(addrlib
, info
, config
, mode
, surf
);
2073 r
= gfx6_compute_surface(addrlib
->handle
, info
, config
, mode
, surf
);
2078 /* Determine the memory layout of multiple allocations in one buffer. */
2079 surf
->total_size
= surf
->surf_size
;
2080 surf
->alignment
= surf
->surf_alignment
;
2082 if (surf
->htile_size
) {
2083 surf
->htile_offset
= align64(surf
->total_size
, surf
->htile_alignment
);
2084 surf
->total_size
= surf
->htile_offset
+ surf
->htile_size
;
2085 surf
->alignment
= MAX2(surf
->alignment
, surf
->htile_alignment
);
2088 if (surf
->fmask_size
) {
2089 assert(config
->info
.samples
>= 2);
2090 surf
->fmask_offset
= align64(surf
->total_size
, surf
->fmask_alignment
);
2091 surf
->total_size
= surf
->fmask_offset
+ surf
->fmask_size
;
2092 surf
->alignment
= MAX2(surf
->alignment
, surf
->fmask_alignment
);
2095 /* Single-sample CMASK is in a separate buffer. */
2096 if (surf
->cmask_size
&& config
->info
.samples
>= 2) {
2097 surf
->cmask_offset
= align64(surf
->total_size
, surf
->cmask_alignment
);
2098 surf
->total_size
= surf
->cmask_offset
+ surf
->cmask_size
;
2099 surf
->alignment
= MAX2(surf
->alignment
, surf
->cmask_alignment
);
2102 if (surf
->is_displayable
)
2103 surf
->flags
|= RADEON_SURF_SCANOUT
;
2105 if (surf
->dcc_size
&&
2106 /* dcc_size is computed on GFX9+ only if it's displayable. */
2107 (info
->chip_class
>= GFX9
|| !get_display_flag(config
, surf
))) {
2108 /* It's better when displayable DCC is immediately after
2109 * the image due to hw-specific reasons.
2111 if (info
->chip_class
>= GFX9
&&
2112 surf
->u
.gfx9
.dcc_retile_num_elements
) {
2113 /* Add space for the displayable DCC buffer. */
2114 surf
->display_dcc_offset
=
2115 align64(surf
->total_size
, surf
->u
.gfx9
.display_dcc_alignment
);
2116 surf
->total_size
= surf
->display_dcc_offset
+
2117 surf
->u
.gfx9
.display_dcc_size
;
2119 /* Add space for the DCC retile buffer. (16-bit or 32-bit elements) */
2120 surf
->dcc_retile_map_offset
=
2121 align64(surf
->total_size
, info
->tcc_cache_line_size
);
2123 if (surf
->u
.gfx9
.dcc_retile_use_uint16
) {
2124 surf
->total_size
= surf
->dcc_retile_map_offset
+
2125 surf
->u
.gfx9
.dcc_retile_num_elements
* 2;
2127 surf
->total_size
= surf
->dcc_retile_map_offset
+
2128 surf
->u
.gfx9
.dcc_retile_num_elements
* 4;
2132 surf
->dcc_offset
= align64(surf
->total_size
, surf
->dcc_alignment
);
2133 surf
->total_size
= surf
->dcc_offset
+ surf
->dcc_size
;
2134 surf
->alignment
= MAX2(surf
->alignment
, surf
->dcc_alignment
);
2140 /* This is meant to be used for disabling DCC. */
2141 void ac_surface_zero_dcc_fields(struct radeon_surf
*surf
)
2143 surf
->dcc_offset
= 0;
2144 surf
->display_dcc_offset
= 0;
2145 surf
->dcc_retile_map_offset
= 0;
/* Decode the TILE_SPLIT field of the AMDGPU tiling flags (a 3-bit code)
 * into the tile split size in bytes. Unknown codes map to 1024 via the
 * default label placed on the "case 4" entry.
 */
static unsigned eg_tile_split(unsigned tile_split)
{
   switch (tile_split) {
   case 0:
      tile_split = 64;
      break;
   case 1:
      tile_split = 128;
      break;
   case 2:
      tile_split = 256;
      break;
   case 3:
      tile_split = 512;
      break;
   default:
   case 4:
      tile_split = 1024;
      break;
   case 5:
      tile_split = 2048;
      break;
   case 6:
      tile_split = 4096;
      break;
   }
   return tile_split;
}
/* Encode a tile split size in bytes back into the 3-bit TILE_SPLIT code.
 * Exact inverse of eg_tile_split(); unknown sizes map to code 4 (1024 bytes)
 * via the default label.
 */
static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
   switch (eg_tile_split) {
   case 64:
      return 0;
   case 128:
      return 1;
   case 256:
      return 2;
   case 512:
      return 3;
   default:
   case 1024:
      return 4;
   case 2048:
      return 5;
   case 4096:
      return 6;
   }
}
/* NOTE(review): presumably local fallbacks for tiling-flag bits not yet in
 * the imported drm-uapi/amdgpu_drm.h — confirm against the kernel header.
 */
#define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_SHIFT 45
#define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_MASK 0x3
2180 /* This should be called before ac_compute_surface. */
2181 void ac_surface_set_bo_metadata(const struct radeon_info
*info
,
2182 struct radeon_surf
*surf
, uint64_t tiling_flags
,
2183 enum radeon_surf_mode
*mode
)
2187 if (info
->chip_class
>= GFX9
) {
2188 surf
->u
.gfx9
.surf
.swizzle_mode
= AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
2189 surf
->u
.gfx9
.dcc
.independent_64B_blocks
= AMDGPU_TILING_GET(tiling_flags
, DCC_INDEPENDENT_64B
);
2190 surf
->u
.gfx9
.dcc
.independent_128B_blocks
= AMDGPU_TILING_GET(tiling_flags
, DCC_INDEPENDENT_128B
);
2191 surf
->u
.gfx9
.dcc
.max_compressed_block_size
= AMDGPU_TILING_GET(tiling_flags
, DCC_MAX_COMPRESSED_BLOCK_SIZE
);
2192 surf
->u
.gfx9
.display_dcc_pitch_max
= AMDGPU_TILING_GET(tiling_flags
, DCC_PITCH_MAX
);
2193 scanout
= AMDGPU_TILING_GET(tiling_flags
, SCANOUT
);
2194 *mode
= surf
->u
.gfx9
.surf
.swizzle_mode
> 0 ? RADEON_SURF_MODE_2D
: RADEON_SURF_MODE_LINEAR_ALIGNED
;
2196 surf
->u
.legacy
.pipe_config
= AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
2197 surf
->u
.legacy
.bankw
= 1 << AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
2198 surf
->u
.legacy
.bankh
= 1 << AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
2199 surf
->u
.legacy
.tile_split
= eg_tile_split(AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
));
2200 surf
->u
.legacy
.mtilea
= 1 << AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
2201 surf
->u
.legacy
.num_banks
= 2 << AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
2202 scanout
= AMDGPU_TILING_GET(tiling_flags
, MICRO_TILE_MODE
) == 0; /* DISPLAY */
2204 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == 4) /* 2D_TILED_THIN1 */
2205 *mode
= RADEON_SURF_MODE_2D
;
2206 else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == 2) /* 1D_TILED_THIN1 */
2207 *mode
= RADEON_SURF_MODE_1D
;
2209 *mode
= RADEON_SURF_MODE_LINEAR_ALIGNED
;
2213 surf
->flags
|= RADEON_SURF_SCANOUT
;
2215 surf
->flags
&= ~RADEON_SURF_SCANOUT
;
2218 void ac_surface_get_bo_metadata(const struct radeon_info
*info
,
2219 struct radeon_surf
*surf
, uint64_t *tiling_flags
)
2223 if (info
->chip_class
>= GFX9
) {
2224 uint64_t dcc_offset
= 0;
2226 if (surf
->dcc_offset
) {
2227 dcc_offset
= surf
->display_dcc_offset
? surf
->display_dcc_offset
2229 assert((dcc_offset
>> 8) != 0 && (dcc_offset
>> 8) < (1 << 24));
2232 *tiling_flags
|= AMDGPU_TILING_SET(SWIZZLE_MODE
, surf
->u
.gfx9
.surf
.swizzle_mode
);
2233 *tiling_flags
|= AMDGPU_TILING_SET(DCC_OFFSET_256B
, dcc_offset
>> 8);
2234 *tiling_flags
|= AMDGPU_TILING_SET(DCC_PITCH_MAX
, surf
->u
.gfx9
.display_dcc_pitch_max
);
2235 *tiling_flags
|= AMDGPU_TILING_SET(DCC_INDEPENDENT_64B
, surf
->u
.gfx9
.dcc
.independent_64B_blocks
);
2236 *tiling_flags
|= AMDGPU_TILING_SET(DCC_INDEPENDENT_128B
, surf
->u
.gfx9
.dcc
.independent_128B_blocks
);
2237 *tiling_flags
|= AMDGPU_TILING_SET(DCC_MAX_COMPRESSED_BLOCK_SIZE
, surf
->u
.gfx9
.dcc
.max_compressed_block_size
);
2238 *tiling_flags
|= AMDGPU_TILING_SET(SCANOUT
, (surf
->flags
& RADEON_SURF_SCANOUT
) != 0);
2240 if (surf
->u
.legacy
.level
[0].mode
>= RADEON_SURF_MODE_2D
)
2241 *tiling_flags
|= AMDGPU_TILING_SET(ARRAY_MODE
, 4); /* 2D_TILED_THIN1 */
2242 else if (surf
->u
.legacy
.level
[0].mode
>= RADEON_SURF_MODE_1D
)
2243 *tiling_flags
|= AMDGPU_TILING_SET(ARRAY_MODE
, 2); /* 1D_TILED_THIN1 */
2245 *tiling_flags
|= AMDGPU_TILING_SET(ARRAY_MODE
, 1); /* LINEAR_ALIGNED */
2247 *tiling_flags
|= AMDGPU_TILING_SET(PIPE_CONFIG
, surf
->u
.legacy
.pipe_config
);
2248 *tiling_flags
|= AMDGPU_TILING_SET(BANK_WIDTH
, util_logbase2(surf
->u
.legacy
.bankw
));
2249 *tiling_flags
|= AMDGPU_TILING_SET(BANK_HEIGHT
, util_logbase2(surf
->u
.legacy
.bankh
));
2250 if (surf
->u
.legacy
.tile_split
)
2251 *tiling_flags
|= AMDGPU_TILING_SET(TILE_SPLIT
, eg_tile_split_rev(surf
->u
.legacy
.tile_split
));
2252 *tiling_flags
|= AMDGPU_TILING_SET(MACRO_TILE_ASPECT
, util_logbase2(surf
->u
.legacy
.mtilea
));
2253 *tiling_flags
|= AMDGPU_TILING_SET(NUM_BANKS
, util_logbase2(surf
->u
.legacy
.num_banks
)-1);
2255 if (surf
->flags
& RADEON_SURF_SCANOUT
)
2256 *tiling_flags
|= AMDGPU_TILING_SET(MICRO_TILE_MODE
, 0); /* DISPLAY_MICRO_TILING */
2258 *tiling_flags
|= AMDGPU_TILING_SET(MICRO_TILE_MODE
, 1); /* THIN_MICRO_TILING */
2262 static uint32_t ac_get_umd_metadata_word1(const struct radeon_info
*info
)
2264 return (ATI_VENDOR_ID
<< 16) | info
->pci_id
;
2267 /* This should be called after ac_compute_surface. */
2268 bool ac_surface_set_umd_metadata(const struct radeon_info
*info
,
2269 struct radeon_surf
*surf
,
2270 unsigned num_storage_samples
,
2271 unsigned num_mipmap_levels
,
2272 unsigned size_metadata
,
2273 uint32_t metadata
[64])
2275 uint32_t *desc
= &metadata
[2];
2278 if (info
->chip_class
>= GFX9
)
2279 offset
= surf
->u
.gfx9
.surf_offset
;
2281 offset
= surf
->u
.legacy
.level
[0].offset
;
2283 if (offset
|| /* Non-zero planes ignore metadata. */
2284 size_metadata
< 10 * 4 || /* at least 2(header) + 8(desc) dwords */
2285 metadata
[0] == 0 || /* invalid version number */
2286 metadata
[1] != ac_get_umd_metadata_word1(info
)) /* invalid PCI ID */ {
2287 /* Disable DCC because it might not be enabled. */
2288 ac_surface_zero_dcc_fields(surf
);
2290 /* Don't report an error if the texture comes from an incompatible driver,
2291 * but this might not work.
2296 /* Validate that sample counts and the number of mipmap levels match. */
2297 unsigned desc_last_level
= G_008F1C_LAST_LEVEL(desc
[3]);
2298 unsigned type
= G_008F1C_TYPE(desc
[3]);
2300 if (type
== V_008F1C_SQ_RSRC_IMG_2D_MSAA
|| type
== V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY
) {
2301 unsigned log_samples
= util_logbase2(MAX2(1, num_storage_samples
));
2303 if (desc_last_level
!= log_samples
) {
2305 "amdgpu: invalid MSAA texture import, "
2306 "metadata has log2(samples) = %u, the caller set %u\n",
2307 desc_last_level
, log_samples
);
2311 if (desc_last_level
!= num_mipmap_levels
- 1) {
2313 "amdgpu: invalid mipmapped texture import, "
2314 "metadata has last_level = %u, the caller set %u\n",
2315 desc_last_level
, num_mipmap_levels
- 1);
2320 if (info
->chip_class
>= GFX8
&& G_008F28_COMPRESSION_EN(desc
[6])) {
2321 /* Read DCC information. */
2322 switch (info
->chip_class
) {
2324 surf
->dcc_offset
= (uint64_t)desc
[7] << 8;
2329 ((uint64_t)desc
[7] << 8) | ((uint64_t)G_008F24_META_DATA_ADDRESS(desc
[5]) << 40);
2330 surf
->u
.gfx9
.dcc
.pipe_aligned
= G_008F24_META_PIPE_ALIGNED(desc
[5]);
2331 surf
->u
.gfx9
.dcc
.rb_aligned
= G_008F24_META_RB_ALIGNED(desc
[5]);
2333 /* If DCC is unaligned, this can only be a displayable image. */
2334 if (!surf
->u
.gfx9
.dcc
.pipe_aligned
&& !surf
->u
.gfx9
.dcc
.rb_aligned
)
2335 assert(surf
->is_displayable
);
2341 ((uint64_t)G_00A018_META_DATA_ADDRESS_LO(desc
[6]) << 8) | ((uint64_t)desc
[7] << 16);
2342 surf
->u
.gfx9
.dcc
.pipe_aligned
= G_00A018_META_PIPE_ALIGNED(desc
[6]);
2350 /* Disable DCC. dcc_offset is always set by texture_from_handle
2351 * and must be cleared here.
2353 ac_surface_zero_dcc_fields(surf
);
2359 void ac_surface_get_umd_metadata(const struct radeon_info
*info
,
2360 struct radeon_surf
*surf
,
2361 unsigned num_mipmap_levels
,
2363 unsigned *size_metadata
, uint32_t metadata
[64])
2365 /* Clear the base address and set the relative DCC offset. */
2367 desc
[1] &= C_008F14_BASE_ADDRESS_HI
;
2369 switch (info
->chip_class
) {
2374 desc
[7] = surf
->dcc_offset
>> 8;
2377 desc
[7] = surf
->dcc_offset
>> 8;
2378 desc
[5] &= C_008F24_META_DATA_ADDRESS
;
2379 desc
[5] |= S_008F24_META_DATA_ADDRESS(surf
->dcc_offset
>> 40);
2383 desc
[6] &= C_00A018_META_DATA_ADDRESS_LO
;
2384 desc
[6] |= S_00A018_META_DATA_ADDRESS_LO(surf
->dcc_offset
>> 8);
2385 desc
[7] = surf
->dcc_offset
>> 16;
2391 /* Metadata image format format version 1:
2392 * [0] = 1 (metadata format identifier)
2393 * [1] = (VENDOR_ID << 16) | PCI_ID
2394 * [2:9] = image descriptor for the whole resource
2395 * [2] is always 0, because the base address is cleared
2396 * [9] is the DCC offset bits [39:8] from the beginning of
2398 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
2401 metadata
[0] = 1; /* metadata image format version 1 */
2403 /* Tiling modes are ambiguous without a PCI ID. */
2404 metadata
[1] = ac_get_umd_metadata_word1(info
);
2406 /* Dwords [2:9] contain the image descriptor. */
2407 memcpy(&metadata
[2], desc
, 8 * 4);
2408 *size_metadata
= 10 * 4;
2410 /* Dwords [10:..] contain the mipmap level offsets. */
2411 if (info
->chip_class
<= GFX8
) {
2412 for (unsigned i
= 0; i
< num_mipmap_levels
; i
++)
2413 metadata
[10 + i
] = surf
->u
.legacy
.level
[i
].offset
>> 8;
2415 *size_metadata
+= num_mipmap_levels
* 4;
2419 void ac_surface_override_offset_stride(const struct radeon_info
*info
,
2420 struct radeon_surf
*surf
,
2421 unsigned num_mipmap_levels
,
2422 uint64_t offset
, unsigned pitch
)
2424 if (info
->chip_class
>= GFX9
) {
2426 surf
->u
.gfx9
.surf_pitch
= pitch
;
2427 if (num_mipmap_levels
== 1)
2428 surf
->u
.gfx9
.surf
.epitch
= pitch
- 1;
2429 surf
->u
.gfx9
.surf_slice_size
=
2430 (uint64_t)pitch
* surf
->u
.gfx9
.surf_height
* surf
->bpe
;
2432 surf
->u
.gfx9
.surf_offset
= offset
;
2433 if (surf
->u
.gfx9
.stencil_offset
)
2434 surf
->u
.gfx9
.stencil_offset
+= offset
;
2437 surf
->u
.legacy
.level
[0].nblk_x
= pitch
;
2438 surf
->u
.legacy
.level
[0].slice_size_dw
=
2439 ((uint64_t)pitch
* surf
->u
.legacy
.level
[0].nblk_y
* surf
->bpe
) / 4;
2443 for (unsigned i
= 0; i
< ARRAY_SIZE(surf
->u
.legacy
.level
); ++i
)
2444 surf
->u
.legacy
.level
[i
].offset
+= offset
;
2448 if (surf
->htile_offset
)
2449 surf
->htile_offset
+= offset
;
2450 if (surf
->fmask_offset
)
2451 surf
->fmask_offset
+= offset
;
2452 if (surf
->cmask_offset
)
2453 surf
->cmask_offset
+= offset
;
2454 if (surf
->dcc_offset
)
2455 surf
->dcc_offset
+= offset
;
2456 if (surf
->display_dcc_offset
)
2457 surf
->display_dcc_offset
+= offset
;
2458 if (surf
->dcc_retile_map_offset
)
2459 surf
->dcc_retile_map_offset
+= offset
;