2 * Copyright © 2011 Red Hat All Rights Reserved.
3 * Copyright © 2017 Advanced Micro Devices, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
28 #include "ac_surface.h"
29 #include "amd_family.h"
30 #include "addrlib/src/amdgpu_asic_addr.h"
31 #include "ac_gpu_info.h"
32 #include "util/macros.h"
33 #include "util/u_atomic.h"
34 #include "util/u_math.h"
41 #include "drm-uapi/amdgpu_drm.h"
43 #include "addrlib/inc/addrinterface.h"
45 #ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
46 #define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
49 #ifndef CIASICIDGFXENGINE_ARCTICISLAND
50 #define CIASICIDGFXENGINE_ARCTICISLAND 0x0000000D
/* System-memory allocation callback handed to addrlib via
 * ADDR_CREATE_INPUT::callbacks. addrlib calls this for its internal
 * allocations; pInput->sizeInBytes is the requested size.
 * Returns NULL on allocation failure (addrlib handles that case). */
static void *ADDR_API allocSysMem(const ADDR_ALLOCSYSMEM_INPUT *pInput)
{
   return malloc(pInput->sizeInBytes);
}
58 static ADDR_E_RETURNCODE ADDR_API
freeSysMem(const ADDR_FREESYSMEM_INPUT
* pInput
)
60 free(pInput
->pVirtAddr
);
/* Create an addrlib handle for the given GPU.
 *
 * Fills ADDR_CREATE_INPUT from the kernel-reported GPU info (chip family,
 * revision, GB_ADDR_CONFIG, tiling tables for pre-AI families), registers the
 * malloc/free callbacks above, and calls AddrCreate. Optionally queries the
 * maximum base alignment for the caller.
 *
 * Returns the addrlib handle, or NULL on failure.
 *
 * NOTE(review): recovered from a line-dropped extraction; lines marked
 * "restored" were re-derived from context — verify against version control. */
ADDR_HANDLE amdgpu_addr_create(const struct radeon_info *info,
                               const struct amdgpu_gpu_info *amdinfo,
                               uint64_t *max_alignment)
{
   ADDR_CREATE_INPUT addrCreateInput = {0};
   ADDR_CREATE_OUTPUT addrCreateOutput = {0};
   ADDR_REGISTER_VALUE regValue = {0};
   ADDR_CREATE_FLAGS createFlags = {{0}};
   ADDR_GET_MAX_ALIGNMENTS_OUTPUT addrGetMaxAlignmentsOutput = {0};
   ADDR_E_RETURNCODE addrRet;

   addrCreateInput.size = sizeof(ADDR_CREATE_INPUT);
   addrCreateOutput.size = sizeof(ADDR_CREATE_OUTPUT);

   regValue.gbAddrConfig = amdinfo->gb_addr_cfg;
   createFlags.value = 0;

   addrCreateInput.chipFamily = info->family_id;
   addrCreateInput.chipRevision = info->chip_external_rev;

   if (addrCreateInput.chipFamily == FAMILY_UNKNOWN)
      return NULL; /* restored: unknown chips cannot be programmed */

   if (addrCreateInput.chipFamily >= FAMILY_AI) {
      addrCreateInput.chipEngine = CIASICIDGFXENGINE_ARCTICISLAND;
   } else { /* restored branch split */
      /* Pre-gfx9 families need the tiling tables from the kernel. */
      regValue.noOfBanks = amdinfo->mc_arb_ramcfg & 0x3;
      regValue.noOfRanks = (amdinfo->mc_arb_ramcfg & 0x4) >> 2;

      regValue.backendDisables = amdinfo->enabled_rb_pipes_mask;
      regValue.pTileConfig = amdinfo->gb_tile_mode;
      regValue.noOfEntries = ARRAY_SIZE(amdinfo->gb_tile_mode);
      if (addrCreateInput.chipFamily == FAMILY_SI) {
         /* SI has no macro tile mode table. */
         regValue.pMacroTileConfig = NULL;
         regValue.noOfMacroEntries = 0;
      } else { /* restored */
         regValue.pMacroTileConfig = amdinfo->gb_macro_tile_mode;
         regValue.noOfMacroEntries = ARRAY_SIZE(amdinfo->gb_macro_tile_mode);
      }

      createFlags.useTileIndex = 1;
      createFlags.useHtileSliceAlign = 1;

      addrCreateInput.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
   }

   addrCreateInput.callbacks.allocSysMem = allocSysMem;
   addrCreateInput.callbacks.freeSysMem = freeSysMem;
   addrCreateInput.callbacks.debugPrint = 0;
   addrCreateInput.createFlags = createFlags;
   addrCreateInput.regValue = regValue;

   addrRet = AddrCreate(&addrCreateInput, &addrCreateOutput);
   if (addrRet != ADDR_OK)
      return NULL; /* restored */

   if (max_alignment) { /* restored: optional out-parameter guard — confirm */
      addrRet = AddrGetMaxAlignments(addrCreateOutput.hLib, &addrGetMaxAlignmentsOutput);
      if (addrRet == ADDR_OK) {
         *max_alignment = addrGetMaxAlignmentsOutput.baseAlign;
      }
   }
   return addrCreateOutput.hLib;
}
/* Validate a surface config before handing it to the layout computation.
 * Returns 0 on success, a negative errno value on invalid input.
 *
 * NOTE(review): recovered from a line-dropped extraction; the second
 * parameter, the switch case labels and the return statements were restored
 * from context — verify against version control. */
static int surf_config_sanity(const struct ac_surf_config *config,
                              unsigned flags /* restored parameter */)
{
   /* FMASK is allocated together with the color surface and can't be
    * allocated separately.
    */
   assert(!(flags & RADEON_SURF_FMASK));
   if (flags & RADEON_SURF_FMASK)
      return -EINVAL; /* restored */

   /* all dimension must be at least 1 ! */
   if (!config->info.width || !config->info.height || !config->info.depth ||
       !config->info.array_size || !config->info.levels)
      return -EINVAL; /* restored */

   /* Only power-of-two sample counts up to 8 (16 only for color). */
   switch (config->info.samples) {
   /* restored case labels below — confirm */
   case 0:
   case 1:
   case 2:
   case 4:
   case 8:
      break;
   case 16:
      if (flags & RADEON_SURF_Z_OR_SBUFFER)
         return -EINVAL; /* restored */
      break;
   default:
      return -EINVAL;
   }

   if (!(flags & RADEON_SURF_Z_OR_SBUFFER)) {
      switch (config->info.storage_samples) {
      /* restored case labels below — confirm */
      case 0:
      case 1:
      case 2:
      case 4:
      case 8:
         break;
      default:
         return -EINVAL;
      }
   }

   /* 3D textures can't be arrays; cube maps can't have depth. */
   if (config->is_3d && config->info.array_size > 1)
      return -EINVAL; /* restored */

   if (config->is_cube && config->info.depth > 1)
      return -EINVAL; /* restored */

   return 0; /* restored */
}
/* Compute the layout of one mip level (depth/color or stencil plane) via
 * addrlib, and accumulate the results into surf: per-level offset/pitch/
 * height/mode, running surf_size, and — for level-0-compatible color
 * surfaces — the DCC and HTILE metadata sizes.
 *
 * Returns 0 on success or the addrlib error code.
 *
 * NOTE(review): recovered from a line-dropped extraction; lines marked
 * "restored" were re-derived from context — verify against version control. */
static int gfx6_compute_level(ADDR_HANDLE addrlib,
                              const struct ac_surf_config *config,
                              struct radeon_surf *surf, bool is_stencil,
                              unsigned level, bool compressed,
                              ADDR_COMPUTE_SURFACE_INFO_INPUT *AddrSurfInfoIn,
                              ADDR_COMPUTE_SURFACE_INFO_OUTPUT *AddrSurfInfoOut,
                              ADDR_COMPUTE_DCCINFO_INPUT *AddrDccIn,
                              ADDR_COMPUTE_DCCINFO_OUTPUT *AddrDccOut,
                              ADDR_COMPUTE_HTILE_INFO_INPUT *AddrHtileIn,
                              ADDR_COMPUTE_HTILE_INFO_OUTPUT *AddrHtileOut)
{
   struct legacy_surf_level *surf_level;
   ADDR_E_RETURNCODE ret;

   AddrSurfInfoIn->mipLevel = level;
   AddrSurfInfoIn->width = u_minify(config->info.width, level);
   AddrSurfInfoIn->height = u_minify(config->info.height, level);

   /* Make GFX6 linear surfaces compatible with GFX9 for hybrid graphics,
    * because GFX9 needs linear alignment of 256 bytes.
    */
   if (config->info.levels == 1 &&
       AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED &&
       AddrSurfInfoIn->bpp &&
       util_is_power_of_two_or_zero(AddrSurfInfoIn->bpp)) {
      unsigned alignment = 256 / (AddrSurfInfoIn->bpp / 8);

      AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, alignment);
   }

   /* addrlib assumes the bytes/pixel is a divisor of 64, which is not
    * true for r32g32b32 formats. */
   if (AddrSurfInfoIn->bpp == 96) {
      assert(config->info.levels == 1);
      assert(AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED);

      /* The least common multiple of 64 bytes and 12 bytes/pixel is
       * 192 bytes, or 16 pixels. */
      AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, 16);
   }

   if (config->is_3d) /* restored condition — confirm */
      AddrSurfInfoIn->numSlices = u_minify(config->info.depth, level);
   else if (config->is_cube)
      AddrSurfInfoIn->numSlices = 6;
   else /* restored */
      AddrSurfInfoIn->numSlices = config->info.array_size;

   if (level > 0) { /* restored guard — base pitch only applies past level 0 */
      /* Set the base level pitch. This is needed for calculation
       * of non-zero levels. */
      if (is_stencil) /* restored */
         AddrSurfInfoIn->basePitch = surf->u.legacy.stencil_level[0].nblk_x;
      else /* restored */
         AddrSurfInfoIn->basePitch = surf->u.legacy.level[0].nblk_x;

      /* Convert blocks to pixels for compressed formats. */
      if (compressed) /* restored */
         AddrSurfInfoIn->basePitch *= surf->blk_w;
   }

   ret = AddrComputeSurfaceInfo(addrlib,
                                AddrSurfInfoIn,
                                AddrSurfInfoOut);
   if (ret != ADDR_OK) {
      return ret; /* restored */
   }

   surf_level = is_stencil ? &surf->u.legacy.stencil_level[level] : &surf->u.legacy.level[level];
   surf_level->offset = align64(surf->surf_size, AddrSurfInfoOut->baseAlign);
   surf_level->slice_size_dw = AddrSurfInfoOut->sliceSize / 4;
   surf_level->nblk_x = AddrSurfInfoOut->pitch;
   surf_level->nblk_y = AddrSurfInfoOut->height;

   /* Translate the addrlib tile mode back to the radeon_surf mode enum. */
   switch (AddrSurfInfoOut->tileMode) {
   case ADDR_TM_LINEAR_ALIGNED:
      surf_level->mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
      break; /* restored */
   case ADDR_TM_1D_TILED_THIN1:
      surf_level->mode = RADEON_SURF_MODE_1D;
      break; /* restored */
   case ADDR_TM_2D_TILED_THIN1:
      surf_level->mode = RADEON_SURF_MODE_2D;
      break; /* restored */
   default:
      assert(0); /* restored */
   }

   if (is_stencil) /* restored */
      surf->u.legacy.stencil_tiling_index[level] = AddrSurfInfoOut->tileIndex;
   else /* restored */
      surf->u.legacy.tiling_index[level] = AddrSurfInfoOut->tileIndex;

   surf->surf_size = surf_level->offset + AddrSurfInfoOut->surfSize;

   /* Clear DCC fields at the beginning. */
   surf_level->dcc_offset = 0;

   /* The previous level's flag tells us if we can use DCC for this level. */
   if (AddrSurfInfoIn->flags.dccCompatible &&
       (level == 0 || AddrDccOut->subLvlCompressible)) {
      bool prev_level_clearable = level == 0 ||
                                  AddrDccOut->dccRamSizeAligned;

      AddrDccIn->colorSurfSize = AddrSurfInfoOut->surfSize;
      AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
      AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
      AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
      AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;

      ret = AddrComputeDccInfo(addrlib,
                               AddrDccIn, AddrDccOut); /* restored args */

      if (ret == ADDR_OK) {
         surf_level->dcc_offset = surf->dcc_size;
         surf->num_dcc_levels = level + 1;
         surf->dcc_size = surf_level->dcc_offset + AddrDccOut->dccRamSize;
         surf->dcc_alignment = MAX2(surf->dcc_alignment, AddrDccOut->dccRamBaseAlign);

         /* If the DCC size of a subresource (1 mip level or 1 slice)
          * is not aligned, the DCC memory layout is not contiguous for
          * that subresource, which means we can't use fast clear.
          *
          * We only do fast clears for whole mipmap levels. If we did
          * per-slice fast clears, the same restriction would apply.
          * (i.e. only compute the slice size and see if it's aligned)
          *
          * The last level can be non-contiguous and still be clearable
          * if it's interleaved with the next level that doesn't exist.
          */
         if (AddrDccOut->dccRamSizeAligned ||
             (prev_level_clearable && level == config->info.levels - 1))
            surf_level->dcc_fast_clear_size = AddrDccOut->dccFastClearSize;
         else /* restored */
            surf_level->dcc_fast_clear_size = 0;

         /* Compute the DCC slice size because addrlib doesn't
          * provide this info. As DCC memory is linear (each
          * slice is the same size) it's easy to compute.
          */
         surf->dcc_slice_size = AddrDccOut->dccRamSize / config->info.array_size;

         /* For arrays, we have to compute the DCC info again
          * with one slice size to get a correct fast clear
          * size.
          */
         if (config->info.array_size > 1) {
            AddrDccIn->colorSurfSize = AddrSurfInfoOut->sliceSize;
            AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
            AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
            AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
            AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;

            ret = AddrComputeDccInfo(addrlib,
                                     AddrDccIn, AddrDccOut);
            if (ret == ADDR_OK) {
               /* If the DCC memory isn't properly
                * aligned, the data are interleaved
                * across slices.
                */
               if (AddrDccOut->dccRamSizeAligned)
                  surf_level->dcc_slice_fast_clear_size = AddrDccOut->dccFastClearSize;
               else /* restored */
                  surf_level->dcc_slice_fast_clear_size = 0;
            } /* restored closing brace */

            if (surf->flags & RADEON_SURF_CONTIGUOUS_DCC_LAYERS &&
                surf->dcc_slice_size != surf_level->dcc_slice_fast_clear_size) {
               /* Disable DCC entirely when layers must be contiguous
                * but the per-slice layout isn't. */
               surf->dcc_size = 0; /* restored — confirm */
               surf->num_dcc_levels = 0;
               AddrDccOut->subLvlCompressible = false;
            } /* restored */
         } else { /* restored: single-layer case */
            surf_level->dcc_slice_fast_clear_size = surf_level->dcc_fast_clear_size;
         }
      }
   }

   /* HTILE (depth metadata): only for the depth plane, 2D-tiled, level 0. */
   if (!is_stencil && /* restored condition — confirm */
       AddrSurfInfoIn->flags.depth &&
       surf_level->mode == RADEON_SURF_MODE_2D &&
       level == 0 && /* restored condition — confirm */
       !(surf->flags & RADEON_SURF_NO_HTILE)) {
      AddrHtileIn->flags.tcCompatible = AddrSurfInfoOut->tcCompatible;
      AddrHtileIn->pitch = AddrSurfInfoOut->pitch;
      AddrHtileIn->height = AddrSurfInfoOut->height;
      AddrHtileIn->numSlices = AddrSurfInfoOut->depth;
      AddrHtileIn->blockWidth = ADDR_HTILE_BLOCKSIZE_8;
      AddrHtileIn->blockHeight = ADDR_HTILE_BLOCKSIZE_8;
      AddrHtileIn->pTileInfo = AddrSurfInfoOut->pTileInfo;
      AddrHtileIn->tileIndex = AddrSurfInfoOut->tileIndex;
      AddrHtileIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;

      ret = AddrComputeHtileInfo(addrlib,
                                 AddrHtileIn, AddrHtileOut); /* restored args */

      if (ret == ADDR_OK) {
         surf->htile_size = AddrHtileOut->htileBytes;
         surf->htile_slice_size = AddrHtileOut->sliceSize;
         surf->htile_alignment = AddrHtileOut->baseAlign;
      }
   }

   return 0; /* restored */
}
/* Derive surf->micro_tile_mode from the level-0 tiling index by decoding the
 * GB_TILE_MODE register value for that index. GFX7+ moved the field, hence
 * the two decode macros. Must run after tiling_index[0] is known. */
static void gfx6_set_micro_tile_mode(struct radeon_surf *surf,
                                     const struct radeon_info *info)
{
   uint32_t tile_mode = info->si_tile_mode_array[surf->u.legacy.tiling_index[0]];

   if (info->chip_class >= GFX7)
      surf->micro_tile_mode = G_009910_MICRO_TILE_MODE_NEW(tile_mode);
   else /* restored from a line-dropped extraction — confirm */
      surf->micro_tile_mode = G_009910_MICRO_TILE_MODE(tile_mode);
}
/* Compute the GFX7+ macro tile mode index from the effective tile size:
 * log2 of the per-tile byte count (clamped by tile_split), relative to 64B.
 *
 * NOTE(review): the loop body and return were dropped by the extraction and
 * restored from context — verify against version control. */
static unsigned cik_get_macro_tile_index(struct radeon_surf *surf)
{
   unsigned index, tileb;

   tileb = 8 * 8 * surf->bpe;
   tileb = MIN2(surf->u.legacy.tile_split, tileb);

   for (index = 0; tileb > 64; index++)
      tileb >>= 1; /* restored */

   assert(index < 16); /* restored */
   return index; /* restored */
}
/* Decide whether addrlib should treat the surface as displayable (scanout).
 * True only for non-3D, non-depth scanout surfaces without MSAA whose
 * format the display hardware can handle.
 *
 * NOTE(review): recovered from a line-dropped extraction; restored lines are
 * marked — verify against version control. */
static bool get_display_flag(const struct ac_surf_config *config,
                             const struct radeon_surf *surf)
{
   unsigned num_channels = config->info.num_channels;
   unsigned bpe = surf->bpe;

   if (!config->is_3d &&
       !config->is_cube && /* restored condition — confirm */
       !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
       surf->flags & RADEON_SURF_SCANOUT &&
       config->info.samples <= 1 &&
       surf->blk_w <= 2 && surf->blk_h == 1) {
      /* Subsampled formats (2x1 blocks). */
      if (surf->blk_w == 2 && surf->blk_h == 1)
         return true; /* restored */

      if (/* RGBA8 or RGBA16F */
          (bpe >= 4 && bpe <= 8 && num_channels == 4) ||
          /* R5G6B5 or R5G5B5A1 */
          (bpe == 2 && num_channels >= 3) ||
          /* C8 palette */
          (bpe == 1 && num_channels == 1))
         return true; /* restored */
   }
   return false; /* restored */
}
/* This must be called after the first level is computed.
 *
 * Copy surface-global settings like pipe/bank config from level 0 surface
 * computation, and compute tile swizzle.
 *
 * NOTE(review): recovered from a line-dropped extraction; restored lines are
 * marked — verify against version control. */
static int gfx6_surface_settings(ADDR_HANDLE addrlib,
                                 const struct radeon_info *info,
                                 const struct ac_surf_config *config,
                                 ADDR_COMPUTE_SURFACE_INFO_OUTPUT *csio,
                                 struct radeon_surf *surf)
{
   surf->surf_alignment = csio->baseAlign;
   /* pipeConfig is +1 relative to the GB_TILE_MODE encoding. */
   surf->u.legacy.pipe_config = csio->pTileInfo->pipeConfig - 1;
   gfx6_set_micro_tile_mode(surf, info);

   /* For 2D modes only. */
   if (csio->tileMode >= ADDR_TM_2D_TILED_THIN1) {
      surf->u.legacy.bankw = csio->pTileInfo->bankWidth;
      surf->u.legacy.bankh = csio->pTileInfo->bankHeight;
      surf->u.legacy.mtilea = csio->pTileInfo->macroAspectRatio;
      surf->u.legacy.tile_split = csio->pTileInfo->tileSplitBytes;
      surf->u.legacy.num_banks = csio->pTileInfo->banks;
      surf->u.legacy.macro_tile_index = csio->macroModeIndex;
   } else { /* restored */
      surf->u.legacy.macro_tile_index = 0;
   }

   /* Compute tile swizzle. */
   /* TODO: fix tile swizzle with mipmapping for GFX6 */
   if ((info->chip_class >= GFX7 || config->info.levels == 1) &&
       config->info.surf_index &&
       surf->u.legacy.level[0].mode == RADEON_SURF_MODE_2D &&
       !(surf->flags & (RADEON_SURF_Z_OR_SBUFFER | RADEON_SURF_SHAREABLE)) &&
       !get_display_flag(config, surf)) {
      ADDR_COMPUTE_BASE_SWIZZLE_INPUT AddrBaseSwizzleIn = {0};
      ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT AddrBaseSwizzleOut = {0};

      AddrBaseSwizzleIn.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
      AddrBaseSwizzleOut.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);

      /* surf_index is a shared atomic counter; -1 makes it 0-based here. */
      AddrBaseSwizzleIn.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
      AddrBaseSwizzleIn.tileIndex = csio->tileIndex;
      AddrBaseSwizzleIn.macroModeIndex = csio->macroModeIndex;
      AddrBaseSwizzleIn.pTileInfo = csio->pTileInfo;
      AddrBaseSwizzleIn.tileMode = csio->tileMode;

      int r = AddrComputeBaseSwizzle(addrlib, &AddrBaseSwizzleIn,
                                     &AddrBaseSwizzleOut);
      if (r != ADDR_OK) /* restored */
         return r; /* restored */

      assert(AddrBaseSwizzleOut.tileSwizzle <=
             u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
      surf->tile_swizzle = AddrBaseSwizzleOut.tileSwizzle;
   }
   return 0; /* restored */
}
/* Compute CMASK (fast-color-clear metadata) size/alignment for GFX6-GFX8.
 * No-op for depth/stencil, linear surfaces, or MSAA surfaces without FMASK.
 *
 * NOTE(review): recovered from a line-dropped extraction; the switch body
 * (cache-line dimensions per pipe count) was restored from context — verify
 * against version control. */
static void ac_compute_cmask(const struct radeon_info *info,
                             const struct ac_surf_config *config,
                             struct radeon_surf *surf)
{
   unsigned pipe_interleave_bytes = info->pipe_interleave_bytes;
   unsigned num_pipes = info->num_tile_pipes;
   unsigned cl_width, cl_height;

   if (surf->flags & RADEON_SURF_Z_OR_SBUFFER || surf->is_linear ||
       (config->info.samples >= 2 && !surf->fmask_size))
      return; /* restored */

   assert(info->chip_class <= GFX8);

   /* Cache-line dimensions depend on the pipe count. */
   switch (num_pipes) { /* restored switch head and cases — confirm values */
   case 2:
      cl_width = 32;
      cl_height = 16;
      break;
   case 4:
      cl_width = 32;
      cl_height = 32;
      break;
   case 8:
      cl_width = 64;
      cl_height = 32;
      break;
   case 16: /* Hawaii */
      cl_width = 64;
      cl_height = 64;
      break;
   default:
      assert(0);
      return;
   }

   unsigned base_align = num_pipes * pipe_interleave_bytes;

   unsigned width = align(surf->u.legacy.level[0].nblk_x, cl_width*8);
   unsigned height = align(surf->u.legacy.level[0].nblk_y, cl_height*8);
   unsigned slice_elements = (width * height) / (8*8);

   /* Each element of CMASK is a nibble. */
   unsigned slice_bytes = slice_elements / 2;

   surf->u.legacy.cmask_slice_tile_max = (width * height) / (128*128);
   if (surf->u.legacy.cmask_slice_tile_max)
      surf->u.legacy.cmask_slice_tile_max -= 1;

   unsigned num_layers; /* restored declaration */
   if (config->is_3d) /* restored */
      num_layers = config->info.depth;
   else if (config->is_cube)
      num_layers = 6; /* restored */
   else /* restored */
      num_layers = config->info.array_size;

   surf->cmask_alignment = MAX2(256, base_align);
   surf->cmask_slice_size = align(slice_bytes, base_align);
   surf->cmask_size = surf->cmask_slice_size * num_layers;
}
/**
 * Fill in the tiling information in \p surf based on the given surface config.
 *
 * The following fields of \p surf must be initialized by the caller:
 * blk_w, blk_h, bpe, flags.
 *
 * NOTE(review): recovered from a line-dropped extraction; restored lines are
 * marked — verify against version control.
 */
static int gfx6_compute_surface(ADDR_HANDLE addrlib,
                                const struct radeon_info *info,
                                const struct ac_surf_config *config,
                                enum radeon_surf_mode mode,
                                struct radeon_surf *surf)
{
   unsigned level; /* restored declaration */
   bool compressed; /* restored declaration */
   int r; /* restored declaration */
   ADDR_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
   ADDR_COMPUTE_SURFACE_INFO_OUTPUT AddrSurfInfoOut = {0};
   ADDR_COMPUTE_DCCINFO_INPUT AddrDccIn = {0};
   ADDR_COMPUTE_DCCINFO_OUTPUT AddrDccOut = {0};
   ADDR_COMPUTE_HTILE_INFO_INPUT AddrHtileIn = {0};
   ADDR_COMPUTE_HTILE_INFO_OUTPUT AddrHtileOut = {0};
   ADDR_TILEINFO AddrTileInfoIn = {0};
   ADDR_TILEINFO AddrTileInfoOut = {0};

   AddrSurfInfoIn.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);
   AddrSurfInfoOut.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
   AddrDccIn.size = sizeof(ADDR_COMPUTE_DCCINFO_INPUT);
   AddrDccOut.size = sizeof(ADDR_COMPUTE_DCCINFO_OUTPUT);
   AddrHtileIn.size = sizeof(ADDR_COMPUTE_HTILE_INFO_INPUT);
   AddrHtileOut.size = sizeof(ADDR_COMPUTE_HTILE_INFO_OUTPUT);
   AddrSurfInfoOut.pTileInfo = &AddrTileInfoOut;

   compressed = surf->blk_w == 4 && surf->blk_h == 4;

   /* MSAA requires 2D tiling. */
   if (config->info.samples > 1)
      mode = RADEON_SURF_MODE_2D;

   /* DB doesn't support linear layouts. */
   if (surf->flags & (RADEON_SURF_Z_OR_SBUFFER) &&
       mode < RADEON_SURF_MODE_1D)
      mode = RADEON_SURF_MODE_1D;

   /* Set the requested tiling mode. */
   switch (mode) { /* restored switch head */
   case RADEON_SURF_MODE_LINEAR_ALIGNED:
      AddrSurfInfoIn.tileMode = ADDR_TM_LINEAR_ALIGNED;
      break; /* restored */
   case RADEON_SURF_MODE_1D:
      AddrSurfInfoIn.tileMode = ADDR_TM_1D_TILED_THIN1;
      break; /* restored */
   case RADEON_SURF_MODE_2D:
      AddrSurfInfoIn.tileMode = ADDR_TM_2D_TILED_THIN1;
      break; /* restored */
   default:
      assert(0); /* restored */
   }

   /* The format must be set correctly for the allocation of compressed
    * textures to work. In other cases, setting the bpp is sufficient.
    */
   if (compressed) { /* restored branch structure — confirm */
      switch (surf->bpe) {
      case 8:
         AddrSurfInfoIn.format = ADDR_FMT_BC1;
         break;
      case 16:
         AddrSurfInfoIn.format = ADDR_FMT_BC3;
         break;
      default:
         assert(0);
      }
   } else {
      AddrDccIn.bpp = AddrSurfInfoIn.bpp = surf->bpe * 8;
   }

   AddrDccIn.numSamples = AddrSurfInfoIn.numSamples =
      MAX2(1, config->info.samples);
   AddrSurfInfoIn.tileIndex = -1;

   if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER)) {
      AddrDccIn.numSamples = AddrSurfInfoIn.numFrags =
         MAX2(1, config->info.storage_samples);
   }

   /* Set the micro tile type. */
   if (surf->flags & RADEON_SURF_SCANOUT)
      AddrSurfInfoIn.tileType = ADDR_DISPLAYABLE;
   else if (surf->flags & RADEON_SURF_Z_OR_SBUFFER)
      AddrSurfInfoIn.tileType = ADDR_DEPTH_SAMPLE_ORDER;
   else /* restored */
      AddrSurfInfoIn.tileType = ADDR_NON_DISPLAYABLE;

   AddrSurfInfoIn.flags.color = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
   AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
   AddrSurfInfoIn.flags.cube = config->is_cube;
   AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
   AddrSurfInfoIn.flags.pow2Pad = config->info.levels > 1;
   AddrSurfInfoIn.flags.tcCompatible = (surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) != 0;

   /* Only degrade the tile mode for space if TC-compatible HTILE hasn't been
    * requested, because TC-compatible HTILE requires 2D tiling.
    */
   AddrSurfInfoIn.flags.opt4Space = !AddrSurfInfoIn.flags.tcCompatible &&
                                    !AddrSurfInfoIn.flags.fmask &&
                                    config->info.samples <= 1 &&
                                    !(surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE);

   /* DCC notes:
    * - If we add MSAA support, keep in mind that CB can't decompress 8bpp
    *   with samples >= 4.
    * - Mipmapped array textures have low performance (discovered by a closed
    *   driver team).
    */
   AddrSurfInfoIn.flags.dccCompatible =
      info->chip_class >= GFX8 &&
      info->has_graphics && /* disable DCC on compute-only chips */
      !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
      !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
      !compressed && /* restored condition — confirm */
      ((config->info.array_size == 1 && config->info.depth == 1) ||
       config->info.levels == 1);

   AddrSurfInfoIn.flags.noStencil = (surf->flags & RADEON_SURF_SBUFFER) == 0;
   AddrSurfInfoIn.flags.compressZ = !!(surf->flags & RADEON_SURF_Z_OR_SBUFFER);

   /* On GFX7-GFX8, the DB uses the same pitch and tile mode (except tilesplit)
    * for Z and stencil. This can cause a number of problems which we work
    * around here:
    *
    * - a depth part that is incompatible with mipmapped texturing
    * - at least on Stoney, entirely incompatible Z/S aspects (e.g.
    *   incorrect tiling applied to the stencil part, stencil buffer
    *   memory accesses that go out of bounds) even without mipmapping
    *
    * Some piglit tests that are prone to different types of related
    * failures:
    *  ./bin/ext_framebuffer_multisample-upsample 2 stencil
    *  ./bin/framebuffer-blit-levels {draw,read} stencil
    *  ./bin/ext_framebuffer_multisample-unaligned-blit N {depth,stencil} {msaa,upsample,downsample}
    *  ./bin/fbo-depth-array fs-writes-{depth,stencil} / {depth,stencil}-{clear,layered-clear,draw}
    *  ./bin/depthstencil-render-miplevels 1024 d=s=z24_s8
    */
   int stencil_tile_idx = -1;

   if (AddrSurfInfoIn.flags.depth && !AddrSurfInfoIn.flags.noStencil &&
       (config->info.levels > 1 || info->family == CHIP_STONEY)) {
      /* Compute stencilTileIdx that is compatible with the (depth)
       * tileIdx. This degrades the depth surface if necessary to
       * ensure that a matching stencilTileIdx exists. */
      AddrSurfInfoIn.flags.matchStencilTileCfg = 1;

      /* Keep the depth mip-tail compatible with texturing. */
      AddrSurfInfoIn.flags.noStencil = 1;
   }

   /* Set preferred macrotile parameters. This is usually required
    * for shared resources. This is for 2D tiling only. */
   if (AddrSurfInfoIn.tileMode >= ADDR_TM_2D_TILED_THIN1 &&
       surf->u.legacy.bankw && surf->u.legacy.bankh &&
       surf->u.legacy.mtilea && surf->u.legacy.tile_split) {
      /* If any of these parameters are incorrect, the calculation
       * will fail. */
      AddrTileInfoIn.banks = surf->u.legacy.num_banks;
      AddrTileInfoIn.bankWidth = surf->u.legacy.bankw;
      AddrTileInfoIn.bankHeight = surf->u.legacy.bankh;
      AddrTileInfoIn.macroAspectRatio = surf->u.legacy.mtilea;
      AddrTileInfoIn.tileSplitBytes = surf->u.legacy.tile_split;
      AddrTileInfoIn.pipeConfig = surf->u.legacy.pipe_config + 1; /* +1 compared to GB_TILE_MODE */
      AddrSurfInfoIn.flags.opt4Space = 0;
      AddrSurfInfoIn.pTileInfo = &AddrTileInfoIn;

      /* If AddrSurfInfoIn.pTileInfo is set, Addrlib doesn't set
       * the tile index, because we are expected to know it if
       * we know the other parameters.
       *
       * This is something that can easily be fixed in Addrlib.
       * For now, just figure it out here.
       * Note that only 2D_TILE_THIN1 is handled here.
       */
      assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
      assert(AddrSurfInfoIn.tileMode == ADDR_TM_2D_TILED_THIN1);

      if (info->chip_class == GFX6) {
         if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE) {
            if (surf->bpe == 2) /* restored condition — confirm */
               AddrSurfInfoIn.tileIndex = 11; /* 16bpp */
            else /* restored */
               AddrSurfInfoIn.tileIndex = 12; /* 32bpp */
         } else { /* restored */
            if (surf->bpe == 1) /* restored */
               AddrSurfInfoIn.tileIndex = 14; /* 8bpp */
            else if (surf->bpe == 2)
               AddrSurfInfoIn.tileIndex = 15; /* 16bpp */
            else if (surf->bpe == 4)
               AddrSurfInfoIn.tileIndex = 16; /* 32bpp */
            else /* restored */
               AddrSurfInfoIn.tileIndex = 17; /* 64bpp (and 128bpp) */
         }
      } else { /* restored: GFX7-GFX8 */
         if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE)
            AddrSurfInfoIn.tileIndex = 10; /* 2D displayable */
         else /* restored */
            AddrSurfInfoIn.tileIndex = 14; /* 2D non-displayable */

         /* Addrlib doesn't set this if tileIndex is forced like above. */
         AddrSurfInfoOut.macroModeIndex = cik_get_macro_tile_index(surf);
      }
   }

   surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
   surf->num_dcc_levels = 0;
   surf->surf_size = 0; /* restored */
   surf->dcc_size = 0; /* restored */
   surf->dcc_alignment = 1;
   surf->htile_size = 0;
   surf->htile_slice_size = 0;
   surf->htile_alignment = 1;

   const bool only_stencil = (surf->flags & RADEON_SURF_SBUFFER) &&
                             !(surf->flags & RADEON_SURF_ZBUFFER);

   /* Calculate texture layout information. */
   if (!only_stencil) { /* restored guard — confirm */
      for (level = 0; level < config->info.levels; level++) {
         r = gfx6_compute_level(addrlib, config, surf, false, level, compressed,
                                &AddrSurfInfoIn, &AddrSurfInfoOut,
                                &AddrDccIn, &AddrDccOut, &AddrHtileIn, &AddrHtileOut);
         if (r) /* restored */
            return r; /* restored */

         if (level > 0) /* restored: level-0-only settings below */
            continue; /* restored */

         if (!AddrSurfInfoOut.tcCompatible) {
            AddrSurfInfoIn.flags.tcCompatible = 0;
            surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
         }

         if (AddrSurfInfoIn.flags.matchStencilTileCfg) {
            AddrSurfInfoIn.flags.matchStencilTileCfg = 0;
            AddrSurfInfoIn.tileIndex = AddrSurfInfoOut.tileIndex;
            stencil_tile_idx = AddrSurfInfoOut.stencilTileIdx;

            assert(stencil_tile_idx >= 0);
         }

         r = gfx6_surface_settings(addrlib, info, config,
                                   &AddrSurfInfoOut, surf);
         if (r) /* restored */
            return r; /* restored */
      }
   }

   /* Calculate texture layout information for stencil. */
   if (surf->flags & RADEON_SURF_SBUFFER) {
      AddrSurfInfoIn.tileIndex = stencil_tile_idx;
      AddrSurfInfoIn.bpp = 8;
      AddrSurfInfoIn.flags.depth = 0;
      AddrSurfInfoIn.flags.stencil = 1;
      AddrSurfInfoIn.flags.tcCompatible = 0;
      /* This will be ignored if AddrSurfInfoIn.pTileInfo is NULL. */
      AddrTileInfoIn.tileSplitBytes = surf->u.legacy.stencil_tile_split;

      for (level = 0; level < config->info.levels; level++) {
         r = gfx6_compute_level(addrlib, config, surf, true, level, compressed,
                                &AddrSurfInfoIn, &AddrSurfInfoOut,
                                &AddrDccIn, &AddrDccOut,
                                NULL, NULL); /* restored: no HTILE for stencil — confirm */
         if (r) /* restored */
            return r; /* restored */

         /* DB uses the depth pitch for both stencil and depth. */
         if (!only_stencil) { /* restored guard — confirm */
            if (surf->u.legacy.stencil_level[level].nblk_x !=
                surf->u.legacy.level[level].nblk_x)
               surf->u.legacy.stencil_adjusted = true;
         } else { /* restored */
            surf->u.legacy.level[level].nblk_x =
               surf->u.legacy.stencil_level[level].nblk_x;
         }

         if (level == 0) { /* restored guard — confirm */
            if (only_stencil) { /* restored guard — confirm */
               r = gfx6_surface_settings(addrlib, info, config,
                                         &AddrSurfInfoOut, surf);
               if (r) /* restored */
                  return r; /* restored */
            }

            /* For 2D modes only. */
            if (AddrSurfInfoOut.tileMode >= ADDR_TM_2D_TILED_THIN1) {
               surf->u.legacy.stencil_tile_split =
                  AddrSurfInfoOut.pTileInfo->tileSplitBytes;
            }
         }
      }
   }

   /* Compute FMASK. */
   if (config->info.samples >= 2 && AddrSurfInfoIn.flags.color &&
       info->has_graphics && !(surf->flags & RADEON_SURF_NO_FMASK)) {
      ADDR_COMPUTE_FMASK_INFO_INPUT fin = {0};
      ADDR_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
      ADDR_TILEINFO fmask_tile_info = {};

      fin.size = sizeof(fin);
      fout.size = sizeof(fout);

      fin.tileMode = AddrSurfInfoOut.tileMode;
      fin.pitch = AddrSurfInfoOut.pitch;
      fin.height = config->info.height;
      fin.numSlices = AddrSurfInfoIn.numSlices;
      fin.numSamples = AddrSurfInfoIn.numSamples;
      fin.numFrags = AddrSurfInfoIn.numFrags;
      fin.tileIndex = -1; /* restored — confirm */
      fout.pTileInfo = &fmask_tile_info;

      r = AddrComputeFmaskInfo(addrlib, &fin, &fout);
      if (r) /* restored */
         return r; /* restored */

      surf->fmask_size = fout.fmaskBytes;
      surf->fmask_alignment = fout.baseAlign;
      surf->fmask_tile_swizzle = 0;

      surf->u.legacy.fmask.slice_tile_max =
         (fout.pitch * fout.height) / 64;
      if (surf->u.legacy.fmask.slice_tile_max)
         surf->u.legacy.fmask.slice_tile_max -= 1;

      surf->u.legacy.fmask.tiling_index = fout.tileIndex;
      surf->u.legacy.fmask.bankh = fout.pTileInfo->bankHeight;
      surf->u.legacy.fmask.pitch_in_pixels = fout.pitch;
      surf->u.legacy.fmask.slice_size = fout.sliceSize;

      /* Compute tile swizzle for FMASK. */
      if (config->info.fmask_surf_index &&
          !(surf->flags & RADEON_SURF_SHAREABLE)) {
         ADDR_COMPUTE_BASE_SWIZZLE_INPUT xin = {0};
         ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT xout = {0};

         xin.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
         xout.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);

         /* This counter starts from 1 instead of 0. */
         xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
         xin.tileIndex = fout.tileIndex;
         xin.macroModeIndex = fout.macroModeIndex;
         xin.pTileInfo = fout.pTileInfo;
         xin.tileMode = fin.tileMode;

         int r = AddrComputeBaseSwizzle(addrlib, &xin, &xout);
         if (r != ADDR_OK) /* restored */
            return r; /* restored */

         assert(xout.tileSwizzle <=
                u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
         surf->fmask_tile_swizzle = xout.tileSwizzle;
      }
   }

   /* Recalculate the whole DCC miptree size including disabled levels.
    * This is what addrlib does, but calling addrlib would be a lot more
    * complicated.
    */
   if (surf->dcc_size && config->info.levels > 1) {
      /* The smallest miplevels that are never compressed by DCC
       * still read the DCC buffer via TC if the base level uses DCC,
       * and for some reason the DCC buffer needs to be larger if
       * the miptree uses non-zero tile_swizzle. Otherwise there are
       * VM faults.
       *
       * "dcc_alignment * 4" was determined by trial and error.
       */
      surf->dcc_size = align64(surf->surf_size >> 8,
                               surf->dcc_alignment * 4);
   }

   /* Make sure HTILE covers the whole miptree, because the shader reads
    * TC-compatible HTILE even for levels where it's disabled by DB.
    */
   if (surf->htile_size && config->info.levels > 1 &&
       surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) {
      /* MSAA can't occur with levels > 1, so ignore the sample count. */
      const unsigned total_pixels = surf->surf_size / surf->bpe;
      const unsigned htile_block_size = 8 * 8;
      const unsigned htile_element_size = 4;

      surf->htile_size = (total_pixels / htile_block_size) *
                         htile_element_size; /* restored continuation */
      surf->htile_size = align(surf->htile_size, surf->htile_alignment);
   } else if (!surf->htile_size) {
      /* Unset this if HTILE is not present. */
      surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
   }

   surf->is_linear = surf->u.legacy.level[0].mode == RADEON_SURF_MODE_LINEAR_ALIGNED;
   surf->is_displayable = surf->is_linear ||
                          surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY ||
                          surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER;

   /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
    * used at the same time. This case is not currently expected to occur
    * because we don't use rotated. Enforce this restriction on all chips
    * to facilitate testing.
    */
   if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER) {
      assert(!"rotate micro tile mode is unsupported");
      return ADDR_ERROR; /* restored — confirm error code */
   }

   ac_compute_cmask(info, config, surf);
   return 0; /* restored */
}
/* This is only called when expecting a tiled layout.
 *
 * Ask addrlib (Addr2GetPreferredSurfaceSetting) for the preferred GFX9+
 * swizzle mode for the surface described by *in, writing it to *swizzle_mode.
 * 256B micro and variable-size swizzles are forbidden; FORCE_MICRO_TILE_MODE
 * constrains the preferred swizzle set to match surf->micro_tile_mode.
 * Returns 0 on success or the addrlib error code.
 *
 * NOTE(review): recovered from a line-dropped extraction; restored lines are
 * marked — verify against version control. */
static int
gfx9_get_preferred_swizzle_mode(ADDR_HANDLE addrlib,
                                struct radeon_surf *surf,
                                ADDR2_COMPUTE_SURFACE_INFO_INPUT *in,
                                bool is_fmask, AddrSwizzleMode *swizzle_mode)
{
   ADDR_E_RETURNCODE ret;
   ADDR2_GET_PREFERRED_SURF_SETTING_INPUT sin = {0};
   ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT sout = {0};

   sin.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_INPUT);
   sout.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT);

   sin.flags = in->flags;
   sin.resourceType = in->resourceType;
   sin.format = in->format;
   sin.resourceLoction = ADDR_RSRC_LOC_INVIS; /* sic: addrlib field name */
   /* TODO: We could allow some of these: */
   sin.forbiddenBlock.micro = 1; /* don't allow the 256B swizzle modes */
   sin.forbiddenBlock.var = 1; /* don't allow the variable-sized swizzle modes */
   sin.bpp = in->bpp; /* restored — confirm */
   sin.width = in->width;
   sin.height = in->height;
   sin.numSlices = in->numSlices;
   sin.numMipLevels = in->numMipLevels;
   sin.numSamples = in->numSamples;
   sin.numFrags = in->numFrags;

   if (is_fmask) { /* restored guard — confirm */
      sin.flags.display = 0;
      sin.flags.color = 0;
      sin.flags.fmask = 1;
   }

   if (surf->flags & RADEON_SURF_FORCE_MICRO_TILE_MODE) {
      sin.forbiddenBlock.linear = 1;

      if (surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY)
         sin.preferredSwSet.sw_D = 1;
      else if (surf->micro_tile_mode == RADEON_MICRO_MODE_STANDARD)
         sin.preferredSwSet.sw_S = 1;
      else if (surf->micro_tile_mode == RADEON_MICRO_MODE_DEPTH)
         sin.preferredSwSet.sw_Z = 1;
      else if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER)
         sin.preferredSwSet.sw_R = 1;
   }

   ret = Addr2GetPreferredSurfaceSetting(addrlib, &sin, &sout);
   if (ret != ADDR_OK) /* restored */
      return ret; /* restored */

   *swizzle_mode = sout.swizzleMode;
   return 0; /* restored */
}
1036 static bool is_dcc_supported_by_CB(const struct radeon_info
*info
, unsigned sw_mode
)
1038 if (info
->chip_class
>= GFX10
)
1039 return sw_mode
== ADDR_SW_64KB_Z_X
|| sw_mode
== ADDR_SW_64KB_R_X
;
1041 return sw_mode
!= ADDR_SW_LINEAR
;
/* Report whether the L2 cache can decompress DCC with the surface's current
 * independent-block settings. The allowed block-size combinations differ per
 * generation; see the per-branch comments.
 *
 * NOTE(review): recovered from a line-dropped extraction; restored lines are
 * marked — verify against version control. */
ASSERTED
static bool is_dcc_supported_by_L2(const struct radeon_info *info,
                                   const struct radeon_surf *surf)
{
   if (info->chip_class <= GFX9) {
      /* Only independent 64B blocks are supported. */
      return surf->u.gfx9.dcc.independent_64B_blocks &&
             !surf->u.gfx9.dcc.independent_128B_blocks &&
             surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B;
   }

   if (info->family == CHIP_NAVI10) {
      /* Only independent 128B blocks are supported. */
      return !surf->u.gfx9.dcc.independent_64B_blocks &&
             surf->u.gfx9.dcc.independent_128B_blocks &&
             surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B;
   }

   if (info->family == CHIP_NAVI12 ||
       info->family == CHIP_NAVI14) {
      /* Either 64B or 128B can be used, but not both.
       * If 64B is used, DCC image stores are unsupported.
       */
      return surf->u.gfx9.dcc.independent_64B_blocks !=
             surf->u.gfx9.dcc.independent_128B_blocks &&
             (!surf->u.gfx9.dcc.independent_64B_blocks ||
              surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B) &&
             (!surf->u.gfx9.dcc.independent_128B_blocks ||
              surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B);
   }

   /* 128B is recommended, but 64B can be set too if needed for 4K by DCN.
    * Since there is no reason to ever disable 128B, require it.
    * DCC image stores are always supported.
    */
   return surf->u.gfx9.dcc.independent_128B_blocks &&
          surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B;
}
1082 static bool is_dcc_supported_by_DCN(const struct radeon_info
*info
,
1083 const struct ac_surf_config
*config
,
1084 const struct radeon_surf
*surf
,
1085 bool rb_aligned
, bool pipe_aligned
)
1087 if (!info
->use_display_dcc_unaligned
&&
1088 !info
->use_display_dcc_with_retile_blit
)
1091 /* 16bpp and 64bpp are more complicated, so they are disallowed for now. */
1095 /* Handle unaligned DCC. */
1096 if (info
->use_display_dcc_unaligned
&&
1097 (rb_aligned
|| pipe_aligned
))
1100 switch (info
->chip_class
) {
1102 /* There are more constraints, but we always set
1103 * INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B,
1104 * which always works.
1106 assert(surf
->u
.gfx9
.dcc
.independent_64B_blocks
&&
1107 surf
->u
.gfx9
.dcc
.max_compressed_block_size
== V_028C78_MAX_BLOCK_SIZE_64B
);
1111 /* DCN requires INDEPENDENT_128B_BLOCKS = 0 only on Navi1x. */
1112 if (info
->chip_class
== GFX10
&&
1113 surf
->u
.gfx9
.dcc
.independent_128B_blocks
)
1116 /* For 4K, DCN requires INDEPENDENT_64B_BLOCKS = 1. */
1117 return ((config
->info
.width
<= 2560 &&
1118 config
->info
.height
<= 2560) ||
1119 (surf
->u
.gfx9
.dcc
.independent_64B_blocks
&&
1120 surf
->u
.gfx9
.dcc
.max_compressed_block_size
== V_028C78_MAX_BLOCK_SIZE_64B
));
1122 unreachable("unhandled chip");
1127 static int gfx9_compute_miptree(ADDR_HANDLE addrlib
,
1128 const struct radeon_info
*info
,
1129 const struct ac_surf_config
*config
,
1130 struct radeon_surf
*surf
, bool compressed
,
1131 ADDR2_COMPUTE_SURFACE_INFO_INPUT
*in
)
1133 ADDR2_MIP_INFO mip_info
[RADEON_SURF_MAX_LEVELS
] = {};
1134 ADDR2_COMPUTE_SURFACE_INFO_OUTPUT out
= {0};
1135 ADDR_E_RETURNCODE ret
;
1137 out
.size
= sizeof(ADDR2_COMPUTE_SURFACE_INFO_OUTPUT
);
1138 out
.pMipInfo
= mip_info
;
1140 ret
= Addr2ComputeSurfaceInfo(addrlib
, in
, &out
);
1144 if (in
->flags
.stencil
) {
1145 surf
->u
.gfx9
.stencil
.swizzle_mode
= in
->swizzleMode
;
1146 surf
->u
.gfx9
.stencil
.epitch
= out
.epitchIsHeight
? out
.mipChainHeight
- 1 :
1147 out
.mipChainPitch
- 1;
1148 surf
->surf_alignment
= MAX2(surf
->surf_alignment
, out
.baseAlign
);
1149 surf
->u
.gfx9
.stencil_offset
= align(surf
->surf_size
, out
.baseAlign
);
1150 surf
->surf_size
= surf
->u
.gfx9
.stencil_offset
+ out
.surfSize
;
1154 surf
->u
.gfx9
.surf
.swizzle_mode
= in
->swizzleMode
;
1155 surf
->u
.gfx9
.surf
.epitch
= out
.epitchIsHeight
? out
.mipChainHeight
- 1 :
1156 out
.mipChainPitch
- 1;
1158 /* CMASK fast clear uses these even if FMASK isn't allocated.
1159 * FMASK only supports the Z swizzle modes, whose numbers are multiples of 4.
1161 surf
->u
.gfx9
.fmask
.swizzle_mode
= surf
->u
.gfx9
.surf
.swizzle_mode
& ~0x3;
1162 surf
->u
.gfx9
.fmask
.epitch
= surf
->u
.gfx9
.surf
.epitch
;
1164 surf
->u
.gfx9
.surf_slice_size
= out
.sliceSize
;
1165 surf
->u
.gfx9
.surf_pitch
= out
.pitch
;
1166 if (!compressed
&& surf
->blk_w
> 1 && out
.pitch
== out
.pixelPitch
) {
1167 /* Adjust surf_pitch to be in elements units,
1169 surf
->u
.gfx9
.surf_pitch
/= surf
->blk_w
;
1171 surf
->u
.gfx9
.surf_height
= out
.height
;
1172 surf
->surf_size
= out
.surfSize
;
1173 surf
->surf_alignment
= out
.baseAlign
;
1175 if (in
->swizzleMode
== ADDR_SW_LINEAR
) {
1176 for (unsigned i
= 0; i
< in
->numMipLevels
; i
++) {
1177 surf
->u
.gfx9
.offset
[i
] = mip_info
[i
].offset
;
1178 surf
->u
.gfx9
.pitch
[i
] = mip_info
[i
].pitch
;
1182 if (in
->flags
.depth
) {
1183 assert(in
->swizzleMode
!= ADDR_SW_LINEAR
);
1185 if (surf
->flags
& RADEON_SURF_NO_HTILE
)
1189 ADDR2_COMPUTE_HTILE_INFO_INPUT hin
= {0};
1190 ADDR2_COMPUTE_HTILE_INFO_OUTPUT hout
= {0};
1192 hin
.size
= sizeof(ADDR2_COMPUTE_HTILE_INFO_INPUT
);
1193 hout
.size
= sizeof(ADDR2_COMPUTE_HTILE_INFO_OUTPUT
);
1195 assert(in
->flags
.metaPipeUnaligned
== 0);
1196 assert(in
->flags
.metaRbUnaligned
== 0);
1198 hin
.hTileFlags
.pipeAligned
= 1;
1199 hin
.hTileFlags
.rbAligned
= 1;
1200 hin
.depthFlags
= in
->flags
;
1201 hin
.swizzleMode
= in
->swizzleMode
;
1202 hin
.unalignedWidth
= in
->width
;
1203 hin
.unalignedHeight
= in
->height
;
1204 hin
.numSlices
= in
->numSlices
;
1205 hin
.numMipLevels
= in
->numMipLevels
;
1206 hin
.firstMipIdInTail
= out
.firstMipIdInTail
;
1208 ret
= Addr2ComputeHtileInfo(addrlib
, &hin
, &hout
);
1212 surf
->htile_size
= hout
.htileBytes
;
1213 surf
->htile_slice_size
= hout
.sliceSize
;
1214 surf
->htile_alignment
= hout
.baseAlign
;
1219 /* Compute tile swizzle for the color surface.
1220 * All *_X and *_T modes can use the swizzle.
1222 if (config
->info
.surf_index
&&
1223 in
->swizzleMode
>= ADDR_SW_64KB_Z_T
&&
1224 !out
.mipChainInTail
&&
1225 !(surf
->flags
& RADEON_SURF_SHAREABLE
) &&
1226 !in
->flags
.display
) {
1227 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin
= {0};
1228 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout
= {0};
1230 xin
.size
= sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT
);
1231 xout
.size
= sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT
);
1233 xin
.surfIndex
= p_atomic_inc_return(config
->info
.surf_index
) - 1;
1234 xin
.flags
= in
->flags
;
1235 xin
.swizzleMode
= in
->swizzleMode
;
1236 xin
.resourceType
= in
->resourceType
;
1237 xin
.format
= in
->format
;
1238 xin
.numSamples
= in
->numSamples
;
1239 xin
.numFrags
= in
->numFrags
;
1241 ret
= Addr2ComputePipeBankXor(addrlib
, &xin
, &xout
);
1245 assert(xout
.pipeBankXor
<=
1246 u_bit_consecutive(0, sizeof(surf
->tile_swizzle
) * 8));
1247 surf
->tile_swizzle
= xout
.pipeBankXor
;
1251 if (info
->has_graphics
&&
1252 !(surf
->flags
& RADEON_SURF_DISABLE_DCC
) &&
1254 is_dcc_supported_by_CB(info
, in
->swizzleMode
) &&
1255 (!in
->flags
.display
||
1256 is_dcc_supported_by_DCN(info
, config
, surf
,
1257 !in
->flags
.metaRbUnaligned
,
1258 !in
->flags
.metaPipeUnaligned
))) {
1259 ADDR2_COMPUTE_DCCINFO_INPUT din
= {0};
1260 ADDR2_COMPUTE_DCCINFO_OUTPUT dout
= {0};
1261 ADDR2_META_MIP_INFO meta_mip_info
[RADEON_SURF_MAX_LEVELS
] = {};
1263 din
.size
= sizeof(ADDR2_COMPUTE_DCCINFO_INPUT
);
1264 dout
.size
= sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT
);
1265 dout
.pMipInfo
= meta_mip_info
;
1267 din
.dccKeyFlags
.pipeAligned
= !in
->flags
.metaPipeUnaligned
;
1268 din
.dccKeyFlags
.rbAligned
= !in
->flags
.metaRbUnaligned
;
1269 din
.colorFlags
= in
->flags
;
1270 din
.resourceType
= in
->resourceType
;
1271 din
.swizzleMode
= in
->swizzleMode
;
1273 din
.unalignedWidth
= in
->width
;
1274 din
.unalignedHeight
= in
->height
;
1275 din
.numSlices
= in
->numSlices
;
1276 din
.numFrags
= in
->numFrags
;
1277 din
.numMipLevels
= in
->numMipLevels
;
1278 din
.dataSurfaceSize
= out
.surfSize
;
1279 din
.firstMipIdInTail
= out
.firstMipIdInTail
;
1281 ret
= Addr2ComputeDccInfo(addrlib
, &din
, &dout
);
1285 surf
->u
.gfx9
.dcc
.rb_aligned
= din
.dccKeyFlags
.rbAligned
;
1286 surf
->u
.gfx9
.dcc
.pipe_aligned
= din
.dccKeyFlags
.pipeAligned
;
1287 surf
->u
.gfx9
.dcc_block_width
= dout
.compressBlkWidth
;
1288 surf
->u
.gfx9
.dcc_block_height
= dout
.compressBlkHeight
;
1289 surf
->u
.gfx9
.dcc_block_depth
= dout
.compressBlkDepth
;
1290 surf
->dcc_size
= dout
.dccRamSize
;
1291 surf
->dcc_alignment
= dout
.dccRamBaseAlign
;
1292 surf
->num_dcc_levels
= in
->numMipLevels
;
1294 /* Disable DCC for levels that are in the mip tail.
1296 * There are two issues that this is intended to
1299 * 1. Multiple mip levels may share a cache line. This
1300 * can lead to corruption when switching between
1301 * rendering to different mip levels because the
1302 * RBs don't maintain coherency.
1304 * 2. Texturing with metadata after rendering sometimes
1305 * fails with corruption, probably for a similar
1308 * Working around these issues for all levels in the
1309 * mip tail may be overly conservative, but it's what
1312 * Alternative solutions that also work but are worse:
1313 * - Disable DCC entirely.
1314 * - Flush TC L2 after rendering.
1316 for (unsigned i
= 0; i
< in
->numMipLevels
; i
++) {
1317 if (meta_mip_info
[i
].inMiptail
) {
1318 surf
->num_dcc_levels
= i
;
1323 if (!surf
->num_dcc_levels
)
1326 surf
->u
.gfx9
.display_dcc_size
= surf
->dcc_size
;
1327 surf
->u
.gfx9
.display_dcc_alignment
= surf
->dcc_alignment
;
1328 surf
->u
.gfx9
.display_dcc_pitch_max
= dout
.pitch
- 1;
1330 /* Compute displayable DCC. */
1331 if (in
->flags
.display
&&
1332 surf
->num_dcc_levels
&&
1333 info
->use_display_dcc_with_retile_blit
) {
1334 /* Compute displayable DCC info. */
1335 din
.dccKeyFlags
.pipeAligned
= 0;
1336 din
.dccKeyFlags
.rbAligned
= 0;
1338 assert(din
.numSlices
== 1);
1339 assert(din
.numMipLevels
== 1);
1340 assert(din
.numFrags
== 1);
1341 assert(surf
->tile_swizzle
== 0);
1342 assert(surf
->u
.gfx9
.dcc
.pipe_aligned
||
1343 surf
->u
.gfx9
.dcc
.rb_aligned
);
1345 ret
= Addr2ComputeDccInfo(addrlib
, &din
, &dout
);
1349 surf
->u
.gfx9
.display_dcc_size
= dout
.dccRamSize
;
1350 surf
->u
.gfx9
.display_dcc_alignment
= dout
.dccRamBaseAlign
;
1351 surf
->u
.gfx9
.display_dcc_pitch_max
= dout
.pitch
- 1;
1352 assert(surf
->u
.gfx9
.display_dcc_size
<= surf
->dcc_size
);
1354 /* Compute address mapping from non-displayable to displayable DCC. */
1355 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin
= {};
1356 addrin
.size
= sizeof(addrin
);
1357 addrin
.colorFlags
.color
= 1;
1358 addrin
.swizzleMode
= din
.swizzleMode
;
1359 addrin
.resourceType
= din
.resourceType
;
1360 addrin
.bpp
= din
.bpp
;
1361 addrin
.unalignedWidth
= din
.unalignedWidth
;
1362 addrin
.unalignedHeight
= din
.unalignedHeight
;
1363 addrin
.numSlices
= 1;
1364 addrin
.numMipLevels
= 1;
1365 addrin
.numFrags
= 1;
1367 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_OUTPUT addrout
= {};
1368 addrout
.size
= sizeof(addrout
);
1370 surf
->u
.gfx9
.dcc_retile_num_elements
=
1371 DIV_ROUND_UP(in
->width
, dout
.compressBlkWidth
) *
1372 DIV_ROUND_UP(in
->height
, dout
.compressBlkHeight
) * 2;
1373 /* Align the size to 4 (for the compute shader). */
1374 surf
->u
.gfx9
.dcc_retile_num_elements
=
1375 align(surf
->u
.gfx9
.dcc_retile_num_elements
, 4);
1377 surf
->u
.gfx9
.dcc_retile_map
=
1378 malloc(surf
->u
.gfx9
.dcc_retile_num_elements
* 4);
1379 if (!surf
->u
.gfx9
.dcc_retile_map
)
1380 return ADDR_OUTOFMEMORY
;
1383 surf
->u
.gfx9
.dcc_retile_use_uint16
= true;
1385 for (unsigned y
= 0; y
< in
->height
; y
+= dout
.compressBlkHeight
) {
1388 for (unsigned x
= 0; x
< in
->width
; x
+= dout
.compressBlkWidth
) {
1391 /* Compute src DCC address */
1392 addrin
.dccKeyFlags
.pipeAligned
= surf
->u
.gfx9
.dcc
.pipe_aligned
;
1393 addrin
.dccKeyFlags
.rbAligned
= surf
->u
.gfx9
.dcc
.rb_aligned
;
1396 ret
= Addr2ComputeDccAddrFromCoord(addrlib
, &addrin
, &addrout
);
1400 surf
->u
.gfx9
.dcc_retile_map
[index
* 2] = addrout
.addr
;
1401 if (addrout
.addr
> UINT16_MAX
)
1402 surf
->u
.gfx9
.dcc_retile_use_uint16
= false;
1404 /* Compute dst DCC address */
1405 addrin
.dccKeyFlags
.pipeAligned
= 0;
1406 addrin
.dccKeyFlags
.rbAligned
= 0;
1409 ret
= Addr2ComputeDccAddrFromCoord(addrlib
, &addrin
, &addrout
);
1413 surf
->u
.gfx9
.dcc_retile_map
[index
* 2 + 1] = addrout
.addr
;
1414 if (addrout
.addr
> UINT16_MAX
)
1415 surf
->u
.gfx9
.dcc_retile_use_uint16
= false;
1417 assert(index
* 2 + 1 < surf
->u
.gfx9
.dcc_retile_num_elements
);
1421 /* Fill the remaining pairs with the last one (for the compute shader). */
1422 for (unsigned i
= index
* 2; i
< surf
->u
.gfx9
.dcc_retile_num_elements
; i
++)
1423 surf
->u
.gfx9
.dcc_retile_map
[i
] = surf
->u
.gfx9
.dcc_retile_map
[i
- 2];
1428 if (in
->numSamples
> 1 && info
->has_graphics
&&
1429 !(surf
->flags
& RADEON_SURF_NO_FMASK
)) {
1430 ADDR2_COMPUTE_FMASK_INFO_INPUT fin
= {0};
1431 ADDR2_COMPUTE_FMASK_INFO_OUTPUT fout
= {0};
1433 fin
.size
= sizeof(ADDR2_COMPUTE_FMASK_INFO_INPUT
);
1434 fout
.size
= sizeof(ADDR2_COMPUTE_FMASK_INFO_OUTPUT
);
1436 ret
= gfx9_get_preferred_swizzle_mode(addrlib
, surf
, in
,
1437 true, &fin
.swizzleMode
);
1441 fin
.unalignedWidth
= in
->width
;
1442 fin
.unalignedHeight
= in
->height
;
1443 fin
.numSlices
= in
->numSlices
;
1444 fin
.numSamples
= in
->numSamples
;
1445 fin
.numFrags
= in
->numFrags
;
1447 ret
= Addr2ComputeFmaskInfo(addrlib
, &fin
, &fout
);
1451 surf
->u
.gfx9
.fmask
.swizzle_mode
= fin
.swizzleMode
;
1452 surf
->u
.gfx9
.fmask
.epitch
= fout
.pitch
- 1;
1453 surf
->fmask_size
= fout
.fmaskBytes
;
1454 surf
->fmask_alignment
= fout
.baseAlign
;
1456 /* Compute tile swizzle for the FMASK surface. */
1457 if (config
->info
.fmask_surf_index
&&
1458 fin
.swizzleMode
>= ADDR_SW_64KB_Z_T
&&
1459 !(surf
->flags
& RADEON_SURF_SHAREABLE
)) {
1460 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin
= {0};
1461 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout
= {0};
1463 xin
.size
= sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT
);
1464 xout
.size
= sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT
);
1466 /* This counter starts from 1 instead of 0. */
1467 xin
.surfIndex
= p_atomic_inc_return(config
->info
.fmask_surf_index
);
1468 xin
.flags
= in
->flags
;
1469 xin
.swizzleMode
= fin
.swizzleMode
;
1470 xin
.resourceType
= in
->resourceType
;
1471 xin
.format
= in
->format
;
1472 xin
.numSamples
= in
->numSamples
;
1473 xin
.numFrags
= in
->numFrags
;
1475 ret
= Addr2ComputePipeBankXor(addrlib
, &xin
, &xout
);
1479 assert(xout
.pipeBankXor
<=
1480 u_bit_consecutive(0, sizeof(surf
->fmask_tile_swizzle
) * 8));
1481 surf
->fmask_tile_swizzle
= xout
.pipeBankXor
;
1485 /* CMASK -- on GFX10 only for FMASK */
1486 if (in
->swizzleMode
!= ADDR_SW_LINEAR
&&
1487 in
->resourceType
== ADDR_RSRC_TEX_2D
&&
1488 ((info
->chip_class
<= GFX9
&&
1489 in
->numSamples
== 1 &&
1490 in
->flags
.metaPipeUnaligned
== 0 &&
1491 in
->flags
.metaRbUnaligned
== 0) ||
1492 (surf
->fmask_size
&& in
->numSamples
>= 2))) {
1493 ADDR2_COMPUTE_CMASK_INFO_INPUT cin
= {0};
1494 ADDR2_COMPUTE_CMASK_INFO_OUTPUT cout
= {0};
1496 cin
.size
= sizeof(ADDR2_COMPUTE_CMASK_INFO_INPUT
);
1497 cout
.size
= sizeof(ADDR2_COMPUTE_CMASK_INFO_OUTPUT
);
1499 assert(in
->flags
.metaPipeUnaligned
== 0);
1500 assert(in
->flags
.metaRbUnaligned
== 0);
1502 cin
.cMaskFlags
.pipeAligned
= 1;
1503 cin
.cMaskFlags
.rbAligned
= 1;
1504 cin
.colorFlags
= in
->flags
;
1505 cin
.resourceType
= in
->resourceType
;
1506 cin
.unalignedWidth
= in
->width
;
1507 cin
.unalignedHeight
= in
->height
;
1508 cin
.numSlices
= in
->numSlices
;
1510 if (in
->numSamples
> 1)
1511 cin
.swizzleMode
= surf
->u
.gfx9
.fmask
.swizzle_mode
;
1513 cin
.swizzleMode
= in
->swizzleMode
;
1515 ret
= Addr2ComputeCmaskInfo(addrlib
, &cin
, &cout
);
1519 surf
->cmask_size
= cout
.cmaskBytes
;
1520 surf
->cmask_alignment
= cout
.baseAlign
;
1527 static int gfx9_compute_surface(ADDR_HANDLE addrlib
,
1528 const struct radeon_info
*info
,
1529 const struct ac_surf_config
*config
,
1530 enum radeon_surf_mode mode
,
1531 struct radeon_surf
*surf
)
1534 ADDR2_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn
= {0};
1537 AddrSurfInfoIn
.size
= sizeof(ADDR2_COMPUTE_SURFACE_INFO_INPUT
);
1539 compressed
= surf
->blk_w
== 4 && surf
->blk_h
== 4;
1541 /* The format must be set correctly for the allocation of compressed
1542 * textures to work. In other cases, setting the bpp is sufficient. */
1544 switch (surf
->bpe
) {
1546 AddrSurfInfoIn
.format
= ADDR_FMT_BC1
;
1549 AddrSurfInfoIn
.format
= ADDR_FMT_BC3
;
1555 switch (surf
->bpe
) {
1557 assert(!(surf
->flags
& RADEON_SURF_ZBUFFER
));
1558 AddrSurfInfoIn
.format
= ADDR_FMT_8
;
1561 assert(surf
->flags
& RADEON_SURF_ZBUFFER
||
1562 !(surf
->flags
& RADEON_SURF_SBUFFER
));
1563 AddrSurfInfoIn
.format
= ADDR_FMT_16
;
1566 assert(surf
->flags
& RADEON_SURF_ZBUFFER
||
1567 !(surf
->flags
& RADEON_SURF_SBUFFER
));
1568 AddrSurfInfoIn
.format
= ADDR_FMT_32
;
1571 assert(!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
));
1572 AddrSurfInfoIn
.format
= ADDR_FMT_32_32
;
1575 assert(!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
));
1576 AddrSurfInfoIn
.format
= ADDR_FMT_32_32_32
;
1579 assert(!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
));
1580 AddrSurfInfoIn
.format
= ADDR_FMT_32_32_32_32
;
1585 AddrSurfInfoIn
.bpp
= surf
->bpe
* 8;
1588 bool is_color_surface
= !(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
);
1589 AddrSurfInfoIn
.flags
.color
= is_color_surface
&&
1590 !(surf
->flags
& RADEON_SURF_NO_RENDER_TARGET
);
1591 AddrSurfInfoIn
.flags
.depth
= (surf
->flags
& RADEON_SURF_ZBUFFER
) != 0;
1592 AddrSurfInfoIn
.flags
.display
= get_display_flag(config
, surf
);
1593 /* flags.texture currently refers to TC-compatible HTILE */
1594 AddrSurfInfoIn
.flags
.texture
= is_color_surface
||
1595 surf
->flags
& RADEON_SURF_TC_COMPATIBLE_HTILE
;
1596 AddrSurfInfoIn
.flags
.opt4space
= 1;
1598 AddrSurfInfoIn
.numMipLevels
= config
->info
.levels
;
1599 AddrSurfInfoIn
.numSamples
= MAX2(1, config
->info
.samples
);
1600 AddrSurfInfoIn
.numFrags
= AddrSurfInfoIn
.numSamples
;
1602 if (!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
))
1603 AddrSurfInfoIn
.numFrags
= MAX2(1, config
->info
.storage_samples
);
1605 /* GFX9 doesn't support 1D depth textures, so allocate all 1D textures
1606 * as 2D to avoid having shader variants for 1D vs 2D, so all shaders
1607 * must sample 1D textures as 2D. */
1609 AddrSurfInfoIn
.resourceType
= ADDR_RSRC_TEX_3D
;
1610 else if (info
->chip_class
!= GFX9
&& config
->is_1d
)
1611 AddrSurfInfoIn
.resourceType
= ADDR_RSRC_TEX_1D
;
1613 AddrSurfInfoIn
.resourceType
= ADDR_RSRC_TEX_2D
;
1615 AddrSurfInfoIn
.width
= config
->info
.width
;
1616 AddrSurfInfoIn
.height
= config
->info
.height
;
1619 AddrSurfInfoIn
.numSlices
= config
->info
.depth
;
1620 else if (config
->is_cube
)
1621 AddrSurfInfoIn
.numSlices
= 6;
1623 AddrSurfInfoIn
.numSlices
= config
->info
.array_size
;
1625 /* This is propagated to DCC. It must be 0 for HTILE and CMASK. */
1626 AddrSurfInfoIn
.flags
.metaPipeUnaligned
= 0;
1627 AddrSurfInfoIn
.flags
.metaRbUnaligned
= 0;
1629 /* Optimal values for the L2 cache. */
1630 if (info
->chip_class
== GFX9
) {
1631 surf
->u
.gfx9
.dcc
.independent_64B_blocks
= 1;
1632 surf
->u
.gfx9
.dcc
.independent_128B_blocks
= 0;
1633 surf
->u
.gfx9
.dcc
.max_compressed_block_size
= V_028C78_MAX_BLOCK_SIZE_64B
;
1634 } else if (info
->chip_class
>= GFX10
) {
1635 surf
->u
.gfx9
.dcc
.independent_64B_blocks
= 0;
1636 surf
->u
.gfx9
.dcc
.independent_128B_blocks
= 1;
1637 surf
->u
.gfx9
.dcc
.max_compressed_block_size
= V_028C78_MAX_BLOCK_SIZE_128B
;
1640 if (AddrSurfInfoIn
.flags
.display
) {
1641 /* The display hardware can only read DCC with RB_ALIGNED=0 and
1642 * PIPE_ALIGNED=0. PIPE_ALIGNED really means L2CACHE_ALIGNED.
1644 * The CB block requires RB_ALIGNED=1 except 1 RB chips.
1645 * PIPE_ALIGNED is optional, but PIPE_ALIGNED=0 requires L2 flushes
1646 * after rendering, so PIPE_ALIGNED=1 is recommended.
1648 if (info
->use_display_dcc_unaligned
) {
1649 AddrSurfInfoIn
.flags
.metaPipeUnaligned
= 1;
1650 AddrSurfInfoIn
.flags
.metaRbUnaligned
= 1;
1653 /* Adjust DCC settings to meet DCN requirements. */
1654 if (info
->use_display_dcc_unaligned
||
1655 info
->use_display_dcc_with_retile_blit
) {
1656 /* Only Navi12/14 support independent 64B blocks in L2,
1657 * but without DCC image stores.
1659 if (info
->family
== CHIP_NAVI12
||
1660 info
->family
== CHIP_NAVI14
) {
1661 surf
->u
.gfx9
.dcc
.independent_64B_blocks
= 1;
1662 surf
->u
.gfx9
.dcc
.independent_128B_blocks
= 0;
1663 surf
->u
.gfx9
.dcc
.max_compressed_block_size
= V_028C78_MAX_BLOCK_SIZE_64B
;
1666 if (info
->chip_class
>= GFX10_3
) {
1667 surf
->u
.gfx9
.dcc
.independent_64B_blocks
= 1;
1668 surf
->u
.gfx9
.dcc
.independent_128B_blocks
= 1;
1669 surf
->u
.gfx9
.dcc
.max_compressed_block_size
= V_028C78_MAX_BLOCK_SIZE_64B
;
1675 case RADEON_SURF_MODE_LINEAR_ALIGNED
:
1676 assert(config
->info
.samples
<= 1);
1677 assert(!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
));
1678 AddrSurfInfoIn
.swizzleMode
= ADDR_SW_LINEAR
;
1681 case RADEON_SURF_MODE_1D
:
1682 case RADEON_SURF_MODE_2D
:
1683 if (surf
->flags
& RADEON_SURF_IMPORTED
||
1684 (info
->chip_class
>= GFX10
&&
1685 surf
->flags
& RADEON_SURF_FORCE_SWIZZLE_MODE
)) {
1686 AddrSurfInfoIn
.swizzleMode
= surf
->u
.gfx9
.surf
.swizzle_mode
;
1690 r
= gfx9_get_preferred_swizzle_mode(addrlib
, surf
, &AddrSurfInfoIn
,
1691 false, &AddrSurfInfoIn
.swizzleMode
);
1700 surf
->u
.gfx9
.resource_type
= AddrSurfInfoIn
.resourceType
;
1701 surf
->has_stencil
= !!(surf
->flags
& RADEON_SURF_SBUFFER
);
1703 surf
->num_dcc_levels
= 0;
1704 surf
->surf_size
= 0;
1705 surf
->fmask_size
= 0;
1707 surf
->htile_size
= 0;
1708 surf
->htile_slice_size
= 0;
1709 surf
->u
.gfx9
.surf_offset
= 0;
1710 surf
->u
.gfx9
.stencil_offset
= 0;
1711 surf
->cmask_size
= 0;
1712 surf
->u
.gfx9
.dcc_retile_use_uint16
= false;
1713 surf
->u
.gfx9
.dcc_retile_num_elements
= 0;
1714 surf
->u
.gfx9
.dcc_retile_map
= NULL
;
1716 /* Calculate texture layout information. */
1717 r
= gfx9_compute_miptree(addrlib
, info
, config
, surf
, compressed
,
1722 /* Calculate texture layout information for stencil. */
1723 if (surf
->flags
& RADEON_SURF_SBUFFER
) {
1724 AddrSurfInfoIn
.flags
.stencil
= 1;
1725 AddrSurfInfoIn
.bpp
= 8;
1726 AddrSurfInfoIn
.format
= ADDR_FMT_8
;
1728 if (!AddrSurfInfoIn
.flags
.depth
) {
1729 r
= gfx9_get_preferred_swizzle_mode(addrlib
, surf
, &AddrSurfInfoIn
,
1730 false, &AddrSurfInfoIn
.swizzleMode
);
1734 AddrSurfInfoIn
.flags
.depth
= 0;
1736 r
= gfx9_compute_miptree(addrlib
, info
, config
, surf
, compressed
,
1742 surf
->is_linear
= surf
->u
.gfx9
.surf
.swizzle_mode
== ADDR_SW_LINEAR
;
1744 /* Query whether the surface is displayable. */
1745 /* This is only useful for surfaces that are allocated without SCANOUT. */
1746 bool displayable
= false;
1747 if (!config
->is_3d
&& !config
->is_cube
) {
1748 r
= Addr2IsValidDisplaySwizzleMode(addrlib
, surf
->u
.gfx9
.surf
.swizzle_mode
,
1749 surf
->bpe
* 8, &displayable
);
1753 /* Display needs unaligned DCC. */
1754 if (surf
->num_dcc_levels
&&
1755 !is_dcc_supported_by_DCN(info
, config
, surf
,
1756 surf
->u
.gfx9
.dcc
.rb_aligned
,
1757 surf
->u
.gfx9
.dcc
.pipe_aligned
))
1758 displayable
= false;
1760 surf
->is_displayable
= displayable
;
1762 /* Validate that we allocated a displayable surface if requested. */
1763 assert(!AddrSurfInfoIn
.flags
.display
|| surf
->is_displayable
);
1765 /* Validate that DCC is set up correctly. */
1766 if (surf
->num_dcc_levels
) {
1767 assert(is_dcc_supported_by_L2(info
, surf
));
1768 if (AddrSurfInfoIn
.flags
.color
)
1769 assert(is_dcc_supported_by_CB(info
, surf
->u
.gfx9
.surf
.swizzle_mode
));
1770 if (AddrSurfInfoIn
.flags
.display
) {
1771 assert(is_dcc_supported_by_DCN(info
, config
, surf
,
1772 surf
->u
.gfx9
.dcc
.rb_aligned
,
1773 surf
->u
.gfx9
.dcc
.pipe_aligned
));
1777 if (info
->has_graphics
&&
1780 config
->info
.levels
== 1 &&
1781 AddrSurfInfoIn
.flags
.color
&&
1783 surf
->surf_alignment
>= 64 * 1024 && /* 64KB tiling */
1784 !(surf
->flags
& (RADEON_SURF_DISABLE_DCC
|
1785 RADEON_SURF_FORCE_SWIZZLE_MODE
|
1786 RADEON_SURF_FORCE_MICRO_TILE_MODE
))) {
1787 /* Validate that DCC is enabled if DCN can do it. */
1788 if ((info
->use_display_dcc_unaligned
||
1789 info
->use_display_dcc_with_retile_blit
) &&
1790 AddrSurfInfoIn
.flags
.display
&&
1792 assert(surf
->num_dcc_levels
);
1795 /* Validate that non-scanout DCC is always enabled. */
1796 if (!AddrSurfInfoIn
.flags
.display
)
1797 assert(surf
->num_dcc_levels
);
1800 if (!surf
->htile_size
) {
1801 /* Unset this if HTILE is not present. */
1802 surf
->flags
&= ~RADEON_SURF_TC_COMPATIBLE_HTILE
;
1805 switch (surf
->u
.gfx9
.surf
.swizzle_mode
) {
1807 case ADDR_SW_256B_S
:
1809 case ADDR_SW_64KB_S
:
1810 case ADDR_SW_64KB_S_T
:
1811 case ADDR_SW_4KB_S_X
:
1812 case ADDR_SW_64KB_S_X
:
1813 surf
->micro_tile_mode
= RADEON_MICRO_MODE_STANDARD
;
1817 case ADDR_SW_LINEAR
:
1818 case ADDR_SW_256B_D
:
1820 case ADDR_SW_64KB_D
:
1821 case ADDR_SW_64KB_D_T
:
1822 case ADDR_SW_4KB_D_X
:
1823 case ADDR_SW_64KB_D_X
:
1824 surf
->micro_tile_mode
= RADEON_MICRO_MODE_DISPLAY
;
1827 /* R = rotated (gfx9), render target (gfx10). */
1828 case ADDR_SW_256B_R
:
1830 case ADDR_SW_64KB_R
:
1831 case ADDR_SW_64KB_R_T
:
1832 case ADDR_SW_4KB_R_X
:
1833 case ADDR_SW_64KB_R_X
:
1834 case ADDR_SW_VAR_R_X
:
1835 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
1836 * used at the same time. We currently do not use rotated
1839 assert(info
->chip_class
>= GFX10
||
1840 !"rotate micro tile mode is unsupported");
1841 surf
->micro_tile_mode
= RADEON_MICRO_MODE_RENDER
;
1846 case ADDR_SW_64KB_Z
:
1847 case ADDR_SW_64KB_Z_T
:
1848 case ADDR_SW_4KB_Z_X
:
1849 case ADDR_SW_64KB_Z_X
:
1850 case ADDR_SW_VAR_Z_X
:
1851 surf
->micro_tile_mode
= RADEON_MICRO_MODE_DEPTH
;
1861 free(surf
->u
.gfx9
.dcc_retile_map
);
1862 surf
->u
.gfx9
.dcc_retile_map
= NULL
;
1866 int ac_compute_surface(ADDR_HANDLE addrlib
, const struct radeon_info
*info
,
1867 const struct ac_surf_config
*config
,
1868 enum radeon_surf_mode mode
,
1869 struct radeon_surf
*surf
)
1873 r
= surf_config_sanity(config
, surf
->flags
);
1877 if (info
->chip_class
>= GFX9
)
1878 r
= gfx9_compute_surface(addrlib
, info
, config
, mode
, surf
);
1880 r
= gfx6_compute_surface(addrlib
, info
, config
, mode
, surf
);
1885 /* Determine the memory layout of multiple allocations in one buffer. */
1886 surf
->total_size
= surf
->surf_size
;
1887 surf
->alignment
= surf
->surf_alignment
;
1889 if (surf
->htile_size
) {
1890 surf
->htile_offset
= align64(surf
->total_size
, surf
->htile_alignment
);
1891 surf
->total_size
= surf
->htile_offset
+ surf
->htile_size
;
1892 surf
->alignment
= MAX2(surf
->alignment
, surf
->htile_alignment
);
1895 if (surf
->fmask_size
) {
1896 assert(config
->info
.samples
>= 2);
1897 surf
->fmask_offset
= align64(surf
->total_size
, surf
->fmask_alignment
);
1898 surf
->total_size
= surf
->fmask_offset
+ surf
->fmask_size
;
1899 surf
->alignment
= MAX2(surf
->alignment
, surf
->fmask_alignment
);
1902 /* Single-sample CMASK is in a separate buffer. */
1903 if (surf
->cmask_size
&& config
->info
.samples
>= 2) {
1904 surf
->cmask_offset
= align64(surf
->total_size
, surf
->cmask_alignment
);
1905 surf
->total_size
= surf
->cmask_offset
+ surf
->cmask_size
;
1906 surf
->alignment
= MAX2(surf
->alignment
, surf
->cmask_alignment
);
1909 if (surf
->is_displayable
)
1910 surf
->flags
|= RADEON_SURF_SCANOUT
;
1912 if (surf
->dcc_size
&&
1913 /* dcc_size is computed on GFX9+ only if it's displayable. */
1914 (info
->chip_class
>= GFX9
|| !get_display_flag(config
, surf
))) {
1915 /* It's better when displayable DCC is immediately after
1916 * the image due to hw-specific reasons.
1918 if (info
->chip_class
>= GFX9
&&
1919 surf
->u
.gfx9
.dcc_retile_num_elements
) {
1920 /* Add space for the displayable DCC buffer. */
1921 surf
->display_dcc_offset
=
1922 align64(surf
->total_size
, surf
->u
.gfx9
.display_dcc_alignment
);
1923 surf
->total_size
= surf
->display_dcc_offset
+
1924 surf
->u
.gfx9
.display_dcc_size
;
1926 /* Add space for the DCC retile buffer. (16-bit or 32-bit elements) */
1927 surf
->dcc_retile_map_offset
=
1928 align64(surf
->total_size
, info
->tcc_cache_line_size
);
1930 if (surf
->u
.gfx9
.dcc_retile_use_uint16
) {
1931 surf
->total_size
= surf
->dcc_retile_map_offset
+
1932 surf
->u
.gfx9
.dcc_retile_num_elements
* 2;
1934 surf
->total_size
= surf
->dcc_retile_map_offset
+
1935 surf
->u
.gfx9
.dcc_retile_num_elements
* 4;
1939 surf
->dcc_offset
= align64(surf
->total_size
, surf
->dcc_alignment
);
1940 surf
->total_size
= surf
->dcc_offset
+ surf
->dcc_size
;
1941 surf
->alignment
= MAX2(surf
->alignment
, surf
->dcc_alignment
);
1947 /* This is meant to be used for disabling DCC. */
1948 void ac_surface_zero_dcc_fields(struct radeon_surf
*surf
)
1950 surf
->dcc_offset
= 0;
1951 surf
->display_dcc_offset
= 0;
1952 surf
->dcc_retile_map_offset
= 0;
/* Decode the 3-bit TILE_SPLIT field from the amdgpu tiling flags into the
 * tile-split size in bytes (64 << field). Unknown values fall through the
 * default label to 1024 (the case-4 value), matching the legacy encoding. */
static unsigned eg_tile_split(unsigned tile_split)
{
	switch (tile_split) {
	case 0:     tile_split = 64;    break;
	case 1:     tile_split = 128;   break;
	case 2:     tile_split = 256;   break;
	case 3:     tile_split = 512;   break;
	default:
	case 4:     tile_split = 1024;  break;
	case 5:     tile_split = 2048;  break;
	case 6:     tile_split = 4096;  break;
	}
	return tile_split;
}
/* Inverse of eg_tile_split: encode a tile-split byte size back into the
 * 3-bit TILE_SPLIT field. Unknown sizes map to 4 via the default label. */
static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
	switch (eg_tile_split) {
	case 64:    return 0;
	case 128:   return 1;
	case 256:   return 2;
	case 512:   return 3;
	default:
	case 1024:  return 4;
	case 2048:  return 5;
	case 4096:  return 6;
	}
}
1984 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_SHIFT 45
1985 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_MASK 0x3
1987 /* This should be called before ac_compute_surface. */
1988 void ac_surface_set_bo_metadata(const struct radeon_info
*info
,
1989 struct radeon_surf
*surf
, uint64_t tiling_flags
,
1990 enum radeon_surf_mode
*mode
)
1994 if (info
->chip_class
>= GFX9
) {
1995 surf
->u
.gfx9
.surf
.swizzle_mode
= AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
1996 surf
->u
.gfx9
.dcc
.independent_64B_blocks
= AMDGPU_TILING_GET(tiling_flags
, DCC_INDEPENDENT_64B
);
1997 surf
->u
.gfx9
.dcc
.independent_128B_blocks
= AMDGPU_TILING_GET(tiling_flags
, DCC_INDEPENDENT_128B
);
1998 surf
->u
.gfx9
.dcc
.max_compressed_block_size
= AMDGPU_TILING_GET(tiling_flags
, DCC_MAX_COMPRESSED_BLOCK_SIZE
);
1999 surf
->u
.gfx9
.display_dcc_pitch_max
= AMDGPU_TILING_GET(tiling_flags
, DCC_PITCH_MAX
);
2000 scanout
= AMDGPU_TILING_GET(tiling_flags
, SCANOUT
);
2001 *mode
= surf
->u
.gfx9
.surf
.swizzle_mode
> 0 ? RADEON_SURF_MODE_2D
: RADEON_SURF_MODE_LINEAR_ALIGNED
;
2003 surf
->u
.legacy
.pipe_config
= AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
2004 surf
->u
.legacy
.bankw
= 1 << AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
2005 surf
->u
.legacy
.bankh
= 1 << AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
2006 surf
->u
.legacy
.tile_split
= eg_tile_split(AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
));
2007 surf
->u
.legacy
.mtilea
= 1 << AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
2008 surf
->u
.legacy
.num_banks
= 2 << AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
2009 scanout
= AMDGPU_TILING_GET(tiling_flags
, MICRO_TILE_MODE
) == 0; /* DISPLAY */
2011 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == 4) /* 2D_TILED_THIN1 */
2012 *mode
= RADEON_SURF_MODE_2D
;
2013 else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == 2) /* 1D_TILED_THIN1 */
2014 *mode
= RADEON_SURF_MODE_1D
;
2016 *mode
= RADEON_SURF_MODE_LINEAR_ALIGNED
;
2020 surf
->flags
|= RADEON_SURF_SCANOUT
;
2022 surf
->flags
&= ~RADEON_SURF_SCANOUT
;
/* Pack the surface's tiling layout into the 64-bit tiling-flags word stored
 * in amdgpu BO metadata, using the AMDGPU_TILING_* field layout from the
 * kernel's amdgpu_drm.h. This is the export counterpart of
 * ac_surface_set_bo_metadata, which unpacks the same fields on import.
 *
 * info:         GPU identification; chip_class selects the GFX9+ vs legacy
 *               (pre-GFX9) flag encoding.
 * surf:         surface whose layout is exported (only read here).
 * tiling_flags: output; fully overwritten with the packed word.
 */
void ac_surface_get_bo_metadata(const struct radeon_info *info,
                                struct radeon_surf *surf, uint64_t *tiling_flags)
{
   *tiling_flags = 0;

   if (info->chip_class >= GFX9) {
      uint64_t dcc_offset = 0;

      if (surf->dcc_offset) {
         /* Prefer the displayable DCC surface when a separate one exists. */
         dcc_offset = surf->display_dcc_offset ? surf->display_dcc_offset
                                               : surf->dcc_offset;
         /* DCC_OFFSET_256B stores the offset in 256-byte units; it must be
          * non-zero and fit in the field (24 bits). */
         assert((dcc_offset >> 8) != 0 && (dcc_offset >> 8) < (1 << 24));
      }

      *tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, surf->u.gfx9.surf.swizzle_mode);
      *tiling_flags |= AMDGPU_TILING_SET(DCC_OFFSET_256B, dcc_offset >> 8);
      *tiling_flags |= AMDGPU_TILING_SET(DCC_PITCH_MAX, surf->u.gfx9.display_dcc_pitch_max);
      *tiling_flags |= AMDGPU_TILING_SET(DCC_INDEPENDENT_64B, surf->u.gfx9.dcc.independent_64B_blocks);
      *tiling_flags |= AMDGPU_TILING_SET(DCC_INDEPENDENT_128B, surf->u.gfx9.dcc.independent_128B_blocks);
      *tiling_flags |= AMDGPU_TILING_SET(DCC_MAX_COMPRESSED_BLOCK_SIZE, surf->u.gfx9.dcc.max_compressed_block_size);
      *tiling_flags |= AMDGPU_TILING_SET(SCANOUT, (surf->flags & RADEON_SURF_SCANOUT) != 0);
   } else {
      /* Legacy (pre-GFX9) tiling description. The ARRAY_MODE values mirror
       * the hardware array modes, see comments on each line. */
      if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D)
         *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
      else if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D)
         *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
      else
         *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */

      *tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, surf->u.legacy.pipe_config);
      /* Bank geometry is stored as log2 of the actual dimensions
       * (the import path decodes with 1 << field). */
      *tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(surf->u.legacy.bankw));
      *tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(surf->u.legacy.bankh));
      if (surf->u.legacy.tile_split)
         *tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(surf->u.legacy.tile_split));
      *tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(surf->u.legacy.mtilea));
      /* NUM_BANKS encodes log2(num_banks) - 1 (import decodes with 2 << field). */
      *tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(surf->u.legacy.num_banks) - 1);

      if (surf->flags & RADEON_SURF_SCANOUT)
         *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
      else
         *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
   }
}
2069 static uint32_t ac_get_umd_metadata_word1(const struct radeon_info
*info
)
2071 return (ATI_VENDOR_ID
<< 16) | info
->pci_id
;
/* This should be called after ac_compute_surface. */
/* Import UMD (userspace driver) metadata read from a shared BO and apply it
 * to *surf. Validates the metadata header and the embedded image descriptor,
 * then reads DCC state out of the descriptor.
 *
 * info:                GPU identification; selects per-ASIC descriptor layout.
 * surf:                surface to update; DCC fields are written or zeroed.
 * num_storage_samples: sample count the caller allocated; checked against the
 *                      descriptor for MSAA resources.
 * num_mipmap_levels:   mip count the caller allocated; checked against the
 *                      descriptor for non-MSAA resources.
 * size_metadata:       size of the metadata blob in bytes.
 * metadata:            the blob; [0]=version, [1]=vendor/PCI id,
 *                      [2..9]=image descriptor (see ac_surface_get_umd_metadata).
 *
 * Returns false only when metadata that claims to match this driver is
 * internally inconsistent; returns true (with DCC disabled) when the metadata
 * is absent or comes from an incompatible driver.
 */
bool ac_surface_set_umd_metadata(const struct radeon_info *info,
                                 struct radeon_surf *surf,
                                 unsigned num_storage_samples,
                                 unsigned num_mipmap_levels,
                                 unsigned size_metadata,
                                 uint32_t metadata[64])
{
   uint32_t *desc = &metadata[2];
   uint64_t offset;

   if (info->chip_class >= GFX9)
      offset = surf->u.gfx9.surf_offset;
   else
      offset = surf->u.legacy.level[0].offset;

   if (offset ||                 /* Non-zero planes ignore metadata. */
       size_metadata < 10 * 4 || /* at least 2(header) + 8(desc) dwords */
       metadata[0] == 0 ||       /* invalid version number */
       metadata[1] != ac_get_umd_metadata_word1(info)) /* invalid PCI ID */ {
      /* Disable DCC because it might not be enabled. */
      ac_surface_zero_dcc_fields(surf);

      /* Don't report an error if the texture comes from an incompatible driver,
       * but this might not work.
       */
      return true;
   }

   /* Validate that sample counts and the number of mipmap levels match. */
   unsigned desc_last_level = G_008F1C_LAST_LEVEL(desc[3]);
   unsigned type = G_008F1C_TYPE(desc[3]);

   if (type == V_008F1C_SQ_RSRC_IMG_2D_MSAA || type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) {
      /* For MSAA images, LAST_LEVEL holds log2(samples) instead of a mip level. */
      unsigned log_samples = util_logbase2(MAX2(1, num_storage_samples));

      if (desc_last_level != log_samples) {
         fprintf(stderr,
                 "amdgpu: invalid MSAA texture import, "
                 "metadata has log2(samples) = %u, the caller set %u\n",
                 desc_last_level, log_samples);
         return false;
      }
   } else {
      if (desc_last_level != num_mipmap_levels - 1) {
         fprintf(stderr,
                 "amdgpu: invalid mipmapped texture import, "
                 "metadata has last_level = %u, the caller set %u\n",
                 desc_last_level, num_mipmap_levels - 1);
         return false;
      }
   }

   if (info->chip_class >= GFX8 && G_008F28_COMPRESSION_EN(desc[6])) {
      /* Read DCC information. The DCC address is packed differently into the
       * image descriptor on each generation. */
      switch (info->chip_class) {
      case GFX8:
         surf->dcc_offset = (uint64_t)desc[7] << 8;
         break;

      case GFX9:
         surf->dcc_offset =
            ((uint64_t)desc[7] << 8) | ((uint64_t)G_008F24_META_DATA_ADDRESS(desc[5]) << 40);
         surf->u.gfx9.dcc.pipe_aligned = G_008F24_META_PIPE_ALIGNED(desc[5]);
         surf->u.gfx9.dcc.rb_aligned = G_008F24_META_RB_ALIGNED(desc[5]);

         /* If DCC is unaligned, this can only be a displayable image. */
         if (!surf->u.gfx9.dcc.pipe_aligned && !surf->u.gfx9.dcc.rb_aligned)
            assert(surf->is_displayable);
         break;

      case GFX10:
      case GFX10_3:
         surf->dcc_offset =
            ((uint64_t)G_00A018_META_DATA_ADDRESS_LO(desc[6]) << 8) | ((uint64_t)desc[7] << 16);
         surf->u.gfx9.dcc.pipe_aligned = G_00A018_META_PIPE_ALIGNED(desc[6]);
         break;

      default:
         assert(0);
         return false;
      }
   } else {
      /* Disable DCC. dcc_offset is always set by texture_from_handle
       * and must be cleared here.
       */
      ac_surface_zero_dcc_fields(surf);
   }

   return true;
}
/* Export UMD (userspace driver) metadata describing *surf into the metadata
 * blob stored alongside a shared BO, so another process can reconstruct the
 * surface via ac_surface_set_umd_metadata.
 *
 * info:              GPU identification; selects per-ASIC descriptor layout.
 * surf:              surface to export (only read here).
 * num_mipmap_levels: number of mip levels; per-level offsets are appended on
 *                    GFX8 and older.
 * desc:              the 8-dword image descriptor for the resource; modified
 *                    in place (base address cleared, DCC offset made relative)
 *                    and then copied into the blob.
 * size_metadata:     output; number of valid bytes written to metadata.
 * metadata:          output blob, laid out as documented below.
 */
void ac_surface_get_umd_metadata(const struct radeon_info *info,
                                 struct radeon_surf *surf,
                                 unsigned num_mipmap_levels,
                                 uint32_t desc[8],
                                 unsigned *size_metadata, uint32_t metadata[64])
{
   /* Clear the base address and set the relative DCC offset. */
   desc[0] = 0;
   desc[1] &= C_008F14_BASE_ADDRESS_HI;

   /* The DCC/metadata address fields live in different descriptor dwords on
    * each generation; this mirrors the decode in ac_surface_set_umd_metadata. */
   switch (info->chip_class) {
   case GFX6:
   case GFX7:
      break;
   case GFX8:
      desc[7] = surf->dcc_offset >> 8;
      break;
   case GFX9:
      desc[7] = surf->dcc_offset >> 8;
      desc[5] &= C_008F24_META_DATA_ADDRESS;
      desc[5] |= S_008F24_META_DATA_ADDRESS(surf->dcc_offset >> 40);
      break;
   case GFX10:
   case GFX10_3:
      desc[6] &= C_00A018_META_DATA_ADDRESS_LO;
      desc[6] |= S_00A018_META_DATA_ADDRESS_LO(surf->dcc_offset >> 8);
      desc[7] = surf->dcc_offset >> 16;
      break;
   default:
      assert(0);
   }

   /* Metadata image format version 1:
    * [0] = 1 (metadata format identifier)
    * [1] = (VENDOR_ID << 16) | PCI_ID
    * [2:9] = image descriptor for the whole resource
    *         [2] is always 0, because the base address is cleared
    *         [9] is the DCC offset bits [39:8] from the beginning of
    *             the buffer
    * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
    */
   metadata[0] = 1; /* metadata image format version 1 */

   /* Tiling modes are ambiguous without a PCI ID. */
   metadata[1] = ac_get_umd_metadata_word1(info);

   /* Dwords [2:9] contain the image descriptor. */
   memcpy(&metadata[2], desc, 8 * 4);
   *size_metadata = 10 * 4;

   /* Dwords [10:..] contain the mipmap level offsets. */
   if (info->chip_class <= GFX8) {
      for (unsigned i = 0; i < num_mipmap_levels; i++)
         metadata[10 + i] = surf->u.legacy.level[i].offset >> 8;

      *size_metadata += num_mipmap_levels * 4;
   }
}
2226 void ac_surface_override_offset_stride(const struct radeon_info
*info
,
2227 struct radeon_surf
*surf
,
2228 unsigned num_mipmap_levels
,
2229 uint64_t offset
, unsigned pitch
)
2231 if (info
->chip_class
>= GFX9
) {
2233 surf
->u
.gfx9
.surf_pitch
= pitch
;
2234 if (num_mipmap_levels
== 1)
2235 surf
->u
.gfx9
.surf
.epitch
= pitch
- 1;
2236 surf
->u
.gfx9
.surf_slice_size
=
2237 (uint64_t)pitch
* surf
->u
.gfx9
.surf_height
* surf
->bpe
;
2239 surf
->u
.gfx9
.surf_offset
= offset
;
2240 if (surf
->u
.gfx9
.stencil_offset
)
2241 surf
->u
.gfx9
.stencil_offset
+= offset
;
2244 surf
->u
.legacy
.level
[0].nblk_x
= pitch
;
2245 surf
->u
.legacy
.level
[0].slice_size_dw
=
2246 ((uint64_t)pitch
* surf
->u
.legacy
.level
[0].nblk_y
* surf
->bpe
) / 4;
2250 for (unsigned i
= 0; i
< ARRAY_SIZE(surf
->u
.legacy
.level
); ++i
)
2251 surf
->u
.legacy
.level
[i
].offset
+= offset
;
2255 if (surf
->htile_offset
)
2256 surf
->htile_offset
+= offset
;
2257 if (surf
->fmask_offset
)
2258 surf
->fmask_offset
+= offset
;
2259 if (surf
->cmask_offset
)
2260 surf
->cmask_offset
+= offset
;
2261 if (surf
->dcc_offset
)
2262 surf
->dcc_offset
+= offset
;
2263 if (surf
->display_dcc_offset
)
2264 surf
->display_dcc_offset
+= offset
;
2265 if (surf
->dcc_retile_map_offset
)
2266 surf
->dcc_retile_map_offset
+= offset
;