/*
 * Copyright © 2011 Red Hat All Rights Reserved.
 * Copyright © 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#include "ac_surface.h"

#include "ac_gpu_info.h"
#include "addrlib/inc/addrinterface.h"
#include "addrlib/src/amdgpu_asic_addr.h"
#include "amd_family.h"
#include "drm-uapi/amdgpu_drm.h"
#include "util/hash_table.h"
#include "util/macros.h"
#include "util/simple_mtx.h"
#include "util/u_atomic.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Addrlib engine IDs; defined here as a fallback when the addrlib headers
 * do not provide them. The missing #endif lines are restored. */
#ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
#define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
#endif

#ifndef CIASICIDGFXENGINE_ARCTICISLAND
#define CIASICIDGFXENGINE_ARCTICISLAND 0x0000000D
#endif
59 /* The cache of DCC retile maps for reuse when allocating images of
62 simple_mtx_t dcc_retile_map_lock
;
63 struct hash_table
*dcc_retile_maps
;
64 struct hash_table
*dcc_retile_tile_indices
;
67 struct dcc_retile_map_key
{
68 enum radeon_family family
;
69 unsigned retile_width
;
70 unsigned retile_height
;
73 unsigned dcc_retile_num_elements
;
74 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT input
;
77 static uint32_t dcc_retile_map_hash_key(const void *key
)
79 return _mesa_hash_data(key
, sizeof(struct dcc_retile_map_key
));
82 static bool dcc_retile_map_keys_equal(const void *a
, const void *b
)
84 return memcmp(a
, b
, sizeof(struct dcc_retile_map_key
)) == 0;
87 static void dcc_retile_map_free(struct hash_entry
*entry
)
89 free((void *)entry
->key
);
93 struct dcc_retile_tile_key
{
94 enum radeon_family family
;
96 unsigned swizzle_mode
;
/* One cached DCC tile: log2 tile dimensions (in DCC elements) plus a
 * w*h table of intra-tile byte offsets, indexed as data[(y << w_log2) + x]. */
struct dcc_retile_tile_data {
   unsigned tile_width_log2;
   unsigned tile_height_log2;
   uint16_t *data; /* owned; freed by dcc_retile_tile_free */
};
107 static uint32_t dcc_retile_tile_hash_key(const void *key
)
109 return _mesa_hash_data(key
, sizeof(struct dcc_retile_tile_key
));
112 static bool dcc_retile_tile_keys_equal(const void *a
, const void *b
)
114 return memcmp(a
, b
, sizeof(struct dcc_retile_tile_key
)) == 0;
117 static void dcc_retile_tile_free(struct hash_entry
*entry
)
119 free((void *)entry
->key
);
120 free(((struct dcc_retile_tile_data
*)entry
->data
)->data
);
124 /* Assumes dcc_retile_map_lock is taken. */
125 static const struct dcc_retile_tile_data
*
126 ac_compute_dcc_retile_tile_indices(struct ac_addrlib
*addrlib
, const struct radeon_info
*info
,
127 unsigned bpp
, unsigned swizzle_mode
, bool rb_aligned
,
130 struct dcc_retile_tile_key key
= (struct dcc_retile_tile_key
){.family
= info
->family
,
132 .swizzle_mode
= swizzle_mode
,
133 .rb_aligned
= rb_aligned
,
134 .pipe_aligned
= pipe_aligned
};
136 struct hash_entry
*entry
= _mesa_hash_table_search(addrlib
->dcc_retile_tile_indices
, &key
);
140 ADDR2_COMPUTE_DCCINFO_INPUT din
= {0};
141 ADDR2_COMPUTE_DCCINFO_OUTPUT dout
= {0};
142 din
.size
= sizeof(ADDR2_COMPUTE_DCCINFO_INPUT
);
143 dout
.size
= sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT
);
145 din
.dccKeyFlags
.pipeAligned
= pipe_aligned
;
146 din
.dccKeyFlags
.rbAligned
= rb_aligned
;
147 din
.resourceType
= ADDR_RSRC_TEX_2D
;
148 din
.swizzleMode
= swizzle_mode
;
150 din
.unalignedWidth
= 1;
151 din
.unalignedHeight
= 1;
154 din
.numMipLevels
= 1;
156 ADDR_E_RETURNCODE ret
= Addr2ComputeDccInfo(addrlib
->handle
, &din
, &dout
);
160 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin
= {0};
161 addrin
.size
= sizeof(addrin
);
162 addrin
.swizzleMode
= swizzle_mode
;
163 addrin
.resourceType
= ADDR_RSRC_TEX_2D
;
165 addrin
.numSlices
= 1;
166 addrin
.numMipLevels
= 1;
168 addrin
.pitch
= dout
.pitch
;
169 addrin
.height
= dout
.height
;
170 addrin
.compressBlkWidth
= dout
.compressBlkWidth
;
171 addrin
.compressBlkHeight
= dout
.compressBlkHeight
;
172 addrin
.compressBlkDepth
= dout
.compressBlkDepth
;
173 addrin
.metaBlkWidth
= dout
.metaBlkWidth
;
174 addrin
.metaBlkHeight
= dout
.metaBlkHeight
;
175 addrin
.metaBlkDepth
= dout
.metaBlkDepth
;
176 addrin
.dccKeyFlags
.pipeAligned
= pipe_aligned
;
177 addrin
.dccKeyFlags
.rbAligned
= rb_aligned
;
179 unsigned w
= dout
.metaBlkWidth
/ dout
.compressBlkWidth
;
180 unsigned h
= dout
.metaBlkHeight
/ dout
.compressBlkHeight
;
181 uint16_t *indices
= malloc(w
* h
* sizeof(uint16_t));
185 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_OUTPUT addrout
= {};
186 addrout
.size
= sizeof(addrout
);
188 for (unsigned y
= 0; y
< h
; ++y
) {
189 addrin
.y
= y
* dout
.compressBlkHeight
;
190 for (unsigned x
= 0; x
< w
; ++x
) {
191 addrin
.x
= x
* dout
.compressBlkWidth
;
194 if (Addr2ComputeDccAddrFromCoord(addrlib
->handle
, &addrin
, &addrout
) != ADDR_OK
) {
198 indices
[y
* w
+ x
] = addrout
.addr
;
202 struct dcc_retile_tile_data
*data
= calloc(1, sizeof(*data
));
208 data
->tile_width_log2
= util_logbase2(w
);
209 data
->tile_height_log2
= util_logbase2(h
);
210 data
->data
= indices
;
212 struct dcc_retile_tile_key
*heap_key
= mem_dup(&key
, sizeof(key
));
219 entry
= _mesa_hash_table_insert(addrlib
->dcc_retile_tile_indices
, heap_key
, data
);
228 static uint32_t ac_compute_retile_tile_addr(const struct dcc_retile_tile_data
*tile
,
229 unsigned stride
, unsigned x
, unsigned y
)
231 unsigned x_mask
= (1u << tile
->tile_width_log2
) - 1;
232 unsigned y_mask
= (1u << tile
->tile_height_log2
) - 1;
233 unsigned tile_size_log2
= tile
->tile_width_log2
+ tile
->tile_height_log2
;
235 unsigned base
= ((y
>> tile
->tile_height_log2
) * stride
+ (x
>> tile
->tile_width_log2
))
237 unsigned offset_in_tile
= tile
->data
[((y
& y_mask
) << tile
->tile_width_log2
) + (x
& x_mask
)];
238 return base
+ offset_in_tile
;
241 static uint32_t *ac_compute_dcc_retile_map(struct ac_addrlib
*addrlib
,
242 const struct radeon_info
*info
, unsigned retile_width
,
243 unsigned retile_height
, bool rb_aligned
,
244 bool pipe_aligned
, bool use_uint16
,
245 unsigned dcc_retile_num_elements
,
246 const ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT
*in
)
248 unsigned dcc_retile_map_size
= dcc_retile_num_elements
* (use_uint16
? 2 : 4);
249 struct dcc_retile_map_key key
;
251 assert(in
->numFrags
== 1 && in
->numSlices
== 1 && in
->numMipLevels
== 1);
253 memset(&key
, 0, sizeof(key
));
254 key
.family
= info
->family
;
255 key
.retile_width
= retile_width
;
256 key
.retile_height
= retile_height
;
257 key
.rb_aligned
= rb_aligned
;
258 key
.pipe_aligned
= pipe_aligned
;
259 key
.dcc_retile_num_elements
= dcc_retile_num_elements
;
260 memcpy(&key
.input
, in
, sizeof(*in
));
262 simple_mtx_lock(&addrlib
->dcc_retile_map_lock
);
264 /* If we have already computed this retile map, get it from the hash table. */
265 struct hash_entry
*entry
= _mesa_hash_table_search(addrlib
->dcc_retile_maps
, &key
);
267 uint32_t *map
= entry
->data
;
268 simple_mtx_unlock(&addrlib
->dcc_retile_map_lock
);
272 const struct dcc_retile_tile_data
*src_tile
= ac_compute_dcc_retile_tile_indices(
273 addrlib
, info
, in
->bpp
, in
->swizzleMode
, rb_aligned
, pipe_aligned
);
274 const struct dcc_retile_tile_data
*dst_tile
=
275 ac_compute_dcc_retile_tile_indices(addrlib
, info
, in
->bpp
, in
->swizzleMode
, false, false);
276 if (!src_tile
|| !dst_tile
) {
277 simple_mtx_unlock(&addrlib
->dcc_retile_map_lock
);
281 void *dcc_retile_map
= malloc(dcc_retile_map_size
);
282 if (!dcc_retile_map
) {
283 simple_mtx_unlock(&addrlib
->dcc_retile_map_lock
);
288 unsigned w
= DIV_ROUND_UP(retile_width
, in
->compressBlkWidth
);
289 unsigned h
= DIV_ROUND_UP(retile_height
, in
->compressBlkHeight
);
290 unsigned src_stride
= DIV_ROUND_UP(w
, 1u << src_tile
->tile_width_log2
);
291 unsigned dst_stride
= DIV_ROUND_UP(w
, 1u << dst_tile
->tile_width_log2
);
293 for (unsigned y
= 0; y
< h
; ++y
) {
294 for (unsigned x
= 0; x
< w
; ++x
) {
295 unsigned src_addr
= ac_compute_retile_tile_addr(src_tile
, src_stride
, x
, y
);
296 unsigned dst_addr
= ac_compute_retile_tile_addr(dst_tile
, dst_stride
, x
, y
);
299 ((uint16_t *)dcc_retile_map
)[2 * index
] = src_addr
;
300 ((uint16_t *)dcc_retile_map
)[2 * index
+ 1] = dst_addr
;
302 ((uint32_t *)dcc_retile_map
)[2 * index
] = src_addr
;
303 ((uint32_t *)dcc_retile_map
)[2 * index
+ 1] = dst_addr
;
309 /* Fill the remaining pairs with the last one (for the compute shader). */
310 for (unsigned i
= index
* 2; i
< dcc_retile_num_elements
; i
++) {
312 ((uint16_t *)dcc_retile_map
)[i
] = ((uint16_t *)dcc_retile_map
)[i
- 2];
314 ((uint32_t *)dcc_retile_map
)[i
] = ((uint32_t *)dcc_retile_map
)[i
- 2];
317 /* Insert the retile map into the hash table, so that it can be reused and
318 * the computation can be skipped for similar image sizes.
320 _mesa_hash_table_insert(addrlib
->dcc_retile_maps
, mem_dup(&key
, sizeof(key
)), dcc_retile_map
);
322 simple_mtx_unlock(&addrlib
->dcc_retile_map_lock
);
323 return dcc_retile_map
;
326 static void *ADDR_API
allocSysMem(const ADDR_ALLOCSYSMEM_INPUT
*pInput
)
328 return malloc(pInput
->sizeInBytes
);
331 static ADDR_E_RETURNCODE ADDR_API
freeSysMem(const ADDR_FREESYSMEM_INPUT
*pInput
)
333 free(pInput
->pVirtAddr
);
337 struct ac_addrlib
*ac_addrlib_create(const struct radeon_info
*info
,
338 const struct amdgpu_gpu_info
*amdinfo
, uint64_t *max_alignment
)
340 ADDR_CREATE_INPUT addrCreateInput
= {0};
341 ADDR_CREATE_OUTPUT addrCreateOutput
= {0};
342 ADDR_REGISTER_VALUE regValue
= {0};
343 ADDR_CREATE_FLAGS createFlags
= {{0}};
344 ADDR_GET_MAX_ALIGNMENTS_OUTPUT addrGetMaxAlignmentsOutput
= {0};
345 ADDR_E_RETURNCODE addrRet
;
347 addrCreateInput
.size
= sizeof(ADDR_CREATE_INPUT
);
348 addrCreateOutput
.size
= sizeof(ADDR_CREATE_OUTPUT
);
350 regValue
.gbAddrConfig
= amdinfo
->gb_addr_cfg
;
351 createFlags
.value
= 0;
353 addrCreateInput
.chipFamily
= info
->family_id
;
354 addrCreateInput
.chipRevision
= info
->chip_external_rev
;
356 if (addrCreateInput
.chipFamily
== FAMILY_UNKNOWN
)
359 if (addrCreateInput
.chipFamily
>= FAMILY_AI
) {
360 addrCreateInput
.chipEngine
= CIASICIDGFXENGINE_ARCTICISLAND
;
362 regValue
.noOfBanks
= amdinfo
->mc_arb_ramcfg
& 0x3;
363 regValue
.noOfRanks
= (amdinfo
->mc_arb_ramcfg
& 0x4) >> 2;
365 regValue
.backendDisables
= amdinfo
->enabled_rb_pipes_mask
;
366 regValue
.pTileConfig
= amdinfo
->gb_tile_mode
;
367 regValue
.noOfEntries
= ARRAY_SIZE(amdinfo
->gb_tile_mode
);
368 if (addrCreateInput
.chipFamily
== FAMILY_SI
) {
369 regValue
.pMacroTileConfig
= NULL
;
370 regValue
.noOfMacroEntries
= 0;
372 regValue
.pMacroTileConfig
= amdinfo
->gb_macro_tile_mode
;
373 regValue
.noOfMacroEntries
= ARRAY_SIZE(amdinfo
->gb_macro_tile_mode
);
376 createFlags
.useTileIndex
= 1;
377 createFlags
.useHtileSliceAlign
= 1;
379 addrCreateInput
.chipEngine
= CIASICIDGFXENGINE_SOUTHERNISLAND
;
382 addrCreateInput
.callbacks
.allocSysMem
= allocSysMem
;
383 addrCreateInput
.callbacks
.freeSysMem
= freeSysMem
;
384 addrCreateInput
.callbacks
.debugPrint
= 0;
385 addrCreateInput
.createFlags
= createFlags
;
386 addrCreateInput
.regValue
= regValue
;
388 addrRet
= AddrCreate(&addrCreateInput
, &addrCreateOutput
);
389 if (addrRet
!= ADDR_OK
)
393 addrRet
= AddrGetMaxAlignments(addrCreateOutput
.hLib
, &addrGetMaxAlignmentsOutput
);
394 if (addrRet
== ADDR_OK
) {
395 *max_alignment
= addrGetMaxAlignmentsOutput
.baseAlign
;
399 struct ac_addrlib
*addrlib
= calloc(1, sizeof(struct ac_addrlib
));
401 AddrDestroy(addrCreateOutput
.hLib
);
405 addrlib
->handle
= addrCreateOutput
.hLib
;
406 simple_mtx_init(&addrlib
->dcc_retile_map_lock
, mtx_plain
);
407 addrlib
->dcc_retile_maps
=
408 _mesa_hash_table_create(NULL
, dcc_retile_map_hash_key
, dcc_retile_map_keys_equal
);
409 addrlib
->dcc_retile_tile_indices
=
410 _mesa_hash_table_create(NULL
, dcc_retile_tile_hash_key
, dcc_retile_tile_keys_equal
);
414 void ac_addrlib_destroy(struct ac_addrlib
*addrlib
)
416 AddrDestroy(addrlib
->handle
);
417 simple_mtx_destroy(&addrlib
->dcc_retile_map_lock
);
418 _mesa_hash_table_destroy(addrlib
->dcc_retile_maps
, dcc_retile_map_free
);
419 _mesa_hash_table_destroy(addrlib
->dcc_retile_tile_indices
, dcc_retile_tile_free
);
423 static int surf_config_sanity(const struct ac_surf_config
*config
, unsigned flags
)
425 /* FMASK is allocated together with the color surface and can't be
426 * allocated separately.
428 assert(!(flags
& RADEON_SURF_FMASK
));
429 if (flags
& RADEON_SURF_FMASK
)
432 /* all dimension must be at least 1 ! */
433 if (!config
->info
.width
|| !config
->info
.height
|| !config
->info
.depth
||
434 !config
->info
.array_size
|| !config
->info
.levels
)
437 switch (config
->info
.samples
) {
445 if (flags
& RADEON_SURF_Z_OR_SBUFFER
)
452 if (!(flags
& RADEON_SURF_Z_OR_SBUFFER
)) {
453 switch (config
->info
.storage_samples
) {
465 if (config
->is_3d
&& config
->info
.array_size
> 1)
467 if (config
->is_cube
&& config
->info
.depth
> 1)
473 static int gfx6_compute_level(ADDR_HANDLE addrlib
, const struct ac_surf_config
*config
,
474 struct radeon_surf
*surf
, bool is_stencil
, unsigned level
,
475 bool compressed
, ADDR_COMPUTE_SURFACE_INFO_INPUT
*AddrSurfInfoIn
,
476 ADDR_COMPUTE_SURFACE_INFO_OUTPUT
*AddrSurfInfoOut
,
477 ADDR_COMPUTE_DCCINFO_INPUT
*AddrDccIn
,
478 ADDR_COMPUTE_DCCINFO_OUTPUT
*AddrDccOut
,
479 ADDR_COMPUTE_HTILE_INFO_INPUT
*AddrHtileIn
,
480 ADDR_COMPUTE_HTILE_INFO_OUTPUT
*AddrHtileOut
)
482 struct legacy_surf_level
*surf_level
;
483 ADDR_E_RETURNCODE ret
;
485 AddrSurfInfoIn
->mipLevel
= level
;
486 AddrSurfInfoIn
->width
= u_minify(config
->info
.width
, level
);
487 AddrSurfInfoIn
->height
= u_minify(config
->info
.height
, level
);
489 /* Make GFX6 linear surfaces compatible with GFX9 for hybrid graphics,
490 * because GFX9 needs linear alignment of 256 bytes.
492 if (config
->info
.levels
== 1 && AddrSurfInfoIn
->tileMode
== ADDR_TM_LINEAR_ALIGNED
&&
493 AddrSurfInfoIn
->bpp
&& util_is_power_of_two_or_zero(AddrSurfInfoIn
->bpp
)) {
494 unsigned alignment
= 256 / (AddrSurfInfoIn
->bpp
/ 8);
496 AddrSurfInfoIn
->width
= align(AddrSurfInfoIn
->width
, alignment
);
499 /* addrlib assumes the bytes/pixel is a divisor of 64, which is not
500 * true for r32g32b32 formats. */
501 if (AddrSurfInfoIn
->bpp
== 96) {
502 assert(config
->info
.levels
== 1);
503 assert(AddrSurfInfoIn
->tileMode
== ADDR_TM_LINEAR_ALIGNED
);
505 /* The least common multiple of 64 bytes and 12 bytes/pixel is
506 * 192 bytes, or 16 pixels. */
507 AddrSurfInfoIn
->width
= align(AddrSurfInfoIn
->width
, 16);
511 AddrSurfInfoIn
->numSlices
= u_minify(config
->info
.depth
, level
);
512 else if (config
->is_cube
)
513 AddrSurfInfoIn
->numSlices
= 6;
515 AddrSurfInfoIn
->numSlices
= config
->info
.array_size
;
518 /* Set the base level pitch. This is needed for calculation
519 * of non-zero levels. */
521 AddrSurfInfoIn
->basePitch
= surf
->u
.legacy
.stencil_level
[0].nblk_x
;
523 AddrSurfInfoIn
->basePitch
= surf
->u
.legacy
.level
[0].nblk_x
;
525 /* Convert blocks to pixels for compressed formats. */
527 AddrSurfInfoIn
->basePitch
*= surf
->blk_w
;
530 ret
= AddrComputeSurfaceInfo(addrlib
, AddrSurfInfoIn
, AddrSurfInfoOut
);
531 if (ret
!= ADDR_OK
) {
535 surf_level
= is_stencil
? &surf
->u
.legacy
.stencil_level
[level
] : &surf
->u
.legacy
.level
[level
];
536 surf_level
->offset
= align64(surf
->surf_size
, AddrSurfInfoOut
->baseAlign
);
537 surf_level
->slice_size_dw
= AddrSurfInfoOut
->sliceSize
/ 4;
538 surf_level
->nblk_x
= AddrSurfInfoOut
->pitch
;
539 surf_level
->nblk_y
= AddrSurfInfoOut
->height
;
541 switch (AddrSurfInfoOut
->tileMode
) {
542 case ADDR_TM_LINEAR_ALIGNED
:
543 surf_level
->mode
= RADEON_SURF_MODE_LINEAR_ALIGNED
;
545 case ADDR_TM_1D_TILED_THIN1
:
546 surf_level
->mode
= RADEON_SURF_MODE_1D
;
548 case ADDR_TM_2D_TILED_THIN1
:
549 surf_level
->mode
= RADEON_SURF_MODE_2D
;
556 surf
->u
.legacy
.stencil_tiling_index
[level
] = AddrSurfInfoOut
->tileIndex
;
558 surf
->u
.legacy
.tiling_index
[level
] = AddrSurfInfoOut
->tileIndex
;
560 surf
->surf_size
= surf_level
->offset
+ AddrSurfInfoOut
->surfSize
;
562 /* Clear DCC fields at the beginning. */
563 surf_level
->dcc_offset
= 0;
565 /* The previous level's flag tells us if we can use DCC for this level. */
566 if (AddrSurfInfoIn
->flags
.dccCompatible
&& (level
== 0 || AddrDccOut
->subLvlCompressible
)) {
567 bool prev_level_clearable
= level
== 0 || AddrDccOut
->dccRamSizeAligned
;
569 AddrDccIn
->colorSurfSize
= AddrSurfInfoOut
->surfSize
;
570 AddrDccIn
->tileMode
= AddrSurfInfoOut
->tileMode
;
571 AddrDccIn
->tileInfo
= *AddrSurfInfoOut
->pTileInfo
;
572 AddrDccIn
->tileIndex
= AddrSurfInfoOut
->tileIndex
;
573 AddrDccIn
->macroModeIndex
= AddrSurfInfoOut
->macroModeIndex
;
575 ret
= AddrComputeDccInfo(addrlib
, AddrDccIn
, AddrDccOut
);
577 if (ret
== ADDR_OK
) {
578 surf_level
->dcc_offset
= surf
->dcc_size
;
579 surf
->num_dcc_levels
= level
+ 1;
580 surf
->dcc_size
= surf_level
->dcc_offset
+ AddrDccOut
->dccRamSize
;
581 surf
->dcc_alignment
= MAX2(surf
->dcc_alignment
, AddrDccOut
->dccRamBaseAlign
);
583 /* If the DCC size of a subresource (1 mip level or 1 slice)
584 * is not aligned, the DCC memory layout is not contiguous for
585 * that subresource, which means we can't use fast clear.
587 * We only do fast clears for whole mipmap levels. If we did
588 * per-slice fast clears, the same restriction would apply.
589 * (i.e. only compute the slice size and see if it's aligned)
591 * The last level can be non-contiguous and still be clearable
592 * if it's interleaved with the next level that doesn't exist.
594 if (AddrDccOut
->dccRamSizeAligned
||
595 (prev_level_clearable
&& level
== config
->info
.levels
- 1))
596 surf_level
->dcc_fast_clear_size
= AddrDccOut
->dccFastClearSize
;
598 surf_level
->dcc_fast_clear_size
= 0;
600 /* Compute the DCC slice size because addrlib doesn't
601 * provide this info. As DCC memory is linear (each
602 * slice is the same size) it's easy to compute.
604 surf
->dcc_slice_size
= AddrDccOut
->dccRamSize
/ config
->info
.array_size
;
606 /* For arrays, we have to compute the DCC info again
607 * with one slice size to get a correct fast clear
610 if (config
->info
.array_size
> 1) {
611 AddrDccIn
->colorSurfSize
= AddrSurfInfoOut
->sliceSize
;
612 AddrDccIn
->tileMode
= AddrSurfInfoOut
->tileMode
;
613 AddrDccIn
->tileInfo
= *AddrSurfInfoOut
->pTileInfo
;
614 AddrDccIn
->tileIndex
= AddrSurfInfoOut
->tileIndex
;
615 AddrDccIn
->macroModeIndex
= AddrSurfInfoOut
->macroModeIndex
;
617 ret
= AddrComputeDccInfo(addrlib
, AddrDccIn
, AddrDccOut
);
618 if (ret
== ADDR_OK
) {
619 /* If the DCC memory isn't properly
620 * aligned, the data are interleaved
623 if (AddrDccOut
->dccRamSizeAligned
)
624 surf_level
->dcc_slice_fast_clear_size
= AddrDccOut
->dccFastClearSize
;
626 surf_level
->dcc_slice_fast_clear_size
= 0;
629 if (surf
->flags
& RADEON_SURF_CONTIGUOUS_DCC_LAYERS
&&
630 surf
->dcc_slice_size
!= surf_level
->dcc_slice_fast_clear_size
) {
632 surf
->num_dcc_levels
= 0;
633 AddrDccOut
->subLvlCompressible
= false;
636 surf_level
->dcc_slice_fast_clear_size
= surf_level
->dcc_fast_clear_size
;
642 if (!is_stencil
&& AddrSurfInfoIn
->flags
.depth
&& surf_level
->mode
== RADEON_SURF_MODE_2D
&&
643 level
== 0 && !(surf
->flags
& RADEON_SURF_NO_HTILE
)) {
644 AddrHtileIn
->flags
.tcCompatible
= AddrSurfInfoOut
->tcCompatible
;
645 AddrHtileIn
->pitch
= AddrSurfInfoOut
->pitch
;
646 AddrHtileIn
->height
= AddrSurfInfoOut
->height
;
647 AddrHtileIn
->numSlices
= AddrSurfInfoOut
->depth
;
648 AddrHtileIn
->blockWidth
= ADDR_HTILE_BLOCKSIZE_8
;
649 AddrHtileIn
->blockHeight
= ADDR_HTILE_BLOCKSIZE_8
;
650 AddrHtileIn
->pTileInfo
= AddrSurfInfoOut
->pTileInfo
;
651 AddrHtileIn
->tileIndex
= AddrSurfInfoOut
->tileIndex
;
652 AddrHtileIn
->macroModeIndex
= AddrSurfInfoOut
->macroModeIndex
;
654 ret
= AddrComputeHtileInfo(addrlib
, AddrHtileIn
, AddrHtileOut
);
656 if (ret
== ADDR_OK
) {
657 surf
->htile_size
= AddrHtileOut
->htileBytes
;
658 surf
->htile_slice_size
= AddrHtileOut
->sliceSize
;
659 surf
->htile_alignment
= AddrHtileOut
->baseAlign
;
666 static void gfx6_set_micro_tile_mode(struct radeon_surf
*surf
, const struct radeon_info
*info
)
668 uint32_t tile_mode
= info
->si_tile_mode_array
[surf
->u
.legacy
.tiling_index
[0]];
670 if (info
->chip_class
>= GFX7
)
671 surf
->micro_tile_mode
= G_009910_MICRO_TILE_MODE_NEW(tile_mode
);
673 surf
->micro_tile_mode
= G_009910_MICRO_TILE_MODE(tile_mode
);
676 static unsigned cik_get_macro_tile_index(struct radeon_surf
*surf
)
678 unsigned index
, tileb
;
680 tileb
= 8 * 8 * surf
->bpe
;
681 tileb
= MIN2(surf
->u
.legacy
.tile_split
, tileb
);
683 for (index
= 0; tileb
> 64; index
++)
690 static bool get_display_flag(const struct ac_surf_config
*config
, const struct radeon_surf
*surf
)
692 unsigned num_channels
= config
->info
.num_channels
;
693 unsigned bpe
= surf
->bpe
;
695 if (!config
->is_3d
&& !config
->is_cube
&& !(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
) &&
696 surf
->flags
& RADEON_SURF_SCANOUT
&& config
->info
.samples
<= 1 && surf
->blk_w
<= 2 &&
699 if (surf
->blk_w
== 2 && surf
->blk_h
== 1)
702 if (/* RGBA8 or RGBA16F */
703 (bpe
>= 4 && bpe
<= 8 && num_channels
== 4) ||
704 /* R5G6B5 or R5G5B5A1 */
705 (bpe
== 2 && num_channels
>= 3) ||
707 (bpe
== 1 && num_channels
== 1))
714 * This must be called after the first level is computed.
716 * Copy surface-global settings like pipe/bank config from level 0 surface
717 * computation, and compute tile swizzle.
719 static int gfx6_surface_settings(ADDR_HANDLE addrlib
, const struct radeon_info
*info
,
720 const struct ac_surf_config
*config
,
721 ADDR_COMPUTE_SURFACE_INFO_OUTPUT
*csio
, struct radeon_surf
*surf
)
723 surf
->surf_alignment
= csio
->baseAlign
;
724 surf
->u
.legacy
.pipe_config
= csio
->pTileInfo
->pipeConfig
- 1;
725 gfx6_set_micro_tile_mode(surf
, info
);
727 /* For 2D modes only. */
728 if (csio
->tileMode
>= ADDR_TM_2D_TILED_THIN1
) {
729 surf
->u
.legacy
.bankw
= csio
->pTileInfo
->bankWidth
;
730 surf
->u
.legacy
.bankh
= csio
->pTileInfo
->bankHeight
;
731 surf
->u
.legacy
.mtilea
= csio
->pTileInfo
->macroAspectRatio
;
732 surf
->u
.legacy
.tile_split
= csio
->pTileInfo
->tileSplitBytes
;
733 surf
->u
.legacy
.num_banks
= csio
->pTileInfo
->banks
;
734 surf
->u
.legacy
.macro_tile_index
= csio
->macroModeIndex
;
736 surf
->u
.legacy
.macro_tile_index
= 0;
739 /* Compute tile swizzle. */
740 /* TODO: fix tile swizzle with mipmapping for GFX6 */
741 if ((info
->chip_class
>= GFX7
|| config
->info
.levels
== 1) && config
->info
.surf_index
&&
742 surf
->u
.legacy
.level
[0].mode
== RADEON_SURF_MODE_2D
&&
743 !(surf
->flags
& (RADEON_SURF_Z_OR_SBUFFER
| RADEON_SURF_SHAREABLE
)) &&
744 !get_display_flag(config
, surf
)) {
745 ADDR_COMPUTE_BASE_SWIZZLE_INPUT AddrBaseSwizzleIn
= {0};
746 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT AddrBaseSwizzleOut
= {0};
748 AddrBaseSwizzleIn
.size
= sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT
);
749 AddrBaseSwizzleOut
.size
= sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT
);
751 AddrBaseSwizzleIn
.surfIndex
= p_atomic_inc_return(config
->info
.surf_index
) - 1;
752 AddrBaseSwizzleIn
.tileIndex
= csio
->tileIndex
;
753 AddrBaseSwizzleIn
.macroModeIndex
= csio
->macroModeIndex
;
754 AddrBaseSwizzleIn
.pTileInfo
= csio
->pTileInfo
;
755 AddrBaseSwizzleIn
.tileMode
= csio
->tileMode
;
757 int r
= AddrComputeBaseSwizzle(addrlib
, &AddrBaseSwizzleIn
, &AddrBaseSwizzleOut
);
761 assert(AddrBaseSwizzleOut
.tileSwizzle
<=
762 u_bit_consecutive(0, sizeof(surf
->tile_swizzle
) * 8));
763 surf
->tile_swizzle
= AddrBaseSwizzleOut
.tileSwizzle
;
768 static void ac_compute_cmask(const struct radeon_info
*info
, const struct ac_surf_config
*config
,
769 struct radeon_surf
*surf
)
771 unsigned pipe_interleave_bytes
= info
->pipe_interleave_bytes
;
772 unsigned num_pipes
= info
->num_tile_pipes
;
773 unsigned cl_width
, cl_height
;
775 if (surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
|| surf
->is_linear
||
776 (config
->info
.samples
>= 2 && !surf
->fmask_size
))
779 assert(info
->chip_class
<= GFX8
);
794 case 16: /* Hawaii */
803 unsigned base_align
= num_pipes
* pipe_interleave_bytes
;
805 unsigned width
= align(surf
->u
.legacy
.level
[0].nblk_x
, cl_width
* 8);
806 unsigned height
= align(surf
->u
.legacy
.level
[0].nblk_y
, cl_height
* 8);
807 unsigned slice_elements
= (width
* height
) / (8 * 8);
809 /* Each element of CMASK is a nibble. */
810 unsigned slice_bytes
= slice_elements
/ 2;
812 surf
->u
.legacy
.cmask_slice_tile_max
= (width
* height
) / (128 * 128);
813 if (surf
->u
.legacy
.cmask_slice_tile_max
)
814 surf
->u
.legacy
.cmask_slice_tile_max
-= 1;
818 num_layers
= config
->info
.depth
;
819 else if (config
->is_cube
)
822 num_layers
= config
->info
.array_size
;
824 surf
->cmask_alignment
= MAX2(256, base_align
);
825 surf
->cmask_slice_size
= align(slice_bytes
, base_align
);
826 surf
->cmask_size
= surf
->cmask_slice_size
* num_layers
;
830 * Fill in the tiling information in \p surf based on the given surface config.
832 * The following fields of \p surf must be initialized by the caller:
833 * blk_w, blk_h, bpe, flags.
835 static int gfx6_compute_surface(ADDR_HANDLE addrlib
, const struct radeon_info
*info
,
836 const struct ac_surf_config
*config
, enum radeon_surf_mode mode
,
837 struct radeon_surf
*surf
)
841 ADDR_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn
= {0};
842 ADDR_COMPUTE_SURFACE_INFO_OUTPUT AddrSurfInfoOut
= {0};
843 ADDR_COMPUTE_DCCINFO_INPUT AddrDccIn
= {0};
844 ADDR_COMPUTE_DCCINFO_OUTPUT AddrDccOut
= {0};
845 ADDR_COMPUTE_HTILE_INFO_INPUT AddrHtileIn
= {0};
846 ADDR_COMPUTE_HTILE_INFO_OUTPUT AddrHtileOut
= {0};
847 ADDR_TILEINFO AddrTileInfoIn
= {0};
848 ADDR_TILEINFO AddrTileInfoOut
= {0};
851 AddrSurfInfoIn
.size
= sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT
);
852 AddrSurfInfoOut
.size
= sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT
);
853 AddrDccIn
.size
= sizeof(ADDR_COMPUTE_DCCINFO_INPUT
);
854 AddrDccOut
.size
= sizeof(ADDR_COMPUTE_DCCINFO_OUTPUT
);
855 AddrHtileIn
.size
= sizeof(ADDR_COMPUTE_HTILE_INFO_INPUT
);
856 AddrHtileOut
.size
= sizeof(ADDR_COMPUTE_HTILE_INFO_OUTPUT
);
857 AddrSurfInfoOut
.pTileInfo
= &AddrTileInfoOut
;
859 compressed
= surf
->blk_w
== 4 && surf
->blk_h
== 4;
861 /* MSAA requires 2D tiling. */
862 if (config
->info
.samples
> 1)
863 mode
= RADEON_SURF_MODE_2D
;
865 /* DB doesn't support linear layouts. */
866 if (surf
->flags
& (RADEON_SURF_Z_OR_SBUFFER
) && mode
< RADEON_SURF_MODE_1D
)
867 mode
= RADEON_SURF_MODE_1D
;
869 /* Set the requested tiling mode. */
871 case RADEON_SURF_MODE_LINEAR_ALIGNED
:
872 AddrSurfInfoIn
.tileMode
= ADDR_TM_LINEAR_ALIGNED
;
874 case RADEON_SURF_MODE_1D
:
875 AddrSurfInfoIn
.tileMode
= ADDR_TM_1D_TILED_THIN1
;
877 case RADEON_SURF_MODE_2D
:
878 AddrSurfInfoIn
.tileMode
= ADDR_TM_2D_TILED_THIN1
;
884 /* The format must be set correctly for the allocation of compressed
885 * textures to work. In other cases, setting the bpp is sufficient.
890 AddrSurfInfoIn
.format
= ADDR_FMT_BC1
;
893 AddrSurfInfoIn
.format
= ADDR_FMT_BC3
;
899 AddrDccIn
.bpp
= AddrSurfInfoIn
.bpp
= surf
->bpe
* 8;
902 AddrDccIn
.numSamples
= AddrSurfInfoIn
.numSamples
= MAX2(1, config
->info
.samples
);
903 AddrSurfInfoIn
.tileIndex
= -1;
905 if (!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
)) {
906 AddrDccIn
.numSamples
= AddrSurfInfoIn
.numFrags
= MAX2(1, config
->info
.storage_samples
);
909 /* Set the micro tile type. */
910 if (surf
->flags
& RADEON_SURF_SCANOUT
)
911 AddrSurfInfoIn
.tileType
= ADDR_DISPLAYABLE
;
912 else if (surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
)
913 AddrSurfInfoIn
.tileType
= ADDR_DEPTH_SAMPLE_ORDER
;
915 AddrSurfInfoIn
.tileType
= ADDR_NON_DISPLAYABLE
;
917 AddrSurfInfoIn
.flags
.color
= !(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
);
918 AddrSurfInfoIn
.flags
.depth
= (surf
->flags
& RADEON_SURF_ZBUFFER
) != 0;
919 AddrSurfInfoIn
.flags
.cube
= config
->is_cube
;
920 AddrSurfInfoIn
.flags
.display
= get_display_flag(config
, surf
);
921 AddrSurfInfoIn
.flags
.pow2Pad
= config
->info
.levels
> 1;
922 AddrSurfInfoIn
.flags
.tcCompatible
= (surf
->flags
& RADEON_SURF_TC_COMPATIBLE_HTILE
) != 0;
924 /* Only degrade the tile mode for space if TC-compatible HTILE hasn't been
925 * requested, because TC-compatible HTILE requires 2D tiling.
927 AddrSurfInfoIn
.flags
.opt4Space
= !AddrSurfInfoIn
.flags
.tcCompatible
&&
928 !AddrSurfInfoIn
.flags
.fmask
&& config
->info
.samples
<= 1 &&
929 !(surf
->flags
& RADEON_SURF_FORCE_SWIZZLE_MODE
);
932 * - If we add MSAA support, keep in mind that CB can't decompress 8bpp
934 * - Mipmapped array textures have low performance (discovered by a closed
937 AddrSurfInfoIn
.flags
.dccCompatible
=
938 info
->chip_class
>= GFX8
&& info
->has_graphics
&& /* disable DCC on compute-only chips */
939 !(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
) && !(surf
->flags
& RADEON_SURF_DISABLE_DCC
) &&
941 ((config
->info
.array_size
== 1 && config
->info
.depth
== 1) || config
->info
.levels
== 1);
943 AddrSurfInfoIn
.flags
.noStencil
= (surf
->flags
& RADEON_SURF_SBUFFER
) == 0;
944 AddrSurfInfoIn
.flags
.compressZ
= !!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
);
946 /* On GFX7-GFX8, the DB uses the same pitch and tile mode (except tilesplit)
947 * for Z and stencil. This can cause a number of problems which we work
950 * - a depth part that is incompatible with mipmapped texturing
951 * - at least on Stoney, entirely incompatible Z/S aspects (e.g.
952 * incorrect tiling applied to the stencil part, stencil buffer
953 * memory accesses that go out of bounds) even without mipmapping
955 * Some piglit tests that are prone to different types of related
957 * ./bin/ext_framebuffer_multisample-upsample 2 stencil
958 * ./bin/framebuffer-blit-levels {draw,read} stencil
959 * ./bin/ext_framebuffer_multisample-unaligned-blit N {depth,stencil} {msaa,upsample,downsample}
960 * ./bin/fbo-depth-array fs-writes-{depth,stencil} / {depth,stencil}-{clear,layered-clear,draw}
961 * ./bin/depthstencil-render-miplevels 1024 d=s=z24_s8
963 int stencil_tile_idx
= -1;
965 if (AddrSurfInfoIn
.flags
.depth
&& !AddrSurfInfoIn
.flags
.noStencil
&&
966 (config
->info
.levels
> 1 || info
->family
== CHIP_STONEY
)) {
967 /* Compute stencilTileIdx that is compatible with the (depth)
968 * tileIdx. This degrades the depth surface if necessary to
969 * ensure that a matching stencilTileIdx exists. */
970 AddrSurfInfoIn
.flags
.matchStencilTileCfg
= 1;
972 /* Keep the depth mip-tail compatible with texturing. */
973 AddrSurfInfoIn
.flags
.noStencil
= 1;
976 /* Set preferred macrotile parameters. This is usually required
977 * for shared resources. This is for 2D tiling only. */
978 if (AddrSurfInfoIn
.tileMode
>= ADDR_TM_2D_TILED_THIN1
&& surf
->u
.legacy
.bankw
&&
979 surf
->u
.legacy
.bankh
&& surf
->u
.legacy
.mtilea
&& surf
->u
.legacy
.tile_split
) {
980 /* If any of these parameters are incorrect, the calculation
982 AddrTileInfoIn
.banks
= surf
->u
.legacy
.num_banks
;
983 AddrTileInfoIn
.bankWidth
= surf
->u
.legacy
.bankw
;
984 AddrTileInfoIn
.bankHeight
= surf
->u
.legacy
.bankh
;
985 AddrTileInfoIn
.macroAspectRatio
= surf
->u
.legacy
.mtilea
;
986 AddrTileInfoIn
.tileSplitBytes
= surf
->u
.legacy
.tile_split
;
987 AddrTileInfoIn
.pipeConfig
= surf
->u
.legacy
.pipe_config
+ 1; /* +1 compared to GB_TILE_MODE */
988 AddrSurfInfoIn
.flags
.opt4Space
= 0;
989 AddrSurfInfoIn
.pTileInfo
= &AddrTileInfoIn
;
991 /* If AddrSurfInfoIn.pTileInfo is set, Addrlib doesn't set
992 * the tile index, because we are expected to know it if
993 * we know the other parameters.
995 * This is something that can easily be fixed in Addrlib.
996 * For now, just figure it out here.
997 * Note that only 2D_TILE_THIN1 is handled here.
999 assert(!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
));
1000 assert(AddrSurfInfoIn
.tileMode
== ADDR_TM_2D_TILED_THIN1
);
1002 if (info
->chip_class
== GFX6
) {
1003 if (AddrSurfInfoIn
.tileType
== ADDR_DISPLAYABLE
) {
1005 AddrSurfInfoIn
.tileIndex
= 11; /* 16bpp */
1007 AddrSurfInfoIn
.tileIndex
= 12; /* 32bpp */
1010 AddrSurfInfoIn
.tileIndex
= 14; /* 8bpp */
1011 else if (surf
->bpe
== 2)
1012 AddrSurfInfoIn
.tileIndex
= 15; /* 16bpp */
1013 else if (surf
->bpe
== 4)
1014 AddrSurfInfoIn
.tileIndex
= 16; /* 32bpp */
1016 AddrSurfInfoIn
.tileIndex
= 17; /* 64bpp (and 128bpp) */
1020 if (AddrSurfInfoIn
.tileType
== ADDR_DISPLAYABLE
)
1021 AddrSurfInfoIn
.tileIndex
= 10; /* 2D displayable */
1023 AddrSurfInfoIn
.tileIndex
= 14; /* 2D non-displayable */
1025 /* Addrlib doesn't set this if tileIndex is forced like above. */
1026 AddrSurfInfoOut
.macroModeIndex
= cik_get_macro_tile_index(surf
);
1030 surf
->has_stencil
= !!(surf
->flags
& RADEON_SURF_SBUFFER
);
1031 surf
->num_dcc_levels
= 0;
1032 surf
->surf_size
= 0;
1034 surf
->dcc_alignment
= 1;
1035 surf
->htile_size
= 0;
1036 surf
->htile_slice_size
= 0;
1037 surf
->htile_alignment
= 1;
1039 const bool only_stencil
=
1040 (surf
->flags
& RADEON_SURF_SBUFFER
) && !(surf
->flags
& RADEON_SURF_ZBUFFER
);
1042 /* Calculate texture layout information. */
1043 if (!only_stencil
) {
1044 for (level
= 0; level
< config
->info
.levels
; level
++) {
1045 r
= gfx6_compute_level(addrlib
, config
, surf
, false, level
, compressed
, &AddrSurfInfoIn
,
1046 &AddrSurfInfoOut
, &AddrDccIn
, &AddrDccOut
, &AddrHtileIn
,
1054 if (!AddrSurfInfoOut
.tcCompatible
) {
1055 AddrSurfInfoIn
.flags
.tcCompatible
= 0;
1056 surf
->flags
&= ~RADEON_SURF_TC_COMPATIBLE_HTILE
;
1059 if (AddrSurfInfoIn
.flags
.matchStencilTileCfg
) {
1060 AddrSurfInfoIn
.flags
.matchStencilTileCfg
= 0;
1061 AddrSurfInfoIn
.tileIndex
= AddrSurfInfoOut
.tileIndex
;
1062 stencil_tile_idx
= AddrSurfInfoOut
.stencilTileIdx
;
1064 assert(stencil_tile_idx
>= 0);
1067 r
= gfx6_surface_settings(addrlib
, info
, config
, &AddrSurfInfoOut
, surf
);
1073 /* Calculate texture layout information for stencil. */
1074 if (surf
->flags
& RADEON_SURF_SBUFFER
) {
1075 AddrSurfInfoIn
.tileIndex
= stencil_tile_idx
;
1076 AddrSurfInfoIn
.bpp
= 8;
1077 AddrSurfInfoIn
.flags
.depth
= 0;
1078 AddrSurfInfoIn
.flags
.stencil
= 1;
1079 AddrSurfInfoIn
.flags
.tcCompatible
= 0;
1080 /* This will be ignored if AddrSurfInfoIn.pTileInfo is NULL. */
1081 AddrTileInfoIn
.tileSplitBytes
= surf
->u
.legacy
.stencil_tile_split
;
1083 for (level
= 0; level
< config
->info
.levels
; level
++) {
1084 r
= gfx6_compute_level(addrlib
, config
, surf
, true, level
, compressed
, &AddrSurfInfoIn
,
1085 &AddrSurfInfoOut
, &AddrDccIn
, &AddrDccOut
, NULL
, NULL
);
1089 /* DB uses the depth pitch for both stencil and depth. */
1090 if (!only_stencil
) {
1091 if (surf
->u
.legacy
.stencil_level
[level
].nblk_x
!= surf
->u
.legacy
.level
[level
].nblk_x
)
1092 surf
->u
.legacy
.stencil_adjusted
= true;
1094 surf
->u
.legacy
.level
[level
].nblk_x
= surf
->u
.legacy
.stencil_level
[level
].nblk_x
;
1099 r
= gfx6_surface_settings(addrlib
, info
, config
, &AddrSurfInfoOut
, surf
);
1104 /* For 2D modes only. */
1105 if (AddrSurfInfoOut
.tileMode
>= ADDR_TM_2D_TILED_THIN1
) {
1106 surf
->u
.legacy
.stencil_tile_split
= AddrSurfInfoOut
.pTileInfo
->tileSplitBytes
;
1112 /* Compute FMASK. */
1113 if (config
->info
.samples
>= 2 && AddrSurfInfoIn
.flags
.color
&& info
->has_graphics
&&
1114 !(surf
->flags
& RADEON_SURF_NO_FMASK
)) {
1115 ADDR_COMPUTE_FMASK_INFO_INPUT fin
= {0};
1116 ADDR_COMPUTE_FMASK_INFO_OUTPUT fout
= {0};
1117 ADDR_TILEINFO fmask_tile_info
= {};
1119 fin
.size
= sizeof(fin
);
1120 fout
.size
= sizeof(fout
);
1122 fin
.tileMode
= AddrSurfInfoOut
.tileMode
;
1123 fin
.pitch
= AddrSurfInfoOut
.pitch
;
1124 fin
.height
= config
->info
.height
;
1125 fin
.numSlices
= AddrSurfInfoIn
.numSlices
;
1126 fin
.numSamples
= AddrSurfInfoIn
.numSamples
;
1127 fin
.numFrags
= AddrSurfInfoIn
.numFrags
;
1129 fout
.pTileInfo
= &fmask_tile_info
;
1131 r
= AddrComputeFmaskInfo(addrlib
, &fin
, &fout
);
1135 surf
->fmask_size
= fout
.fmaskBytes
;
1136 surf
->fmask_alignment
= fout
.baseAlign
;
1137 surf
->fmask_tile_swizzle
= 0;
1139 surf
->u
.legacy
.fmask
.slice_tile_max
= (fout
.pitch
* fout
.height
) / 64;
1140 if (surf
->u
.legacy
.fmask
.slice_tile_max
)
1141 surf
->u
.legacy
.fmask
.slice_tile_max
-= 1;
1143 surf
->u
.legacy
.fmask
.tiling_index
= fout
.tileIndex
;
1144 surf
->u
.legacy
.fmask
.bankh
= fout
.pTileInfo
->bankHeight
;
1145 surf
->u
.legacy
.fmask
.pitch_in_pixels
= fout
.pitch
;
1146 surf
->u
.legacy
.fmask
.slice_size
= fout
.sliceSize
;
1148 /* Compute tile swizzle for FMASK. */
1149 if (config
->info
.fmask_surf_index
&& !(surf
->flags
& RADEON_SURF_SHAREABLE
)) {
1150 ADDR_COMPUTE_BASE_SWIZZLE_INPUT xin
= {0};
1151 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT xout
= {0};
1153 xin
.size
= sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT
);
1154 xout
.size
= sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT
);
1156 /* This counter starts from 1 instead of 0. */
1157 xin
.surfIndex
= p_atomic_inc_return(config
->info
.fmask_surf_index
);
1158 xin
.tileIndex
= fout
.tileIndex
;
1159 xin
.macroModeIndex
= fout
.macroModeIndex
;
1160 xin
.pTileInfo
= fout
.pTileInfo
;
1161 xin
.tileMode
= fin
.tileMode
;
1163 int r
= AddrComputeBaseSwizzle(addrlib
, &xin
, &xout
);
1167 assert(xout
.tileSwizzle
<= u_bit_consecutive(0, sizeof(surf
->tile_swizzle
) * 8));
1168 surf
->fmask_tile_swizzle
= xout
.tileSwizzle
;
1172 /* Recalculate the whole DCC miptree size including disabled levels.
1173 * This is what addrlib does, but calling addrlib would be a lot more
1176 if (surf
->dcc_size
&& config
->info
.levels
> 1) {
1177 /* The smallest miplevels that are never compressed by DCC
1178 * still read the DCC buffer via TC if the base level uses DCC,
1179 * and for some reason the DCC buffer needs to be larger if
1180 * the miptree uses non-zero tile_swizzle. Otherwise there are
1183 * "dcc_alignment * 4" was determined by trial and error.
1185 surf
->dcc_size
= align64(surf
->surf_size
>> 8, surf
->dcc_alignment
* 4);
1188 /* Make sure HTILE covers the whole miptree, because the shader reads
1189 * TC-compatible HTILE even for levels where it's disabled by DB.
1191 if (surf
->htile_size
&& config
->info
.levels
> 1 &&
1192 surf
->flags
& RADEON_SURF_TC_COMPATIBLE_HTILE
) {
1193 /* MSAA can't occur with levels > 1, so ignore the sample count. */
1194 const unsigned total_pixels
= surf
->surf_size
/ surf
->bpe
;
1195 const unsigned htile_block_size
= 8 * 8;
1196 const unsigned htile_element_size
= 4;
1198 surf
->htile_size
= (total_pixels
/ htile_block_size
) * htile_element_size
;
1199 surf
->htile_size
= align(surf
->htile_size
, surf
->htile_alignment
);
1200 } else if (!surf
->htile_size
) {
1201 /* Unset this if HTILE is not present. */
1202 surf
->flags
&= ~RADEON_SURF_TC_COMPATIBLE_HTILE
;
1205 surf
->is_linear
= surf
->u
.legacy
.level
[0].mode
== RADEON_SURF_MODE_LINEAR_ALIGNED
;
1206 surf
->is_displayable
= surf
->is_linear
|| surf
->micro_tile_mode
== RADEON_MICRO_MODE_DISPLAY
||
1207 surf
->micro_tile_mode
== RADEON_MICRO_MODE_RENDER
;
1209 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
1210 * used at the same time. This case is not currently expected to occur
1211 * because we don't use rotated. Enforce this restriction on all chips
1212 * to facilitate testing.
1214 if (surf
->micro_tile_mode
== RADEON_MICRO_MODE_RENDER
) {
1215 assert(!"rotate micro tile mode is unsupported");
1219 ac_compute_cmask(info
, config
, surf
);
1223 /* This is only called when expecting a tiled layout. */
1224 static int gfx9_get_preferred_swizzle_mode(ADDR_HANDLE addrlib
, struct radeon_surf
*surf
,
1225 ADDR2_COMPUTE_SURFACE_INFO_INPUT
*in
, bool is_fmask
,
1226 AddrSwizzleMode
*swizzle_mode
)
1228 ADDR_E_RETURNCODE ret
;
1229 ADDR2_GET_PREFERRED_SURF_SETTING_INPUT sin
= {0};
1230 ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT sout
= {0};
1232 sin
.size
= sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_INPUT
);
1233 sout
.size
= sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT
);
1235 sin
.flags
= in
->flags
;
1236 sin
.resourceType
= in
->resourceType
;
1237 sin
.format
= in
->format
;
1238 sin
.resourceLoction
= ADDR_RSRC_LOC_INVIS
;
1239 /* TODO: We could allow some of these: */
1240 sin
.forbiddenBlock
.micro
= 1; /* don't allow the 256B swizzle modes */
1241 sin
.forbiddenBlock
.var
= 1; /* don't allow the variable-sized swizzle modes */
1243 sin
.width
= in
->width
;
1244 sin
.height
= in
->height
;
1245 sin
.numSlices
= in
->numSlices
;
1246 sin
.numMipLevels
= in
->numMipLevels
;
1247 sin
.numSamples
= in
->numSamples
;
1248 sin
.numFrags
= in
->numFrags
;
1251 sin
.flags
.display
= 0;
1252 sin
.flags
.color
= 0;
1253 sin
.flags
.fmask
= 1;
1256 if (surf
->flags
& RADEON_SURF_FORCE_MICRO_TILE_MODE
) {
1257 sin
.forbiddenBlock
.linear
= 1;
1259 if (surf
->micro_tile_mode
== RADEON_MICRO_MODE_DISPLAY
)
1260 sin
.preferredSwSet
.sw_D
= 1;
1261 else if (surf
->micro_tile_mode
== RADEON_MICRO_MODE_STANDARD
)
1262 sin
.preferredSwSet
.sw_S
= 1;
1263 else if (surf
->micro_tile_mode
== RADEON_MICRO_MODE_DEPTH
)
1264 sin
.preferredSwSet
.sw_Z
= 1;
1265 else if (surf
->micro_tile_mode
== RADEON_MICRO_MODE_RENDER
)
1266 sin
.preferredSwSet
.sw_R
= 1;
1269 ret
= Addr2GetPreferredSurfaceSetting(addrlib
, &sin
, &sout
);
1273 *swizzle_mode
= sout
.swizzleMode
;
1277 static bool is_dcc_supported_by_CB(const struct radeon_info
*info
, unsigned sw_mode
)
1279 if (info
->chip_class
>= GFX10
)
1280 return sw_mode
== ADDR_SW_64KB_Z_X
|| sw_mode
== ADDR_SW_64KB_R_X
;
1282 return sw_mode
!= ADDR_SW_LINEAR
;
1285 ASSERTED
static bool is_dcc_supported_by_L2(const struct radeon_info
*info
,
1286 const struct radeon_surf
*surf
)
1288 if (info
->chip_class
<= GFX9
) {
1289 /* Only independent 64B blocks are supported. */
1290 return surf
->u
.gfx9
.dcc
.independent_64B_blocks
&& !surf
->u
.gfx9
.dcc
.independent_128B_blocks
&&
1291 surf
->u
.gfx9
.dcc
.max_compressed_block_size
== V_028C78_MAX_BLOCK_SIZE_64B
;
1294 if (info
->family
== CHIP_NAVI10
) {
1295 /* Only independent 128B blocks are supported. */
1296 return !surf
->u
.gfx9
.dcc
.independent_64B_blocks
&& surf
->u
.gfx9
.dcc
.independent_128B_blocks
&&
1297 surf
->u
.gfx9
.dcc
.max_compressed_block_size
<= V_028C78_MAX_BLOCK_SIZE_128B
;
1300 if (info
->family
== CHIP_NAVI12
|| info
->family
== CHIP_NAVI14
) {
1301 /* Either 64B or 128B can be used, but not both.
1302 * If 64B is used, DCC image stores are unsupported.
1304 return surf
->u
.gfx9
.dcc
.independent_64B_blocks
!= surf
->u
.gfx9
.dcc
.independent_128B_blocks
&&
1305 (!surf
->u
.gfx9
.dcc
.independent_64B_blocks
||
1306 surf
->u
.gfx9
.dcc
.max_compressed_block_size
== V_028C78_MAX_BLOCK_SIZE_64B
) &&
1307 (!surf
->u
.gfx9
.dcc
.independent_128B_blocks
||
1308 surf
->u
.gfx9
.dcc
.max_compressed_block_size
<= V_028C78_MAX_BLOCK_SIZE_128B
);
1311 /* 128B is recommended, but 64B can be set too if needed for 4K by DCN.
1312 * Since there is no reason to ever disable 128B, require it.
1313 * DCC image stores are always supported.
1315 return surf
->u
.gfx9
.dcc
.independent_128B_blocks
&&
1316 surf
->u
.gfx9
.dcc
.max_compressed_block_size
<= V_028C78_MAX_BLOCK_SIZE_128B
;
1319 static bool is_dcc_supported_by_DCN(const struct radeon_info
*info
,
1320 const struct ac_surf_config
*config
,
1321 const struct radeon_surf
*surf
, bool rb_aligned
,
1324 if (!info
->use_display_dcc_unaligned
&& !info
->use_display_dcc_with_retile_blit
)
1327 /* 16bpp and 64bpp are more complicated, so they are disallowed for now. */
1331 /* Handle unaligned DCC. */
1332 if (info
->use_display_dcc_unaligned
&& (rb_aligned
|| pipe_aligned
))
1335 switch (info
->chip_class
) {
1337 /* There are more constraints, but we always set
1338 * INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B,
1339 * which always works.
1341 assert(surf
->u
.gfx9
.dcc
.independent_64B_blocks
&&
1342 surf
->u
.gfx9
.dcc
.max_compressed_block_size
== V_028C78_MAX_BLOCK_SIZE_64B
);
1346 /* DCN requires INDEPENDENT_128B_BLOCKS = 0 only on Navi1x. */
1347 if (info
->chip_class
== GFX10
&& surf
->u
.gfx9
.dcc
.independent_128B_blocks
)
1350 /* For 4K, DCN requires INDEPENDENT_64B_BLOCKS = 1. */
1351 return ((config
->info
.width
<= 2560 && config
->info
.height
<= 2560) ||
1352 (surf
->u
.gfx9
.dcc
.independent_64B_blocks
&&
1353 surf
->u
.gfx9
.dcc
.max_compressed_block_size
== V_028C78_MAX_BLOCK_SIZE_64B
));
1355 unreachable("unhandled chip");
1360 static int gfx9_compute_miptree(struct ac_addrlib
*addrlib
, const struct radeon_info
*info
,
1361 const struct ac_surf_config
*config
, struct radeon_surf
*surf
,
1362 bool compressed
, ADDR2_COMPUTE_SURFACE_INFO_INPUT
*in
)
1364 ADDR2_MIP_INFO mip_info
[RADEON_SURF_MAX_LEVELS
] = {};
1365 ADDR2_COMPUTE_SURFACE_INFO_OUTPUT out
= {0};
1366 ADDR_E_RETURNCODE ret
;
1368 out
.size
= sizeof(ADDR2_COMPUTE_SURFACE_INFO_OUTPUT
);
1369 out
.pMipInfo
= mip_info
;
1371 ret
= Addr2ComputeSurfaceInfo(addrlib
->handle
, in
, &out
);
1375 if (in
->flags
.stencil
) {
1376 surf
->u
.gfx9
.stencil
.swizzle_mode
= in
->swizzleMode
;
1377 surf
->u
.gfx9
.stencil
.epitch
=
1378 out
.epitchIsHeight
? out
.mipChainHeight
- 1 : out
.mipChainPitch
- 1;
1379 surf
->surf_alignment
= MAX2(surf
->surf_alignment
, out
.baseAlign
);
1380 surf
->u
.gfx9
.stencil_offset
= align(surf
->surf_size
, out
.baseAlign
);
1381 surf
->surf_size
= surf
->u
.gfx9
.stencil_offset
+ out
.surfSize
;
1385 surf
->u
.gfx9
.surf
.swizzle_mode
= in
->swizzleMode
;
1386 surf
->u
.gfx9
.surf
.epitch
= out
.epitchIsHeight
? out
.mipChainHeight
- 1 : out
.mipChainPitch
- 1;
1388 /* CMASK fast clear uses these even if FMASK isn't allocated.
1389 * FMASK only supports the Z swizzle modes, whose numbers are multiples of 4.
1391 surf
->u
.gfx9
.fmask
.swizzle_mode
= surf
->u
.gfx9
.surf
.swizzle_mode
& ~0x3;
1392 surf
->u
.gfx9
.fmask
.epitch
= surf
->u
.gfx9
.surf
.epitch
;
1394 surf
->u
.gfx9
.surf_slice_size
= out
.sliceSize
;
1395 surf
->u
.gfx9
.surf_pitch
= out
.pitch
;
1396 surf
->u
.gfx9
.surf_height
= out
.height
;
1397 surf
->surf_size
= out
.surfSize
;
1398 surf
->surf_alignment
= out
.baseAlign
;
1400 if (!compressed
&& surf
->blk_w
> 1 && out
.pitch
== out
.pixelPitch
&&
1401 surf
->u
.gfx9
.surf
.swizzle_mode
== ADDR_SW_LINEAR
) {
1402 /* Adjust surf_pitch to be in elements units not in pixels */
1403 surf
->u
.gfx9
.surf_pitch
= align(surf
->u
.gfx9
.surf_pitch
/ surf
->blk_w
, 256 / surf
->bpe
);
1404 surf
->u
.gfx9
.surf
.epitch
=
1405 MAX2(surf
->u
.gfx9
.surf
.epitch
, surf
->u
.gfx9
.surf_pitch
* surf
->blk_w
- 1);
1406 /* The surface is really a surf->bpe bytes per pixel surface even if we
1407 * use it as a surf->bpe bytes per element one.
1408 * Adjust surf_slice_size and surf_size to reflect the change
1409 * made to surf_pitch.
1411 surf
->u
.gfx9
.surf_slice_size
=
1412 MAX2(surf
->u
.gfx9
.surf_slice_size
,
1413 surf
->u
.gfx9
.surf_pitch
* out
.height
* surf
->bpe
* surf
->blk_w
);
1414 surf
->surf_size
= surf
->u
.gfx9
.surf_slice_size
* in
->numSlices
;
1417 if (in
->swizzleMode
== ADDR_SW_LINEAR
) {
1418 for (unsigned i
= 0; i
< in
->numMipLevels
; i
++) {
1419 surf
->u
.gfx9
.offset
[i
] = mip_info
[i
].offset
;
1420 surf
->u
.gfx9
.pitch
[i
] = mip_info
[i
].pitch
;
1424 if (in
->flags
.depth
) {
1425 assert(in
->swizzleMode
!= ADDR_SW_LINEAR
);
1427 if (surf
->flags
& RADEON_SURF_NO_HTILE
)
1431 ADDR2_COMPUTE_HTILE_INFO_INPUT hin
= {0};
1432 ADDR2_COMPUTE_HTILE_INFO_OUTPUT hout
= {0};
1434 hin
.size
= sizeof(ADDR2_COMPUTE_HTILE_INFO_INPUT
);
1435 hout
.size
= sizeof(ADDR2_COMPUTE_HTILE_INFO_OUTPUT
);
1437 assert(in
->flags
.metaPipeUnaligned
== 0);
1438 assert(in
->flags
.metaRbUnaligned
== 0);
1440 hin
.hTileFlags
.pipeAligned
= 1;
1441 hin
.hTileFlags
.rbAligned
= 1;
1442 hin
.depthFlags
= in
->flags
;
1443 hin
.swizzleMode
= in
->swizzleMode
;
1444 hin
.unalignedWidth
= in
->width
;
1445 hin
.unalignedHeight
= in
->height
;
1446 hin
.numSlices
= in
->numSlices
;
1447 hin
.numMipLevels
= in
->numMipLevels
;
1448 hin
.firstMipIdInTail
= out
.firstMipIdInTail
;
1450 ret
= Addr2ComputeHtileInfo(addrlib
->handle
, &hin
, &hout
);
1454 surf
->htile_size
= hout
.htileBytes
;
1455 surf
->htile_slice_size
= hout
.sliceSize
;
1456 surf
->htile_alignment
= hout
.baseAlign
;
1461 /* Compute tile swizzle for the color surface.
1462 * All *_X and *_T modes can use the swizzle.
1464 if (config
->info
.surf_index
&& in
->swizzleMode
>= ADDR_SW_64KB_Z_T
&& !out
.mipChainInTail
&&
1465 !(surf
->flags
& RADEON_SURF_SHAREABLE
) && !in
->flags
.display
) {
1466 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin
= {0};
1467 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout
= {0};
1469 xin
.size
= sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT
);
1470 xout
.size
= sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT
);
1472 xin
.surfIndex
= p_atomic_inc_return(config
->info
.surf_index
) - 1;
1473 xin
.flags
= in
->flags
;
1474 xin
.swizzleMode
= in
->swizzleMode
;
1475 xin
.resourceType
= in
->resourceType
;
1476 xin
.format
= in
->format
;
1477 xin
.numSamples
= in
->numSamples
;
1478 xin
.numFrags
= in
->numFrags
;
1480 ret
= Addr2ComputePipeBankXor(addrlib
->handle
, &xin
, &xout
);
1484 assert(xout
.pipeBankXor
<= u_bit_consecutive(0, sizeof(surf
->tile_swizzle
) * 8));
1485 surf
->tile_swizzle
= xout
.pipeBankXor
;
1489 if (info
->has_graphics
&& !(surf
->flags
& RADEON_SURF_DISABLE_DCC
) && !compressed
&&
1490 is_dcc_supported_by_CB(info
, in
->swizzleMode
) &&
1491 (!in
->flags
.display
||
1492 is_dcc_supported_by_DCN(info
, config
, surf
, !in
->flags
.metaRbUnaligned
,
1493 !in
->flags
.metaPipeUnaligned
))) {
1494 ADDR2_COMPUTE_DCCINFO_INPUT din
= {0};
1495 ADDR2_COMPUTE_DCCINFO_OUTPUT dout
= {0};
1496 ADDR2_META_MIP_INFO meta_mip_info
[RADEON_SURF_MAX_LEVELS
] = {};
1498 din
.size
= sizeof(ADDR2_COMPUTE_DCCINFO_INPUT
);
1499 dout
.size
= sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT
);
1500 dout
.pMipInfo
= meta_mip_info
;
1502 din
.dccKeyFlags
.pipeAligned
= !in
->flags
.metaPipeUnaligned
;
1503 din
.dccKeyFlags
.rbAligned
= !in
->flags
.metaRbUnaligned
;
1504 din
.resourceType
= in
->resourceType
;
1505 din
.swizzleMode
= in
->swizzleMode
;
1507 din
.unalignedWidth
= in
->width
;
1508 din
.unalignedHeight
= in
->height
;
1509 din
.numSlices
= in
->numSlices
;
1510 din
.numFrags
= in
->numFrags
;
1511 din
.numMipLevels
= in
->numMipLevels
;
1512 din
.dataSurfaceSize
= out
.surfSize
;
1513 din
.firstMipIdInTail
= out
.firstMipIdInTail
;
1515 ret
= Addr2ComputeDccInfo(addrlib
->handle
, &din
, &dout
);
1519 surf
->u
.gfx9
.dcc
.rb_aligned
= din
.dccKeyFlags
.rbAligned
;
1520 surf
->u
.gfx9
.dcc
.pipe_aligned
= din
.dccKeyFlags
.pipeAligned
;
1521 surf
->u
.gfx9
.dcc_block_width
= dout
.compressBlkWidth
;
1522 surf
->u
.gfx9
.dcc_block_height
= dout
.compressBlkHeight
;
1523 surf
->u
.gfx9
.dcc_block_depth
= dout
.compressBlkDepth
;
1524 surf
->dcc_size
= dout
.dccRamSize
;
1525 surf
->dcc_alignment
= dout
.dccRamBaseAlign
;
1526 surf
->num_dcc_levels
= in
->numMipLevels
;
1528 /* Disable DCC for levels that are in the mip tail.
1530 * There are two issues that this is intended to
1533 * 1. Multiple mip levels may share a cache line. This
1534 * can lead to corruption when switching between
1535 * rendering to different mip levels because the
1536 * RBs don't maintain coherency.
1538 * 2. Texturing with metadata after rendering sometimes
1539 * fails with corruption, probably for a similar
1542 * Working around these issues for all levels in the
1543 * mip tail may be overly conservative, but it's what
1546 * Alternative solutions that also work but are worse:
1547 * - Disable DCC entirely.
1548 * - Flush TC L2 after rendering.
1550 for (unsigned i
= 0; i
< in
->numMipLevels
; i
++) {
1551 if (meta_mip_info
[i
].inMiptail
) {
1552 /* GFX10 can only compress the first level
1555 * TODO: Try to do the same thing for gfx9
1556 * if there are no regressions.
1558 if (info
->chip_class
>= GFX10
)
1559 surf
->num_dcc_levels
= i
+ 1;
1561 surf
->num_dcc_levels
= i
;
1566 if (!surf
->num_dcc_levels
)
1569 surf
->u
.gfx9
.display_dcc_size
= surf
->dcc_size
;
1570 surf
->u
.gfx9
.display_dcc_alignment
= surf
->dcc_alignment
;
1571 surf
->u
.gfx9
.display_dcc_pitch_max
= dout
.pitch
- 1;
1573 /* Compute displayable DCC. */
1574 if (in
->flags
.display
&& surf
->num_dcc_levels
&& info
->use_display_dcc_with_retile_blit
) {
1575 /* Compute displayable DCC info. */
1576 din
.dccKeyFlags
.pipeAligned
= 0;
1577 din
.dccKeyFlags
.rbAligned
= 0;
1579 assert(din
.numSlices
== 1);
1580 assert(din
.numMipLevels
== 1);
1581 assert(din
.numFrags
== 1);
1582 assert(surf
->tile_swizzle
== 0);
1583 assert(surf
->u
.gfx9
.dcc
.pipe_aligned
|| surf
->u
.gfx9
.dcc
.rb_aligned
);
1585 ret
= Addr2ComputeDccInfo(addrlib
->handle
, &din
, &dout
);
1589 surf
->u
.gfx9
.display_dcc_size
= dout
.dccRamSize
;
1590 surf
->u
.gfx9
.display_dcc_alignment
= dout
.dccRamBaseAlign
;
1591 surf
->u
.gfx9
.display_dcc_pitch_max
= dout
.pitch
- 1;
1592 assert(surf
->u
.gfx9
.display_dcc_size
<= surf
->dcc_size
);
1594 surf
->u
.gfx9
.dcc_retile_use_uint16
=
1595 surf
->u
.gfx9
.display_dcc_size
<= UINT16_MAX
+ 1 && surf
->dcc_size
<= UINT16_MAX
+ 1;
1597 /* Align the retile map size to get more hash table hits and
1598 * decrease the maximum memory footprint when all retile maps
1599 * are cached in the hash table.
1601 unsigned retile_dim
[2] = {in
->width
, in
->height
};
1603 for (unsigned i
= 0; i
< 2; i
++) {
1604 /* Increase the alignment as the size increases.
1605 * Greater alignment increases retile compute work,
1606 * but decreases maximum memory footprint for the cache.
1608 * With this alignment, the worst case memory footprint of
1614 * The worst case size in MB can be computed in Haskell as follows:
1615 * (sum (map get_retile_size (map get_dcc_size (deduplicate (map align_pair
1616 * [(i*16,j*16) | i <- [1..maxwidth`div`16], j <- [1..maxheight`div`16]])))))
1617 * `div` 1024^2 where alignment x = if x <= 512 then 16 else if x <= 1024 then 32
1618 * else if x <= 2048 then 64 else 128 align x = (x + (alignment x) - 1) `div`
1619 * (alignment x) * (alignment x) align_pair e = (align (fst e), align (snd e))
1620 * deduplicate = map head . groupBy (\ a b -> ((fst a) == (fst b)) && ((snd a)
1621 * == (snd b))) . sortBy compare get_dcc_size e = ((fst e) * (snd e) * bpp) `div` 256
1622 * get_retile_size dcc_size = dcc_size * 2 * (if dcc_size <= 2^16 then 2 else
1623 * 4) bpp = 4; maxwidth = 3840; maxheight = 2160
1625 if (retile_dim
[i
] <= 512)
1626 retile_dim
[i
] = align(retile_dim
[i
], 16);
1627 else if (retile_dim
[i
] <= 1024)
1628 retile_dim
[i
] = align(retile_dim
[i
], 32);
1629 else if (retile_dim
[i
] <= 2048)
1630 retile_dim
[i
] = align(retile_dim
[i
], 64);
1632 retile_dim
[i
] = align(retile_dim
[i
], 128);
1634 /* Don't align more than the DCC pixel alignment. */
1635 assert(dout
.metaBlkWidth
>= 128 && dout
.metaBlkHeight
>= 128);
1638 surf
->u
.gfx9
.dcc_retile_num_elements
=
1639 DIV_ROUND_UP(retile_dim
[0], dout
.compressBlkWidth
) *
1640 DIV_ROUND_UP(retile_dim
[1], dout
.compressBlkHeight
) * 2;
1641 /* Align the size to 4 (for the compute shader). */
1642 surf
->u
.gfx9
.dcc_retile_num_elements
= align(surf
->u
.gfx9
.dcc_retile_num_elements
, 4);
1644 if (!(surf
->flags
& RADEON_SURF_IMPORTED
)) {
1645 /* Compute address mapping from non-displayable to displayable DCC. */
1646 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin
;
1647 memset(&addrin
, 0, sizeof(addrin
));
1648 addrin
.size
= sizeof(addrin
);
1649 addrin
.swizzleMode
= din
.swizzleMode
;
1650 addrin
.resourceType
= din
.resourceType
;
1651 addrin
.bpp
= din
.bpp
;
1652 addrin
.numSlices
= 1;
1653 addrin
.numMipLevels
= 1;
1654 addrin
.numFrags
= 1;
1655 addrin
.pitch
= dout
.pitch
;
1656 addrin
.height
= dout
.height
;
1657 addrin
.compressBlkWidth
= dout
.compressBlkWidth
;
1658 addrin
.compressBlkHeight
= dout
.compressBlkHeight
;
1659 addrin
.compressBlkDepth
= dout
.compressBlkDepth
;
1660 addrin
.metaBlkWidth
= dout
.metaBlkWidth
;
1661 addrin
.metaBlkHeight
= dout
.metaBlkHeight
;
1662 addrin
.metaBlkDepth
= dout
.metaBlkDepth
;
1663 addrin
.dccRamSliceSize
= 0; /* Don't care for non-layered images. */
1665 surf
->u
.gfx9
.dcc_retile_map
= ac_compute_dcc_retile_map(
1666 addrlib
, info
, retile_dim
[0], retile_dim
[1], surf
->u
.gfx9
.dcc
.rb_aligned
,
1667 surf
->u
.gfx9
.dcc
.pipe_aligned
, surf
->u
.gfx9
.dcc_retile_use_uint16
,
1668 surf
->u
.gfx9
.dcc_retile_num_elements
, &addrin
);
1669 if (!surf
->u
.gfx9
.dcc_retile_map
)
1670 return ADDR_OUTOFMEMORY
;
1676 if (in
->numSamples
> 1 && info
->has_graphics
&& !(surf
->flags
& RADEON_SURF_NO_FMASK
)) {
1677 ADDR2_COMPUTE_FMASK_INFO_INPUT fin
= {0};
1678 ADDR2_COMPUTE_FMASK_INFO_OUTPUT fout
= {0};
1680 fin
.size
= sizeof(ADDR2_COMPUTE_FMASK_INFO_INPUT
);
1681 fout
.size
= sizeof(ADDR2_COMPUTE_FMASK_INFO_OUTPUT
);
1683 ret
= gfx9_get_preferred_swizzle_mode(addrlib
->handle
, surf
, in
, true, &fin
.swizzleMode
);
1687 fin
.unalignedWidth
= in
->width
;
1688 fin
.unalignedHeight
= in
->height
;
1689 fin
.numSlices
= in
->numSlices
;
1690 fin
.numSamples
= in
->numSamples
;
1691 fin
.numFrags
= in
->numFrags
;
1693 ret
= Addr2ComputeFmaskInfo(addrlib
->handle
, &fin
, &fout
);
1697 surf
->u
.gfx9
.fmask
.swizzle_mode
= fin
.swizzleMode
;
1698 surf
->u
.gfx9
.fmask
.epitch
= fout
.pitch
- 1;
1699 surf
->fmask_size
= fout
.fmaskBytes
;
1700 surf
->fmask_alignment
= fout
.baseAlign
;
1702 /* Compute tile swizzle for the FMASK surface. */
1703 if (config
->info
.fmask_surf_index
&& fin
.swizzleMode
>= ADDR_SW_64KB_Z_T
&&
1704 !(surf
->flags
& RADEON_SURF_SHAREABLE
)) {
1705 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin
= {0};
1706 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout
= {0};
1708 xin
.size
= sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT
);
1709 xout
.size
= sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT
);
1711 /* This counter starts from 1 instead of 0. */
1712 xin
.surfIndex
= p_atomic_inc_return(config
->info
.fmask_surf_index
);
1713 xin
.flags
= in
->flags
;
1714 xin
.swizzleMode
= fin
.swizzleMode
;
1715 xin
.resourceType
= in
->resourceType
;
1716 xin
.format
= in
->format
;
1717 xin
.numSamples
= in
->numSamples
;
1718 xin
.numFrags
= in
->numFrags
;
1720 ret
= Addr2ComputePipeBankXor(addrlib
->handle
, &xin
, &xout
);
1724 assert(xout
.pipeBankXor
<= u_bit_consecutive(0, sizeof(surf
->fmask_tile_swizzle
) * 8));
1725 surf
->fmask_tile_swizzle
= xout
.pipeBankXor
;
1729 /* CMASK -- on GFX10 only for FMASK */
1730 if (in
->swizzleMode
!= ADDR_SW_LINEAR
&& in
->resourceType
== ADDR_RSRC_TEX_2D
&&
1731 ((info
->chip_class
<= GFX9
&& in
->numSamples
== 1 && in
->flags
.metaPipeUnaligned
== 0 &&
1732 in
->flags
.metaRbUnaligned
== 0) ||
1733 (surf
->fmask_size
&& in
->numSamples
>= 2))) {
1734 ADDR2_COMPUTE_CMASK_INFO_INPUT cin
= {0};
1735 ADDR2_COMPUTE_CMASK_INFO_OUTPUT cout
= {0};
1737 cin
.size
= sizeof(ADDR2_COMPUTE_CMASK_INFO_INPUT
);
1738 cout
.size
= sizeof(ADDR2_COMPUTE_CMASK_INFO_OUTPUT
);
1740 assert(in
->flags
.metaPipeUnaligned
== 0);
1741 assert(in
->flags
.metaRbUnaligned
== 0);
1743 cin
.cMaskFlags
.pipeAligned
= 1;
1744 cin
.cMaskFlags
.rbAligned
= 1;
1745 cin
.resourceType
= in
->resourceType
;
1746 cin
.unalignedWidth
= in
->width
;
1747 cin
.unalignedHeight
= in
->height
;
1748 cin
.numSlices
= in
->numSlices
;
1750 if (in
->numSamples
> 1)
1751 cin
.swizzleMode
= surf
->u
.gfx9
.fmask
.swizzle_mode
;
1753 cin
.swizzleMode
= in
->swizzleMode
;
1755 ret
= Addr2ComputeCmaskInfo(addrlib
->handle
, &cin
, &cout
);
1759 surf
->cmask_size
= cout
.cmaskBytes
;
1760 surf
->cmask_alignment
= cout
.baseAlign
;
1767 static int gfx9_compute_surface(struct ac_addrlib
*addrlib
, const struct radeon_info
*info
,
1768 const struct ac_surf_config
*config
, enum radeon_surf_mode mode
,
1769 struct radeon_surf
*surf
)
1772 ADDR2_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn
= {0};
1775 AddrSurfInfoIn
.size
= sizeof(ADDR2_COMPUTE_SURFACE_INFO_INPUT
);
1777 compressed
= surf
->blk_w
== 4 && surf
->blk_h
== 4;
1779 /* The format must be set correctly for the allocation of compressed
1780 * textures to work. In other cases, setting the bpp is sufficient. */
1782 switch (surf
->bpe
) {
1784 AddrSurfInfoIn
.format
= ADDR_FMT_BC1
;
1787 AddrSurfInfoIn
.format
= ADDR_FMT_BC3
;
1793 switch (surf
->bpe
) {
1795 assert(!(surf
->flags
& RADEON_SURF_ZBUFFER
));
1796 AddrSurfInfoIn
.format
= ADDR_FMT_8
;
1799 assert(surf
->flags
& RADEON_SURF_ZBUFFER
|| !(surf
->flags
& RADEON_SURF_SBUFFER
));
1800 AddrSurfInfoIn
.format
= ADDR_FMT_16
;
1803 assert(surf
->flags
& RADEON_SURF_ZBUFFER
|| !(surf
->flags
& RADEON_SURF_SBUFFER
));
1804 AddrSurfInfoIn
.format
= ADDR_FMT_32
;
1807 assert(!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
));
1808 AddrSurfInfoIn
.format
= ADDR_FMT_32_32
;
1811 assert(!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
));
1812 AddrSurfInfoIn
.format
= ADDR_FMT_32_32_32
;
1815 assert(!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
));
1816 AddrSurfInfoIn
.format
= ADDR_FMT_32_32_32_32
;
1821 AddrSurfInfoIn
.bpp
= surf
->bpe
* 8;
1824 bool is_color_surface
= !(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
);
1825 AddrSurfInfoIn
.flags
.color
= is_color_surface
&& !(surf
->flags
& RADEON_SURF_NO_RENDER_TARGET
);
1826 AddrSurfInfoIn
.flags
.depth
= (surf
->flags
& RADEON_SURF_ZBUFFER
) != 0;
1827 AddrSurfInfoIn
.flags
.display
= get_display_flag(config
, surf
);
1828 /* flags.texture currently refers to TC-compatible HTILE */
1829 AddrSurfInfoIn
.flags
.texture
= is_color_surface
|| surf
->flags
& RADEON_SURF_TC_COMPATIBLE_HTILE
;
1830 AddrSurfInfoIn
.flags
.opt4space
= 1;
1832 AddrSurfInfoIn
.numMipLevels
= config
->info
.levels
;
1833 AddrSurfInfoIn
.numSamples
= MAX2(1, config
->info
.samples
);
1834 AddrSurfInfoIn
.numFrags
= AddrSurfInfoIn
.numSamples
;
1836 if (!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
))
1837 AddrSurfInfoIn
.numFrags
= MAX2(1, config
->info
.storage_samples
);
1839 /* GFX9 doesn't support 1D depth textures, so allocate all 1D textures
1840 * as 2D to avoid having shader variants for 1D vs 2D, so all shaders
1841 * must sample 1D textures as 2D. */
1843 AddrSurfInfoIn
.resourceType
= ADDR_RSRC_TEX_3D
;
1844 else if (info
->chip_class
!= GFX9
&& config
->is_1d
)
1845 AddrSurfInfoIn
.resourceType
= ADDR_RSRC_TEX_1D
;
1847 AddrSurfInfoIn
.resourceType
= ADDR_RSRC_TEX_2D
;
1849 AddrSurfInfoIn
.width
= config
->info
.width
;
1850 AddrSurfInfoIn
.height
= config
->info
.height
;
1853 AddrSurfInfoIn
.numSlices
= config
->info
.depth
;
1854 else if (config
->is_cube
)
1855 AddrSurfInfoIn
.numSlices
= 6;
1857 AddrSurfInfoIn
.numSlices
= config
->info
.array_size
;
1859 /* This is propagated to DCC. It must be 0 for HTILE and CMASK. */
1860 AddrSurfInfoIn
.flags
.metaPipeUnaligned
= 0;
1861 AddrSurfInfoIn
.flags
.metaRbUnaligned
= 0;
1863 /* Optimal values for the L2 cache. */
1864 if (info
->chip_class
== GFX9
) {
1865 surf
->u
.gfx9
.dcc
.independent_64B_blocks
= 1;
1866 surf
->u
.gfx9
.dcc
.independent_128B_blocks
= 0;
1867 surf
->u
.gfx9
.dcc
.max_compressed_block_size
= V_028C78_MAX_BLOCK_SIZE_64B
;
1868 } else if (info
->chip_class
>= GFX10
) {
1869 surf
->u
.gfx9
.dcc
.independent_64B_blocks
= 0;
1870 surf
->u
.gfx9
.dcc
.independent_128B_blocks
= 1;
1871 surf
->u
.gfx9
.dcc
.max_compressed_block_size
= V_028C78_MAX_BLOCK_SIZE_128B
;
1874 if (AddrSurfInfoIn
.flags
.display
) {
1875 /* The display hardware can only read DCC with RB_ALIGNED=0 and
1876 * PIPE_ALIGNED=0. PIPE_ALIGNED really means L2CACHE_ALIGNED.
1878 * The CB block requires RB_ALIGNED=1 except 1 RB chips.
1879 * PIPE_ALIGNED is optional, but PIPE_ALIGNED=0 requires L2 flushes
1880 * after rendering, so PIPE_ALIGNED=1 is recommended.
1882 if (info
->use_display_dcc_unaligned
) {
1883 AddrSurfInfoIn
.flags
.metaPipeUnaligned
= 1;
1884 AddrSurfInfoIn
.flags
.metaRbUnaligned
= 1;
1887 /* Adjust DCC settings to meet DCN requirements. */
1888 if (info
->use_display_dcc_unaligned
|| info
->use_display_dcc_with_retile_blit
) {
1889 /* Only Navi12/14 support independent 64B blocks in L2,
1890 * but without DCC image stores.
1892 if (info
->family
== CHIP_NAVI12
|| info
->family
== CHIP_NAVI14
) {
1893 surf
->u
.gfx9
.dcc
.independent_64B_blocks
= 1;
1894 surf
->u
.gfx9
.dcc
.independent_128B_blocks
= 0;
1895 surf
->u
.gfx9
.dcc
.max_compressed_block_size
= V_028C78_MAX_BLOCK_SIZE_64B
;
1898 if (info
->chip_class
>= GFX10_3
) {
1899 surf
->u
.gfx9
.dcc
.independent_64B_blocks
= 1;
1900 surf
->u
.gfx9
.dcc
.independent_128B_blocks
= 1;
1901 surf
->u
.gfx9
.dcc
.max_compressed_block_size
= V_028C78_MAX_BLOCK_SIZE_64B
;
1907 case RADEON_SURF_MODE_LINEAR_ALIGNED
:
1908 assert(config
->info
.samples
<= 1);
1909 assert(!(surf
->flags
& RADEON_SURF_Z_OR_SBUFFER
));
1910 AddrSurfInfoIn
.swizzleMode
= ADDR_SW_LINEAR
;
1913 case RADEON_SURF_MODE_1D
:
1914 case RADEON_SURF_MODE_2D
:
1915 if (surf
->flags
& RADEON_SURF_IMPORTED
||
1916 (info
->chip_class
>= GFX10
&& surf
->flags
& RADEON_SURF_FORCE_SWIZZLE_MODE
)) {
1917 AddrSurfInfoIn
.swizzleMode
= surf
->u
.gfx9
.surf
.swizzle_mode
;
1921 r
= gfx9_get_preferred_swizzle_mode(addrlib
->handle
, surf
, &AddrSurfInfoIn
, false,
1922 &AddrSurfInfoIn
.swizzleMode
);
1931 surf
->u
.gfx9
.resource_type
= AddrSurfInfoIn
.resourceType
;
1932 surf
->has_stencil
= !!(surf
->flags
& RADEON_SURF_SBUFFER
);
1934 surf
->num_dcc_levels
= 0;
1935 surf
->surf_size
= 0;
1936 surf
->fmask_size
= 0;
1938 surf
->htile_size
= 0;
1939 surf
->htile_slice_size
= 0;
1940 surf
->u
.gfx9
.surf_offset
= 0;
1941 surf
->u
.gfx9
.stencil_offset
= 0;
1942 surf
->cmask_size
= 0;
1943 surf
->u
.gfx9
.dcc_retile_use_uint16
= false;
1944 surf
->u
.gfx9
.dcc_retile_num_elements
= 0;
1945 surf
->u
.gfx9
.dcc_retile_map
= NULL
;
1947 /* Calculate texture layout information. */
1948 r
= gfx9_compute_miptree(addrlib
, info
, config
, surf
, compressed
, &AddrSurfInfoIn
);
1952 /* Calculate texture layout information for stencil. */
1953 if (surf
->flags
& RADEON_SURF_SBUFFER
) {
1954 AddrSurfInfoIn
.flags
.stencil
= 1;
1955 AddrSurfInfoIn
.bpp
= 8;
1956 AddrSurfInfoIn
.format
= ADDR_FMT_8
;
1958 if (!AddrSurfInfoIn
.flags
.depth
) {
1959 r
= gfx9_get_preferred_swizzle_mode(addrlib
->handle
, surf
, &AddrSurfInfoIn
, false,
1960 &AddrSurfInfoIn
.swizzleMode
);
1964 AddrSurfInfoIn
.flags
.depth
= 0;
1966 r
= gfx9_compute_miptree(addrlib
, info
, config
, surf
, compressed
, &AddrSurfInfoIn
);
1971 surf
->is_linear
= surf
->u
.gfx9
.surf
.swizzle_mode
== ADDR_SW_LINEAR
;
1973 /* Query whether the surface is displayable. */
1974 /* This is only useful for surfaces that are allocated without SCANOUT. */
1975 bool displayable
= false;
1976 if (!config
->is_3d
&& !config
->is_cube
) {
1977 r
= Addr2IsValidDisplaySwizzleMode(addrlib
->handle
, surf
->u
.gfx9
.surf
.swizzle_mode
,
1978 surf
->bpe
* 8, &displayable
);
1982 /* Display needs unaligned DCC. */
1983 if (surf
->num_dcc_levels
&&
1984 (!is_dcc_supported_by_DCN(info
, config
, surf
, surf
->u
.gfx9
.dcc
.rb_aligned
,
1985 surf
->u
.gfx9
.dcc
.pipe_aligned
) ||
1986 /* Don't set is_displayable if displayable DCC is missing. */
1987 (info
->use_display_dcc_with_retile_blit
&& !surf
->u
.gfx9
.dcc_retile_num_elements
)))
1988 displayable
= false;
1990 surf
->is_displayable
= displayable
;
1992 /* Validate that we allocated a displayable surface if requested. */
1993 assert(!AddrSurfInfoIn
.flags
.display
|| surf
->is_displayable
);
1995 /* Validate that DCC is set up correctly. */
1996 if (surf
->num_dcc_levels
) {
1997 assert(is_dcc_supported_by_L2(info
, surf
));
1998 if (AddrSurfInfoIn
.flags
.color
)
1999 assert(is_dcc_supported_by_CB(info
, surf
->u
.gfx9
.surf
.swizzle_mode
));
2000 if (AddrSurfInfoIn
.flags
.display
) {
2001 assert(is_dcc_supported_by_DCN(info
, config
, surf
, surf
->u
.gfx9
.dcc
.rb_aligned
,
2002 surf
->u
.gfx9
.dcc
.pipe_aligned
));
2006 if (info
->has_graphics
&& !compressed
&& !config
->is_3d
&& config
->info
.levels
== 1 &&
2007 AddrSurfInfoIn
.flags
.color
&& !surf
->is_linear
&&
2008 surf
->surf_alignment
>= 64 * 1024 && /* 64KB tiling */
2009 !(surf
->flags
& (RADEON_SURF_DISABLE_DCC
| RADEON_SURF_FORCE_SWIZZLE_MODE
|
2010 RADEON_SURF_FORCE_MICRO_TILE_MODE
))) {
2011 /* Validate that DCC is enabled if DCN can do it. */
2012 if ((info
->use_display_dcc_unaligned
|| info
->use_display_dcc_with_retile_blit
) &&
2013 AddrSurfInfoIn
.flags
.display
&& surf
->bpe
== 4) {
2014 assert(surf
->num_dcc_levels
);
2017 /* Validate that non-scanout DCC is always enabled. */
2018 if (!AddrSurfInfoIn
.flags
.display
)
2019 assert(surf
->num_dcc_levels
);
2022 if (!surf
->htile_size
) {
2023 /* Unset this if HTILE is not present. */
2024 surf
->flags
&= ~RADEON_SURF_TC_COMPATIBLE_HTILE
;
2027 switch (surf
->u
.gfx9
.surf
.swizzle_mode
) {
2029 case ADDR_SW_256B_S
:
2031 case ADDR_SW_64KB_S
:
2032 case ADDR_SW_64KB_S_T
:
2033 case ADDR_SW_4KB_S_X
:
2034 case ADDR_SW_64KB_S_X
:
2035 surf
->micro_tile_mode
= RADEON_MICRO_MODE_STANDARD
;
2039 case ADDR_SW_LINEAR
:
2040 case ADDR_SW_256B_D
:
2042 case ADDR_SW_64KB_D
:
2043 case ADDR_SW_64KB_D_T
:
2044 case ADDR_SW_4KB_D_X
:
2045 case ADDR_SW_64KB_D_X
:
2046 surf
->micro_tile_mode
= RADEON_MICRO_MODE_DISPLAY
;
2049 /* R = rotated (gfx9), render target (gfx10). */
2050 case ADDR_SW_256B_R
:
2052 case ADDR_SW_64KB_R
:
2053 case ADDR_SW_64KB_R_T
:
2054 case ADDR_SW_4KB_R_X
:
2055 case ADDR_SW_64KB_R_X
:
2056 case ADDR_SW_VAR_R_X
:
2057 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
2058 * used at the same time. We currently do not use rotated
2061 assert(info
->chip_class
>= GFX10
|| !"rotate micro tile mode is unsupported");
2062 surf
->micro_tile_mode
= RADEON_MICRO_MODE_RENDER
;
2067 case ADDR_SW_64KB_Z
:
2068 case ADDR_SW_64KB_Z_T
:
2069 case ADDR_SW_4KB_Z_X
:
2070 case ADDR_SW_64KB_Z_X
:
2071 case ADDR_SW_VAR_Z_X
:
2072 surf
->micro_tile_mode
= RADEON_MICRO_MODE_DEPTH
;
2082 int ac_compute_surface(struct ac_addrlib
*addrlib
, const struct radeon_info
*info
,
2083 const struct ac_surf_config
*config
, enum radeon_surf_mode mode
,
2084 struct radeon_surf
*surf
)
2088 r
= surf_config_sanity(config
, surf
->flags
);
2092 if (info
->chip_class
>= GFX9
)
2093 r
= gfx9_compute_surface(addrlib
, info
, config
, mode
, surf
);
2095 r
= gfx6_compute_surface(addrlib
->handle
, info
, config
, mode
, surf
);
2100 /* Determine the memory layout of multiple allocations in one buffer. */
2101 surf
->total_size
= surf
->surf_size
;
2102 surf
->alignment
= surf
->surf_alignment
;
2104 if (surf
->htile_size
) {
2105 surf
->htile_offset
= align64(surf
->total_size
, surf
->htile_alignment
);
2106 surf
->total_size
= surf
->htile_offset
+ surf
->htile_size
;
2107 surf
->alignment
= MAX2(surf
->alignment
, surf
->htile_alignment
);
2110 if (surf
->fmask_size
) {
2111 assert(config
->info
.samples
>= 2);
2112 surf
->fmask_offset
= align64(surf
->total_size
, surf
->fmask_alignment
);
2113 surf
->total_size
= surf
->fmask_offset
+ surf
->fmask_size
;
2114 surf
->alignment
= MAX2(surf
->alignment
, surf
->fmask_alignment
);
2117 /* Single-sample CMASK is in a separate buffer. */
2118 if (surf
->cmask_size
&& config
->info
.samples
>= 2) {
2119 surf
->cmask_offset
= align64(surf
->total_size
, surf
->cmask_alignment
);
2120 surf
->total_size
= surf
->cmask_offset
+ surf
->cmask_size
;
2121 surf
->alignment
= MAX2(surf
->alignment
, surf
->cmask_alignment
);
2124 if (surf
->is_displayable
)
2125 surf
->flags
|= RADEON_SURF_SCANOUT
;
2127 if (surf
->dcc_size
&&
2128 /* dcc_size is computed on GFX9+ only if it's displayable. */
2129 (info
->chip_class
>= GFX9
|| !get_display_flag(config
, surf
))) {
2130 /* It's better when displayable DCC is immediately after
2131 * the image due to hw-specific reasons.
2133 if (info
->chip_class
>= GFX9
&& surf
->u
.gfx9
.dcc_retile_num_elements
) {
2134 /* Add space for the displayable DCC buffer. */
2135 surf
->display_dcc_offset
= align64(surf
->total_size
, surf
->u
.gfx9
.display_dcc_alignment
);
2136 surf
->total_size
= surf
->display_dcc_offset
+ surf
->u
.gfx9
.display_dcc_size
;
2138 /* Add space for the DCC retile buffer. (16-bit or 32-bit elements) */
2139 surf
->dcc_retile_map_offset
= align64(surf
->total_size
, info
->tcc_cache_line_size
);
2141 if (surf
->u
.gfx9
.dcc_retile_use_uint16
) {
2143 surf
->dcc_retile_map_offset
+ surf
->u
.gfx9
.dcc_retile_num_elements
* 2;
2146 surf
->dcc_retile_map_offset
+ surf
->u
.gfx9
.dcc_retile_num_elements
* 4;
2150 surf
->dcc_offset
= align64(surf
->total_size
, surf
->dcc_alignment
);
2151 surf
->total_size
= surf
->dcc_offset
+ surf
->dcc_size
;
2152 surf
->alignment
= MAX2(surf
->alignment
, surf
->dcc_alignment
);
2158 /* This is meant to be used for disabling DCC. */
2159 void ac_surface_zero_dcc_fields(struct radeon_surf
*surf
)
2161 surf
->dcc_offset
= 0;
2162 surf
->display_dcc_offset
= 0;
2163 surf
->dcc_retile_map_offset
= 0;
/* Decode the 3-bit TILE_SPLIT tiling-flag field into a tile-split size in
 * bytes (64 << field). Out-of-range values fall back to 1024 bytes.
 * Inverse of eg_tile_split_rev().
 */
static unsigned eg_tile_split(unsigned tile_split)
{
   switch (tile_split) {
   case 0:
      tile_split = 64;
      break;
   case 1:
      tile_split = 128;
      break;
   case 2:
      tile_split = 256;
      break;
   case 3:
      tile_split = 512;
      break;
   default:
   case 4:
      tile_split = 1024;
      break;
   case 5:
      tile_split = 2048;
      break;
   case 6:
      tile_split = 4096;
      break;
   }
   return tile_split;
}
/* Encode a tile-split size in bytes back into the 3-bit TILE_SPLIT
 * tiling-flag field (log2(bytes / 64)). Unknown sizes fall back to the
 * 1024-byte encoding. Inverse of eg_tile_split().
 */
static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
   switch (eg_tile_split) {
   case 64:
      return 0;
   case 128:
      return 1;
   case 256:
      return 2;
   case 512:
      return 3;
   default:
   case 1024:
      return 4;
   case 2048:
      return 5;
   case 4096:
      return 6;
   }
}
2216 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_SHIFT 45
2217 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_MASK 0x3
2219 /* This should be called before ac_compute_surface. */
2220 void ac_surface_set_bo_metadata(const struct radeon_info
*info
, struct radeon_surf
*surf
,
2221 uint64_t tiling_flags
, enum radeon_surf_mode
*mode
)
2225 if (info
->chip_class
>= GFX9
) {
2226 surf
->u
.gfx9
.surf
.swizzle_mode
= AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
2227 surf
->u
.gfx9
.dcc
.independent_64B_blocks
=
2228 AMDGPU_TILING_GET(tiling_flags
, DCC_INDEPENDENT_64B
);
2229 surf
->u
.gfx9
.dcc
.independent_128B_blocks
=
2230 AMDGPU_TILING_GET(tiling_flags
, DCC_INDEPENDENT_128B
);
2231 surf
->u
.gfx9
.dcc
.max_compressed_block_size
=
2232 AMDGPU_TILING_GET(tiling_flags
, DCC_MAX_COMPRESSED_BLOCK_SIZE
);
2233 surf
->u
.gfx9
.display_dcc_pitch_max
= AMDGPU_TILING_GET(tiling_flags
, DCC_PITCH_MAX
);
2234 scanout
= AMDGPU_TILING_GET(tiling_flags
, SCANOUT
);
2236 surf
->u
.gfx9
.surf
.swizzle_mode
> 0 ? RADEON_SURF_MODE_2D
: RADEON_SURF_MODE_LINEAR_ALIGNED
;
2238 surf
->u
.legacy
.pipe_config
= AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
2239 surf
->u
.legacy
.bankw
= 1 << AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
2240 surf
->u
.legacy
.bankh
= 1 << AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
2241 surf
->u
.legacy
.tile_split
= eg_tile_split(AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
));
2242 surf
->u
.legacy
.mtilea
= 1 << AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
2243 surf
->u
.legacy
.num_banks
= 2 << AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
2244 scanout
= AMDGPU_TILING_GET(tiling_flags
, MICRO_TILE_MODE
) == 0; /* DISPLAY */
2246 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == 4) /* 2D_TILED_THIN1 */
2247 *mode
= RADEON_SURF_MODE_2D
;
2248 else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == 2) /* 1D_TILED_THIN1 */
2249 *mode
= RADEON_SURF_MODE_1D
;
2251 *mode
= RADEON_SURF_MODE_LINEAR_ALIGNED
;
2255 surf
->flags
|= RADEON_SURF_SCANOUT
;
2257 surf
->flags
&= ~RADEON_SURF_SCANOUT
;
2260 void ac_surface_get_bo_metadata(const struct radeon_info
*info
, struct radeon_surf
*surf
,
2261 uint64_t *tiling_flags
)
2265 if (info
->chip_class
>= GFX9
) {
2266 uint64_t dcc_offset
= 0;
2268 if (surf
->dcc_offset
) {
2269 dcc_offset
= surf
->display_dcc_offset
? surf
->display_dcc_offset
: surf
->dcc_offset
;
2270 assert((dcc_offset
>> 8) != 0 && (dcc_offset
>> 8) < (1 << 24));
2273 *tiling_flags
|= AMDGPU_TILING_SET(SWIZZLE_MODE
, surf
->u
.gfx9
.surf
.swizzle_mode
);
2274 *tiling_flags
|= AMDGPU_TILING_SET(DCC_OFFSET_256B
, dcc_offset
>> 8);
2275 *tiling_flags
|= AMDGPU_TILING_SET(DCC_PITCH_MAX
, surf
->u
.gfx9
.display_dcc_pitch_max
);
2277 AMDGPU_TILING_SET(DCC_INDEPENDENT_64B
, surf
->u
.gfx9
.dcc
.independent_64B_blocks
);
2279 AMDGPU_TILING_SET(DCC_INDEPENDENT_128B
, surf
->u
.gfx9
.dcc
.independent_128B_blocks
);
2280 *tiling_flags
|= AMDGPU_TILING_SET(DCC_MAX_COMPRESSED_BLOCK_SIZE
,
2281 surf
->u
.gfx9
.dcc
.max_compressed_block_size
);
2282 *tiling_flags
|= AMDGPU_TILING_SET(SCANOUT
, (surf
->flags
& RADEON_SURF_SCANOUT
) != 0);
2284 if (surf
->u
.legacy
.level
[0].mode
>= RADEON_SURF_MODE_2D
)
2285 *tiling_flags
|= AMDGPU_TILING_SET(ARRAY_MODE
, 4); /* 2D_TILED_THIN1 */
2286 else if (surf
->u
.legacy
.level
[0].mode
>= RADEON_SURF_MODE_1D
)
2287 *tiling_flags
|= AMDGPU_TILING_SET(ARRAY_MODE
, 2); /* 1D_TILED_THIN1 */
2289 *tiling_flags
|= AMDGPU_TILING_SET(ARRAY_MODE
, 1); /* LINEAR_ALIGNED */
2291 *tiling_flags
|= AMDGPU_TILING_SET(PIPE_CONFIG
, surf
->u
.legacy
.pipe_config
);
2292 *tiling_flags
|= AMDGPU_TILING_SET(BANK_WIDTH
, util_logbase2(surf
->u
.legacy
.bankw
));
2293 *tiling_flags
|= AMDGPU_TILING_SET(BANK_HEIGHT
, util_logbase2(surf
->u
.legacy
.bankh
));
2294 if (surf
->u
.legacy
.tile_split
)
2296 AMDGPU_TILING_SET(TILE_SPLIT
, eg_tile_split_rev(surf
->u
.legacy
.tile_split
));
2297 *tiling_flags
|= AMDGPU_TILING_SET(MACRO_TILE_ASPECT
, util_logbase2(surf
->u
.legacy
.mtilea
));
2298 *tiling_flags
|= AMDGPU_TILING_SET(NUM_BANKS
, util_logbase2(surf
->u
.legacy
.num_banks
) - 1);
2300 if (surf
->flags
& RADEON_SURF_SCANOUT
)
2301 *tiling_flags
|= AMDGPU_TILING_SET(MICRO_TILE_MODE
, 0); /* DISPLAY_MICRO_TILING */
2303 *tiling_flags
|= AMDGPU_TILING_SET(MICRO_TILE_MODE
, 1); /* THIN_MICRO_TILING */
2307 static uint32_t ac_get_umd_metadata_word1(const struct radeon_info
*info
)
2309 return (ATI_VENDOR_ID
<< 16) | info
->pci_id
;
2312 /* This should be called after ac_compute_surface. */
2313 bool ac_surface_set_umd_metadata(const struct radeon_info
*info
, struct radeon_surf
*surf
,
2314 unsigned num_storage_samples
, unsigned num_mipmap_levels
,
2315 unsigned size_metadata
, uint32_t metadata
[64])
2317 uint32_t *desc
= &metadata
[2];
2320 if (info
->chip_class
>= GFX9
)
2321 offset
= surf
->u
.gfx9
.surf_offset
;
2323 offset
= surf
->u
.legacy
.level
[0].offset
;
2325 if (offset
|| /* Non-zero planes ignore metadata. */
2326 size_metadata
< 10 * 4 || /* at least 2(header) + 8(desc) dwords */
2327 metadata
[0] == 0 || /* invalid version number */
2328 metadata
[1] != ac_get_umd_metadata_word1(info
)) /* invalid PCI ID */ {
2329 /* Disable DCC because it might not be enabled. */
2330 ac_surface_zero_dcc_fields(surf
);
2332 /* Don't report an error if the texture comes from an incompatible driver,
2333 * but this might not work.
2338 /* Validate that sample counts and the number of mipmap levels match. */
2339 unsigned desc_last_level
= G_008F1C_LAST_LEVEL(desc
[3]);
2340 unsigned type
= G_008F1C_TYPE(desc
[3]);
2342 if (type
== V_008F1C_SQ_RSRC_IMG_2D_MSAA
|| type
== V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY
) {
2343 unsigned log_samples
= util_logbase2(MAX2(1, num_storage_samples
));
2345 if (desc_last_level
!= log_samples
) {
2347 "amdgpu: invalid MSAA texture import, "
2348 "metadata has log2(samples) = %u, the caller set %u\n",
2349 desc_last_level
, log_samples
);
2353 if (desc_last_level
!= num_mipmap_levels
- 1) {
2355 "amdgpu: invalid mipmapped texture import, "
2356 "metadata has last_level = %u, the caller set %u\n",
2357 desc_last_level
, num_mipmap_levels
- 1);
2362 if (info
->chip_class
>= GFX8
&& G_008F28_COMPRESSION_EN(desc
[6])) {
2363 /* Read DCC information. */
2364 switch (info
->chip_class
) {
2366 surf
->dcc_offset
= (uint64_t)desc
[7] << 8;
2371 ((uint64_t)desc
[7] << 8) | ((uint64_t)G_008F24_META_DATA_ADDRESS(desc
[5]) << 40);
2372 surf
->u
.gfx9
.dcc
.pipe_aligned
= G_008F24_META_PIPE_ALIGNED(desc
[5]);
2373 surf
->u
.gfx9
.dcc
.rb_aligned
= G_008F24_META_RB_ALIGNED(desc
[5]);
2375 /* If DCC is unaligned, this can only be a displayable image. */
2376 if (!surf
->u
.gfx9
.dcc
.pipe_aligned
&& !surf
->u
.gfx9
.dcc
.rb_aligned
)
2377 assert(surf
->is_displayable
);
2383 ((uint64_t)G_00A018_META_DATA_ADDRESS_LO(desc
[6]) << 8) | ((uint64_t)desc
[7] << 16);
2384 surf
->u
.gfx9
.dcc
.pipe_aligned
= G_00A018_META_PIPE_ALIGNED(desc
[6]);
2392 /* Disable DCC. dcc_offset is always set by texture_from_handle
2393 * and must be cleared here.
2395 ac_surface_zero_dcc_fields(surf
);
2401 void ac_surface_get_umd_metadata(const struct radeon_info
*info
, struct radeon_surf
*surf
,
2402 unsigned num_mipmap_levels
, uint32_t desc
[8],
2403 unsigned *size_metadata
, uint32_t metadata
[64])
2405 /* Clear the base address and set the relative DCC offset. */
2407 desc
[1] &= C_008F14_BASE_ADDRESS_HI
;
2409 switch (info
->chip_class
) {
2414 desc
[7] = surf
->dcc_offset
>> 8;
2417 desc
[7] = surf
->dcc_offset
>> 8;
2418 desc
[5] &= C_008F24_META_DATA_ADDRESS
;
2419 desc
[5] |= S_008F24_META_DATA_ADDRESS(surf
->dcc_offset
>> 40);
2423 desc
[6] &= C_00A018_META_DATA_ADDRESS_LO
;
2424 desc
[6] |= S_00A018_META_DATA_ADDRESS_LO(surf
->dcc_offset
>> 8);
2425 desc
[7] = surf
->dcc_offset
>> 16;
2431 /* Metadata image format format version 1:
2432 * [0] = 1 (metadata format identifier)
2433 * [1] = (VENDOR_ID << 16) | PCI_ID
2434 * [2:9] = image descriptor for the whole resource
2435 * [2] is always 0, because the base address is cleared
2436 * [9] is the DCC offset bits [39:8] from the beginning of
2438 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
2441 metadata
[0] = 1; /* metadata image format version 1 */
2443 /* Tiling modes are ambiguous without a PCI ID. */
2444 metadata
[1] = ac_get_umd_metadata_word1(info
);
2446 /* Dwords [2:9] contain the image descriptor. */
2447 memcpy(&metadata
[2], desc
, 8 * 4);
2448 *size_metadata
= 10 * 4;
2450 /* Dwords [10:..] contain the mipmap level offsets. */
2451 if (info
->chip_class
<= GFX8
) {
2452 for (unsigned i
= 0; i
< num_mipmap_levels
; i
++)
2453 metadata
[10 + i
] = surf
->u
.legacy
.level
[i
].offset
>> 8;
2455 *size_metadata
+= num_mipmap_levels
* 4;
2459 void ac_surface_override_offset_stride(const struct radeon_info
*info
, struct radeon_surf
*surf
,
2460 unsigned num_mipmap_levels
, uint64_t offset
, unsigned pitch
)
2462 if (info
->chip_class
>= GFX9
) {
2464 surf
->u
.gfx9
.surf_pitch
= pitch
;
2465 if (num_mipmap_levels
== 1)
2466 surf
->u
.gfx9
.surf
.epitch
= pitch
- 1;
2467 surf
->u
.gfx9
.surf_slice_size
= (uint64_t)pitch
* surf
->u
.gfx9
.surf_height
* surf
->bpe
;
2469 surf
->u
.gfx9
.surf_offset
= offset
;
2470 if (surf
->u
.gfx9
.stencil_offset
)
2471 surf
->u
.gfx9
.stencil_offset
+= offset
;
2474 surf
->u
.legacy
.level
[0].nblk_x
= pitch
;
2475 surf
->u
.legacy
.level
[0].slice_size_dw
=
2476 ((uint64_t)pitch
* surf
->u
.legacy
.level
[0].nblk_y
* surf
->bpe
) / 4;
2480 for (unsigned i
= 0; i
< ARRAY_SIZE(surf
->u
.legacy
.level
); ++i
)
2481 surf
->u
.legacy
.level
[i
].offset
+= offset
;
2485 if (surf
->htile_offset
)
2486 surf
->htile_offset
+= offset
;
2487 if (surf
->fmask_offset
)
2488 surf
->fmask_offset
+= offset
;
2489 if (surf
->cmask_offset
)
2490 surf
->cmask_offset
+= offset
;
2491 if (surf
->dcc_offset
)
2492 surf
->dcc_offset
+= offset
;
2493 if (surf
->display_dcc_offset
)
2494 surf
->display_dcc_offset
+= offset
;
2495 if (surf
->dcc_retile_map_offset
)
2496 surf
->dcc_retile_map_offset
+= offset
;