ac/surface: enable DCC for the first level in the mip tail on gfx10
[mesa.git] / src / amd / common / ac_surface.c
1 /*
2 * Copyright © 2011 Red Hat All Rights Reserved.
3 * Copyright © 2017 Advanced Micro Devices, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
25 * of the Software.
26 */
27
28 #include "ac_surface.h"
29 #include "amd_family.h"
30 #include "addrlib/src/amdgpu_asic_addr.h"
31 #include "ac_gpu_info.h"
32 #include "util/hash_table.h"
33 #include "util/macros.h"
34 #include "util/simple_mtx.h"
35 #include "util/u_atomic.h"
36 #include "util/u_math.h"
37 #include "util/u_memory.h"
38 #include "sid.h"
39
40 #include <errno.h>
41 #include <stdio.h>
42 #include <stdlib.h>
43 #include <amdgpu.h>
44 #include "drm-uapi/amdgpu_drm.h"
45
46 #include "addrlib/inc/addrinterface.h"
47
48 #ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
49 #define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
50 #endif
51
52 #ifndef CIASICIDGFXENGINE_ARCTICISLAND
53 #define CIASICIDGFXENGINE_ARCTICISLAND 0x0000000D
54 #endif
55
56 struct ac_addrlib {
57 ADDR_HANDLE handle;
58
59 /* The cache of DCC retile maps for reuse when allocating images of
60 * similar sizes.
61 */
62 simple_mtx_t dcc_retile_map_lock;
63 struct hash_table *dcc_retile_maps;
64 };
65
66 struct dcc_retile_map_key {
67 enum radeon_family family;
68 unsigned retile_width;
69 unsigned retile_height;
70 bool rb_aligned;
71 bool pipe_aligned;
72 unsigned dcc_retile_num_elements;
73 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT input;
74 };
75
76 static uint32_t dcc_retile_map_hash_key(const void *key)
77 {
78 return _mesa_hash_data(key, sizeof(struct dcc_retile_map_key));
79 }
80
81 static bool dcc_retile_map_keys_equal(const void *a, const void *b)
82 {
83 return memcmp(a, b, sizeof(struct dcc_retile_map_key)) == 0;
84 }
85
86 static void dcc_retile_map_free(struct hash_entry *entry)
87 {
88 free((void*)entry->key);
89 free(entry->data);
90 }
91
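/* Compute a DCC retile map: for each DCC compression block in the
 * retile_width x retile_height region, store a pair of DCC element offsets
 * (source = RB/pipe-aligned layout, destination = unaligned displayable
 * layout) for the retile blit to copy between.
 */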
92 static uint32_t *ac_compute_dcc_retile_map(struct ac_addrlib *addrlib,
93 const struct radeon_info *info,
94 unsigned retile_width, unsigned retile_height,
95 bool rb_aligned, bool pipe_aligned, bool use_uint16,
96 unsigned dcc_retile_num_elements,
97 const ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT *in)
98 {
99 unsigned dcc_retile_map_size = dcc_retile_num_elements * (use_uint16 ? 2 : 4);
100 struct dcc_retile_map_key key;
101
102 assert(in->numFrags == 1 && in->numSlices == 1 && in->numMipLevels == 1);
103
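/* Zero the whole key, including struct padding, so that hashing and
 * memcmp'ing the raw bytes in the hash table callbacks is deterministic. */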
104 memset(&key, 0, sizeof(key));
105 key.family = info->family;
106 key.retile_width = retile_width;
107 key.retile_height = retile_height;
108 key.rb_aligned = rb_aligned;
109 key.pipe_aligned = pipe_aligned;
110 key.dcc_retile_num_elements = dcc_retile_num_elements;
111 memcpy(&key.input, in, sizeof(*in));
112
113 simple_mtx_lock(&addrlib->dcc_retile_map_lock);
114
115 /* If we have already computed this retile map, get it from the hash table. */
116 struct hash_entry *entry = _mesa_hash_table_search(addrlib->dcc_retile_maps, &key);
117 if (entry) {
118 uint32_t *map = entry->data;
119 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
120 return map;
121 }
122
123 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin;
124 memcpy(&addrin, in, sizeof(*in));
125
126 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_OUTPUT addrout = {};
127 addrout.size = sizeof(addrout);
128
129 void *dcc_retile_map = malloc(dcc_retile_map_size);
130 if (!dcc_retile_map) {
131 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
132 return NULL;
133 }
134
135 unsigned index = 0;
136
137 for (unsigned y = 0; y < retile_height; y += in->compressBlkHeight) {
138 addrin.y = y;
139
140 for (unsigned x = 0; x < retile_width; x += in->compressBlkWidth) {
141 addrin.x = x;
142
143 /* Compute src DCC address */
144 addrin.dccKeyFlags.pipeAligned = pipe_aligned;
145 addrin.dccKeyFlags.rbAligned = rb_aligned;
146 addrout.addr = 0;
147
148 if (Addr2ComputeDccAddrFromCoord(addrlib->handle, &addrin, &addrout) != ADDR_OK) {
149 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
free(dcc_retile_map); /* not owned by the hash table yet */
150 return NULL;
151 }
152
153 if (use_uint16)
154 ((uint16_t*)dcc_retile_map)[index * 2] = addrout.addr;
155 else
156 ((uint32_t*)dcc_retile_map)[index * 2] = addrout.addr;
157
158 /* Compute dst DCC address */
159 addrin.dccKeyFlags.pipeAligned = 0;
160 addrin.dccKeyFlags.rbAligned = 0;
161 addrout.addr = 0;
162
163 if (Addr2ComputeDccAddrFromCoord(addrlib->handle, &addrin, &addrout) != ADDR_OK) {
164 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
free(dcc_retile_map); /* not owned by the hash table yet */
165 return NULL;
166 }
167
168 if (use_uint16)
169 ((uint16_t*)dcc_retile_map)[index * 2 + 1] = addrout.addr;
170 else
171 ((uint32_t*)dcc_retile_map)[index * 2 + 1] = addrout.addr;
172
173 assert(index * 2 + 1 < dcc_retile_num_elements);
174 index++;
175 }
176 }
177 /* Fill the remaining pairs with the last one (for the compute shader). */
178 for (unsigned i = index * 2; i < dcc_retile_num_elements; i++) {
179 if (use_uint16)
180 ((uint16_t*)dcc_retile_map)[i] = ((uint16_t*)dcc_retile_map)[i - 2];
181 else
182 ((uint32_t*)dcc_retile_map)[i] = ((uint32_t*)dcc_retile_map)[i - 2];
183 }
184
185 /* Insert the retile map into the hash table, so that it can be reused and
186 * the computation can be skipped for similar image sizes.
187 */
188 _mesa_hash_table_insert(addrlib->dcc_retile_maps,
189 mem_dup(&key, sizeof(key)), dcc_retile_map);
190
191 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
192 return dcc_retile_map;
193 }
194
195 static void *ADDR_API allocSysMem(const ADDR_ALLOCSYSMEM_INPUT * pInput)
196 {
197 return malloc(pInput->sizeInBytes);
198 }
199
200 static ADDR_E_RETURNCODE ADDR_API freeSysMem(const ADDR_FREESYSMEM_INPUT * pInput)
201 {
202 free(pInput->pVirtAddr);
203 return ADDR_OK;
204 }
205
206 struct ac_addrlib *ac_addrlib_create(const struct radeon_info *info,
207 const struct amdgpu_gpu_info *amdinfo,
208 uint64_t *max_alignment)
209 {
210 ADDR_CREATE_INPUT addrCreateInput = {0};
211 ADDR_CREATE_OUTPUT addrCreateOutput = {0};
212 ADDR_REGISTER_VALUE regValue = {0};
213 ADDR_CREATE_FLAGS createFlags = {{0}};
214 ADDR_GET_MAX_ALIGNMENTS_OUTPUT addrGetMaxAlignmentsOutput = {0};
215 ADDR_E_RETURNCODE addrRet;
216
217 addrCreateInput.size = sizeof(ADDR_CREATE_INPUT);
218 addrCreateOutput.size = sizeof(ADDR_CREATE_OUTPUT);
219
220 regValue.gbAddrConfig = amdinfo->gb_addr_cfg;
221 createFlags.value = 0;
222
223 addrCreateInput.chipFamily = info->family_id;
224 addrCreateInput.chipRevision = info->chip_external_rev;
225
226 if (addrCreateInput.chipFamily == FAMILY_UNKNOWN)
227 return NULL;
228
229 if (addrCreateInput.chipFamily >= FAMILY_AI) {
230 addrCreateInput.chipEngine = CIASICIDGFXENGINE_ARCTICISLAND;
231 } else {
232 regValue.noOfBanks = amdinfo->mc_arb_ramcfg & 0x3;
233 regValue.noOfRanks = (amdinfo->mc_arb_ramcfg & 0x4) >> 2;
234
235 regValue.backendDisables = amdinfo->enabled_rb_pipes_mask;
236 regValue.pTileConfig = amdinfo->gb_tile_mode;
237 regValue.noOfEntries = ARRAY_SIZE(amdinfo->gb_tile_mode);
238 if (addrCreateInput.chipFamily == FAMILY_SI) {
239 regValue.pMacroTileConfig = NULL;
240 regValue.noOfMacroEntries = 0;
241 } else {
242 regValue.pMacroTileConfig = amdinfo->gb_macro_tile_mode;
243 regValue.noOfMacroEntries = ARRAY_SIZE(amdinfo->gb_macro_tile_mode);
244 }
245
246 createFlags.useTileIndex = 1;
247 createFlags.useHtileSliceAlign = 1;
248
249 addrCreateInput.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
250 }
251
252 addrCreateInput.callbacks.allocSysMem = allocSysMem;
253 addrCreateInput.callbacks.freeSysMem = freeSysMem;
254 addrCreateInput.callbacks.debugPrint = 0;
255 addrCreateInput.createFlags = createFlags;
256 addrCreateInput.regValue = regValue;
257
258 addrRet = AddrCreate(&addrCreateInput, &addrCreateOutput);
259 if (addrRet != ADDR_OK)
260 return NULL;
261
262 if (max_alignment) {
263 addrRet = AddrGetMaxAlignments(addrCreateOutput.hLib, &addrGetMaxAlignmentsOutput);
264 if (addrRet == ADDR_OK) {
265 *max_alignment = addrGetMaxAlignmentsOutput.baseAlign;
266 }
267 }
268
269 struct ac_addrlib *addrlib = calloc(1, sizeof(struct ac_addrlib));
270 if (!addrlib) {
271 AddrDestroy(addrCreateOutput.hLib);
272 return NULL;
273 }
274
275 addrlib->handle = addrCreateOutput.hLib;
276 simple_mtx_init(&addrlib->dcc_retile_map_lock, mtx_plain);
277 addrlib->dcc_retile_maps = _mesa_hash_table_create(NULL, dcc_retile_map_hash_key,
278 dcc_retile_map_keys_equal);
279 return addrlib;
280 }
281
282 void ac_addrlib_destroy(struct ac_addrlib *addrlib)
283 {
284 AddrDestroy(addrlib->handle);
285 simple_mtx_destroy(&addrlib->dcc_retile_map_lock);
286 _mesa_hash_table_destroy(addrlib->dcc_retile_maps, dcc_retile_map_free);
287 free(addrlib);
288 }
289
290 static int surf_config_sanity(const struct ac_surf_config *config,
291 unsigned flags)
292 {
293 /* FMASK is allocated together with the color surface and can't be
294 * allocated separately.
295 */
296 assert(!(flags & RADEON_SURF_FMASK));
297 if (flags & RADEON_SURF_FMASK)
298 return -EINVAL;
299
300 /* All dimensions must be at least 1. */
301 if (!config->info.width || !config->info.height || !config->info.depth ||
302 !config->info.array_size || !config->info.levels)
303 return -EINVAL;
304
305 switch (config->info.samples) {
306 case 0:
307 case 1:
308 case 2:
309 case 4:
310 case 8:
311 break;
312 case 16:
313 if (flags & RADEON_SURF_Z_OR_SBUFFER)
314 return -EINVAL;
315 break;
316 default:
317 return -EINVAL;
318 }
319
320 if (!(flags & RADEON_SURF_Z_OR_SBUFFER)) {
321 switch (config->info.storage_samples) {
322 case 0:
323 case 1:
324 case 2:
325 case 4:
326 case 8:
327 break;
328 default:
329 return -EINVAL;
330 }
331 }
332
333 if (config->is_3d && config->info.array_size > 1)
334 return -EINVAL;
335 if (config->is_cube && config->info.depth > 1)
336 return -EINVAL;
337
338 return 0;
339 }
340
341 static int gfx6_compute_level(ADDR_HANDLE addrlib,
342 const struct ac_surf_config *config,
343 struct radeon_surf *surf, bool is_stencil,
344 unsigned level, bool compressed,
345 ADDR_COMPUTE_SURFACE_INFO_INPUT *AddrSurfInfoIn,
346 ADDR_COMPUTE_SURFACE_INFO_OUTPUT *AddrSurfInfoOut,
347 ADDR_COMPUTE_DCCINFO_INPUT *AddrDccIn,
348 ADDR_COMPUTE_DCCINFO_OUTPUT *AddrDccOut,
349 ADDR_COMPUTE_HTILE_INFO_INPUT *AddrHtileIn,
350 ADDR_COMPUTE_HTILE_INFO_OUTPUT *AddrHtileOut)
351 {
352 struct legacy_surf_level *surf_level;
353 ADDR_E_RETURNCODE ret;
354
355 AddrSurfInfoIn->mipLevel = level;
356 AddrSurfInfoIn->width = u_minify(config->info.width, level);
357 AddrSurfInfoIn->height = u_minify(config->info.height, level);
358
359 /* Make GFX6 linear surfaces compatible with GFX9 for hybrid graphics,
360 * because GFX9 needs linear alignment of 256 bytes.
361 */
362 if (config->info.levels == 1 &&
363 AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED &&
364 AddrSurfInfoIn->bpp &&
365 util_is_power_of_two_or_zero(AddrSurfInfoIn->bpp)) {
366 unsigned alignment = 256 / (AddrSurfInfoIn->bpp / 8);
367
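/* E.g. 32 bpp -> 4 bytes/pixel -> align the width to 64 pixels, so that
 * each row occupies a multiple of 256 bytes. */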
368 AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, alignment);
369 }
370
371 /* addrlib assumes that the number of bytes per pixel is a divisor of 64,
372 * which is not true for r32g32b32 formats. */
373 if (AddrSurfInfoIn->bpp == 96) {
374 assert(config->info.levels == 1);
375 assert(AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED);
376
377 /* The least common multiple of 64 bytes and 12 bytes/pixel is
378 * 192 bytes, or 16 pixels. */
379 AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, 16);
380 }
381
382 if (config->is_3d)
383 AddrSurfInfoIn->numSlices = u_minify(config->info.depth, level);
384 else if (config->is_cube)
385 AddrSurfInfoIn->numSlices = 6;
386 else
387 AddrSurfInfoIn->numSlices = config->info.array_size;
388
389 if (level > 0) {
390 /* Set the base level pitch. This is needed for calculation
391 * of non-zero levels. */
392 if (is_stencil)
393 AddrSurfInfoIn->basePitch = surf->u.legacy.stencil_level[0].nblk_x;
394 else
395 AddrSurfInfoIn->basePitch = surf->u.legacy.level[0].nblk_x;
396
397 /* Convert blocks to pixels for compressed formats. */
398 if (compressed)
399 AddrSurfInfoIn->basePitch *= surf->blk_w;
400 }
401
402 ret = AddrComputeSurfaceInfo(addrlib,
403 AddrSurfInfoIn,
404 AddrSurfInfoOut);
405 if (ret != ADDR_OK) {
406 return ret;
407 }
408
409 surf_level = is_stencil ? &surf->u.legacy.stencil_level[level] : &surf->u.legacy.level[level];
410 surf_level->offset = align64(surf->surf_size, AddrSurfInfoOut->baseAlign);
411 surf_level->slice_size_dw = AddrSurfInfoOut->sliceSize / 4;
412 surf_level->nblk_x = AddrSurfInfoOut->pitch;
413 surf_level->nblk_y = AddrSurfInfoOut->height;
414
415 switch (AddrSurfInfoOut->tileMode) {
416 case ADDR_TM_LINEAR_ALIGNED:
417 surf_level->mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
418 break;
419 case ADDR_TM_1D_TILED_THIN1:
420 surf_level->mode = RADEON_SURF_MODE_1D;
421 break;
422 case ADDR_TM_2D_TILED_THIN1:
423 surf_level->mode = RADEON_SURF_MODE_2D;
424 break;
425 default:
426 assert(0);
427 }
428
429 if (is_stencil)
430 surf->u.legacy.stencil_tiling_index[level] = AddrSurfInfoOut->tileIndex;
431 else
432 surf->u.legacy.tiling_index[level] = AddrSurfInfoOut->tileIndex;
433
434 surf->surf_size = surf_level->offset + AddrSurfInfoOut->surfSize;
435
436 /* Clear DCC fields at the beginning. */
437 surf_level->dcc_offset = 0;
438
439 /* The previous level's flag tells us if we can use DCC for this level. */
440 if (AddrSurfInfoIn->flags.dccCompatible &&
441 (level == 0 || AddrDccOut->subLvlCompressible)) {
442 bool prev_level_clearable = level == 0 ||
443 AddrDccOut->dccRamSizeAligned;
444
445 AddrDccIn->colorSurfSize = AddrSurfInfoOut->surfSize;
446 AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
447 AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
448 AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
449 AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
450
451 ret = AddrComputeDccInfo(addrlib,
452 AddrDccIn,
453 AddrDccOut);
454
455 if (ret == ADDR_OK) {
456 surf_level->dcc_offset = surf->dcc_size;
457 surf->num_dcc_levels = level + 1;
458 surf->dcc_size = surf_level->dcc_offset + AddrDccOut->dccRamSize;
459 surf->dcc_alignment = MAX2(surf->dcc_alignment, AddrDccOut->dccRamBaseAlign);
460
461 /* If the DCC size of a subresource (1 mip level or 1 slice)
462 * is not aligned, the DCC memory layout is not contiguous for
463 * that subresource, which means we can't use fast clear.
464 *
465 * We only do fast clears for whole mipmap levels. If we did
466 * per-slice fast clears, the same restriction would apply.
467 * (i.e. only compute the slice size and see if it's aligned)
468 *
469 * The last level can be non-contiguous and still be clearable
470 * if it's interleaved with the next level that doesn't exist.
471 */
472 if (AddrDccOut->dccRamSizeAligned ||
473 (prev_level_clearable && level == config->info.levels - 1))
474 surf_level->dcc_fast_clear_size = AddrDccOut->dccFastClearSize;
475 else
476 surf_level->dcc_fast_clear_size = 0;
477
478 /* Compute the DCC slice size because addrlib doesn't
479 * provide this info. As DCC memory is linear (each
480 * slice is the same size) it's easy to compute.
481 */
482 surf->dcc_slice_size = AddrDccOut->dccRamSize / config->info.array_size;
483
484 /* For arrays, we have to compute the DCC info again
485 * with one slice size to get a correct fast clear
486 * size.
487 */
488 if (config->info.array_size > 1) {
489 AddrDccIn->colorSurfSize = AddrSurfInfoOut->sliceSize;
490 AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
491 AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
492 AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
493 AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
494
495 ret = AddrComputeDccInfo(addrlib,
496 AddrDccIn, AddrDccOut);
497 if (ret == ADDR_OK) {
498 /* If the DCC memory isn't properly
499 * aligned, the data are interleaved
500 * across slices.
501 */
502 if (AddrDccOut->dccRamSizeAligned)
503 surf_level->dcc_slice_fast_clear_size = AddrDccOut->dccFastClearSize;
504 else
505 surf_level->dcc_slice_fast_clear_size = 0;
506 }
507
508 if (surf->flags & RADEON_SURF_CONTIGUOUS_DCC_LAYERS &&
509 surf->dcc_slice_size != surf_level->dcc_slice_fast_clear_size) {
510 surf->dcc_size = 0;
511 surf->num_dcc_levels = 0;
512 AddrDccOut->subLvlCompressible = false;
513 }
514 } else {
515 surf_level->dcc_slice_fast_clear_size = surf_level->dcc_fast_clear_size;
516 }
517 }
518 }
519
520 /* HTILE. */
521 if (!is_stencil &&
522 AddrSurfInfoIn->flags.depth &&
523 surf_level->mode == RADEON_SURF_MODE_2D &&
524 level == 0 &&
525 !(surf->flags & RADEON_SURF_NO_HTILE)) {
526 AddrHtileIn->flags.tcCompatible = AddrSurfInfoOut->tcCompatible;
527 AddrHtileIn->pitch = AddrSurfInfoOut->pitch;
528 AddrHtileIn->height = AddrSurfInfoOut->height;
529 AddrHtileIn->numSlices = AddrSurfInfoOut->depth;
530 AddrHtileIn->blockWidth = ADDR_HTILE_BLOCKSIZE_8;
531 AddrHtileIn->blockHeight = ADDR_HTILE_BLOCKSIZE_8;
532 AddrHtileIn->pTileInfo = AddrSurfInfoOut->pTileInfo;
533 AddrHtileIn->tileIndex = AddrSurfInfoOut->tileIndex;
534 AddrHtileIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
535
536 ret = AddrComputeHtileInfo(addrlib,
537 AddrHtileIn,
538 AddrHtileOut);
539
540 if (ret == ADDR_OK) {
541 surf->htile_size = AddrHtileOut->htileBytes;
542 surf->htile_slice_size = AddrHtileOut->sliceSize;
543 surf->htile_alignment = AddrHtileOut->baseAlign;
544 }
545 }
546
547 return 0;
548 }
549
550 static void gfx6_set_micro_tile_mode(struct radeon_surf *surf,
551 const struct radeon_info *info)
552 {
553 uint32_t tile_mode = info->si_tile_mode_array[surf->u.legacy.tiling_index[0]];
554
555 if (info->chip_class >= GFX7)
556 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE_NEW(tile_mode);
557 else
558 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE(tile_mode);
559 }
560
561 static unsigned cik_get_macro_tile_index(struct radeon_surf *surf)
562 {
563 unsigned index, tileb;
564
565 tileb = 8 * 8 * surf->bpe;
566 tileb = MIN2(surf->u.legacy.tile_split, tileb);
567
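/* index = log2(tileb / 64) for the power-of-two sizes above. */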
568 for (index = 0; tileb > 64; index++)
569 tileb >>= 1;
570
571 assert(index < 16);
572 return index;
573 }
574
575 static bool get_display_flag(const struct ac_surf_config *config,
576 const struct radeon_surf *surf)
577 {
578 unsigned num_channels = config->info.num_channels;
579 unsigned bpe = surf->bpe;
580
581 if (!config->is_3d &&
582 !config->is_cube &&
583 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
584 surf->flags & RADEON_SURF_SCANOUT &&
585 config->info.samples <= 1 &&
586 surf->blk_w <= 2 && surf->blk_h == 1) {
587 /* subsampled */
588 if (surf->blk_w == 2 && surf->blk_h == 1)
589 return true;
590
591 if (/* RGBA8 or RGBA16F */
592 (bpe >= 4 && bpe <= 8 && num_channels == 4) ||
593 /* R5G6B5 or R5G5B5A1 */
594 (bpe == 2 && num_channels >= 3) ||
595 /* C8 palette */
596 (bpe == 1 && num_channels == 1))
597 return true;
598 }
599 return false;
600 }
601
602 /**
603 * This must be called after the first level is computed.
604 *
605 * Copy surface-global settings like pipe/bank config from level 0 surface
606 * computation, and compute tile swizzle.
607 */
608 static int gfx6_surface_settings(ADDR_HANDLE addrlib,
609 const struct radeon_info *info,
610 const struct ac_surf_config *config,
611 ADDR_COMPUTE_SURFACE_INFO_OUTPUT* csio,
612 struct radeon_surf *surf)
613 {
614 surf->surf_alignment = csio->baseAlign;
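/* addrlib's pipeConfig is off by one vs. the register encoding (see the
 * "+1 compared to GB_TILE_MODE" note in gfx6_compute_surface). */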
615 surf->u.legacy.pipe_config = csio->pTileInfo->pipeConfig - 1;
616 gfx6_set_micro_tile_mode(surf, info);
617
618 /* For 2D modes only. */
619 if (csio->tileMode >= ADDR_TM_2D_TILED_THIN1) {
620 surf->u.legacy.bankw = csio->pTileInfo->bankWidth;
621 surf->u.legacy.bankh = csio->pTileInfo->bankHeight;
622 surf->u.legacy.mtilea = csio->pTileInfo->macroAspectRatio;
623 surf->u.legacy.tile_split = csio->pTileInfo->tileSplitBytes;
624 surf->u.legacy.num_banks = csio->pTileInfo->banks;
625 surf->u.legacy.macro_tile_index = csio->macroModeIndex;
626 } else {
627 surf->u.legacy.macro_tile_index = 0;
628 }
629
630 /* Compute tile swizzle. */
631 /* TODO: fix tile swizzle with mipmapping for GFX6 */
632 if ((info->chip_class >= GFX7 || config->info.levels == 1) &&
633 config->info.surf_index &&
634 surf->u.legacy.level[0].mode == RADEON_SURF_MODE_2D &&
635 !(surf->flags & (RADEON_SURF_Z_OR_SBUFFER | RADEON_SURF_SHAREABLE)) &&
636 !get_display_flag(config, surf)) {
637 ADDR_COMPUTE_BASE_SWIZZLE_INPUT AddrBaseSwizzleIn = {0};
638 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT AddrBaseSwizzleOut = {0};
639
640 AddrBaseSwizzleIn.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
641 AddrBaseSwizzleOut.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
642
643 AddrBaseSwizzleIn.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
644 AddrBaseSwizzleIn.tileIndex = csio->tileIndex;
645 AddrBaseSwizzleIn.macroModeIndex = csio->macroModeIndex;
646 AddrBaseSwizzleIn.pTileInfo = csio->pTileInfo;
647 AddrBaseSwizzleIn.tileMode = csio->tileMode;
648
649 int r = AddrComputeBaseSwizzle(addrlib, &AddrBaseSwizzleIn,
650 &AddrBaseSwizzleOut);
651 if (r != ADDR_OK)
652 return r;
653
654 assert(AddrBaseSwizzleOut.tileSwizzle <=
655 u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
656 surf->tile_swizzle = AddrBaseSwizzleOut.tileSwizzle;
657 }
658 return 0;
659 }
660
661 static void ac_compute_cmask(const struct radeon_info *info,
662 const struct ac_surf_config *config,
663 struct radeon_surf *surf)
664 {
665 unsigned pipe_interleave_bytes = info->pipe_interleave_bytes;
666 unsigned num_pipes = info->num_tile_pipes;
667 unsigned cl_width, cl_height;
668
669 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER || surf->is_linear ||
670 (config->info.samples >= 2 && !surf->fmask_size))
671 return;
672
673 assert(info->chip_class <= GFX8);
674
675 switch (num_pipes) {
676 case 2:
677 cl_width = 32;
678 cl_height = 16;
679 break;
680 case 4:
681 cl_width = 32;
682 cl_height = 32;
683 break;
684 case 8:
685 cl_width = 64;
686 cl_height = 32;
687 break;
688 case 16: /* Hawaii */
689 cl_width = 64;
690 cl_height = 64;
691 break;
692 default:
693 assert(0);
694 return;
695 }
696
697 unsigned base_align = num_pipes * pipe_interleave_bytes;
698
699 unsigned width = align(surf->u.legacy.level[0].nblk_x, cl_width*8);
700 unsigned height = align(surf->u.legacy.level[0].nblk_y, cl_height*8);
701 unsigned slice_elements = (width * height) / (8*8);
702
703 /* Each element of CMASK is a nibble. */
704 unsigned slice_bytes = slice_elements / 2;
705
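/* slice_tile_max counts 128x128-pixel tiles and, like the register field,
 * is stored minus one. */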
706 surf->u.legacy.cmask_slice_tile_max = (width * height) / (128*128);
707 if (surf->u.legacy.cmask_slice_tile_max)
708 surf->u.legacy.cmask_slice_tile_max -= 1;
709
710 unsigned num_layers;
711 if (config->is_3d)
712 num_layers = config->info.depth;
713 else if (config->is_cube)
714 num_layers = 6;
715 else
716 num_layers = config->info.array_size;
717
718 surf->cmask_alignment = MAX2(256, base_align);
719 surf->cmask_slice_size = align(slice_bytes, base_align);
720 surf->cmask_size = surf->cmask_slice_size * num_layers;
721 }
722
723 /**
724 * Fill in the tiling information in \p surf based on the given surface config.
725 *
726 * The following fields of \p surf must be initialized by the caller:
727 * blk_w, blk_h, bpe, flags.
728 */
729 static int gfx6_compute_surface(ADDR_HANDLE addrlib,
730 const struct radeon_info *info,
731 const struct ac_surf_config *config,
732 enum radeon_surf_mode mode,
733 struct radeon_surf *surf)
734 {
735 unsigned level;
736 bool compressed;
737 ADDR_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
738 ADDR_COMPUTE_SURFACE_INFO_OUTPUT AddrSurfInfoOut = {0};
739 ADDR_COMPUTE_DCCINFO_INPUT AddrDccIn = {0};
740 ADDR_COMPUTE_DCCINFO_OUTPUT AddrDccOut = {0};
741 ADDR_COMPUTE_HTILE_INFO_INPUT AddrHtileIn = {0};
742 ADDR_COMPUTE_HTILE_INFO_OUTPUT AddrHtileOut = {0};
743 ADDR_TILEINFO AddrTileInfoIn = {0};
744 ADDR_TILEINFO AddrTileInfoOut = {0};
745 int r;
746
747 AddrSurfInfoIn.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);
748 AddrSurfInfoOut.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
749 AddrDccIn.size = sizeof(ADDR_COMPUTE_DCCINFO_INPUT);
750 AddrDccOut.size = sizeof(ADDR_COMPUTE_DCCINFO_OUTPUT);
751 AddrHtileIn.size = sizeof(ADDR_COMPUTE_HTILE_INFO_INPUT);
752 AddrHtileOut.size = sizeof(ADDR_COMPUTE_HTILE_INFO_OUTPUT);
753 AddrSurfInfoOut.pTileInfo = &AddrTileInfoOut;
754
755 compressed = surf->blk_w == 4 && surf->blk_h == 4;
756
757 /* MSAA requires 2D tiling. */
758 if (config->info.samples > 1)
759 mode = RADEON_SURF_MODE_2D;
760
761 /* DB doesn't support linear layouts. */
762 if (surf->flags & (RADEON_SURF_Z_OR_SBUFFER) &&
763 mode < RADEON_SURF_MODE_1D)
764 mode = RADEON_SURF_MODE_1D;
765
766 /* Set the requested tiling mode. */
767 switch (mode) {
768 case RADEON_SURF_MODE_LINEAR_ALIGNED:
769 AddrSurfInfoIn.tileMode = ADDR_TM_LINEAR_ALIGNED;
770 break;
771 case RADEON_SURF_MODE_1D:
772 AddrSurfInfoIn.tileMode = ADDR_TM_1D_TILED_THIN1;
773 break;
774 case RADEON_SURF_MODE_2D:
775 AddrSurfInfoIn.tileMode = ADDR_TM_2D_TILED_THIN1;
776 break;
777 default:
778 assert(0);
779 }
780
781 /* The format must be set correctly for the allocation of compressed
782 * textures to work. In other cases, setting the bpp is sufficient.
783 */
784 if (compressed) {
785 switch (surf->bpe) {
786 case 8:
787 AddrSurfInfoIn.format = ADDR_FMT_BC1;
788 break;
789 case 16:
790 AddrSurfInfoIn.format = ADDR_FMT_BC3;
791 break;
792 default:
793 assert(0);
794 }
795 }
796 else {
797 AddrDccIn.bpp = AddrSurfInfoIn.bpp = surf->bpe * 8;
798 }
799
800 AddrDccIn.numSamples = AddrSurfInfoIn.numSamples =
801 MAX2(1, config->info.samples);
802 AddrSurfInfoIn.tileIndex = -1;
803
804 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER)) {
805 AddrDccIn.numSamples = AddrSurfInfoIn.numFrags =
806 MAX2(1, config->info.storage_samples);
807 }
808
809 /* Set the micro tile type. */
810 if (surf->flags & RADEON_SURF_SCANOUT)
811 AddrSurfInfoIn.tileType = ADDR_DISPLAYABLE;
812 else if (surf->flags & RADEON_SURF_Z_OR_SBUFFER)
813 AddrSurfInfoIn.tileType = ADDR_DEPTH_SAMPLE_ORDER;
814 else
815 AddrSurfInfoIn.tileType = ADDR_NON_DISPLAYABLE;
816
817 AddrSurfInfoIn.flags.color = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
818 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
819 AddrSurfInfoIn.flags.cube = config->is_cube;
820 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
821 AddrSurfInfoIn.flags.pow2Pad = config->info.levels > 1;
822 AddrSurfInfoIn.flags.tcCompatible = (surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) != 0;
823
824 /* Only degrade the tile mode for space if TC-compatible HTILE hasn't been
825 * requested, because TC-compatible HTILE requires 2D tiling.
826 */
827 AddrSurfInfoIn.flags.opt4Space = !AddrSurfInfoIn.flags.tcCompatible &&
828 !AddrSurfInfoIn.flags.fmask &&
829 config->info.samples <= 1 &&
830 !(surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE);
831
832 /* DCC notes:
833 * - If we add MSAA support, keep in mind that CB can't decompress 8bpp
834 * with samples >= 4.
835 * - Mipmapped array textures have low performance (discovered by a closed
836 * driver team).
837 */
838 AddrSurfInfoIn.flags.dccCompatible =
839 info->chip_class >= GFX8 &&
840 info->has_graphics && /* disable DCC on compute-only chips */
841 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
842 !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
843 !compressed &&
844 ((config->info.array_size == 1 && config->info.depth == 1) ||
845 config->info.levels == 1);
846
847 AddrSurfInfoIn.flags.noStencil = (surf->flags & RADEON_SURF_SBUFFER) == 0;
848 AddrSurfInfoIn.flags.compressZ = !!(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
849
850 /* On GFX7-GFX8, the DB uses the same pitch and tile mode (except tilesplit)
851 * for Z and stencil. This can cause a number of problems which we work
852 * around here:
853 *
854 * - a depth part that is incompatible with mipmapped texturing
855 * - at least on Stoney, entirely incompatible Z/S aspects (e.g.
856 * incorrect tiling applied to the stencil part, stencil buffer
857 * memory accesses that go out of bounds) even without mipmapping
858 *
859 * Some piglit tests that are prone to different types of related
860 * failures:
861 * ./bin/ext_framebuffer_multisample-upsample 2 stencil
862 * ./bin/framebuffer-blit-levels {draw,read} stencil
863 * ./bin/ext_framebuffer_multisample-unaligned-blit N {depth,stencil} {msaa,upsample,downsample}
864 * ./bin/fbo-depth-array fs-writes-{depth,stencil} / {depth,stencil}-{clear,layered-clear,draw}
865 * ./bin/depthstencil-render-miplevels 1024 d=s=z24_s8
866 */
867 int stencil_tile_idx = -1;
868
869 if (AddrSurfInfoIn.flags.depth && !AddrSurfInfoIn.flags.noStencil &&
870 (config->info.levels > 1 || info->family == CHIP_STONEY)) {
871 /* Compute stencilTileIdx that is compatible with the (depth)
872 * tileIdx. This degrades the depth surface if necessary to
873 * ensure that a matching stencilTileIdx exists. */
874 AddrSurfInfoIn.flags.matchStencilTileCfg = 1;
875
876 /* Keep the depth mip-tail compatible with texturing. */
877 AddrSurfInfoIn.flags.noStencil = 1;
878 }
879
880 /* Set preferred macrotile parameters. This is usually required
881 * for shared resources. This is for 2D tiling only. */
882 if (AddrSurfInfoIn.tileMode >= ADDR_TM_2D_TILED_THIN1 &&
883 surf->u.legacy.bankw && surf->u.legacy.bankh &&
884 surf->u.legacy.mtilea && surf->u.legacy.tile_split) {
885 /* If any of these parameters are incorrect, the calculation
886 * will fail. */
887 AddrTileInfoIn.banks = surf->u.legacy.num_banks;
888 AddrTileInfoIn.bankWidth = surf->u.legacy.bankw;
889 AddrTileInfoIn.bankHeight = surf->u.legacy.bankh;
890 AddrTileInfoIn.macroAspectRatio = surf->u.legacy.mtilea;
891 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.tile_split;
892 AddrTileInfoIn.pipeConfig = surf->u.legacy.pipe_config + 1; /* +1 compared to GB_TILE_MODE */
893 AddrSurfInfoIn.flags.opt4Space = 0;
894 AddrSurfInfoIn.pTileInfo = &AddrTileInfoIn;
895
896 /* If AddrSurfInfoIn.pTileInfo is set, Addrlib doesn't set
897 * the tile index, because we are expected to know it if
898 * we know the other parameters.
899 *
900 * This is something that can easily be fixed in Addrlib.
901 * For now, just figure it out here.
902 * Note that only 2D_TILED_THIN1 is handled here.
903 */
904 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
905 assert(AddrSurfInfoIn.tileMode == ADDR_TM_2D_TILED_THIN1);
906
907 if (info->chip_class == GFX6) {
908 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE) {
909 if (surf->bpe == 2)
910 AddrSurfInfoIn.tileIndex = 11; /* 16bpp */
911 else
912 AddrSurfInfoIn.tileIndex = 12; /* 32bpp */
913 } else {
914 if (surf->bpe == 1)
915 AddrSurfInfoIn.tileIndex = 14; /* 8bpp */
916 else if (surf->bpe == 2)
917 AddrSurfInfoIn.tileIndex = 15; /* 16bpp */
918 else if (surf->bpe == 4)
919 AddrSurfInfoIn.tileIndex = 16; /* 32bpp */
920 else
921 AddrSurfInfoIn.tileIndex = 17; /* 64bpp (and 128bpp) */
922 }
923 } else {
924 /* GFX7 - GFX8 */
925 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE)
926 AddrSurfInfoIn.tileIndex = 10; /* 2D displayable */
927 else
928 AddrSurfInfoIn.tileIndex = 14; /* 2D non-displayable */
929
930 /* Addrlib doesn't set this if tileIndex is forced like above. */
931 AddrSurfInfoOut.macroModeIndex = cik_get_macro_tile_index(surf);
932 }
933 }
934
935 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
936 surf->num_dcc_levels = 0;
937 surf->surf_size = 0;
938 surf->dcc_size = 0;
939 surf->dcc_alignment = 1;
940 surf->htile_size = 0;
941 surf->htile_slice_size = 0;
942 surf->htile_alignment = 1;
943
944 const bool only_stencil = (surf->flags & RADEON_SURF_SBUFFER) &&
945 !(surf->flags & RADEON_SURF_ZBUFFER);
946
947 /* Calculate texture layout information. */
948 if (!only_stencil) {
949 for (level = 0; level < config->info.levels; level++) {
950 r = gfx6_compute_level(addrlib, config, surf, false, level, compressed,
951 &AddrSurfInfoIn, &AddrSurfInfoOut,
952 &AddrDccIn, &AddrDccOut, &AddrHtileIn, &AddrHtileOut);
953 if (r)
954 return r;
955
956 if (level > 0)
957 continue;
958
959 if (!AddrSurfInfoOut.tcCompatible) {
960 AddrSurfInfoIn.flags.tcCompatible = 0;
961 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
962 }
963
964 if (AddrSurfInfoIn.flags.matchStencilTileCfg) {
965 AddrSurfInfoIn.flags.matchStencilTileCfg = 0;
966 AddrSurfInfoIn.tileIndex = AddrSurfInfoOut.tileIndex;
967 stencil_tile_idx = AddrSurfInfoOut.stencilTileIdx;
968
969 assert(stencil_tile_idx >= 0);
970 }
971
972 r = gfx6_surface_settings(addrlib, info, config,
973 &AddrSurfInfoOut, surf);
974 if (r)
975 return r;
976 }
977 }
978
979 /* Calculate texture layout information for stencil. */
980 if (surf->flags & RADEON_SURF_SBUFFER) {
981 AddrSurfInfoIn.tileIndex = stencil_tile_idx;
982 AddrSurfInfoIn.bpp = 8;
983 AddrSurfInfoIn.flags.depth = 0;
984 AddrSurfInfoIn.flags.stencil = 1;
985 AddrSurfInfoIn.flags.tcCompatible = 0;
986 /* This will be ignored if AddrSurfInfoIn.pTileInfo is NULL. */
987 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.stencil_tile_split;
988
989 for (level = 0; level < config->info.levels; level++) {
990 r = gfx6_compute_level(addrlib, config, surf, true, level, compressed,
991 &AddrSurfInfoIn, &AddrSurfInfoOut,
992 &AddrDccIn, &AddrDccOut,
993 NULL, NULL);
994 if (r)
995 return r;
996
997 /* DB uses the depth pitch for both stencil and depth. */
998 if (!only_stencil) {
999 if (surf->u.legacy.stencil_level[level].nblk_x !=
1000 surf->u.legacy.level[level].nblk_x)
1001 surf->u.legacy.stencil_adjusted = true;
1002 } else {
1003 surf->u.legacy.level[level].nblk_x =
1004 surf->u.legacy.stencil_level[level].nblk_x;
1005 }
1006
1007 if (level == 0) {
1008 if (only_stencil) {
1009 r = gfx6_surface_settings(addrlib, info, config,
1010 &AddrSurfInfoOut, surf);
1011 if (r)
1012 return r;
1013 }
1014
1015 /* For 2D modes only. */
1016 if (AddrSurfInfoOut.tileMode >= ADDR_TM_2D_TILED_THIN1) {
1017 surf->u.legacy.stencil_tile_split =
1018 AddrSurfInfoOut.pTileInfo->tileSplitBytes;
1019 }
1020 }
1021 }
1022 }
1023
1024 /* Compute FMASK. */
1025 if (config->info.samples >= 2 && AddrSurfInfoIn.flags.color &&
1026 info->has_graphics && !(surf->flags & RADEON_SURF_NO_FMASK)) {
1027 ADDR_COMPUTE_FMASK_INFO_INPUT fin = {0};
1028 ADDR_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
1029 ADDR_TILEINFO fmask_tile_info = {};
1030
1031 fin.size = sizeof(fin);
1032 fout.size = sizeof(fout);
1033
1034 fin.tileMode = AddrSurfInfoOut.tileMode;
1035 fin.pitch = AddrSurfInfoOut.pitch;
1036 fin.height = config->info.height;
1037 fin.numSlices = AddrSurfInfoIn.numSlices;
1038 fin.numSamples = AddrSurfInfoIn.numSamples;
1039 fin.numFrags = AddrSurfInfoIn.numFrags;
1040 fin.tileIndex = -1;
1041 fout.pTileInfo = &fmask_tile_info;
1042
1043 r = AddrComputeFmaskInfo(addrlib, &fin, &fout);
1044 if (r)
1045 return r;
1046
1047 surf->fmask_size = fout.fmaskBytes;
1048 surf->fmask_alignment = fout.baseAlign;
1049 surf->fmask_tile_swizzle = 0;
1050
1051 surf->u.legacy.fmask.slice_tile_max =
1052 (fout.pitch * fout.height) / 64;
1053 if (surf->u.legacy.fmask.slice_tile_max)
1054 surf->u.legacy.fmask.slice_tile_max -= 1;
1055
1056 surf->u.legacy.fmask.tiling_index = fout.tileIndex;
1057 surf->u.legacy.fmask.bankh = fout.pTileInfo->bankHeight;
1058 surf->u.legacy.fmask.pitch_in_pixels = fout.pitch;
1059 surf->u.legacy.fmask.slice_size = fout.sliceSize;
1060
1061 /* Compute tile swizzle for FMASK. */
1062 if (config->info.fmask_surf_index &&
1063 !(surf->flags & RADEON_SURF_SHAREABLE)) {
1064 ADDR_COMPUTE_BASE_SWIZZLE_INPUT xin = {0};
1065 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT xout = {0};
1066
1067 xin.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
1068 xout.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
1069
1070 /* This counter starts from 1 instead of 0. */
1071 xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
1072 xin.tileIndex = fout.tileIndex;
1073 xin.macroModeIndex = fout.macroModeIndex;
1074 xin.pTileInfo = fout.pTileInfo;
1075 xin.tileMode = fin.tileMode;
1076
1077 int r = AddrComputeBaseSwizzle(addrlib, &xin, &xout);
1078 if (r != ADDR_OK)
1079 return r;
1080
1081 assert(xout.tileSwizzle <=
1082 u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1083 surf->fmask_tile_swizzle = xout.tileSwizzle;
1084 }
1085 }
1086
1087 /* Recalculate the whole DCC miptree size including disabled levels.
1088 * This is what addrlib does, but calling addrlib would be a lot more
1089 * complicated.
1090 */
1091 if (surf->dcc_size && config->info.levels > 1) {
1092 /* The smallest miplevels that are never compressed by DCC
1093 * still read the DCC buffer via TC if the base level uses DCC,
1094 * and for some reason the DCC buffer needs to be larger if
1095 * the miptree uses non-zero tile_swizzle. Otherwise there are
1096 * VM faults.
1097 *
1098 * "dcc_alignment * 4" was determined by trial and error.
1099 */
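/* DCC stores one metadata byte per 256-byte block of color data, hence the >> 8. */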
1100 surf->dcc_size = align64(surf->surf_size >> 8,
1101 surf->dcc_alignment * 4);
1102 }
1103
1104 /* Make sure HTILE covers the whole miptree, because the shader reads
1105 * TC-compatible HTILE even for levels where it's disabled by DB.
1106 */
1107 if (surf->htile_size && config->info.levels > 1 &&
1108 surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) {
1109 /* MSAA can't occur with levels > 1, so ignore the sample count. */
1110 const unsigned total_pixels = surf->surf_size / surf->bpe;
1111 const unsigned htile_block_size = 8 * 8;
1112 const unsigned htile_element_size = 4;
1113
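/* One 4-byte HTILE element covers one 8x8-pixel block. */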
1114 surf->htile_size = (total_pixels / htile_block_size) *
1115 htile_element_size;
1116 surf->htile_size = align(surf->htile_size, surf->htile_alignment);
1117 } else if (!surf->htile_size) {
1118 /* Unset this if HTILE is not present. */
1119 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1120 }
1121
1122 surf->is_linear = surf->u.legacy.level[0].mode == RADEON_SURF_MODE_LINEAR_ALIGNED;
1123 surf->is_displayable = (surf->is_linear ||
1124 surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY ||
1125 surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER /* rotated */) &&
1126 !surf->dcc_size;
1127
1128 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
1129 * used at the same time. This case is not currently expected to occur
1130 * because we don't use the rotated mode. Enforce this restriction on all chips
1131 * to facilitate testing.
1132 */
1133 if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER) {
1134 assert(!"rotate micro tile mode is unsupported");
1135 return ADDR_ERROR;
1136 }
1137
1138 ac_compute_cmask(info, config, surf);
1139 return 0;
1140 }
1141
1142 /* This is only called when expecting a tiled layout. */
1143 static int
1144 gfx9_get_preferred_swizzle_mode(ADDR_HANDLE addrlib,
1145 struct radeon_surf *surf,
1146 ADDR2_COMPUTE_SURFACE_INFO_INPUT *in,
1147 bool is_fmask, AddrSwizzleMode *swizzle_mode)
1148 {
1149 ADDR_E_RETURNCODE ret;
1150 ADDR2_GET_PREFERRED_SURF_SETTING_INPUT sin = {0};
1151 ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT sout = {0};
1152
1153 sin.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_INPUT);
1154 sout.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT);
1155
1156 sin.flags = in->flags;
1157 sin.resourceType = in->resourceType;
1158 sin.format = in->format;
1159 sin.resourceLoction = ADDR_RSRC_LOC_INVIS;
1160 /* TODO: We could allow some of these: */
1161 sin.forbiddenBlock.micro = 1; /* don't allow the 256B swizzle modes */
1162 sin.forbiddenBlock.var = 1; /* don't allow the variable-sized swizzle modes */
1163 sin.bpp = in->bpp;
1164 sin.width = in->width;
1165 sin.height = in->height;
1166 sin.numSlices = in->numSlices;
1167 sin.numMipLevels = in->numMipLevels;
1168 sin.numSamples = in->numSamples;
1169 sin.numFrags = in->numFrags;
1170
1171 if (is_fmask) {
1172 sin.flags.display = 0;
1173 sin.flags.color = 0;
1174 sin.flags.fmask = 1;
1175 }
1176
1177 if (surf->flags & RADEON_SURF_FORCE_MICRO_TILE_MODE) {
1178 sin.forbiddenBlock.linear = 1;
1179
1180 if (surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY)
1181 sin.preferredSwSet.sw_D = 1;
1182 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_STANDARD)
1183 sin.preferredSwSet.sw_S = 1;
1184 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_DEPTH)
1185 sin.preferredSwSet.sw_Z = 1;
1186 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER)
1187 sin.preferredSwSet.sw_R = 1;
1188 }
1189
1190 ret = Addr2GetPreferredSurfaceSetting(addrlib, &sin, &sout);
1191 if (ret != ADDR_OK)
1192 return ret;
1193
1194 *swizzle_mode = sout.swizzleMode;
1195 return 0;
1196 }
1197
1198 static bool is_dcc_supported_by_CB(const struct radeon_info *info, unsigned sw_mode)
1199 {
1200 if (info->chip_class >= GFX10)
1201 return sw_mode == ADDR_SW_64KB_Z_X || sw_mode == ADDR_SW_64KB_R_X;
1202
1203 return sw_mode != ADDR_SW_LINEAR;
1204 }
1205
1206 ASSERTED static bool is_dcc_supported_by_L2(const struct radeon_info *info,
1207 const struct radeon_surf *surf)
1208 {
1209 if (info->chip_class <= GFX9) {
1210 /* Only independent 64B blocks are supported. */
1211 return surf->u.gfx9.dcc.independent_64B_blocks &&
1212 !surf->u.gfx9.dcc.independent_128B_blocks &&
1213 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B;
1214 }
1215
1216 if (info->family == CHIP_NAVI10) {
1217 /* Only independent 128B blocks are supported. */
1218 return !surf->u.gfx9.dcc.independent_64B_blocks &&
1219 surf->u.gfx9.dcc.independent_128B_blocks &&
1220 surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B;
1221 }
1222
1223 if (info->family == CHIP_NAVI12 ||
1224 info->family == CHIP_NAVI14) {
1225 /* Either 64B or 128B can be used, but not both.
1226 * If 64B is used, DCC image stores are unsupported.
1227 */
1228 return surf->u.gfx9.dcc.independent_64B_blocks !=
1229 surf->u.gfx9.dcc.independent_128B_blocks &&
1230 (!surf->u.gfx9.dcc.independent_64B_blocks ||
1231 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B) &&
1232 (!surf->u.gfx9.dcc.independent_128B_blocks ||
1233 surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B);
1234 }
1235
1236 /* 128B is recommended, but 64B can be set too if needed for 4K by DCN.
1237 * Since there is no reason to ever disable 128B, require it.
1238 * DCC image stores are always supported.
1239 */
1240 return surf->u.gfx9.dcc.independent_128B_blocks &&
1241 surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B;
1242 }
1243
1244 static bool is_dcc_supported_by_DCN(const struct radeon_info *info,
1245 const struct ac_surf_config *config,
1246 const struct radeon_surf *surf,
1247 bool rb_aligned, bool pipe_aligned)
1248 {
1249 if (!info->use_display_dcc_unaligned &&
1250 !info->use_display_dcc_with_retile_blit)
1251 return false;
1252
1253 /* 16bpp and 64bpp are more complicated, so they are disallowed for now. */
1254 if (surf->bpe != 4)
1255 return false;
1256
1257 /* Handle unaligned DCC. */
1258 if (info->use_display_dcc_unaligned &&
1259 (rb_aligned || pipe_aligned))
1260 return false;
1261
1262 switch (info->chip_class) {
1263 case GFX9:
1264 /* There are more constraints, but we always set
1265 * INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B,
1266 * which always works.
1267 */
1268 assert(surf->u.gfx9.dcc.independent_64B_blocks &&
1269 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B);
1270 return true;
1271 case GFX10:
1272 case GFX10_3:
1273 /* DCN requires INDEPENDENT_128B_BLOCKS = 0 only on Navi1x. */
1274 if (info->chip_class == GFX10 &&
1275 surf->u.gfx9.dcc.independent_128B_blocks)
1276 return false;
1277
1278 /* For 4K, DCN requires INDEPENDENT_64B_BLOCKS = 1. */
1279 return ((config->info.width <= 2560 &&
1280 config->info.height <= 2560) ||
1281 (surf->u.gfx9.dcc.independent_64B_blocks &&
1282 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B));
1283 default:
1284 unreachable("unhandled chip");
1285 return false;
1286 }
1287 }
1288
1289 static int gfx9_compute_miptree(struct ac_addrlib *addrlib,
1290 const struct radeon_info *info,
1291 const struct ac_surf_config *config,
1292 struct radeon_surf *surf, bool compressed,
1293 ADDR2_COMPUTE_SURFACE_INFO_INPUT *in)
1294 {
1295 ADDR2_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {};
1296 ADDR2_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
1297 ADDR_E_RETURNCODE ret;
1298
1299 out.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_OUTPUT);
1300 out.pMipInfo = mip_info;
1301
1302 ret = Addr2ComputeSurfaceInfo(addrlib->handle, in, &out);
1303 if (ret != ADDR_OK)
1304 return ret;
1305
1306 if (in->flags.stencil) {
1307 surf->u.gfx9.stencil.swizzle_mode = in->swizzleMode;
1308 surf->u.gfx9.stencil.epitch = out.epitchIsHeight ? out.mipChainHeight - 1 :
1309 out.mipChainPitch - 1;
1310 surf->surf_alignment = MAX2(surf->surf_alignment, out.baseAlign);
1311 surf->u.gfx9.stencil_offset = align(surf->surf_size, out.baseAlign);
1312 surf->surf_size = surf->u.gfx9.stencil_offset + out.surfSize;
1313 return 0;
1314 }
1315
1316 surf->u.gfx9.surf.swizzle_mode = in->swizzleMode;
1317 surf->u.gfx9.surf.epitch = out.epitchIsHeight ? out.mipChainHeight - 1 :
1318 out.mipChainPitch - 1;
1319
1320 /* CMASK fast clear uses these even if FMASK isn't allocated.
1321 * FMASK only supports the Z swizzle modes, whose numbers are multiples of 4.
1322 */
1323 surf->u.gfx9.fmask.swizzle_mode = surf->u.gfx9.surf.swizzle_mode & ~0x3;
1324 surf->u.gfx9.fmask.epitch = surf->u.gfx9.surf.epitch;
1325
1326 surf->u.gfx9.surf_slice_size = out.sliceSize;
1327 surf->u.gfx9.surf_pitch = out.pitch;
1328 if (!compressed && surf->blk_w > 1 && out.pitch == out.pixelPitch &&
1329 surf->u.gfx9.surf.swizzle_mode == ADDR_SW_LINEAR) {
1330 /* Adjust surf_pitch to be in element units,
1331 * not in pixels */
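/* The 256 / bpe factor keeps the pitch a multiple of 256 bytes, which
 * GFX9 requires for linear surfaces. */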
1332 surf->u.gfx9.surf_pitch =
1333 align(surf->u.gfx9.surf_pitch / surf->blk_w, 256 / surf->bpe);
1334 surf->u.gfx9.surf.epitch = MAX2(surf->u.gfx9.surf.epitch,
1335 surf->u.gfx9.surf_pitch * surf->blk_w - 1);
1336 }
1337 surf->u.gfx9.surf_height = out.height;
1338 surf->surf_size = out.surfSize;
1339 surf->surf_alignment = out.baseAlign;
1340
1341 if (in->swizzleMode == ADDR_SW_LINEAR) {
1342 for (unsigned i = 0; i < in->numMipLevels; i++) {
1343 surf->u.gfx9.offset[i] = mip_info[i].offset;
1344 surf->u.gfx9.pitch[i] = mip_info[i].pitch;
1345 }
1346 }
1347
1348 if (in->flags.depth) {
1349 assert(in->swizzleMode != ADDR_SW_LINEAR);
1350
1351 if (surf->flags & RADEON_SURF_NO_HTILE)
1352 return 0;
1353
1354 /* HTILE */
1355 ADDR2_COMPUTE_HTILE_INFO_INPUT hin = {0};
1356 ADDR2_COMPUTE_HTILE_INFO_OUTPUT hout = {0};
1357
1358 hin.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_INPUT);
1359 hout.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_OUTPUT);
1360
1361 assert(in->flags.metaPipeUnaligned == 0);
1362 assert(in->flags.metaRbUnaligned == 0);
1363
1364 hin.hTileFlags.pipeAligned = 1;
1365 hin.hTileFlags.rbAligned = 1;
1366 hin.depthFlags = in->flags;
1367 hin.swizzleMode = in->swizzleMode;
1368 hin.unalignedWidth = in->width;
1369 hin.unalignedHeight = in->height;
1370 hin.numSlices = in->numSlices;
1371 hin.numMipLevels = in->numMipLevels;
1372 hin.firstMipIdInTail = out.firstMipIdInTail;
1373
1374 ret = Addr2ComputeHtileInfo(addrlib->handle, &hin, &hout);
1375 if (ret != ADDR_OK)
1376 return ret;
1377
1378 surf->htile_size = hout.htileBytes;
1379 surf->htile_slice_size = hout.sliceSize;
1380 surf->htile_alignment = hout.baseAlign;
1381 return 0;
1382 }
1383
1384 {
1385 /* Compute tile swizzle for the color surface.
1386 * All *_X and *_T modes can use the swizzle.
1387 */
1388 if (config->info.surf_index &&
1389 in->swizzleMode >= ADDR_SW_64KB_Z_T &&
1390 !out.mipChainInTail &&
1391 !(surf->flags & RADEON_SURF_SHAREABLE) &&
1392 !in->flags.display) {
1393 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
1394 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
1395
1396 xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
1397 xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
1398
1399 xin.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
1400 xin.flags = in->flags;
1401 xin.swizzleMode = in->swizzleMode;
1402 xin.resourceType = in->resourceType;
1403 xin.format = in->format;
1404 xin.numSamples = in->numSamples;
1405 xin.numFrags = in->numFrags;
1406
1407 ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
1408 if (ret != ADDR_OK)
1409 return ret;
1410
1411 assert(xout.pipeBankXor <=
1412 u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1413 surf->tile_swizzle = xout.pipeBankXor;
1414 }
1415
1416 /* DCC */
1417 if (info->has_graphics &&
1418 !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
1419 !compressed &&
1420 is_dcc_supported_by_CB(info, in->swizzleMode) &&
1421 (!in->flags.display ||
1422 is_dcc_supported_by_DCN(info, config, surf,
1423 !in->flags.metaRbUnaligned,
1424 !in->flags.metaPipeUnaligned))) {
1425 ADDR2_COMPUTE_DCCINFO_INPUT din = {0};
1426 ADDR2_COMPUTE_DCCINFO_OUTPUT dout = {0};
1427 ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {};
1428
1429 din.size = sizeof(ADDR2_COMPUTE_DCCINFO_INPUT);
1430 dout.size = sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT);
1431 dout.pMipInfo = meta_mip_info;
1432
1433 din.dccKeyFlags.pipeAligned = !in->flags.metaPipeUnaligned;
1434 din.dccKeyFlags.rbAligned = !in->flags.metaRbUnaligned;
1435 din.colorFlags = in->flags;
1436 din.resourceType = in->resourceType;
1437 din.swizzleMode = in->swizzleMode;
1438 din.bpp = in->bpp;
1439 din.unalignedWidth = in->width;
1440 din.unalignedHeight = in->height;
1441 din.numSlices = in->numSlices;
1442 din.numFrags = in->numFrags;
1443 din.numMipLevels = in->numMipLevels;
1444 din.dataSurfaceSize = out.surfSize;
1445 din.firstMipIdInTail = out.firstMipIdInTail;
1446
1447 ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
1448 if (ret != ADDR_OK)
1449 return ret;
1450
1451 surf->u.gfx9.dcc.rb_aligned = din.dccKeyFlags.rbAligned;
1452 surf->u.gfx9.dcc.pipe_aligned = din.dccKeyFlags.pipeAligned;
1453 surf->u.gfx9.dcc_block_width = dout.compressBlkWidth;
1454 surf->u.gfx9.dcc_block_height = dout.compressBlkHeight;
1455 surf->u.gfx9.dcc_block_depth = dout.compressBlkDepth;
1456 surf->dcc_size = dout.dccRamSize;
1457 surf->dcc_alignment = dout.dccRamBaseAlign;
1458 surf->num_dcc_levels = in->numMipLevels;
1459
1460 /* Disable DCC for levels that are in the mip tail.
1461 *
1462 * There are two issues that this is intended to
1463 * address:
1464 *
1465 * 1. Multiple mip levels may share a cache line. This
1466 * can lead to corruption when switching between
1467 * rendering to different mip levels because the
1468 * RBs don't maintain coherency.
1469 *
1470 * 2. Texturing with metadata after rendering sometimes
1471 * fails with corruption, probably for a similar
1472 * reason.
1473 *
1474 * Working around these issues for all levels in the
1475 * mip tail may be overly conservative, but it's what
1476 * Vulkan does.
1477 *
1478 * Alternative solutions that also work but are worse:
1479 * - Disable DCC entirely.
1480 * - Flush TC L2 after rendering.
1481 */
1482 for (unsigned i = 0; i < in->numMipLevels; i++) {
1483 if (meta_mip_info[i].inMiptail) {
1484 /* GFX10 can only compress the first level
1485 * in the mip tail.
1486 *
1487 * TODO: Try to do the same thing for gfx9
1488 * if there are no regressions.
1489 */
1490 if (info->chip_class >= GFX10)
1491 surf->num_dcc_levels = i + 1;
1492 else
1493 surf->num_dcc_levels = i;
1494 break;
1495 }
1496 }
1497
1498 if (!surf->num_dcc_levels)
1499 surf->dcc_size = 0;
1500
1501 surf->u.gfx9.display_dcc_size = surf->dcc_size;
1502 surf->u.gfx9.display_dcc_alignment = surf->dcc_alignment;
1503 surf->u.gfx9.display_dcc_pitch_max = dout.pitch - 1;
1504
1505 /* Compute displayable DCC. */
1506 if (in->flags.display &&
1507 surf->num_dcc_levels &&
1508 info->use_display_dcc_with_retile_blit) {
1509 /* Compute displayable DCC info. */
1510 din.dccKeyFlags.pipeAligned = 0;
1511 din.dccKeyFlags.rbAligned = 0;
1512
1513 assert(din.numSlices == 1);
1514 assert(din.numMipLevels == 1);
1515 assert(din.numFrags == 1);
1516 assert(surf->tile_swizzle == 0);
1517 assert(surf->u.gfx9.dcc.pipe_aligned ||
1518 surf->u.gfx9.dcc.rb_aligned);
1519
1520 ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
1521 if (ret != ADDR_OK)
1522 return ret;
1523
1524 surf->u.gfx9.display_dcc_size = dout.dccRamSize;
1525 surf->u.gfx9.display_dcc_alignment = dout.dccRamBaseAlign;
1526 surf->u.gfx9.display_dcc_pitch_max = dout.pitch - 1;
1527 assert(surf->u.gfx9.display_dcc_size <= surf->dcc_size);
1528
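/* 16-bit entries are sufficient when every offset into either DCC buffer
 * fits in 16 bits (buffer size <= 64K). */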
1529 surf->u.gfx9.dcc_retile_use_uint16 =
1530 surf->u.gfx9.display_dcc_size <= UINT16_MAX + 1 &&
1531 surf->dcc_size <= UINT16_MAX + 1;
1532
1533 /* Align the retile map size to get more hash table hits and
1534 * decrease the maximum memory footprint when all retile maps
1535 * are cached in the hash table.
1536 */
1537 unsigned retile_dim[2] = {in->width, in->height};
1538
1539 for (unsigned i = 0; i < 2; i++) {
1540 /* Increase the alignment as the size increases.
1541 * Greater alignment increases retile compute work,
1542 * but decreases maximum memory footprint for the cache.
1543 *
1544 * With this alignment, the worst case memory footprint of
1545 * the cache is:
1546 * 1920x1080: 55 MB
1547 * 2560x1440: 99 MB
1548 * 3840x2160: 305 MB
1549 *
1550 * The worst case size in MB can be computed in Haskell as follows:
1551 * (sum (map get_retile_size (map get_dcc_size (deduplicate (map align_pair
1552 * [(i*16,j*16) | i <- [1..maxwidth`div`16], j <- [1..maxheight`div`16]]))))) `div` 1024^2
1553 * where
1554 * alignment x = if x <= 512 then 16 else if x <= 1024 then 32 else if x <= 2048 then 64 else 128
1555 * align x = (x + (alignment x) - 1) `div` (alignment x) * (alignment x)
1556 * align_pair e = (align (fst e), align (snd e))
1557 * deduplicate = map head . groupBy (\ a b -> ((fst a) == (fst b)) && ((snd a) == (snd b))) . sortBy compare
1558 * get_dcc_size e = ((fst e) * (snd e) * bpp) `div` 256
1559 * get_retile_size dcc_size = dcc_size * 2 * (if dcc_size <= 2^16 then 2 else 4)
1560 * bpp = 4; maxwidth = 3840; maxheight = 2160
1561 */
1562 if (retile_dim[i] <= 512)
1563 retile_dim[i] = align(retile_dim[i], 16);
1564 else if (retile_dim[i] <= 1024)
1565 retile_dim[i] = align(retile_dim[i], 32);
1566 else if (retile_dim[i] <= 2048)
1567 retile_dim[i] = align(retile_dim[i], 64);
1568 else
1569 retile_dim[i] = align(retile_dim[i], 128);
1570
1571 /* Don't align more than the DCC pixel alignment. */
1572 assert(dout.metaBlkWidth >= 128 && dout.metaBlkHeight >= 128);
1573 }
1574
1575 surf->u.gfx9.dcc_retile_num_elements =
1576 DIV_ROUND_UP(retile_dim[0], dout.compressBlkWidth) *
1577 DIV_ROUND_UP(retile_dim[1], dout.compressBlkHeight) * 2;
1578 /* Align the size to 4 (for the compute shader). */
1579 surf->u.gfx9.dcc_retile_num_elements =
1580 align(surf->u.gfx9.dcc_retile_num_elements, 4);
1581
1582 if (!(surf->flags & RADEON_SURF_IMPORTED)) {
1583 /* Compute address mapping from non-displayable to displayable DCC. */
1584 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin;
1585 memset(&addrin, 0, sizeof(addrin));
1586 addrin.size = sizeof(addrin);
1587 addrin.swizzleMode = din.swizzleMode;
1588 addrin.resourceType = din.resourceType;
1589 addrin.bpp = din.bpp;
1590 addrin.numSlices = 1;
1591 addrin.numMipLevels = 1;
1592 addrin.numFrags = 1;
1593 addrin.pitch = dout.pitch;
1594 addrin.height = dout.height;
1595 addrin.compressBlkWidth = dout.compressBlkWidth;
1596 addrin.compressBlkHeight = dout.compressBlkHeight;
1597 addrin.compressBlkDepth = dout.compressBlkDepth;
1598 addrin.metaBlkWidth = dout.metaBlkWidth;
1599 addrin.metaBlkHeight = dout.metaBlkHeight;
1600 addrin.metaBlkDepth = dout.metaBlkDepth;
1601 addrin.dccRamSliceSize = 0; /* Don't care for non-layered images. */
1602
1603 surf->u.gfx9.dcc_retile_map =
1604 ac_compute_dcc_retile_map(addrlib, info,
1605 retile_dim[0], retile_dim[1],
1606 surf->u.gfx9.dcc.rb_aligned,
1607 surf->u.gfx9.dcc.pipe_aligned,
1608 surf->u.gfx9.dcc_retile_use_uint16,
1609 surf->u.gfx9.dcc_retile_num_elements,
1610 &addrin);
1611 if (!surf->u.gfx9.dcc_retile_map)
1612 return ADDR_OUTOFMEMORY;
1613 }
1614 }
1615 }
1616
1617 /* FMASK */
1618 if (in->numSamples > 1 && info->has_graphics &&
1619 !(surf->flags & RADEON_SURF_NO_FMASK)) {
1620 ADDR2_COMPUTE_FMASK_INFO_INPUT fin = {0};
1621 ADDR2_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
1622
1623 fin.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_INPUT);
1624 fout.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_OUTPUT);
1625
1626 ret = gfx9_get_preferred_swizzle_mode(addrlib->handle, surf, in,
1627 true, &fin.swizzleMode);
1628 if (ret != ADDR_OK)
1629 return ret;
1630
1631 fin.unalignedWidth = in->width;
1632 fin.unalignedHeight = in->height;
1633 fin.numSlices = in->numSlices;
1634 fin.numSamples = in->numSamples;
1635 fin.numFrags = in->numFrags;
1636
1637 ret = Addr2ComputeFmaskInfo(addrlib->handle, &fin, &fout);
1638 if (ret != ADDR_OK)
1639 return ret;
1640
1641 surf->u.gfx9.fmask.swizzle_mode = fin.swizzleMode;
1642 surf->u.gfx9.fmask.epitch = fout.pitch - 1;
1643 surf->fmask_size = fout.fmaskBytes;
1644 surf->fmask_alignment = fout.baseAlign;
1645
1646 /* Compute tile swizzle for the FMASK surface. */
1647 if (config->info.fmask_surf_index &&
1648 fin.swizzleMode >= ADDR_SW_64KB_Z_T &&
1649 !(surf->flags & RADEON_SURF_SHAREABLE)) {
1650 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
1651 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
1652
1653 xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
1654 xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
1655
1656 /* This counter starts from 1 instead of 0. */
1657 xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
1658 xin.flags = in->flags;
1659 xin.swizzleMode = fin.swizzleMode;
1660 xin.resourceType = in->resourceType;
1661 xin.format = in->format;
1662 xin.numSamples = in->numSamples;
1663 xin.numFrags = in->numFrags;
1664
1665 ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
1666 if (ret != ADDR_OK)
1667 return ret;
1668
1669 assert(xout.pipeBankXor <=
1670 u_bit_consecutive(0, sizeof(surf->fmask_tile_swizzle) * 8));
1671 surf->fmask_tile_swizzle = xout.pipeBankXor;
1672 }
1673 }
1674
1675 /* CMASK -- on GFX10 only for FMASK */
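/* (Single-sample CMASK is only allocated on GFX9 and older; on GFX10 it is
 *  kept only as FMASK metadata for MSAA, presumably because color fast
 *  clears go through DCC there.) */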
1676 if (in->swizzleMode != ADDR_SW_LINEAR &&
1677 in->resourceType == ADDR_RSRC_TEX_2D &&
1678 ((info->chip_class <= GFX9 &&
1679 in->numSamples == 1 &&
1680 in->flags.metaPipeUnaligned == 0 &&
1681 in->flags.metaRbUnaligned == 0) ||
1682 (surf->fmask_size && in->numSamples >= 2))) {
1683 ADDR2_COMPUTE_CMASK_INFO_INPUT cin = {0};
1684 ADDR2_COMPUTE_CMASK_INFO_OUTPUT cout = {0};
1685
1686 cin.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_INPUT);
1687 cout.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_OUTPUT);
1688
1689 assert(in->flags.metaPipeUnaligned == 0);
1690 assert(in->flags.metaRbUnaligned == 0);
1691
1692 cin.cMaskFlags.pipeAligned = 1;
1693 cin.cMaskFlags.rbAligned = 1;
1694 cin.colorFlags = in->flags;
1695 cin.resourceType = in->resourceType;
1696 cin.unalignedWidth = in->width;
1697 cin.unalignedHeight = in->height;
1698 cin.numSlices = in->numSlices;
1699
1700 if (in->numSamples > 1)
1701 cin.swizzleMode = surf->u.gfx9.fmask.swizzle_mode;
1702 else
1703 cin.swizzleMode = in->swizzleMode;
1704
1705 ret = Addr2ComputeCmaskInfo(addrlib->handle, &cin, &cout);
1706 if (ret != ADDR_OK)
1707 return ret;
1708
1709 surf->cmask_size = cout.cmaskBytes;
1710 surf->cmask_alignment = cout.baseAlign;
1711 }
1712 }
1713
1714 return 0;
1715 }
1716
1717 static int gfx9_compute_surface(struct ac_addrlib *addrlib,
1718 const struct radeon_info *info,
1719 const struct ac_surf_config *config,
1720 enum radeon_surf_mode mode,
1721 struct radeon_surf *surf)
1722 {
1723 bool compressed;
1724 ADDR2_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
1725 int r;
1726
1727 AddrSurfInfoIn.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_INPUT);
1728
1729 compressed = surf->blk_w == 4 && surf->blk_h == 4;
1730
1731 /* The format must be set correctly for the allocation of compressed
1732 * textures to work. In other cases, setting the bpp is sufficient. */
1733 if (compressed) {
1734 switch (surf->bpe) {
1735 case 8:
1736 AddrSurfInfoIn.format = ADDR_FMT_BC1;
1737 break;
1738 case 16:
1739 AddrSurfInfoIn.format = ADDR_FMT_BC3;
1740 break;
1741 default:
1742 assert(0);
1743 }
1744 } else {
1745 switch (surf->bpe) {
1746 case 1:
1747 assert(!(surf->flags & RADEON_SURF_ZBUFFER));
1748 AddrSurfInfoIn.format = ADDR_FMT_8;
1749 break;
1750 case 2:
1751 assert(surf->flags & RADEON_SURF_ZBUFFER ||
1752 !(surf->flags & RADEON_SURF_SBUFFER));
1753 AddrSurfInfoIn.format = ADDR_FMT_16;
1754 break;
1755 case 4:
1756 assert(surf->flags & RADEON_SURF_ZBUFFER ||
1757 !(surf->flags & RADEON_SURF_SBUFFER));
1758 AddrSurfInfoIn.format = ADDR_FMT_32;
1759 break;
1760 case 8:
1761 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1762 AddrSurfInfoIn.format = ADDR_FMT_32_32;
1763 break;
1764 case 12:
1765 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1766 AddrSurfInfoIn.format = ADDR_FMT_32_32_32;
1767 break;
1768 case 16:
1769 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1770 AddrSurfInfoIn.format = ADDR_FMT_32_32_32_32;
1771 break;
1772 default:
1773 assert(0);
1774 }
1775 AddrSurfInfoIn.bpp = surf->bpe * 8;
1776 }
1777
1778 bool is_color_surface = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
1779 AddrSurfInfoIn.flags.color = is_color_surface &&
1780 !(surf->flags & RADEON_SURF_NO_RENDER_TARGET);
1781 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
1782 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
1783 /* flags.texture currently refers to TC-compatible HTILE */
1784 AddrSurfInfoIn.flags.texture = is_color_surface ||
1785 surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE;
1786 AddrSurfInfoIn.flags.opt4space = 1;
1787
1788 AddrSurfInfoIn.numMipLevels = config->info.levels;
1789 AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
1790 AddrSurfInfoIn.numFrags = AddrSurfInfoIn.numSamples;
1791
1792 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER))
1793 AddrSurfInfoIn.numFrags = MAX2(1, config->info.storage_samples);
1794
1795 /* GFX9 doesn't support 1D depth textures, so allocate all 1D textures
1796 * as 2D to avoid having shader variants for 1D vs 2D; all shaders
1797 * must therefore sample 1D textures as 2D. */
1798 if (config->is_3d)
1799 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_3D;
1800 else if (info->chip_class != GFX9 && config->is_1d)
1801 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_1D;
1802 else
1803 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_2D;
1804
1805 AddrSurfInfoIn.width = config->info.width;
1806 AddrSurfInfoIn.height = config->info.height;
1807
1808 if (config->is_3d)
1809 AddrSurfInfoIn.numSlices = config->info.depth;
1810 else if (config->is_cube)
1811 AddrSurfInfoIn.numSlices = 6;
1812 else
1813 AddrSurfInfoIn.numSlices = config->info.array_size;
1814
1815 /* This is propagated to DCC. It must be 0 for HTILE and CMASK. */
1816 AddrSurfInfoIn.flags.metaPipeUnaligned = 0;
1817 AddrSurfInfoIn.flags.metaRbUnaligned = 0;
1818
1819 /* Optimal values for the L2 cache. */
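/* (Presumably matching the L2 cache line size: 64 bytes on GFX9 vs.
 *  128 bytes on GFX10.) */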
1820 if (info->chip_class == GFX9) {
1821 surf->u.gfx9.dcc.independent_64B_blocks = 1;
1822 surf->u.gfx9.dcc.independent_128B_blocks = 0;
1823 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1824 } else if (info->chip_class >= GFX10) {
1825 surf->u.gfx9.dcc.independent_64B_blocks = 0;
1826 surf->u.gfx9.dcc.independent_128B_blocks = 1;
1827 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
1828 }
1829
1830 if (AddrSurfInfoIn.flags.display) {
1831 /* The display hardware can only read DCC with RB_ALIGNED=0 and
1832 * PIPE_ALIGNED=0. PIPE_ALIGNED really means L2CACHE_ALIGNED.
1833 *
1834 * The CB block requires RB_ALIGNED=1 except on chips with 1 RB.
1835 * PIPE_ALIGNED is optional, but PIPE_ALIGNED=0 requires L2 flushes
1836 * after rendering, so PIPE_ALIGNED=1 is recommended.
1837 */
1838 if (info->use_display_dcc_unaligned) {
1839 AddrSurfInfoIn.flags.metaPipeUnaligned = 1;
1840 AddrSurfInfoIn.flags.metaRbUnaligned = 1;
1841 }
1842
1843 /* Adjust DCC settings to meet DCN requirements. */
1844 if (info->use_display_dcc_unaligned ||
1845 info->use_display_dcc_with_retile_blit) {
1846 /* Only Navi12/14 support independent 64B blocks in L2,
1847 * but without DCC image stores.
1848 */
1849 if (info->family == CHIP_NAVI12 ||
1850 info->family == CHIP_NAVI14) {
1851 surf->u.gfx9.dcc.independent_64B_blocks = 1;
1852 surf->u.gfx9.dcc.independent_128B_blocks = 0;
1853 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1854 }
1855
1856 if (info->chip_class >= GFX10_3) {
1857 surf->u.gfx9.dcc.independent_64B_blocks = 1;
1858 surf->u.gfx9.dcc.independent_128B_blocks = 1;
1859 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1860 }
1861 }
1862 }
1863
1864 switch (mode) {
1865 case RADEON_SURF_MODE_LINEAR_ALIGNED:
1866 assert(config->info.samples <= 1);
1867 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1868 AddrSurfInfoIn.swizzleMode = ADDR_SW_LINEAR;
1869 break;
1870
1871 case RADEON_SURF_MODE_1D:
1872 case RADEON_SURF_MODE_2D:
1873 if (surf->flags & RADEON_SURF_IMPORTED ||
1874 (info->chip_class >= GFX10 &&
1875 surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE)) {
1876 AddrSurfInfoIn.swizzleMode = surf->u.gfx9.surf.swizzle_mode;
1877 break;
1878 }
1879
1880 r = gfx9_get_preferred_swizzle_mode(addrlib->handle, surf, &AddrSurfInfoIn,
1881 false, &AddrSurfInfoIn.swizzleMode);
1882 if (r)
1883 return r;
1884 break;
1885
1886 default:
1887 assert(0);
1888 }
1889
1890 surf->u.gfx9.resource_type = AddrSurfInfoIn.resourceType;
1891 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
1892
1893 surf->num_dcc_levels = 0;
1894 surf->surf_size = 0;
1895 surf->fmask_size = 0;
1896 surf->dcc_size = 0;
1897 surf->htile_size = 0;
1898 surf->htile_slice_size = 0;
1899 surf->u.gfx9.surf_offset = 0;
1900 surf->u.gfx9.stencil_offset = 0;
1901 surf->cmask_size = 0;
1902 surf->u.gfx9.dcc_retile_use_uint16 = false;
1903 surf->u.gfx9.dcc_retile_num_elements = 0;
1904 surf->u.gfx9.dcc_retile_map = NULL;
1905
1906 /* Calculate texture layout information. */
1907 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed,
1908 &AddrSurfInfoIn);
1909 if (r)
1910 goto error;
1911
1912 /* Calculate texture layout information for stencil. */
1913 if (surf->flags & RADEON_SURF_SBUFFER) {
1914 AddrSurfInfoIn.flags.stencil = 1;
1915 AddrSurfInfoIn.bpp = 8;
1916 AddrSurfInfoIn.format = ADDR_FMT_8;
1917
1918 if (!AddrSurfInfoIn.flags.depth) {
1919 r = gfx9_get_preferred_swizzle_mode(addrlib->handle, surf, &AddrSurfInfoIn,
1920 false, &AddrSurfInfoIn.swizzleMode);
1921 if (r)
1922 goto error;
1923 } else
1924 AddrSurfInfoIn.flags.depth = 0;
1925
1926 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed,
1927 &AddrSurfInfoIn);
1928 if (r)
1929 goto error;
1930 }
1931
1932 surf->is_linear = surf->u.gfx9.surf.swizzle_mode == ADDR_SW_LINEAR;
1933
1934 /* Query whether the surface is displayable. */
1935 /* This is only useful for surfaces that are allocated without SCANOUT. */
1936 bool displayable = false;
1937 if (!config->is_3d && !config->is_cube) {
1938 r = Addr2IsValidDisplaySwizzleMode(addrlib->handle, surf->u.gfx9.surf.swizzle_mode,
1939 surf->bpe * 8, &displayable);
1940 if (r)
1941 goto error;
1942
1943 /* Display needs unaligned DCC. */
1944 if (surf->num_dcc_levels &&
1945 (!is_dcc_supported_by_DCN(info, config, surf,
1946 surf->u.gfx9.dcc.rb_aligned,
1947 surf->u.gfx9.dcc.pipe_aligned) ||
1948 /* Don't set is_displayable if displayable DCC is missing. */
1949 (info->use_display_dcc_with_retile_blit &&
1950 !surf->u.gfx9.dcc_retile_num_elements)))
1951 displayable = false;
1952 }
1953 surf->is_displayable = displayable;
1954
1955 /* Validate that we allocated a displayable surface if requested. */
1956 assert(!AddrSurfInfoIn.flags.display || surf->is_displayable);
1957
1958 /* Validate that DCC is set up correctly. */
1959 if (surf->num_dcc_levels) {
1960 assert(is_dcc_supported_by_L2(info, surf));
1961 if (AddrSurfInfoIn.flags.color)
1962 assert(is_dcc_supported_by_CB(info, surf->u.gfx9.surf.swizzle_mode));
1963 if (AddrSurfInfoIn.flags.display) {
1964 assert(is_dcc_supported_by_DCN(info, config, surf,
1965 surf->u.gfx9.dcc.rb_aligned,
1966 surf->u.gfx9.dcc.pipe_aligned));
1967 }
1968 }
1969
1970 if (info->has_graphics &&
1971 !compressed &&
1972 !config->is_3d &&
1973 config->info.levels == 1 &&
1974 AddrSurfInfoIn.flags.color &&
1975 !surf->is_linear &&
1976 surf->surf_alignment >= 64 * 1024 && /* 64KB tiling */
1977 !(surf->flags & (RADEON_SURF_DISABLE_DCC |
1978 RADEON_SURF_FORCE_SWIZZLE_MODE |
1979 RADEON_SURF_FORCE_MICRO_TILE_MODE))) {
1980 /* Validate that DCC is enabled if DCN can do it. */
1981 if ((info->use_display_dcc_unaligned ||
1982 info->use_display_dcc_with_retile_blit) &&
1983 AddrSurfInfoIn.flags.display &&
1984 surf->bpe == 4) {
1985 assert(surf->num_dcc_levels);
1986 }
1987
1988 /* Validate that non-scanout DCC is always enabled. */
1989 if (!AddrSurfInfoIn.flags.display)
1990 assert(surf->num_dcc_levels);
1991 }
1992
1993 if (!surf->htile_size) {
1994 /* Unset this if HTILE is not present. */
1995 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1996 }
1997
1998 switch (surf->u.gfx9.surf.swizzle_mode) {
1999 /* S = standard. */
2000 case ADDR_SW_256B_S:
2001 case ADDR_SW_4KB_S:
2002 case ADDR_SW_64KB_S:
2003 case ADDR_SW_64KB_S_T:
2004 case ADDR_SW_4KB_S_X:
2005 case ADDR_SW_64KB_S_X:
2006 surf->micro_tile_mode = RADEON_MICRO_MODE_STANDARD;
2007 break;
2008
2009 /* D = display. */
2010 case ADDR_SW_LINEAR:
2011 case ADDR_SW_256B_D:
2012 case ADDR_SW_4KB_D:
2013 case ADDR_SW_64KB_D:
2014 case ADDR_SW_64KB_D_T:
2015 case ADDR_SW_4KB_D_X:
2016 case ADDR_SW_64KB_D_X:
2017 surf->micro_tile_mode = RADEON_MICRO_MODE_DISPLAY;
2018 break;
2019
2020 /* R = rotated (gfx9), render target (gfx10). */
2021 case ADDR_SW_256B_R:
2022 case ADDR_SW_4KB_R:
2023 case ADDR_SW_64KB_R:
2024 case ADDR_SW_64KB_R_T:
2025 case ADDR_SW_4KB_R_X:
2026 case ADDR_SW_64KB_R_X:
2027 case ADDR_SW_VAR_R_X:
2028 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
2029 * used at the same time. We currently do not use the rotated
2030 * mode on gfx9.
2031 */
2032 assert(info->chip_class >= GFX10 ||
2033 !"rotate micro tile mode is unsupported");
2034 surf->micro_tile_mode = RADEON_MICRO_MODE_RENDER;
2035 break;
2036
2037 /* Z = depth. */
2038 case ADDR_SW_4KB_Z:
2039 case ADDR_SW_64KB_Z:
2040 case ADDR_SW_64KB_Z_T:
2041 case ADDR_SW_4KB_Z_X:
2042 case ADDR_SW_64KB_Z_X:
2043 case ADDR_SW_VAR_Z_X:
2044 surf->micro_tile_mode = RADEON_MICRO_MODE_DEPTH;
2045 break;
2046
2047 default:
2048 assert(0);
2049 }
2050
2051 return 0;
2052
2053 error:
2054 free(surf->u.gfx9.dcc_retile_map);
2055 surf->u.gfx9.dcc_retile_map = NULL;
2056 return r;
2057 }
2058
2059 int ac_compute_surface(struct ac_addrlib *addrlib, const struct radeon_info *info,
2060 const struct ac_surf_config *config,
2061 enum radeon_surf_mode mode,
2062 struct radeon_surf *surf)
2063 {
2064 int r;
2065
2066 r = surf_config_sanity(config, surf->flags);
2067 if (r)
2068 return r;
2069
2070 if (info->chip_class >= GFX9)
2071 r = gfx9_compute_surface(addrlib, info, config, mode, surf);
2072 else
2073 r = gfx6_compute_surface(addrlib->handle, info, config, mode, surf);
2074
2075 if (r)
2076 return r;
2077
2078 /* Determine the memory layout of multiple allocations in one buffer. */
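/* Sub-allocations are appended in this order: main surface, HTILE, FMASK,
 * CMASK (MSAA only; single-sample CMASK lives in a separate buffer),
 * displayable DCC plus its retile map (when one exists), and finally DCC. */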
2079 surf->total_size = surf->surf_size;
2080 surf->alignment = surf->surf_alignment;
2081
2082 if (surf->htile_size) {
2083 surf->htile_offset = align64(surf->total_size, surf->htile_alignment);
2084 surf->total_size = surf->htile_offset + surf->htile_size;
2085 surf->alignment = MAX2(surf->alignment, surf->htile_alignment);
2086 }
2087
2088 if (surf->fmask_size) {
2089 assert(config->info.samples >= 2);
2090 surf->fmask_offset = align64(surf->total_size, surf->fmask_alignment);
2091 surf->total_size = surf->fmask_offset + surf->fmask_size;
2092 surf->alignment = MAX2(surf->alignment, surf->fmask_alignment);
2093 }
2094
2095 /* Single-sample CMASK is in a separate buffer. */
2096 if (surf->cmask_size && config->info.samples >= 2) {
2097 surf->cmask_offset = align64(surf->total_size, surf->cmask_alignment);
2098 surf->total_size = surf->cmask_offset + surf->cmask_size;
2099 surf->alignment = MAX2(surf->alignment, surf->cmask_alignment);
2100 }
2101
2102 if (surf->is_displayable)
2103 surf->flags |= RADEON_SURF_SCANOUT;
2104
2105 if (surf->dcc_size &&
2106 /* dcc_size is computed on GFX9+ only if it's displayable. */
2107 (info->chip_class >= GFX9 || !get_display_flag(config, surf))) {
2108 /* It's better when displayable DCC is immediately after
2109 * the image due to hw-specific reasons.
2110 */
2111 if (info->chip_class >= GFX9 &&
2112 surf->u.gfx9.dcc_retile_num_elements) {
2113 /* Add space for the displayable DCC buffer. */
2114 surf->display_dcc_offset =
2115 align64(surf->total_size, surf->u.gfx9.display_dcc_alignment);
2116 surf->total_size = surf->display_dcc_offset +
2117 surf->u.gfx9.display_dcc_size;
2118
2119 /* Add space for the DCC retile buffer. (16-bit or 32-bit elements) */
2120 surf->dcc_retile_map_offset =
2121 align64(surf->total_size, info->tcc_cache_line_size);
2122
2123 if (surf->u.gfx9.dcc_retile_use_uint16) {
2124 surf->total_size = surf->dcc_retile_map_offset +
2125 surf->u.gfx9.dcc_retile_num_elements * 2;
2126 } else {
2127 surf->total_size = surf->dcc_retile_map_offset +
2128 surf->u.gfx9.dcc_retile_num_elements * 4;
2129 }
2130 }
2131
2132 surf->dcc_offset = align64(surf->total_size, surf->dcc_alignment);
2133 surf->total_size = surf->dcc_offset + surf->dcc_size;
2134 surf->alignment = MAX2(surf->alignment, surf->dcc_alignment);
2135 }
2136
2137 return 0;
2138 }
2139
2140 /* This is meant to be used for disabling DCC. */
2141 void ac_surface_zero_dcc_fields(struct radeon_surf *surf)
2142 {
2143 surf->dcc_offset = 0;
2144 surf->display_dcc_offset = 0;
2145 surf->dcc_retile_map_offset = 0;
2146 }
2147
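/* Convert the TILE_SPLIT field of the AMDGPU tiling flags to a tile split
 * size in bytes, and back (eg_tile_split_rev below). */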
2148 static unsigned eg_tile_split(unsigned tile_split)
2149 {
2150 switch (tile_split) {
2151 case 0: tile_split = 64; break;
2152 case 1: tile_split = 128; break;
2153 case 2: tile_split = 256; break;
2154 case 3: tile_split = 512; break;
2155 default:
2156 case 4: tile_split = 1024; break;
2157 case 5: tile_split = 2048; break;
2158 case 6: tile_split = 4096; break;
2159 }
2160 return tile_split;
2161 }
2162
2163 static unsigned eg_tile_split_rev(unsigned eg_tile_split)
2164 {
2165 switch (eg_tile_split) {
2166 case 64: return 0;
2167 case 128: return 1;
2168 case 256: return 2;
2169 case 512: return 3;
2170 default:
2171 case 1024: return 4;
2172 case 2048: return 5;
2173 case 4096: return 6;
2174 }
2175 }
2176
2177 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_SHIFT 45
2178 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_MASK 0x3
2179
2180 /* This should be called before ac_compute_surface. */
2181 void ac_surface_set_bo_metadata(const struct radeon_info *info,
2182 struct radeon_surf *surf, uint64_t tiling_flags,
2183 enum radeon_surf_mode *mode)
2184 {
2185 bool scanout;
2186
2187 if (info->chip_class >= GFX9) {
2188 surf->u.gfx9.surf.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2189 surf->u.gfx9.dcc.independent_64B_blocks = AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_64B);
2190 surf->u.gfx9.dcc.independent_128B_blocks = AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_128B);
2191 surf->u.gfx9.dcc.max_compressed_block_size = AMDGPU_TILING_GET(tiling_flags, DCC_MAX_COMPRESSED_BLOCK_SIZE);
2192 surf->u.gfx9.display_dcc_pitch_max = AMDGPU_TILING_GET(tiling_flags, DCC_PITCH_MAX);
2193 scanout = AMDGPU_TILING_GET(tiling_flags, SCANOUT);
2194 *mode = surf->u.gfx9.surf.swizzle_mode > 0 ? RADEON_SURF_MODE_2D : RADEON_SURF_MODE_LINEAR_ALIGNED;
2195 } else {
2196 surf->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2197 surf->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2198 surf->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2199 surf->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
2200 surf->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2201 surf->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2202 scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
2203
2204 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
2205 *mode = RADEON_SURF_MODE_2D;
2206 else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
2207 *mode = RADEON_SURF_MODE_1D;
2208 else
2209 *mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
2210 }
2211
2212 if (scanout)
2213 surf->flags |= RADEON_SURF_SCANOUT;
2214 else
2215 surf->flags &= ~RADEON_SURF_SCANOUT;
2216 }
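/* Illustrative import flow (a sketch; "bo_tiling_flags" is a hypothetical
 * variable holding the tiling flags queried from the kernel BO):
 *
 *    enum radeon_surf_mode mode;
 *    ac_surface_set_bo_metadata(info, surf, bo_tiling_flags, &mode);
 *    r = ac_compute_surface(addrlib, info, config, mode, surf);
 *    ...
 *    ok = ac_surface_set_umd_metadata(info, surf, num_storage_samples,
 *                                     num_mipmap_levels, size_metadata,
 *                                     metadata);
 */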
2217
2218 void ac_surface_get_bo_metadata(const struct radeon_info *info,
2219 struct radeon_surf *surf, uint64_t *tiling_flags)
2220 {
2221 *tiling_flags = 0;
2222
2223 if (info->chip_class >= GFX9) {
2224 uint64_t dcc_offset = 0;
2225
2226 if (surf->dcc_offset) {
2227 dcc_offset = surf->display_dcc_offset ? surf->display_dcc_offset
2228 : surf->dcc_offset;
2229 assert((dcc_offset >> 8) != 0 && (dcc_offset >> 8) < (1 << 24));
2230 }
2231
2232 *tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, surf->u.gfx9.surf.swizzle_mode);
2233 *tiling_flags |= AMDGPU_TILING_SET(DCC_OFFSET_256B, dcc_offset >> 8);
2234 *tiling_flags |= AMDGPU_TILING_SET(DCC_PITCH_MAX, surf->u.gfx9.display_dcc_pitch_max);
2235 *tiling_flags |= AMDGPU_TILING_SET(DCC_INDEPENDENT_64B, surf->u.gfx9.dcc.independent_64B_blocks);
2236 *tiling_flags |= AMDGPU_TILING_SET(DCC_INDEPENDENT_128B, surf->u.gfx9.dcc.independent_128B_blocks);
2237 *tiling_flags |= AMDGPU_TILING_SET(DCC_MAX_COMPRESSED_BLOCK_SIZE, surf->u.gfx9.dcc.max_compressed_block_size);
2238 *tiling_flags |= AMDGPU_TILING_SET(SCANOUT, (surf->flags & RADEON_SURF_SCANOUT) != 0);
2239 } else {
2240 if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D)
2241 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
2242 else if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D)
2243 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
2244 else
2245 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
2246
2247 *tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, surf->u.legacy.pipe_config);
2248 *tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(surf->u.legacy.bankw));
2249 *tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(surf->u.legacy.bankh));
2250 if (surf->u.legacy.tile_split)
2251 *tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(surf->u.legacy.tile_split));
2252 *tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(surf->u.legacy.mtilea));
2253 *tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(surf->u.legacy.num_banks)-1);
2254
2255 if (surf->flags & RADEON_SURF_SCANOUT)
2256 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
2257 else
2258 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
2259 }
2260 }
2261
2262 static uint32_t ac_get_umd_metadata_word1(const struct radeon_info *info)
2263 {
2264 return (ATI_VENDOR_ID << 16) | info->pci_id;
2265 }
2266
2267 /* This should be called after ac_compute_surface. */
2268 bool ac_surface_set_umd_metadata(const struct radeon_info *info,
2269 struct radeon_surf *surf,
2270 unsigned num_storage_samples,
2271 unsigned num_mipmap_levels,
2272 unsigned size_metadata,
2273 uint32_t metadata[64])
2274 {
2275 uint32_t *desc = &metadata[2];
2276 uint64_t offset;
2277
2278 if (info->chip_class >= GFX9)
2279 offset = surf->u.gfx9.surf_offset;
2280 else
2281 offset = surf->u.legacy.level[0].offset;
2282
2283 if (offset || /* Non-zero planes ignore metadata. */
2284 size_metadata < 10 * 4 || /* at least 2(header) + 8(desc) dwords */
2285 metadata[0] == 0 || /* invalid version number */
2286 metadata[1] != ac_get_umd_metadata_word1(info)) /* invalid PCI ID */ {
2287 /* Disable DCC because it might not be enabled. */
2288 ac_surface_zero_dcc_fields(surf);
2289
2290 /* Don't report an error if the texture comes from an incompatible driver,
2291 * but this might not work.
2292 */
2293 return true;
2294 }
2295
2296 /* Validate that sample counts and the number of mipmap levels match. */
2297 unsigned desc_last_level = G_008F1C_LAST_LEVEL(desc[3]);
2298 unsigned type = G_008F1C_TYPE(desc[3]);
2299
2300 if (type == V_008F1C_SQ_RSRC_IMG_2D_MSAA || type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) {
2301 unsigned log_samples = util_logbase2(MAX2(1, num_storage_samples));
2302
2303 if (desc_last_level != log_samples) {
2304 fprintf(stderr,
2305 "amdgpu: invalid MSAA texture import, "
2306 "metadata has log2(samples) = %u, the caller set %u\n",
2307 desc_last_level, log_samples);
2308 return false;
2309 }
2310 } else {
2311 if (desc_last_level != num_mipmap_levels - 1) {
2312 fprintf(stderr,
2313 "amdgpu: invalid mipmapped texture import, "
2314 "metadata has last_level = %u, the caller set %u\n",
2315 desc_last_level, num_mipmap_levels - 1);
2316 return false;
2317 }
2318 }
2319
2320 if (info->chip_class >= GFX8 && G_008F28_COMPRESSION_EN(desc[6])) {
2321 /* Read DCC information. */
2322 switch (info->chip_class) {
2323 case GFX8:
2324 surf->dcc_offset = (uint64_t)desc[7] << 8;
2325 break;
2326
2327 case GFX9:
2328 surf->dcc_offset =
2329 ((uint64_t)desc[7] << 8) | ((uint64_t)G_008F24_META_DATA_ADDRESS(desc[5]) << 40);
2330 surf->u.gfx9.dcc.pipe_aligned = G_008F24_META_PIPE_ALIGNED(desc[5]);
2331 surf->u.gfx9.dcc.rb_aligned = G_008F24_META_RB_ALIGNED(desc[5]);
2332
2333 /* If DCC is unaligned, this can only be a displayable image. */
2334 if (!surf->u.gfx9.dcc.pipe_aligned && !surf->u.gfx9.dcc.rb_aligned)
2335 assert(surf->is_displayable);
2336 break;
2337
2338 case GFX10:
2339 case GFX10_3:
2340 surf->dcc_offset =
2341 ((uint64_t)G_00A018_META_DATA_ADDRESS_LO(desc[6]) << 8) | ((uint64_t)desc[7] << 16);
2342 surf->u.gfx9.dcc.pipe_aligned = G_00A018_META_PIPE_ALIGNED(desc[6]);
2343 break;
2344
2345 default:
2346 assert(0);
2347 return false;
2348 }
2349 } else {
2350 /* Disable DCC. dcc_offset is always set by texture_from_handle
2351 * and must be cleared here.
2352 */
2353 ac_surface_zero_dcc_fields(surf);
2354 }
2355
2356 return true;
2357 }
2358
2359 void ac_surface_get_umd_metadata(const struct radeon_info *info,
2360 struct radeon_surf *surf,
2361 unsigned num_mipmap_levels,
2362 uint32_t desc[8],
2363 unsigned *size_metadata, uint32_t metadata[64])
2364 {
2365 /* Clear the base address and set the relative DCC offset. */
2366 desc[0] = 0;
2367 desc[1] &= C_008F14_BASE_ADDRESS_HI;
2368
2369 switch (info->chip_class) {
2370 case GFX6:
2371 case GFX7:
2372 break;
2373 case GFX8:
2374 desc[7] = surf->dcc_offset >> 8;
2375 break;
2376 case GFX9:
2377 desc[7] = surf->dcc_offset >> 8;
2378 desc[5] &= C_008F24_META_DATA_ADDRESS;
2379 desc[5] |= S_008F24_META_DATA_ADDRESS(surf->dcc_offset >> 40);
2380 break;
2381 case GFX10:
2382 case GFX10_3:
2383 desc[6] &= C_00A018_META_DATA_ADDRESS_LO;
2384 desc[6] |= S_00A018_META_DATA_ADDRESS_LO(surf->dcc_offset >> 8);
2385 desc[7] = surf->dcc_offset >> 16;
2386 break;
2387 default:
2388 assert(0);
2389 }
2390
2391 /* Metadata image format version 1:
2392 * [0] = 1 (metadata format identifier)
2393 * [1] = (VENDOR_ID << 16) | PCI_ID
2394 * [2:9] = image descriptor for the whole resource
2395 * [2] is always 0, because the base address is cleared
2396 * [9] is the DCC offset bits [39:8] from the beginning of
2397 * the buffer
2398 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
2399 */
2400
2401 metadata[0] = 1; /* metadata image format version 1 */
2402
2403 /* Tiling modes are ambiguous without a PCI ID. */
2404 metadata[1] = ac_get_umd_metadata_word1(info);
2405
2406 /* Dwords [2:9] contain the image descriptor. */
2407 memcpy(&metadata[2], desc, 8 * 4);
2408 *size_metadata = 10 * 4;
2409
2410 /* Dwords [10:..] contain the mipmap level offsets. */
2411 if (info->chip_class <= GFX8) {
2412 for (unsigned i = 0; i < num_mipmap_levels; i++)
2413 metadata[10 + i] = surf->u.legacy.level[i].offset >> 8;
2414
2415 *size_metadata += num_mipmap_levels * 4;
2416 }
2417 }
2418
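/* Apply an externally provided base offset and pitch (e.g. from a shared or
 * imported BO) on top of the computed layout; all metadata offsets below are
 * shifted by the same base offset. */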
2419 void ac_surface_override_offset_stride(const struct radeon_info *info,
2420 struct radeon_surf *surf,
2421 unsigned num_mipmap_levels,
2422 uint64_t offset, unsigned pitch)
2423 {
2424 if (info->chip_class >= GFX9) {
2425 if (pitch) {
2426 surf->u.gfx9.surf_pitch = pitch;
2427 if (num_mipmap_levels == 1)
2428 surf->u.gfx9.surf.epitch = pitch - 1;
2429 surf->u.gfx9.surf_slice_size =
2430 (uint64_t)pitch * surf->u.gfx9.surf_height * surf->bpe;
2431 }
2432 surf->u.gfx9.surf_offset = offset;
2433 if (surf->u.gfx9.stencil_offset)
2434 surf->u.gfx9.stencil_offset += offset;
2435 } else {
2436 if (pitch) {
2437 surf->u.legacy.level[0].nblk_x = pitch;
2438 surf->u.legacy.level[0].slice_size_dw =
2439 ((uint64_t)pitch * surf->u.legacy.level[0].nblk_y * surf->bpe) / 4;
2440 }
2441
2442 if (offset) {
2443 for (unsigned i = 0; i < ARRAY_SIZE(surf->u.legacy.level); ++i)
2444 surf->u.legacy.level[i].offset += offset;
2445 }
2446 }
2447
2448 if (surf->htile_offset)
2449 surf->htile_offset += offset;
2450 if (surf->fmask_offset)
2451 surf->fmask_offset += offset;
2452 if (surf->cmask_offset)
2453 surf->cmask_offset += offset;
2454 if (surf->dcc_offset)
2455 surf->dcc_offset += offset;
2456 if (surf->display_dcc_offset)
2457 surf->display_dcc_offset += offset;
2458 if (surf->dcc_retile_map_offset)
2459 surf->dcc_retile_map_offset += offset;
2460 }