Revert "ac/surface: require that gfx8 doesn't have DCC in order to be displayable"
src/amd/common/ac_surface.c (mesa.git)
1 /*
2 * Copyright © 2011 Red Hat All Rights Reserved.
3 * Copyright © 2017 Advanced Micro Devices, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
25 * of the Software.
26 */
27
28 #include "ac_surface.h"
29 #include "amd_family.h"
30 #include "addrlib/src/amdgpu_asic_addr.h"
31 #include "ac_gpu_info.h"
32 #include "util/hash_table.h"
33 #include "util/macros.h"
34 #include "util/simple_mtx.h"
35 #include "util/u_atomic.h"
36 #include "util/u_math.h"
37 #include "util/u_memory.h"
38 #include "sid.h"
39
40 #include <errno.h>
41 #include <stdio.h>
42 #include <stdlib.h>
43 #include <amdgpu.h>
44 #include "drm-uapi/amdgpu_drm.h"
45
46 #include "addrlib/inc/addrinterface.h"
47
48 #ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
49 #define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
50 #endif
51
52 #ifndef CIASICIDGFXENGINE_ARCTICISLAND
53 #define CIASICIDGFXENGINE_ARCTICISLAND 0x0000000D
54 #endif
55
56 struct ac_addrlib {
57 ADDR_HANDLE handle;
58
59 /* The cache of DCC retile maps for reuse when allocating images of
60 * similar sizes.
61 */
62 simple_mtx_t dcc_retile_map_lock;
63 struct hash_table *dcc_retile_maps;
64 };
65
66 struct dcc_retile_map_key {
67 enum radeon_family family;
68 unsigned retile_width;
69 unsigned retile_height;
70 bool rb_aligned;
71 bool pipe_aligned;
72 unsigned dcc_retile_num_elements;
73 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT input;
74 };
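/* The key is hashed and compared bytewise (see dcc_retile_map_hash_key and
 * dcc_retile_map_keys_equal below), so the whole struct, including padding,
 * must be zero-initialized before it is filled in; ac_compute_dcc_retile_map
 * does this with memset.
 */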
75
76 static uint32_t dcc_retile_map_hash_key(const void *key)
77 {
78 return _mesa_hash_data(key, sizeof(struct dcc_retile_map_key));
79 }
80
81 static bool dcc_retile_map_keys_equal(const void *a, const void *b)
82 {
83 return memcmp(a, b, sizeof(struct dcc_retile_map_key)) == 0;
84 }
85
86 static void dcc_retile_map_free(struct hash_entry *entry)
87 {
88 free((void*)entry->key);
89 free(entry->data);
90 }
91
92 static uint32_t *ac_compute_dcc_retile_map(struct ac_addrlib *addrlib,
93 const struct radeon_info *info,
94 unsigned retile_width, unsigned retile_height,
95 bool rb_aligned, bool pipe_aligned, bool use_uint16,
96 unsigned dcc_retile_num_elements,
97 const ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT *in)
98 {
99 unsigned dcc_retile_map_size = dcc_retile_num_elements * (use_uint16 ? 2 : 4);
100 struct dcc_retile_map_key key;
101
102 assert(in->numFrags == 1 && in->numSlices == 1 && in->numMipLevels == 1);
103
104 memset(&key, 0, sizeof(key));
105 key.family = info->family;
106 key.retile_width = retile_width;
107 key.retile_height = retile_height;
108 key.rb_aligned = rb_aligned;
109 key.pipe_aligned = pipe_aligned;
110 key.dcc_retile_num_elements = dcc_retile_num_elements;
111 memcpy(&key.input, in, sizeof(*in));
112
113 simple_mtx_lock(&addrlib->dcc_retile_map_lock);
114
115 /* If we have already computed this retile map, get it from the hash table. */
116 struct hash_entry *entry = _mesa_hash_table_search(addrlib->dcc_retile_maps, &key);
117 if (entry) {
118 uint32_t *map = entry->data;
119 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
120 return map;
121 }
122
123 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin;
124 memcpy(&addrin, in, sizeof(*in));
125
126 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_OUTPUT addrout = {};
127 addrout.size = sizeof(addrout);
128
129 void *dcc_retile_map = malloc(dcc_retile_map_size);
130 if (!dcc_retile_map) {
131 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
132 return NULL;
133 }
134
135 unsigned index = 0;
136
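/* Walk the surface in DCC compress-block steps and record one pair of DCC
 * byte offsets per block: the source offset uses the requested rb/pipe
 * alignment, the destination offset is computed with both alignment flags
 * cleared (the displayable layout). Entry 2*i is the source offset and
 * 2*i+1 the destination offset of block i.
 */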
137 for (unsigned y = 0; y < retile_height; y += in->compressBlkHeight) {
138 addrin.y = y;
139
140 for (unsigned x = 0; x < retile_width; x += in->compressBlkWidth) {
141 addrin.x = x;
142
143 /* Compute src DCC address */
144 addrin.dccKeyFlags.pipeAligned = pipe_aligned;
145 addrin.dccKeyFlags.rbAligned = rb_aligned;
146 addrout.addr = 0;
147
148 if (Addr2ComputeDccAddrFromCoord(addrlib->handle, &addrin, &addrout) != ADDR_OK) {
free(dcc_retile_map); /* don't leak the partially computed map on error */
149 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
150 return NULL;
151 }
152
153 if (use_uint16)
154 ((uint16_t*)dcc_retile_map)[index * 2] = addrout.addr;
155 else
156 ((uint32_t*)dcc_retile_map)[index * 2] = addrout.addr;
157
158 /* Compute dst DCC address */
159 addrin.dccKeyFlags.pipeAligned = 0;
160 addrin.dccKeyFlags.rbAligned = 0;
161 addrout.addr = 0;
162
163 if (Addr2ComputeDccAddrFromCoord(addrlib->handle, &addrin, &addrout) != ADDR_OK) {
free(dcc_retile_map); /* don't leak the partially computed map on error */
164 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
165 return NULL;
166 }
167
168 if (use_uint16)
169 ((uint16_t*)dcc_retile_map)[index * 2 + 1] = addrout.addr;
170 else
171 ((uint32_t*)dcc_retile_map)[index * 2 + 1] = addrout.addr;
172
173 assert(index * 2 + 1 < dcc_retile_num_elements);
174 index++;
175 }
176 }
177 /* Fill the remaining pairs with the last one (for the compute shader). */
178 for (unsigned i = index * 2; i < dcc_retile_num_elements; i++) {
179 if (use_uint16)
180 ((uint16_t*)dcc_retile_map)[i] = ((uint16_t*)dcc_retile_map)[i - 2];
181 else
182 ((uint32_t*)dcc_retile_map)[i] = ((uint32_t*)dcc_retile_map)[i - 2];
183 }
184
185 /* Insert the retile map into the hash table, so that it can be reused and
186 * the computation can be skipped for similar image sizes.
187 */
188 _mesa_hash_table_insert(addrlib->dcc_retile_maps,
189 mem_dup(&key, sizeof(key)), dcc_retile_map);
190
191 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
192 return dcc_retile_map;
193 }
194
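/* System-memory callbacks plugged into ADDR_CREATE_INPUT in ac_addrlib_create
 * below; addrlib calls them for its internal allocations.
 */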
195 static void *ADDR_API allocSysMem(const ADDR_ALLOCSYSMEM_INPUT * pInput)
196 {
197 return malloc(pInput->sizeInBytes);
198 }
199
200 static ADDR_E_RETURNCODE ADDR_API freeSysMem(const ADDR_FREESYSMEM_INPUT * pInput)
201 {
202 free(pInput->pVirtAddr);
203 return ADDR_OK;
204 }
205
206 struct ac_addrlib *ac_addrlib_create(const struct radeon_info *info,
207 const struct amdgpu_gpu_info *amdinfo,
208 uint64_t *max_alignment)
209 {
210 ADDR_CREATE_INPUT addrCreateInput = {0};
211 ADDR_CREATE_OUTPUT addrCreateOutput = {0};
212 ADDR_REGISTER_VALUE regValue = {0};
213 ADDR_CREATE_FLAGS createFlags = {{0}};
214 ADDR_GET_MAX_ALIGNMENTS_OUTPUT addrGetMaxAlignmentsOutput = {0};
215 ADDR_E_RETURNCODE addrRet;
216
217 addrCreateInput.size = sizeof(ADDR_CREATE_INPUT);
218 addrCreateOutput.size = sizeof(ADDR_CREATE_OUTPUT);
219
220 regValue.gbAddrConfig = amdinfo->gb_addr_cfg;
221 createFlags.value = 0;
222
223 addrCreateInput.chipFamily = info->family_id;
224 addrCreateInput.chipRevision = info->chip_external_rev;
225
226 if (addrCreateInput.chipFamily == FAMILY_UNKNOWN)
227 return NULL;
228
229 if (addrCreateInput.chipFamily >= FAMILY_AI) {
230 addrCreateInput.chipEngine = CIASICIDGFXENGINE_ARCTICISLAND;
231 } else {
232 regValue.noOfBanks = amdinfo->mc_arb_ramcfg & 0x3;
233 regValue.noOfRanks = (amdinfo->mc_arb_ramcfg & 0x4) >> 2;
234
235 regValue.backendDisables = amdinfo->enabled_rb_pipes_mask;
236 regValue.pTileConfig = amdinfo->gb_tile_mode;
237 regValue.noOfEntries = ARRAY_SIZE(amdinfo->gb_tile_mode);
238 if (addrCreateInput.chipFamily == FAMILY_SI) {
239 regValue.pMacroTileConfig = NULL;
240 regValue.noOfMacroEntries = 0;
241 } else {
242 regValue.pMacroTileConfig = amdinfo->gb_macro_tile_mode;
243 regValue.noOfMacroEntries = ARRAY_SIZE(amdinfo->gb_macro_tile_mode);
244 }
245
246 createFlags.useTileIndex = 1;
247 createFlags.useHtileSliceAlign = 1;
248
249 addrCreateInput.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
250 }
251
252 addrCreateInput.callbacks.allocSysMem = allocSysMem;
253 addrCreateInput.callbacks.freeSysMem = freeSysMem;
254 addrCreateInput.callbacks.debugPrint = 0;
255 addrCreateInput.createFlags = createFlags;
256 addrCreateInput.regValue = regValue;
257
258 addrRet = AddrCreate(&addrCreateInput, &addrCreateOutput);
259 if (addrRet != ADDR_OK)
260 return NULL;
261
262 if (max_alignment) {
263 addrRet = AddrGetMaxAlignments(addrCreateOutput.hLib, &addrGetMaxAlignmentsOutput);
264 if (addrRet == ADDR_OK){
265 *max_alignment = addrGetMaxAlignmentsOutput.baseAlign;
266 }
267 }
268
269 struct ac_addrlib *addrlib = calloc(1, sizeof(struct ac_addrlib));
270 if (!addrlib) {
271 AddrDestroy(addrCreateOutput.hLib);
272 return NULL;
273 }
274
275 addrlib->handle = addrCreateOutput.hLib;
276 simple_mtx_init(&addrlib->dcc_retile_map_lock, mtx_plain);
277 addrlib->dcc_retile_maps = _mesa_hash_table_create(NULL, dcc_retile_map_hash_key,
278 dcc_retile_map_keys_equal);
279 return addrlib;
280 }
281
282 void ac_addrlib_destroy(struct ac_addrlib *addrlib)
283 {
284 AddrDestroy(addrlib->handle);
285 simple_mtx_destroy(&addrlib->dcc_retile_map_lock);
286 _mesa_hash_table_destroy(addrlib->dcc_retile_maps, dcc_retile_map_free);
287 free(addrlib);
288 }
289
290 static int surf_config_sanity(const struct ac_surf_config *config,
291 unsigned flags)
292 {
293 /* FMASK is allocated together with the color surface and can't be
294 * allocated separately.
295 */
296 assert(!(flags & RADEON_SURF_FMASK));
297 if (flags & RADEON_SURF_FMASK)
298 return -EINVAL;
299
300 /* All dimensions must be at least 1. */
301 if (!config->info.width || !config->info.height || !config->info.depth ||
302 !config->info.array_size || !config->info.levels)
303 return -EINVAL;
304
305 switch (config->info.samples) {
306 case 0:
307 case 1:
308 case 2:
309 case 4:
310 case 8:
311 break;
312 case 16:
313 if (flags & RADEON_SURF_Z_OR_SBUFFER)
314 return -EINVAL;
315 break;
316 default:
317 return -EINVAL;
318 }
319
320 if (!(flags & RADEON_SURF_Z_OR_SBUFFER)) {
321 switch (config->info.storage_samples) {
322 case 0:
323 case 1:
324 case 2:
325 case 4:
326 case 8:
327 break;
328 default:
329 return -EINVAL;
330 }
331 }
332
333 if (config->is_3d && config->info.array_size > 1)
334 return -EINVAL;
335 if (config->is_cube && config->info.depth > 1)
336 return -EINVAL;
337
338 return 0;
339 }
340
341 static int gfx6_compute_level(ADDR_HANDLE addrlib,
342 const struct ac_surf_config *config,
343 struct radeon_surf *surf, bool is_stencil,
344 unsigned level, bool compressed,
345 ADDR_COMPUTE_SURFACE_INFO_INPUT *AddrSurfInfoIn,
346 ADDR_COMPUTE_SURFACE_INFO_OUTPUT *AddrSurfInfoOut,
347 ADDR_COMPUTE_DCCINFO_INPUT *AddrDccIn,
348 ADDR_COMPUTE_DCCINFO_OUTPUT *AddrDccOut,
349 ADDR_COMPUTE_HTILE_INFO_INPUT *AddrHtileIn,
350 ADDR_COMPUTE_HTILE_INFO_OUTPUT *AddrHtileOut)
351 {
352 struct legacy_surf_level *surf_level;
353 ADDR_E_RETURNCODE ret;
354
355 AddrSurfInfoIn->mipLevel = level;
356 AddrSurfInfoIn->width = u_minify(config->info.width, level);
357 AddrSurfInfoIn->height = u_minify(config->info.height, level);
358
359 /* Make GFX6 linear surfaces compatible with GFX9 for hybrid graphics,
360 * because GFX9 needs linear alignment of 256 bytes.
361 */
362 if (config->info.levels == 1 &&
363 AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED &&
364 AddrSurfInfoIn->bpp &&
365 util_is_power_of_two_or_zero(AddrSurfInfoIn->bpp)) {
366 unsigned alignment = 256 / (AddrSurfInfoIn->bpp / 8);
367
368 AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, alignment);
369 }
370
371 /* addrlib assumes that the bytes per pixel divides 64 evenly, which is
372 * not true for r32g32b32 formats (12 bytes per pixel). */
373 if (AddrSurfInfoIn->bpp == 96) {
374 assert(config->info.levels == 1);
375 assert(AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED);
376
377 /* The least common multiple of 64 bytes and 12 bytes/pixel is
378 * 192 bytes, or 16 pixels. */
379 AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, 16);
380 }
381
382 if (config->is_3d)
383 AddrSurfInfoIn->numSlices = u_minify(config->info.depth, level);
384 else if (config->is_cube)
385 AddrSurfInfoIn->numSlices = 6;
386 else
387 AddrSurfInfoIn->numSlices = config->info.array_size;
388
389 if (level > 0) {
390 /* Set the base level pitch. This is needed for calculation
391 * of non-zero levels. */
392 if (is_stencil)
393 AddrSurfInfoIn->basePitch = surf->u.legacy.stencil_level[0].nblk_x;
394 else
395 AddrSurfInfoIn->basePitch = surf->u.legacy.level[0].nblk_x;
396
397 /* Convert blocks to pixels for compressed formats. */
398 if (compressed)
399 AddrSurfInfoIn->basePitch *= surf->blk_w;
400 }
401
402 ret = AddrComputeSurfaceInfo(addrlib,
403 AddrSurfInfoIn,
404 AddrSurfInfoOut);
405 if (ret != ADDR_OK) {
406 return ret;
407 }
408
409 surf_level = is_stencil ? &surf->u.legacy.stencil_level[level] : &surf->u.legacy.level[level];
410 surf_level->offset = align64(surf->surf_size, AddrSurfInfoOut->baseAlign);
411 surf_level->slice_size_dw = AddrSurfInfoOut->sliceSize / 4;
412 surf_level->nblk_x = AddrSurfInfoOut->pitch;
413 surf_level->nblk_y = AddrSurfInfoOut->height;
414
415 switch (AddrSurfInfoOut->tileMode) {
416 case ADDR_TM_LINEAR_ALIGNED:
417 surf_level->mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
418 break;
419 case ADDR_TM_1D_TILED_THIN1:
420 surf_level->mode = RADEON_SURF_MODE_1D;
421 break;
422 case ADDR_TM_2D_TILED_THIN1:
423 surf_level->mode = RADEON_SURF_MODE_2D;
424 break;
425 default:
426 assert(0);
427 }
428
429 if (is_stencil)
430 surf->u.legacy.stencil_tiling_index[level] = AddrSurfInfoOut->tileIndex;
431 else
432 surf->u.legacy.tiling_index[level] = AddrSurfInfoOut->tileIndex;
433
434 surf->surf_size = surf_level->offset + AddrSurfInfoOut->surfSize;
435
436 /* Clear DCC fields at the beginning. */
437 surf_level->dcc_offset = 0;
438
439 /* The previous level's flag tells us if we can use DCC for this level. */
440 if (AddrSurfInfoIn->flags.dccCompatible &&
441 (level == 0 || AddrDccOut->subLvlCompressible)) {
442 bool prev_level_clearable = level == 0 ||
443 AddrDccOut->dccRamSizeAligned;
444
445 AddrDccIn->colorSurfSize = AddrSurfInfoOut->surfSize;
446 AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
447 AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
448 AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
449 AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
450
451 ret = AddrComputeDccInfo(addrlib,
452 AddrDccIn,
453 AddrDccOut);
454
455 if (ret == ADDR_OK) {
456 surf_level->dcc_offset = surf->dcc_size;
457 surf->num_dcc_levels = level + 1;
458 surf->dcc_size = surf_level->dcc_offset + AddrDccOut->dccRamSize;
459 surf->dcc_alignment = MAX2(surf->dcc_alignment, AddrDccOut->dccRamBaseAlign);
460
461 /* If the DCC size of a subresource (1 mip level or 1 slice)
462 * is not aligned, the DCC memory layout is not contiguous for
463 * that subresource, which means we can't use fast clear.
464 *
465 * We only do fast clears for whole mipmap levels. If we did
466 * per-slice fast clears, the same restriction would apply.
467 * (i.e. only compute the slice size and see if it's aligned)
468 *
469 * The last level can be non-contiguous and still be clearable
470 * if it's interleaved with the next level that doesn't exist.
471 */
472 if (AddrDccOut->dccRamSizeAligned ||
473 (prev_level_clearable && level == config->info.levels - 1))
474 surf_level->dcc_fast_clear_size = AddrDccOut->dccFastClearSize;
475 else
476 surf_level->dcc_fast_clear_size = 0;
477
478 /* Compute the DCC slice size because addrlib doesn't
479 * provide this info. As DCC memory is linear (each
480 * slice is the same size) it's easy to compute.
481 */
482 surf->dcc_slice_size = AddrDccOut->dccRamSize / config->info.array_size;
483
484 /* For arrays, we have to compute the DCC info again
485 * with one slice size to get a correct fast clear
486 * size.
487 */
488 if (config->info.array_size > 1) {
489 AddrDccIn->colorSurfSize = AddrSurfInfoOut->sliceSize;
490 AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
491 AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
492 AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
493 AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
494
495 ret = AddrComputeDccInfo(addrlib,
496 AddrDccIn, AddrDccOut);
497 if (ret == ADDR_OK) {
498 /* If the DCC memory isn't properly
499 * aligned, the data are interleaved
500 * across slices.
501 */
502 if (AddrDccOut->dccRamSizeAligned)
503 surf_level->dcc_slice_fast_clear_size = AddrDccOut->dccFastClearSize;
504 else
505 surf_level->dcc_slice_fast_clear_size = 0;
506 }
507
508 if (surf->flags & RADEON_SURF_CONTIGUOUS_DCC_LAYERS &&
509 surf->dcc_slice_size != surf_level->dcc_slice_fast_clear_size) {
510 surf->dcc_size = 0;
511 surf->num_dcc_levels = 0;
512 AddrDccOut->subLvlCompressible = false;
513 }
514 } else {
515 surf_level->dcc_slice_fast_clear_size = surf_level->dcc_fast_clear_size;
516 }
517 }
518 }
519
520 /* HTILE. */
521 if (!is_stencil &&
522 AddrSurfInfoIn->flags.depth &&
523 surf_level->mode == RADEON_SURF_MODE_2D &&
524 level == 0 &&
525 !(surf->flags & RADEON_SURF_NO_HTILE)) {
526 AddrHtileIn->flags.tcCompatible = AddrSurfInfoOut->tcCompatible;
527 AddrHtileIn->pitch = AddrSurfInfoOut->pitch;
528 AddrHtileIn->height = AddrSurfInfoOut->height;
529 AddrHtileIn->numSlices = AddrSurfInfoOut->depth;
530 AddrHtileIn->blockWidth = ADDR_HTILE_BLOCKSIZE_8;
531 AddrHtileIn->blockHeight = ADDR_HTILE_BLOCKSIZE_8;
532 AddrHtileIn->pTileInfo = AddrSurfInfoOut->pTileInfo;
533 AddrHtileIn->tileIndex = AddrSurfInfoOut->tileIndex;
534 AddrHtileIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
535
536 ret = AddrComputeHtileInfo(addrlib,
537 AddrHtileIn,
538 AddrHtileOut);
539
540 if (ret == ADDR_OK) {
541 surf->htile_size = AddrHtileOut->htileBytes;
542 surf->htile_slice_size = AddrHtileOut->sliceSize;
543 surf->htile_alignment = AddrHtileOut->baseAlign;
544 }
545 }
546
547 return 0;
548 }
549
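/* Decode surf->micro_tile_mode from the tile mode array entry selected by
 * tiling_index[0]. GFX7 re-encoded the field, hence the MICRO_TILE_MODE_NEW
 * variant used for chip_class >= GFX7.
 */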
550 static void gfx6_set_micro_tile_mode(struct radeon_surf *surf,
551 const struct radeon_info *info)
552 {
553 uint32_t tile_mode = info->si_tile_mode_array[surf->u.legacy.tiling_index[0]];
554
555 if (info->chip_class >= GFX7)
556 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE_NEW(tile_mode);
557 else
558 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE(tile_mode);
559 }
560
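/* The macro tile index is effectively log2(min(tile_split, 8*8*bpe) / 64):
 * e.g. bpe = 4 gives a 256-byte tile, so with tile_split >= 256 the loop
 * shifts twice and returns 2. Addrlib normally reports this index, but when
 * the tile index is forced for shared resources it has to be computed here.
 */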
561 static unsigned cik_get_macro_tile_index(struct radeon_surf *surf)
562 {
563 unsigned index, tileb;
564
565 tileb = 8 * 8 * surf->bpe;
566 tileb = MIN2(surf->u.legacy.tile_split, tileb);
567
568 for (index = 0; tileb > 64; index++)
569 tileb >>= 1;
570
571 assert(index < 16);
572 return index;
573 }
574
575 static bool get_display_flag(const struct ac_surf_config *config,
576 const struct radeon_surf *surf)
577 {
578 unsigned num_channels = config->info.num_channels;
579 unsigned bpe = surf->bpe;
580
581 if (!config->is_3d &&
582 !config->is_cube &&
583 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
584 surf->flags & RADEON_SURF_SCANOUT &&
585 config->info.samples <= 1 &&
586 surf->blk_w <= 2 && surf->blk_h == 1) {
587 /* subsampled */
588 if (surf->blk_w == 2 && surf->blk_h == 1)
589 return true;
590
591 if (/* RGBA8 or RGBA16F */
592 (bpe >= 4 && bpe <= 8 && num_channels == 4) ||
593 /* R5G6B5 or R5G5B5A1 */
594 (bpe == 2 && num_channels >= 3) ||
595 /* C8 palette */
596 (bpe == 1 && num_channels == 1))
597 return true;
598 }
599 return false;
600 }
601
602 /**
603 * This must be called after the first level is computed.
604 *
605 * Copy surface-global settings like pipe/bank config from level 0 surface
606 * computation, and compute tile swizzle.
607 */
608 static int gfx6_surface_settings(ADDR_HANDLE addrlib,
609 const struct radeon_info *info,
610 const struct ac_surf_config *config,
611 ADDR_COMPUTE_SURFACE_INFO_OUTPUT* csio,
612 struct radeon_surf *surf)
613 {
614 surf->surf_alignment = csio->baseAlign;
615 surf->u.legacy.pipe_config = csio->pTileInfo->pipeConfig - 1;
616 gfx6_set_micro_tile_mode(surf, info);
617
618 /* For 2D modes only. */
619 if (csio->tileMode >= ADDR_TM_2D_TILED_THIN1) {
620 surf->u.legacy.bankw = csio->pTileInfo->bankWidth;
621 surf->u.legacy.bankh = csio->pTileInfo->bankHeight;
622 surf->u.legacy.mtilea = csio->pTileInfo->macroAspectRatio;
623 surf->u.legacy.tile_split = csio->pTileInfo->tileSplitBytes;
624 surf->u.legacy.num_banks = csio->pTileInfo->banks;
625 surf->u.legacy.macro_tile_index = csio->macroModeIndex;
626 } else {
627 surf->u.legacy.macro_tile_index = 0;
628 }
629
630 /* Compute tile swizzle. */
631 /* TODO: fix tile swizzle with mipmapping for GFX6 */
632 if ((info->chip_class >= GFX7 || config->info.levels == 1) &&
633 config->info.surf_index &&
634 surf->u.legacy.level[0].mode == RADEON_SURF_MODE_2D &&
635 !(surf->flags & (RADEON_SURF_Z_OR_SBUFFER | RADEON_SURF_SHAREABLE)) &&
636 !get_display_flag(config, surf)) {
637 ADDR_COMPUTE_BASE_SWIZZLE_INPUT AddrBaseSwizzleIn = {0};
638 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT AddrBaseSwizzleOut = {0};
639
640 AddrBaseSwizzleIn.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
641 AddrBaseSwizzleOut.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
642
643 AddrBaseSwizzleIn.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
644 AddrBaseSwizzleIn.tileIndex = csio->tileIndex;
645 AddrBaseSwizzleIn.macroModeIndex = csio->macroModeIndex;
646 AddrBaseSwizzleIn.pTileInfo = csio->pTileInfo;
647 AddrBaseSwizzleIn.tileMode = csio->tileMode;
648
649 int r = AddrComputeBaseSwizzle(addrlib, &AddrBaseSwizzleIn,
650 &AddrBaseSwizzleOut);
651 if (r != ADDR_OK)
652 return r;
653
654 assert(AddrBaseSwizzleOut.tileSwizzle <=
655 u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
656 surf->tile_swizzle = AddrBaseSwizzleOut.tileSwizzle;
657 }
658 return 0;
659 }
660
661 static void ac_compute_cmask(const struct radeon_info *info,
662 const struct ac_surf_config *config,
663 struct radeon_surf *surf)
664 {
665 unsigned pipe_interleave_bytes = info->pipe_interleave_bytes;
666 unsigned num_pipes = info->num_tile_pipes;
667 unsigned cl_width, cl_height;
668
669 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER || surf->is_linear ||
670 (config->info.samples >= 2 && !surf->fmask_size))
671 return;
672
673 assert(info->chip_class <= GFX8);
674
675 switch (num_pipes) {
676 case 2:
677 cl_width = 32;
678 cl_height = 16;
679 break;
680 case 4:
681 cl_width = 32;
682 cl_height = 32;
683 break;
684 case 8:
685 cl_width = 64;
686 cl_height = 32;
687 break;
688 case 16: /* Hawaii */
689 cl_width = 64;
690 cl_height = 64;
691 break;
692 default:
693 assert(0);
694 return;
695 }
696
697 unsigned base_align = num_pipes * pipe_interleave_bytes;
698
699 unsigned width = align(surf->u.legacy.level[0].nblk_x, cl_width*8);
700 unsigned height = align(surf->u.legacy.level[0].nblk_y, cl_height*8);
701 unsigned slice_elements = (width * height) / (8*8);
702
703 /* Each element of CMASK is a nibble. */
704 unsigned slice_bytes = slice_elements / 2;
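/* Example: with 8 pipes (cl 64x32) and a 256-byte pipe interleave, a surface
 * padded to 2048x2048 blocks gives 2048*2048/64 = 65536 elements, i.e. a
 * 32 KiB CMASK slice, with base_align = 8 * 256 = 2048 bytes.
 */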
705
706 surf->u.legacy.cmask_slice_tile_max = (width * height) / (128*128);
707 if (surf->u.legacy.cmask_slice_tile_max)
708 surf->u.legacy.cmask_slice_tile_max -= 1;
709
710 unsigned num_layers;
711 if (config->is_3d)
712 num_layers = config->info.depth;
713 else if (config->is_cube)
714 num_layers = 6;
715 else
716 num_layers = config->info.array_size;
717
718 surf->cmask_alignment = MAX2(256, base_align);
719 surf->cmask_slice_size = align(slice_bytes, base_align);
720 surf->cmask_size = surf->cmask_slice_size * num_layers;
721 }
722
723 /**
724 * Fill in the tiling information in \p surf based on the given surface config.
725 *
726 * The following fields of \p surf must be initialized by the caller:
727 * blk_w, blk_h, bpe, flags.
728 */
729 static int gfx6_compute_surface(ADDR_HANDLE addrlib,
730 const struct radeon_info *info,
731 const struct ac_surf_config *config,
732 enum radeon_surf_mode mode,
733 struct radeon_surf *surf)
734 {
735 unsigned level;
736 bool compressed;
737 ADDR_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
738 ADDR_COMPUTE_SURFACE_INFO_OUTPUT AddrSurfInfoOut = {0};
739 ADDR_COMPUTE_DCCINFO_INPUT AddrDccIn = {0};
740 ADDR_COMPUTE_DCCINFO_OUTPUT AddrDccOut = {0};
741 ADDR_COMPUTE_HTILE_INFO_INPUT AddrHtileIn = {0};
742 ADDR_COMPUTE_HTILE_INFO_OUTPUT AddrHtileOut = {0};
743 ADDR_TILEINFO AddrTileInfoIn = {0};
744 ADDR_TILEINFO AddrTileInfoOut = {0};
745 int r;
746
747 AddrSurfInfoIn.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);
748 AddrSurfInfoOut.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
749 AddrDccIn.size = sizeof(ADDR_COMPUTE_DCCINFO_INPUT);
750 AddrDccOut.size = sizeof(ADDR_COMPUTE_DCCINFO_OUTPUT);
751 AddrHtileIn.size = sizeof(ADDR_COMPUTE_HTILE_INFO_INPUT);
752 AddrHtileOut.size = sizeof(ADDR_COMPUTE_HTILE_INFO_OUTPUT);
753 AddrSurfInfoOut.pTileInfo = &AddrTileInfoOut;
754
755 compressed = surf->blk_w == 4 && surf->blk_h == 4;
756
757 /* MSAA requires 2D tiling. */
758 if (config->info.samples > 1)
759 mode = RADEON_SURF_MODE_2D;
760
761 /* DB doesn't support linear layouts. */
762 if (surf->flags & (RADEON_SURF_Z_OR_SBUFFER) &&
763 mode < RADEON_SURF_MODE_1D)
764 mode = RADEON_SURF_MODE_1D;
765
766 /* Set the requested tiling mode. */
767 switch (mode) {
768 case RADEON_SURF_MODE_LINEAR_ALIGNED:
769 AddrSurfInfoIn.tileMode = ADDR_TM_LINEAR_ALIGNED;
770 break;
771 case RADEON_SURF_MODE_1D:
772 AddrSurfInfoIn.tileMode = ADDR_TM_1D_TILED_THIN1;
773 break;
774 case RADEON_SURF_MODE_2D:
775 AddrSurfInfoIn.tileMode = ADDR_TM_2D_TILED_THIN1;
776 break;
777 default:
778 assert(0);
779 }
780
781 /* The format must be set correctly for the allocation of compressed
782 * textures to work. In other cases, setting the bpp is sufficient.
783 */
784 if (compressed) {
785 switch (surf->bpe) {
786 case 8:
787 AddrSurfInfoIn.format = ADDR_FMT_BC1;
788 break;
789 case 16:
790 AddrSurfInfoIn.format = ADDR_FMT_BC3;
791 break;
792 default:
793 assert(0);
794 }
795 }
796 else {
797 AddrDccIn.bpp = AddrSurfInfoIn.bpp = surf->bpe * 8;
798 }
799
800 AddrDccIn.numSamples = AddrSurfInfoIn.numSamples =
801 MAX2(1, config->info.samples);
802 AddrSurfInfoIn.tileIndex = -1;
803
804 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER)) {
805 AddrDccIn.numSamples = AddrSurfInfoIn.numFrags =
806 MAX2(1, config->info.storage_samples);
807 }
808
809 /* Set the micro tile type. */
810 if (surf->flags & RADEON_SURF_SCANOUT)
811 AddrSurfInfoIn.tileType = ADDR_DISPLAYABLE;
812 else if (surf->flags & RADEON_SURF_Z_OR_SBUFFER)
813 AddrSurfInfoIn.tileType = ADDR_DEPTH_SAMPLE_ORDER;
814 else
815 AddrSurfInfoIn.tileType = ADDR_NON_DISPLAYABLE;
816
817 AddrSurfInfoIn.flags.color = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
818 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
819 AddrSurfInfoIn.flags.cube = config->is_cube;
820 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
821 AddrSurfInfoIn.flags.pow2Pad = config->info.levels > 1;
822 AddrSurfInfoIn.flags.tcCompatible = (surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) != 0;
823
824 /* Only degrade the tile mode for space if TC-compatible HTILE hasn't been
825 * requested, because TC-compatible HTILE requires 2D tiling.
826 */
827 AddrSurfInfoIn.flags.opt4Space = !AddrSurfInfoIn.flags.tcCompatible &&
828 !AddrSurfInfoIn.flags.fmask &&
829 config->info.samples <= 1 &&
830 !(surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE);
831
832 /* DCC notes:
833 * - If we add MSAA support, keep in mind that CB can't decompress 8bpp
834 * with samples >= 4.
835 * - Mipmapped array textures have low performance (discovered by a closed
836 * driver team).
837 */
838 AddrSurfInfoIn.flags.dccCompatible =
839 info->chip_class >= GFX8 &&
840 info->has_graphics && /* disable DCC on compute-only chips */
841 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
842 !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
843 !compressed &&
844 ((config->info.array_size == 1 && config->info.depth == 1) ||
845 config->info.levels == 1);
846
847 AddrSurfInfoIn.flags.noStencil = (surf->flags & RADEON_SURF_SBUFFER) == 0;
848 AddrSurfInfoIn.flags.compressZ = !!(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
849
850 /* On GFX7-GFX8, the DB uses the same pitch and tile mode (except tilesplit)
851 * for Z and stencil. This can cause a number of problems which we work
852 * around here:
853 *
854 * - a depth part that is incompatible with mipmapped texturing
855 * - at least on Stoney, entirely incompatible Z/S aspects (e.g.
856 * incorrect tiling applied to the stencil part, stencil buffer
857 * memory accesses that go out of bounds) even without mipmapping
858 *
859 * Some piglit tests that are prone to different types of related
860 * failures:
861 * ./bin/ext_framebuffer_multisample-upsample 2 stencil
862 * ./bin/framebuffer-blit-levels {draw,read} stencil
863 * ./bin/ext_framebuffer_multisample-unaligned-blit N {depth,stencil} {msaa,upsample,downsample}
864 * ./bin/fbo-depth-array fs-writes-{depth,stencil} / {depth,stencil}-{clear,layered-clear,draw}
865 * ./bin/depthstencil-render-miplevels 1024 d=s=z24_s8
866 */
867 int stencil_tile_idx = -1;
868
869 if (AddrSurfInfoIn.flags.depth && !AddrSurfInfoIn.flags.noStencil &&
870 (config->info.levels > 1 || info->family == CHIP_STONEY)) {
871 /* Compute stencilTileIdx that is compatible with the (depth)
872 * tileIdx. This degrades the depth surface if necessary to
873 * ensure that a matching stencilTileIdx exists. */
874 AddrSurfInfoIn.flags.matchStencilTileCfg = 1;
875
876 /* Keep the depth mip-tail compatible with texturing. */
877 AddrSurfInfoIn.flags.noStencil = 1;
878 }
879
880 /* Set preferred macrotile parameters. This is usually required
881 * for shared resources. This is for 2D tiling only. */
882 if (AddrSurfInfoIn.tileMode >= ADDR_TM_2D_TILED_THIN1 &&
883 surf->u.legacy.bankw && surf->u.legacy.bankh &&
884 surf->u.legacy.mtilea && surf->u.legacy.tile_split) {
885 /* If any of these parameters are incorrect, the calculation
886 * will fail. */
887 AddrTileInfoIn.banks = surf->u.legacy.num_banks;
888 AddrTileInfoIn.bankWidth = surf->u.legacy.bankw;
889 AddrTileInfoIn.bankHeight = surf->u.legacy.bankh;
890 AddrTileInfoIn.macroAspectRatio = surf->u.legacy.mtilea;
891 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.tile_split;
892 AddrTileInfoIn.pipeConfig = surf->u.legacy.pipe_config + 1; /* +1 compared to GB_TILE_MODE */
893 AddrSurfInfoIn.flags.opt4Space = 0;
894 AddrSurfInfoIn.pTileInfo = &AddrTileInfoIn;
895
896 /* If AddrSurfInfoIn.pTileInfo is set, Addrlib doesn't set
897 * the tile index, because we are expected to know it if
898 * we know the other parameters.
899 *
900 * This is something that can easily be fixed in Addrlib.
901 * For now, just figure it out here.
902 * Note that only 2D_TILE_THIN1 is handled here.
903 */
904 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
905 assert(AddrSurfInfoIn.tileMode == ADDR_TM_2D_TILED_THIN1);
906
907 if (info->chip_class == GFX6) {
908 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE) {
909 if (surf->bpe == 2)
910 AddrSurfInfoIn.tileIndex = 11; /* 16bpp */
911 else
912 AddrSurfInfoIn.tileIndex = 12; /* 32bpp */
913 } else {
914 if (surf->bpe == 1)
915 AddrSurfInfoIn.tileIndex = 14; /* 8bpp */
916 else if (surf->bpe == 2)
917 AddrSurfInfoIn.tileIndex = 15; /* 16bpp */
918 else if (surf->bpe == 4)
919 AddrSurfInfoIn.tileIndex = 16; /* 32bpp */
920 else
921 AddrSurfInfoIn.tileIndex = 17; /* 64bpp (and 128bpp) */
922 }
923 } else {
924 /* GFX7 - GFX8 */
925 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE)
926 AddrSurfInfoIn.tileIndex = 10; /* 2D displayable */
927 else
928 AddrSurfInfoIn.tileIndex = 14; /* 2D non-displayable */
929
930 /* Addrlib doesn't set this if tileIndex is forced like above. */
931 AddrSurfInfoOut.macroModeIndex = cik_get_macro_tile_index(surf);
932 }
933 }
934
935 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
936 surf->num_dcc_levels = 0;
937 surf->surf_size = 0;
938 surf->dcc_size = 0;
939 surf->dcc_alignment = 1;
940 surf->htile_size = 0;
941 surf->htile_slice_size = 0;
942 surf->htile_alignment = 1;
943
944 const bool only_stencil = (surf->flags & RADEON_SURF_SBUFFER) &&
945 !(surf->flags & RADEON_SURF_ZBUFFER);
946
947 /* Calculate texture layout information. */
948 if (!only_stencil) {
949 for (level = 0; level < config->info.levels; level++) {
950 r = gfx6_compute_level(addrlib, config, surf, false, level, compressed,
951 &AddrSurfInfoIn, &AddrSurfInfoOut,
952 &AddrDccIn, &AddrDccOut, &AddrHtileIn, &AddrHtileOut);
953 if (r)
954 return r;
955
956 if (level > 0)
957 continue;
958
959 if (!AddrSurfInfoOut.tcCompatible) {
960 AddrSurfInfoIn.flags.tcCompatible = 0;
961 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
962 }
963
964 if (AddrSurfInfoIn.flags.matchStencilTileCfg) {
965 AddrSurfInfoIn.flags.matchStencilTileCfg = 0;
966 AddrSurfInfoIn.tileIndex = AddrSurfInfoOut.tileIndex;
967 stencil_tile_idx = AddrSurfInfoOut.stencilTileIdx;
968
969 assert(stencil_tile_idx >= 0);
970 }
971
972 r = gfx6_surface_settings(addrlib, info, config,
973 &AddrSurfInfoOut, surf);
974 if (r)
975 return r;
976 }
977 }
978
979 /* Calculate texture layout information for stencil. */
980 if (surf->flags & RADEON_SURF_SBUFFER) {
981 AddrSurfInfoIn.tileIndex = stencil_tile_idx;
982 AddrSurfInfoIn.bpp = 8;
983 AddrSurfInfoIn.flags.depth = 0;
984 AddrSurfInfoIn.flags.stencil = 1;
985 AddrSurfInfoIn.flags.tcCompatible = 0;
986 /* This will be ignored if AddrSurfInfoIn.pTileInfo is NULL. */
987 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.stencil_tile_split;
988
989 for (level = 0; level < config->info.levels; level++) {
990 r = gfx6_compute_level(addrlib, config, surf, true, level, compressed,
991 &AddrSurfInfoIn, &AddrSurfInfoOut,
992 &AddrDccIn, &AddrDccOut,
993 NULL, NULL);
994 if (r)
995 return r;
996
997 /* DB uses the depth pitch for both stencil and depth. */
998 if (!only_stencil) {
999 if (surf->u.legacy.stencil_level[level].nblk_x !=
1000 surf->u.legacy.level[level].nblk_x)
1001 surf->u.legacy.stencil_adjusted = true;
1002 } else {
1003 surf->u.legacy.level[level].nblk_x =
1004 surf->u.legacy.stencil_level[level].nblk_x;
1005 }
1006
1007 if (level == 0) {
1008 if (only_stencil) {
1009 r = gfx6_surface_settings(addrlib, info, config,
1010 &AddrSurfInfoOut, surf);
1011 if (r)
1012 return r;
1013 }
1014
1015 /* For 2D modes only. */
1016 if (AddrSurfInfoOut.tileMode >= ADDR_TM_2D_TILED_THIN1) {
1017 surf->u.legacy.stencil_tile_split =
1018 AddrSurfInfoOut.pTileInfo->tileSplitBytes;
1019 }
1020 }
1021 }
1022 }
1023
1024 /* Compute FMASK. */
1025 if (config->info.samples >= 2 && AddrSurfInfoIn.flags.color &&
1026 info->has_graphics && !(surf->flags & RADEON_SURF_NO_FMASK)) {
1027 ADDR_COMPUTE_FMASK_INFO_INPUT fin = {0};
1028 ADDR_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
1029 ADDR_TILEINFO fmask_tile_info = {};
1030
1031 fin.size = sizeof(fin);
1032 fout.size = sizeof(fout);
1033
1034 fin.tileMode = AddrSurfInfoOut.tileMode;
1035 fin.pitch = AddrSurfInfoOut.pitch;
1036 fin.height = config->info.height;
1037 fin.numSlices = AddrSurfInfoIn.numSlices;
1038 fin.numSamples = AddrSurfInfoIn.numSamples;
1039 fin.numFrags = AddrSurfInfoIn.numFrags;
1040 fin.tileIndex = -1;
1041 fout.pTileInfo = &fmask_tile_info;
1042
1043 r = AddrComputeFmaskInfo(addrlib, &fin, &fout);
1044 if (r)
1045 return r;
1046
1047 surf->fmask_size = fout.fmaskBytes;
1048 surf->fmask_alignment = fout.baseAlign;
1049 surf->fmask_tile_swizzle = 0;
1050
1051 surf->u.legacy.fmask.slice_tile_max =
1052 (fout.pitch * fout.height) / 64;
1053 if (surf->u.legacy.fmask.slice_tile_max)
1054 surf->u.legacy.fmask.slice_tile_max -= 1;
1055
1056 surf->u.legacy.fmask.tiling_index = fout.tileIndex;
1057 surf->u.legacy.fmask.bankh = fout.pTileInfo->bankHeight;
1058 surf->u.legacy.fmask.pitch_in_pixels = fout.pitch;
1059 surf->u.legacy.fmask.slice_size = fout.sliceSize;
1060
1061 /* Compute tile swizzle for FMASK. */
1062 if (config->info.fmask_surf_index &&
1063 !(surf->flags & RADEON_SURF_SHAREABLE)) {
1064 ADDR_COMPUTE_BASE_SWIZZLE_INPUT xin = {0};
1065 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT xout = {0};
1066
1067 xin.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
1068 xout.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
1069
1070 /* This counter starts from 1 instead of 0. */
1071 xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
1072 xin.tileIndex = fout.tileIndex;
1073 xin.macroModeIndex = fout.macroModeIndex;
1074 xin.pTileInfo = fout.pTileInfo;
1075 xin.tileMode = fin.tileMode;
1076
1077 int r = AddrComputeBaseSwizzle(addrlib, &xin, &xout);
1078 if (r != ADDR_OK)
1079 return r;
1080
1081 assert(xout.tileSwizzle <=
1082 u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1083 surf->fmask_tile_swizzle = xout.tileSwizzle;
1084 }
1085 }
1086
1087 /* Recalculate the whole DCC miptree size including disabled levels.
1088 * This is what addrlib does, but calling addrlib would be a lot more
1089 * complicated.
1090 */
1091 if (surf->dcc_size && config->info.levels > 1) {
1092 /* The smallest miplevels that are never compressed by DCC
1093 * still read the DCC buffer via TC if the base level uses DCC,
1094 * and for some reason the DCC buffer needs to be larger if
1095 * the miptree uses non-zero tile_swizzle. Otherwise there are
1096 * VM faults.
1097 *
1098 * "dcc_alignment * 4" was determined by trial and error.
1099 */
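/* surf_size >> 8 reserves one DCC byte per 256 bytes of color data, which
 * corresponds to DCC's metadata granularity of 1 byte per 256-byte block.
 */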
1100 surf->dcc_size = align64(surf->surf_size >> 8,
1101 surf->dcc_alignment * 4);
1102 }
1103
1104 /* Make sure HTILE covers the whole miptree, because the shader reads
1105 * TC-compatible HTILE even for levels where it's disabled by DB.
1106 */
1107 if (surf->htile_size && config->info.levels > 1 &&
1108 surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) {
1109 /* MSAA can't occur with levels > 1, so ignore the sample count. */
1110 const unsigned total_pixels = surf->surf_size / surf->bpe;
1111 const unsigned htile_block_size = 8 * 8;
1112 const unsigned htile_element_size = 4;
1113
1114 surf->htile_size = (total_pixels / htile_block_size) *
1115 htile_element_size;
1116 surf->htile_size = align(surf->htile_size, surf->htile_alignment);
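/* Example: at 4 bytes/pixel, a 64 MiB miptree is 16M pixels; one 4-byte
 * HTILE element per 8x8 block gives 16M / 64 * 4 = 1 MiB of HTILE before
 * the alignment above.
 */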
1117 } else if (!surf->htile_size) {
1118 /* Unset this if HTILE is not present. */
1119 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1120 }
1121
1122 surf->is_linear = surf->u.legacy.level[0].mode == RADEON_SURF_MODE_LINEAR_ALIGNED;
1123 surf->is_displayable = surf->is_linear ||
1124 surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY ||
1125 surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER;
1126
1127 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
1128 * used at the same time. This case is not currently expected to occur
1129 * because we don't use the rotated mode. Enforce this restriction on all chips
1130 * to facilitate testing.
1131 */
1132 if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER) {
1133 assert(!"rotate micro tile mode is unsupported");
1134 return ADDR_ERROR;
1135 }
1136
1137 ac_compute_cmask(info, config, surf);
1138 return 0;
1139 }
1140
1141 /* This is only called when expecting a tiled layout. */
1142 static int
1143 gfx9_get_preferred_swizzle_mode(ADDR_HANDLE addrlib,
1144 struct radeon_surf *surf,
1145 ADDR2_COMPUTE_SURFACE_INFO_INPUT *in,
1146 bool is_fmask, AddrSwizzleMode *swizzle_mode)
1147 {
1148 ADDR_E_RETURNCODE ret;
1149 ADDR2_GET_PREFERRED_SURF_SETTING_INPUT sin = {0};
1150 ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT sout = {0};
1151
1152 sin.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_INPUT);
1153 sout.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT);
1154
1155 sin.flags = in->flags;
1156 sin.resourceType = in->resourceType;
1157 sin.format = in->format;
1158 sin.resourceLoction = ADDR_RSRC_LOC_INVIS;
1159 /* TODO: We could allow some of these: */
1160 sin.forbiddenBlock.micro = 1; /* don't allow the 256B swizzle modes */
1161 sin.forbiddenBlock.var = 1; /* don't allow the variable-sized swizzle modes */
1162 sin.bpp = in->bpp;
1163 sin.width = in->width;
1164 sin.height = in->height;
1165 sin.numSlices = in->numSlices;
1166 sin.numMipLevels = in->numMipLevels;
1167 sin.numSamples = in->numSamples;
1168 sin.numFrags = in->numFrags;
1169
1170 if (is_fmask) {
1171 sin.flags.display = 0;
1172 sin.flags.color = 0;
1173 sin.flags.fmask = 1;
1174 }
1175
1176 if (surf->flags & RADEON_SURF_FORCE_MICRO_TILE_MODE) {
1177 sin.forbiddenBlock.linear = 1;
1178
1179 if (surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY)
1180 sin.preferredSwSet.sw_D = 1;
1181 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_STANDARD)
1182 sin.preferredSwSet.sw_S = 1;
1183 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_DEPTH)
1184 sin.preferredSwSet.sw_Z = 1;
1185 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER)
1186 sin.preferredSwSet.sw_R = 1;
1187 }
1188
1189 ret = Addr2GetPreferredSurfaceSetting(addrlib, &sin, &sout);
1190 if (ret != ADDR_OK)
1191 return ret;
1192
1193 *swizzle_mode = sout.swizzleMode;
1194 return 0;
1195 }
1196
1197 static bool is_dcc_supported_by_CB(const struct radeon_info *info, unsigned sw_mode)
1198 {
1199 if (info->chip_class >= GFX10)
1200 return sw_mode == ADDR_SW_64KB_Z_X || sw_mode == ADDR_SW_64KB_R_X;
1201
1202 return sw_mode != ADDR_SW_LINEAR;
1203 }
1204
1205 ASSERTED static bool is_dcc_supported_by_L2(const struct radeon_info *info,
1206 const struct radeon_surf *surf)
1207 {
1208 if (info->chip_class <= GFX9) {
1209 /* Only independent 64B blocks are supported. */
1210 return surf->u.gfx9.dcc.independent_64B_blocks &&
1211 !surf->u.gfx9.dcc.independent_128B_blocks &&
1212 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B;
1213 }
1214
1215 if (info->family == CHIP_NAVI10) {
1216 /* Only independent 128B blocks are supported. */
1217 return !surf->u.gfx9.dcc.independent_64B_blocks &&
1218 surf->u.gfx9.dcc.independent_128B_blocks &&
1219 surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B;
1220 }
1221
1222 if (info->family == CHIP_NAVI12 ||
1223 info->family == CHIP_NAVI14) {
1224 /* Either 64B or 128B can be used, but not both.
1225 * If 64B is used, DCC image stores are unsupported.
1226 */
1227 return surf->u.gfx9.dcc.independent_64B_blocks !=
1228 surf->u.gfx9.dcc.independent_128B_blocks &&
1229 (!surf->u.gfx9.dcc.independent_64B_blocks ||
1230 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B) &&
1231 (!surf->u.gfx9.dcc.independent_128B_blocks ||
1232 surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B);
1233 }
1234
1235 /* 128B is recommended, but 64B can be set too if needed for 4K by DCN.
1236 * Since there is no reason to ever disable 128B, require it.
1237 * DCC image stores are always supported.
1238 */
1239 return surf->u.gfx9.dcc.independent_128B_blocks &&
1240 surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B;
1241 }
1242
1243 static bool is_dcc_supported_by_DCN(const struct radeon_info *info,
1244 const struct ac_surf_config *config,
1245 const struct radeon_surf *surf,
1246 bool rb_aligned, bool pipe_aligned)
1247 {
1248 if (!info->use_display_dcc_unaligned &&
1249 !info->use_display_dcc_with_retile_blit)
1250 return false;
1251
1252 /* 16bpp and 64bpp are more complicated, so they are disallowed for now. */
1253 if (surf->bpe != 4)
1254 return false;
1255
1256 /* Handle unaligned DCC. */
1257 if (info->use_display_dcc_unaligned &&
1258 (rb_aligned || pipe_aligned))
1259 return false;
1260
1261 switch (info->chip_class) {
1262 case GFX9:
1263 /* There are more constraints, but we always set
1264 * INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B,
1265 * which always works.
1266 */
1267 assert(surf->u.gfx9.dcc.independent_64B_blocks &&
1268 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B);
1269 return true;
1270 case GFX10:
1271 case GFX10_3:
1272 /* DCN requires INDEPENDENT_128B_BLOCKS = 0 only on Navi1x. */
1273 if (info->chip_class == GFX10 &&
1274 surf->u.gfx9.dcc.independent_128B_blocks)
1275 return false;
1276
1277 /* For 4K, DCN requires INDEPENDENT_64B_BLOCKS = 1. */
1278 return ((config->info.width <= 2560 &&
1279 config->info.height <= 2560) ||
1280 (surf->u.gfx9.dcc.independent_64B_blocks &&
1281 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B));
1282 default:
1283 unreachable("unhandled chip");
1284 return false;
1285 }
1286 }
1287
1288 static int gfx9_compute_miptree(struct ac_addrlib *addrlib,
1289 const struct radeon_info *info,
1290 const struct ac_surf_config *config,
1291 struct radeon_surf *surf, bool compressed,
1292 ADDR2_COMPUTE_SURFACE_INFO_INPUT *in)
1293 {
1294 ADDR2_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {};
1295 ADDR2_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
1296 ADDR_E_RETURNCODE ret;
1297
1298 out.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_OUTPUT);
1299 out.pMipInfo = mip_info;
1300
1301 ret = Addr2ComputeSurfaceInfo(addrlib->handle, in, &out);
1302 if (ret != ADDR_OK)
1303 return ret;
1304
1305 if (in->flags.stencil) {
1306 surf->u.gfx9.stencil.swizzle_mode = in->swizzleMode;
1307 surf->u.gfx9.stencil.epitch = out.epitchIsHeight ? out.mipChainHeight - 1 :
1308 out.mipChainPitch - 1;
1309 surf->surf_alignment = MAX2(surf->surf_alignment, out.baseAlign);
1310 surf->u.gfx9.stencil_offset = align(surf->surf_size, out.baseAlign);
1311 surf->surf_size = surf->u.gfx9.stencil_offset + out.surfSize;
1312 return 0;
1313 }
1314
1315 surf->u.gfx9.surf.swizzle_mode = in->swizzleMode;
1316 surf->u.gfx9.surf.epitch = out.epitchIsHeight ? out.mipChainHeight - 1 :
1317 out.mipChainPitch - 1;
1318
1319 /* CMASK fast clear uses these even if FMASK isn't allocated.
1320 * FMASK only supports the Z swizzle modes, whose numbers are multiples of 4.
1321 */
1322 surf->u.gfx9.fmask.swizzle_mode = surf->u.gfx9.surf.swizzle_mode & ~0x3;
1323 surf->u.gfx9.fmask.epitch = surf->u.gfx9.surf.epitch;
1324
1325 surf->u.gfx9.surf_slice_size = out.sliceSize;
1326 surf->u.gfx9.surf_pitch = out.pitch;
1327 if (!compressed && surf->blk_w > 1 && out.pitch == out.pixelPitch &&
1328 surf->u.gfx9.surf.swizzle_mode == ADDR_SW_LINEAR) {
1329 /* Adjust surf_pitch to be in element units rather than pixels, and
1330 * align it to the 256-byte linear alignment that GFX9 requires. */
1331 surf->u.gfx9.surf_pitch =
1332 align(surf->u.gfx9.surf_pitch / surf->blk_w, 256 / surf->bpe);
1333 surf->u.gfx9.surf.epitch = MAX2(surf->u.gfx9.surf.epitch,
1334 surf->u.gfx9.surf_pitch * surf->blk_w - 1);
1335 }
1336 surf->u.gfx9.surf_height = out.height;
1337 surf->surf_size = out.surfSize;
1338 surf->surf_alignment = out.baseAlign;
1339
1340 if (in->swizzleMode == ADDR_SW_LINEAR) {
1341 for (unsigned i = 0; i < in->numMipLevels; i++) {
1342 surf->u.gfx9.offset[i] = mip_info[i].offset;
1343 surf->u.gfx9.pitch[i] = mip_info[i].pitch;
1344 }
1345 }
1346
1347 if (in->flags.depth) {
1348 assert(in->swizzleMode != ADDR_SW_LINEAR);
1349
1350 if (surf->flags & RADEON_SURF_NO_HTILE)
1351 return 0;
1352
1353 /* HTILE */
1354 ADDR2_COMPUTE_HTILE_INFO_INPUT hin = {0};
1355 ADDR2_COMPUTE_HTILE_INFO_OUTPUT hout = {0};
1356
1357 hin.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_INPUT);
1358 hout.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_OUTPUT);
1359
1360 assert(in->flags.metaPipeUnaligned == 0);
1361 assert(in->flags.metaRbUnaligned == 0);
1362
1363 hin.hTileFlags.pipeAligned = 1;
1364 hin.hTileFlags.rbAligned = 1;
1365 hin.depthFlags = in->flags;
1366 hin.swizzleMode = in->swizzleMode;
1367 hin.unalignedWidth = in->width;
1368 hin.unalignedHeight = in->height;
1369 hin.numSlices = in->numSlices;
1370 hin.numMipLevels = in->numMipLevels;
1371 hin.firstMipIdInTail = out.firstMipIdInTail;
1372
1373 ret = Addr2ComputeHtileInfo(addrlib->handle, &hin, &hout);
1374 if (ret != ADDR_OK)
1375 return ret;
1376
1377 surf->htile_size = hout.htileBytes;
1378 surf->htile_slice_size = hout.sliceSize;
1379 surf->htile_alignment = hout.baseAlign;
1380 return 0;
1381 }
1382
1383 {
1384 /* Compute tile swizzle for the color surface.
1385 * All *_X and *_T modes can use the swizzle.
1386 */
1387 if (config->info.surf_index &&
1388 in->swizzleMode >= ADDR_SW_64KB_Z_T &&
1389 !out.mipChainInTail &&
1390 !(surf->flags & RADEON_SURF_SHAREABLE) &&
1391 !in->flags.display) {
1392 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
1393 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
1394
1395 xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
1396 xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
1397
1398 xin.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
1399 xin.flags = in->flags;
1400 xin.swizzleMode = in->swizzleMode;
1401 xin.resourceType = in->resourceType;
1402 xin.format = in->format;
1403 xin.numSamples = in->numSamples;
1404 xin.numFrags = in->numFrags;
1405
1406 ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
1407 if (ret != ADDR_OK)
1408 return ret;
1409
1410 assert(xout.pipeBankXor <=
1411 u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1412 surf->tile_swizzle = xout.pipeBankXor;
1413 }
1414
1415 /* DCC */
1416 if (info->has_graphics &&
1417 !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
1418 !compressed &&
1419 is_dcc_supported_by_CB(info, in->swizzleMode) &&
1420 (!in->flags.display ||
1421 is_dcc_supported_by_DCN(info, config, surf,
1422 !in->flags.metaRbUnaligned,
1423 !in->flags.metaPipeUnaligned))) {
1424 ADDR2_COMPUTE_DCCINFO_INPUT din = {0};
1425 ADDR2_COMPUTE_DCCINFO_OUTPUT dout = {0};
1426 ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {};
1427
1428 din.size = sizeof(ADDR2_COMPUTE_DCCINFO_INPUT);
1429 dout.size = sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT);
1430 dout.pMipInfo = meta_mip_info;
1431
1432 din.dccKeyFlags.pipeAligned = !in->flags.metaPipeUnaligned;
1433 din.dccKeyFlags.rbAligned = !in->flags.metaRbUnaligned;
1434 din.colorFlags = in->flags;
1435 din.resourceType = in->resourceType;
1436 din.swizzleMode = in->swizzleMode;
1437 din.bpp = in->bpp;
1438 din.unalignedWidth = in->width;
1439 din.unalignedHeight = in->height;
1440 din.numSlices = in->numSlices;
1441 din.numFrags = in->numFrags;
1442 din.numMipLevels = in->numMipLevels;
1443 din.dataSurfaceSize = out.surfSize;
1444 din.firstMipIdInTail = out.firstMipIdInTail;
1445
1446 ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
1447 if (ret != ADDR_OK)
1448 return ret;
1449
1450 surf->u.gfx9.dcc.rb_aligned = din.dccKeyFlags.rbAligned;
1451 surf->u.gfx9.dcc.pipe_aligned = din.dccKeyFlags.pipeAligned;
1452 surf->u.gfx9.dcc_block_width = dout.compressBlkWidth;
1453 surf->u.gfx9.dcc_block_height = dout.compressBlkHeight;
1454 surf->u.gfx9.dcc_block_depth = dout.compressBlkDepth;
1455 surf->dcc_size = dout.dccRamSize;
1456 surf->dcc_alignment = dout.dccRamBaseAlign;
1457 surf->num_dcc_levels = in->numMipLevels;
1458
1459 /* Disable DCC for levels that are in the mip tail.
1460 *
1461 * There are two issues that this is intended to
1462 * address:
1463 *
1464 * 1. Multiple mip levels may share a cache line. This
1465 * can lead to corruption when switching between
1466 * rendering to different mip levels because the
1467 * RBs don't maintain coherency.
1468 *
1469 * 2. Texturing with metadata after rendering sometimes
1470 * fails with corruption, probably for a similar
1471 * reason.
1472 *
1473 * Working around these issues for all levels in the
1474 * mip tail may be overly conservative, but it's what
1475 * Vulkan does.
1476 *
1477 * Alternative solutions that also work but are worse:
1478 * - Disable DCC entirely.
1479 * - Flush TC L2 after rendering.
1480 */
1481 for (unsigned i = 0; i < in->numMipLevels; i++) {
1482 if (meta_mip_info[i].inMiptail) {
1483 /* GFX10 can only compress the first level
1484 * in the mip tail.
1485 *
1486 * TODO: Try to do the same thing for gfx9
1487 * if there are no regressions.
1488 */
1489 if (info->chip_class >= GFX10)
1490 surf->num_dcc_levels = i + 1;
1491 else
1492 surf->num_dcc_levels = i;
1493 break;
1494 }
1495 }
1496
1497 if (!surf->num_dcc_levels)
1498 surf->dcc_size = 0;
1499
1500 surf->u.gfx9.display_dcc_size = surf->dcc_size;
1501 surf->u.gfx9.display_dcc_alignment = surf->dcc_alignment;
1502 surf->u.gfx9.display_dcc_pitch_max = dout.pitch - 1;
1503
1504 /* Compute displayable DCC. */
1505 if (in->flags.display &&
1506 surf->num_dcc_levels &&
1507 info->use_display_dcc_with_retile_blit) {
1508 /* Compute displayable DCC info. */
1509 din.dccKeyFlags.pipeAligned = 0;
1510 din.dccKeyFlags.rbAligned = 0;
1511
1512 assert(din.numSlices == 1);
1513 assert(din.numMipLevels == 1);
1514 assert(din.numFrags == 1);
1515 assert(surf->tile_swizzle == 0);
1516 assert(surf->u.gfx9.dcc.pipe_aligned ||
1517 surf->u.gfx9.dcc.rb_aligned);
1518
1519 ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
1520 if (ret != ADDR_OK)
1521 return ret;
1522
1523 surf->u.gfx9.display_dcc_size = dout.dccRamSize;
1524 surf->u.gfx9.display_dcc_alignment = dout.dccRamBaseAlign;
1525 surf->u.gfx9.display_dcc_pitch_max = dout.pitch - 1;
1526 assert(surf->u.gfx9.display_dcc_size <= surf->dcc_size);
1527
1528 surf->u.gfx9.dcc_retile_use_uint16 =
1529 surf->u.gfx9.display_dcc_size <= UINT16_MAX + 1 &&
1530 surf->dcc_size <= UINT16_MAX + 1;
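/* When both DCC buffers are at most 64 KiB, every byte offset fits in
 * 16 bits, so the retile map can use uint16 entries instead of uint32,
 * halving its size (see the use_uint16 handling in
 * ac_compute_dcc_retile_map).
 */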
1531
1532 /* Align the retile map size to get more hash table hits and
1533 * decrease the maximum memory footprint when all retile maps
1534 * are cached in the hash table.
1535 */
1536 unsigned retile_dim[2] = {in->width, in->height};
1537
1538 for (unsigned i = 0; i < 2; i++) {
1539 /* Increase the alignment as the size increases.
1540 * Greater alignment increases retile compute work,
1541 * but decreases maximum memory footprint for the cache.
1542 *
1543 * With this alignment, the worst case memory footprint of
1544 * the cache is:
1545 * 1920x1080: 55 MB
1546 * 2560x1440: 99 MB
1547 * 3840x2160: 305 MB
1548 *
1549 * The worst case size in MB can be computed in Haskell as follows:
1550 * (sum (map get_retile_size (map get_dcc_size (deduplicate (map align_pair
1551 * [(i*16,j*16) | i <- [1..maxwidth`div`16], j <- [1..maxheight`div`16]]))))) `div` 1024^2
1552 * where
1553 * alignment x = if x <= 512 then 16 else if x <= 1024 then 32 else if x <= 2048 then 64 else 128
1554 * align x = (x + (alignment x) - 1) `div` (alignment x) * (alignment x)
1555 * align_pair e = (align (fst e), align (snd e))
1556 * deduplicate = map head . groupBy (\ a b -> ((fst a) == (fst b)) && ((snd a) == (snd b))) . sortBy compare
1557 * get_dcc_size e = ((fst e) * (snd e) * bpp) `div` 256
1558 * get_retile_size dcc_size = dcc_size * 2 * (if dcc_size <= 2^16 then 2 else 4)
1559 * bpp = 4; maxwidth = 3840; maxheight = 2160
1560 */
1561 if (retile_dim[i] <= 512)
1562 retile_dim[i] = align(retile_dim[i], 16);
1563 else if (retile_dim[i] <= 1024)
1564 retile_dim[i] = align(retile_dim[i], 32);
1565 else if (retile_dim[i] <= 2048)
1566 retile_dim[i] = align(retile_dim[i], 64);
1567 else
1568 retile_dim[i] = align(retile_dim[i], 128);
1569
1570 /* Don't align more than the DCC pixel alignment. */
1571 assert(dout.metaBlkWidth >= 128 && dout.metaBlkHeight >= 128);
1572 }
1573
1574 surf->u.gfx9.dcc_retile_num_elements =
1575 DIV_ROUND_UP(retile_dim[0], dout.compressBlkWidth) *
1576 DIV_ROUND_UP(retile_dim[1], dout.compressBlkHeight) * 2;
1577 /* Align the size to 4 (for the compute shader). */
1578 surf->u.gfx9.dcc_retile_num_elements =
1579 align(surf->u.gfx9.dcc_retile_num_elements, 4);
1580
1581 if (!(surf->flags & RADEON_SURF_IMPORTED)) {
1582 /* Compute address mapping from non-displayable to displayable DCC. */
1583 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin;
1584 memset(&addrin, 0, sizeof(addrin));
1585 addrin.size = sizeof(addrin);
1586 addrin.swizzleMode = din.swizzleMode;
1587 addrin.resourceType = din.resourceType;
1588 addrin.bpp = din.bpp;
1589 addrin.numSlices = 1;
1590 addrin.numMipLevels = 1;
1591 addrin.numFrags = 1;
1592 addrin.pitch = dout.pitch;
1593 addrin.height = dout.height;
1594 addrin.compressBlkWidth = dout.compressBlkWidth;
1595 addrin.compressBlkHeight = dout.compressBlkHeight;
1596 addrin.compressBlkDepth = dout.compressBlkDepth;
1597 addrin.metaBlkWidth = dout.metaBlkWidth;
1598 addrin.metaBlkHeight = dout.metaBlkHeight;
1599 addrin.metaBlkDepth = dout.metaBlkDepth;
1600 addrin.dccRamSliceSize = 0; /* Don't care for non-layered images. */
1601
1602 surf->u.gfx9.dcc_retile_map =
1603 ac_compute_dcc_retile_map(addrlib, info,
1604 retile_dim[0], retile_dim[1],
1605 surf->u.gfx9.dcc.rb_aligned,
1606 surf->u.gfx9.dcc.pipe_aligned,
1607 surf->u.gfx9.dcc_retile_use_uint16,
1608 surf->u.gfx9.dcc_retile_num_elements,
1609 &addrin);
1610 if (!surf->u.gfx9.dcc_retile_map)
1611 return ADDR_OUTOFMEMORY;
1612 }
1613 }
1614 }
1615
1616 /* FMASK */
1617 if (in->numSamples > 1 && info->has_graphics &&
1618 !(surf->flags & RADEON_SURF_NO_FMASK)) {
1619 ADDR2_COMPUTE_FMASK_INFO_INPUT fin = {0};
1620 ADDR2_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
1621
1622 fin.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_INPUT);
1623 fout.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_OUTPUT);
1624
1625 ret = gfx9_get_preferred_swizzle_mode(addrlib->handle, surf, in,
1626 true, &fin.swizzleMode);
1627 if (ret != ADDR_OK)
1628 return ret;
1629
1630 fin.unalignedWidth = in->width;
1631 fin.unalignedHeight = in->height;
1632 fin.numSlices = in->numSlices;
1633 fin.numSamples = in->numSamples;
1634 fin.numFrags = in->numFrags;
1635
1636 ret = Addr2ComputeFmaskInfo(addrlib->handle, &fin, &fout);
1637 if (ret != ADDR_OK)
1638 return ret;
1639
1640 surf->u.gfx9.fmask.swizzle_mode = fin.swizzleMode;
1641 surf->u.gfx9.fmask.epitch = fout.pitch - 1;
1642 surf->fmask_size = fout.fmaskBytes;
1643 surf->fmask_alignment = fout.baseAlign;
1644
1645 /* Compute tile swizzle for the FMASK surface. */
1646 if (config->info.fmask_surf_index &&
1647 fin.swizzleMode >= ADDR_SW_64KB_Z_T &&
1648 !(surf->flags & RADEON_SURF_SHAREABLE)) {
1649 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
1650 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
1651
1652 xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
1653 xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
1654
1655 /* This counter starts from 1 instead of 0. */
1656 xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
1657 xin.flags = in->flags;
1658 xin.swizzleMode = fin.swizzleMode;
1659 xin.resourceType = in->resourceType;
1660 xin.format = in->format;
1661 xin.numSamples = in->numSamples;
1662 xin.numFrags = in->numFrags;
1663
1664 ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
1665 if (ret != ADDR_OK)
1666 return ret;
1667
1668 assert(xout.pipeBankXor <=
1669 u_bit_consecutive(0, sizeof(surf->fmask_tile_swizzle) * 8));
1670 surf->fmask_tile_swizzle = xout.pipeBankXor;
1671 }
1672 }
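/* Editor's sketch: p_atomic_inc_return() increments and then returns the new
 * value, which is why the surfIndex counter above starts from 1 rather than 0.
 * The local counter below is hypothetical, for illustration only.
 */
#if 0
uint32_t hypothetical_counter = 0;
assert(p_atomic_inc_return(&hypothetical_counter) == 1); /* first FMASK surface */
assert(p_atomic_inc_return(&hypothetical_counter) == 2); /* second FMASK surface */
#endif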
1673
1674 /* CMASK -- on GFX10 only for FMASK */
1675 if (in->swizzleMode != ADDR_SW_LINEAR &&
1676 in->resourceType == ADDR_RSRC_TEX_2D &&
1677 ((info->chip_class <= GFX9 &&
1678 in->numSamples == 1 &&
1679 in->flags.metaPipeUnaligned == 0 &&
1680 in->flags.metaRbUnaligned == 0) ||
1681 (surf->fmask_size && in->numSamples >= 2))) {
1682 ADDR2_COMPUTE_CMASK_INFO_INPUT cin = {0};
1683 ADDR2_COMPUTE_CMASK_INFO_OUTPUT cout = {0};
1684
1685 cin.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_INPUT);
1686 cout.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_OUTPUT);
1687
1688 assert(in->flags.metaPipeUnaligned == 0);
1689 assert(in->flags.metaRbUnaligned == 0);
1690
1691 cin.cMaskFlags.pipeAligned = 1;
1692 cin.cMaskFlags.rbAligned = 1;
1693 cin.colorFlags = in->flags;
1694 cin.resourceType = in->resourceType;
1695 cin.unalignedWidth = in->width;
1696 cin.unalignedHeight = in->height;
1697 cin.numSlices = in->numSlices;
1698
1699 if (in->numSamples > 1)
1700 cin.swizzleMode = surf->u.gfx9.fmask.swizzle_mode;
1701 else
1702 cin.swizzleMode = in->swizzleMode;
1703
1704 ret = Addr2ComputeCmaskInfo(addrlib->handle, &cin, &cout);
1705 if (ret != ADDR_OK)
1706 return ret;
1707
1708 surf->cmask_size = cout.cmaskBytes;
1709 surf->cmask_alignment = cout.baseAlign;
1710 }
1711 }
1712
1713 return 0;
1714 }
1715
1716 static int gfx9_compute_surface(struct ac_addrlib *addrlib,
1717 const struct radeon_info *info,
1718 const struct ac_surf_config *config,
1719 enum radeon_surf_mode mode,
1720 struct radeon_surf *surf)
1721 {
1722 bool compressed;
1723 ADDR2_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
1724 int r;
1725
1726 AddrSurfInfoIn.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_INPUT);
1727
1728 compressed = surf->blk_w == 4 && surf->blk_h == 4;
1729
1730 /* The format must be set correctly for the allocation of compressed
1731 * textures to work. In other cases, setting the bpp is sufficient. */
1732 if (compressed) {
1733 switch (surf->bpe) {
1734 case 8:
1735 AddrSurfInfoIn.format = ADDR_FMT_BC1;
1736 break;
1737 case 16:
1738 AddrSurfInfoIn.format = ADDR_FMT_BC3;
1739 break;
1740 default:
1741 assert(0);
1742 }
1743 } else {
1744 switch (surf->bpe) {
1745 case 1:
1746 assert(!(surf->flags & RADEON_SURF_ZBUFFER));
1747 AddrSurfInfoIn.format = ADDR_FMT_8;
1748 break;
1749 case 2:
1750 assert(surf->flags & RADEON_SURF_ZBUFFER ||
1751 !(surf->flags & RADEON_SURF_SBUFFER));
1752 AddrSurfInfoIn.format = ADDR_FMT_16;
1753 break;
1754 case 4:
1755 assert(surf->flags & RADEON_SURF_ZBUFFER ||
1756 !(surf->flags & RADEON_SURF_SBUFFER));
1757 AddrSurfInfoIn.format = ADDR_FMT_32;
1758 break;
1759 case 8:
1760 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1761 AddrSurfInfoIn.format = ADDR_FMT_32_32;
1762 break;
1763 case 12:
1764 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1765 AddrSurfInfoIn.format = ADDR_FMT_32_32_32;
1766 break;
1767 case 16:
1768 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1769 AddrSurfInfoIn.format = ADDR_FMT_32_32_32_32;
1770 break;
1771 default:
1772 assert(0);
1773 }
1774 AddrSurfInfoIn.bpp = surf->bpe * 8;
1775 }
1776
1777 bool is_color_surface = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
1778 AddrSurfInfoIn.flags.color = is_color_surface &&
1779 !(surf->flags & RADEON_SURF_NO_RENDER_TARGET);
1780 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
1781 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
1782 /* flags.texture currently refers to TC-compatible HTILE */
1783 AddrSurfInfoIn.flags.texture = is_color_surface ||
1784 surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE;
1785 AddrSurfInfoIn.flags.opt4space = 1;
1786
1787 AddrSurfInfoIn.numMipLevels = config->info.levels;
1788 AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
1789 AddrSurfInfoIn.numFrags = AddrSurfInfoIn.numSamples;
1790
1791 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER))
1792 AddrSurfInfoIn.numFrags = MAX2(1, config->info.storage_samples);
1793
1794 /* GFX9 doesn't support 1D depth textures, so allocate all 1D textures
1795 * as 2D to avoid needing shader variants for 1D vs 2D; all shaders
1796 * must therefore sample 1D textures as 2D. */
1797 if (config->is_3d)
1798 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_3D;
1799 else if (info->chip_class != GFX9 && config->is_1d)
1800 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_1D;
1801 else
1802 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_2D;
1803
1804 AddrSurfInfoIn.width = config->info.width;
1805 AddrSurfInfoIn.height = config->info.height;
1806
1807 if (config->is_3d)
1808 AddrSurfInfoIn.numSlices = config->info.depth;
1809 else if (config->is_cube)
1810 AddrSurfInfoIn.numSlices = 6;
1811 else
1812 AddrSurfInfoIn.numSlices = config->info.array_size;
1813
1814 /* This is propagated to DCC. It must be 0 for HTILE and CMASK. */
1815 AddrSurfInfoIn.flags.metaPipeUnaligned = 0;
1816 AddrSurfInfoIn.flags.metaRbUnaligned = 0;
1817
1818 /* Optimal values for the L2 cache. */
1819 if (info->chip_class == GFX9) {
1820 surf->u.gfx9.dcc.independent_64B_blocks = 1;
1821 surf->u.gfx9.dcc.independent_128B_blocks = 0;
1822 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1823 } else if (info->chip_class >= GFX10) {
1824 surf->u.gfx9.dcc.independent_64B_blocks = 0;
1825 surf->u.gfx9.dcc.independent_128B_blocks = 1;
1826 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
1827 }
1828
1829 if (AddrSurfInfoIn.flags.display) {
1830 /* The display hardware can only read DCC with RB_ALIGNED=0 and
1831 * PIPE_ALIGNED=0. PIPE_ALIGNED really means L2CACHE_ALIGNED.
1832 *
1833 * The CB block requires RB_ALIGNED=1 except on chips with only 1 RB.
1834 * PIPE_ALIGNED is optional, but PIPE_ALIGNED=0 requires L2 flushes
1835 * after rendering, so PIPE_ALIGNED=1 is recommended.
1836 */
1837 if (info->use_display_dcc_unaligned) {
1838 AddrSurfInfoIn.flags.metaPipeUnaligned = 1;
1839 AddrSurfInfoIn.flags.metaRbUnaligned = 1;
1840 }
1841
1842 /* Adjust DCC settings to meet DCN requirements. */
1843 if (info->use_display_dcc_unaligned ||
1844 info->use_display_dcc_with_retile_blit) {
1845 /* Only Navi12/14 support independent 64B blocks in L2,
1846 * but without DCC image stores.
1847 */
1848 if (info->family == CHIP_NAVI12 ||
1849 info->family == CHIP_NAVI14) {
1850 surf->u.gfx9.dcc.independent_64B_blocks = 1;
1851 surf->u.gfx9.dcc.independent_128B_blocks = 0;
1852 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1853 }
1854
1855 if (info->chip_class >= GFX10_3) {
1856 surf->u.gfx9.dcc.independent_64B_blocks = 1;
1857 surf->u.gfx9.dcc.independent_128B_blocks = 1;
1858 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1859 }
1860 }
1861 }
1862
1863 switch (mode) {
1864 case RADEON_SURF_MODE_LINEAR_ALIGNED:
1865 assert(config->info.samples <= 1);
1866 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1867 AddrSurfInfoIn.swizzleMode = ADDR_SW_LINEAR;
1868 break;
1869
1870 case RADEON_SURF_MODE_1D:
1871 case RADEON_SURF_MODE_2D:
1872 if (surf->flags & RADEON_SURF_IMPORTED ||
1873 (info->chip_class >= GFX10 &&
1874 surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE)) {
1875 AddrSurfInfoIn.swizzleMode = surf->u.gfx9.surf.swizzle_mode;
1876 break;
1877 }
1878
1879 r = gfx9_get_preferred_swizzle_mode(addrlib->handle, surf, &AddrSurfInfoIn,
1880 false, &AddrSurfInfoIn.swizzleMode);
1881 if (r)
1882 return r;
1883 break;
1884
1885 default:
1886 assert(0);
1887 }
1888
1889 surf->u.gfx9.resource_type = AddrSurfInfoIn.resourceType;
1890 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
1891
1892 surf->num_dcc_levels = 0;
1893 surf->surf_size = 0;
1894 surf->fmask_size = 0;
1895 surf->dcc_size = 0;
1896 surf->htile_size = 0;
1897 surf->htile_slice_size = 0;
1898 surf->u.gfx9.surf_offset = 0;
1899 surf->u.gfx9.stencil_offset = 0;
1900 surf->cmask_size = 0;
1901 surf->u.gfx9.dcc_retile_use_uint16 = false;
1902 surf->u.gfx9.dcc_retile_num_elements = 0;
1903 surf->u.gfx9.dcc_retile_map = NULL;
1904
1905 /* Calculate texture layout information. */
1906 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed,
1907 &AddrSurfInfoIn);
1908 if (r)
1909 return r;
1910
1911 /* Calculate texture layout information for stencil. */
1912 if (surf->flags & RADEON_SURF_SBUFFER) {
1913 AddrSurfInfoIn.flags.stencil = 1;
1914 AddrSurfInfoIn.bpp = 8;
1915 AddrSurfInfoIn.format = ADDR_FMT_8;
1916
1917 if (!AddrSurfInfoIn.flags.depth) {
1918 r = gfx9_get_preferred_swizzle_mode(addrlib->handle, surf, &AddrSurfInfoIn,
1919 false, &AddrSurfInfoIn.swizzleMode);
1920 if (r)
1921 return r;
1922 } else
1923 AddrSurfInfoIn.flags.depth = 0;
1924
1925 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed,
1926 &AddrSurfInfoIn);
1927 if (r)
1928 return r;
1929 }
1930
1931 surf->is_linear = surf->u.gfx9.surf.swizzle_mode == ADDR_SW_LINEAR;
1932
1933 /* Query whether the surface is displayable. This is only useful for
1934 * surfaces that are allocated without SCANOUT. */
1935 bool displayable = false;
1936 if (!config->is_3d && !config->is_cube) {
1937 r = Addr2IsValidDisplaySwizzleMode(addrlib->handle, surf->u.gfx9.surf.swizzle_mode,
1938 surf->bpe * 8, &displayable);
1939 if (r)
1940 return r;
1941
1942 /* Display needs unaligned DCC. */
1943 if (surf->num_dcc_levels &&
1944 (!is_dcc_supported_by_DCN(info, config, surf,
1945 surf->u.gfx9.dcc.rb_aligned,
1946 surf->u.gfx9.dcc.pipe_aligned) ||
1947 /* Don't set is_displayable if displayable DCC is missing. */
1948 (info->use_display_dcc_with_retile_blit &&
1949 !surf->u.gfx9.dcc_retile_num_elements)))
1950 displayable = false;
1951 }
1952 surf->is_displayable = displayable;
1953
1954 /* Validate that we allocated a displayable surface if requested. */
1955 assert(!AddrSurfInfoIn.flags.display || surf->is_displayable);
1956
1957 /* Validate that DCC is set up correctly. */
1958 if (surf->num_dcc_levels) {
1959 assert(is_dcc_supported_by_L2(info, surf));
1960 if (AddrSurfInfoIn.flags.color)
1961 assert(is_dcc_supported_by_CB(info, surf->u.gfx9.surf.swizzle_mode));
1962 if (AddrSurfInfoIn.flags.display) {
1963 assert(is_dcc_supported_by_DCN(info, config, surf,
1964 surf->u.gfx9.dcc.rb_aligned,
1965 surf->u.gfx9.dcc.pipe_aligned));
1966 }
1967 }
1968
1969 if (info->has_graphics &&
1970 !compressed &&
1971 !config->is_3d &&
1972 config->info.levels == 1 &&
1973 AddrSurfInfoIn.flags.color &&
1974 !surf->is_linear &&
1975 surf->surf_alignment >= 64 * 1024 && /* 64KB tiling */
1976 !(surf->flags & (RADEON_SURF_DISABLE_DCC |
1977 RADEON_SURF_FORCE_SWIZZLE_MODE |
1978 RADEON_SURF_FORCE_MICRO_TILE_MODE))) {
1979 /* Validate that DCC is enabled if DCN can do it. */
1980 if ((info->use_display_dcc_unaligned ||
1981 info->use_display_dcc_with_retile_blit) &&
1982 AddrSurfInfoIn.flags.display &&
1983 surf->bpe == 4) {
1984 assert(surf->num_dcc_levels);
1985 }
1986
1987 /* Validate that non-scanout DCC is always enabled. */
1988 if (!AddrSurfInfoIn.flags.display)
1989 assert(surf->num_dcc_levels);
1990 }
1991
1992 if (!surf->htile_size) {
1993 /* Unset this if HTILE is not present. */
1994 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1995 }
1996
1997 switch (surf->u.gfx9.surf.swizzle_mode) {
1998 /* S = standard. */
1999 case ADDR_SW_256B_S:
2000 case ADDR_SW_4KB_S:
2001 case ADDR_SW_64KB_S:
2002 case ADDR_SW_64KB_S_T:
2003 case ADDR_SW_4KB_S_X:
2004 case ADDR_SW_64KB_S_X:
2005 surf->micro_tile_mode = RADEON_MICRO_MODE_STANDARD;
2006 break;
2007
2008 /* D = display. */
2009 case ADDR_SW_LINEAR:
2010 case ADDR_SW_256B_D:
2011 case ADDR_SW_4KB_D:
2012 case ADDR_SW_64KB_D:
2013 case ADDR_SW_64KB_D_T:
2014 case ADDR_SW_4KB_D_X:
2015 case ADDR_SW_64KB_D_X:
2016 surf->micro_tile_mode = RADEON_MICRO_MODE_DISPLAY;
2017 break;
2018
2019 /* R = rotated (gfx9), render target (gfx10). */
2020 case ADDR_SW_256B_R:
2021 case ADDR_SW_4KB_R:
2022 case ADDR_SW_64KB_R:
2023 case ADDR_SW_64KB_R_T:
2024 case ADDR_SW_4KB_R_X:
2025 case ADDR_SW_64KB_R_X:
2026 case ADDR_SW_VAR_R_X:
2027 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
2028 * used at the same time. We currently do not use the rotated
2029 * micro tile mode on gfx9.
2030 */
2031 assert(info->chip_class >= GFX10 ||
2032 !"rotate micro tile mode is unsupported");
2033 surf->micro_tile_mode = RADEON_MICRO_MODE_RENDER;
2034 break;
2035
2036 /* Z = depth. */
2037 case ADDR_SW_4KB_Z:
2038 case ADDR_SW_64KB_Z:
2039 case ADDR_SW_64KB_Z_T:
2040 case ADDR_SW_4KB_Z_X:
2041 case ADDR_SW_64KB_Z_X:
2042 case ADDR_SW_VAR_Z_X:
2043 surf->micro_tile_mode = RADEON_MICRO_MODE_DEPTH;
2044 break;
2045
2046 default:
2047 assert(0);
2048 }
2049
2050 return 0;
2051 }
2052
2053 int ac_compute_surface(struct ac_addrlib *addrlib, const struct radeon_info *info,
2054 const struct ac_surf_config *config,
2055 enum radeon_surf_mode mode,
2056 struct radeon_surf *surf)
2057 {
2058 int r;
2059
2060 r = surf_config_sanity(config, surf->flags);
2061 if (r)
2062 return r;
2063
2064 if (info->chip_class >= GFX9)
2065 r = gfx9_compute_surface(addrlib, info, config, mode, surf);
2066 else
2067 r = gfx6_compute_surface(addrlib->handle, info, config, mode, surf);
2068
2069 if (r)
2070 return r;
2071
2072 /* Determine the memory layout of multiple allocations in one buffer. */
2073 surf->total_size = surf->surf_size;
2074 surf->alignment = surf->surf_alignment;
2075
2076 if (surf->htile_size) {
2077 surf->htile_offset = align64(surf->total_size, surf->htile_alignment);
2078 surf->total_size = surf->htile_offset + surf->htile_size;
2079 surf->alignment = MAX2(surf->alignment, surf->htile_alignment);
2080 }
2081
2082 if (surf->fmask_size) {
2083 assert(config->info.samples >= 2);
2084 surf->fmask_offset = align64(surf->total_size, surf->fmask_alignment);
2085 surf->total_size = surf->fmask_offset + surf->fmask_size;
2086 surf->alignment = MAX2(surf->alignment, surf->fmask_alignment);
2087 }
2088
2089 /* Single-sample CMASK is in a separate buffer. */
2090 if (surf->cmask_size && config->info.samples >= 2) {
2091 surf->cmask_offset = align64(surf->total_size, surf->cmask_alignment);
2092 surf->total_size = surf->cmask_offset + surf->cmask_size;
2093 surf->alignment = MAX2(surf->alignment, surf->cmask_alignment);
2094 }
2095
2096 if (surf->is_displayable)
2097 surf->flags |= RADEON_SURF_SCANOUT;
2098
2099 if (surf->dcc_size &&
2100 /* dcc_size is computed on GFX9+ only if it's displayable. */
2101 (info->chip_class >= GFX9 || !get_display_flag(config, surf))) {
2102 /* For hw-specific reasons, it's better to place displayable DCC
2103 * immediately after the image.
2104 */
2105 if (info->chip_class >= GFX9 &&
2106 surf->u.gfx9.dcc_retile_num_elements) {
2107 /* Add space for the displayable DCC buffer. */
2108 surf->display_dcc_offset =
2109 align64(surf->total_size, surf->u.gfx9.display_dcc_alignment);
2110 surf->total_size = surf->display_dcc_offset +
2111 surf->u.gfx9.display_dcc_size;
2112
2113 /* Add space for the DCC retile buffer. (16-bit or 32-bit elements) */
2114 surf->dcc_retile_map_offset =
2115 align64(surf->total_size, info->tcc_cache_line_size);
2116
2117 if (surf->u.gfx9.dcc_retile_use_uint16) {
2118 surf->total_size = surf->dcc_retile_map_offset +
2119 surf->u.gfx9.dcc_retile_num_elements * 2;
2120 } else {
2121 surf->total_size = surf->dcc_retile_map_offset +
2122 surf->u.gfx9.dcc_retile_num_elements * 4;
2123 }
2124 }
2125
2126 surf->dcc_offset = align64(surf->total_size, surf->dcc_alignment);
2127 surf->total_size = surf->dcc_offset + surf->dcc_size;
2128 surf->alignment = MAX2(surf->alignment, surf->dcc_alignment);
2129 }
2130
2131 return 0;
2132 }
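/* Editor's sketch (not driver code): the layout code above repeats one
 * pattern for HTILE, FMASK, CMASK and DCC. The hypothetical helper below
 * shows that pattern in isolation; the names are assumed for illustration.
 */
#if 0
static uint64_t add_sub_allocation(uint64_t *total_size, uint32_t *buf_alignment,
                                   uint64_t size, uint32_t alignment)
{
   /* Place the sub-allocation at the next aligned offset, grow the buffer,
    * and raise the whole-buffer alignment if needed.
    */
   uint64_t offset = align64(*total_size, alignment);

   *total_size = offset + size;
   *buf_alignment = MAX2(*buf_alignment, alignment);
   return offset;
}

/* Example use, mirroring the HTILE case in ac_compute_surface:
 *    surf->htile_offset = add_sub_allocation(&surf->total_size, &surf->alignment,
 *                                            surf->htile_size, surf->htile_alignment);
 */
#endif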
2133
2134 /* This is meant to be used for disabling DCC. */
2135 void ac_surface_zero_dcc_fields(struct radeon_surf *surf)
2136 {
2137 surf->dcc_offset = 0;
2138 surf->display_dcc_offset = 0;
2139 surf->dcc_retile_map_offset = 0;
2140 }
2141
2142 static unsigned eg_tile_split(unsigned tile_split)
2143 {
2144 switch (tile_split) {
2145 case 0: tile_split = 64; break;
2146 case 1: tile_split = 128; break;
2147 case 2: tile_split = 256; break;
2148 case 3: tile_split = 512; break;
2149 default:
2150 case 4: tile_split = 1024; break;
2151 case 5: tile_split = 2048; break;
2152 case 6: tile_split = 4096; break;
2153 }
2154 return tile_split;
2155 }
2156
2157 static unsigned eg_tile_split_rev(unsigned eg_tile_split)
2158 {
2159 switch (eg_tile_split) {
2160 case 64: return 0;
2161 case 128: return 1;
2162 case 256: return 2;
2163 case 512: return 3;
2164 default:
2165 case 1024: return 4;
2166 case 2048: return 5;
2167 case 4096: return 6;
2168 }
2169 }
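/* Editor's sketch: the two helpers above encode tile_split as
 * log2(tile_split / 64), i.e. eg_tile_split(i) == 64 << i for i in [0, 6],
 * and eg_tile_split_rev() inverts that mapping. A quick self-check:
 */
#if 0
static void check_eg_tile_split_roundtrip(void)
{
   for (unsigned i = 0; i <= 6; i++) {
      assert(eg_tile_split(i) == 64u << i);
      assert(eg_tile_split_rev(eg_tile_split(i)) == i);
   }
}
#endif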
2170
2171 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_SHIFT 45
2172 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_MASK 0x3
2173
2174 /* This should be called before ac_compute_surface. */
2175 void ac_surface_set_bo_metadata(const struct radeon_info *info,
2176 struct radeon_surf *surf, uint64_t tiling_flags,
2177 enum radeon_surf_mode *mode)
2178 {
2179 bool scanout;
2180
2181 if (info->chip_class >= GFX9) {
2182 surf->u.gfx9.surf.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2183 surf->u.gfx9.dcc.independent_64B_blocks = AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_64B);
2184 surf->u.gfx9.dcc.independent_128B_blocks = AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_128B);
2185 surf->u.gfx9.dcc.max_compressed_block_size = AMDGPU_TILING_GET(tiling_flags, DCC_MAX_COMPRESSED_BLOCK_SIZE);
2186 surf->u.gfx9.display_dcc_pitch_max = AMDGPU_TILING_GET(tiling_flags, DCC_PITCH_MAX);
2187 scanout = AMDGPU_TILING_GET(tiling_flags, SCANOUT);
2188 *mode = surf->u.gfx9.surf.swizzle_mode > 0 ? RADEON_SURF_MODE_2D : RADEON_SURF_MODE_LINEAR_ALIGNED;
2189 } else {
2190 surf->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2191 surf->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2192 surf->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2193 surf->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
2194 surf->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2195 surf->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2196 scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
2197
2198 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
2199 *mode = RADEON_SURF_MODE_2D;
2200 else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
2201 *mode = RADEON_SURF_MODE_1D;
2202 else
2203 *mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
2204 }
2205
2206 if (scanout)
2207 surf->flags |= RADEON_SURF_SCANOUT;
2208 else
2209 surf->flags &= ~RADEON_SURF_SCANOUT;
2210 }
2211
2212 void ac_surface_get_bo_metadata(const struct radeon_info *info,
2213 struct radeon_surf *surf, uint64_t *tiling_flags)
2214 {
2215 *tiling_flags = 0;
2216
2217 if (info->chip_class >= GFX9) {
2218 uint64_t dcc_offset = 0;
2219
2220 if (surf->dcc_offset) {
2221 dcc_offset = surf->display_dcc_offset ? surf->display_dcc_offset
2222 : surf->dcc_offset;
2223 assert((dcc_offset >> 8) != 0 && (dcc_offset >> 8) < (1 << 24));
2224 }
2225
2226 *tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, surf->u.gfx9.surf.swizzle_mode);
2227 *tiling_flags |= AMDGPU_TILING_SET(DCC_OFFSET_256B, dcc_offset >> 8);
2228 *tiling_flags |= AMDGPU_TILING_SET(DCC_PITCH_MAX, surf->u.gfx9.display_dcc_pitch_max);
2229 *tiling_flags |= AMDGPU_TILING_SET(DCC_INDEPENDENT_64B, surf->u.gfx9.dcc.independent_64B_blocks);
2230 *tiling_flags |= AMDGPU_TILING_SET(DCC_INDEPENDENT_128B, surf->u.gfx9.dcc.independent_128B_blocks);
2231 *tiling_flags |= AMDGPU_TILING_SET(DCC_MAX_COMPRESSED_BLOCK_SIZE, surf->u.gfx9.dcc.max_compressed_block_size);
2232 *tiling_flags |= AMDGPU_TILING_SET(SCANOUT, (surf->flags & RADEON_SURF_SCANOUT) != 0);
2233 } else {
2234 if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D)
2235 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
2236 else if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D)
2237 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
2238 else
2239 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
2240
2241 *tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, surf->u.legacy.pipe_config);
2242 *tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(surf->u.legacy.bankw));
2243 *tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(surf->u.legacy.bankh));
2244 if (surf->u.legacy.tile_split)
2245 *tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(surf->u.legacy.tile_split));
2246 *tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(surf->u.legacy.mtilea));
2247 *tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(surf->u.legacy.num_banks)-1);
2248
2249 if (surf->flags & RADEON_SURF_SCANOUT)
2250 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
2251 else
2252 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
2253 }
2254 }
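/* Editor's sketch: on the legacy (pre-GFX9) path the two functions above are
 * inverses for the power-of-two fields, e.g. BANK_WIDTH stores log2(bankw)
 * and is decoded with 1 << field, while NUM_BANKS stores log2(num_banks) - 1
 * and is decoded with 2 << field. The helper name below is hypothetical.
 */
#if 0
static void check_legacy_tiling_roundtrip(unsigned bankw, unsigned num_banks)
{
   uint64_t tf = 0;

   tf |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(bankw));
   tf |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(num_banks) - 1);

   assert((1u << AMDGPU_TILING_GET(tf, BANK_WIDTH)) == bankw);
   assert((2u << AMDGPU_TILING_GET(tf, NUM_BANKS)) == num_banks);
}
#endif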
2255
2256 static uint32_t ac_get_umd_metadata_word1(const struct radeon_info *info)
2257 {
2258 return (ATI_VENDOR_ID << 16) | info->pci_id;
2259 }
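/* Editor's sketch: the word packs the PCI vendor ID (0x1002 for AMD) into the
 * high 16 bits and the device's PCI ID into the low 16 bits, so a consumer
 * can split it back apart. The helper name below is hypothetical.
 */
#if 0
static void check_umd_metadata_word1_split(uint32_t word1)
{
   uint32_t vendor_id = word1 >> 16; /* ATI_VENDOR_ID */
   uint32_t pci_id = word1 & 0xffff; /* the device's PCI ID */

   assert(word1 == ((vendor_id << 16) | pci_id));
}
#endif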
2260
2261 /* This should be called after ac_compute_surface. */
2262 bool ac_surface_set_umd_metadata(const struct radeon_info *info,
2263 struct radeon_surf *surf,
2264 unsigned num_storage_samples,
2265 unsigned num_mipmap_levels,
2266 unsigned size_metadata,
2267 uint32_t metadata[64])
2268 {
2269 uint32_t *desc = &metadata[2];
2270 uint64_t offset;
2271
2272 if (info->chip_class >= GFX9)
2273 offset = surf->u.gfx9.surf_offset;
2274 else
2275 offset = surf->u.legacy.level[0].offset;
2276
2277 if (offset || /* Non-zero planes ignore metadata. */
2278 size_metadata < 10 * 4 || /* at least 2(header) + 8(desc) dwords */
2279 metadata[0] == 0 || /* invalid version number */
2280 metadata[1] != ac_get_umd_metadata_word1(info)) /* invalid PCI ID */ {
2281 /* Disable DCC because it might not be enabled. */
2282 ac_surface_zero_dcc_fields(surf);
2283
2284 /* Don't report an error if the texture comes from an incompatible driver,
2285 * but importing it this way might not actually work.
2286 */
2287 return true;
2288 }
2289
2290 /* Validate that sample counts and the number of mipmap levels match. */
2291 unsigned desc_last_level = G_008F1C_LAST_LEVEL(desc[3]);
2292 unsigned type = G_008F1C_TYPE(desc[3]);
2293
2294 if (type == V_008F1C_SQ_RSRC_IMG_2D_MSAA || type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) {
2295 unsigned log_samples = util_logbase2(MAX2(1, num_storage_samples));
2296
2297 if (desc_last_level != log_samples) {
2298 fprintf(stderr,
2299 "amdgpu: invalid MSAA texture import, "
2300 "metadata has log2(samples) = %u, the caller set %u\n",
2301 desc_last_level, log_samples);
2302 return false;
2303 }
2304 } else {
2305 if (desc_last_level != num_mipmap_levels - 1) {
2306 fprintf(stderr,
2307 "amdgpu: invalid mipmapped texture import, "
2308 "metadata has last_level = %u, the caller set %u\n",
2309 desc_last_level, num_mipmap_levels - 1);
2310 return false;
2311 }
2312 }
2313
2314 if (info->chip_class >= GFX8 && G_008F28_COMPRESSION_EN(desc[6])) {
2315 /* Read DCC information. */
2316 switch (info->chip_class) {
2317 case GFX8:
2318 surf->dcc_offset = (uint64_t)desc[7] << 8;
2319 break;
2320
2321 case GFX9:
2322 surf->dcc_offset =
2323 ((uint64_t)desc[7] << 8) | ((uint64_t)G_008F24_META_DATA_ADDRESS(desc[5]) << 40);
2324 surf->u.gfx9.dcc.pipe_aligned = G_008F24_META_PIPE_ALIGNED(desc[5]);
2325 surf->u.gfx9.dcc.rb_aligned = G_008F24_META_RB_ALIGNED(desc[5]);
2326
2327 /* If DCC is unaligned, this can only be a displayable image. */
2328 if (!surf->u.gfx9.dcc.pipe_aligned && !surf->u.gfx9.dcc.rb_aligned)
2329 assert(surf->is_displayable);
2330 break;
2331
2332 case GFX10:
2333 case GFX10_3:
2334 surf->dcc_offset =
2335 ((uint64_t)G_00A018_META_DATA_ADDRESS_LO(desc[6]) << 8) | ((uint64_t)desc[7] << 16);
2336 surf->u.gfx9.dcc.pipe_aligned = G_00A018_META_PIPE_ALIGNED(desc[6]);
2337 break;
2338
2339 default:
2340 assert(0);
2341 return false;
2342 }
2343 } else {
2344 /* Disable DCC. dcc_offset is always set by texture_from_handle
2345 * and must be cleared here.
2346 */
2347 ac_surface_zero_dcc_fields(surf);
2348 }
2349
2350 return true;
2351 }
2352
2353 void ac_surface_get_umd_metadata(const struct radeon_info *info,
2354 struct radeon_surf *surf,
2355 unsigned num_mipmap_levels,
2356 uint32_t desc[8],
2357 unsigned *size_metadata, uint32_t metadata[64])
2358 {
2359 /* Clear the base address and set the relative DCC offset. */
2360 desc[0] = 0;
2361 desc[1] &= C_008F14_BASE_ADDRESS_HI;
2362
2363 switch (info->chip_class) {
2364 case GFX6:
2365 case GFX7:
2366 break;
2367 case GFX8:
2368 desc[7] = surf->dcc_offset >> 8;
2369 break;
2370 case GFX9:
2371 desc[7] = surf->dcc_offset >> 8;
2372 desc[5] &= C_008F24_META_DATA_ADDRESS;
2373 desc[5] |= S_008F24_META_DATA_ADDRESS(surf->dcc_offset >> 40);
2374 break;
2375 case GFX10:
2376 case GFX10_3:
2377 desc[6] &= C_00A018_META_DATA_ADDRESS_LO;
2378 desc[6] |= S_00A018_META_DATA_ADDRESS_LO(surf->dcc_offset >> 8);
2379 desc[7] = surf->dcc_offset >> 16;
2380 break;
2381 default:
2382 assert(0);
2383 }
2384
2385 /* Metadata image format version 1:
2386 * [0] = 1 (metadata format identifier)
2387 * [1] = (VENDOR_ID << 16) | PCI_ID
2388 * [2:9] = image descriptor for the whole resource
2389 * [2] is always 0, because the base address is cleared
2390 * [9] is the DCC offset bits [39:8] from the beginning of
2391 * the buffer
2392 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
2393 */
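/* Editor's sketch: the same layout viewed as a hypothetical struct overlay
 * (field names assumed; the driver only ever indexes the raw dword array):
 */
#if 0
struct umd_metadata_v1 {
   uint32_t version;        /* [0] = 1 */
   uint32_t vendor_pci_id;  /* [1] = (ATI_VENDOR_ID << 16) | PCI_ID */
   uint32_t desc[8];        /* [2:9]; desc[0] == 0, base address cleared */
   uint32_t level_offset[]; /* [10:10+LAST_LEVEL], GFX6-GFX8 only */
};
#endif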
2394
2395 metadata[0] = 1; /* metadata image format version 1 */
2396
2397 /* Tiling modes are ambiguous without a PCI ID. */
2398 metadata[1] = ac_get_umd_metadata_word1(info);
2399
2400 /* Dwords [2:9] contain the image descriptor. */
2401 memcpy(&metadata[2], desc, 8 * 4);
2402 *size_metadata = 10 * 4;
2403
2404 /* Dwords [10:..] contain the mipmap level offsets. */
2405 if (info->chip_class <= GFX8) {
2406 for (unsigned i = 0; i < num_mipmap_levels; i++)
2407 metadata[10 + i] = surf->u.legacy.level[i].offset >> 8;
2408
2409 *size_metadata += num_mipmap_levels * 4;
2410 }
2411 }
2412
2413 void ac_surface_override_offset_stride(const struct radeon_info *info,
2414 struct radeon_surf *surf,
2415 unsigned num_mipmap_levels,
2416 uint64_t offset, unsigned pitch)
2417 {
2418 if (info->chip_class >= GFX9) {
2419 if (pitch) {
2420 surf->u.gfx9.surf_pitch = pitch;
2421 if (num_mipmap_levels == 1)
2422 surf->u.gfx9.surf.epitch = pitch - 1;
2423 surf->u.gfx9.surf_slice_size =
2424 (uint64_t)pitch * surf->u.gfx9.surf_height * surf->bpe;
2425 }
2426 surf->u.gfx9.surf_offset = offset;
2427 if (surf->u.gfx9.stencil_offset)
2428 surf->u.gfx9.stencil_offset += offset;
2429 } else {
2430 if (pitch) {
2431 surf->u.legacy.level[0].nblk_x = pitch;
2432 surf->u.legacy.level[0].slice_size_dw =
2433 ((uint64_t)pitch * surf->u.legacy.level[0].nblk_y * surf->bpe) / 4;
2434 }
2435
2436 if (offset) {
2437 for (unsigned i = 0; i < ARRAY_SIZE(surf->u.legacy.level); ++i)
2438 surf->u.legacy.level[i].offset += offset;
2439 }
2440 }
2441
2442 if (surf->htile_offset)
2443 surf->htile_offset += offset;
2444 if (surf->fmask_offset)
2445 surf->fmask_offset += offset;
2446 if (surf->cmask_offset)
2447 surf->cmask_offset += offset;
2448 if (surf->dcc_offset)
2449 surf->dcc_offset += offset;
2450 if (surf->display_dcc_offset)
2451 surf->display_dcc_offset += offset;
2452 if (surf->dcc_retile_map_offset)
2453 surf->dcc_retile_map_offset += offset;
2454 }
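/* Editor's sketch: worked example of the pitch-driven slice sizes above,
 * using hypothetical values (pitch = 256, surf_height/nblk_y = 256, bpe = 4).
 */
#if 0
static void check_override_stride_math(void)
{
   const uint64_t pitch = 256, height = 256, bpe = 4;

   assert(pitch * height * bpe == 262144);    /* gfx9 surf_slice_size in bytes */
   assert(pitch * height * bpe / 4 == 65536); /* legacy slice_size_dw in dwords */
}
#endif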