ac/surface: cache DCC retile maps (v2)
[mesa.git] / src / amd / common / ac_surface.c
1 /*
2 * Copyright © 2011 Red Hat All Rights Reserved.
3 * Copyright © 2017 Advanced Micro Devices, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
25 * of the Software.
26 */
27
28 #include "ac_surface.h"
29 #include "amd_family.h"
30 #include "addrlib/src/amdgpu_asic_addr.h"
31 #include "ac_gpu_info.h"
32 #include "util/hash_table.h"
33 #include "util/macros.h"
34 #include "util/simple_mtx.h"
35 #include "util/u_atomic.h"
36 #include "util/u_math.h"
37 #include "util/u_memory.h"
38 #include "sid.h"
39
40 #include <errno.h>
41 #include <stdio.h>
42 #include <stdlib.h>
43 #include <amdgpu.h>
44 #include "drm-uapi/amdgpu_drm.h"
45
46 #include "addrlib/inc/addrinterface.h"
47
48 #ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
49 #define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
50 #endif
51
52 #ifndef CIASICIDGFXENGINE_ARCTICISLAND
53 #define CIASICIDGFXENGINE_ARCTICISLAND 0x0000000D
54 #endif
55
/* Wrapper around an addrlib handle plus a per-device cache of computed
 * DCC retile maps, shared by all surfaces created from this ac_addrlib.
 */
struct ac_addrlib {
	ADDR_HANDLE handle;

	/* The cache of DCC retile maps for reuse when allocating images of
	 * similar sizes.
	 */
	simple_mtx_t dcc_retile_map_lock;   /* guards dcc_retile_maps */
	struct hash_table *dcc_retile_maps; /* dcc_retile_map_key -> retile map buffer */
};
65
/* Hash-table key identifying one DCC retile map.
 *
 * The key is hashed and compared as raw bytes (see
 * dcc_retile_map_hash_key / dcc_retile_map_keys_equal), so any instance
 * must be fully memset to 0 before the fields are filled in — otherwise
 * indeterminate padding bytes would break lookups.
 */
struct dcc_retile_map_key {
	enum radeon_family family;
	unsigned retile_width;
	unsigned retile_height;
	bool rb_aligned;
	bool pipe_aligned;
	unsigned dcc_retile_num_elements;
	/* The addrlib input used to compute the DCC addresses. */
	ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT input;
};
75
76 static uint32_t dcc_retile_map_hash_key(const void *key)
77 {
78 return _mesa_hash_data(key, sizeof(struct dcc_retile_map_key));
79 }
80
81 static bool dcc_retile_map_keys_equal(const void *a, const void *b)
82 {
83 return memcmp(a, b, sizeof(struct dcc_retile_map_key)) == 0;
84 }
85
86 static void dcc_retile_map_free(struct hash_entry *entry)
87 {
88 free((void*)entry->key);
89 free(entry->data);
90 }
91
92 static uint32_t *ac_compute_dcc_retile_map(struct ac_addrlib *addrlib,
93 const struct radeon_info *info,
94 unsigned retile_width, unsigned retile_height,
95 bool rb_aligned, bool pipe_aligned, bool use_uint16,
96 unsigned dcc_retile_num_elements,
97 const ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT *in)
98 {
99 unsigned dcc_retile_map_size = dcc_retile_num_elements * (use_uint16 ? 2 : 4);
100 struct dcc_retile_map_key key;
101
102 assert(in->numFrags == 1 && in->numSlices == 1 && in->numMipLevels == 1);
103
104 memset(&key, 0, sizeof(key));
105 key.family = info->family;
106 key.retile_width = retile_width;
107 key.retile_height = retile_height;
108 key.rb_aligned = rb_aligned;
109 key.pipe_aligned = pipe_aligned;
110 key.dcc_retile_num_elements = dcc_retile_num_elements;
111 memcpy(&key.input, in, sizeof(*in));
112
113 simple_mtx_lock(&addrlib->dcc_retile_map_lock);
114
115 /* If we have already computed this retile map, get it from the hash table. */
116 struct hash_entry *entry = _mesa_hash_table_search(addrlib->dcc_retile_maps, &key);
117 if (entry) {
118 uint32_t *map = entry->data;
119 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
120 return map;
121 }
122
123 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin;
124 memcpy(&addrin, in, sizeof(*in));
125
126 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_OUTPUT addrout = {};
127 addrout.size = sizeof(addrout);
128
129 void *dcc_retile_map = malloc(dcc_retile_map_size);
130 if (!dcc_retile_map) {
131 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
132 return NULL;
133 }
134
135 unsigned index = 0;
136
137 for (unsigned y = 0; y < retile_height; y += in->compressBlkHeight) {
138 addrin.y = y;
139
140 for (unsigned x = 0; x < retile_width; x += in->compressBlkWidth) {
141 addrin.x = x;
142
143 /* Compute src DCC address */
144 addrin.dccKeyFlags.pipeAligned = pipe_aligned;
145 addrin.dccKeyFlags.rbAligned = rb_aligned;
146 addrout.addr = 0;
147
148 if (Addr2ComputeDccAddrFromCoord(addrlib->handle, &addrin, &addrout) != ADDR_OK) {
149 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
150 return NULL;
151 }
152
153 if (use_uint16)
154 ((uint16_t*)dcc_retile_map)[index * 2] = addrout.addr;
155 else
156 ((uint32_t*)dcc_retile_map)[index * 2] = addrout.addr;
157
158 /* Compute dst DCC address */
159 addrin.dccKeyFlags.pipeAligned = 0;
160 addrin.dccKeyFlags.rbAligned = 0;
161 addrout.addr = 0;
162
163 if (Addr2ComputeDccAddrFromCoord(addrlib->handle, &addrin, &addrout) != ADDR_OK) {
164 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
165 return NULL;
166 }
167
168 if (use_uint16)
169 ((uint16_t*)dcc_retile_map)[index * 2 + 1] = addrout.addr;
170 else
171 ((uint32_t*)dcc_retile_map)[index * 2 + 1] = addrout.addr;
172
173 assert(index * 2 + 1 < dcc_retile_num_elements);
174 index++;
175 }
176 }
177 /* Fill the remaining pairs with the last one (for the compute shader). */
178 for (unsigned i = index * 2; i < dcc_retile_num_elements; i++) {
179 if (use_uint16)
180 ((uint16_t*)dcc_retile_map)[i] = ((uint16_t*)dcc_retile_map)[i - 2];
181 else
182 ((uint32_t*)dcc_retile_map)[i] = ((uint32_t*)dcc_retile_map)[i - 2];
183 }
184
185 /* Insert the retile map into the hash table, so that it can be reused and
186 * the computation can be skipped for similar image sizes.
187 */
188 _mesa_hash_table_insert(addrlib->dcc_retile_maps,
189 mem_dup(&key, sizeof(key)), dcc_retile_map);
190
191 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
192 return dcc_retile_map;
193 }
194
/* addrlib system-memory allocation callback: plain malloc.
 * NOTE(review): addrlib presumably handles a NULL return itself — confirm. */
static void *ADDR_API allocSysMem(const ADDR_ALLOCSYSMEM_INPUT * pInput)
{
	return malloc(pInput->sizeInBytes);
}
199
/* addrlib system-memory free callback: plain free, always succeeds. */
static ADDR_E_RETURNCODE ADDR_API freeSysMem(const ADDR_FREESYSMEM_INPUT * pInput)
{
	free(pInput->pVirtAddr);
	return ADDR_OK;
}
205
206 struct ac_addrlib *ac_addrlib_create(const struct radeon_info *info,
207 const struct amdgpu_gpu_info *amdinfo,
208 uint64_t *max_alignment)
209 {
210 ADDR_CREATE_INPUT addrCreateInput = {0};
211 ADDR_CREATE_OUTPUT addrCreateOutput = {0};
212 ADDR_REGISTER_VALUE regValue = {0};
213 ADDR_CREATE_FLAGS createFlags = {{0}};
214 ADDR_GET_MAX_ALIGNMENTS_OUTPUT addrGetMaxAlignmentsOutput = {0};
215 ADDR_E_RETURNCODE addrRet;
216
217 addrCreateInput.size = sizeof(ADDR_CREATE_INPUT);
218 addrCreateOutput.size = sizeof(ADDR_CREATE_OUTPUT);
219
220 regValue.gbAddrConfig = amdinfo->gb_addr_cfg;
221 createFlags.value = 0;
222
223 addrCreateInput.chipFamily = info->family_id;
224 addrCreateInput.chipRevision = info->chip_external_rev;
225
226 if (addrCreateInput.chipFamily == FAMILY_UNKNOWN)
227 return NULL;
228
229 if (addrCreateInput.chipFamily >= FAMILY_AI) {
230 addrCreateInput.chipEngine = CIASICIDGFXENGINE_ARCTICISLAND;
231 } else {
232 regValue.noOfBanks = amdinfo->mc_arb_ramcfg & 0x3;
233 regValue.noOfRanks = (amdinfo->mc_arb_ramcfg & 0x4) >> 2;
234
235 regValue.backendDisables = amdinfo->enabled_rb_pipes_mask;
236 regValue.pTileConfig = amdinfo->gb_tile_mode;
237 regValue.noOfEntries = ARRAY_SIZE(amdinfo->gb_tile_mode);
238 if (addrCreateInput.chipFamily == FAMILY_SI) {
239 regValue.pMacroTileConfig = NULL;
240 regValue.noOfMacroEntries = 0;
241 } else {
242 regValue.pMacroTileConfig = amdinfo->gb_macro_tile_mode;
243 regValue.noOfMacroEntries = ARRAY_SIZE(amdinfo->gb_macro_tile_mode);
244 }
245
246 createFlags.useTileIndex = 1;
247 createFlags.useHtileSliceAlign = 1;
248
249 addrCreateInput.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
250 }
251
252 addrCreateInput.callbacks.allocSysMem = allocSysMem;
253 addrCreateInput.callbacks.freeSysMem = freeSysMem;
254 addrCreateInput.callbacks.debugPrint = 0;
255 addrCreateInput.createFlags = createFlags;
256 addrCreateInput.regValue = regValue;
257
258 addrRet = AddrCreate(&addrCreateInput, &addrCreateOutput);
259 if (addrRet != ADDR_OK)
260 return NULL;
261
262 if (max_alignment) {
263 addrRet = AddrGetMaxAlignments(addrCreateOutput.hLib, &addrGetMaxAlignmentsOutput);
264 if (addrRet == ADDR_OK){
265 *max_alignment = addrGetMaxAlignmentsOutput.baseAlign;
266 }
267 }
268
269 struct ac_addrlib *addrlib = calloc(1, sizeof(struct ac_addrlib));
270 if (!addrlib) {
271 AddrDestroy(addrCreateOutput.hLib);
272 return NULL;
273 }
274
275 addrlib->handle = addrCreateOutput.hLib;
276 simple_mtx_init(&addrlib->dcc_retile_map_lock, mtx_plain);
277 addrlib->dcc_retile_maps = _mesa_hash_table_create(NULL, dcc_retile_map_hash_key,
278 dcc_retile_map_keys_equal);
279 return addrlib;
280 }
281
282 void ac_addrlib_destroy(struct ac_addrlib *addrlib)
283 {
284 AddrDestroy(addrlib->handle);
285 simple_mtx_destroy(&addrlib->dcc_retile_map_lock);
286 _mesa_hash_table_destroy(addrlib->dcc_retile_maps, dcc_retile_map_free);
287 free(addrlib);
288 }
289
290 static int surf_config_sanity(const struct ac_surf_config *config,
291 unsigned flags)
292 {
293 /* FMASK is allocated together with the color surface and can't be
294 * allocated separately.
295 */
296 assert(!(flags & RADEON_SURF_FMASK));
297 if (flags & RADEON_SURF_FMASK)
298 return -EINVAL;
299
300 /* all dimension must be at least 1 ! */
301 if (!config->info.width || !config->info.height || !config->info.depth ||
302 !config->info.array_size || !config->info.levels)
303 return -EINVAL;
304
305 switch (config->info.samples) {
306 case 0:
307 case 1:
308 case 2:
309 case 4:
310 case 8:
311 break;
312 case 16:
313 if (flags & RADEON_SURF_Z_OR_SBUFFER)
314 return -EINVAL;
315 break;
316 default:
317 return -EINVAL;
318 }
319
320 if (!(flags & RADEON_SURF_Z_OR_SBUFFER)) {
321 switch (config->info.storage_samples) {
322 case 0:
323 case 1:
324 case 2:
325 case 4:
326 case 8:
327 break;
328 default:
329 return -EINVAL;
330 }
331 }
332
333 if (config->is_3d && config->info.array_size > 1)
334 return -EINVAL;
335 if (config->is_cube && config->info.depth > 1)
336 return -EINVAL;
337
338 return 0;
339 }
340
/* Compute the layout of one mip level (depth or stencil aspect) on
 * GFX6-GFX8 and accumulate it into "surf" (offset, size, tiling index).
 *
 * For DCC-compatible color surfaces it also computes per-level DCC info,
 * and for level 0 of 2D-tiled depth surfaces it computes the HTILE
 * layout. The Addr* in/out structs carry state between successive calls
 * (e.g. AddrDccOut->subLvlCompressible from the previous level).
 *
 * Returns 0 on success or a non-zero addrlib error code.
 */
static int gfx6_compute_level(ADDR_HANDLE addrlib,
			      const struct ac_surf_config *config,
			      struct radeon_surf *surf, bool is_stencil,
			      unsigned level, bool compressed,
			      ADDR_COMPUTE_SURFACE_INFO_INPUT *AddrSurfInfoIn,
			      ADDR_COMPUTE_SURFACE_INFO_OUTPUT *AddrSurfInfoOut,
			      ADDR_COMPUTE_DCCINFO_INPUT *AddrDccIn,
			      ADDR_COMPUTE_DCCINFO_OUTPUT *AddrDccOut,
			      ADDR_COMPUTE_HTILE_INFO_INPUT *AddrHtileIn,
			      ADDR_COMPUTE_HTILE_INFO_OUTPUT *AddrHtileOut)
{
	struct legacy_surf_level *surf_level;
	ADDR_E_RETURNCODE ret;

	AddrSurfInfoIn->mipLevel = level;
	AddrSurfInfoIn->width = u_minify(config->info.width, level);
	AddrSurfInfoIn->height = u_minify(config->info.height, level);

	/* Make GFX6 linear surfaces compatible with GFX9 for hybrid graphics,
	 * because GFX9 needs linear alignment of 256 bytes.
	 */
	if (config->info.levels == 1 &&
	    AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED &&
	    AddrSurfInfoIn->bpp &&
	    util_is_power_of_two_or_zero(AddrSurfInfoIn->bpp)) {
		unsigned alignment = 256 / (AddrSurfInfoIn->bpp / 8);

		AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, alignment);
	}

	/* addrlib assumes the bytes/pixel is a divisor of 64, which is not
	 * true for r32g32b32 formats. */
	if (AddrSurfInfoIn->bpp == 96) {
		assert(config->info.levels == 1);
		assert(AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED);

		/* The least common multiple of 64 bytes and 12 bytes/pixel is
		 * 192 bytes, or 16 pixels. */
		AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, 16);
	}

	/* Slice count: depth for 3D, 6 faces for cubemaps, else array size. */
	if (config->is_3d)
		AddrSurfInfoIn->numSlices = u_minify(config->info.depth, level);
	else if (config->is_cube)
		AddrSurfInfoIn->numSlices = 6;
	else
		AddrSurfInfoIn->numSlices = config->info.array_size;

	if (level > 0) {
		/* Set the base level pitch. This is needed for calculation
		 * of non-zero levels. */
		if (is_stencil)
			AddrSurfInfoIn->basePitch = surf->u.legacy.stencil_level[0].nblk_x;
		else
			AddrSurfInfoIn->basePitch = surf->u.legacy.level[0].nblk_x;

		/* Convert blocks to pixels for compressed formats. */
		if (compressed)
			AddrSurfInfoIn->basePitch *= surf->blk_w;
	}

	ret = AddrComputeSurfaceInfo(addrlib,
				     AddrSurfInfoIn,
				     AddrSurfInfoOut);
	if (ret != ADDR_OK) {
		return ret;
	}

	/* Levels are laid out back-to-back; align each level's start offset. */
	surf_level = is_stencil ? &surf->u.legacy.stencil_level[level] : &surf->u.legacy.level[level];
	surf_level->offset = align64(surf->surf_size, AddrSurfInfoOut->baseAlign);
	surf_level->slice_size_dw = AddrSurfInfoOut->sliceSize / 4;
	surf_level->nblk_x = AddrSurfInfoOut->pitch;
	surf_level->nblk_y = AddrSurfInfoOut->height;

	/* Translate the addrlib tile mode into the driver's surface mode. */
	switch (AddrSurfInfoOut->tileMode) {
	case ADDR_TM_LINEAR_ALIGNED:
		surf_level->mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
		break;
	case ADDR_TM_1D_TILED_THIN1:
		surf_level->mode = RADEON_SURF_MODE_1D;
		break;
	case ADDR_TM_2D_TILED_THIN1:
		surf_level->mode = RADEON_SURF_MODE_2D;
		break;
	default:
		assert(0);
	}

	if (is_stencil)
		surf->u.legacy.stencil_tiling_index[level] = AddrSurfInfoOut->tileIndex;
	else
		surf->u.legacy.tiling_index[level] = AddrSurfInfoOut->tileIndex;

	surf->surf_size = surf_level->offset + AddrSurfInfoOut->surfSize;

	/* Clear DCC fields at the beginning. */
	surf_level->dcc_offset = 0;

	/* The previous level's flag tells us if we can use DCC for this level. */
	if (AddrSurfInfoIn->flags.dccCompatible &&
	    (level == 0 || AddrDccOut->subLvlCompressible)) {
		bool prev_level_clearable = level == 0 ||
					    AddrDccOut->dccRamSizeAligned;

		AddrDccIn->colorSurfSize = AddrSurfInfoOut->surfSize;
		AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
		AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
		AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
		AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;

		ret = AddrComputeDccInfo(addrlib,
					 AddrDccIn,
					 AddrDccOut);

		if (ret == ADDR_OK) {
			surf_level->dcc_offset = surf->dcc_size;
			surf->num_dcc_levels = level + 1;
			surf->dcc_size = surf_level->dcc_offset + AddrDccOut->dccRamSize;
			surf->dcc_alignment = MAX2(surf->dcc_alignment, AddrDccOut->dccRamBaseAlign);

			/* If the DCC size of a subresource (1 mip level or 1 slice)
			 * is not aligned, the DCC memory layout is not contiguous for
			 * that subresource, which means we can't use fast clear.
			 *
			 * We only do fast clears for whole mipmap levels. If we did
			 * per-slice fast clears, the same restriction would apply.
			 * (i.e. only compute the slice size and see if it's aligned)
			 *
			 * The last level can be non-contiguous and still be clearable
			 * if it's interleaved with the next level that doesn't exist.
			 */
			if (AddrDccOut->dccRamSizeAligned ||
			    (prev_level_clearable && level == config->info.levels - 1))
				surf_level->dcc_fast_clear_size = AddrDccOut->dccFastClearSize;
			else
				surf_level->dcc_fast_clear_size = 0;

			/* Compute the DCC slice size because addrlib doesn't
			 * provide this info. As DCC memory is linear (each
			 * slice is the same size) it's easy to compute.
			 */
			surf->dcc_slice_size = AddrDccOut->dccRamSize / config->info.array_size;

			/* For arrays, we have to compute the DCC info again
			 * with one slice size to get a correct fast clear
			 * size.
			 */
			if (config->info.array_size > 1) {
				AddrDccIn->colorSurfSize = AddrSurfInfoOut->sliceSize;
				AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
				AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
				AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
				AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;

				ret = AddrComputeDccInfo(addrlib,
							 AddrDccIn, AddrDccOut);
				if (ret == ADDR_OK) {
					/* If the DCC memory isn't properly
					 * aligned, the data are interleaved
					 * accross slices.
					 */
					if (AddrDccOut->dccRamSizeAligned)
						surf_level->dcc_slice_fast_clear_size = AddrDccOut->dccFastClearSize;
					else
						surf_level->dcc_slice_fast_clear_size = 0;
				}

				/* Contiguous per-layer DCC was requested but isn't
				 * possible here: drop DCC for the whole surface. */
				if (surf->flags & RADEON_SURF_CONTIGUOUS_DCC_LAYERS &&
				    surf->dcc_slice_size != surf_level->dcc_slice_fast_clear_size) {
					surf->dcc_size = 0;
					surf->num_dcc_levels = 0;
					AddrDccOut->subLvlCompressible = false;
				}
			} else {
				surf_level->dcc_slice_fast_clear_size = surf_level->dcc_fast_clear_size;
			}
		}
	}

	/* HTILE. */
	if (!is_stencil &&
	    AddrSurfInfoIn->flags.depth &&
	    surf_level->mode == RADEON_SURF_MODE_2D &&
	    level == 0 &&
	    !(surf->flags & RADEON_SURF_NO_HTILE)) {
		AddrHtileIn->flags.tcCompatible = AddrSurfInfoOut->tcCompatible;
		AddrHtileIn->pitch = AddrSurfInfoOut->pitch;
		AddrHtileIn->height = AddrSurfInfoOut->height;
		AddrHtileIn->numSlices = AddrSurfInfoOut->depth;
		AddrHtileIn->blockWidth = ADDR_HTILE_BLOCKSIZE_8;
		AddrHtileIn->blockHeight = ADDR_HTILE_BLOCKSIZE_8;
		AddrHtileIn->pTileInfo = AddrSurfInfoOut->pTileInfo;
		AddrHtileIn->tileIndex = AddrSurfInfoOut->tileIndex;
		AddrHtileIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;

		ret = AddrComputeHtileInfo(addrlib,
					   AddrHtileIn,
					   AddrHtileOut);

		/* HTILE failure is not fatal: the surface simply has no HTILE. */
		if (ret == ADDR_OK) {
			surf->htile_size = AddrHtileOut->htileBytes;
			surf->htile_slice_size = AddrHtileOut->sliceSize;
			surf->htile_alignment = AddrHtileOut->baseAlign;
		}
	}

	return 0;
}
549
550 static void gfx6_set_micro_tile_mode(struct radeon_surf *surf,
551 const struct radeon_info *info)
552 {
553 uint32_t tile_mode = info->si_tile_mode_array[surf->u.legacy.tiling_index[0]];
554
555 if (info->chip_class >= GFX7)
556 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE_NEW(tile_mode);
557 else
558 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE(tile_mode);
559 }
560
561 static unsigned cik_get_macro_tile_index(struct radeon_surf *surf)
562 {
563 unsigned index, tileb;
564
565 tileb = 8 * 8 * surf->bpe;
566 tileb = MIN2(surf->u.legacy.tile_split, tileb);
567
568 for (index = 0; tileb > 64; index++)
569 tileb >>= 1;
570
571 assert(index < 16);
572 return index;
573 }
574
575 static bool get_display_flag(const struct ac_surf_config *config,
576 const struct radeon_surf *surf)
577 {
578 unsigned num_channels = config->info.num_channels;
579 unsigned bpe = surf->bpe;
580
581 if (!config->is_3d &&
582 !config->is_cube &&
583 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
584 surf->flags & RADEON_SURF_SCANOUT &&
585 config->info.samples <= 1 &&
586 surf->blk_w <= 2 && surf->blk_h == 1) {
587 /* subsampled */
588 if (surf->blk_w == 2 && surf->blk_h == 1)
589 return true;
590
591 if (/* RGBA8 or RGBA16F */
592 (bpe >= 4 && bpe <= 8 && num_channels == 4) ||
593 /* R5G6B5 or R5G5B5A1 */
594 (bpe == 2 && num_channels >= 3) ||
595 /* C8 palette */
596 (bpe == 1 && num_channels == 1))
597 return true;
598 }
599 return false;
600 }
601
/**
 * This must be called after the first level is computed.
 *
 * Copy surface-global settings like pipe/bank config from level 0 surface
 * computation, and compute tile swizzle.
 *
 * \param addrlib  addrlib handle
 * \param info     GPU info (chip class, etc.)
 * \param config   surface configuration (levels, surf_index)
 * \param csio     addrlib output for level 0
 * \param surf     surface to fill in
 * \return 0 on success, or an addrlib error code
 */
static int gfx6_surface_settings(ADDR_HANDLE addrlib,
				 const struct radeon_info *info,
				 const struct ac_surf_config *config,
				 ADDR_COMPUTE_SURFACE_INFO_OUTPUT* csio,
				 struct radeon_surf *surf)
{
	surf->surf_alignment = csio->baseAlign;
	/* pipeConfig is +1 relative to the register encoding. */
	surf->u.legacy.pipe_config = csio->pTileInfo->pipeConfig - 1;
	gfx6_set_micro_tile_mode(surf, info);

	/* For 2D modes only. */
	if (csio->tileMode >= ADDR_TM_2D_TILED_THIN1) {
		surf->u.legacy.bankw = csio->pTileInfo->bankWidth;
		surf->u.legacy.bankh = csio->pTileInfo->bankHeight;
		surf->u.legacy.mtilea = csio->pTileInfo->macroAspectRatio;
		surf->u.legacy.tile_split = csio->pTileInfo->tileSplitBytes;
		surf->u.legacy.num_banks = csio->pTileInfo->banks;
		surf->u.legacy.macro_tile_index = csio->macroModeIndex;
	} else {
		surf->u.legacy.macro_tile_index = 0;
	}

	/* Compute tile swizzle. */
	/* TODO: fix tile swizzle with mipmapping for GFX6 */
	if ((info->chip_class >= GFX7 || config->info.levels == 1) &&
	    config->info.surf_index &&
	    surf->u.legacy.level[0].mode == RADEON_SURF_MODE_2D &&
	    !(surf->flags & (RADEON_SURF_Z_OR_SBUFFER | RADEON_SURF_SHAREABLE)) &&
	    !get_display_flag(config, surf)) {
		ADDR_COMPUTE_BASE_SWIZZLE_INPUT AddrBaseSwizzleIn = {0};
		ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT AddrBaseSwizzleOut = {0};

		AddrBaseSwizzleIn.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
		AddrBaseSwizzleOut.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);

		/* Each new surface gets the next index so that different
		 * surfaces get different swizzles (atomic, so thread-safe). */
		AddrBaseSwizzleIn.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
		AddrBaseSwizzleIn.tileIndex = csio->tileIndex;
		AddrBaseSwizzleIn.macroModeIndex = csio->macroModeIndex;
		AddrBaseSwizzleIn.pTileInfo = csio->pTileInfo;
		AddrBaseSwizzleIn.tileMode = csio->tileMode;

		int r = AddrComputeBaseSwizzle(addrlib, &AddrBaseSwizzleIn,
					       &AddrBaseSwizzleOut);
		if (r != ADDR_OK)
			return r;

		/* The swizzle must fit in surf->tile_swizzle (a narrow field). */
		assert(AddrBaseSwizzleOut.tileSwizzle <=
		       u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
		surf->tile_swizzle = AddrBaseSwizzleOut.tileSwizzle;
	}
	return 0;
}
660
/* Compute the CMASK (fast color clear metadata) layout for a GFX6-GFX8
 * color surface and store size/alignment/slice info in "surf".
 *
 * No-op for depth/stencil, linear surfaces, and MSAA surfaces without
 * FMASK (CMASK is only meaningful alongside FMASK for MSAA).
 */
static void ac_compute_cmask(const struct radeon_info *info,
			     const struct ac_surf_config *config,
			     struct radeon_surf *surf)
{
	unsigned pipe_interleave_bytes = info->pipe_interleave_bytes;
	unsigned num_pipes = info->num_tile_pipes;
	unsigned cl_width, cl_height;

	if (surf->flags & RADEON_SURF_Z_OR_SBUFFER || surf->is_linear ||
	    (config->info.samples >= 2 && !surf->fmask_size))
		return;

	assert(info->chip_class <= GFX8);

	/* Cache-line dimensions per pipe count. NOTE(review): these values
	 * presumably come from the hardware docs — confirm before changing. */
	switch (num_pipes) {
	case 2:
		cl_width = 32;
		cl_height = 16;
		break;
	case 4:
		cl_width = 32;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 32;
		break;
	case 16: /* Hawaii */
		cl_width = 64;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	unsigned base_align = num_pipes * pipe_interleave_bytes;

	/* Align the surface extents (in blocks) to the cache-line area in
	 * pixels (cl_* are in 8-pixel tile units, hence *8). */
	unsigned width = align(surf->u.legacy.level[0].nblk_x, cl_width*8);
	unsigned height = align(surf->u.legacy.level[0].nblk_y, cl_height*8);
	/* One CMASK element covers one 8x8 pixel tile. */
	unsigned slice_elements = (width * height) / (8*8);

	/* Each element of CMASK is a nibble. */
	unsigned slice_bytes = slice_elements / 2;

	/* slice_tile_max is the register encoding: tile count (128x128 pixel
	 * granularity) minus 1. */
	surf->u.legacy.cmask_slice_tile_max = (width * height) / (128*128);
	if (surf->u.legacy.cmask_slice_tile_max)
		surf->u.legacy.cmask_slice_tile_max -= 1;

	unsigned num_layers;
	if (config->is_3d)
		num_layers = config->info.depth;
	else if (config->is_cube)
		num_layers = 6;
	else
		num_layers = config->info.array_size;

	surf->cmask_alignment = MAX2(256, base_align);
	surf->cmask_slice_size = align(slice_bytes, base_align);
	surf->cmask_size = surf->cmask_slice_size * num_layers;
}
722
723 /**
724 * Fill in the tiling information in \p surf based on the given surface config.
725 *
726 * The following fields of \p surf must be initialized by the caller:
727 * blk_w, blk_h, bpe, flags.
728 */
729 static int gfx6_compute_surface(ADDR_HANDLE addrlib,
730 const struct radeon_info *info,
731 const struct ac_surf_config *config,
732 enum radeon_surf_mode mode,
733 struct radeon_surf *surf)
734 {
735 unsigned level;
736 bool compressed;
737 ADDR_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
738 ADDR_COMPUTE_SURFACE_INFO_OUTPUT AddrSurfInfoOut = {0};
739 ADDR_COMPUTE_DCCINFO_INPUT AddrDccIn = {0};
740 ADDR_COMPUTE_DCCINFO_OUTPUT AddrDccOut = {0};
741 ADDR_COMPUTE_HTILE_INFO_INPUT AddrHtileIn = {0};
742 ADDR_COMPUTE_HTILE_INFO_OUTPUT AddrHtileOut = {0};
743 ADDR_TILEINFO AddrTileInfoIn = {0};
744 ADDR_TILEINFO AddrTileInfoOut = {0};
745 int r;
746
747 AddrSurfInfoIn.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);
748 AddrSurfInfoOut.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
749 AddrDccIn.size = sizeof(ADDR_COMPUTE_DCCINFO_INPUT);
750 AddrDccOut.size = sizeof(ADDR_COMPUTE_DCCINFO_OUTPUT);
751 AddrHtileIn.size = sizeof(ADDR_COMPUTE_HTILE_INFO_INPUT);
752 AddrHtileOut.size = sizeof(ADDR_COMPUTE_HTILE_INFO_OUTPUT);
753 AddrSurfInfoOut.pTileInfo = &AddrTileInfoOut;
754
755 compressed = surf->blk_w == 4 && surf->blk_h == 4;
756
757 /* MSAA requires 2D tiling. */
758 if (config->info.samples > 1)
759 mode = RADEON_SURF_MODE_2D;
760
761 /* DB doesn't support linear layouts. */
762 if (surf->flags & (RADEON_SURF_Z_OR_SBUFFER) &&
763 mode < RADEON_SURF_MODE_1D)
764 mode = RADEON_SURF_MODE_1D;
765
766 /* Set the requested tiling mode. */
767 switch (mode) {
768 case RADEON_SURF_MODE_LINEAR_ALIGNED:
769 AddrSurfInfoIn.tileMode = ADDR_TM_LINEAR_ALIGNED;
770 break;
771 case RADEON_SURF_MODE_1D:
772 AddrSurfInfoIn.tileMode = ADDR_TM_1D_TILED_THIN1;
773 break;
774 case RADEON_SURF_MODE_2D:
775 AddrSurfInfoIn.tileMode = ADDR_TM_2D_TILED_THIN1;
776 break;
777 default:
778 assert(0);
779 }
780
781 /* The format must be set correctly for the allocation of compressed
782 * textures to work. In other cases, setting the bpp is sufficient.
783 */
784 if (compressed) {
785 switch (surf->bpe) {
786 case 8:
787 AddrSurfInfoIn.format = ADDR_FMT_BC1;
788 break;
789 case 16:
790 AddrSurfInfoIn.format = ADDR_FMT_BC3;
791 break;
792 default:
793 assert(0);
794 }
795 }
796 else {
797 AddrDccIn.bpp = AddrSurfInfoIn.bpp = surf->bpe * 8;
798 }
799
800 AddrDccIn.numSamples = AddrSurfInfoIn.numSamples =
801 MAX2(1, config->info.samples);
802 AddrSurfInfoIn.tileIndex = -1;
803
804 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER)) {
805 AddrDccIn.numSamples = AddrSurfInfoIn.numFrags =
806 MAX2(1, config->info.storage_samples);
807 }
808
809 /* Set the micro tile type. */
810 if (surf->flags & RADEON_SURF_SCANOUT)
811 AddrSurfInfoIn.tileType = ADDR_DISPLAYABLE;
812 else if (surf->flags & RADEON_SURF_Z_OR_SBUFFER)
813 AddrSurfInfoIn.tileType = ADDR_DEPTH_SAMPLE_ORDER;
814 else
815 AddrSurfInfoIn.tileType = ADDR_NON_DISPLAYABLE;
816
817 AddrSurfInfoIn.flags.color = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
818 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
819 AddrSurfInfoIn.flags.cube = config->is_cube;
820 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
821 AddrSurfInfoIn.flags.pow2Pad = config->info.levels > 1;
822 AddrSurfInfoIn.flags.tcCompatible = (surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) != 0;
823
824 /* Only degrade the tile mode for space if TC-compatible HTILE hasn't been
825 * requested, because TC-compatible HTILE requires 2D tiling.
826 */
827 AddrSurfInfoIn.flags.opt4Space = !AddrSurfInfoIn.flags.tcCompatible &&
828 !AddrSurfInfoIn.flags.fmask &&
829 config->info.samples <= 1 &&
830 !(surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE);
831
832 /* DCC notes:
833 * - If we add MSAA support, keep in mind that CB can't decompress 8bpp
834 * with samples >= 4.
835 * - Mipmapped array textures have low performance (discovered by a closed
836 * driver team).
837 */
838 AddrSurfInfoIn.flags.dccCompatible =
839 info->chip_class >= GFX8 &&
840 info->has_graphics && /* disable DCC on compute-only chips */
841 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
842 !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
843 !compressed &&
844 ((config->info.array_size == 1 && config->info.depth == 1) ||
845 config->info.levels == 1);
846
847 AddrSurfInfoIn.flags.noStencil = (surf->flags & RADEON_SURF_SBUFFER) == 0;
848 AddrSurfInfoIn.flags.compressZ = !!(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
849
850 /* On GFX7-GFX8, the DB uses the same pitch and tile mode (except tilesplit)
851 * for Z and stencil. This can cause a number of problems which we work
852 * around here:
853 *
854 * - a depth part that is incompatible with mipmapped texturing
855 * - at least on Stoney, entirely incompatible Z/S aspects (e.g.
856 * incorrect tiling applied to the stencil part, stencil buffer
857 * memory accesses that go out of bounds) even without mipmapping
858 *
859 * Some piglit tests that are prone to different types of related
860 * failures:
861 * ./bin/ext_framebuffer_multisample-upsample 2 stencil
862 * ./bin/framebuffer-blit-levels {draw,read} stencil
863 * ./bin/ext_framebuffer_multisample-unaligned-blit N {depth,stencil} {msaa,upsample,downsample}
864 * ./bin/fbo-depth-array fs-writes-{depth,stencil} / {depth,stencil}-{clear,layered-clear,draw}
865 * ./bin/depthstencil-render-miplevels 1024 d=s=z24_s8
866 */
867 int stencil_tile_idx = -1;
868
869 if (AddrSurfInfoIn.flags.depth && !AddrSurfInfoIn.flags.noStencil &&
870 (config->info.levels > 1 || info->family == CHIP_STONEY)) {
871 /* Compute stencilTileIdx that is compatible with the (depth)
872 * tileIdx. This degrades the depth surface if necessary to
873 * ensure that a matching stencilTileIdx exists. */
874 AddrSurfInfoIn.flags.matchStencilTileCfg = 1;
875
876 /* Keep the depth mip-tail compatible with texturing. */
877 AddrSurfInfoIn.flags.noStencil = 1;
878 }
879
880 /* Set preferred macrotile parameters. This is usually required
881 * for shared resources. This is for 2D tiling only. */
882 if (AddrSurfInfoIn.tileMode >= ADDR_TM_2D_TILED_THIN1 &&
883 surf->u.legacy.bankw && surf->u.legacy.bankh &&
884 surf->u.legacy.mtilea && surf->u.legacy.tile_split) {
885 /* If any of these parameters are incorrect, the calculation
886 * will fail. */
887 AddrTileInfoIn.banks = surf->u.legacy.num_banks;
888 AddrTileInfoIn.bankWidth = surf->u.legacy.bankw;
889 AddrTileInfoIn.bankHeight = surf->u.legacy.bankh;
890 AddrTileInfoIn.macroAspectRatio = surf->u.legacy.mtilea;
891 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.tile_split;
892 AddrTileInfoIn.pipeConfig = surf->u.legacy.pipe_config + 1; /* +1 compared to GB_TILE_MODE */
893 AddrSurfInfoIn.flags.opt4Space = 0;
894 AddrSurfInfoIn.pTileInfo = &AddrTileInfoIn;
895
896 /* If AddrSurfInfoIn.pTileInfo is set, Addrlib doesn't set
897 * the tile index, because we are expected to know it if
898 * we know the other parameters.
899 *
900 * This is something that can easily be fixed in Addrlib.
901 * For now, just figure it out here.
902 * Note that only 2D_TILE_THIN1 is handled here.
903 */
904 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
905 assert(AddrSurfInfoIn.tileMode == ADDR_TM_2D_TILED_THIN1);
906
907 if (info->chip_class == GFX6) {
908 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE) {
909 if (surf->bpe == 2)
910 AddrSurfInfoIn.tileIndex = 11; /* 16bpp */
911 else
912 AddrSurfInfoIn.tileIndex = 12; /* 32bpp */
913 } else {
914 if (surf->bpe == 1)
915 AddrSurfInfoIn.tileIndex = 14; /* 8bpp */
916 else if (surf->bpe == 2)
917 AddrSurfInfoIn.tileIndex = 15; /* 16bpp */
918 else if (surf->bpe == 4)
919 AddrSurfInfoIn.tileIndex = 16; /* 32bpp */
920 else
921 AddrSurfInfoIn.tileIndex = 17; /* 64bpp (and 128bpp) */
922 }
923 } else {
924 /* GFX7 - GFX8 */
925 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE)
926 AddrSurfInfoIn.tileIndex = 10; /* 2D displayable */
927 else
928 AddrSurfInfoIn.tileIndex = 14; /* 2D non-displayable */
929
930 /* Addrlib doesn't set this if tileIndex is forced like above. */
931 AddrSurfInfoOut.macroModeIndex = cik_get_macro_tile_index(surf);
932 }
933 }
934
935 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
936 surf->num_dcc_levels = 0;
937 surf->surf_size = 0;
938 surf->dcc_size = 0;
939 surf->dcc_alignment = 1;
940 surf->htile_size = 0;
941 surf->htile_slice_size = 0;
942 surf->htile_alignment = 1;
943
944 const bool only_stencil = (surf->flags & RADEON_SURF_SBUFFER) &&
945 !(surf->flags & RADEON_SURF_ZBUFFER);
946
947 /* Calculate texture layout information. */
948 if (!only_stencil) {
949 for (level = 0; level < config->info.levels; level++) {
950 r = gfx6_compute_level(addrlib, config, surf, false, level, compressed,
951 &AddrSurfInfoIn, &AddrSurfInfoOut,
952 &AddrDccIn, &AddrDccOut, &AddrHtileIn, &AddrHtileOut);
953 if (r)
954 return r;
955
956 if (level > 0)
957 continue;
958
959 if (!AddrSurfInfoOut.tcCompatible) {
960 AddrSurfInfoIn.flags.tcCompatible = 0;
961 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
962 }
963
964 if (AddrSurfInfoIn.flags.matchStencilTileCfg) {
965 AddrSurfInfoIn.flags.matchStencilTileCfg = 0;
966 AddrSurfInfoIn.tileIndex = AddrSurfInfoOut.tileIndex;
967 stencil_tile_idx = AddrSurfInfoOut.stencilTileIdx;
968
969 assert(stencil_tile_idx >= 0);
970 }
971
972 r = gfx6_surface_settings(addrlib, info, config,
973 &AddrSurfInfoOut, surf);
974 if (r)
975 return r;
976 }
977 }
978
979 /* Calculate texture layout information for stencil. */
980 if (surf->flags & RADEON_SURF_SBUFFER) {
981 AddrSurfInfoIn.tileIndex = stencil_tile_idx;
982 AddrSurfInfoIn.bpp = 8;
983 AddrSurfInfoIn.flags.depth = 0;
984 AddrSurfInfoIn.flags.stencil = 1;
985 AddrSurfInfoIn.flags.tcCompatible = 0;
986 /* This will be ignored if AddrSurfInfoIn.pTileInfo is NULL. */
987 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.stencil_tile_split;
988
989 for (level = 0; level < config->info.levels; level++) {
990 r = gfx6_compute_level(addrlib, config, surf, true, level, compressed,
991 &AddrSurfInfoIn, &AddrSurfInfoOut,
992 &AddrDccIn, &AddrDccOut,
993 NULL, NULL);
994 if (r)
995 return r;
996
997 /* DB uses the depth pitch for both stencil and depth. */
998 if (!only_stencil) {
999 if (surf->u.legacy.stencil_level[level].nblk_x !=
1000 surf->u.legacy.level[level].nblk_x)
1001 surf->u.legacy.stencil_adjusted = true;
1002 } else {
1003 surf->u.legacy.level[level].nblk_x =
1004 surf->u.legacy.stencil_level[level].nblk_x;
1005 }
1006
1007 if (level == 0) {
1008 if (only_stencil) {
1009 r = gfx6_surface_settings(addrlib, info, config,
1010 &AddrSurfInfoOut, surf);
1011 if (r)
1012 return r;
1013 }
1014
1015 /* For 2D modes only. */
1016 if (AddrSurfInfoOut.tileMode >= ADDR_TM_2D_TILED_THIN1) {
1017 surf->u.legacy.stencil_tile_split =
1018 AddrSurfInfoOut.pTileInfo->tileSplitBytes;
1019 }
1020 }
1021 }
1022 }
1023
1024 /* Compute FMASK. */
1025 if (config->info.samples >= 2 && AddrSurfInfoIn.flags.color &&
1026 info->has_graphics && !(surf->flags & RADEON_SURF_NO_FMASK)) {
1027 ADDR_COMPUTE_FMASK_INFO_INPUT fin = {0};
1028 ADDR_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
1029 ADDR_TILEINFO fmask_tile_info = {};
1030
1031 fin.size = sizeof(fin);
1032 fout.size = sizeof(fout);
1033
1034 fin.tileMode = AddrSurfInfoOut.tileMode;
1035 fin.pitch = AddrSurfInfoOut.pitch;
1036 fin.height = config->info.height;
1037 fin.numSlices = AddrSurfInfoIn.numSlices;
1038 fin.numSamples = AddrSurfInfoIn.numSamples;
1039 fin.numFrags = AddrSurfInfoIn.numFrags;
1040 fin.tileIndex = -1;
1041 fout.pTileInfo = &fmask_tile_info;
1042
1043 r = AddrComputeFmaskInfo(addrlib, &fin, &fout);
1044 if (r)
1045 return r;
1046
1047 surf->fmask_size = fout.fmaskBytes;
1048 surf->fmask_alignment = fout.baseAlign;
1049 surf->fmask_tile_swizzle = 0;
1050
1051 surf->u.legacy.fmask.slice_tile_max =
1052 (fout.pitch * fout.height) / 64;
1053 if (surf->u.legacy.fmask.slice_tile_max)
1054 surf->u.legacy.fmask.slice_tile_max -= 1;
1055
1056 surf->u.legacy.fmask.tiling_index = fout.tileIndex;
1057 surf->u.legacy.fmask.bankh = fout.pTileInfo->bankHeight;
1058 surf->u.legacy.fmask.pitch_in_pixels = fout.pitch;
1059 surf->u.legacy.fmask.slice_size = fout.sliceSize;
1060
1061 /* Compute tile swizzle for FMASK. */
1062 if (config->info.fmask_surf_index &&
1063 !(surf->flags & RADEON_SURF_SHAREABLE)) {
1064 ADDR_COMPUTE_BASE_SWIZZLE_INPUT xin = {0};
1065 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT xout = {0};
1066
1067 xin.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
1068 xout.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
1069
1070 /* This counter starts from 1 instead of 0. */
1071 xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
1072 xin.tileIndex = fout.tileIndex;
1073 xin.macroModeIndex = fout.macroModeIndex;
1074 xin.pTileInfo = fout.pTileInfo;
1075 xin.tileMode = fin.tileMode;
1076
1077 int r = AddrComputeBaseSwizzle(addrlib, &xin, &xout);
1078 if (r != ADDR_OK)
1079 return r;
1080
1081 assert(xout.tileSwizzle <=
1082 u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1083 surf->fmask_tile_swizzle = xout.tileSwizzle;
1084 }
1085 }
1086
1087 /* Recalculate the whole DCC miptree size including disabled levels.
1088 * This is what addrlib does, but calling addrlib would be a lot more
1089 * complicated.
1090 */
1091 if (surf->dcc_size && config->info.levels > 1) {
1092 /* The smallest miplevels that are never compressed by DCC
1093 * still read the DCC buffer via TC if the base level uses DCC,
1094 * and for some reason the DCC buffer needs to be larger if
1095 * the miptree uses non-zero tile_swizzle. Otherwise there are
1096 * VM faults.
1097 *
1098 * "dcc_alignment * 4" was determined by trial and error.
1099 */
1100 surf->dcc_size = align64(surf->surf_size >> 8,
1101 surf->dcc_alignment * 4);
1102 }
1103
1104 /* Make sure HTILE covers the whole miptree, because the shader reads
1105 * TC-compatible HTILE even for levels where it's disabled by DB.
1106 */
1107 if (surf->htile_size && config->info.levels > 1 &&
1108 surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) {
1109 /* MSAA can't occur with levels > 1, so ignore the sample count. */
1110 const unsigned total_pixels = surf->surf_size / surf->bpe;
1111 const unsigned htile_block_size = 8 * 8;
1112 const unsigned htile_element_size = 4;
1113
1114 surf->htile_size = (total_pixels / htile_block_size) *
1115 htile_element_size;
1116 surf->htile_size = align(surf->htile_size, surf->htile_alignment);
1117 } else if (!surf->htile_size) {
1118 /* Unset this if HTILE is not present. */
1119 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1120 }
1121
1122 surf->is_linear = surf->u.legacy.level[0].mode == RADEON_SURF_MODE_LINEAR_ALIGNED;
1123 surf->is_displayable = surf->is_linear ||
1124 surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY ||
1125 surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER;
1126
1127 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
1128 * used at the same time. This case is not currently expected to occur
1129 * because we don't use rotated. Enforce this restriction on all chips
1130 * to facilitate testing.
1131 */
1132 if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER) {
1133 assert(!"rotate micro tile mode is unsupported");
1134 return ADDR_ERROR;
1135 }
1136
1137 ac_compute_cmask(info, config, surf);
1138 return 0;
1139 }
1140
1141 /* This is only called when expecting a tiled layout. */
1142 static int
1143 gfx9_get_preferred_swizzle_mode(ADDR_HANDLE addrlib,
1144 struct radeon_surf *surf,
1145 ADDR2_COMPUTE_SURFACE_INFO_INPUT *in,
1146 bool is_fmask, AddrSwizzleMode *swizzle_mode)
1147 {
1148 ADDR_E_RETURNCODE ret;
1149 ADDR2_GET_PREFERRED_SURF_SETTING_INPUT sin = {0};
1150 ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT sout = {0};
1151
1152 sin.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_INPUT);
1153 sout.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT);
1154
1155 sin.flags = in->flags;
1156 sin.resourceType = in->resourceType;
1157 sin.format = in->format;
1158 sin.resourceLoction = ADDR_RSRC_LOC_INVIS;
1159 /* TODO: We could allow some of these: */
1160 sin.forbiddenBlock.micro = 1; /* don't allow the 256B swizzle modes */
1161 sin.forbiddenBlock.var = 1; /* don't allow the variable-sized swizzle modes */
1162 sin.bpp = in->bpp;
1163 sin.width = in->width;
1164 sin.height = in->height;
1165 sin.numSlices = in->numSlices;
1166 sin.numMipLevels = in->numMipLevels;
1167 sin.numSamples = in->numSamples;
1168 sin.numFrags = in->numFrags;
1169
1170 if (is_fmask) {
1171 sin.flags.display = 0;
1172 sin.flags.color = 0;
1173 sin.flags.fmask = 1;
1174 }
1175
1176 if (surf->flags & RADEON_SURF_FORCE_MICRO_TILE_MODE) {
1177 sin.forbiddenBlock.linear = 1;
1178
1179 if (surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY)
1180 sin.preferredSwSet.sw_D = 1;
1181 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_STANDARD)
1182 sin.preferredSwSet.sw_S = 1;
1183 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_DEPTH)
1184 sin.preferredSwSet.sw_Z = 1;
1185 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER)
1186 sin.preferredSwSet.sw_R = 1;
1187 }
1188
1189 ret = Addr2GetPreferredSurfaceSetting(addrlib, &sin, &sout);
1190 if (ret != ADDR_OK)
1191 return ret;
1192
1193 *swizzle_mode = sout.swizzleMode;
1194 return 0;
1195 }
1196
1197 static bool is_dcc_supported_by_CB(const struct radeon_info *info, unsigned sw_mode)
1198 {
1199 if (info->chip_class >= GFX10)
1200 return sw_mode == ADDR_SW_64KB_Z_X || sw_mode == ADDR_SW_64KB_R_X;
1201
1202 return sw_mode != ADDR_SW_LINEAR;
1203 }
1204
1205 ASSERTED static bool is_dcc_supported_by_L2(const struct radeon_info *info,
1206 const struct radeon_surf *surf)
1207 {
1208 if (info->chip_class <= GFX9) {
1209 /* Only independent 64B blocks are supported. */
1210 return surf->u.gfx9.dcc.independent_64B_blocks &&
1211 !surf->u.gfx9.dcc.independent_128B_blocks &&
1212 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B;
1213 }
1214
1215 if (info->family == CHIP_NAVI10) {
1216 /* Only independent 128B blocks are supported. */
1217 return !surf->u.gfx9.dcc.independent_64B_blocks &&
1218 surf->u.gfx9.dcc.independent_128B_blocks &&
1219 surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B;
1220 }
1221
1222 if (info->family == CHIP_NAVI12 ||
1223 info->family == CHIP_NAVI14) {
1224 /* Either 64B or 128B can be used, but not both.
1225 * If 64B is used, DCC image stores are unsupported.
1226 */
1227 return surf->u.gfx9.dcc.independent_64B_blocks !=
1228 surf->u.gfx9.dcc.independent_128B_blocks &&
1229 (!surf->u.gfx9.dcc.independent_64B_blocks ||
1230 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B) &&
1231 (!surf->u.gfx9.dcc.independent_128B_blocks ||
1232 surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B);
1233 }
1234
1235 /* 128B is recommended, but 64B can be set too if needed for 4K by DCN.
1236 * Since there is no reason to ever disable 128B, require it.
1237 * DCC image stores are always supported.
1238 */
1239 return surf->u.gfx9.dcc.independent_128B_blocks &&
1240 surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B;
1241 }
1242
1243 static bool is_dcc_supported_by_DCN(const struct radeon_info *info,
1244 const struct ac_surf_config *config,
1245 const struct radeon_surf *surf,
1246 bool rb_aligned, bool pipe_aligned)
1247 {
1248 if (!info->use_display_dcc_unaligned &&
1249 !info->use_display_dcc_with_retile_blit)
1250 return false;
1251
1252 /* 16bpp and 64bpp are more complicated, so they are disallowed for now. */
1253 if (surf->bpe != 4)
1254 return false;
1255
1256 /* Handle unaligned DCC. */
1257 if (info->use_display_dcc_unaligned &&
1258 (rb_aligned || pipe_aligned))
1259 return false;
1260
1261 switch (info->chip_class) {
1262 case GFX9:
1263 /* There are more constraints, but we always set
1264 * INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B,
1265 * which always works.
1266 */
1267 assert(surf->u.gfx9.dcc.independent_64B_blocks &&
1268 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B);
1269 return true;
1270 case GFX10:
1271 case GFX10_3:
1272 /* DCN requires INDEPENDENT_128B_BLOCKS = 0 only on Navi1x. */
1273 if (info->chip_class == GFX10 &&
1274 surf->u.gfx9.dcc.independent_128B_blocks)
1275 return false;
1276
1277 /* For 4K, DCN requires INDEPENDENT_64B_BLOCKS = 1. */
1278 return ((config->info.width <= 2560 &&
1279 config->info.height <= 2560) ||
1280 (surf->u.gfx9.dcc.independent_64B_blocks &&
1281 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B));
1282 default:
1283 unreachable("unhandled chip");
1284 return false;
1285 }
1286 }
1287
/* Compute the GFX9+ layout of one mip tree via addrlib and fill *surf:
 * main surface (or stencil plane), then HTILE for depth, or tile swizzle,
 * DCC (including the displayable-DCC retile map), FMASK and CMASK for color.
 *
 * "in" must be fully initialized by the caller (gfx9_compute_surface);
 * this function only reads it, except that it passes in->flags through to
 * the metadata queries. Returns an ADDR_* error code, 0 on success.
 */
static int gfx9_compute_miptree(struct ac_addrlib *addrlib,
				const struct radeon_info *info,
				const struct ac_surf_config *config,
				struct radeon_surf *surf, bool compressed,
				ADDR2_COMPUTE_SURFACE_INFO_INPUT *in)
{
	ADDR2_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {};
	ADDR2_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
	ADDR_E_RETURNCODE ret;

	out.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_OUTPUT);
	out.pMipInfo = mip_info;

	/* Main layout query: sizes, alignment, per-mip info. */
	ret = Addr2ComputeSurfaceInfo(addrlib->handle, in, &out);
	if (ret != ADDR_OK)
		return ret;

	if (in->flags.stencil) {
		/* Stencil plane: it is appended after the depth plane
		 * (stencil_offset is relative to the surface start), and the
		 * rest of this function (DCC/FMASK/CMASK) doesn't apply.
		 */
		surf->u.gfx9.stencil.swizzle_mode = in->swizzleMode;
		surf->u.gfx9.stencil.epitch = out.epitchIsHeight ? out.mipChainHeight - 1 :
							out.mipChainPitch - 1;
		surf->surf_alignment = MAX2(surf->surf_alignment, out.baseAlign);
		surf->u.gfx9.stencil_offset = align(surf->surf_size, out.baseAlign);
		surf->surf_size = surf->u.gfx9.stencil_offset + out.surfSize;
		return 0;
	}

	surf->u.gfx9.surf.swizzle_mode = in->swizzleMode;
	surf->u.gfx9.surf.epitch = out.epitchIsHeight ? out.mipChainHeight - 1 :
				   out.mipChainPitch - 1;

	/* CMASK fast clear uses these even if FMASK isn't allocated.
	 * FMASK only supports the Z swizzle modes, whose numbers are multiples of 4.
	 */
	surf->u.gfx9.fmask.swizzle_mode = surf->u.gfx9.surf.swizzle_mode & ~0x3;
	surf->u.gfx9.fmask.epitch = surf->u.gfx9.surf.epitch;

	surf->u.gfx9.surf_slice_size = out.sliceSize;
	surf->u.gfx9.surf_pitch = out.pitch;
	if (!compressed && surf->blk_w > 1 && out.pitch == out.pixelPitch &&
	    surf->u.gfx9.surf.swizzle_mode == ADDR_SW_LINEAR) {
		/* Adjust surf_pitch to be in elements units,
		 * not in pixels */
		surf->u.gfx9.surf_pitch =
			align(surf->u.gfx9.surf_pitch / surf->blk_w, 256 / surf->bpe);
		/* Keep epitch consistent with the adjusted pitch. */
		surf->u.gfx9.surf.epitch = MAX2(surf->u.gfx9.surf.epitch,
						surf->u.gfx9.surf_pitch * surf->blk_w - 1);
	}
	surf->u.gfx9.surf_height = out.height;
	surf->surf_size = out.surfSize;
	surf->surf_alignment = out.baseAlign;

	/* Per-mip offsets/pitches are only recorded for linear surfaces;
	 * tiled mips are addressed through addrlib/HW instead.
	 */
	if (in->swizzleMode == ADDR_SW_LINEAR) {
		for (unsigned i = 0; i < in->numMipLevels; i++) {
			surf->u.gfx9.offset[i] = mip_info[i].offset;
			surf->u.gfx9.pitch[i] = mip_info[i].pitch;
		}
	}

	if (in->flags.depth) {
		assert(in->swizzleMode != ADDR_SW_LINEAR);

		if (surf->flags & RADEON_SURF_NO_HTILE)
			return 0;

		/* HTILE */
		ADDR2_COMPUTE_HTILE_INFO_INPUT hin = {0};
		ADDR2_COMPUTE_HTILE_INFO_OUTPUT hout = {0};

		hin.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_INPUT);
		hout.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_OUTPUT);

		/* HTILE must be fully pipe/RB-aligned (see the caller,
		 * which forces both flags to 0). */
		assert(in->flags.metaPipeUnaligned == 0);
		assert(in->flags.metaRbUnaligned == 0);

		hin.hTileFlags.pipeAligned = 1;
		hin.hTileFlags.rbAligned = 1;
		hin.depthFlags = in->flags;
		hin.swizzleMode = in->swizzleMode;
		hin.unalignedWidth = in->width;
		hin.unalignedHeight = in->height;
		hin.numSlices = in->numSlices;
		hin.numMipLevels = in->numMipLevels;
		hin.firstMipIdInTail = out.firstMipIdInTail;

		ret = Addr2ComputeHtileInfo(addrlib->handle, &hin, &hout);
		if (ret != ADDR_OK)
			return ret;

		surf->htile_size = hout.htileBytes;
		surf->htile_slice_size = hout.sliceSize;
		surf->htile_alignment = hout.baseAlign;
		/* Depth surfaces never get DCC/FMASK/CMASK here. */
		return 0;
	}

	/* Color path: tile swizzle, DCC, FMASK, CMASK. */
	{
		/* Compute tile swizzle for the color surface.
		 * All *_X and *_T modes can use the swizzle.
		 */
		if (config->info.surf_index &&
		    in->swizzleMode >= ADDR_SW_64KB_Z_T &&
		    !out.mipChainInTail &&
		    !(surf->flags & RADEON_SURF_SHAREABLE) &&
		    !in->flags.display) {
			ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
			ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};

			xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
			xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);

			/* Unlike the FMASK counter below, this one is made 0-based. */
			xin.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
			xin.flags = in->flags;
			xin.swizzleMode = in->swizzleMode;
			xin.resourceType = in->resourceType;
			xin.format = in->format;
			xin.numSamples = in->numSamples;
			xin.numFrags = in->numFrags;

			ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
			if (ret != ADDR_OK)
				return ret;

			/* tile_swizzle is a small bitfield; make sure the value fits. */
			assert(xout.pipeBankXor <=
			       u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
			surf->tile_swizzle = xout.pipeBankXor;
		}

		/* DCC */
		if (info->has_graphics &&
		    !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
		    !compressed &&
		    is_dcc_supported_by_CB(info, in->swizzleMode) &&
		    (!in->flags.display ||
		     is_dcc_supported_by_DCN(info, config, surf,
					     !in->flags.metaRbUnaligned,
					     !in->flags.metaPipeUnaligned))) {
			ADDR2_COMPUTE_DCCINFO_INPUT din = {0};
			ADDR2_COMPUTE_DCCINFO_OUTPUT dout = {0};
			ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {};

			din.size = sizeof(ADDR2_COMPUTE_DCCINFO_INPUT);
			dout.size = sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT);
			dout.pMipInfo = meta_mip_info;

			din.dccKeyFlags.pipeAligned = !in->flags.metaPipeUnaligned;
			din.dccKeyFlags.rbAligned = !in->flags.metaRbUnaligned;
			din.colorFlags = in->flags;
			din.resourceType = in->resourceType;
			din.swizzleMode = in->swizzleMode;
			din.bpp = in->bpp;
			din.unalignedWidth = in->width;
			din.unalignedHeight = in->height;
			din.numSlices = in->numSlices;
			din.numFrags = in->numFrags;
			din.numMipLevels = in->numMipLevels;
			din.dataSurfaceSize = out.surfSize;
			din.firstMipIdInTail = out.firstMipIdInTail;

			ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
			if (ret != ADDR_OK)
				return ret;

			surf->u.gfx9.dcc.rb_aligned = din.dccKeyFlags.rbAligned;
			surf->u.gfx9.dcc.pipe_aligned = din.dccKeyFlags.pipeAligned;
			surf->u.gfx9.dcc_block_width = dout.compressBlkWidth;
			surf->u.gfx9.dcc_block_height = dout.compressBlkHeight;
			surf->u.gfx9.dcc_block_depth = dout.compressBlkDepth;
			surf->dcc_size = dout.dccRamSize;
			surf->dcc_alignment = dout.dccRamBaseAlign;
			surf->num_dcc_levels = in->numMipLevels;

			/* Disable DCC for levels that are in the mip tail.
			 *
			 * There are two issues that this is intended to
			 * address:
			 *
			 * 1. Multiple mip levels may share a cache line. This
			 *    can lead to corruption when switching between
			 *    rendering to different mip levels because the
			 *    RBs don't maintain coherency.
			 *
			 * 2. Texturing with metadata after rendering sometimes
			 *    fails with corruption, probably for a similar
			 *    reason.
			 *
			 * Working around these issues for all levels in the
			 * mip tail may be overly conservative, but it's what
			 * Vulkan does.
			 *
			 * Alternative solutions that also work but are worse:
			 * - Disable DCC entirely.
			 * - Flush TC L2 after rendering.
			 */
			for (unsigned i = 0; i < in->numMipLevels; i++) {
				if (meta_mip_info[i].inMiptail) {
					surf->num_dcc_levels = i;
					break;
				}
			}

			/* If even level 0 is in the tail, drop DCC completely. */
			if (!surf->num_dcc_levels)
				surf->dcc_size = 0;

			/* By default the displayable DCC is the normal DCC;
			 * overwritten below when a retile blit is used. */
			surf->u.gfx9.display_dcc_size = surf->dcc_size;
			surf->u.gfx9.display_dcc_alignment = surf->dcc_alignment;
			surf->u.gfx9.display_dcc_pitch_max = dout.pitch - 1;

			/* Compute displayable DCC. */
			if (in->flags.display &&
			    surf->num_dcc_levels &&
			    info->use_display_dcc_with_retile_blit) {
				/* Compute displayable DCC info.
				 * Note: din is deliberately reused with only the
				 * alignment flags cleared (unaligned = scanout-able). */
				din.dccKeyFlags.pipeAligned = 0;
				din.dccKeyFlags.rbAligned = 0;

				assert(din.numSlices == 1);
				assert(din.numMipLevels == 1);
				assert(din.numFrags == 1);
				assert(surf->tile_swizzle == 0);
				assert(surf->u.gfx9.dcc.pipe_aligned ||
				       surf->u.gfx9.dcc.rb_aligned);

				ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
				if (ret != ADDR_OK)
					return ret;

				surf->u.gfx9.display_dcc_size = dout.dccRamSize;
				surf->u.gfx9.display_dcc_alignment = dout.dccRamBaseAlign;
				surf->u.gfx9.display_dcc_pitch_max = dout.pitch - 1;
				assert(surf->u.gfx9.display_dcc_size <= surf->dcc_size);

				/* 16-bit retile-map entries suffice iff both DCC
				 * buffers are addressable with 16 bits. */
				surf->u.gfx9.dcc_retile_use_uint16 =
					surf->u.gfx9.display_dcc_size <= UINT16_MAX + 1 &&
					surf->dcc_size <= UINT16_MAX + 1;

				/* Align the retile map size to get more hash table hits and
				 * decrease the maximum memory footprint when all retile maps
				 * are cached in the hash table.
				 */
				unsigned retile_dim[2] = {in->width, in->height};

				for (unsigned i = 0; i < 2; i++) {
					/* Increase the alignment as the size increases.
					 * Greater alignment increases retile compute work,
					 * but decreases maximum memory footprint for the cache.
					 *
					 * With this alignment, the worst case memory footprint of
					 * the cache is:
					 *   1920x1080: 55 MB
					 *   2560x1440: 99 MB
					 *   3840x2160: 305 MB
					 *
					 * The worst case size in MB can be computed in Haskell as follows:
					 *   (sum (map get_retile_size (map get_dcc_size (deduplicate (map align_pair
					 *       [(i*16,j*16) | i <- [1..maxwidth`div`16], j <- [1..maxheight`div`16]]))))) `div` 1024^2
					 *   where
					 *     alignment x = if x <= 512 then 16 else if x <= 1024 then 32 else if x <= 2048 then 64 else 128
					 *     align x = (x + (alignment x) - 1) `div` (alignment x) * (alignment x)
					 *     align_pair e = (align (fst e), align (snd e))
					 *     deduplicate = map head . groupBy (\ a b -> ((fst a) == (fst b)) && ((snd a) == (snd b))) . sortBy compare
					 *     get_dcc_size e = ((fst e) * (snd e) * bpp) `div` 256
					 *     get_retile_size dcc_size = dcc_size * 2 * (if dcc_size <= 2^16 then 2 else 4)
					 *     bpp = 4; maxwidth = 3840; maxheight = 2160
					 */
					if (retile_dim[i] <= 512)
						retile_dim[i] = align(retile_dim[i], 16);
					else if (retile_dim[i] <= 1024)
						retile_dim[i] = align(retile_dim[i], 32);
					else if (retile_dim[i] <= 2048)
						retile_dim[i] = align(retile_dim[i], 64);
					else
						retile_dim[i] = align(retile_dim[i], 128);

					/* Don't align more than the DCC pixel alignment. */
					assert(dout.metaBlkWidth >= 128 && dout.metaBlkHeight >= 128);
				}

				/* Two entries (src, dst) per DCC compress block. */
				surf->u.gfx9.dcc_retile_num_elements =
					DIV_ROUND_UP(retile_dim[0], dout.compressBlkWidth) *
					DIV_ROUND_UP(retile_dim[1], dout.compressBlkHeight) * 2;
				/* Align the size to 4 (for the compute shader). */
				surf->u.gfx9.dcc_retile_num_elements =
					align(surf->u.gfx9.dcc_retile_num_elements, 4);

				if (!(surf->flags & RADEON_SURF_IMPORTED)) {
					/* Compute address mapping from non-displayable to displayable DCC. */
					ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin;
					memset(&addrin, 0, sizeof(addrin));
					addrin.size = sizeof(addrin);
					addrin.swizzleMode = din.swizzleMode;
					addrin.resourceType = din.resourceType;
					addrin.bpp = din.bpp;
					addrin.numSlices = 1;
					addrin.numMipLevels = 1;
					addrin.numFrags = 1;
					addrin.pitch = dout.pitch;
					addrin.height = dout.height;
					addrin.compressBlkWidth = dout.compressBlkWidth;
					addrin.compressBlkHeight = dout.compressBlkHeight;
					addrin.compressBlkDepth = dout.compressBlkDepth;
					addrin.metaBlkWidth = dout.metaBlkWidth;
					addrin.metaBlkHeight = dout.metaBlkHeight;
					addrin.metaBlkDepth = dout.metaBlkDepth;
					addrin.dccRamSliceSize = 0; /* Don't care for non-layered images. */

					/* May return a cached map; ownership stays with the cache
					 * (see ac_compute_dcc_retile_map). */
					surf->u.gfx9.dcc_retile_map =
						ac_compute_dcc_retile_map(addrlib, info,
									  retile_dim[0], retile_dim[1],
									  surf->u.gfx9.dcc.rb_aligned,
									  surf->u.gfx9.dcc.pipe_aligned,
									  surf->u.gfx9.dcc_retile_use_uint16,
									  surf->u.gfx9.dcc_retile_num_elements,
									  &addrin);
					if (!surf->u.gfx9.dcc_retile_map)
						return ADDR_OUTOFMEMORY;
				}
			}
		}

		/* FMASK */
		if (in->numSamples > 1 && info->has_graphics &&
		    !(surf->flags & RADEON_SURF_NO_FMASK)) {
			ADDR2_COMPUTE_FMASK_INFO_INPUT fin = {0};
			ADDR2_COMPUTE_FMASK_INFO_OUTPUT fout = {0};

			fin.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_INPUT);
			fout.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_OUTPUT);

			/* FMASK gets its own (Z-family) swizzle mode. */
			ret = gfx9_get_preferred_swizzle_mode(addrlib->handle, surf, in,
							      true, &fin.swizzleMode);
			if (ret != ADDR_OK)
				return ret;

			fin.unalignedWidth = in->width;
			fin.unalignedHeight = in->height;
			fin.numSlices = in->numSlices;
			fin.numSamples = in->numSamples;
			fin.numFrags = in->numFrags;

			ret = Addr2ComputeFmaskInfo(addrlib->handle, &fin, &fout);
			if (ret != ADDR_OK)
				return ret;

			/* Overrides the derived values set near the top. */
			surf->u.gfx9.fmask.swizzle_mode = fin.swizzleMode;
			surf->u.gfx9.fmask.epitch = fout.pitch - 1;
			surf->fmask_size = fout.fmaskBytes;
			surf->fmask_alignment = fout.baseAlign;

			/* Compute tile swizzle for the FMASK surface. */
			if (config->info.fmask_surf_index &&
			    fin.swizzleMode >= ADDR_SW_64KB_Z_T &&
			    !(surf->flags & RADEON_SURF_SHAREABLE)) {
				ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
				ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};

				xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
				xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);

				/* This counter starts from 1 instead of 0. */
				xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
				xin.flags = in->flags;
				xin.swizzleMode = fin.swizzleMode;
				xin.resourceType = in->resourceType;
				xin.format = in->format;
				xin.numSamples = in->numSamples;
				xin.numFrags = in->numFrags;

				ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
				if (ret != ADDR_OK)
					return ret;

				assert(xout.pipeBankXor <=
				       u_bit_consecutive(0, sizeof(surf->fmask_tile_swizzle) * 8));
				surf->fmask_tile_swizzle = xout.pipeBankXor;
			}
		}

		/* CMASK -- on GFX10 only for FMASK */
		if (in->swizzleMode != ADDR_SW_LINEAR &&
		    in->resourceType == ADDR_RSRC_TEX_2D &&
		    ((info->chip_class <= GFX9 &&
		      in->numSamples == 1 &&
		      in->flags.metaPipeUnaligned == 0 &&
		      in->flags.metaRbUnaligned == 0) ||
		     (surf->fmask_size && in->numSamples >= 2))) {
			ADDR2_COMPUTE_CMASK_INFO_INPUT cin = {0};
			ADDR2_COMPUTE_CMASK_INFO_OUTPUT cout = {0};

			cin.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_INPUT);
			cout.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_OUTPUT);

			/* CMASK, like HTILE, must be fully aligned. */
			assert(in->flags.metaPipeUnaligned == 0);
			assert(in->flags.metaRbUnaligned == 0);

			cin.cMaskFlags.pipeAligned = 1;
			cin.cMaskFlags.rbAligned = 1;
			cin.colorFlags = in->flags;
			cin.resourceType = in->resourceType;
			cin.unalignedWidth = in->width;
			cin.unalignedHeight = in->height;
			cin.numSlices = in->numSlices;

			/* For MSAA, CMASK follows the FMASK swizzle mode. */
			if (in->numSamples > 1)
				cin.swizzleMode = surf->u.gfx9.fmask.swizzle_mode;
			else
				cin.swizzleMode = in->swizzleMode;

			ret = Addr2ComputeCmaskInfo(addrlib->handle, &cin, &cout);
			if (ret != ADDR_OK)
				return ret;

			surf->cmask_size = cout.cmaskBytes;
			surf->cmask_alignment = cout.baseAlign;
		}
	}

	return 0;
}
1706
1707 static int gfx9_compute_surface(struct ac_addrlib *addrlib,
1708 const struct radeon_info *info,
1709 const struct ac_surf_config *config,
1710 enum radeon_surf_mode mode,
1711 struct radeon_surf *surf)
1712 {
1713 bool compressed;
1714 ADDR2_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
1715 int r;
1716
1717 AddrSurfInfoIn.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_INPUT);
1718
1719 compressed = surf->blk_w == 4 && surf->blk_h == 4;
1720
1721 /* The format must be set correctly for the allocation of compressed
1722 * textures to work. In other cases, setting the bpp is sufficient. */
1723 if (compressed) {
1724 switch (surf->bpe) {
1725 case 8:
1726 AddrSurfInfoIn.format = ADDR_FMT_BC1;
1727 break;
1728 case 16:
1729 AddrSurfInfoIn.format = ADDR_FMT_BC3;
1730 break;
1731 default:
1732 assert(0);
1733 }
1734 } else {
1735 switch (surf->bpe) {
1736 case 1:
1737 assert(!(surf->flags & RADEON_SURF_ZBUFFER));
1738 AddrSurfInfoIn.format = ADDR_FMT_8;
1739 break;
1740 case 2:
1741 assert(surf->flags & RADEON_SURF_ZBUFFER ||
1742 !(surf->flags & RADEON_SURF_SBUFFER));
1743 AddrSurfInfoIn.format = ADDR_FMT_16;
1744 break;
1745 case 4:
1746 assert(surf->flags & RADEON_SURF_ZBUFFER ||
1747 !(surf->flags & RADEON_SURF_SBUFFER));
1748 AddrSurfInfoIn.format = ADDR_FMT_32;
1749 break;
1750 case 8:
1751 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1752 AddrSurfInfoIn.format = ADDR_FMT_32_32;
1753 break;
1754 case 12:
1755 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1756 AddrSurfInfoIn.format = ADDR_FMT_32_32_32;
1757 break;
1758 case 16:
1759 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1760 AddrSurfInfoIn.format = ADDR_FMT_32_32_32_32;
1761 break;
1762 default:
1763 assert(0);
1764 }
1765 AddrSurfInfoIn.bpp = surf->bpe * 8;
1766 }
1767
1768 bool is_color_surface = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
1769 AddrSurfInfoIn.flags.color = is_color_surface &&
1770 !(surf->flags & RADEON_SURF_NO_RENDER_TARGET);
1771 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
1772 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
1773 /* flags.texture currently refers to TC-compatible HTILE */
1774 AddrSurfInfoIn.flags.texture = is_color_surface ||
1775 surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE;
1776 AddrSurfInfoIn.flags.opt4space = 1;
1777
1778 AddrSurfInfoIn.numMipLevels = config->info.levels;
1779 AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
1780 AddrSurfInfoIn.numFrags = AddrSurfInfoIn.numSamples;
1781
1782 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER))
1783 AddrSurfInfoIn.numFrags = MAX2(1, config->info.storage_samples);
1784
1785 /* GFX9 doesn't support 1D depth textures, so allocate all 1D textures
1786 * as 2D to avoid having shader variants for 1D vs 2D, so all shaders
1787 * must sample 1D textures as 2D. */
1788 if (config->is_3d)
1789 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_3D;
1790 else if (info->chip_class != GFX9 && config->is_1d)
1791 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_1D;
1792 else
1793 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_2D;
1794
1795 AddrSurfInfoIn.width = config->info.width;
1796 AddrSurfInfoIn.height = config->info.height;
1797
1798 if (config->is_3d)
1799 AddrSurfInfoIn.numSlices = config->info.depth;
1800 else if (config->is_cube)
1801 AddrSurfInfoIn.numSlices = 6;
1802 else
1803 AddrSurfInfoIn.numSlices = config->info.array_size;
1804
1805 /* This is propagated to DCC. It must be 0 for HTILE and CMASK. */
1806 AddrSurfInfoIn.flags.metaPipeUnaligned = 0;
1807 AddrSurfInfoIn.flags.metaRbUnaligned = 0;
1808
1809 /* Optimal values for the L2 cache. */
1810 if (info->chip_class == GFX9) {
1811 surf->u.gfx9.dcc.independent_64B_blocks = 1;
1812 surf->u.gfx9.dcc.independent_128B_blocks = 0;
1813 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1814 } else if (info->chip_class >= GFX10) {
1815 surf->u.gfx9.dcc.independent_64B_blocks = 0;
1816 surf->u.gfx9.dcc.independent_128B_blocks = 1;
1817 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
1818 }
1819
1820 if (AddrSurfInfoIn.flags.display) {
1821 /* The display hardware can only read DCC with RB_ALIGNED=0 and
1822 * PIPE_ALIGNED=0. PIPE_ALIGNED really means L2CACHE_ALIGNED.
1823 *
1824 * The CB block requires RB_ALIGNED=1 except 1 RB chips.
1825 * PIPE_ALIGNED is optional, but PIPE_ALIGNED=0 requires L2 flushes
1826 * after rendering, so PIPE_ALIGNED=1 is recommended.
1827 */
1828 if (info->use_display_dcc_unaligned) {
1829 AddrSurfInfoIn.flags.metaPipeUnaligned = 1;
1830 AddrSurfInfoIn.flags.metaRbUnaligned = 1;
1831 }
1832
1833 /* Adjust DCC settings to meet DCN requirements. */
1834 if (info->use_display_dcc_unaligned ||
1835 info->use_display_dcc_with_retile_blit) {
1836 /* Only Navi12/14 support independent 64B blocks in L2,
1837 * but without DCC image stores.
1838 */
1839 if (info->family == CHIP_NAVI12 ||
1840 info->family == CHIP_NAVI14) {
1841 surf->u.gfx9.dcc.independent_64B_blocks = 1;
1842 surf->u.gfx9.dcc.independent_128B_blocks = 0;
1843 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1844 }
1845
1846 if (info->chip_class >= GFX10_3) {
1847 surf->u.gfx9.dcc.independent_64B_blocks = 1;
1848 surf->u.gfx9.dcc.independent_128B_blocks = 1;
1849 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1850 }
1851 }
1852 }
1853
1854 switch (mode) {
1855 case RADEON_SURF_MODE_LINEAR_ALIGNED:
1856 assert(config->info.samples <= 1);
1857 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1858 AddrSurfInfoIn.swizzleMode = ADDR_SW_LINEAR;
1859 break;
1860
1861 case RADEON_SURF_MODE_1D:
1862 case RADEON_SURF_MODE_2D:
1863 if (surf->flags & RADEON_SURF_IMPORTED ||
1864 (info->chip_class >= GFX10 &&
1865 surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE)) {
1866 AddrSurfInfoIn.swizzleMode = surf->u.gfx9.surf.swizzle_mode;
1867 break;
1868 }
1869
1870 r = gfx9_get_preferred_swizzle_mode(addrlib->handle, surf, &AddrSurfInfoIn,
1871 false, &AddrSurfInfoIn.swizzleMode);
1872 if (r)
1873 return r;
1874 break;
1875
1876 default:
1877 assert(0);
1878 }
1879
1880 surf->u.gfx9.resource_type = AddrSurfInfoIn.resourceType;
1881 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
1882
1883 surf->num_dcc_levels = 0;
1884 surf->surf_size = 0;
1885 surf->fmask_size = 0;
1886 surf->dcc_size = 0;
1887 surf->htile_size = 0;
1888 surf->htile_slice_size = 0;
1889 surf->u.gfx9.surf_offset = 0;
1890 surf->u.gfx9.stencil_offset = 0;
1891 surf->cmask_size = 0;
1892 surf->u.gfx9.dcc_retile_use_uint16 = false;
1893 surf->u.gfx9.dcc_retile_num_elements = 0;
1894 surf->u.gfx9.dcc_retile_map = NULL;
1895
1896 /* Calculate texture layout information. */
1897 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed,
1898 &AddrSurfInfoIn);
1899 if (r)
1900 goto error;
1901
1902 /* Calculate texture layout information for stencil. */
1903 if (surf->flags & RADEON_SURF_SBUFFER) {
1904 AddrSurfInfoIn.flags.stencil = 1;
1905 AddrSurfInfoIn.bpp = 8;
1906 AddrSurfInfoIn.format = ADDR_FMT_8;
1907
1908 if (!AddrSurfInfoIn.flags.depth) {
1909 r = gfx9_get_preferred_swizzle_mode(addrlib->handle, surf, &AddrSurfInfoIn,
1910 false, &AddrSurfInfoIn.swizzleMode);
1911 if (r)
1912 goto error;
1913 } else
1914 AddrSurfInfoIn.flags.depth = 0;
1915
1916 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed,
1917 &AddrSurfInfoIn);
1918 if (r)
1919 goto error;
1920 }
1921
1922 surf->is_linear = surf->u.gfx9.surf.swizzle_mode == ADDR_SW_LINEAR;
1923
1924 /* Query whether the surface is displayable. */
1925 /* This is only useful for surfaces that are allocated without SCANOUT. */
1926 bool displayable = false;
1927 if (!config->is_3d && !config->is_cube) {
1928 r = Addr2IsValidDisplaySwizzleMode(addrlib->handle, surf->u.gfx9.surf.swizzle_mode,
1929 surf->bpe * 8, &displayable);
1930 if (r)
1931 goto error;
1932
1933 /* Display needs unaligned DCC. */
1934 if (surf->num_dcc_levels &&
1935 !is_dcc_supported_by_DCN(info, config, surf,
1936 surf->u.gfx9.dcc.rb_aligned,
1937 surf->u.gfx9.dcc.pipe_aligned))
1938 displayable = false;
1939 }
1940 surf->is_displayable = displayable;
1941
1942 /* Validate that we allocated a displayable surface if requested. */
1943 assert(!AddrSurfInfoIn.flags.display || surf->is_displayable);
1944
1945 /* Validate that DCC is set up correctly. */
1946 if (surf->num_dcc_levels) {
1947 assert(is_dcc_supported_by_L2(info, surf));
1948 if (AddrSurfInfoIn.flags.color)
1949 assert(is_dcc_supported_by_CB(info, surf->u.gfx9.surf.swizzle_mode));
1950 if (AddrSurfInfoIn.flags.display) {
1951 assert(is_dcc_supported_by_DCN(info, config, surf,
1952 surf->u.gfx9.dcc.rb_aligned,
1953 surf->u.gfx9.dcc.pipe_aligned));
1954 }
1955 }
1956
1957 if (info->has_graphics &&
1958 !compressed &&
1959 !config->is_3d &&
1960 config->info.levels == 1 &&
1961 AddrSurfInfoIn.flags.color &&
1962 !surf->is_linear &&
1963 surf->surf_alignment >= 64 * 1024 && /* 64KB tiling */
1964 !(surf->flags & (RADEON_SURF_DISABLE_DCC |
1965 RADEON_SURF_FORCE_SWIZZLE_MODE |
1966 RADEON_SURF_FORCE_MICRO_TILE_MODE))) {
1967 /* Validate that DCC is enabled if DCN can do it. */
1968 if ((info->use_display_dcc_unaligned ||
1969 info->use_display_dcc_with_retile_blit) &&
1970 AddrSurfInfoIn.flags.display &&
1971 surf->bpe == 4) {
1972 assert(surf->num_dcc_levels);
1973 }
1974
1975 /* Validate that non-scanout DCC is always enabled. */
1976 if (!AddrSurfInfoIn.flags.display)
1977 assert(surf->num_dcc_levels);
1978 }
1979
1980 if (!surf->htile_size) {
1981 /* Unset this if HTILE is not present. */
1982 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1983 }
1984
1985 switch (surf->u.gfx9.surf.swizzle_mode) {
1986 /* S = standard. */
1987 case ADDR_SW_256B_S:
1988 case ADDR_SW_4KB_S:
1989 case ADDR_SW_64KB_S:
1990 case ADDR_SW_64KB_S_T:
1991 case ADDR_SW_4KB_S_X:
1992 case ADDR_SW_64KB_S_X:
1993 surf->micro_tile_mode = RADEON_MICRO_MODE_STANDARD;
1994 break;
1995
1996 /* D = display. */
1997 case ADDR_SW_LINEAR:
1998 case ADDR_SW_256B_D:
1999 case ADDR_SW_4KB_D:
2000 case ADDR_SW_64KB_D:
2001 case ADDR_SW_64KB_D_T:
2002 case ADDR_SW_4KB_D_X:
2003 case ADDR_SW_64KB_D_X:
2004 surf->micro_tile_mode = RADEON_MICRO_MODE_DISPLAY;
2005 break;
2006
2007 /* R = rotated (gfx9), render target (gfx10). */
2008 case ADDR_SW_256B_R:
2009 case ADDR_SW_4KB_R:
2010 case ADDR_SW_64KB_R:
2011 case ADDR_SW_64KB_R_T:
2012 case ADDR_SW_4KB_R_X:
2013 case ADDR_SW_64KB_R_X:
2014 case ADDR_SW_VAR_R_X:
2015 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
2016 * used at the same time. We currently do not use rotated
2017 * in gfx9.
2018 */
2019 assert(info->chip_class >= GFX10 ||
2020 !"rotate micro tile mode is unsupported");
2021 surf->micro_tile_mode = RADEON_MICRO_MODE_RENDER;
2022 break;
2023
2024 /* Z = depth. */
2025 case ADDR_SW_4KB_Z:
2026 case ADDR_SW_64KB_Z:
2027 case ADDR_SW_64KB_Z_T:
2028 case ADDR_SW_4KB_Z_X:
2029 case ADDR_SW_64KB_Z_X:
2030 case ADDR_SW_VAR_Z_X:
2031 surf->micro_tile_mode = RADEON_MICRO_MODE_DEPTH;
2032 break;
2033
2034 default:
2035 assert(0);
2036 }
2037
2038 return 0;
2039
2040 error:
2041 free(surf->u.gfx9.dcc_retile_map);
2042 surf->u.gfx9.dcc_retile_map = NULL;
2043 return r;
2044 }
2045
2046 int ac_compute_surface(struct ac_addrlib *addrlib, const struct radeon_info *info,
2047 const struct ac_surf_config *config,
2048 enum radeon_surf_mode mode,
2049 struct radeon_surf *surf)
2050 {
2051 int r;
2052
2053 r = surf_config_sanity(config, surf->flags);
2054 if (r)
2055 return r;
2056
2057 if (info->chip_class >= GFX9)
2058 r = gfx9_compute_surface(addrlib, info, config, mode, surf);
2059 else
2060 r = gfx6_compute_surface(addrlib->handle, info, config, mode, surf);
2061
2062 if (r)
2063 return r;
2064
2065 /* Determine the memory layout of multiple allocations in one buffer. */
2066 surf->total_size = surf->surf_size;
2067 surf->alignment = surf->surf_alignment;
2068
2069 if (surf->htile_size) {
2070 surf->htile_offset = align64(surf->total_size, surf->htile_alignment);
2071 surf->total_size = surf->htile_offset + surf->htile_size;
2072 surf->alignment = MAX2(surf->alignment, surf->htile_alignment);
2073 }
2074
2075 if (surf->fmask_size) {
2076 assert(config->info.samples >= 2);
2077 surf->fmask_offset = align64(surf->total_size, surf->fmask_alignment);
2078 surf->total_size = surf->fmask_offset + surf->fmask_size;
2079 surf->alignment = MAX2(surf->alignment, surf->fmask_alignment);
2080 }
2081
2082 /* Single-sample CMASK is in a separate buffer. */
2083 if (surf->cmask_size && config->info.samples >= 2) {
2084 surf->cmask_offset = align64(surf->total_size, surf->cmask_alignment);
2085 surf->total_size = surf->cmask_offset + surf->cmask_size;
2086 surf->alignment = MAX2(surf->alignment, surf->cmask_alignment);
2087 }
2088
2089 if (surf->is_displayable)
2090 surf->flags |= RADEON_SURF_SCANOUT;
2091
2092 if (surf->dcc_size &&
2093 /* dcc_size is computed on GFX9+ only if it's displayable. */
2094 (info->chip_class >= GFX9 || !get_display_flag(config, surf))) {
2095 /* It's better when displayable DCC is immediately after
2096 * the image due to hw-specific reasons.
2097 */
2098 if (info->chip_class >= GFX9 &&
2099 surf->u.gfx9.dcc_retile_num_elements) {
2100 /* Add space for the displayable DCC buffer. */
2101 surf->display_dcc_offset =
2102 align64(surf->total_size, surf->u.gfx9.display_dcc_alignment);
2103 surf->total_size = surf->display_dcc_offset +
2104 surf->u.gfx9.display_dcc_size;
2105
2106 /* Add space for the DCC retile buffer. (16-bit or 32-bit elements) */
2107 surf->dcc_retile_map_offset =
2108 align64(surf->total_size, info->tcc_cache_line_size);
2109
2110 if (surf->u.gfx9.dcc_retile_use_uint16) {
2111 surf->total_size = surf->dcc_retile_map_offset +
2112 surf->u.gfx9.dcc_retile_num_elements * 2;
2113 } else {
2114 surf->total_size = surf->dcc_retile_map_offset +
2115 surf->u.gfx9.dcc_retile_num_elements * 4;
2116 }
2117 }
2118
2119 surf->dcc_offset = align64(surf->total_size, surf->dcc_alignment);
2120 surf->total_size = surf->dcc_offset + surf->dcc_size;
2121 surf->alignment = MAX2(surf->alignment, surf->dcc_alignment);
2122 }
2123
2124 return 0;
2125 }
2126
2127 /* This is meant to be used for disabling DCC. */
2128 void ac_surface_zero_dcc_fields(struct radeon_surf *surf)
2129 {
2130 surf->dcc_offset = 0;
2131 surf->display_dcc_offset = 0;
2132 surf->dcc_retile_map_offset = 0;
2133 }
2134
/* Decode the 3-bit TILE_SPLIT tiling-flag field into a byte count.
 * Out-of-range encodings decode to 1024, matching the original switch
 * whose default label shared the "case 4" statement.
 */
static unsigned eg_tile_split(unsigned tile_split)
{
	static const unsigned split_bytes[] = {
		64, 128, 256, 512, 1024, 2048, 4096
	};

	if (tile_split < sizeof(split_bytes) / sizeof(split_bytes[0]))
		return split_bytes[tile_split];
	return 1024;
}
2149
/* Inverse of eg_tile_split(): encode a tile-split byte count into the 3-bit
 * tiling-flag field. Unrecognized byte counts encode as 4 (i.e. 1024),
 * matching the original switch default.
 */
static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
	static const unsigned split_bytes[] = {
		64, 128, 256, 512, 1024, 2048, 4096
	};

	for (unsigned i = 0; i < sizeof(split_bytes) / sizeof(split_bytes[0]); i++) {
		if (split_bytes[i] == eg_tile_split)
			return i;
	}
	return 4; /* default: treat as 1024 bytes */
}
2163
2164 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_SHIFT 45
2165 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_MASK 0x3
2166
2167 /* This should be called before ac_compute_surface. */
2168 void ac_surface_set_bo_metadata(const struct radeon_info *info,
2169 struct radeon_surf *surf, uint64_t tiling_flags,
2170 enum radeon_surf_mode *mode)
2171 {
2172 bool scanout;
2173
2174 if (info->chip_class >= GFX9) {
2175 surf->u.gfx9.surf.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2176 surf->u.gfx9.dcc.independent_64B_blocks = AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_64B);
2177 surf->u.gfx9.dcc.independent_128B_blocks = AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_128B);
2178 surf->u.gfx9.dcc.max_compressed_block_size = AMDGPU_TILING_GET(tiling_flags, DCC_MAX_COMPRESSED_BLOCK_SIZE);
2179 surf->u.gfx9.display_dcc_pitch_max = AMDGPU_TILING_GET(tiling_flags, DCC_PITCH_MAX);
2180 scanout = AMDGPU_TILING_GET(tiling_flags, SCANOUT);
2181 *mode = surf->u.gfx9.surf.swizzle_mode > 0 ? RADEON_SURF_MODE_2D : RADEON_SURF_MODE_LINEAR_ALIGNED;
2182 } else {
2183 surf->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2184 surf->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2185 surf->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2186 surf->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
2187 surf->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2188 surf->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2189 scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
2190
2191 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
2192 *mode = RADEON_SURF_MODE_2D;
2193 else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
2194 *mode = RADEON_SURF_MODE_1D;
2195 else
2196 *mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
2197 }
2198
2199 if (scanout)
2200 surf->flags |= RADEON_SURF_SCANOUT;
2201 else
2202 surf->flags &= ~RADEON_SURF_SCANOUT;
2203 }
2204
2205 void ac_surface_get_bo_metadata(const struct radeon_info *info,
2206 struct radeon_surf *surf, uint64_t *tiling_flags)
2207 {
2208 *tiling_flags = 0;
2209
2210 if (info->chip_class >= GFX9) {
2211 uint64_t dcc_offset = 0;
2212
2213 if (surf->dcc_offset) {
2214 dcc_offset = surf->display_dcc_offset ? surf->display_dcc_offset
2215 : surf->dcc_offset;
2216 assert((dcc_offset >> 8) != 0 && (dcc_offset >> 8) < (1 << 24));
2217 }
2218
2219 *tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, surf->u.gfx9.surf.swizzle_mode);
2220 *tiling_flags |= AMDGPU_TILING_SET(DCC_OFFSET_256B, dcc_offset >> 8);
2221 *tiling_flags |= AMDGPU_TILING_SET(DCC_PITCH_MAX, surf->u.gfx9.display_dcc_pitch_max);
2222 *tiling_flags |= AMDGPU_TILING_SET(DCC_INDEPENDENT_64B, surf->u.gfx9.dcc.independent_64B_blocks);
2223 *tiling_flags |= AMDGPU_TILING_SET(DCC_INDEPENDENT_128B, surf->u.gfx9.dcc.independent_128B_blocks);
2224 *tiling_flags |= AMDGPU_TILING_SET(DCC_MAX_COMPRESSED_BLOCK_SIZE, surf->u.gfx9.dcc.max_compressed_block_size);
2225 *tiling_flags |= AMDGPU_TILING_SET(SCANOUT, (surf->flags & RADEON_SURF_SCANOUT) != 0);
2226 } else {
2227 if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D)
2228 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
2229 else if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D)
2230 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
2231 else
2232 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
2233
2234 *tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, surf->u.legacy.pipe_config);
2235 *tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(surf->u.legacy.bankw));
2236 *tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(surf->u.legacy.bankh));
2237 if (surf->u.legacy.tile_split)
2238 *tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(surf->u.legacy.tile_split));
2239 *tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(surf->u.legacy.mtilea));
2240 *tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(surf->u.legacy.num_banks)-1);
2241
2242 if (surf->flags & RADEON_SURF_SCANOUT)
2243 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
2244 else
2245 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
2246 }
2247 }
2248
2249 static uint32_t ac_get_umd_metadata_word1(const struct radeon_info *info)
2250 {
2251 return (ATI_VENDOR_ID << 16) | info->pci_id;
2252 }
2253
/* Parse the UMD metadata of an imported texture and apply it to the surface
 * (currently only the DCC state). This should be called after
 * ac_compute_surface.
 *
 * Returns false only when the metadata is self-inconsistent (sample-count or
 * mip-count mismatch); metadata from an unknown or incompatible producer is
 * tolerated: DCC is disabled and true is returned.
 */
bool ac_surface_set_umd_metadata(const struct radeon_info *info,
				 struct radeon_surf *surf,
				 unsigned num_storage_samples,
				 unsigned num_mipmap_levels,
				 unsigned size_metadata,
				 uint32_t metadata[64])
{
	/* Dwords [2:9] of the metadata hold the image descriptor. */
	uint32_t *desc = &metadata[2];
	uint64_t offset;

	if (info->chip_class >= GFX9)
		offset = surf->u.gfx9.surf_offset;
	else
		offset = surf->u.legacy.level[0].offset;

	if (offset || /* Non-zero planes ignore metadata. */
	    size_metadata < 10 * 4 || /* at least 2(header) + 8(desc) dwords */
	    metadata[0] == 0 || /* invalid version number */
	    metadata[1] != ac_get_umd_metadata_word1(info)) /* invalid PCI ID */ {
		/* Disable DCC because it might not be enabled. */
		ac_surface_zero_dcc_fields(surf);

		/* Don't report an error if the texture comes from an incompatible driver,
		 * but this might not work.
		 */
		return true;
	}

	/* Validate that sample counts and the number of mipmap levels match. */
	unsigned desc_last_level = G_008F1C_LAST_LEVEL(desc[3]);
	unsigned type = G_008F1C_TYPE(desc[3]);

	if (type == V_008F1C_SQ_RSRC_IMG_2D_MSAA || type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) {
		/* For MSAA resources, the LAST_LEVEL field holds log2(samples)
		 * rather than a mip level.
		 */
		unsigned log_samples = util_logbase2(MAX2(1, num_storage_samples));

		if (desc_last_level != log_samples) {
			fprintf(stderr,
				"amdgpu: invalid MSAA texture import, "
				"metadata has log2(samples) = %u, the caller set %u\n",
				desc_last_level, log_samples);
			return false;
		}
	} else {
		if (desc_last_level != num_mipmap_levels - 1) {
			fprintf(stderr,
				"amdgpu: invalid mipmapped texture import, "
				"metadata has last_level = %u, the caller set %u\n",
				desc_last_level, num_mipmap_levels - 1);
			return false;
		}
	}

	if (info->chip_class >= GFX8 && G_008F28_COMPRESSION_EN(desc[6])) {
		/* Read DCC information. The DCC address encoding in the image
		 * descriptor differs per generation.
		 */
		switch (info->chip_class) {
		case GFX8:
			surf->dcc_offset = (uint64_t)desc[7] << 8;
			break;

		case GFX9:
			/* The low address bits come from desc[7]; the top bits
			 * come from the META_DATA_ADDRESS field of desc[5].
			 */
			surf->dcc_offset =
				((uint64_t)desc[7] << 8) | ((uint64_t)G_008F24_META_DATA_ADDRESS(desc[5]) << 40);
			surf->u.gfx9.dcc.pipe_aligned = G_008F24_META_PIPE_ALIGNED(desc[5]);
			surf->u.gfx9.dcc.rb_aligned = G_008F24_META_RB_ALIGNED(desc[5]);

			/* If DCC is unaligned, this can only be a displayable image. */
			if (!surf->u.gfx9.dcc.pipe_aligned && !surf->u.gfx9.dcc.rb_aligned)
				assert(surf->is_displayable);
			break;

		case GFX10:
		case GFX10_3:
			surf->dcc_offset =
				((uint64_t)G_00A018_META_DATA_ADDRESS_LO(desc[6]) << 8) | ((uint64_t)desc[7] << 16);
			surf->u.gfx9.dcc.pipe_aligned = G_00A018_META_PIPE_ALIGNED(desc[6]);
			break;

		default:
			assert(0);
			return false;
		}
	} else {
		/* Disable DCC. dcc_offset is always set by texture_from_handle
		 * and must be cleared here.
		 */
		ac_surface_zero_dcc_fields(surf);
	}

	return true;
}
2345
/* Build the UMD metadata blob for exporting a texture, embedding the given
 * image descriptor with its base address cleared and its DCC offset made
 * relative to the start of the buffer.
 *
 * desc is modified in place; size_metadata and metadata receive the result.
 */
void ac_surface_get_umd_metadata(const struct radeon_info *info,
				 struct radeon_surf *surf,
				 unsigned num_mipmap_levels,
				 uint32_t desc[8],
				 unsigned *size_metadata, uint32_t metadata[64])
{
	/* Clear the base address and set the relative DCC offset. */
	desc[0] = 0;
	desc[1] &= C_008F14_BASE_ADDRESS_HI;

	/* The DCC address field layout in the descriptor differs per
	 * generation; GFX6-7 have no DCC at all.
	 */
	switch (info->chip_class) {
	case GFX6:
	case GFX7:
		break;
	case GFX8:
		desc[7] = surf->dcc_offset >> 8;
		break;
	case GFX9:
		desc[7] = surf->dcc_offset >> 8;
		desc[5] &= C_008F24_META_DATA_ADDRESS;
		desc[5] |= S_008F24_META_DATA_ADDRESS(surf->dcc_offset >> 40);
		break;
	case GFX10:
	case GFX10_3:
		desc[6] &= C_00A018_META_DATA_ADDRESS_LO;
		desc[6] |= S_00A018_META_DATA_ADDRESS_LO(surf->dcc_offset >> 8);
		desc[7] = surf->dcc_offset >> 16;
		break;
	default:
		assert(0);
	}

	/* Metadata image format version 1:
	 * [0] = 1 (metadata format identifier)
	 * [1] = (VENDOR_ID << 16) | PCI_ID
	 * [2:9] = image descriptor for the whole resource
	 *         [2] is always 0, because the base address is cleared
	 *         [9] is the DCC offset bits [39:8] from the beginning of
	 *             the buffer
	 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
	 */

	metadata[0] = 1; /* metadata image format version 1 */

	/* Tiling modes are ambiguous without a PCI ID. */
	metadata[1] = ac_get_umd_metadata_word1(info);

	/* Dwords [2:9] contain the image descriptor. */
	memcpy(&metadata[2], desc, 8 * 4);
	*size_metadata = 10 * 4;

	/* Dwords [10:..] contain the mipmap level offsets. */
	if (info->chip_class <= GFX8) {
		for (unsigned i = 0; i < num_mipmap_levels; i++)
			metadata[10 + i] = surf->u.legacy.level[i].offset >> 8;

		*size_metadata += num_mipmap_levels * 4;
	}
}
2405
2406 void ac_surface_override_offset_stride(const struct radeon_info *info,
2407 struct radeon_surf *surf,
2408 unsigned num_mipmap_levels,
2409 uint64_t offset, unsigned pitch)
2410 {
2411 if (info->chip_class >= GFX9) {
2412 if (pitch) {
2413 surf->u.gfx9.surf_pitch = pitch;
2414 if (num_mipmap_levels == 1)
2415 surf->u.gfx9.surf.epitch = pitch - 1;
2416 surf->u.gfx9.surf_slice_size =
2417 (uint64_t)pitch * surf->u.gfx9.surf_height * surf->bpe;
2418 }
2419 surf->u.gfx9.surf_offset = offset;
2420 if (surf->u.gfx9.stencil_offset)
2421 surf->u.gfx9.stencil_offset += offset;
2422 } else {
2423 if (pitch) {
2424 surf->u.legacy.level[0].nblk_x = pitch;
2425 surf->u.legacy.level[0].slice_size_dw =
2426 ((uint64_t)pitch * surf->u.legacy.level[0].nblk_y * surf->bpe) / 4;
2427 }
2428
2429 if (offset) {
2430 for (unsigned i = 0; i < ARRAY_SIZE(surf->u.legacy.level); ++i)
2431 surf->u.legacy.level[i].offset += offset;
2432 }
2433 }
2434
2435 if (surf->htile_offset)
2436 surf->htile_offset += offset;
2437 if (surf->fmask_offset)
2438 surf->fmask_offset += offset;
2439 if (surf->cmask_offset)
2440 surf->cmask_offset += offset;
2441 if (surf->dcc_offset)
2442 surf->dcc_offset += offset;
2443 if (surf->display_dcc_offset)
2444 surf->display_dcc_offset += offset;
2445 if (surf->dcc_retile_map_offset)
2446 surf->dcc_retile_map_offset += offset;
2447 }