ac/surface: don't compute single-sample CMASK if it's unaligned
mesa.git: src/amd/common/ac_surface.c
1 /*
2 * Copyright © 2011 Red Hat All Rights Reserved.
3 * Copyright © 2017 Advanced Micro Devices, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
25 * of the Software.
26 */
27
28 #include "ac_surface.h"
29 #include "amd_family.h"
30 #include "addrlib/src/amdgpu_asic_addr.h"
31 #include "ac_gpu_info.h"
32 #include "util/macros.h"
33 #include "util/u_atomic.h"
34 #include "util/u_math.h"
35 #include "sid.h"
36
37 #include <errno.h>
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <amdgpu.h>
41 #include "drm-uapi/amdgpu_drm.h"
42
43 #include "addrlib/inc/addrinterface.h"
44
45 #ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
46 #define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
47 #endif
48
49 #ifndef CIASICIDGFXENGINE_ARCTICISLAND
50 #define CIASICIDGFXENGINE_ARCTICISLAND 0x0000000D
51 #endif
52
53 static void *ADDR_API allocSysMem(const ADDR_ALLOCSYSMEM_INPUT * pInput)
54 {
55 return malloc(pInput->sizeInBytes);
56 }
57
58 static ADDR_E_RETURNCODE ADDR_API freeSysMem(const ADDR_FREESYSMEM_INPUT * pInput)
59 {
60 free(pInput->pVirtAddr);
61 return ADDR_OK;
62 }
63
64 ADDR_HANDLE amdgpu_addr_create(const struct radeon_info *info,
65 const struct amdgpu_gpu_info *amdinfo,
66 uint64_t *max_alignment)
67 {
68 ADDR_CREATE_INPUT addrCreateInput = {0};
69 ADDR_CREATE_OUTPUT addrCreateOutput = {0};
70 ADDR_REGISTER_VALUE regValue = {0};
71 ADDR_CREATE_FLAGS createFlags = {{0}};
72 ADDR_GET_MAX_ALIGNMENTS_OUTPUT addrGetMaxAlignmentsOutput = {0};
73 ADDR_E_RETURNCODE addrRet;
74
75 addrCreateInput.size = sizeof(ADDR_CREATE_INPUT);
76 addrCreateOutput.size = sizeof(ADDR_CREATE_OUTPUT);
77
78 regValue.gbAddrConfig = amdinfo->gb_addr_cfg;
79 createFlags.value = 0;
80
81 addrCreateInput.chipFamily = info->family_id;
82 addrCreateInput.chipRevision = info->chip_external_rev;
83
84 if (addrCreateInput.chipFamily == FAMILY_UNKNOWN)
85 return NULL;
86
87 if (addrCreateInput.chipFamily >= FAMILY_AI) {
88 addrCreateInput.chipEngine = CIASICIDGFXENGINE_ARCTICISLAND;
89 } else {
90 regValue.noOfBanks = amdinfo->mc_arb_ramcfg & 0x3;
91 regValue.noOfRanks = (amdinfo->mc_arb_ramcfg & 0x4) >> 2;
92
93 regValue.backendDisables = amdinfo->enabled_rb_pipes_mask;
94 regValue.pTileConfig = amdinfo->gb_tile_mode;
95 regValue.noOfEntries = ARRAY_SIZE(amdinfo->gb_tile_mode);
96 if (addrCreateInput.chipFamily == FAMILY_SI) {
97 regValue.pMacroTileConfig = NULL;
98 regValue.noOfMacroEntries = 0;
99 } else {
100 regValue.pMacroTileConfig = amdinfo->gb_macro_tile_mode;
101 regValue.noOfMacroEntries = ARRAY_SIZE(amdinfo->gb_macro_tile_mode);
102 }
103
104 createFlags.useTileIndex = 1;
105 createFlags.useHtileSliceAlign = 1;
106
107 addrCreateInput.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
108 }
109
110 addrCreateInput.callbacks.allocSysMem = allocSysMem;
111 addrCreateInput.callbacks.freeSysMem = freeSysMem;
112 addrCreateInput.callbacks.debugPrint = 0;
113 addrCreateInput.createFlags = createFlags;
114 addrCreateInput.regValue = regValue;
115
116 addrRet = AddrCreate(&addrCreateInput, &addrCreateOutput);
117 if (addrRet != ADDR_OK)
118 return NULL;
119
120 if (max_alignment) {
121 addrRet = AddrGetMaxAlignments(addrCreateOutput.hLib, &addrGetMaxAlignmentsOutput);
122 if (addrRet == ADDR_OK){
123 *max_alignment = addrGetMaxAlignmentsOutput.baseAlign;
124 }
125 }
126 return addrCreateOutput.hLib;
127 }
128
129 static int surf_config_sanity(const struct ac_surf_config *config,
130 unsigned flags)
131 {
132 /* FMASK is allocated together with the color surface and can't be
133 * allocated separately.
134 */
135 assert(!(flags & RADEON_SURF_FMASK));
136 if (flags & RADEON_SURF_FMASK)
137 return -EINVAL;
138
139 	/* all dimensions must be at least 1! */
140 if (!config->info.width || !config->info.height || !config->info.depth ||
141 !config->info.array_size || !config->info.levels)
142 return -EINVAL;
143
144 switch (config->info.samples) {
145 case 0:
146 case 1:
147 case 2:
148 case 4:
149 case 8:
150 break;
151 case 16:
152 if (flags & RADEON_SURF_Z_OR_SBUFFER)
153 return -EINVAL;
154 break;
155 default:
156 return -EINVAL;
157 }
158
159 if (!(flags & RADEON_SURF_Z_OR_SBUFFER)) {
160 switch (config->info.storage_samples) {
161 case 0:
162 case 1:
163 case 2:
164 case 4:
165 case 8:
166 break;
167 default:
168 return -EINVAL;
169 }
170 }
171
172 if (config->is_3d && config->info.array_size > 1)
173 return -EINVAL;
174 if (config->is_cube && config->info.depth > 1)
175 return -EINVAL;
176
177 return 0;
178 }
179
180 static int gfx6_compute_level(ADDR_HANDLE addrlib,
181 const struct ac_surf_config *config,
182 struct radeon_surf *surf, bool is_stencil,
183 unsigned level, bool compressed,
184 ADDR_COMPUTE_SURFACE_INFO_INPUT *AddrSurfInfoIn,
185 ADDR_COMPUTE_SURFACE_INFO_OUTPUT *AddrSurfInfoOut,
186 ADDR_COMPUTE_DCCINFO_INPUT *AddrDccIn,
187 ADDR_COMPUTE_DCCINFO_OUTPUT *AddrDccOut,
188 ADDR_COMPUTE_HTILE_INFO_INPUT *AddrHtileIn,
189 ADDR_COMPUTE_HTILE_INFO_OUTPUT *AddrHtileOut)
190 {
191 struct legacy_surf_level *surf_level;
192 ADDR_E_RETURNCODE ret;
193
194 AddrSurfInfoIn->mipLevel = level;
195 AddrSurfInfoIn->width = u_minify(config->info.width, level);
196 AddrSurfInfoIn->height = u_minify(config->info.height, level);
197
198 /* Make GFX6 linear surfaces compatible with GFX9 for hybrid graphics,
199 * because GFX9 needs linear alignment of 256 bytes.
200 */
201 if (config->info.levels == 1 &&
202 AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED &&
203 AddrSurfInfoIn->bpp &&
204 util_is_power_of_two_or_zero(AddrSurfInfoIn->bpp)) {
205 unsigned alignment = 256 / (AddrSurfInfoIn->bpp / 8);
206
207 AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, alignment);
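		/* Illustrative numbers (not from addrlib): for a 32bpp format the
		 * alignment is 256 / 4 = 64 pixels, so e.g. a 100-pixel-wide level 0
		 * is padded to 128 pixels here. */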
208 }
209
210 /* addrlib assumes the bytes/pixel is a divisor of 64, which is not
211 * true for r32g32b32 formats. */
212 if (AddrSurfInfoIn->bpp == 96) {
213 assert(config->info.levels == 1);
214 assert(AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED);
215
216 /* The least common multiple of 64 bytes and 12 bytes/pixel is
217 * 192 bytes, or 16 pixels. */
218 AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, 16);
219 }
220
221 if (config->is_3d)
222 AddrSurfInfoIn->numSlices = u_minify(config->info.depth, level);
223 else if (config->is_cube)
224 AddrSurfInfoIn->numSlices = 6;
225 else
226 AddrSurfInfoIn->numSlices = config->info.array_size;
227
228 if (level > 0) {
229 /* Set the base level pitch. This is needed for calculation
230 * of non-zero levels. */
231 if (is_stencil)
232 AddrSurfInfoIn->basePitch = surf->u.legacy.stencil_level[0].nblk_x;
233 else
234 AddrSurfInfoIn->basePitch = surf->u.legacy.level[0].nblk_x;
235
236 /* Convert blocks to pixels for compressed formats. */
237 if (compressed)
238 AddrSurfInfoIn->basePitch *= surf->blk_w;
239 }
240
241 ret = AddrComputeSurfaceInfo(addrlib,
242 AddrSurfInfoIn,
243 AddrSurfInfoOut);
244 if (ret != ADDR_OK) {
245 return ret;
246 }
247
248 surf_level = is_stencil ? &surf->u.legacy.stencil_level[level] : &surf->u.legacy.level[level];
249 surf_level->offset = align64(surf->surf_size, AddrSurfInfoOut->baseAlign);
250 surf_level->slice_size_dw = AddrSurfInfoOut->sliceSize / 4;
251 surf_level->nblk_x = AddrSurfInfoOut->pitch;
252 surf_level->nblk_y = AddrSurfInfoOut->height;
253
254 switch (AddrSurfInfoOut->tileMode) {
255 case ADDR_TM_LINEAR_ALIGNED:
256 surf_level->mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
257 break;
258 case ADDR_TM_1D_TILED_THIN1:
259 surf_level->mode = RADEON_SURF_MODE_1D;
260 break;
261 case ADDR_TM_2D_TILED_THIN1:
262 surf_level->mode = RADEON_SURF_MODE_2D;
263 break;
264 default:
265 assert(0);
266 }
267
268 if (is_stencil)
269 surf->u.legacy.stencil_tiling_index[level] = AddrSurfInfoOut->tileIndex;
270 else
271 surf->u.legacy.tiling_index[level] = AddrSurfInfoOut->tileIndex;
272
273 surf->surf_size = surf_level->offset + AddrSurfInfoOut->surfSize;
274
275 /* Clear DCC fields at the beginning. */
276 surf_level->dcc_offset = 0;
277
278 /* The previous level's flag tells us if we can use DCC for this level. */
279 if (AddrSurfInfoIn->flags.dccCompatible &&
280 (level == 0 || AddrDccOut->subLvlCompressible)) {
281 bool prev_level_clearable = level == 0 ||
282 AddrDccOut->dccRamSizeAligned;
283
284 AddrDccIn->colorSurfSize = AddrSurfInfoOut->surfSize;
285 AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
286 AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
287 AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
288 AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
289
290 ret = AddrComputeDccInfo(addrlib,
291 AddrDccIn,
292 AddrDccOut);
293
294 if (ret == ADDR_OK) {
295 surf_level->dcc_offset = surf->dcc_size;
296 surf->num_dcc_levels = level + 1;
297 surf->dcc_size = surf_level->dcc_offset + AddrDccOut->dccRamSize;
298 surf->dcc_alignment = MAX2(surf->dcc_alignment, AddrDccOut->dccRamBaseAlign);
299
300 /* If the DCC size of a subresource (1 mip level or 1 slice)
301 * is not aligned, the DCC memory layout is not contiguous for
302 * that subresource, which means we can't use fast clear.
303 *
304 * We only do fast clears for whole mipmap levels. If we did
305 * per-slice fast clears, the same restriction would apply.
306 * (i.e. only compute the slice size and see if it's aligned)
307 *
308 * The last level can be non-contiguous and still be clearable
309 * if it's interleaved with the next level that doesn't exist.
310 */
311 if (AddrDccOut->dccRamSizeAligned ||
312 (prev_level_clearable && level == config->info.levels - 1))
313 surf_level->dcc_fast_clear_size = AddrDccOut->dccFastClearSize;
314 else
315 surf_level->dcc_fast_clear_size = 0;
316
317 /* Compute the DCC slice size because addrlib doesn't
318 * provide this info. As DCC memory is linear (each
319 * slice is the same size) it's easy to compute.
320 */
321 surf->dcc_slice_size = AddrDccOut->dccRamSize / config->info.array_size;
322
323 /* For arrays, we have to compute the DCC info again
324 * with one slice size to get a correct fast clear
325 * size.
326 */
327 if (config->info.array_size > 1) {
328 AddrDccIn->colorSurfSize = AddrSurfInfoOut->sliceSize;
329 AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
330 AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
331 AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
332 AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
333
334 ret = AddrComputeDccInfo(addrlib,
335 AddrDccIn, AddrDccOut);
336 if (ret == ADDR_OK) {
337 /* If the DCC memory isn't properly
338 * aligned, the data are interleaved
339 					 * across slices.
340 */
341 if (AddrDccOut->dccRamSizeAligned)
342 surf_level->dcc_slice_fast_clear_size = AddrDccOut->dccFastClearSize;
343 else
344 surf_level->dcc_slice_fast_clear_size = 0;
345 }
346 } else {
347 surf_level->dcc_slice_fast_clear_size = surf_level->dcc_fast_clear_size;
348 }
349 }
350 }
351
352 /* HTILE. */
353 if (!is_stencil &&
354 AddrSurfInfoIn->flags.depth &&
355 surf_level->mode == RADEON_SURF_MODE_2D &&
356 level == 0 &&
357 !(surf->flags & RADEON_SURF_NO_HTILE)) {
358 AddrHtileIn->flags.tcCompatible = AddrSurfInfoOut->tcCompatible;
359 AddrHtileIn->pitch = AddrSurfInfoOut->pitch;
360 AddrHtileIn->height = AddrSurfInfoOut->height;
361 AddrHtileIn->numSlices = AddrSurfInfoOut->depth;
362 AddrHtileIn->blockWidth = ADDR_HTILE_BLOCKSIZE_8;
363 AddrHtileIn->blockHeight = ADDR_HTILE_BLOCKSIZE_8;
364 AddrHtileIn->pTileInfo = AddrSurfInfoOut->pTileInfo;
365 AddrHtileIn->tileIndex = AddrSurfInfoOut->tileIndex;
366 AddrHtileIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
367
368 ret = AddrComputeHtileInfo(addrlib,
369 AddrHtileIn,
370 AddrHtileOut);
371
372 if (ret == ADDR_OK) {
373 surf->htile_size = AddrHtileOut->htileBytes;
374 surf->htile_slice_size = AddrHtileOut->sliceSize;
375 surf->htile_alignment = AddrHtileOut->baseAlign;
376 }
377 }
378
379 return 0;
380 }
381
382 static void gfx6_set_micro_tile_mode(struct radeon_surf *surf,
383 const struct radeon_info *info)
384 {
385 uint32_t tile_mode = info->si_tile_mode_array[surf->u.legacy.tiling_index[0]];
386
387 if (info->chip_class >= GFX7)
388 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE_NEW(tile_mode);
389 else
390 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE(tile_mode);
391 }
392
393 static unsigned cik_get_macro_tile_index(struct radeon_surf *surf)
394 {
395 unsigned index, tileb;
396
397 tileb = 8 * 8 * surf->bpe;
398 tileb = MIN2(surf->u.legacy.tile_split, tileb);
399
400 for (index = 0; tileb > 64; index++)
401 tileb >>= 1;
402
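	/* Illustrative: with bpe = 4 and a tile split of at least 256 bytes,
	 * tileb starts at 8*8*4 = 256 and is halved twice (256 -> 128 -> 64),
	 * yielding index 2. */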
403 assert(index < 16);
404 return index;
405 }
406
407 static bool get_display_flag(const struct ac_surf_config *config,
408 const struct radeon_surf *surf)
409 {
410 unsigned num_channels = config->info.num_channels;
411 unsigned bpe = surf->bpe;
412
413 if (!config->is_3d &&
414 !config->is_cube &&
415 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
416 surf->flags & RADEON_SURF_SCANOUT &&
417 config->info.samples <= 1 &&
418 surf->blk_w <= 2 && surf->blk_h == 1) {
419 /* subsampled */
420 if (surf->blk_w == 2 && surf->blk_h == 1)
421 return true;
422
423 if (/* RGBA8 or RGBA16F */
424 (bpe >= 4 && bpe <= 8 && num_channels == 4) ||
425 /* R5G6B5 or R5G5B5A1 */
426 (bpe == 2 && num_channels >= 3) ||
427 /* C8 palette */
428 (bpe == 1 && num_channels == 1))
429 return true;
430 }
431 return false;
432 }
433
434 /**
435 * This must be called after the first level is computed.
436 *
437 * Copy surface-global settings like pipe/bank config from level 0 surface
438 * computation, and compute tile swizzle.
439 */
440 static int gfx6_surface_settings(ADDR_HANDLE addrlib,
441 const struct radeon_info *info,
442 const struct ac_surf_config *config,
443 ADDR_COMPUTE_SURFACE_INFO_OUTPUT* csio,
444 struct radeon_surf *surf)
445 {
446 surf->surf_alignment = csio->baseAlign;
447 surf->u.legacy.pipe_config = csio->pTileInfo->pipeConfig - 1;
448 gfx6_set_micro_tile_mode(surf, info);
449
450 /* For 2D modes only. */
451 if (csio->tileMode >= ADDR_TM_2D_TILED_THIN1) {
452 surf->u.legacy.bankw = csio->pTileInfo->bankWidth;
453 surf->u.legacy.bankh = csio->pTileInfo->bankHeight;
454 surf->u.legacy.mtilea = csio->pTileInfo->macroAspectRatio;
455 surf->u.legacy.tile_split = csio->pTileInfo->tileSplitBytes;
456 surf->u.legacy.num_banks = csio->pTileInfo->banks;
457 surf->u.legacy.macro_tile_index = csio->macroModeIndex;
458 } else {
459 surf->u.legacy.macro_tile_index = 0;
460 }
461
462 /* Compute tile swizzle. */
463 /* TODO: fix tile swizzle with mipmapping for GFX6 */
464 if ((info->chip_class >= GFX7 || config->info.levels == 1) &&
465 config->info.surf_index &&
466 surf->u.legacy.level[0].mode == RADEON_SURF_MODE_2D &&
467 !(surf->flags & (RADEON_SURF_Z_OR_SBUFFER | RADEON_SURF_SHAREABLE)) &&
468 !get_display_flag(config, surf)) {
469 ADDR_COMPUTE_BASE_SWIZZLE_INPUT AddrBaseSwizzleIn = {0};
470 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT AddrBaseSwizzleOut = {0};
471
472 AddrBaseSwizzleIn.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
473 AddrBaseSwizzleOut.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
474
475 AddrBaseSwizzleIn.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
476 AddrBaseSwizzleIn.tileIndex = csio->tileIndex;
477 AddrBaseSwizzleIn.macroModeIndex = csio->macroModeIndex;
478 AddrBaseSwizzleIn.pTileInfo = csio->pTileInfo;
479 AddrBaseSwizzleIn.tileMode = csio->tileMode;
480
481 int r = AddrComputeBaseSwizzle(addrlib, &AddrBaseSwizzleIn,
482 &AddrBaseSwizzleOut);
483 if (r != ADDR_OK)
484 return r;
485
486 assert(AddrBaseSwizzleOut.tileSwizzle <=
487 u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
488 surf->tile_swizzle = AddrBaseSwizzleOut.tileSwizzle;
489 }
490 return 0;
491 }
492
493 static void ac_compute_cmask(const struct radeon_info *info,
494 const struct ac_surf_config *config,
495 struct radeon_surf *surf)
496 {
497 unsigned pipe_interleave_bytes = info->pipe_interleave_bytes;
498 unsigned num_pipes = info->num_tile_pipes;
499 unsigned cl_width, cl_height;
500
501 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER ||
502 (config->info.samples >= 2 && !surf->fmask_size))
503 return;
504
505 assert(info->chip_class <= GFX8);
506
507 switch (num_pipes) {
508 case 2:
509 cl_width = 32;
510 cl_height = 16;
511 break;
512 case 4:
513 cl_width = 32;
514 cl_height = 32;
515 break;
516 case 8:
517 cl_width = 64;
518 cl_height = 32;
519 break;
520 case 16: /* Hawaii */
521 cl_width = 64;
522 cl_height = 64;
523 break;
524 default:
525 assert(0);
526 return;
527 }
528
529 unsigned base_align = num_pipes * pipe_interleave_bytes;
530
531 unsigned width = align(surf->u.legacy.level[0].nblk_x, cl_width*8);
532 unsigned height = align(surf->u.legacy.level[0].nblk_y, cl_height*8);
533 unsigned slice_elements = (width * height) / (8*8);
534
535 /* Each element of CMASK is a nibble. */
536 unsigned slice_bytes = slice_elements / 2;
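	/* Worked example (illustrative values): num_pipes = 4 gives
	 * cl_width = cl_height = 32, so a 1024x1024 level 0 is already aligned
	 * to 256; slice_elements = 1024*1024/64 = 16384, slice_bytes = 8192,
	 * and cmask_slice_tile_max below becomes 1024*1024/16384 - 1 = 63. */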
537
538 surf->u.legacy.cmask_slice_tile_max = (width * height) / (128*128);
539 if (surf->u.legacy.cmask_slice_tile_max)
540 surf->u.legacy.cmask_slice_tile_max -= 1;
541
542 unsigned num_layers;
543 if (config->is_3d)
544 num_layers = config->info.depth;
545 else if (config->is_cube)
546 num_layers = 6;
547 else
548 num_layers = config->info.array_size;
549
550 surf->cmask_alignment = MAX2(256, base_align);
551 surf->cmask_slice_size = align(slice_bytes, base_align);
552 surf->cmask_size = surf->cmask_slice_size * num_layers;
553 }
554
555 /**
556 * Fill in the tiling information in \p surf based on the given surface config.
557 *
558 * The following fields of \p surf must be initialized by the caller:
559 * blk_w, blk_h, bpe, flags.
560 */
561 static int gfx6_compute_surface(ADDR_HANDLE addrlib,
562 const struct radeon_info *info,
563 const struct ac_surf_config *config,
564 enum radeon_surf_mode mode,
565 struct radeon_surf *surf)
566 {
567 unsigned level;
568 bool compressed;
569 ADDR_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
570 ADDR_COMPUTE_SURFACE_INFO_OUTPUT AddrSurfInfoOut = {0};
571 ADDR_COMPUTE_DCCINFO_INPUT AddrDccIn = {0};
572 ADDR_COMPUTE_DCCINFO_OUTPUT AddrDccOut = {0};
573 ADDR_COMPUTE_HTILE_INFO_INPUT AddrHtileIn = {0};
574 ADDR_COMPUTE_HTILE_INFO_OUTPUT AddrHtileOut = {0};
575 ADDR_TILEINFO AddrTileInfoIn = {0};
576 ADDR_TILEINFO AddrTileInfoOut = {0};
577 int r;
578
579 AddrSurfInfoIn.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);
580 AddrSurfInfoOut.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
581 AddrDccIn.size = sizeof(ADDR_COMPUTE_DCCINFO_INPUT);
582 AddrDccOut.size = sizeof(ADDR_COMPUTE_DCCINFO_OUTPUT);
583 AddrHtileIn.size = sizeof(ADDR_COMPUTE_HTILE_INFO_INPUT);
584 AddrHtileOut.size = sizeof(ADDR_COMPUTE_HTILE_INFO_OUTPUT);
585 AddrSurfInfoOut.pTileInfo = &AddrTileInfoOut;
586
587 compressed = surf->blk_w == 4 && surf->blk_h == 4;
588
589 /* MSAA requires 2D tiling. */
590 if (config->info.samples > 1)
591 mode = RADEON_SURF_MODE_2D;
592
593 /* DB doesn't support linear layouts. */
594 if (surf->flags & (RADEON_SURF_Z_OR_SBUFFER) &&
595 mode < RADEON_SURF_MODE_1D)
596 mode = RADEON_SURF_MODE_1D;
597
598 /* Set the requested tiling mode. */
599 switch (mode) {
600 case RADEON_SURF_MODE_LINEAR_ALIGNED:
601 AddrSurfInfoIn.tileMode = ADDR_TM_LINEAR_ALIGNED;
602 break;
603 case RADEON_SURF_MODE_1D:
604 AddrSurfInfoIn.tileMode = ADDR_TM_1D_TILED_THIN1;
605 break;
606 case RADEON_SURF_MODE_2D:
607 AddrSurfInfoIn.tileMode = ADDR_TM_2D_TILED_THIN1;
608 break;
609 default:
610 assert(0);
611 }
612
613 /* The format must be set correctly for the allocation of compressed
614 * textures to work. In other cases, setting the bpp is sufficient.
615 */
616 if (compressed) {
617 switch (surf->bpe) {
618 case 8:
619 AddrSurfInfoIn.format = ADDR_FMT_BC1;
620 break;
621 case 16:
622 AddrSurfInfoIn.format = ADDR_FMT_BC3;
623 break;
624 default:
625 assert(0);
626 }
627 }
628 else {
629 AddrDccIn.bpp = AddrSurfInfoIn.bpp = surf->bpe * 8;
630 }
631
632 AddrDccIn.numSamples = AddrSurfInfoIn.numSamples =
633 MAX2(1, config->info.samples);
634 AddrSurfInfoIn.tileIndex = -1;
635
636 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER)) {
637 AddrDccIn.numSamples = AddrSurfInfoIn.numFrags =
638 MAX2(1, config->info.storage_samples);
639 }
640
641 /* Set the micro tile type. */
642 if (surf->flags & RADEON_SURF_SCANOUT)
643 AddrSurfInfoIn.tileType = ADDR_DISPLAYABLE;
644 else if (surf->flags & RADEON_SURF_Z_OR_SBUFFER)
645 AddrSurfInfoIn.tileType = ADDR_DEPTH_SAMPLE_ORDER;
646 else
647 AddrSurfInfoIn.tileType = ADDR_NON_DISPLAYABLE;
648
649 AddrSurfInfoIn.flags.color = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
650 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
651 AddrSurfInfoIn.flags.cube = config->is_cube;
652 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
653 AddrSurfInfoIn.flags.pow2Pad = config->info.levels > 1;
654 AddrSurfInfoIn.flags.tcCompatible = (surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) != 0;
655
656 /* Only degrade the tile mode for space if TC-compatible HTILE hasn't been
657 * requested, because TC-compatible HTILE requires 2D tiling.
658 */
659 AddrSurfInfoIn.flags.opt4Space = !AddrSurfInfoIn.flags.tcCompatible &&
660 !AddrSurfInfoIn.flags.fmask &&
661 config->info.samples <= 1 &&
662 !(surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE);
663
664 /* DCC notes:
665 * - If we add MSAA support, keep in mind that CB can't decompress 8bpp
666 * with samples >= 4.
667 * - Mipmapped array textures have low performance (discovered by a closed
668 * driver team).
669 */
670 AddrSurfInfoIn.flags.dccCompatible =
671 info->chip_class >= GFX8 &&
672 info->has_graphics && /* disable DCC on compute-only chips */
673 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
674 !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
675 !compressed &&
676 ((config->info.array_size == 1 && config->info.depth == 1) ||
677 config->info.levels == 1);
678
679 AddrSurfInfoIn.flags.noStencil = (surf->flags & RADEON_SURF_SBUFFER) == 0;
680 AddrSurfInfoIn.flags.compressZ = !!(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
681
682 /* On GFX7-GFX8, the DB uses the same pitch and tile mode (except tilesplit)
683 * for Z and stencil. This can cause a number of problems which we work
684 * around here:
685 *
686 * - a depth part that is incompatible with mipmapped texturing
687 * - at least on Stoney, entirely incompatible Z/S aspects (e.g.
688 * incorrect tiling applied to the stencil part, stencil buffer
689 * memory accesses that go out of bounds) even without mipmapping
690 *
691 * Some piglit tests that are prone to different types of related
692 * failures:
693 * ./bin/ext_framebuffer_multisample-upsample 2 stencil
694 * ./bin/framebuffer-blit-levels {draw,read} stencil
695 * ./bin/ext_framebuffer_multisample-unaligned-blit N {depth,stencil} {msaa,upsample,downsample}
696 * ./bin/fbo-depth-array fs-writes-{depth,stencil} / {depth,stencil}-{clear,layered-clear,draw}
697 * ./bin/depthstencil-render-miplevels 1024 d=s=z24_s8
698 */
699 int stencil_tile_idx = -1;
700
701 if (AddrSurfInfoIn.flags.depth && !AddrSurfInfoIn.flags.noStencil &&
702 (config->info.levels > 1 || info->family == CHIP_STONEY)) {
703 /* Compute stencilTileIdx that is compatible with the (depth)
704 * tileIdx. This degrades the depth surface if necessary to
705 * ensure that a matching stencilTileIdx exists. */
706 AddrSurfInfoIn.flags.matchStencilTileCfg = 1;
707
708 /* Keep the depth mip-tail compatible with texturing. */
709 AddrSurfInfoIn.flags.noStencil = 1;
710 }
711
712 /* Set preferred macrotile parameters. This is usually required
713 * for shared resources. This is for 2D tiling only. */
714 if (AddrSurfInfoIn.tileMode >= ADDR_TM_2D_TILED_THIN1 &&
715 surf->u.legacy.bankw && surf->u.legacy.bankh &&
716 surf->u.legacy.mtilea && surf->u.legacy.tile_split) {
717 /* If any of these parameters are incorrect, the calculation
718 * will fail. */
719 AddrTileInfoIn.banks = surf->u.legacy.num_banks;
720 AddrTileInfoIn.bankWidth = surf->u.legacy.bankw;
721 AddrTileInfoIn.bankHeight = surf->u.legacy.bankh;
722 AddrTileInfoIn.macroAspectRatio = surf->u.legacy.mtilea;
723 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.tile_split;
724 AddrTileInfoIn.pipeConfig = surf->u.legacy.pipe_config + 1; /* +1 compared to GB_TILE_MODE */
725 AddrSurfInfoIn.flags.opt4Space = 0;
726 AddrSurfInfoIn.pTileInfo = &AddrTileInfoIn;
727
728 /* If AddrSurfInfoIn.pTileInfo is set, Addrlib doesn't set
729 * the tile index, because we are expected to know it if
730 * we know the other parameters.
731 *
732 * This is something that can easily be fixed in Addrlib.
733 * For now, just figure it out here.
734 		 * Note that only 2D_TILED_THIN1 is handled here.
735 */
736 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
737 assert(AddrSurfInfoIn.tileMode == ADDR_TM_2D_TILED_THIN1);
738
739 if (info->chip_class == GFX6) {
740 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE) {
741 if (surf->bpe == 2)
742 AddrSurfInfoIn.tileIndex = 11; /* 16bpp */
743 else
744 AddrSurfInfoIn.tileIndex = 12; /* 32bpp */
745 } else {
746 if (surf->bpe == 1)
747 AddrSurfInfoIn.tileIndex = 14; /* 8bpp */
748 else if (surf->bpe == 2)
749 AddrSurfInfoIn.tileIndex = 15; /* 16bpp */
750 else if (surf->bpe == 4)
751 AddrSurfInfoIn.tileIndex = 16; /* 32bpp */
752 else
753 AddrSurfInfoIn.tileIndex = 17; /* 64bpp (and 128bpp) */
754 }
755 } else {
756 /* GFX7 - GFX8 */
757 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE)
758 AddrSurfInfoIn.tileIndex = 10; /* 2D displayable */
759 else
760 AddrSurfInfoIn.tileIndex = 14; /* 2D non-displayable */
761
762 /* Addrlib doesn't set this if tileIndex is forced like above. */
763 AddrSurfInfoOut.macroModeIndex = cik_get_macro_tile_index(surf);
764 }
765 }
766
767 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
768 surf->num_dcc_levels = 0;
769 surf->surf_size = 0;
770 surf->dcc_size = 0;
771 surf->dcc_alignment = 1;
772 surf->htile_size = 0;
773 surf->htile_slice_size = 0;
774 surf->htile_alignment = 1;
775
776 const bool only_stencil = (surf->flags & RADEON_SURF_SBUFFER) &&
777 !(surf->flags & RADEON_SURF_ZBUFFER);
778
779 /* Calculate texture layout information. */
780 if (!only_stencil) {
781 for (level = 0; level < config->info.levels; level++) {
782 r = gfx6_compute_level(addrlib, config, surf, false, level, compressed,
783 &AddrSurfInfoIn, &AddrSurfInfoOut,
784 &AddrDccIn, &AddrDccOut, &AddrHtileIn, &AddrHtileOut);
785 if (r)
786 return r;
787
788 if (level > 0)
789 continue;
790
791 if (!AddrSurfInfoOut.tcCompatible) {
792 AddrSurfInfoIn.flags.tcCompatible = 0;
793 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
794 }
795
796 if (AddrSurfInfoIn.flags.matchStencilTileCfg) {
797 AddrSurfInfoIn.flags.matchStencilTileCfg = 0;
798 AddrSurfInfoIn.tileIndex = AddrSurfInfoOut.tileIndex;
799 stencil_tile_idx = AddrSurfInfoOut.stencilTileIdx;
800
801 assert(stencil_tile_idx >= 0);
802 }
803
804 r = gfx6_surface_settings(addrlib, info, config,
805 &AddrSurfInfoOut, surf);
806 if (r)
807 return r;
808 }
809 }
810
811 /* Calculate texture layout information for stencil. */
812 if (surf->flags & RADEON_SURF_SBUFFER) {
813 AddrSurfInfoIn.tileIndex = stencil_tile_idx;
814 AddrSurfInfoIn.bpp = 8;
815 AddrSurfInfoIn.flags.depth = 0;
816 AddrSurfInfoIn.flags.stencil = 1;
817 AddrSurfInfoIn.flags.tcCompatible = 0;
818 /* This will be ignored if AddrSurfInfoIn.pTileInfo is NULL. */
819 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.stencil_tile_split;
820
821 for (level = 0; level < config->info.levels; level++) {
822 r = gfx6_compute_level(addrlib, config, surf, true, level, compressed,
823 &AddrSurfInfoIn, &AddrSurfInfoOut,
824 &AddrDccIn, &AddrDccOut,
825 NULL, NULL);
826 if (r)
827 return r;
828
829 /* DB uses the depth pitch for both stencil and depth. */
830 if (!only_stencil) {
831 if (surf->u.legacy.stencil_level[level].nblk_x !=
832 surf->u.legacy.level[level].nblk_x)
833 surf->u.legacy.stencil_adjusted = true;
834 } else {
835 surf->u.legacy.level[level].nblk_x =
836 surf->u.legacy.stencil_level[level].nblk_x;
837 }
838
839 if (level == 0) {
840 if (only_stencil) {
841 r = gfx6_surface_settings(addrlib, info, config,
842 &AddrSurfInfoOut, surf);
843 if (r)
844 return r;
845 }
846
847 /* For 2D modes only. */
848 if (AddrSurfInfoOut.tileMode >= ADDR_TM_2D_TILED_THIN1) {
849 surf->u.legacy.stencil_tile_split =
850 AddrSurfInfoOut.pTileInfo->tileSplitBytes;
851 }
852 }
853 }
854 }
855
856 /* Compute FMASK. */
857 if (config->info.samples >= 2 && AddrSurfInfoIn.flags.color &&
858 info->has_graphics && !(surf->flags & RADEON_SURF_NO_FMASK)) {
859 ADDR_COMPUTE_FMASK_INFO_INPUT fin = {0};
860 ADDR_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
861 ADDR_TILEINFO fmask_tile_info = {};
862
863 fin.size = sizeof(fin);
864 fout.size = sizeof(fout);
865
866 fin.tileMode = AddrSurfInfoOut.tileMode;
867 fin.pitch = AddrSurfInfoOut.pitch;
868 fin.height = config->info.height;
869 fin.numSlices = AddrSurfInfoIn.numSlices;
870 fin.numSamples = AddrSurfInfoIn.numSamples;
871 fin.numFrags = AddrSurfInfoIn.numFrags;
872 fin.tileIndex = -1;
873 fout.pTileInfo = &fmask_tile_info;
874
875 r = AddrComputeFmaskInfo(addrlib, &fin, &fout);
876 if (r)
877 return r;
878
879 surf->fmask_size = fout.fmaskBytes;
880 surf->fmask_alignment = fout.baseAlign;
881 surf->fmask_tile_swizzle = 0;
882
883 surf->u.legacy.fmask.slice_tile_max =
884 (fout.pitch * fout.height) / 64;
885 if (surf->u.legacy.fmask.slice_tile_max)
886 surf->u.legacy.fmask.slice_tile_max -= 1;
887
888 surf->u.legacy.fmask.tiling_index = fout.tileIndex;
889 surf->u.legacy.fmask.bankh = fout.pTileInfo->bankHeight;
890 surf->u.legacy.fmask.pitch_in_pixels = fout.pitch;
891 surf->u.legacy.fmask.slice_size = fout.sliceSize;
892
893 /* Compute tile swizzle for FMASK. */
894 if (config->info.fmask_surf_index &&
895 !(surf->flags & RADEON_SURF_SHAREABLE)) {
896 ADDR_COMPUTE_BASE_SWIZZLE_INPUT xin = {0};
897 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT xout = {0};
898
899 xin.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
900 xout.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
901
902 /* This counter starts from 1 instead of 0. */
903 xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
904 xin.tileIndex = fout.tileIndex;
905 xin.macroModeIndex = fout.macroModeIndex;
906 xin.pTileInfo = fout.pTileInfo;
907 xin.tileMode = fin.tileMode;
908
909 int r = AddrComputeBaseSwizzle(addrlib, &xin, &xout);
910 if (r != ADDR_OK)
911 return r;
912
913 assert(xout.tileSwizzle <=
914 u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
915 surf->fmask_tile_swizzle = xout.tileSwizzle;
916 }
917 }
918
919 /* Recalculate the whole DCC miptree size including disabled levels.
920 * This is what addrlib does, but calling addrlib would be a lot more
921 * complicated.
922 */
923 if (surf->dcc_size && config->info.levels > 1) {
924 /* The smallest miplevels that are never compressed by DCC
925 * still read the DCC buffer via TC if the base level uses DCC,
926 * and for some reason the DCC buffer needs to be larger if
927 * the miptree uses non-zero tile_swizzle. Otherwise there are
928 * VM faults.
929 *
930 * "dcc_alignment * 4" was determined by trial and error.
931 */
932 surf->dcc_size = align64(surf->surf_size >> 8,
933 surf->dcc_alignment * 4);
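		/* The >> 8 divides by 256, matching the assumption (stated here for
		 * clarity, not taken from addrlib) that GFX8 DCC uses roughly one
		 * byte of metadata per 256 bytes of color data. */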
934 }
935
936 /* Make sure HTILE covers the whole miptree, because the shader reads
937 * TC-compatible HTILE even for levels where it's disabled by DB.
938 */
939 if (surf->htile_size && config->info.levels > 1 &&
940 surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) {
941 /* MSAA can't occur with levels > 1, so ignore the sample count. */
942 const unsigned total_pixels = surf->surf_size / surf->bpe;
943 const unsigned htile_block_size = 8 * 8;
944 const unsigned htile_element_size = 4;
945
946 surf->htile_size = (total_pixels / htile_block_size) *
947 htile_element_size;
948 surf->htile_size = align(surf->htile_size, surf->htile_alignment);
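		/* Illustrative: if surf_size / bpe is 2^20 pixels, this yields
		 * (2^20 / 64) * 4 = 64 KiB of HTILE before the final alignment. */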
949 } else if (!surf->htile_size) {
950 /* Unset this if HTILE is not present. */
951 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
952 }
953
954 surf->is_linear = surf->u.legacy.level[0].mode == RADEON_SURF_MODE_LINEAR_ALIGNED;
955 surf->is_displayable = surf->is_linear ||
956 surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY ||
957 surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER;
958
959 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
960 * used at the same time. This case is not currently expected to occur
961 * because we don't use rotated. Enforce this restriction on all chips
962 * to facilitate testing.
963 */
964 if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER) {
965 assert(!"rotate micro tile mode is unsupported");
966 return ADDR_ERROR;
967 }
968
969 ac_compute_cmask(info, config, surf);
970 return 0;
971 }
972
973 /* This is only called when expecting a tiled layout. */
974 static int
975 gfx9_get_preferred_swizzle_mode(ADDR_HANDLE addrlib,
976 struct radeon_surf *surf,
977 ADDR2_COMPUTE_SURFACE_INFO_INPUT *in,
978 bool is_fmask, AddrSwizzleMode *swizzle_mode)
979 {
980 ADDR_E_RETURNCODE ret;
981 ADDR2_GET_PREFERRED_SURF_SETTING_INPUT sin = {0};
982 ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT sout = {0};
983
984 sin.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_INPUT);
985 sout.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT);
986
987 sin.flags = in->flags;
988 sin.resourceType = in->resourceType;
989 sin.format = in->format;
990 sin.resourceLoction = ADDR_RSRC_LOC_INVIS;
991 /* TODO: We could allow some of these: */
992 sin.forbiddenBlock.micro = 1; /* don't allow the 256B swizzle modes */
993 sin.forbiddenBlock.var = 1; /* don't allow the variable-sized swizzle modes */
994 sin.bpp = in->bpp;
995 sin.width = in->width;
996 sin.height = in->height;
997 sin.numSlices = in->numSlices;
998 sin.numMipLevels = in->numMipLevels;
999 sin.numSamples = in->numSamples;
1000 sin.numFrags = in->numFrags;
1001
1002 if (is_fmask) {
1003 sin.flags.display = 0;
1004 sin.flags.color = 0;
1005 sin.flags.fmask = 1;
1006 }
1007
1008 if (surf->flags & RADEON_SURF_FORCE_MICRO_TILE_MODE) {
1009 sin.forbiddenBlock.linear = 1;
1010
1011 if (surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY)
1012 sin.preferredSwSet.sw_D = 1;
1013 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_STANDARD)
1014 sin.preferredSwSet.sw_S = 1;
1015 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_DEPTH)
1016 sin.preferredSwSet.sw_Z = 1;
1017 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER)
1018 sin.preferredSwSet.sw_R = 1;
1019 }
1020
1021 ret = Addr2GetPreferredSurfaceSetting(addrlib, &sin, &sout);
1022 if (ret != ADDR_OK)
1023 return ret;
1024
1025 *swizzle_mode = sout.swizzleMode;
1026 return 0;
1027 }
1028
1029 static bool is_dcc_supported_by_CB(const struct radeon_info *info, unsigned sw_mode)
1030 {
1031 if (info->chip_class >= GFX10)
1032 return sw_mode == ADDR_SW_64KB_Z_X || sw_mode == ADDR_SW_64KB_R_X;
1033
1034 return sw_mode != ADDR_SW_LINEAR;
1035 }
1036
1037 ASSERTED static bool is_dcc_supported_by_L2(const struct radeon_info *info,
1038 const struct radeon_surf *surf)
1039 {
1040 if (info->chip_class <= GFX9) {
1041 /* Only independent 64B blocks are supported. */
1042 return surf->u.gfx9.dcc.independent_64B_blocks &&
1043 !surf->u.gfx9.dcc.independent_128B_blocks &&
1044 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B;
1045 }
1046
1047 if (info->family == CHIP_NAVI10) {
1048 /* Only independent 128B blocks are supported. */
1049 return !surf->u.gfx9.dcc.independent_64B_blocks &&
1050 surf->u.gfx9.dcc.independent_128B_blocks &&
1051 surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B;
1052 }
1053
1054 if (info->family == CHIP_NAVI12 ||
1055 info->family == CHIP_NAVI14) {
1056 /* Either 64B or 128B can be used, but not both.
1057 * If 64B is used, DCC image stores are unsupported.
1058 */
1059 return surf->u.gfx9.dcc.independent_64B_blocks !=
1060 surf->u.gfx9.dcc.independent_128B_blocks &&
1061 (!surf->u.gfx9.dcc.independent_64B_blocks ||
1062 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B) &&
1063 (!surf->u.gfx9.dcc.independent_128B_blocks ||
1064 surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B);
1065 }
1066
1067 unreachable("unhandled chip");
1068 return false;
1069 }
1070
1071 static bool is_dcc_supported_by_DCN(const struct radeon_info *info,
1072 const struct ac_surf_config *config,
1073 const struct radeon_surf *surf,
1074 bool rb_aligned, bool pipe_aligned)
1075 {
1076 if (!info->use_display_dcc_unaligned &&
1077 !info->use_display_dcc_with_retile_blit)
1078 return false;
1079
1080 /* 16bpp and 64bpp are more complicated, so they are disallowed for now. */
1081 if (surf->bpe != 4)
1082 return false;
1083
1084 /* Handle unaligned DCC. */
1085 if (info->use_display_dcc_unaligned &&
1086 (rb_aligned || pipe_aligned))
1087 return false;
1088
1089 switch (info->chip_class) {
1090 case GFX9:
1091 /* There are more constraints, but we always set
1092 * INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B,
1093 * which always works.
1094 */
1095 assert(surf->u.gfx9.dcc.independent_64B_blocks &&
1096 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B);
1097 return true;
1098 case GFX10:
1099 /* DCN requires INDEPENDENT_128B_BLOCKS = 0.
1100 * For 4K, it also requires INDEPENDENT_64B_BLOCKS = 1.
1101 */
1102 return !surf->u.gfx9.dcc.independent_128B_blocks &&
1103 ((config->info.width <= 2560 &&
1104 config->info.height <= 2560) ||
1105 (surf->u.gfx9.dcc.independent_64B_blocks &&
1106 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B));
1107 default:
1108 unreachable("unhandled chip");
1109 return false;
1110 }
1111 }
1112
1113 static int gfx9_compute_miptree(ADDR_HANDLE addrlib,
1114 const struct radeon_info *info,
1115 const struct ac_surf_config *config,
1116 struct radeon_surf *surf, bool compressed,
1117 ADDR2_COMPUTE_SURFACE_INFO_INPUT *in)
1118 {
1119 ADDR2_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {};
1120 ADDR2_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
1121 ADDR_E_RETURNCODE ret;
1122
1123 out.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_OUTPUT);
1124 out.pMipInfo = mip_info;
1125
1126 ret = Addr2ComputeSurfaceInfo(addrlib, in, &out);
1127 if (ret != ADDR_OK)
1128 return ret;
1129
1130 if (in->flags.stencil) {
1131 surf->u.gfx9.stencil.swizzle_mode = in->swizzleMode;
1132 surf->u.gfx9.stencil.epitch = out.epitchIsHeight ? out.mipChainHeight - 1 :
1133 out.mipChainPitch - 1;
1134 surf->surf_alignment = MAX2(surf->surf_alignment, out.baseAlign);
1135 surf->u.gfx9.stencil_offset = align(surf->surf_size, out.baseAlign);
1136 surf->surf_size = surf->u.gfx9.stencil_offset + out.surfSize;
1137 return 0;
1138 }
1139
1140 surf->u.gfx9.surf.swizzle_mode = in->swizzleMode;
1141 surf->u.gfx9.surf.epitch = out.epitchIsHeight ? out.mipChainHeight - 1 :
1142 out.mipChainPitch - 1;
1143
1144 /* CMASK fast clear uses these even if FMASK isn't allocated.
1145 * FMASK only supports the Z swizzle modes, whose numbers are multiples of 4.
1146 */
1147 surf->u.gfx9.fmask.swizzle_mode = surf->u.gfx9.surf.swizzle_mode & ~0x3;
1148 surf->u.gfx9.fmask.epitch = surf->u.gfx9.surf.epitch;
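	/* The & ~0x3 above rounds the swizzle mode down to the Z member of its
	 * group of four (e.g. a *_R_X mode becomes the matching *_Z_X mode),
	 * relying on the multiple-of-4 numbering noted in the comment. */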
1149
1150 surf->u.gfx9.surf_slice_size = out.sliceSize;
1151 surf->u.gfx9.surf_pitch = out.pitch;
1152 if (!compressed && surf->blk_w > 1 && out.pitch == out.pixelPitch) {
1153 		/* Adjust surf_pitch to be in element units,
1154 * not in pixels */
1155 surf->u.gfx9.surf_pitch /= surf->blk_w;
1156 }
1157 surf->u.gfx9.surf_height = out.height;
1158 surf->surf_size = out.surfSize;
1159 surf->surf_alignment = out.baseAlign;
1160
1161 if (in->swizzleMode == ADDR_SW_LINEAR) {
1162 for (unsigned i = 0; i < in->numMipLevels; i++) {
1163 surf->u.gfx9.offset[i] = mip_info[i].offset;
1164 surf->u.gfx9.pitch[i] = mip_info[i].pitch;
1165 }
1166 }
1167
1168 if (in->flags.depth) {
1169 assert(in->swizzleMode != ADDR_SW_LINEAR);
1170
1171 if (surf->flags & RADEON_SURF_NO_HTILE)
1172 return 0;
1173
1174 /* HTILE */
1175 ADDR2_COMPUTE_HTILE_INFO_INPUT hin = {0};
1176 ADDR2_COMPUTE_HTILE_INFO_OUTPUT hout = {0};
1177
1178 hin.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_INPUT);
1179 hout.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_OUTPUT);
1180
1181 assert(in->flags.metaPipeUnaligned == 0);
1182 assert(in->flags.metaRbUnaligned == 0);
1183
1184 hin.hTileFlags.pipeAligned = 1;
1185 hin.hTileFlags.rbAligned = 1;
1186 hin.depthFlags = in->flags;
1187 hin.swizzleMode = in->swizzleMode;
1188 hin.unalignedWidth = in->width;
1189 hin.unalignedHeight = in->height;
1190 hin.numSlices = in->numSlices;
1191 hin.numMipLevels = in->numMipLevels;
1192 hin.firstMipIdInTail = out.firstMipIdInTail;
1193
1194 ret = Addr2ComputeHtileInfo(addrlib, &hin, &hout);
1195 if (ret != ADDR_OK)
1196 return ret;
1197
1198 surf->htile_size = hout.htileBytes;
1199 surf->htile_slice_size = hout.sliceSize;
1200 surf->htile_alignment = hout.baseAlign;
1201 return 0;
1202 }
1203
1204 {
1205 /* Compute tile swizzle for the color surface.
1206 * All *_X and *_T modes can use the swizzle.
1207 */
1208 if (config->info.surf_index &&
1209 in->swizzleMode >= ADDR_SW_64KB_Z_T &&
1210 !out.mipChainInTail &&
1211 !(surf->flags & RADEON_SURF_SHAREABLE) &&
1212 !in->flags.display) {
1213 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
1214 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
1215
1216 xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
1217 xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
1218
1219 xin.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
1220 xin.flags = in->flags;
1221 xin.swizzleMode = in->swizzleMode;
1222 xin.resourceType = in->resourceType;
1223 xin.format = in->format;
1224 xin.numSamples = in->numSamples;
1225 xin.numFrags = in->numFrags;
1226
1227 ret = Addr2ComputePipeBankXor(addrlib, &xin, &xout);
1228 if (ret != ADDR_OK)
1229 return ret;
1230
1231 assert(xout.pipeBankXor <=
1232 u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1233 surf->tile_swizzle = xout.pipeBankXor;
1234 }
1235
1236 /* DCC */
1237 if (info->has_graphics &&
1238 !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
1239 !compressed &&
1240 is_dcc_supported_by_CB(info, in->swizzleMode) &&
1241 (!in->flags.display ||
1242 is_dcc_supported_by_DCN(info, config, surf,
1243 !in->flags.metaRbUnaligned,
1244 !in->flags.metaPipeUnaligned))) {
1245 ADDR2_COMPUTE_DCCINFO_INPUT din = {0};
1246 ADDR2_COMPUTE_DCCINFO_OUTPUT dout = {0};
1247 ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {};
1248
1249 din.size = sizeof(ADDR2_COMPUTE_DCCINFO_INPUT);
1250 dout.size = sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT);
1251 dout.pMipInfo = meta_mip_info;
1252
1253 din.dccKeyFlags.pipeAligned = !in->flags.metaPipeUnaligned;
1254 din.dccKeyFlags.rbAligned = !in->flags.metaRbUnaligned;
1255 din.colorFlags = in->flags;
1256 din.resourceType = in->resourceType;
1257 din.swizzleMode = in->swizzleMode;
1258 din.bpp = in->bpp;
1259 din.unalignedWidth = in->width;
1260 din.unalignedHeight = in->height;
1261 din.numSlices = in->numSlices;
1262 din.numFrags = in->numFrags;
1263 din.numMipLevels = in->numMipLevels;
1264 din.dataSurfaceSize = out.surfSize;
1265 din.firstMipIdInTail = out.firstMipIdInTail;
1266
1267 ret = Addr2ComputeDccInfo(addrlib, &din, &dout);
1268 if (ret != ADDR_OK)
1269 return ret;
1270
1271 surf->u.gfx9.dcc.rb_aligned = din.dccKeyFlags.rbAligned;
1272 surf->u.gfx9.dcc.pipe_aligned = din.dccKeyFlags.pipeAligned;
1273 surf->u.gfx9.dcc_block_width = dout.compressBlkWidth;
1274 surf->u.gfx9.dcc_block_height = dout.compressBlkHeight;
1275 surf->u.gfx9.dcc_block_depth = dout.compressBlkDepth;
1276 surf->dcc_size = dout.dccRamSize;
1277 surf->dcc_alignment = dout.dccRamBaseAlign;
1278 surf->num_dcc_levels = in->numMipLevels;
1279
1280 /* Disable DCC for levels that are in the mip tail.
1281 *
1282 * There are two issues that this is intended to
1283 * address:
1284 *
1285 * 1. Multiple mip levels may share a cache line. This
1286 * can lead to corruption when switching between
1287 * rendering to different mip levels because the
1288 * RBs don't maintain coherency.
1289 *
1290 * 2. Texturing with metadata after rendering sometimes
1291 * fails with corruption, probably for a similar
1292 * reason.
1293 *
1294 * Working around these issues for all levels in the
1295 * mip tail may be overly conservative, but it's what
1296 * Vulkan does.
1297 *
1298 * Alternative solutions that also work but are worse:
1299 * - Disable DCC entirely.
1300 * - Flush TC L2 after rendering.
1301 */
1302 for (unsigned i = 0; i < in->numMipLevels; i++) {
1303 if (meta_mip_info[i].inMiptail) {
1304 surf->num_dcc_levels = i;
1305 break;
1306 }
1307 }
1308
1309 if (!surf->num_dcc_levels)
1310 surf->dcc_size = 0;
1311
1312 surf->u.gfx9.display_dcc_size = surf->dcc_size;
1313 surf->u.gfx9.display_dcc_alignment = surf->dcc_alignment;
1314 surf->u.gfx9.display_dcc_pitch_max = dout.pitch - 1;
1315
1316 /* Compute displayable DCC. */
1317 if (in->flags.display &&
1318 surf->num_dcc_levels &&
1319 info->use_display_dcc_with_retile_blit) {
1320 /* Compute displayable DCC info. */
1321 din.dccKeyFlags.pipeAligned = 0;
1322 din.dccKeyFlags.rbAligned = 0;
1323
1324 assert(din.numSlices == 1);
1325 assert(din.numMipLevels == 1);
1326 assert(din.numFrags == 1);
1327 assert(surf->tile_swizzle == 0);
1328 assert(surf->u.gfx9.dcc.pipe_aligned ||
1329 surf->u.gfx9.dcc.rb_aligned);
1330
1331 ret = Addr2ComputeDccInfo(addrlib, &din, &dout);
1332 if (ret != ADDR_OK)
1333 return ret;
1334
1335 surf->u.gfx9.display_dcc_size = dout.dccRamSize;
1336 surf->u.gfx9.display_dcc_alignment = dout.dccRamBaseAlign;
1337 surf->u.gfx9.display_dcc_pitch_max = dout.pitch - 1;
1338 assert(surf->u.gfx9.display_dcc_size <= surf->dcc_size);
1339
1340 /* Compute address mapping from non-displayable to displayable DCC. */
1341 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin = {};
1342 addrin.size = sizeof(addrin);
1343 addrin.colorFlags.color = 1;
1344 addrin.swizzleMode = din.swizzleMode;
1345 addrin.resourceType = din.resourceType;
1346 addrin.bpp = din.bpp;
1347 addrin.unalignedWidth = din.unalignedWidth;
1348 addrin.unalignedHeight = din.unalignedHeight;
1349 addrin.numSlices = 1;
1350 addrin.numMipLevels = 1;
1351 addrin.numFrags = 1;
1352
1353 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_OUTPUT addrout = {};
1354 addrout.size = sizeof(addrout);
1355
1356 surf->u.gfx9.dcc_retile_num_elements =
1357 DIV_ROUND_UP(in->width, dout.compressBlkWidth) *
1358 DIV_ROUND_UP(in->height, dout.compressBlkHeight) * 2;
1359 /* Align the size to 4 (for the compute shader). */
1360 surf->u.gfx9.dcc_retile_num_elements =
1361 align(surf->u.gfx9.dcc_retile_num_elements, 4);
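			/* Example with hypothetical 128x128 compress blocks: a 3840x2160
			 * surface needs ceil(3840/128) * ceil(2160/128) * 2 =
			 * 30 * 17 * 2 = 1020 elements (one src/dst address pair per DCC
			 * block), already a multiple of 4. */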
1362
1363 surf->u.gfx9.dcc_retile_map =
1364 malloc(surf->u.gfx9.dcc_retile_num_elements * 4);
1365 if (!surf->u.gfx9.dcc_retile_map)
1366 return ADDR_OUTOFMEMORY;
1367
1368 unsigned index = 0;
1369 surf->u.gfx9.dcc_retile_use_uint16 = true;
1370
1371 for (unsigned y = 0; y < in->height; y += dout.compressBlkHeight) {
1372 addrin.y = y;
1373
1374 for (unsigned x = 0; x < in->width; x += dout.compressBlkWidth) {
1375 addrin.x = x;
1376
1377 /* Compute src DCC address */
1378 addrin.dccKeyFlags.pipeAligned = surf->u.gfx9.dcc.pipe_aligned;
1379 addrin.dccKeyFlags.rbAligned = surf->u.gfx9.dcc.rb_aligned;
1380 addrout.addr = 0;
1381
1382 ret = Addr2ComputeDccAddrFromCoord(addrlib, &addrin, &addrout);
1383 if (ret != ADDR_OK)
1384 return ret;
1385
1386 surf->u.gfx9.dcc_retile_map[index * 2] = addrout.addr;
1387 if (addrout.addr > UINT16_MAX)
1388 surf->u.gfx9.dcc_retile_use_uint16 = false;
1389
1390 /* Compute dst DCC address */
1391 addrin.dccKeyFlags.pipeAligned = 0;
1392 addrin.dccKeyFlags.rbAligned = 0;
1393 addrout.addr = 0;
1394
1395 ret = Addr2ComputeDccAddrFromCoord(addrlib, &addrin, &addrout);
1396 if (ret != ADDR_OK)
1397 return ret;
1398
1399 surf->u.gfx9.dcc_retile_map[index * 2 + 1] = addrout.addr;
1400 if (addrout.addr > UINT16_MAX)
1401 surf->u.gfx9.dcc_retile_use_uint16 = false;
1402
1403 assert(index * 2 + 1 < surf->u.gfx9.dcc_retile_num_elements);
1404 index++;
1405 }
1406 }
1407 /* Fill the remaining pairs with the last one (for the compute shader). */
1408 for (unsigned i = index * 2; i < surf->u.gfx9.dcc_retile_num_elements; i++)
1409 surf->u.gfx9.dcc_retile_map[i] = surf->u.gfx9.dcc_retile_map[i - 2];
1410 }
1411 }
1412
1413 /* FMASK */
1414 if (in->numSamples > 1 && info->has_graphics &&
1415 !(surf->flags & RADEON_SURF_NO_FMASK)) {
1416 ADDR2_COMPUTE_FMASK_INFO_INPUT fin = {0};
1417 ADDR2_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
1418
1419 fin.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_INPUT);
1420 fout.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_OUTPUT);
1421
1422 ret = gfx9_get_preferred_swizzle_mode(addrlib, surf, in,
1423 true, &fin.swizzleMode);
1424 if (ret != ADDR_OK)
1425 return ret;
1426
1427 fin.unalignedWidth = in->width;
1428 fin.unalignedHeight = in->height;
1429 fin.numSlices = in->numSlices;
1430 fin.numSamples = in->numSamples;
1431 fin.numFrags = in->numFrags;
1432
1433 ret = Addr2ComputeFmaskInfo(addrlib, &fin, &fout);
1434 if (ret != ADDR_OK)
1435 return ret;
1436
1437 surf->u.gfx9.fmask.swizzle_mode = fin.swizzleMode;
1438 surf->u.gfx9.fmask.epitch = fout.pitch - 1;
1439 surf->fmask_size = fout.fmaskBytes;
1440 surf->fmask_alignment = fout.baseAlign;
1441
1442 /* Compute tile swizzle for the FMASK surface. */
1443 if (config->info.fmask_surf_index &&
1444 fin.swizzleMode >= ADDR_SW_64KB_Z_T &&
1445 !(surf->flags & RADEON_SURF_SHAREABLE)) {
1446 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
1447 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
1448
1449 xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
1450 xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
1451
1452 /* This counter starts from 1 instead of 0. */
1453 xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
1454 xin.flags = in->flags;
1455 xin.swizzleMode = fin.swizzleMode;
1456 xin.resourceType = in->resourceType;
1457 xin.format = in->format;
1458 xin.numSamples = in->numSamples;
1459 xin.numFrags = in->numFrags;
1460
1461 ret = Addr2ComputePipeBankXor(addrlib, &xin, &xout);
1462 if (ret != ADDR_OK)
1463 return ret;
1464
1465 assert(xout.pipeBankXor <=
1466 u_bit_consecutive(0, sizeof(surf->fmask_tile_swizzle) * 8));
1467 surf->fmask_tile_swizzle = xout.pipeBankXor;
1468 }
1469 }
1470
1471 /* CMASK -- on GFX10 only for FMASK */
1472 if (in->swizzleMode != ADDR_SW_LINEAR &&
1473 in->resourceType == ADDR_RSRC_TEX_2D &&
1474 ((info->chip_class <= GFX9 &&
1475 in->numSamples == 1 &&
1476 in->flags.metaPipeUnaligned == 0 &&
1477 in->flags.metaRbUnaligned == 0) ||
1478 (surf->fmask_size && in->numSamples >= 2))) {
1479 ADDR2_COMPUTE_CMASK_INFO_INPUT cin = {0};
1480 ADDR2_COMPUTE_CMASK_INFO_OUTPUT cout = {0};
1481
1482 cin.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_INPUT);
1483 cout.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_OUTPUT);
1484
1485 assert(in->flags.metaPipeUnaligned == 0);
1486 assert(in->flags.metaRbUnaligned == 0);
1487
1488 cin.cMaskFlags.pipeAligned = 1;
1489 cin.cMaskFlags.rbAligned = 1;
1490 cin.colorFlags = in->flags;
1491 cin.resourceType = in->resourceType;
1492 cin.unalignedWidth = in->width;
1493 cin.unalignedHeight = in->height;
1494 cin.numSlices = in->numSlices;
1495
1496 if (in->numSamples > 1)
1497 cin.swizzleMode = surf->u.gfx9.fmask.swizzle_mode;
1498 else
1499 cin.swizzleMode = in->swizzleMode;
1500
1501 ret = Addr2ComputeCmaskInfo(addrlib, &cin, &cout);
1502 if (ret != ADDR_OK)
1503 return ret;
1504
1505 surf->cmask_size = cout.cmaskBytes;
1506 surf->cmask_alignment = cout.baseAlign;
1507 }
1508 }
1509
1510 return 0;
1511 }
1512
1513 static int gfx9_compute_surface(ADDR_HANDLE addrlib,
1514 const struct radeon_info *info,
1515 const struct ac_surf_config *config,
1516 enum radeon_surf_mode mode,
1517 struct radeon_surf *surf)
1518 {
1519 bool compressed;
1520 ADDR2_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
1521 int r;
1522
1523 AddrSurfInfoIn.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_INPUT);
1524
1525 compressed = surf->blk_w == 4 && surf->blk_h == 4;
1526
1527 /* The format must be set correctly for the allocation of compressed
1528 * textures to work. In other cases, setting the bpp is sufficient. */
1529 if (compressed) {
1530 switch (surf->bpe) {
1531 case 8:
1532 AddrSurfInfoIn.format = ADDR_FMT_BC1;
1533 break;
1534 case 16:
1535 AddrSurfInfoIn.format = ADDR_FMT_BC3;
1536 break;
1537 default:
1538 assert(0);
1539 }
1540 } else {
1541 switch (surf->bpe) {
1542 case 1:
1543 assert(!(surf->flags & RADEON_SURF_ZBUFFER));
1544 AddrSurfInfoIn.format = ADDR_FMT_8;
1545 break;
1546 case 2:
1547 assert(surf->flags & RADEON_SURF_ZBUFFER ||
1548 !(surf->flags & RADEON_SURF_SBUFFER));
1549 AddrSurfInfoIn.format = ADDR_FMT_16;
1550 break;
1551 case 4:
1552 assert(surf->flags & RADEON_SURF_ZBUFFER ||
1553 !(surf->flags & RADEON_SURF_SBUFFER));
1554 AddrSurfInfoIn.format = ADDR_FMT_32;
1555 break;
1556 case 8:
1557 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1558 AddrSurfInfoIn.format = ADDR_FMT_32_32;
1559 break;
1560 case 12:
1561 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1562 AddrSurfInfoIn.format = ADDR_FMT_32_32_32;
1563 break;
1564 case 16:
1565 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1566 AddrSurfInfoIn.format = ADDR_FMT_32_32_32_32;
1567 break;
1568 default:
1569 assert(0);
1570 }
1571 AddrSurfInfoIn.bpp = surf->bpe * 8;
1572 }
1573
1574 bool is_color_surface = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
1575 AddrSurfInfoIn.flags.color = is_color_surface &&
1576 !(surf->flags & RADEON_SURF_NO_RENDER_TARGET);
1577 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
1578 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
1579 /* flags.texture currently refers to TC-compatible HTILE */
1580 AddrSurfInfoIn.flags.texture = is_color_surface ||
1581 surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE;
1582 AddrSurfInfoIn.flags.opt4space = 1;
1583
1584 AddrSurfInfoIn.numMipLevels = config->info.levels;
1585 AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
1586 AddrSurfInfoIn.numFrags = AddrSurfInfoIn.numSamples;
1587
1588 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER))
1589 AddrSurfInfoIn.numFrags = MAX2(1, config->info.storage_samples);
1590
1591 /* GFX9 doesn't support 1D depth textures, so allocate all 1D textures
1592 	 * as 2D to avoid having shader variants for 1D vs 2D; all shaders
1593 	 * must then sample 1D textures as 2D. */
1594 if (config->is_3d)
1595 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_3D;
1596 else if (info->chip_class != GFX9 && config->is_1d)
1597 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_1D;
1598 else
1599 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_2D;
1600
1601 AddrSurfInfoIn.width = config->info.width;
1602 AddrSurfInfoIn.height = config->info.height;
1603
1604 if (config->is_3d)
1605 AddrSurfInfoIn.numSlices = config->info.depth;
1606 else if (config->is_cube)
1607 AddrSurfInfoIn.numSlices = 6;
1608 else
1609 AddrSurfInfoIn.numSlices = config->info.array_size;
1610
1611 /* This is propagated to DCC. It must be 0 for HTILE and CMASK. */
1612 AddrSurfInfoIn.flags.metaPipeUnaligned = 0;
1613 AddrSurfInfoIn.flags.metaRbUnaligned = 0;
1614
1615 /* Optimal values for the L2 cache. */
1616 if (info->chip_class == GFX9) {
1617 surf->u.gfx9.dcc.independent_64B_blocks = 1;
1618 surf->u.gfx9.dcc.independent_128B_blocks = 0;
1619 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1620 } else if (info->chip_class >= GFX10) {
1621 surf->u.gfx9.dcc.independent_64B_blocks = 0;
1622 surf->u.gfx9.dcc.independent_128B_blocks = 1;
1623 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
1624 }
1625
1626 if (AddrSurfInfoIn.flags.display) {
1627 /* The display hardware can only read DCC with RB_ALIGNED=0 and
1628 * PIPE_ALIGNED=0. PIPE_ALIGNED really means L2CACHE_ALIGNED.
1629 *
1630 		 * The CB block requires RB_ALIGNED=1 except for 1-RB chips.
1631 * PIPE_ALIGNED is optional, but PIPE_ALIGNED=0 requires L2 flushes
1632 * after rendering, so PIPE_ALIGNED=1 is recommended.
1633 */
1634 if (info->use_display_dcc_unaligned) {
1635 AddrSurfInfoIn.flags.metaPipeUnaligned = 1;
1636 AddrSurfInfoIn.flags.metaRbUnaligned = 1;
1637 }
1638
1639 /* Adjust DCC settings to meet DCN requirements. */
1640 if (info->use_display_dcc_unaligned ||
1641 info->use_display_dcc_with_retile_blit) {
1642 /* Only Navi12/14 support independent 64B blocks in L2,
1643 * but then DCC image stores can't be used.
1644 */
1645 if (info->family == CHIP_NAVI12 ||
1646 info->family == CHIP_NAVI14) {
1647 surf->u.gfx9.dcc.independent_64B_blocks = 1;
1648 surf->u.gfx9.dcc.independent_128B_blocks = 0;
1649 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1650 }
1651 }
1652 }
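/* Note: with use_display_dcc_with_retile_blit, the aligned DCC surface is
 * kept for rendering and a second, unaligned DCC surface plus a retile map
 * are added for the display engine; see the display_dcc_* and dcc_retile_*
 * handling in ac_compute_surface.
 */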
1653
1654 switch (mode) {
1655 case RADEON_SURF_MODE_LINEAR_ALIGNED:
1656 assert(config->info.samples <= 1);
1657 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1658 AddrSurfInfoIn.swizzleMode = ADDR_SW_LINEAR;
1659 break;
1660
1661 case RADEON_SURF_MODE_1D:
1662 case RADEON_SURF_MODE_2D:
1663 if (surf->flags & RADEON_SURF_IMPORTED ||
1664 (info->chip_class >= GFX10 &&
1665 surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE)) {
1666 AddrSurfInfoIn.swizzleMode = surf->u.gfx9.surf.swizzle_mode;
1667 break;
1668 }
1669
1670 r = gfx9_get_preferred_swizzle_mode(addrlib, surf, &AddrSurfInfoIn,
1671 false, &AddrSurfInfoIn.swizzleMode);
1672 if (r)
1673 return r;
1674 break;
1675
1676 default:
1677 assert(0);
1678 }
1679
1680 surf->u.gfx9.resource_type = AddrSurfInfoIn.resourceType;
1681 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
1682
1683 surf->num_dcc_levels = 0;
1684 surf->surf_size = 0;
1685 surf->fmask_size = 0;
1686 surf->dcc_size = 0;
1687 surf->htile_size = 0;
1688 surf->htile_slice_size = 0;
1689 surf->u.gfx9.surf_offset = 0;
1690 surf->u.gfx9.stencil_offset = 0;
1691 surf->cmask_size = 0;
1692 surf->u.gfx9.dcc_retile_use_uint16 = false;
1693 surf->u.gfx9.dcc_retile_num_elements = 0;
1694 surf->u.gfx9.dcc_retile_map = NULL;
1695
1696 /* Calculate texture layout information. */
1697 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed,
1698 &AddrSurfInfoIn);
1699 if (r)
1700 goto error;
1701
1702 /* Calculate texture layout information for stencil. */
1703 if (surf->flags & RADEON_SURF_SBUFFER) {
1704 AddrSurfInfoIn.flags.stencil = 1;
1705 AddrSurfInfoIn.bpp = 8;
1706 AddrSurfInfoIn.format = ADDR_FMT_8;
1707
1708 if (!AddrSurfInfoIn.flags.depth) {
1709 r = gfx9_get_preferred_swizzle_mode(addrlib, surf, &AddrSurfInfoIn,
1710 false, &AddrSurfInfoIn.swizzleMode);
1711 if (r)
1712 goto error;
1713 } else
1714 AddrSurfInfoIn.flags.depth = 0;
1715
1716 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed,
1717 &AddrSurfInfoIn);
1718 if (r)
1719 goto error;
1720 }
1721
1722 surf->is_linear = surf->u.gfx9.surf.swizzle_mode == ADDR_SW_LINEAR;
1723
1724 /* Query whether the surface is displayable. This is only useful for
1725 * surfaces that are allocated without SCANOUT. */
1726 bool displayable = false;
1727 if (!config->is_3d && !config->is_cube) {
1728 r = Addr2IsValidDisplaySwizzleMode(addrlib, surf->u.gfx9.surf.swizzle_mode,
1729 surf->bpe * 8, &displayable);
1730 if (r)
1731 goto error;
1732
1733 /* Display needs unaligned DCC. */
1734 if (surf->num_dcc_levels &&
1735 !is_dcc_supported_by_DCN(info, config, surf,
1736 surf->u.gfx9.dcc.rb_aligned,
1737 surf->u.gfx9.dcc.pipe_aligned))
1738 displayable = false;
1739 }
1740 surf->is_displayable = displayable;
1741
1742 /* Validate that we allocated a displayable surface if requested. */
1743 assert(!AddrSurfInfoIn.flags.display || surf->is_displayable);
1744
1745 /* Validate that DCC is set up correctly. */
1746 if (surf->num_dcc_levels) {
1747 assert(is_dcc_supported_by_L2(info, surf));
1748 if (AddrSurfInfoIn.flags.color)
1749 assert(is_dcc_supported_by_CB(info, surf->u.gfx9.surf.swizzle_mode));
1750 if (AddrSurfInfoIn.flags.display) {
1751 assert(is_dcc_supported_by_DCN(info, config, surf,
1752 surf->u.gfx9.dcc.rb_aligned,
1753 surf->u.gfx9.dcc.pipe_aligned));
1754 }
1755 }
1756
1757 if (info->has_graphics &&
1758 !compressed &&
1759 !config->is_3d &&
1760 config->info.levels == 1 &&
1761 AddrSurfInfoIn.flags.color &&
1762 !surf->is_linear &&
1763 surf->surf_alignment >= 64 * 1024 && /* 64KB tiling */
1764 !(surf->flags & (RADEON_SURF_DISABLE_DCC |
1765 RADEON_SURF_FORCE_SWIZZLE_MODE |
1766 RADEON_SURF_FORCE_MICRO_TILE_MODE))) {
1767 /* Validate that DCC is enabled if DCN can do it. */
1768 if ((info->use_display_dcc_unaligned ||
1769 info->use_display_dcc_with_retile_blit) &&
1770 AddrSurfInfoIn.flags.display &&
1771 surf->bpe == 4) {
1772 assert(surf->num_dcc_levels);
1773 }
1774
1775 /* Validate that non-scanout DCC is always enabled. */
1776 if (!AddrSurfInfoIn.flags.display)
1777 assert(surf->num_dcc_levels);
1778 }
1779
1780 if (!surf->htile_size) {
1781 /* Unset this if HTILE is not present. */
1782 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1783 }
1784
1785 switch (surf->u.gfx9.surf.swizzle_mode) {
1786 /* S = standard. */
1787 case ADDR_SW_256B_S:
1788 case ADDR_SW_4KB_S:
1789 case ADDR_SW_64KB_S:
1790 case ADDR_SW_64KB_S_T:
1791 case ADDR_SW_4KB_S_X:
1792 case ADDR_SW_64KB_S_X:
1793 surf->micro_tile_mode = RADEON_MICRO_MODE_STANDARD;
1794 break;
1795
1796 /* D = display. */
1797 case ADDR_SW_LINEAR:
1798 case ADDR_SW_256B_D:
1799 case ADDR_SW_4KB_D:
1800 case ADDR_SW_64KB_D:
1801 case ADDR_SW_64KB_D_T:
1802 case ADDR_SW_4KB_D_X:
1803 case ADDR_SW_64KB_D_X:
1804 surf->micro_tile_mode = RADEON_MICRO_MODE_DISPLAY;
1805 break;
1806
1807 /* R = rotated (gfx9), render target (gfx10). */
1808 case ADDR_SW_256B_R:
1809 case ADDR_SW_4KB_R:
1810 case ADDR_SW_64KB_R:
1811 case ADDR_SW_64KB_R_T:
1812 case ADDR_SW_4KB_R_X:
1813 case ADDR_SW_64KB_R_X:
1814 case ADDR_SW_VAR_R_X:
1815 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
1816 * used at the same time. We currently do not use the rotated mode
1817 * on gfx9.
1818 */
1819 assert(info->chip_class >= GFX10 ||
1820 !"rotated micro tile mode is unsupported");
1821 surf->micro_tile_mode = RADEON_MICRO_MODE_RENDER;
1822 break;
1823
1824 /* Z = depth. */
1825 case ADDR_SW_4KB_Z:
1826 case ADDR_SW_64KB_Z:
1827 case ADDR_SW_64KB_Z_T:
1828 case ADDR_SW_4KB_Z_X:
1829 case ADDR_SW_64KB_Z_X:
1830 case ADDR_SW_VAR_Z_X:
1831 surf->micro_tile_mode = RADEON_MICRO_MODE_DEPTH;
1832 break;
1833
1834 default:
1835 assert(0);
1836 }
1837
1838 return 0;
1839
1840 error:
1841 free(surf->u.gfx9.dcc_retile_map);
1842 surf->u.gfx9.dcc_retile_map = NULL;
1843 return r;
1844 }
1845
1846 int ac_compute_surface(ADDR_HANDLE addrlib, const struct radeon_info *info,
1847 const struct ac_surf_config *config,
1848 enum radeon_surf_mode mode,
1849 struct radeon_surf *surf)
1850 {
1851 int r;
1852
1853 r = surf_config_sanity(config, surf->flags);
1854 if (r)
1855 return r;
1856
1857 if (info->chip_class >= GFX9)
1858 r = gfx9_compute_surface(addrlib, info, config, mode, surf);
1859 else
1860 r = gfx6_compute_surface(addrlib, info, config, mode, surf);
1861
1862 if (r)
1863 return r;
1864
1865 /* Determine the memory layout of multiple allocations in one buffer. */
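/* E.g. an MSAA color surface with DCC ends up laid out as:
 *   [color surface | FMASK | CMASK | (displayable DCC | retile map) | DCC]
 * with each part placed at its own alignment; a depth surface is simply
 * [depth | HTILE].
 */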
1866 surf->total_size = surf->surf_size;
1867
1868 if (surf->htile_size) {
1869 surf->htile_offset = align64(surf->total_size, surf->htile_alignment);
1870 surf->total_size = surf->htile_offset + surf->htile_size;
1871 }
1872
1873 if (surf->fmask_size) {
1874 assert(config->info.samples >= 2);
1875 surf->fmask_offset = align64(surf->total_size, surf->fmask_alignment);
1876 surf->total_size = surf->fmask_offset + surf->fmask_size;
1877 }
1878
1879 /* Single-sample CMASK is in a separate buffer. */
1880 if (surf->cmask_size && config->info.samples >= 2) {
1881 surf->cmask_offset = align64(surf->total_size, surf->cmask_alignment);
1882 surf->total_size = surf->cmask_offset + surf->cmask_size;
1883 }
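/* When cmask_size is non-zero for a single-sample surface, the driver
 * allocates that CMASK (used for fast color clears) in a buffer of its own,
 * which is why it isn't added to total_size here.
 */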
1884
1885 if (surf->dcc_size &&
1886 /* dcc_size is computed on GFX9+ only if it's displayable. */
1887 (info->chip_class >= GFX9 || !get_display_flag(config, surf))) {
1888 /* For hw-specific reasons, it's better to place the displayable DCC
1889 * immediately after the image.
1890 */
1891 if (info->chip_class >= GFX9 &&
1892 surf->u.gfx9.dcc_retile_num_elements) {
1893 /* Add space for the displayable DCC buffer. */
1894 surf->display_dcc_offset =
1895 align64(surf->total_size, surf->u.gfx9.display_dcc_alignment);
1896 surf->total_size = surf->display_dcc_offset +
1897 surf->u.gfx9.display_dcc_size;
1898
1899 /* Add space for the DCC retile buffer. (16-bit or 32-bit elements) */
1900 surf->dcc_retile_map_offset =
1901 align64(surf->total_size, info->tcc_cache_line_size);
1902
1903 if (surf->u.gfx9.dcc_retile_use_uint16) {
1904 surf->total_size = surf->dcc_retile_map_offset +
1905 surf->u.gfx9.dcc_retile_num_elements * 2;
1906 } else {
1907 surf->total_size = surf->dcc_retile_map_offset +
1908 surf->u.gfx9.dcc_retile_num_elements * 4;
1909 }
1910 }
1911
1912 surf->dcc_offset = align64(surf->total_size, surf->dcc_alignment);
1913 surf->total_size = surf->dcc_offset + surf->dcc_size;
1914 }
1915
1916 return 0;
1917 }
1918
1919 /* This is meant to be used for disabling DCC. */
1920 void ac_surface_zero_dcc_fields(struct radeon_surf *surf)
1921 {
1922 surf->dcc_offset = 0;
1923 surf->display_dcc_offset = 0;
1924 surf->dcc_retile_map_offset = 0;
1925 }
1926
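/* The TILE_SPLIT tiling-flag field encodes the split size as 64 << N bytes;
 * eg_tile_split() decodes it and eg_tile_split_rev() encodes it back.
 * Unrecognized values fall back to 1024 bytes (N = 4).
 */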
1927 static unsigned eg_tile_split(unsigned tile_split)
1928 {
1929 switch (tile_split) {
1930 case 0: tile_split = 64; break;
1931 case 1: tile_split = 128; break;
1932 case 2: tile_split = 256; break;
1933 case 3: tile_split = 512; break;
1934 default:
1935 case 4: tile_split = 1024; break;
1936 case 5: tile_split = 2048; break;
1937 case 6: tile_split = 4096; break;
1938 }
1939 return tile_split;
1940 }
1941
1942 static unsigned eg_tile_split_rev(unsigned eg_tile_split)
1943 {
1944 switch (eg_tile_split) {
1945 case 64: return 0;
1946 case 128: return 1;
1947 case 256: return 2;
1948 case 512: return 3;
1949 default:
1950 case 1024: return 4;
1951 case 2048: return 5;
1952 case 4096: return 6;
1953 }
1954 }
1955
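/* Presumably local fallbacks for the case where the installed amdgpu_drm.h
 * doesn't define these yet.
 */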
1956 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_SHIFT 45
1957 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_MASK 0x3
1958
1959 /* This should be called before ac_compute_surface. */
1960 void ac_surface_set_bo_metadata(const struct radeon_info *info,
1961 struct radeon_surf *surf, uint64_t tiling_flags,
1962 enum radeon_surf_mode *mode)
1963 {
1964 bool scanout;
1965
1966 if (info->chip_class >= GFX9) {
1967 surf->u.gfx9.surf.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
1968 surf->u.gfx9.dcc.independent_64B_blocks = AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_64B);
1969 surf->u.gfx9.dcc.independent_128B_blocks = AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_128B);
1970 surf->u.gfx9.dcc.max_compressed_block_size = AMDGPU_TILING_GET(tiling_flags, DCC_MAX_COMPRESSED_BLOCK_SIZE);
1971 surf->u.gfx9.display_dcc_pitch_max = AMDGPU_TILING_GET(tiling_flags, DCC_PITCH_MAX);
1972 scanout = AMDGPU_TILING_GET(tiling_flags, SCANOUT);
1973 *mode = surf->u.gfx9.surf.swizzle_mode > 0 ? RADEON_SURF_MODE_2D : RADEON_SURF_MODE_LINEAR_ALIGNED;
1974 } else {
1975 surf->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1976 surf->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1977 surf->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1978 surf->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
1979 surf->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1980 surf->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1981 scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
1982
1983 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
1984 *mode = RADEON_SURF_MODE_2D;
1985 else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
1986 *mode = RADEON_SURF_MODE_1D;
1987 else
1988 *mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
1989 }
1990
1991 if (scanout)
1992 surf->flags |= RADEON_SURF_SCANOUT;
1993 else
1994 surf->flags &= ~RADEON_SURF_SCANOUT;
1995 }
1996
1997 void ac_surface_get_bo_metadata(const struct radeon_info *info,
1998 struct radeon_surf *surf, uint64_t *tiling_flags)
1999 {
2000 *tiling_flags = 0;
2001
2002 if (info->chip_class >= GFX9) {
2003 uint64_t dcc_offset = 0;
2004
2005 if (surf->dcc_offset) {
2006 dcc_offset = surf->display_dcc_offset ? surf->display_dcc_offset
2007 : surf->dcc_offset;
2008 assert((dcc_offset >> 8) != 0 && (dcc_offset >> 8) < (1 << 24));
2009 }
2010
2011 *tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, surf->u.gfx9.surf.swizzle_mode);
2012 *tiling_flags |= AMDGPU_TILING_SET(DCC_OFFSET_256B, dcc_offset >> 8);
2013 *tiling_flags |= AMDGPU_TILING_SET(DCC_PITCH_MAX, surf->u.gfx9.display_dcc_pitch_max);
2014 *tiling_flags |= AMDGPU_TILING_SET(DCC_INDEPENDENT_64B, surf->u.gfx9.dcc.independent_64B_blocks);
2015 *tiling_flags |= AMDGPU_TILING_SET(DCC_INDEPENDENT_128B, surf->u.gfx9.dcc.independent_128B_blocks);
2016 *tiling_flags |= AMDGPU_TILING_SET(DCC_MAX_COMPRESSED_BLOCK_SIZE, surf->u.gfx9.dcc.max_compressed_block_size);
2017 *tiling_flags |= AMDGPU_TILING_SET(SCANOUT, (surf->flags & RADEON_SURF_SCANOUT) != 0);
2018 } else {
2019 if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D)
2020 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
2021 else if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D)
2022 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
2023 else
2024 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
2025
2026 *tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, surf->u.legacy.pipe_config);
2027 *tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(surf->u.legacy.bankw));
2028 *tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(surf->u.legacy.bankh));
2029 if (surf->u.legacy.tile_split)
2030 *tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(surf->u.legacy.tile_split));
2031 *tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(surf->u.legacy.mtilea));
2032 *tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(surf->u.legacy.num_banks)-1);
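/* NUM_BANKS is stored as log2(num_banks) - 1, matching the "2 << NUM_BANKS"
 * decode in ac_surface_set_bo_metadata(). */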
2033
2034 if (surf->flags & RADEON_SURF_SCANOUT)
2035 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
2036 else
2037 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
2038 }
2039 }
2040
2041 static uint32_t ac_get_umd_metadata_word1(const struct radeon_info *info)
2042 {
2043 return (ATI_VENDOR_ID << 16) | info->pci_id;
2044 }
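/* E.g. for a GPU with PCI device ID 0x731F (a Navi10 part), this returns
 * (0x1002 << 16) | 0x731F = 0x1002731F.
 */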
2045
2046 /* This should be called after ac_compute_surface. */
2047 bool ac_surface_set_umd_metadata(const struct radeon_info *info,
2048 struct radeon_surf *surf,
2049 unsigned num_storage_samples,
2050 unsigned num_mipmap_levels,
2051 unsigned size_metadata,
2052 uint32_t metadata[64])
2053 {
2054 uint32_t *desc = &metadata[2];
2055 uint64_t offset;
2056
2057 if (info->chip_class >= GFX9)
2058 offset = surf->u.gfx9.surf_offset;
2059 else
2060 offset = surf->u.legacy.level[0].offset;
2061
2062 if (offset || /* Non-zero planes ignore metadata. */
2063 size_metadata < 10 * 4 || /* at least 2(header) + 8(desc) dwords */
2064 metadata[0] == 0 || /* invalid version number */
2065 metadata[1] != ac_get_umd_metadata_word1(info)) /* invalid PCI ID */ {
2066 /* Disable DCC because it might not be enabled. */
2067 ac_surface_zero_dcc_fields(surf);
2068
2069 /* Don't report an error if the texture comes from an incompatible driver,
2070 * but the import might still not work correctly.
2071 */
2072 return true;
2073 }
2074
2075 /* Validate that sample counts and the number of mipmap levels match. */
2076 unsigned desc_last_level = G_008F1C_LAST_LEVEL(desc[3]);
2077 unsigned type = G_008F1C_TYPE(desc[3]);
2078
2079 if (type == V_008F1C_SQ_RSRC_IMG_2D_MSAA || type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) {
2080 unsigned log_samples = util_logbase2(MAX2(1, num_storage_samples));
2081
2082 if (desc_last_level != log_samples) {
2083 fprintf(stderr,
2084 "amdgpu: invalid MSAA texture import, "
2085 "metadata has log2(samples) = %u, the caller set %u\n",
2086 desc_last_level, log_samples);
2087 return false;
2088 }
2089 } else {
2090 if (desc_last_level != num_mipmap_levels - 1) {
2091 fprintf(stderr,
2092 "amdgpu: invalid mipmapped texture import, "
2093 "metadata has last_level = %u, the caller set %u\n",
2094 desc_last_level, num_mipmap_levels - 1);
2095 return false;
2096 }
2097 }
2098
2099 if (info->chip_class >= GFX8 && G_008F28_COMPRESSION_EN(desc[6])) {
2100 /* Read DCC information. */
2101 switch (info->chip_class) {
2102 case GFX8:
2103 surf->dcc_offset = (uint64_t)desc[7] << 8;
2104 break;
2105
2106 case GFX9:
2107 surf->dcc_offset =
2108 ((uint64_t)desc[7] << 8) | ((uint64_t)G_008F24_META_DATA_ADDRESS(desc[5]) << 40);
2109 surf->u.gfx9.dcc.pipe_aligned = G_008F24_META_PIPE_ALIGNED(desc[5]);
2110 surf->u.gfx9.dcc.rb_aligned = G_008F24_META_RB_ALIGNED(desc[5]);
2111
2112 /* If DCC is unaligned, this can only be a displayable image. */
2113 if (!surf->u.gfx9.dcc.pipe_aligned && !surf->u.gfx9.dcc.rb_aligned)
2114 assert(surf->is_displayable);
2115 break;
2116
2117 case GFX10:
2118 surf->dcc_offset =
2119 ((uint64_t)G_00A018_META_DATA_ADDRESS_LO(desc[6]) << 8) | ((uint64_t)desc[7] << 16);
2120 surf->u.gfx9.dcc.pipe_aligned = G_00A018_META_PIPE_ALIGNED(desc[6]);
2121 break;
2122
2123 default:
2124 assert(0);
2125 return false;
2126 }
2127 } else {
2128 /* Disable DCC. dcc_offset is always set by texture_from_handle
2129 * and must be cleared here.
2130 */
2131 ac_surface_zero_dcc_fields(surf);
2132 }
2133
2134 return true;
2135 }
2136
2137 void ac_surface_get_umd_metadata(const struct radeon_info *info,
2138 struct radeon_surf *surf,
2139 unsigned num_mipmap_levels,
2140 uint32_t desc[8],
2141 unsigned *size_metadata, uint32_t metadata[64])
2142 {
2143 /* Clear the base address and set the relative DCC offset. */
2144 desc[0] = 0;
2145 desc[1] &= C_008F14_BASE_ADDRESS_HI;
2146
2147 switch (info->chip_class) {
2148 case GFX6:
2149 case GFX7:
2150 break;
2151 case GFX8:
2152 desc[7] = surf->dcc_offset >> 8;
2153 break;
2154 case GFX9:
2155 desc[7] = surf->dcc_offset >> 8;
2156 desc[5] &= C_008F24_META_DATA_ADDRESS;
2157 desc[5] |= S_008F24_META_DATA_ADDRESS(surf->dcc_offset >> 40);
2158 break;
2159 case GFX10:
2160 desc[6] &= C_00A018_META_DATA_ADDRESS_LO;
2161 desc[6] |= S_00A018_META_DATA_ADDRESS_LO(surf->dcc_offset >> 8);
2162 desc[7] = surf->dcc_offset >> 16;
2163 break;
2164 default:
2165 assert(0);
2166 }
2167
2168 /* Metadata image format version 1:
2169 * [0] = 1 (metadata format identifier)
2170 * [1] = (VENDOR_ID << 16) | PCI_ID
2171 * [2:9] = image descriptor for the whole resource
2172 * [2] is always 0, because the base address is cleared
2173 * [9] is the DCC offset bits [39:8] from the beginning of
2174 * the buffer
2175 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
2176 */
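/* E.g. a GFX8 texture with 3 mipmap levels uses dwords [10..12] below for
 * the per-level offsets, so *size_metadata ends up as (10 + 3) * 4 = 52 bytes.
 */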
2177
2178 metadata[0] = 1; /* metadata image format version 1 */
2179
2180 /* Tiling modes are ambiguous without a PCI ID. */
2181 metadata[1] = ac_get_umd_metadata_word1(info);
2182
2183 /* Dwords [2:9] contain the image descriptor. */
2184 memcpy(&metadata[2], desc, 8 * 4);
2185 *size_metadata = 10 * 4;
2186
2187 /* Dwords [10:..] contain the mipmap level offsets. */
2188 if (info->chip_class <= GFX8) {
2189 for (unsigned i = 0; i < num_mipmap_levels; i++)
2190 metadata[10 + i] = surf->u.legacy.level[i].offset >> 8;
2191
2192 *size_metadata += num_mipmap_levels * 4;
2193 }
2194 }
2195
2196 void ac_surface_override_offset_stride(const struct radeon_info *info,
2197 struct radeon_surf *surf,
2198 unsigned num_mipmap_levels,
2199 uint64_t offset, unsigned pitch)
2200 {
2201 if (info->chip_class >= GFX9) {
2202 if (pitch) {
2203 surf->u.gfx9.surf_pitch = pitch;
2204 if (num_mipmap_levels == 1)
2205 surf->u.gfx9.surf.epitch = pitch - 1;
2206 surf->u.gfx9.surf_slice_size =
2207 (uint64_t)pitch * surf->u.gfx9.surf_height * surf->bpe;
2208 }
2209 surf->u.gfx9.surf_offset = offset;
2210 if (surf->u.gfx9.stencil_offset)
2211 surf->u.gfx9.stencil_offset += offset;
2212 } else {
2213 if (pitch) {
2214 surf->u.legacy.level[0].nblk_x = pitch;
2215 surf->u.legacy.level[0].slice_size_dw =
2216 ((uint64_t)pitch * surf->u.legacy.level[0].nblk_y * surf->bpe) / 4;
2217 }
2218
2219 if (offset) {
2220 for (unsigned i = 0; i < ARRAY_SIZE(surf->u.legacy.level); ++i)
2221 surf->u.legacy.level[i].offset += offset;
2222 }
2223 }
2224
2225 if (surf->htile_offset)
2226 surf->htile_offset += offset;
2227 if (surf->fmask_offset)
2228 surf->fmask_offset += offset;
2229 if (surf->cmask_offset)
2230 surf->cmask_offset += offset;
2231 if (surf->dcc_offset)
2232 surf->dcc_offset += offset;
2233 if (surf->display_dcc_offset)
2234 surf->display_dcc_offset += offset;
2235 if (surf->dcc_retile_map_offset)
2236 surf->dcc_retile_map_offset += offset;
2237 }