amd/common: switch to 3-spaces style
[mesa.git] / src / amd / common / ac_surface.c
1 /*
2 * Copyright © 2011 Red Hat All Rights Reserved.
3 * Copyright © 2017 Advanced Micro Devices, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
25 * of the Software.
26 */
27
28 #include "ac_surface.h"
29
30 #include "ac_gpu_info.h"
31 #include "addrlib/inc/addrinterface.h"
32 #include "addrlib/src/amdgpu_asic_addr.h"
33 #include "amd_family.h"
34 #include "drm-uapi/amdgpu_drm.h"
35 #include "sid.h"
36 #include "util/hash_table.h"
37 #include "util/macros.h"
38 #include "util/simple_mtx.h"
39 #include "util/u_atomic.h"
40 #include "util/u_math.h"
41 #include "util/u_memory.h"
42
43 #include <amdgpu.h>
44 #include <errno.h>
45 #include <stdio.h>
46 #include <stdlib.h>
47
48 #ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
49 #define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
50 #endif
51
52 #ifndef CIASICIDGFXENGINE_ARCTICISLAND
53 #define CIASICIDGFXENGINE_ARCTICISLAND 0x0000000D
54 #endif
55
56 struct ac_addrlib {
57 ADDR_HANDLE handle;
58
59 /* The cache of DCC retile maps for reuse when allocating images of
60 * similar sizes.
61 */
62 simple_mtx_t dcc_retile_map_lock;
63 struct hash_table *dcc_retile_maps;
64 struct hash_table *dcc_retile_tile_indices;
65 };
66
67 struct dcc_retile_map_key {
68 enum radeon_family family;
69 unsigned retile_width;
70 unsigned retile_height;
71 bool rb_aligned;
72 bool pipe_aligned;
73 unsigned dcc_retile_num_elements;
74 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT input;
75 };
76
77 static uint32_t dcc_retile_map_hash_key(const void *key)
78 {
79 return _mesa_hash_data(key, sizeof(struct dcc_retile_map_key));
80 }
81
82 static bool dcc_retile_map_keys_equal(const void *a, const void *b)
83 {
84 return memcmp(a, b, sizeof(struct dcc_retile_map_key)) == 0;
85 }
86
87 static void dcc_retile_map_free(struct hash_entry *entry)
88 {
89 free((void *)entry->key);
90 free(entry->data);
91 }
92
93 struct dcc_retile_tile_key {
94 enum radeon_family family;
95 unsigned bpp;
96 unsigned swizzle_mode;
97 bool rb_aligned;
98 bool pipe_aligned;
99 };
100
101 struct dcc_retile_tile_data {
102 unsigned tile_width_log2;
103 unsigned tile_height_log2;
104 uint16_t *data;
105 };
106
107 static uint32_t dcc_retile_tile_hash_key(const void *key)
108 {
109 return _mesa_hash_data(key, sizeof(struct dcc_retile_tile_key));
110 }
111
112 static bool dcc_retile_tile_keys_equal(const void *a, const void *b)
113 {
114 return memcmp(a, b, sizeof(struct dcc_retile_tile_key)) == 0;
115 }
116
117 static void dcc_retile_tile_free(struct hash_entry *entry)
118 {
119 free((void *)entry->key);
120 free(((struct dcc_retile_tile_data *)entry->data)->data);
121 free(entry->data);
122 }
123
124 /* Assumes dcc_retile_map_lock is taken. */
125 static const struct dcc_retile_tile_data *
126 ac_compute_dcc_retile_tile_indices(struct ac_addrlib *addrlib, const struct radeon_info *info,
127 unsigned bpp, unsigned swizzle_mode, bool rb_aligned,
128 bool pipe_aligned)
129 {
130 struct dcc_retile_tile_key key = (struct dcc_retile_tile_key){.family = info->family,
131 .bpp = bpp,
132 .swizzle_mode = swizzle_mode,
133 .rb_aligned = rb_aligned,
134 .pipe_aligned = pipe_aligned};
135
136 struct hash_entry *entry = _mesa_hash_table_search(addrlib->dcc_retile_tile_indices, &key);
137 if (entry)
138 return entry->data;
139
140 ADDR2_COMPUTE_DCCINFO_INPUT din = {0};
141 ADDR2_COMPUTE_DCCINFO_OUTPUT dout = {0};
142 din.size = sizeof(ADDR2_COMPUTE_DCCINFO_INPUT);
143 dout.size = sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT);
144
145 din.dccKeyFlags.pipeAligned = pipe_aligned;
146 din.dccKeyFlags.rbAligned = rb_aligned;
147 din.resourceType = ADDR_RSRC_TEX_2D;
148 din.swizzleMode = swizzle_mode;
149 din.bpp = bpp;
150 din.unalignedWidth = 1;
151 din.unalignedHeight = 1;
152 din.numSlices = 1;
153 din.numFrags = 1;
154 din.numMipLevels = 1;
155
156 ADDR_E_RETURNCODE ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
157 if (ret != ADDR_OK)
158 return NULL;
159
160 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin = {0};
161 addrin.size = sizeof(addrin);
162 addrin.swizzleMode = swizzle_mode;
163 addrin.resourceType = ADDR_RSRC_TEX_2D;
164 addrin.bpp = bpp;
165 addrin.numSlices = 1;
166 addrin.numMipLevels = 1;
167 addrin.numFrags = 1;
168 addrin.pitch = dout.pitch;
169 addrin.height = dout.height;
170 addrin.compressBlkWidth = dout.compressBlkWidth;
171 addrin.compressBlkHeight = dout.compressBlkHeight;
172 addrin.compressBlkDepth = dout.compressBlkDepth;
173 addrin.metaBlkWidth = dout.metaBlkWidth;
174 addrin.metaBlkHeight = dout.metaBlkHeight;
175 addrin.metaBlkDepth = dout.metaBlkDepth;
176 addrin.dccKeyFlags.pipeAligned = pipe_aligned;
177 addrin.dccKeyFlags.rbAligned = rb_aligned;
178
179 unsigned w = dout.metaBlkWidth / dout.compressBlkWidth;
180 unsigned h = dout.metaBlkHeight / dout.compressBlkHeight;
181 uint16_t *indices = malloc(w * h * sizeof(uint16_t));
182 if (!indices)
183 return NULL;
184
185 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_OUTPUT addrout = {};
186 addrout.size = sizeof(addrout);
187
188 for (unsigned y = 0; y < h; ++y) {
189 addrin.y = y * dout.compressBlkHeight;
190 for (unsigned x = 0; x < w; ++x) {
191 addrin.x = x * dout.compressBlkWidth;
192 addrout.addr = 0;
193
194 if (Addr2ComputeDccAddrFromCoord(addrlib->handle, &addrin, &addrout) != ADDR_OK) {
195 free(indices);
196 return NULL;
197 }
198 indices[y * w + x] = addrout.addr;
199 }
200 }
201
202 struct dcc_retile_tile_data *data = calloc(1, sizeof(*data));
203 if (!data) {
204 free(indices);
205 return NULL;
206 }
207
208 data->tile_width_log2 = util_logbase2(w);
209 data->tile_height_log2 = util_logbase2(h);
210 data->data = indices;
211
212 struct dcc_retile_tile_key *heap_key = mem_dup(&key, sizeof(key));
213 if (!heap_key) {
214 free(data);
215 free(indices);
216 return NULL;
217 }
218
219 entry = _mesa_hash_table_insert(addrlib->dcc_retile_tile_indices, heap_key, data);
220 if (!entry) {
221 free(heap_key);
222 free(data);
223 free(indices);
224 }
225 return data;
226 }
227
228 static uint32_t ac_compute_retile_tile_addr(const struct dcc_retile_tile_data *tile,
229 unsigned stride, unsigned x, unsigned y)
230 {
231 unsigned x_mask = (1u << tile->tile_width_log2) - 1;
232 unsigned y_mask = (1u << tile->tile_height_log2) - 1;
233 unsigned tile_size_log2 = tile->tile_width_log2 + tile->tile_height_log2;
234
235 unsigned base = ((y >> tile->tile_height_log2) * stride + (x >> tile->tile_width_log2))
236 << tile_size_log2;
237 unsigned offset_in_tile = tile->data[((y & y_mask) << tile->tile_width_log2) + (x & x_mask)];
238 return base + offset_in_tile;
239 }
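/* Worked example (illustrative sketch, not part of the original file; all
 * values assumed): with tile_width_log2 = 2, tile_height_log2 = 1 and
 * stride = 3 tiles per row, the element at (x = 5, y = 3) lies in tile
 * (5 >> 2, 3 >> 1) = (1, 1), so base = (1 * 3 + 1) << 3 = 32 elements,
 * and the offset within the tile is data[((3 & 1) << 2) + (5 & 3)] = data[5].
 * The returned DCC element address is therefore 32 + data[5].
 */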
240
241 static uint32_t *ac_compute_dcc_retile_map(struct ac_addrlib *addrlib,
242 const struct radeon_info *info, unsigned retile_width,
243 unsigned retile_height, bool rb_aligned,
244 bool pipe_aligned, bool use_uint16,
245 unsigned dcc_retile_num_elements,
246 const ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT *in)
247 {
248 unsigned dcc_retile_map_size = dcc_retile_num_elements * (use_uint16 ? 2 : 4);
249 struct dcc_retile_map_key key;
250
251 assert(in->numFrags == 1 && in->numSlices == 1 && in->numMipLevels == 1);
252
253 memset(&key, 0, sizeof(key));
254 key.family = info->family;
255 key.retile_width = retile_width;
256 key.retile_height = retile_height;
257 key.rb_aligned = rb_aligned;
258 key.pipe_aligned = pipe_aligned;
259 key.dcc_retile_num_elements = dcc_retile_num_elements;
260 memcpy(&key.input, in, sizeof(*in));
261
262 simple_mtx_lock(&addrlib->dcc_retile_map_lock);
263
264 /* If we have already computed this retile map, get it from the hash table. */
265 struct hash_entry *entry = _mesa_hash_table_search(addrlib->dcc_retile_maps, &key);
266 if (entry) {
267 uint32_t *map = entry->data;
268 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
269 return map;
270 }
271
272 const struct dcc_retile_tile_data *src_tile = ac_compute_dcc_retile_tile_indices(
273 addrlib, info, in->bpp, in->swizzleMode, rb_aligned, pipe_aligned);
274 const struct dcc_retile_tile_data *dst_tile =
275 ac_compute_dcc_retile_tile_indices(addrlib, info, in->bpp, in->swizzleMode, false, false);
276 if (!src_tile || !dst_tile) {
277 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
278 return NULL;
279 }
280
281 void *dcc_retile_map = malloc(dcc_retile_map_size);
282 if (!dcc_retile_map) {
283 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
284 return NULL;
285 }
286
287 unsigned index = 0;
288 unsigned w = DIV_ROUND_UP(retile_width, in->compressBlkWidth);
289 unsigned h = DIV_ROUND_UP(retile_height, in->compressBlkHeight);
290 unsigned src_stride = DIV_ROUND_UP(w, 1u << src_tile->tile_width_log2);
291 unsigned dst_stride = DIV_ROUND_UP(w, 1u << dst_tile->tile_width_log2);
292
293 for (unsigned y = 0; y < h; ++y) {
294 for (unsigned x = 0; x < w; ++x) {
295 unsigned src_addr = ac_compute_retile_tile_addr(src_tile, src_stride, x, y);
296 unsigned dst_addr = ac_compute_retile_tile_addr(dst_tile, dst_stride, x, y);
297
298 if (use_uint16) {
299 ((uint16_t *)dcc_retile_map)[2 * index] = src_addr;
300 ((uint16_t *)dcc_retile_map)[2 * index + 1] = dst_addr;
301 } else {
302 ((uint32_t *)dcc_retile_map)[2 * index] = src_addr;
303 ((uint32_t *)dcc_retile_map)[2 * index + 1] = dst_addr;
304 }
305 ++index;
306 }
307 }
308
309 /* Fill the remaining pairs with the last one (for the compute shader). */
310 for (unsigned i = index * 2; i < dcc_retile_num_elements; i++) {
311 if (use_uint16)
312 ((uint16_t *)dcc_retile_map)[i] = ((uint16_t *)dcc_retile_map)[i - 2];
313 else
314 ((uint32_t *)dcc_retile_map)[i] = ((uint32_t *)dcc_retile_map)[i - 2];
315 }
316
317 /* Insert the retile map into the hash table, so that it can be reused and
318 * the computation can be skipped for similar image sizes.
319 */
320 _mesa_hash_table_insert(addrlib->dcc_retile_maps, mem_dup(&key, sizeof(key)), dcc_retile_map);
321
322 simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
323 return dcc_retile_map;
324 }
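/* Illustrative note (sketch, not part of the original file; buffer names are
 * placeholders): the returned map is a flat array of (src, dst) element-index
 * pairs, 16-bit or 32-bit depending on use_uint16. A retile pass (e.g. a
 * compute shader) is expected to copy, for each pair i:
 *
 *    displayable_dcc[map[2 * i + 1]] = aligned_dcc[map[2 * i]];
 *
 * where aligned_dcc is the rb/pipe-aligned DCC buffer and displayable_dcc is
 * the unaligned one computed with rb_aligned = pipe_aligned = false above.
 * The actual shader is driver-specific; this only shows how the pairs are
 * meant to be read.
 */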
325
326 static void *ADDR_API allocSysMem(const ADDR_ALLOCSYSMEM_INPUT *pInput)
327 {
328 return malloc(pInput->sizeInBytes);
329 }
330
331 static ADDR_E_RETURNCODE ADDR_API freeSysMem(const ADDR_FREESYSMEM_INPUT *pInput)
332 {
333 free(pInput->pVirtAddr);
334 return ADDR_OK;
335 }
336
337 struct ac_addrlib *ac_addrlib_create(const struct radeon_info *info,
338 const struct amdgpu_gpu_info *amdinfo, uint64_t *max_alignment)
339 {
340 ADDR_CREATE_INPUT addrCreateInput = {0};
341 ADDR_CREATE_OUTPUT addrCreateOutput = {0};
342 ADDR_REGISTER_VALUE regValue = {0};
343 ADDR_CREATE_FLAGS createFlags = {{0}};
344 ADDR_GET_MAX_ALIGNMENTS_OUTPUT addrGetMaxAlignmentsOutput = {0};
345 ADDR_E_RETURNCODE addrRet;
346
347 addrCreateInput.size = sizeof(ADDR_CREATE_INPUT);
348 addrCreateOutput.size = sizeof(ADDR_CREATE_OUTPUT);
349
350 regValue.gbAddrConfig = amdinfo->gb_addr_cfg;
351 createFlags.value = 0;
352
353 addrCreateInput.chipFamily = info->family_id;
354 addrCreateInput.chipRevision = info->chip_external_rev;
355
356 if (addrCreateInput.chipFamily == FAMILY_UNKNOWN)
357 return NULL;
358
359 if (addrCreateInput.chipFamily >= FAMILY_AI) {
360 addrCreateInput.chipEngine = CIASICIDGFXENGINE_ARCTICISLAND;
361 } else {
362 regValue.noOfBanks = amdinfo->mc_arb_ramcfg & 0x3;
363 regValue.noOfRanks = (amdinfo->mc_arb_ramcfg & 0x4) >> 2;
364
365 regValue.backendDisables = amdinfo->enabled_rb_pipes_mask;
366 regValue.pTileConfig = amdinfo->gb_tile_mode;
367 regValue.noOfEntries = ARRAY_SIZE(amdinfo->gb_tile_mode);
368 if (addrCreateInput.chipFamily == FAMILY_SI) {
369 regValue.pMacroTileConfig = NULL;
370 regValue.noOfMacroEntries = 0;
371 } else {
372 regValue.pMacroTileConfig = amdinfo->gb_macro_tile_mode;
373 regValue.noOfMacroEntries = ARRAY_SIZE(amdinfo->gb_macro_tile_mode);
374 }
375
376 createFlags.useTileIndex = 1;
377 createFlags.useHtileSliceAlign = 1;
378
379 addrCreateInput.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
380 }
381
382 addrCreateInput.callbacks.allocSysMem = allocSysMem;
383 addrCreateInput.callbacks.freeSysMem = freeSysMem;
384 addrCreateInput.callbacks.debugPrint = 0;
385 addrCreateInput.createFlags = createFlags;
386 addrCreateInput.regValue = regValue;
387
388 addrRet = AddrCreate(&addrCreateInput, &addrCreateOutput);
389 if (addrRet != ADDR_OK)
390 return NULL;
391
392 if (max_alignment) {
393 addrRet = AddrGetMaxAlignments(addrCreateOutput.hLib, &addrGetMaxAlignmentsOutput);
394 if (addrRet == ADDR_OK) {
395 *max_alignment = addrGetMaxAlignmentsOutput.baseAlign;
396 }
397 }
398
399 struct ac_addrlib *addrlib = calloc(1, sizeof(struct ac_addrlib));
400 if (!addrlib) {
401 AddrDestroy(addrCreateOutput.hLib);
402 return NULL;
403 }
404
405 addrlib->handle = addrCreateOutput.hLib;
406 simple_mtx_init(&addrlib->dcc_retile_map_lock, mtx_plain);
407 addrlib->dcc_retile_maps =
408 _mesa_hash_table_create(NULL, dcc_retile_map_hash_key, dcc_retile_map_keys_equal);
409 addrlib->dcc_retile_tile_indices =
410 _mesa_hash_table_create(NULL, dcc_retile_tile_hash_key, dcc_retile_tile_keys_equal);
411 return addrlib;
412 }
413
414 void ac_addrlib_destroy(struct ac_addrlib *addrlib)
415 {
416 AddrDestroy(addrlib->handle);
417 simple_mtx_destroy(&addrlib->dcc_retile_map_lock);
418 _mesa_hash_table_destroy(addrlib->dcc_retile_maps, dcc_retile_map_free);
419 _mesa_hash_table_destroy(addrlib->dcc_retile_tile_indices, dcc_retile_tile_free);
420 free(addrlib);
421 }
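/* Minimal usage sketch (not part of the original file; assumes the caller has
 * already queried radeon_info and amdgpu_gpu_info from the kernel, error
 * handling trimmed):
 *
 *    uint64_t max_align = 0;
 *    struct ac_addrlib *addrlib = ac_addrlib_create(&info, &amdinfo, &max_align);
 *    if (!addrlib)
 *       return false;
 *    ... compute surface layouts with addrlib ...
 *    ac_addrlib_destroy(addrlib);
 */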
422
423 static int surf_config_sanity(const struct ac_surf_config *config, unsigned flags)
424 {
425 /* FMASK is allocated together with the color surface and can't be
426 * allocated separately.
427 */
428 assert(!(flags & RADEON_SURF_FMASK));
429 if (flags & RADEON_SURF_FMASK)
430 return -EINVAL;
431
432 /* all dimensions must be at least 1! */
433 if (!config->info.width || !config->info.height || !config->info.depth ||
434 !config->info.array_size || !config->info.levels)
435 return -EINVAL;
436
437 switch (config->info.samples) {
438 case 0:
439 case 1:
440 case 2:
441 case 4:
442 case 8:
443 break;
444 case 16:
445 if (flags & RADEON_SURF_Z_OR_SBUFFER)
446 return -EINVAL;
447 break;
448 default:
449 return -EINVAL;
450 }
451
452 if (!(flags & RADEON_SURF_Z_OR_SBUFFER)) {
453 switch (config->info.storage_samples) {
454 case 0:
455 case 1:
456 case 2:
457 case 4:
458 case 8:
459 break;
460 default:
461 return -EINVAL;
462 }
463 }
464
465 if (config->is_3d && config->info.array_size > 1)
466 return -EINVAL;
467 if (config->is_cube && config->info.depth > 1)
468 return -EINVAL;
469
470 return 0;
471 }
472
473 static int gfx6_compute_level(ADDR_HANDLE addrlib, const struct ac_surf_config *config,
474 struct radeon_surf *surf, bool is_stencil, unsigned level,
475 bool compressed, ADDR_COMPUTE_SURFACE_INFO_INPUT *AddrSurfInfoIn,
476 ADDR_COMPUTE_SURFACE_INFO_OUTPUT *AddrSurfInfoOut,
477 ADDR_COMPUTE_DCCINFO_INPUT *AddrDccIn,
478 ADDR_COMPUTE_DCCINFO_OUTPUT *AddrDccOut,
479 ADDR_COMPUTE_HTILE_INFO_INPUT *AddrHtileIn,
480 ADDR_COMPUTE_HTILE_INFO_OUTPUT *AddrHtileOut)
481 {
482 struct legacy_surf_level *surf_level;
483 ADDR_E_RETURNCODE ret;
484
485 AddrSurfInfoIn->mipLevel = level;
486 AddrSurfInfoIn->width = u_minify(config->info.width, level);
487 AddrSurfInfoIn->height = u_minify(config->info.height, level);
488
489 /* Make GFX6 linear surfaces compatible with GFX9 for hybrid graphics,
490 * because GFX9 needs linear alignment of 256 bytes.
491 */
492 if (config->info.levels == 1 && AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED &&
493 AddrSurfInfoIn->bpp && util_is_power_of_two_or_zero(AddrSurfInfoIn->bpp)) {
494 unsigned alignment = 256 / (AddrSurfInfoIn->bpp / 8);
495
496 AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, alignment);
497 }
498
499 /* addrlib assumes the bytes/pixel is a divisor of 64, which is not
500 * true for r32g32b32 formats. */
501 if (AddrSurfInfoIn->bpp == 96) {
502 assert(config->info.levels == 1);
503 assert(AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED);
504
505 /* The least common multiple of 64 bytes and 12 bytes/pixel is
506 * 192 bytes, or 16 pixels. */
507 AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, 16);
508 }
509
510 if (config->is_3d)
511 AddrSurfInfoIn->numSlices = u_minify(config->info.depth, level);
512 else if (config->is_cube)
513 AddrSurfInfoIn->numSlices = 6;
514 else
515 AddrSurfInfoIn->numSlices = config->info.array_size;
516
517 if (level > 0) {
518 /* Set the base level pitch. This is needed for calculation
519 * of non-zero levels. */
520 if (is_stencil)
521 AddrSurfInfoIn->basePitch = surf->u.legacy.stencil_level[0].nblk_x;
522 else
523 AddrSurfInfoIn->basePitch = surf->u.legacy.level[0].nblk_x;
524
525 /* Convert blocks to pixels for compressed formats. */
526 if (compressed)
527 AddrSurfInfoIn->basePitch *= surf->blk_w;
528 }
529
530 ret = AddrComputeSurfaceInfo(addrlib, AddrSurfInfoIn, AddrSurfInfoOut);
531 if (ret != ADDR_OK) {
532 return ret;
533 }
534
535 surf_level = is_stencil ? &surf->u.legacy.stencil_level[level] : &surf->u.legacy.level[level];
536 surf_level->offset = align64(surf->surf_size, AddrSurfInfoOut->baseAlign);
537 surf_level->slice_size_dw = AddrSurfInfoOut->sliceSize / 4;
538 surf_level->nblk_x = AddrSurfInfoOut->pitch;
539 surf_level->nblk_y = AddrSurfInfoOut->height;
540
541 switch (AddrSurfInfoOut->tileMode) {
542 case ADDR_TM_LINEAR_ALIGNED:
543 surf_level->mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
544 break;
545 case ADDR_TM_1D_TILED_THIN1:
546 surf_level->mode = RADEON_SURF_MODE_1D;
547 break;
548 case ADDR_TM_2D_TILED_THIN1:
549 surf_level->mode = RADEON_SURF_MODE_2D;
550 break;
551 default:
552 assert(0);
553 }
554
555 if (is_stencil)
556 surf->u.legacy.stencil_tiling_index[level] = AddrSurfInfoOut->tileIndex;
557 else
558 surf->u.legacy.tiling_index[level] = AddrSurfInfoOut->tileIndex;
559
560 surf->surf_size = surf_level->offset + AddrSurfInfoOut->surfSize;
561
562 /* Clear DCC fields at the beginning. */
563 surf_level->dcc_offset = 0;
564
565 /* The previous level's flag tells us if we can use DCC for this level. */
566 if (AddrSurfInfoIn->flags.dccCompatible && (level == 0 || AddrDccOut->subLvlCompressible)) {
567 bool prev_level_clearable = level == 0 || AddrDccOut->dccRamSizeAligned;
568
569 AddrDccIn->colorSurfSize = AddrSurfInfoOut->surfSize;
570 AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
571 AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
572 AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
573 AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
574
575 ret = AddrComputeDccInfo(addrlib, AddrDccIn, AddrDccOut);
576
577 if (ret == ADDR_OK) {
578 surf_level->dcc_offset = surf->dcc_size;
579 surf->num_dcc_levels = level + 1;
580 surf->dcc_size = surf_level->dcc_offset + AddrDccOut->dccRamSize;
581 surf->dcc_alignment = MAX2(surf->dcc_alignment, AddrDccOut->dccRamBaseAlign);
582
583 /* If the DCC size of a subresource (1 mip level or 1 slice)
584 * is not aligned, the DCC memory layout is not contiguous for
585 * that subresource, which means we can't use fast clear.
586 *
587 * We only do fast clears for whole mipmap levels. If we did
588 * per-slice fast clears, the same restriction would apply.
589 * (i.e. only compute the slice size and see if it's aligned)
590 *
591 * The last level can be non-contiguous and still be clearable
592 * if it's interleaved with the next level that doesn't exist.
593 */
594 if (AddrDccOut->dccRamSizeAligned ||
595 (prev_level_clearable && level == config->info.levels - 1))
596 surf_level->dcc_fast_clear_size = AddrDccOut->dccFastClearSize;
597 else
598 surf_level->dcc_fast_clear_size = 0;
599
600 /* Compute the DCC slice size because addrlib doesn't
601 * provide this info. As DCC memory is linear (each
602 * slice is the same size) it's easy to compute.
603 */
604 surf->dcc_slice_size = AddrDccOut->dccRamSize / config->info.array_size;
605
606 /* For arrays, we have to compute the DCC info again
607 * with one slice size to get a correct fast clear
608 * size.
609 */
610 if (config->info.array_size > 1) {
611 AddrDccIn->colorSurfSize = AddrSurfInfoOut->sliceSize;
612 AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
613 AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
614 AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
615 AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
616
617 ret = AddrComputeDccInfo(addrlib, AddrDccIn, AddrDccOut);
618 if (ret == ADDR_OK) {
619 /* If the DCC memory isn't properly
620 * aligned, the data are interleaved
621 * across slices.
622 */
623 if (AddrDccOut->dccRamSizeAligned)
624 surf_level->dcc_slice_fast_clear_size = AddrDccOut->dccFastClearSize;
625 else
626 surf_level->dcc_slice_fast_clear_size = 0;
627 }
628
629 if (surf->flags & RADEON_SURF_CONTIGUOUS_DCC_LAYERS &&
630 surf->dcc_slice_size != surf_level->dcc_slice_fast_clear_size) {
631 surf->dcc_size = 0;
632 surf->num_dcc_levels = 0;
633 AddrDccOut->subLvlCompressible = false;
634 }
635 } else {
636 surf_level->dcc_slice_fast_clear_size = surf_level->dcc_fast_clear_size;
637 }
638 }
639 }
640
641 /* HTILE. */
642 if (!is_stencil && AddrSurfInfoIn->flags.depth && surf_level->mode == RADEON_SURF_MODE_2D &&
643 level == 0 && !(surf->flags & RADEON_SURF_NO_HTILE)) {
644 AddrHtileIn->flags.tcCompatible = AddrSurfInfoOut->tcCompatible;
645 AddrHtileIn->pitch = AddrSurfInfoOut->pitch;
646 AddrHtileIn->height = AddrSurfInfoOut->height;
647 AddrHtileIn->numSlices = AddrSurfInfoOut->depth;
648 AddrHtileIn->blockWidth = ADDR_HTILE_BLOCKSIZE_8;
649 AddrHtileIn->blockHeight = ADDR_HTILE_BLOCKSIZE_8;
650 AddrHtileIn->pTileInfo = AddrSurfInfoOut->pTileInfo;
651 AddrHtileIn->tileIndex = AddrSurfInfoOut->tileIndex;
652 AddrHtileIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
653
654 ret = AddrComputeHtileInfo(addrlib, AddrHtileIn, AddrHtileOut);
655
656 if (ret == ADDR_OK) {
657 surf->htile_size = AddrHtileOut->htileBytes;
658 surf->htile_slice_size = AddrHtileOut->sliceSize;
659 surf->htile_alignment = AddrHtileOut->baseAlign;
660 }
661 }
662
663 return 0;
664 }
665
666 static void gfx6_set_micro_tile_mode(struct radeon_surf *surf, const struct radeon_info *info)
667 {
668 uint32_t tile_mode = info->si_tile_mode_array[surf->u.legacy.tiling_index[0]];
669
670 if (info->chip_class >= GFX7)
671 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE_NEW(tile_mode);
672 else
673 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE(tile_mode);
674 }
675
676 static unsigned cik_get_macro_tile_index(struct radeon_surf *surf)
677 {
678 unsigned index, tileb;
679
680 tileb = 8 * 8 * surf->bpe;
681 tileb = MIN2(surf->u.legacy.tile_split, tileb);
682
683 for (index = 0; tileb > 64; index++)
684 tileb >>= 1;
685
686 assert(index < 16);
687 return index;
688 }
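/* Worked example (values assumed for illustration): for a 32bpp surface
 * (bpe = 4), one 8x8 micro tile is 8 * 8 * 4 = 256 bytes. With a tile split
 * of 1024 bytes, tileb = MIN2(1024, 256) = 256, which is halved twice before
 * it stops exceeding 64, so the returned macro tile index is 2.
 */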
689
690 static bool get_display_flag(const struct ac_surf_config *config, const struct radeon_surf *surf)
691 {
692 unsigned num_channels = config->info.num_channels;
693 unsigned bpe = surf->bpe;
694
695 if (!config->is_3d && !config->is_cube && !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
696 surf->flags & RADEON_SURF_SCANOUT && config->info.samples <= 1 && surf->blk_w <= 2 &&
697 surf->blk_h == 1) {
698 /* subsampled */
699 if (surf->blk_w == 2 && surf->blk_h == 1)
700 return true;
701
702 if (/* RGBA8 or RGBA16F */
703 (bpe >= 4 && bpe <= 8 && num_channels == 4) ||
704 /* R5G6B5 or R5G5B5A1 */
705 (bpe == 2 && num_channels >= 3) ||
706 /* C8 palette */
707 (bpe == 1 && num_channels == 1))
708 return true;
709 }
710 return false;
711 }
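/* Example (derived from the checks above): a single-sampled 2D scanout
 * surface with bpe = 4 and num_channels = 4 (e.g. RGBA8) returns true, as
 * does a subsampled format with blk_w = 2; any depth/stencil, cube, 3D or
 * multisampled surface returns false.
 */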
712
713 /**
714 * This must be called after the first level is computed.
715 *
716 * Copy surface-global settings like pipe/bank config from level 0 surface
717 * computation, and compute tile swizzle.
718 */
719 static int gfx6_surface_settings(ADDR_HANDLE addrlib, const struct radeon_info *info,
720 const struct ac_surf_config *config,
721 ADDR_COMPUTE_SURFACE_INFO_OUTPUT *csio, struct radeon_surf *surf)
722 {
723 surf->surf_alignment = csio->baseAlign;
724 surf->u.legacy.pipe_config = csio->pTileInfo->pipeConfig - 1;
725 gfx6_set_micro_tile_mode(surf, info);
726
727 /* For 2D modes only. */
728 if (csio->tileMode >= ADDR_TM_2D_TILED_THIN1) {
729 surf->u.legacy.bankw = csio->pTileInfo->bankWidth;
730 surf->u.legacy.bankh = csio->pTileInfo->bankHeight;
731 surf->u.legacy.mtilea = csio->pTileInfo->macroAspectRatio;
732 surf->u.legacy.tile_split = csio->pTileInfo->tileSplitBytes;
733 surf->u.legacy.num_banks = csio->pTileInfo->banks;
734 surf->u.legacy.macro_tile_index = csio->macroModeIndex;
735 } else {
736 surf->u.legacy.macro_tile_index = 0;
737 }
738
739 /* Compute tile swizzle. */
740 /* TODO: fix tile swizzle with mipmapping for GFX6 */
741 if ((info->chip_class >= GFX7 || config->info.levels == 1) && config->info.surf_index &&
742 surf->u.legacy.level[0].mode == RADEON_SURF_MODE_2D &&
743 !(surf->flags & (RADEON_SURF_Z_OR_SBUFFER | RADEON_SURF_SHAREABLE)) &&
744 !get_display_flag(config, surf)) {
745 ADDR_COMPUTE_BASE_SWIZZLE_INPUT AddrBaseSwizzleIn = {0};
746 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT AddrBaseSwizzleOut = {0};
747
748 AddrBaseSwizzleIn.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
749 AddrBaseSwizzleOut.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
750
751 AddrBaseSwizzleIn.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
752 AddrBaseSwizzleIn.tileIndex = csio->tileIndex;
753 AddrBaseSwizzleIn.macroModeIndex = csio->macroModeIndex;
754 AddrBaseSwizzleIn.pTileInfo = csio->pTileInfo;
755 AddrBaseSwizzleIn.tileMode = csio->tileMode;
756
757 int r = AddrComputeBaseSwizzle(addrlib, &AddrBaseSwizzleIn, &AddrBaseSwizzleOut);
758 if (r != ADDR_OK)
759 return r;
760
761 assert(AddrBaseSwizzleOut.tileSwizzle <=
762 u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
763 surf->tile_swizzle = AddrBaseSwizzleOut.tileSwizzle;
764 }
765 return 0;
766 }
767
768 static void ac_compute_cmask(const struct radeon_info *info, const struct ac_surf_config *config,
769 struct radeon_surf *surf)
770 {
771 unsigned pipe_interleave_bytes = info->pipe_interleave_bytes;
772 unsigned num_pipes = info->num_tile_pipes;
773 unsigned cl_width, cl_height;
774
775 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER || surf->is_linear ||
776 (config->info.samples >= 2 && !surf->fmask_size))
777 return;
778
779 assert(info->chip_class <= GFX8);
780
781 switch (num_pipes) {
782 case 2:
783 cl_width = 32;
784 cl_height = 16;
785 break;
786 case 4:
787 cl_width = 32;
788 cl_height = 32;
789 break;
790 case 8:
791 cl_width = 64;
792 cl_height = 32;
793 break;
794 case 16: /* Hawaii */
795 cl_width = 64;
796 cl_height = 64;
797 break;
798 default:
799 assert(0);
800 return;
801 }
802
803 unsigned base_align = num_pipes * pipe_interleave_bytes;
804
805 unsigned width = align(surf->u.legacy.level[0].nblk_x, cl_width * 8);
806 unsigned height = align(surf->u.legacy.level[0].nblk_y, cl_height * 8);
807 unsigned slice_elements = (width * height) / (8 * 8);
808
809 /* Each element of CMASK is a nibble. */
810 unsigned slice_bytes = slice_elements / 2;
811
812 surf->u.legacy.cmask_slice_tile_max = (width * height) / (128 * 128);
813 if (surf->u.legacy.cmask_slice_tile_max)
814 surf->u.legacy.cmask_slice_tile_max -= 1;
815
816 unsigned num_layers;
817 if (config->is_3d)
818 num_layers = config->info.depth;
819 else if (config->is_cube)
820 num_layers = 6;
821 else
822 num_layers = config->info.array_size;
823
824 surf->cmask_alignment = MAX2(256, base_align);
825 surf->cmask_slice_size = align(slice_bytes, base_align);
826 surf->cmask_size = surf->cmask_slice_size * num_layers;
827 }
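/* Worked example (all inputs assumed for illustration): with num_pipes = 4
 * (cl_width = cl_height = 32), pipe_interleave_bytes = 256 and a level-0 size
 * of 1920x1088 blocks, width/height are padded to 2048x1280, giving
 * 2048 * 1280 / 64 = 40960 nibbles = 20480 bytes per slice.
 * cmask_slice_tile_max = 2048 * 1280 / (128 * 128) - 1 = 159, base_align =
 * 4 * 256 = 1024, so cmask_slice_size = 20480 and cmask_alignment = 1024.
 */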
828
829 /**
830 * Fill in the tiling information in \p surf based on the given surface config.
831 *
832 * The following fields of \p surf must be initialized by the caller:
833 * blk_w, blk_h, bpe, flags.
834 */
835 static int gfx6_compute_surface(ADDR_HANDLE addrlib, const struct radeon_info *info,
836 const struct ac_surf_config *config, enum radeon_surf_mode mode,
837 struct radeon_surf *surf)
838 {
839 unsigned level;
840 bool compressed;
841 ADDR_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
842 ADDR_COMPUTE_SURFACE_INFO_OUTPUT AddrSurfInfoOut = {0};
843 ADDR_COMPUTE_DCCINFO_INPUT AddrDccIn = {0};
844 ADDR_COMPUTE_DCCINFO_OUTPUT AddrDccOut = {0};
845 ADDR_COMPUTE_HTILE_INFO_INPUT AddrHtileIn = {0};
846 ADDR_COMPUTE_HTILE_INFO_OUTPUT AddrHtileOut = {0};
847 ADDR_TILEINFO AddrTileInfoIn = {0};
848 ADDR_TILEINFO AddrTileInfoOut = {0};
849 int r;
850
851 AddrSurfInfoIn.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);
852 AddrSurfInfoOut.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
853 AddrDccIn.size = sizeof(ADDR_COMPUTE_DCCINFO_INPUT);
854 AddrDccOut.size = sizeof(ADDR_COMPUTE_DCCINFO_OUTPUT);
855 AddrHtileIn.size = sizeof(ADDR_COMPUTE_HTILE_INFO_INPUT);
856 AddrHtileOut.size = sizeof(ADDR_COMPUTE_HTILE_INFO_OUTPUT);
857 AddrSurfInfoOut.pTileInfo = &AddrTileInfoOut;
858
859 compressed = surf->blk_w == 4 && surf->blk_h == 4;
860
861 /* MSAA requires 2D tiling. */
862 if (config->info.samples > 1)
863 mode = RADEON_SURF_MODE_2D;
864
865 /* DB doesn't support linear layouts. */
866 if (surf->flags & (RADEON_SURF_Z_OR_SBUFFER) && mode < RADEON_SURF_MODE_1D)
867 mode = RADEON_SURF_MODE_1D;
868
869 /* Set the requested tiling mode. */
870 switch (mode) {
871 case RADEON_SURF_MODE_LINEAR_ALIGNED:
872 AddrSurfInfoIn.tileMode = ADDR_TM_LINEAR_ALIGNED;
873 break;
874 case RADEON_SURF_MODE_1D:
875 AddrSurfInfoIn.tileMode = ADDR_TM_1D_TILED_THIN1;
876 break;
877 case RADEON_SURF_MODE_2D:
878 AddrSurfInfoIn.tileMode = ADDR_TM_2D_TILED_THIN1;
879 break;
880 default:
881 assert(0);
882 }
883
884 /* The format must be set correctly for the allocation of compressed
885 * textures to work. In other cases, setting the bpp is sufficient.
886 */
887 if (compressed) {
888 switch (surf->bpe) {
889 case 8:
890 AddrSurfInfoIn.format = ADDR_FMT_BC1;
891 break;
892 case 16:
893 AddrSurfInfoIn.format = ADDR_FMT_BC3;
894 break;
895 default:
896 assert(0);
897 }
898 } else {
899 AddrDccIn.bpp = AddrSurfInfoIn.bpp = surf->bpe * 8;
900 }
901
902 AddrDccIn.numSamples = AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
903 AddrSurfInfoIn.tileIndex = -1;
904
905 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER)) {
906 AddrDccIn.numSamples = AddrSurfInfoIn.numFrags = MAX2(1, config->info.storage_samples);
907 }
908
909 /* Set the micro tile type. */
910 if (surf->flags & RADEON_SURF_SCANOUT)
911 AddrSurfInfoIn.tileType = ADDR_DISPLAYABLE;
912 else if (surf->flags & RADEON_SURF_Z_OR_SBUFFER)
913 AddrSurfInfoIn.tileType = ADDR_DEPTH_SAMPLE_ORDER;
914 else
915 AddrSurfInfoIn.tileType = ADDR_NON_DISPLAYABLE;
916
917 AddrSurfInfoIn.flags.color = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
918 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
919 AddrSurfInfoIn.flags.cube = config->is_cube;
920 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
921 AddrSurfInfoIn.flags.pow2Pad = config->info.levels > 1;
922 AddrSurfInfoIn.flags.tcCompatible = (surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) != 0;
923
924 /* Only degrade the tile mode for space if TC-compatible HTILE hasn't been
925 * requested, because TC-compatible HTILE requires 2D tiling.
926 */
927 AddrSurfInfoIn.flags.opt4Space = !AddrSurfInfoIn.flags.tcCompatible &&
928 !AddrSurfInfoIn.flags.fmask && config->info.samples <= 1 &&
929 !(surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE);
930
931 /* DCC notes:
932 * - If we add MSAA support, keep in mind that CB can't decompress 8bpp
933 * with samples >= 4.
934 * - Mipmapped array textures have low performance (discovered by a closed
935 * driver team).
936 */
937 AddrSurfInfoIn.flags.dccCompatible =
938 info->chip_class >= GFX8 && info->has_graphics && /* disable DCC on compute-only chips */
939 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
940 !compressed &&
941 ((config->info.array_size == 1 && config->info.depth == 1) || config->info.levels == 1);
942
943 AddrSurfInfoIn.flags.noStencil = (surf->flags & RADEON_SURF_SBUFFER) == 0;
944 AddrSurfInfoIn.flags.compressZ = !!(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
945
946 /* On GFX7-GFX8, the DB uses the same pitch and tile mode (except tilesplit)
947 * for Z and stencil. This can cause a number of problems which we work
948 * around here:
949 *
950 * - a depth part that is incompatible with mipmapped texturing
951 * - at least on Stoney, entirely incompatible Z/S aspects (e.g.
952 * incorrect tiling applied to the stencil part, stencil buffer
953 * memory accesses that go out of bounds) even without mipmapping
954 *
955 * Some piglit tests that are prone to different types of related
956 * failures:
957 * ./bin/ext_framebuffer_multisample-upsample 2 stencil
958 * ./bin/framebuffer-blit-levels {draw,read} stencil
959 * ./bin/ext_framebuffer_multisample-unaligned-blit N {depth,stencil} {msaa,upsample,downsample}
960 * ./bin/fbo-depth-array fs-writes-{depth,stencil} / {depth,stencil}-{clear,layered-clear,draw}
961 * ./bin/depthstencil-render-miplevels 1024 d=s=z24_s8
962 */
963 int stencil_tile_idx = -1;
964
965 if (AddrSurfInfoIn.flags.depth && !AddrSurfInfoIn.flags.noStencil &&
966 (config->info.levels > 1 || info->family == CHIP_STONEY)) {
967 /* Compute stencilTileIdx that is compatible with the (depth)
968 * tileIdx. This degrades the depth surface if necessary to
969 * ensure that a matching stencilTileIdx exists. */
970 AddrSurfInfoIn.flags.matchStencilTileCfg = 1;
971
972 /* Keep the depth mip-tail compatible with texturing. */
973 AddrSurfInfoIn.flags.noStencil = 1;
974 }
975
976 /* Set preferred macrotile parameters. This is usually required
977 * for shared resources. This is for 2D tiling only. */
978 if (AddrSurfInfoIn.tileMode >= ADDR_TM_2D_TILED_THIN1 && surf->u.legacy.bankw &&
979 surf->u.legacy.bankh && surf->u.legacy.mtilea && surf->u.legacy.tile_split) {
980 /* If any of these parameters are incorrect, the calculation
981 * will fail. */
982 AddrTileInfoIn.banks = surf->u.legacy.num_banks;
983 AddrTileInfoIn.bankWidth = surf->u.legacy.bankw;
984 AddrTileInfoIn.bankHeight = surf->u.legacy.bankh;
985 AddrTileInfoIn.macroAspectRatio = surf->u.legacy.mtilea;
986 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.tile_split;
987 AddrTileInfoIn.pipeConfig = surf->u.legacy.pipe_config + 1; /* +1 compared to GB_TILE_MODE */
988 AddrSurfInfoIn.flags.opt4Space = 0;
989 AddrSurfInfoIn.pTileInfo = &AddrTileInfoIn;
990
991 /* If AddrSurfInfoIn.pTileInfo is set, Addrlib doesn't set
992 * the tile index, because we are expected to know it if
993 * we know the other parameters.
994 *
995 * This is something that can easily be fixed in Addrlib.
996 * For now, just figure it out here.
997 * Note that only 2D_TILE_THIN1 is handled here.
998 */
999 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1000 assert(AddrSurfInfoIn.tileMode == ADDR_TM_2D_TILED_THIN1);
1001
1002 if (info->chip_class == GFX6) {
1003 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE) {
1004 if (surf->bpe == 2)
1005 AddrSurfInfoIn.tileIndex = 11; /* 16bpp */
1006 else
1007 AddrSurfInfoIn.tileIndex = 12; /* 32bpp */
1008 } else {
1009 if (surf->bpe == 1)
1010 AddrSurfInfoIn.tileIndex = 14; /* 8bpp */
1011 else if (surf->bpe == 2)
1012 AddrSurfInfoIn.tileIndex = 15; /* 16bpp */
1013 else if (surf->bpe == 4)
1014 AddrSurfInfoIn.tileIndex = 16; /* 32bpp */
1015 else
1016 AddrSurfInfoIn.tileIndex = 17; /* 64bpp (and 128bpp) */
1017 }
1018 } else {
1019 /* GFX7 - GFX8 */
1020 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE)
1021 AddrSurfInfoIn.tileIndex = 10; /* 2D displayable */
1022 else
1023 AddrSurfInfoIn.tileIndex = 14; /* 2D non-displayable */
1024
1025 /* Addrlib doesn't set this if tileIndex is forced like above. */
1026 AddrSurfInfoOut.macroModeIndex = cik_get_macro_tile_index(surf);
1027 }
1028 }
1029
1030 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
1031 surf->num_dcc_levels = 0;
1032 surf->surf_size = 0;
1033 surf->dcc_size = 0;
1034 surf->dcc_alignment = 1;
1035 surf->htile_size = 0;
1036 surf->htile_slice_size = 0;
1037 surf->htile_alignment = 1;
1038
1039 const bool only_stencil =
1040 (surf->flags & RADEON_SURF_SBUFFER) && !(surf->flags & RADEON_SURF_ZBUFFER);
1041
1042 /* Calculate texture layout information. */
1043 if (!only_stencil) {
1044 for (level = 0; level < config->info.levels; level++) {
1045 r = gfx6_compute_level(addrlib, config, surf, false, level, compressed, &AddrSurfInfoIn,
1046 &AddrSurfInfoOut, &AddrDccIn, &AddrDccOut, &AddrHtileIn,
1047 &AddrHtileOut);
1048 if (r)
1049 return r;
1050
1051 if (level > 0)
1052 continue;
1053
1054 if (!AddrSurfInfoOut.tcCompatible) {
1055 AddrSurfInfoIn.flags.tcCompatible = 0;
1056 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1057 }
1058
1059 if (AddrSurfInfoIn.flags.matchStencilTileCfg) {
1060 AddrSurfInfoIn.flags.matchStencilTileCfg = 0;
1061 AddrSurfInfoIn.tileIndex = AddrSurfInfoOut.tileIndex;
1062 stencil_tile_idx = AddrSurfInfoOut.stencilTileIdx;
1063
1064 assert(stencil_tile_idx >= 0);
1065 }
1066
1067 r = gfx6_surface_settings(addrlib, info, config, &AddrSurfInfoOut, surf);
1068 if (r)
1069 return r;
1070 }
1071 }
1072
1073 /* Calculate texture layout information for stencil. */
1074 if (surf->flags & RADEON_SURF_SBUFFER) {
1075 AddrSurfInfoIn.tileIndex = stencil_tile_idx;
1076 AddrSurfInfoIn.bpp = 8;
1077 AddrSurfInfoIn.flags.depth = 0;
1078 AddrSurfInfoIn.flags.stencil = 1;
1079 AddrSurfInfoIn.flags.tcCompatible = 0;
1080 /* This will be ignored if AddrSurfInfoIn.pTileInfo is NULL. */
1081 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.stencil_tile_split;
1082
1083 for (level = 0; level < config->info.levels; level++) {
1084 r = gfx6_compute_level(addrlib, config, surf, true, level, compressed, &AddrSurfInfoIn,
1085 &AddrSurfInfoOut, &AddrDccIn, &AddrDccOut, NULL, NULL);
1086 if (r)
1087 return r;
1088
1089 /* DB uses the depth pitch for both stencil and depth. */
1090 if (!only_stencil) {
1091 if (surf->u.legacy.stencil_level[level].nblk_x != surf->u.legacy.level[level].nblk_x)
1092 surf->u.legacy.stencil_adjusted = true;
1093 } else {
1094 surf->u.legacy.level[level].nblk_x = surf->u.legacy.stencil_level[level].nblk_x;
1095 }
1096
1097 if (level == 0) {
1098 if (only_stencil) {
1099 r = gfx6_surface_settings(addrlib, info, config, &AddrSurfInfoOut, surf);
1100 if (r)
1101 return r;
1102 }
1103
1104 /* For 2D modes only. */
1105 if (AddrSurfInfoOut.tileMode >= ADDR_TM_2D_TILED_THIN1) {
1106 surf->u.legacy.stencil_tile_split = AddrSurfInfoOut.pTileInfo->tileSplitBytes;
1107 }
1108 }
1109 }
1110 }
1111
1112 /* Compute FMASK. */
1113 if (config->info.samples >= 2 && AddrSurfInfoIn.flags.color && info->has_graphics &&
1114 !(surf->flags & RADEON_SURF_NO_FMASK)) {
1115 ADDR_COMPUTE_FMASK_INFO_INPUT fin = {0};
1116 ADDR_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
1117 ADDR_TILEINFO fmask_tile_info = {};
1118
1119 fin.size = sizeof(fin);
1120 fout.size = sizeof(fout);
1121
1122 fin.tileMode = AddrSurfInfoOut.tileMode;
1123 fin.pitch = AddrSurfInfoOut.pitch;
1124 fin.height = config->info.height;
1125 fin.numSlices = AddrSurfInfoIn.numSlices;
1126 fin.numSamples = AddrSurfInfoIn.numSamples;
1127 fin.numFrags = AddrSurfInfoIn.numFrags;
1128 fin.tileIndex = -1;
1129 fout.pTileInfo = &fmask_tile_info;
1130
1131 r = AddrComputeFmaskInfo(addrlib, &fin, &fout);
1132 if (r)
1133 return r;
1134
1135 surf->fmask_size = fout.fmaskBytes;
1136 surf->fmask_alignment = fout.baseAlign;
1137 surf->fmask_tile_swizzle = 0;
1138
1139 surf->u.legacy.fmask.slice_tile_max = (fout.pitch * fout.height) / 64;
1140 if (surf->u.legacy.fmask.slice_tile_max)
1141 surf->u.legacy.fmask.slice_tile_max -= 1;
1142
1143 surf->u.legacy.fmask.tiling_index = fout.tileIndex;
1144 surf->u.legacy.fmask.bankh = fout.pTileInfo->bankHeight;
1145 surf->u.legacy.fmask.pitch_in_pixels = fout.pitch;
1146 surf->u.legacy.fmask.slice_size = fout.sliceSize;
1147
1148 /* Compute tile swizzle for FMASK. */
1149 if (config->info.fmask_surf_index && !(surf->flags & RADEON_SURF_SHAREABLE)) {
1150 ADDR_COMPUTE_BASE_SWIZZLE_INPUT xin = {0};
1151 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT xout = {0};
1152
1153 xin.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
1154 xout.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
1155
1156 /* This counter starts from 1 instead of 0. */
1157 xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
1158 xin.tileIndex = fout.tileIndex;
1159 xin.macroModeIndex = fout.macroModeIndex;
1160 xin.pTileInfo = fout.pTileInfo;
1161 xin.tileMode = fin.tileMode;
1162
1163 int r = AddrComputeBaseSwizzle(addrlib, &xin, &xout);
1164 if (r != ADDR_OK)
1165 return r;
1166
1167 assert(xout.tileSwizzle <= u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1168 surf->fmask_tile_swizzle = xout.tileSwizzle;
1169 }
1170 }
1171
1172 /* Recalculate the whole DCC miptree size including disabled levels.
1173 * This is what addrlib does, but calling addrlib would be a lot more
1174 * complicated.
1175 */
1176 if (surf->dcc_size && config->info.levels > 1) {
1177 /* The smallest miplevels that are never compressed by DCC
1178 * still read the DCC buffer via TC if the base level uses DCC,
1179 * and for some reason the DCC buffer needs to be larger if
1180 * the miptree uses non-zero tile_swizzle. Otherwise there are
1181 * VM faults.
1182 *
1183 * "dcc_alignment * 4" was determined by trial and error.
1184 */
1185 surf->dcc_size = align64(surf->surf_size >> 8, surf->dcc_alignment * 4);
1186 }
1187
1188 /* Make sure HTILE covers the whole miptree, because the shader reads
1189 * TC-compatible HTILE even for levels where it's disabled by DB.
1190 */
1191 if (surf->htile_size && config->info.levels > 1 &&
1192 surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) {
1193 /* MSAA can't occur with levels > 1, so ignore the sample count. */
1194 const unsigned total_pixels = surf->surf_size / surf->bpe;
1195 const unsigned htile_block_size = 8 * 8;
1196 const unsigned htile_element_size = 4;
1197
1198 surf->htile_size = (total_pixels / htile_block_size) * htile_element_size;
1199 surf->htile_size = align(surf->htile_size, surf->htile_alignment);
1200 } else if (!surf->htile_size) {
1201 /* Unset this if HTILE is not present. */
1202 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1203 }
1204
1205 surf->is_linear = surf->u.legacy.level[0].mode == RADEON_SURF_MODE_LINEAR_ALIGNED;
1206 surf->is_displayable = surf->is_linear || surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY ||
1207 surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER;
1208
1209 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
1210 * used at the same time. This case is not currently expected to occur
1211 * because we don't use rotated. Enforce this restriction on all chips
1212 * to facilitate testing.
1213 */
1214 if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER) {
1215 assert(!"rotate micro tile mode is unsupported");
1216 return ADDR_ERROR;
1217 }
1218
1219 ac_compute_cmask(info, config, surf);
1220 return 0;
1221 }
1222
1223 /* This is only called when expecting a tiled layout. */
1224 static int gfx9_get_preferred_swizzle_mode(ADDR_HANDLE addrlib, struct radeon_surf *surf,
1225 ADDR2_COMPUTE_SURFACE_INFO_INPUT *in, bool is_fmask,
1226 AddrSwizzleMode *swizzle_mode)
1227 {
1228 ADDR_E_RETURNCODE ret;
1229 ADDR2_GET_PREFERRED_SURF_SETTING_INPUT sin = {0};
1230 ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT sout = {0};
1231
1232 sin.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_INPUT);
1233 sout.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT);
1234
1235 sin.flags = in->flags;
1236 sin.resourceType = in->resourceType;
1237 sin.format = in->format;
1238 sin.resourceLoction = ADDR_RSRC_LOC_INVIS;
1239 /* TODO: We could allow some of these: */
1240 sin.forbiddenBlock.micro = 1; /* don't allow the 256B swizzle modes */
1241 sin.forbiddenBlock.var = 1; /* don't allow the variable-sized swizzle modes */
1242 sin.bpp = in->bpp;
1243 sin.width = in->width;
1244 sin.height = in->height;
1245 sin.numSlices = in->numSlices;
1246 sin.numMipLevels = in->numMipLevels;
1247 sin.numSamples = in->numSamples;
1248 sin.numFrags = in->numFrags;
1249
1250 if (is_fmask) {
1251 sin.flags.display = 0;
1252 sin.flags.color = 0;
1253 sin.flags.fmask = 1;
1254 }
1255
1256 if (surf->flags & RADEON_SURF_FORCE_MICRO_TILE_MODE) {
1257 sin.forbiddenBlock.linear = 1;
1258
1259 if (surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY)
1260 sin.preferredSwSet.sw_D = 1;
1261 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_STANDARD)
1262 sin.preferredSwSet.sw_S = 1;
1263 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_DEPTH)
1264 sin.preferredSwSet.sw_Z = 1;
1265 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER)
1266 sin.preferredSwSet.sw_R = 1;
1267 }
1268
1269 ret = Addr2GetPreferredSurfaceSetting(addrlib, &sin, &sout);
1270 if (ret != ADDR_OK)
1271 return ret;
1272
1273 *swizzle_mode = sout.swizzleMode;
1274 return 0;
1275 }
1276
1277 static bool is_dcc_supported_by_CB(const struct radeon_info *info, unsigned sw_mode)
1278 {
1279 if (info->chip_class >= GFX10)
1280 return sw_mode == ADDR_SW_64KB_Z_X || sw_mode == ADDR_SW_64KB_R_X;
1281
1282 return sw_mode != ADDR_SW_LINEAR;
1283 }
1284
1285 ASSERTED static bool is_dcc_supported_by_L2(const struct radeon_info *info,
1286 const struct radeon_surf *surf)
1287 {
1288 if (info->chip_class <= GFX9) {
1289 /* Only independent 64B blocks are supported. */
1290 return surf->u.gfx9.dcc.independent_64B_blocks && !surf->u.gfx9.dcc.independent_128B_blocks &&
1291 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B;
1292 }
1293
1294 if (info->family == CHIP_NAVI10) {
1295 /* Only independent 128B blocks are supported. */
1296 return !surf->u.gfx9.dcc.independent_64B_blocks && surf->u.gfx9.dcc.independent_128B_blocks &&
1297 surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B;
1298 }
1299
1300 if (info->family == CHIP_NAVI12 || info->family == CHIP_NAVI14) {
1301 /* Either 64B or 128B can be used, but not both.
1302 * If 64B is used, DCC image stores are unsupported.
1303 */
1304 return surf->u.gfx9.dcc.independent_64B_blocks != surf->u.gfx9.dcc.independent_128B_blocks &&
1305 (!surf->u.gfx9.dcc.independent_64B_blocks ||
1306 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B) &&
1307 (!surf->u.gfx9.dcc.independent_128B_blocks ||
1308 surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B);
1309 }
1310
1311 /* 128B is recommended, but 64B can be set too if needed for 4K by DCN.
1312 * Since there is no reason to ever disable 128B, require it.
1313 * DCC image stores are always supported.
1314 */
1315 return surf->u.gfx9.dcc.independent_128B_blocks &&
1316 surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B;
1317 }
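/* Summary of the checks above (derived from the code, for quick reference):
 *   GFX9 and older: independent 64B blocks only, max compressed block 64B.
 *   Navi10:         independent 128B blocks only, max compressed block <= 128B.
 *   Navi12/Navi14:  exactly one of 64B/128B; 64B requires max block 64B (and
 *                   disables DCC image stores), 128B requires max block <= 128B.
 *   Later chips:    independent 128B blocks required, max block <= 128B,
 *                   DCC image stores always supported.
 */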
1318
1319 static bool is_dcc_supported_by_DCN(const struct radeon_info *info,
1320 const struct ac_surf_config *config,
1321 const struct radeon_surf *surf, bool rb_aligned,
1322 bool pipe_aligned)
1323 {
1324 if (!info->use_display_dcc_unaligned && !info->use_display_dcc_with_retile_blit)
1325 return false;
1326
1327 /* 16bpp and 64bpp are more complicated, so they are disallowed for now. */
1328 if (surf->bpe != 4)
1329 return false;
1330
1331 /* Handle unaligned DCC. */
1332 if (info->use_display_dcc_unaligned && (rb_aligned || pipe_aligned))
1333 return false;
1334
1335 switch (info->chip_class) {
1336 case GFX9:
1337 /* There are more constraints, but we always set
1338 * INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B,
1339 * which always works.
1340 */
1341 assert(surf->u.gfx9.dcc.independent_64B_blocks &&
1342 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B);
1343 return true;
1344 case GFX10:
1345 case GFX10_3:
1346 /* DCN requires INDEPENDENT_128B_BLOCKS = 0 only on Navi1x. */
1347 if (info->chip_class == GFX10 && surf->u.gfx9.dcc.independent_128B_blocks)
1348 return false;
1349
1350 /* For 4K, DCN requires INDEPENDENT_64B_BLOCKS = 1. */
1351 return ((config->info.width <= 2560 && config->info.height <= 2560) ||
1352 (surf->u.gfx9.dcc.independent_64B_blocks &&
1353 surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B));
1354 default:
1355 unreachable("unhandled chip");
1356 return false;
1357 }
1358 }
1359
1360 static int gfx9_compute_miptree(struct ac_addrlib *addrlib, const struct radeon_info *info,
1361 const struct ac_surf_config *config, struct radeon_surf *surf,
1362 bool compressed, ADDR2_COMPUTE_SURFACE_INFO_INPUT *in)
1363 {
1364 ADDR2_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {};
1365 ADDR2_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
1366 ADDR_E_RETURNCODE ret;
1367
1368 out.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_OUTPUT);
1369 out.pMipInfo = mip_info;
1370
1371 ret = Addr2ComputeSurfaceInfo(addrlib->handle, in, &out);
1372 if (ret != ADDR_OK)
1373 return ret;
1374
1375 if (in->flags.stencil) {
1376 surf->u.gfx9.stencil.swizzle_mode = in->swizzleMode;
1377 surf->u.gfx9.stencil.epitch =
1378 out.epitchIsHeight ? out.mipChainHeight - 1 : out.mipChainPitch - 1;
1379 surf->surf_alignment = MAX2(surf->surf_alignment, out.baseAlign);
1380 surf->u.gfx9.stencil_offset = align(surf->surf_size, out.baseAlign);
1381 surf->surf_size = surf->u.gfx9.stencil_offset + out.surfSize;
1382 return 0;
1383 }
1384
1385 surf->u.gfx9.surf.swizzle_mode = in->swizzleMode;
1386 surf->u.gfx9.surf.epitch = out.epitchIsHeight ? out.mipChainHeight - 1 : out.mipChainPitch - 1;
1387
1388 /* CMASK fast clear uses these even if FMASK isn't allocated.
1389 * FMASK only supports the Z swizzle modes, whose numbers are multiples of 4.
1390 */
1391 surf->u.gfx9.fmask.swizzle_mode = surf->u.gfx9.surf.swizzle_mode & ~0x3;
1392 surf->u.gfx9.fmask.epitch = surf->u.gfx9.surf.epitch;
1393
1394 surf->u.gfx9.surf_slice_size = out.sliceSize;
1395 surf->u.gfx9.surf_pitch = out.pitch;
1396 surf->u.gfx9.surf_height = out.height;
1397 surf->surf_size = out.surfSize;
1398 surf->surf_alignment = out.baseAlign;
1399
1400 if (!compressed && surf->blk_w > 1 && out.pitch == out.pixelPitch &&
1401 surf->u.gfx9.surf.swizzle_mode == ADDR_SW_LINEAR) {
1402 /* Adjust surf_pitch to be in element units, not in pixels. */
1403 surf->u.gfx9.surf_pitch = align(surf->u.gfx9.surf_pitch / surf->blk_w, 256 / surf->bpe);
1404 surf->u.gfx9.surf.epitch =
1405 MAX2(surf->u.gfx9.surf.epitch, surf->u.gfx9.surf_pitch * surf->blk_w - 1);
1406 /* The surface is really a surf->bpe bytes per pixel surface even if we
1407 * use it as a surf->bpe bytes per element one.
1408 * Adjust surf_slice_size and surf_size to reflect the change
1409 * made to surf_pitch.
1410 */
1411 surf->u.gfx9.surf_slice_size =
1412 MAX2(surf->u.gfx9.surf_slice_size,
1413 surf->u.gfx9.surf_pitch * out.height * surf->bpe * surf->blk_w);
1414 surf->surf_size = surf->u.gfx9.surf_slice_size * in->numSlices;
1415 }
1416
1417 if (in->swizzleMode == ADDR_SW_LINEAR) {
1418 for (unsigned i = 0; i < in->numMipLevels; i++) {
1419 surf->u.gfx9.offset[i] = mip_info[i].offset;
1420 surf->u.gfx9.pitch[i] = mip_info[i].pitch;
1421 }
1422 }
1423
1424 if (in->flags.depth) {
1425 assert(in->swizzleMode != ADDR_SW_LINEAR);
1426
1427 if (surf->flags & RADEON_SURF_NO_HTILE)
1428 return 0;
1429
1430 /* HTILE */
1431 ADDR2_COMPUTE_HTILE_INFO_INPUT hin = {0};
1432 ADDR2_COMPUTE_HTILE_INFO_OUTPUT hout = {0};
1433
1434 hin.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_INPUT);
1435 hout.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_OUTPUT);
1436
1437 assert(in->flags.metaPipeUnaligned == 0);
1438 assert(in->flags.metaRbUnaligned == 0);
1439
1440 hin.hTileFlags.pipeAligned = 1;
1441 hin.hTileFlags.rbAligned = 1;
1442 hin.depthFlags = in->flags;
1443 hin.swizzleMode = in->swizzleMode;
1444 hin.unalignedWidth = in->width;
1445 hin.unalignedHeight = in->height;
1446 hin.numSlices = in->numSlices;
1447 hin.numMipLevels = in->numMipLevels;
1448 hin.firstMipIdInTail = out.firstMipIdInTail;
1449
1450 ret = Addr2ComputeHtileInfo(addrlib->handle, &hin, &hout);
1451 if (ret != ADDR_OK)
1452 return ret;
1453
1454 surf->htile_size = hout.htileBytes;
1455 surf->htile_slice_size = hout.sliceSize;
1456 surf->htile_alignment = hout.baseAlign;
1457 return 0;
1458 }
1459
1460 {
1461 /* Compute tile swizzle for the color surface.
1462 * All *_X and *_T modes can use the swizzle.
1463 */
1464 if (config->info.surf_index && in->swizzleMode >= ADDR_SW_64KB_Z_T && !out.mipChainInTail &&
1465 !(surf->flags & RADEON_SURF_SHAREABLE) && !in->flags.display) {
1466 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
1467 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
1468
1469 xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
1470 xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
1471
1472 xin.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
1473 xin.flags = in->flags;
1474 xin.swizzleMode = in->swizzleMode;
1475 xin.resourceType = in->resourceType;
1476 xin.format = in->format;
1477 xin.numSamples = in->numSamples;
1478 xin.numFrags = in->numFrags;
1479
1480 ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
1481 if (ret != ADDR_OK)
1482 return ret;
1483
1484 assert(xout.pipeBankXor <= u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1485 surf->tile_swizzle = xout.pipeBankXor;
1486 }
1487
1488 /* DCC */
1489 if (info->has_graphics && !(surf->flags & RADEON_SURF_DISABLE_DCC) && !compressed &&
1490 is_dcc_supported_by_CB(info, in->swizzleMode) &&
1491 (!in->flags.display ||
1492 is_dcc_supported_by_DCN(info, config, surf, !in->flags.metaRbUnaligned,
1493 !in->flags.metaPipeUnaligned))) {
1494 ADDR2_COMPUTE_DCCINFO_INPUT din = {0};
1495 ADDR2_COMPUTE_DCCINFO_OUTPUT dout = {0};
1496 ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {};
1497
1498 din.size = sizeof(ADDR2_COMPUTE_DCCINFO_INPUT);
1499 dout.size = sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT);
1500 dout.pMipInfo = meta_mip_info;
1501
1502 din.dccKeyFlags.pipeAligned = !in->flags.metaPipeUnaligned;
1503 din.dccKeyFlags.rbAligned = !in->flags.metaRbUnaligned;
1504 din.resourceType = in->resourceType;
1505 din.swizzleMode = in->swizzleMode;
1506 din.bpp = in->bpp;
1507 din.unalignedWidth = in->width;
1508 din.unalignedHeight = in->height;
1509 din.numSlices = in->numSlices;
1510 din.numFrags = in->numFrags;
1511 din.numMipLevels = in->numMipLevels;
1512 din.dataSurfaceSize = out.surfSize;
1513 din.firstMipIdInTail = out.firstMipIdInTail;
1514
1515 ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
1516 if (ret != ADDR_OK)
1517 return ret;
1518
1519 surf->u.gfx9.dcc.rb_aligned = din.dccKeyFlags.rbAligned;
1520 surf->u.gfx9.dcc.pipe_aligned = din.dccKeyFlags.pipeAligned;
1521 surf->u.gfx9.dcc_block_width = dout.compressBlkWidth;
1522 surf->u.gfx9.dcc_block_height = dout.compressBlkHeight;
1523 surf->u.gfx9.dcc_block_depth = dout.compressBlkDepth;
1524 surf->dcc_size = dout.dccRamSize;
1525 surf->dcc_alignment = dout.dccRamBaseAlign;
1526 surf->num_dcc_levels = in->numMipLevels;
1527
1528 /* Disable DCC for levels that are in the mip tail.
1529 *
1530 * There are two issues that this is intended to
1531 * address:
1532 *
1533 * 1. Multiple mip levels may share a cache line. This
1534 * can lead to corruption when switching between
1535 * rendering to different mip levels because the
1536 * RBs don't maintain coherency.
1537 *
1538 * 2. Texturing with metadata after rendering sometimes
1539 * fails with corruption, probably for a similar
1540 * reason.
1541 *
1542 * Working around these issues for all levels in the
1543 * mip tail may be overly conservative, but it's what
1544 * Vulkan does.
1545 *
1546 * Alternative solutions that also work but are worse:
1547 * - Disable DCC entirely.
1548 * - Flush TC L2 after rendering.
1549 */
1550 for (unsigned i = 0; i < in->numMipLevels; i++) {
1551 if (meta_mip_info[i].inMiptail) {
1552 /* GFX10 can only compress the first level
1553 * in the mip tail.
1554 *
1555 * TODO: Try to do the same thing for gfx9
1556 * if there are no regressions.
1557 */
1558 if (info->chip_class >= GFX10)
1559 surf->num_dcc_levels = i + 1;
1560 else
1561 surf->num_dcc_levels = i;
1562 break;
1563 }
1564 }
1565
1566 if (!surf->num_dcc_levels)
1567 surf->dcc_size = 0;
1568
1569 surf->u.gfx9.display_dcc_size = surf->dcc_size;
1570 surf->u.gfx9.display_dcc_alignment = surf->dcc_alignment;
1571 surf->u.gfx9.display_dcc_pitch_max = dout.pitch - 1;
1572
1573 /* Compute displayable DCC. */
1574 if (in->flags.display && surf->num_dcc_levels && info->use_display_dcc_with_retile_blit) {
1575 /* Compute displayable DCC info. */
1576 din.dccKeyFlags.pipeAligned = 0;
1577 din.dccKeyFlags.rbAligned = 0;
1578
1579 assert(din.numSlices == 1);
1580 assert(din.numMipLevels == 1);
1581 assert(din.numFrags == 1);
1582 assert(surf->tile_swizzle == 0);
1583 assert(surf->u.gfx9.dcc.pipe_aligned || surf->u.gfx9.dcc.rb_aligned);
1584
1585 ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
1586 if (ret != ADDR_OK)
1587 return ret;
1588
1589 surf->u.gfx9.display_dcc_size = dout.dccRamSize;
1590 surf->u.gfx9.display_dcc_alignment = dout.dccRamBaseAlign;
1591 surf->u.gfx9.display_dcc_pitch_max = dout.pitch - 1;
1592 assert(surf->u.gfx9.display_dcc_size <= surf->dcc_size);
1593
1594 surf->u.gfx9.dcc_retile_use_uint16 =
1595 surf->u.gfx9.display_dcc_size <= UINT16_MAX + 1 && surf->dcc_size <= UINT16_MAX + 1;
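/* The retile map elements are offsets into the DCC buffers; 16 bits can only
 * encode offsets up to 65535, so the uint16 variant is usable only when both
 * the aligned DCC and the displayable DCC fit in 64KB.
 */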
1596
1597 /* Align the retile map size to get more hash table hits and
1598 * decrease the maximum memory footprint when all retile maps
1599 * are cached in the hash table.
1600 */
1601 unsigned retile_dim[2] = {in->width, in->height};
1602
1603 for (unsigned i = 0; i < 2; i++) {
1604 /* Increase the alignment as the size increases.
1605 * Greater alignment increases retile compute work,
1606 * but decreases maximum memory footprint for the cache.
1607 *
1608 * With this alignment, the worst case memory footprint of
1609 * the cache is:
1610 * 1920x1080: 55 MB
1611 * 2560x1440: 99 MB
1612 * 3840x2160: 305 MB
1613 *
1614 * The worst case size in MB can be computed in Haskell as follows:
1615 *    (sum (map get_retile_size (map get_dcc_size (deduplicate (map align_pair
1616 *       [(i*16,j*16) | i <- [1..maxwidth`div`16], j <- [1..maxheight`div`16]]))))) `div` 1024^2
1617 *    where alignment x = if x <= 512 then 16 else if x <= 1024 then 32 else if x <= 2048 then 64 else 128
1618 *          align x = (x + (alignment x) - 1) `div` (alignment x) * (alignment x)
1619 *          align_pair e = (align (fst e), align (snd e))
1620 *          deduplicate = map head . groupBy (\ a b -> ((fst a) == (fst b)) && ((snd a) == (snd b))) . sortBy compare
1621 *          get_dcc_size e = ((fst e) * (snd e) * bpp) `div` 256
1622 *          get_retile_size dcc_size = dcc_size * 2 * (if dcc_size <= 2^16 then 2 else 4)
1623 *          bpp = 4; maxwidth = 3840; maxheight = 2160
1624 */
1625 if (retile_dim[i] <= 512)
1626 retile_dim[i] = align(retile_dim[i], 16);
1627 else if (retile_dim[i] <= 1024)
1628 retile_dim[i] = align(retile_dim[i], 32);
1629 else if (retile_dim[i] <= 2048)
1630 retile_dim[i] = align(retile_dim[i], 64);
1631 else
1632 retile_dim[i] = align(retile_dim[i], 128);
1633
1634 /* Don't align more than the DCC pixel alignment. */
1635 assert(dout.metaBlkWidth >= 128 && dout.metaBlkHeight >= 128);
1636 }
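/* Worked example (illustrative): for a 1920x1080 swap chain image, 1920 falls
 * in the "<= 2048" bucket and is already a multiple of 64, while 1080 is
 * aligned up to 1088, so the retile map is computed for 1920x1088 and can be
 * shared between images of slightly different sizes.
 */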
1637
1638 surf->u.gfx9.dcc_retile_num_elements =
1639 DIV_ROUND_UP(retile_dim[0], dout.compressBlkWidth) *
1640 DIV_ROUND_UP(retile_dim[1], dout.compressBlkHeight) * 2;
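/* The factor of 2 reflects that the map appears to store a pair of DCC
 * offsets per compressed block: a non-displayable source and a displayable
 * destination (see the address mapping computed below).
 */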
1641 /* Align the size to 4 (for the compute shader). */
1642 surf->u.gfx9.dcc_retile_num_elements = align(surf->u.gfx9.dcc_retile_num_elements, 4);
1643
1644 if (!(surf->flags & RADEON_SURF_IMPORTED)) {
1645 /* Compute address mapping from non-displayable to displayable DCC. */
1646 ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin;
1647 memset(&addrin, 0, sizeof(addrin));
1648 addrin.size = sizeof(addrin);
1649 addrin.swizzleMode = din.swizzleMode;
1650 addrin.resourceType = din.resourceType;
1651 addrin.bpp = din.bpp;
1652 addrin.numSlices = 1;
1653 addrin.numMipLevels = 1;
1654 addrin.numFrags = 1;
1655 addrin.pitch = dout.pitch;
1656 addrin.height = dout.height;
1657 addrin.compressBlkWidth = dout.compressBlkWidth;
1658 addrin.compressBlkHeight = dout.compressBlkHeight;
1659 addrin.compressBlkDepth = dout.compressBlkDepth;
1660 addrin.metaBlkWidth = dout.metaBlkWidth;
1661 addrin.metaBlkHeight = dout.metaBlkHeight;
1662 addrin.metaBlkDepth = dout.metaBlkDepth;
1663 addrin.dccRamSliceSize = 0; /* Don't care for non-layered images. */
1664
1665 surf->u.gfx9.dcc_retile_map = ac_compute_dcc_retile_map(
1666 addrlib, info, retile_dim[0], retile_dim[1], surf->u.gfx9.dcc.rb_aligned,
1667 surf->u.gfx9.dcc.pipe_aligned, surf->u.gfx9.dcc_retile_use_uint16,
1668 surf->u.gfx9.dcc_retile_num_elements, &addrin);
1669 if (!surf->u.gfx9.dcc_retile_map)
1670 return ADDR_OUTOFMEMORY;
1671 }
1672 }
1673 }
1674
1675 /* FMASK */
1676 if (in->numSamples > 1 && info->has_graphics && !(surf->flags & RADEON_SURF_NO_FMASK)) {
1677 ADDR2_COMPUTE_FMASK_INFO_INPUT fin = {0};
1678 ADDR2_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
1679
1680 fin.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_INPUT);
1681 fout.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_OUTPUT);
1682
1683 ret = gfx9_get_preferred_swizzle_mode(addrlib->handle, surf, in, true, &fin.swizzleMode);
1684 if (ret != ADDR_OK)
1685 return ret;
1686
1687 fin.unalignedWidth = in->width;
1688 fin.unalignedHeight = in->height;
1689 fin.numSlices = in->numSlices;
1690 fin.numSamples = in->numSamples;
1691 fin.numFrags = in->numFrags;
1692
1693 ret = Addr2ComputeFmaskInfo(addrlib->handle, &fin, &fout);
1694 if (ret != ADDR_OK)
1695 return ret;
1696
1697 surf->u.gfx9.fmask.swizzle_mode = fin.swizzleMode;
1698 surf->u.gfx9.fmask.epitch = fout.pitch - 1;
1699 surf->fmask_size = fout.fmaskBytes;
1700 surf->fmask_alignment = fout.baseAlign;
1701
1702 /* Compute tile swizzle for the FMASK surface. */
1703 if (config->info.fmask_surf_index && fin.swizzleMode >= ADDR_SW_64KB_Z_T &&
1704 !(surf->flags & RADEON_SURF_SHAREABLE)) {
1705 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
1706 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
1707
1708 xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
1709 xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
1710
1711 /* This counter starts from 1 instead of 0. */
1712 xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
1713 xin.flags = in->flags;
1714 xin.swizzleMode = fin.swizzleMode;
1715 xin.resourceType = in->resourceType;
1716 xin.format = in->format;
1717 xin.numSamples = in->numSamples;
1718 xin.numFrags = in->numFrags;
1719
1720 ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
1721 if (ret != ADDR_OK)
1722 return ret;
1723
1724 assert(xout.pipeBankXor <= u_bit_consecutive(0, sizeof(surf->fmask_tile_swizzle) * 8));
1725 surf->fmask_tile_swizzle = xout.pipeBankXor;
1726 }
1727 }
1728
1729 /* CMASK -- on GFX10 only for FMASK */
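/* In other words: on GFX9 and older, CMASK is also allocated for tiled,
 * single-sample 2D color surfaces with RB/pipe-aligned metadata; on GFX10+
 * it is only allocated together with an FMASK surface (MSAA).
 */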
1730 if (in->swizzleMode != ADDR_SW_LINEAR && in->resourceType == ADDR_RSRC_TEX_2D &&
1731 ((info->chip_class <= GFX9 && in->numSamples == 1 && in->flags.metaPipeUnaligned == 0 &&
1732 in->flags.metaRbUnaligned == 0) ||
1733 (surf->fmask_size && in->numSamples >= 2))) {
1734 ADDR2_COMPUTE_CMASK_INFO_INPUT cin = {0};
1735 ADDR2_COMPUTE_CMASK_INFO_OUTPUT cout = {0};
1736
1737 cin.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_INPUT);
1738 cout.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_OUTPUT);
1739
1740 assert(in->flags.metaPipeUnaligned == 0);
1741 assert(in->flags.metaRbUnaligned == 0);
1742
1743 cin.cMaskFlags.pipeAligned = 1;
1744 cin.cMaskFlags.rbAligned = 1;
1745 cin.resourceType = in->resourceType;
1746 cin.unalignedWidth = in->width;
1747 cin.unalignedHeight = in->height;
1748 cin.numSlices = in->numSlices;
1749
1750 if (in->numSamples > 1)
1751 cin.swizzleMode = surf->u.gfx9.fmask.swizzle_mode;
1752 else
1753 cin.swizzleMode = in->swizzleMode;
1754
1755 ret = Addr2ComputeCmaskInfo(addrlib->handle, &cin, &cout);
1756 if (ret != ADDR_OK)
1757 return ret;
1758
1759 surf->cmask_size = cout.cmaskBytes;
1760 surf->cmask_alignment = cout.baseAlign;
1761 }
1762 }
1763
1764 return 0;
1765 }
1766
1767 static int gfx9_compute_surface(struct ac_addrlib *addrlib, const struct radeon_info *info,
1768 const struct ac_surf_config *config, enum radeon_surf_mode mode,
1769 struct radeon_surf *surf)
1770 {
1771 bool compressed;
1772 ADDR2_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
1773 int r;
1774
1775 AddrSurfInfoIn.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_INPUT);
1776
1777 compressed = surf->blk_w == 4 && surf->blk_h == 4;
1778
1779 /* The format must be set correctly for the allocation of compressed
1780 * textures to work. In other cases, setting the bpp is sufficient. */
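/* BC1 and BC3 are used here only as representative 8-byte and 16-byte 4x4
 * block formats; presumably any compressed format with the same block size
 * would produce the same layout, since addrlib only needs the block
 * dimensions and size.
 */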
1781 if (compressed) {
1782 switch (surf->bpe) {
1783 case 8:
1784 AddrSurfInfoIn.format = ADDR_FMT_BC1;
1785 break;
1786 case 16:
1787 AddrSurfInfoIn.format = ADDR_FMT_BC3;
1788 break;
1789 default:
1790 assert(0);
1791 }
1792 } else {
1793 switch (surf->bpe) {
1794 case 1:
1795 assert(!(surf->flags & RADEON_SURF_ZBUFFER));
1796 AddrSurfInfoIn.format = ADDR_FMT_8;
1797 break;
1798 case 2:
1799 assert(surf->flags & RADEON_SURF_ZBUFFER || !(surf->flags & RADEON_SURF_SBUFFER));
1800 AddrSurfInfoIn.format = ADDR_FMT_16;
1801 break;
1802 case 4:
1803 assert(surf->flags & RADEON_SURF_ZBUFFER || !(surf->flags & RADEON_SURF_SBUFFER));
1804 AddrSurfInfoIn.format = ADDR_FMT_32;
1805 break;
1806 case 8:
1807 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1808 AddrSurfInfoIn.format = ADDR_FMT_32_32;
1809 break;
1810 case 12:
1811 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1812 AddrSurfInfoIn.format = ADDR_FMT_32_32_32;
1813 break;
1814 case 16:
1815 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1816 AddrSurfInfoIn.format = ADDR_FMT_32_32_32_32;
1817 break;
1818 default:
1819 assert(0);
1820 }
1821 AddrSurfInfoIn.bpp = surf->bpe * 8;
1822 }
1823
1824 bool is_color_surface = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
1825 AddrSurfInfoIn.flags.color = is_color_surface && !(surf->flags & RADEON_SURF_NO_RENDER_TARGET);
1826 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
1827 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
1828 /* flags.texture currently refers to TC-compatible HTILE */
1829 AddrSurfInfoIn.flags.texture = is_color_surface || surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE;
1830 AddrSurfInfoIn.flags.opt4space = 1;
1831
1832 AddrSurfInfoIn.numMipLevels = config->info.levels;
1833 AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
1834 AddrSurfInfoIn.numFrags = AddrSurfInfoIn.numSamples;
1835
1836 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER))
1837 AddrSurfInfoIn.numFrags = MAX2(1, config->info.storage_samples);
1838
1839 /* GFX9 doesn't support 1D depth textures, so allocate all 1D textures
1840 * as 2D to avoid needing separate shader variants for 1D vs 2D; as a
1841 * consequence, all shaders must sample 1D textures as 2D. */
1842 if (config->is_3d)
1843 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_3D;
1844 else if (info->chip_class != GFX9 && config->is_1d)
1845 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_1D;
1846 else
1847 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_2D;
1848
1849 AddrSurfInfoIn.width = config->info.width;
1850 AddrSurfInfoIn.height = config->info.height;
1851
1852 if (config->is_3d)
1853 AddrSurfInfoIn.numSlices = config->info.depth;
1854 else if (config->is_cube)
1855 AddrSurfInfoIn.numSlices = 6;
1856 else
1857 AddrSurfInfoIn.numSlices = config->info.array_size;
1858
1859 /* This is propagated to DCC. It must be 0 for HTILE and CMASK. */
1860 AddrSurfInfoIn.flags.metaPipeUnaligned = 0;
1861 AddrSurfInfoIn.flags.metaRbUnaligned = 0;
1862
1863 /* Optimal values for the L2 cache. */
1864 if (info->chip_class == GFX9) {
1865 surf->u.gfx9.dcc.independent_64B_blocks = 1;
1866 surf->u.gfx9.dcc.independent_128B_blocks = 0;
1867 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1868 } else if (info->chip_class >= GFX10) {
1869 surf->u.gfx9.dcc.independent_64B_blocks = 0;
1870 surf->u.gfx9.dcc.independent_128B_blocks = 1;
1871 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
1872 }
1873
1874 if (AddrSurfInfoIn.flags.display) {
1875 /* The display hardware can only read DCC with RB_ALIGNED=0 and
1876 * PIPE_ALIGNED=0. PIPE_ALIGNED really means L2CACHE_ALIGNED.
1877 *
1878 * The CB block requires RB_ALIGNED=1 except on chips with 1 RB.
1879 * PIPE_ALIGNED is optional, but PIPE_ALIGNED=0 requires L2 flushes
1880 * after rendering, so PIPE_ALIGNED=1 is recommended.
1881 */
1882 if (info->use_display_dcc_unaligned) {
1883 AddrSurfInfoIn.flags.metaPipeUnaligned = 1;
1884 AddrSurfInfoIn.flags.metaRbUnaligned = 1;
1885 }
1886
1887 /* Adjust DCC settings to meet DCN requirements. */
1888 if (info->use_display_dcc_unaligned || info->use_display_dcc_with_retile_blit) {
1889 /* Only Navi12/14 support independent 64B blocks in L2,
1890 * but without DCC image stores.
1891 */
1892 if (info->family == CHIP_NAVI12 || info->family == CHIP_NAVI14) {
1893 surf->u.gfx9.dcc.independent_64B_blocks = 1;
1894 surf->u.gfx9.dcc.independent_128B_blocks = 0;
1895 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1896 }
1897
1898 if (info->chip_class >= GFX10_3) {
1899 surf->u.gfx9.dcc.independent_64B_blocks = 1;
1900 surf->u.gfx9.dcc.independent_128B_blocks = 1;
1901 surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1902 }
1903 }
1904 }
1905
1906 switch (mode) {
1907 case RADEON_SURF_MODE_LINEAR_ALIGNED:
1908 assert(config->info.samples <= 1);
1909 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1910 AddrSurfInfoIn.swizzleMode = ADDR_SW_LINEAR;
1911 break;
1912
1913 case RADEON_SURF_MODE_1D:
1914 case RADEON_SURF_MODE_2D:
1915 if (surf->flags & RADEON_SURF_IMPORTED ||
1916 (info->chip_class >= GFX10 && surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE)) {
1917 AddrSurfInfoIn.swizzleMode = surf->u.gfx9.surf.swizzle_mode;
1918 break;
1919 }
1920
1921 r = gfx9_get_preferred_swizzle_mode(addrlib->handle, surf, &AddrSurfInfoIn, false,
1922 &AddrSurfInfoIn.swizzleMode);
1923 if (r)
1924 return r;
1925 break;
1926
1927 default:
1928 assert(0);
1929 }
1930
1931 surf->u.gfx9.resource_type = AddrSurfInfoIn.resourceType;
1932 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
1933
1934 surf->num_dcc_levels = 0;
1935 surf->surf_size = 0;
1936 surf->fmask_size = 0;
1937 surf->dcc_size = 0;
1938 surf->htile_size = 0;
1939 surf->htile_slice_size = 0;
1940 surf->u.gfx9.surf_offset = 0;
1941 surf->u.gfx9.stencil_offset = 0;
1942 surf->cmask_size = 0;
1943 surf->u.gfx9.dcc_retile_use_uint16 = false;
1944 surf->u.gfx9.dcc_retile_num_elements = 0;
1945 surf->u.gfx9.dcc_retile_map = NULL;
1946
1947 /* Calculate texture layout information. */
1948 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed, &AddrSurfInfoIn);
1949 if (r)
1950 return r;
1951
1952 /* Calculate texture layout information for stencil. */
1953 if (surf->flags & RADEON_SURF_SBUFFER) {
1954 AddrSurfInfoIn.flags.stencil = 1;
1955 AddrSurfInfoIn.bpp = 8;
1956 AddrSurfInfoIn.format = ADDR_FMT_8;
1957
1958 if (!AddrSurfInfoIn.flags.depth) {
1959 r = gfx9_get_preferred_swizzle_mode(addrlib->handle, surf, &AddrSurfInfoIn, false,
1960 &AddrSurfInfoIn.swizzleMode);
1961 if (r)
1962 return r;
1963 } else
1964 AddrSurfInfoIn.flags.depth = 0;
1965
1966 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed, &AddrSurfInfoIn);
1967 if (r)
1968 return r;
1969 }
1970
1971 surf->is_linear = surf->u.gfx9.surf.swizzle_mode == ADDR_SW_LINEAR;
1972
1973 /* Query whether the surface is displayable. */
1974 /* This is only useful for surfaces that are allocated without SCANOUT. */
1975 bool displayable = false;
1976 if (!config->is_3d && !config->is_cube) {
1977 r = Addr2IsValidDisplaySwizzleMode(addrlib->handle, surf->u.gfx9.surf.swizzle_mode,
1978 surf->bpe * 8, &displayable);
1979 if (r)
1980 return r;
1981
1982 /* Display needs unaligned DCC. */
1983 if (surf->num_dcc_levels &&
1984 (!is_dcc_supported_by_DCN(info, config, surf, surf->u.gfx9.dcc.rb_aligned,
1985 surf->u.gfx9.dcc.pipe_aligned) ||
1986 /* Don't set is_displayable if displayable DCC is missing. */
1987 (info->use_display_dcc_with_retile_blit && !surf->u.gfx9.dcc_retile_num_elements)))
1988 displayable = false;
1989 }
1990 surf->is_displayable = displayable;
1991
1992 /* Validate that we allocated a displayable surface if requested. */
1993 assert(!AddrSurfInfoIn.flags.display || surf->is_displayable);
1994
1995 /* Validate that DCC is set up correctly. */
1996 if (surf->num_dcc_levels) {
1997 assert(is_dcc_supported_by_L2(info, surf));
1998 if (AddrSurfInfoIn.flags.color)
1999 assert(is_dcc_supported_by_CB(info, surf->u.gfx9.surf.swizzle_mode));
2000 if (AddrSurfInfoIn.flags.display) {
2001 assert(is_dcc_supported_by_DCN(info, config, surf, surf->u.gfx9.dcc.rb_aligned,
2002 surf->u.gfx9.dcc.pipe_aligned));
2003 }
2004 }
2005
2006 if (info->has_graphics && !compressed && !config->is_3d && config->info.levels == 1 &&
2007 AddrSurfInfoIn.flags.color && !surf->is_linear &&
2008 surf->surf_alignment >= 64 * 1024 && /* 64KB tiling */
2009 !(surf->flags & (RADEON_SURF_DISABLE_DCC | RADEON_SURF_FORCE_SWIZZLE_MODE |
2010 RADEON_SURF_FORCE_MICRO_TILE_MODE))) {
2011 /* Validate that DCC is enabled if DCN can do it. */
2012 if ((info->use_display_dcc_unaligned || info->use_display_dcc_with_retile_blit) &&
2013 AddrSurfInfoIn.flags.display && surf->bpe == 4) {
2014 assert(surf->num_dcc_levels);
2015 }
2016
2017 /* Validate that non-scanout DCC is always enabled. */
2018 if (!AddrSurfInfoIn.flags.display)
2019 assert(surf->num_dcc_levels);
2020 }
2021
2022 if (!surf->htile_size) {
2023 /* Unset this if HTILE is not present. */
2024 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
2025 }
2026
2027 switch (surf->u.gfx9.surf.swizzle_mode) {
2028 /* S = standard. */
2029 case ADDR_SW_256B_S:
2030 case ADDR_SW_4KB_S:
2031 case ADDR_SW_64KB_S:
2032 case ADDR_SW_64KB_S_T:
2033 case ADDR_SW_4KB_S_X:
2034 case ADDR_SW_64KB_S_X:
2035 surf->micro_tile_mode = RADEON_MICRO_MODE_STANDARD;
2036 break;
2037
2038 /* D = display. */
2039 case ADDR_SW_LINEAR:
2040 case ADDR_SW_256B_D:
2041 case ADDR_SW_4KB_D:
2042 case ADDR_SW_64KB_D:
2043 case ADDR_SW_64KB_D_T:
2044 case ADDR_SW_4KB_D_X:
2045 case ADDR_SW_64KB_D_X:
2046 surf->micro_tile_mode = RADEON_MICRO_MODE_DISPLAY;
2047 break;
2048
2049 /* R = rotated (gfx9), render target (gfx10). */
2050 case ADDR_SW_256B_R:
2051 case ADDR_SW_4KB_R:
2052 case ADDR_SW_64KB_R:
2053 case ADDR_SW_64KB_R_T:
2054 case ADDR_SW_4KB_R_X:
2055 case ADDR_SW_64KB_R_X:
2056 case ADDR_SW_VAR_R_X:
2057 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
2058 * used at the same time. We currently do not use the rotated mode
2059 * on gfx9.
2060 */
2061 assert(info->chip_class >= GFX10 || !"rotated micro tile mode is unsupported");
2062 surf->micro_tile_mode = RADEON_MICRO_MODE_RENDER;
2063 break;
2064
2065 /* Z = depth. */
2066 case ADDR_SW_4KB_Z:
2067 case ADDR_SW_64KB_Z:
2068 case ADDR_SW_64KB_Z_T:
2069 case ADDR_SW_4KB_Z_X:
2070 case ADDR_SW_64KB_Z_X:
2071 case ADDR_SW_VAR_Z_X:
2072 surf->micro_tile_mode = RADEON_MICRO_MODE_DEPTH;
2073 break;
2074
2075 default:
2076 assert(0);
2077 }
2078
2079 return 0;
2080 }
2081
2082 int ac_compute_surface(struct ac_addrlib *addrlib, const struct radeon_info *info,
2083 const struct ac_surf_config *config, enum radeon_surf_mode mode,
2084 struct radeon_surf *surf)
2085 {
2086 int r;
2087
2088 r = surf_config_sanity(config, surf->flags);
2089 if (r)
2090 return r;
2091
2092 if (info->chip_class >= GFX9)
2093 r = gfx9_compute_surface(addrlib, info, config, mode, surf);
2094 else
2095 r = gfx6_compute_surface(addrlib->handle, info, config, mode, surf);
2096
2097 if (r)
2098 return r;
2099
2100 /* Determine the memory layout of multiple allocations in one buffer. */
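/* Sub-allocations are appended in this order, each aligned to its own
 * alignment requirement: main surface, HTILE, FMASK, CMASK (MSAA only; see
 * below), displayable DCC, DCC retile map, DCC. Absent parts add nothing.
 */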
2101 surf->total_size = surf->surf_size;
2102 surf->alignment = surf->surf_alignment;
2103
2104 if (surf->htile_size) {
2105 surf->htile_offset = align64(surf->total_size, surf->htile_alignment);
2106 surf->total_size = surf->htile_offset + surf->htile_size;
2107 surf->alignment = MAX2(surf->alignment, surf->htile_alignment);
2108 }
2109
2110 if (surf->fmask_size) {
2111 assert(config->info.samples >= 2);
2112 surf->fmask_offset = align64(surf->total_size, surf->fmask_alignment);
2113 surf->total_size = surf->fmask_offset + surf->fmask_size;
2114 surf->alignment = MAX2(surf->alignment, surf->fmask_alignment);
2115 }
2116
2117 /* Single-sample CMASK is in a separate buffer. */
2118 if (surf->cmask_size && config->info.samples >= 2) {
2119 surf->cmask_offset = align64(surf->total_size, surf->cmask_alignment);
2120 surf->total_size = surf->cmask_offset + surf->cmask_size;
2121 surf->alignment = MAX2(surf->alignment, surf->cmask_alignment);
2122 }
2123
2124 if (surf->is_displayable)
2125 surf->flags |= RADEON_SURF_SCANOUT;
2126
2127 if (surf->dcc_size &&
2128 /* dcc_size is computed on GFX9+ only if it's displayable. */
2129 (info->chip_class >= GFX9 || !get_display_flag(config, surf))) {
2130 /* It's better when displayable DCC is immediately after
2131 * the image due to hw-specific reasons.
2132 */
2133 if (info->chip_class >= GFX9 && surf->u.gfx9.dcc_retile_num_elements) {
2134 /* Add space for the displayable DCC buffer. */
2135 surf->display_dcc_offset = align64(surf->total_size, surf->u.gfx9.display_dcc_alignment);
2136 surf->total_size = surf->display_dcc_offset + surf->u.gfx9.display_dcc_size;
2137
2138 /* Add space for the DCC retile buffer (16-bit or 32-bit elements). */
2139 surf->dcc_retile_map_offset = align64(surf->total_size, info->tcc_cache_line_size);
2140
2141 if (surf->u.gfx9.dcc_retile_use_uint16) {
2142 surf->total_size =
2143 surf->dcc_retile_map_offset + surf->u.gfx9.dcc_retile_num_elements * 2;
2144 } else {
2145 surf->total_size =
2146 surf->dcc_retile_map_offset + surf->u.gfx9.dcc_retile_num_elements * 4;
2147 }
2148 }
2149
2150 surf->dcc_offset = align64(surf->total_size, surf->dcc_alignment);
2151 surf->total_size = surf->dcc_offset + surf->dcc_size;
2152 surf->alignment = MAX2(surf->alignment, surf->dcc_alignment);
2153 }
2154
2155 return 0;
2156 }
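/* Usage sketch (illustrative only, not part of the original file): assuming an
 * ac_addrlib handle "addrlib" and a populated radeon_info "info" obtained
 * elsewhere, a driver would typically do something like:
 *
 *    struct ac_surf_config config = {0};
 *    config.info.width = 1920;
 *    config.info.height = 1080;
 *    config.info.levels = 1;
 *    config.info.samples = 1;
 *    config.info.storage_samples = 1;
 *    config.info.array_size = 1;
 *
 *    struct radeon_surf surf = {0};
 *    surf.blk_w = 1;
 *    surf.blk_h = 1;
 *    surf.bpe = 4;
 *
 *    int r = ac_compute_surface(addrlib, &info, &config, RADEON_SURF_MODE_2D, &surf);
 *
 * On success (r == 0), surf.total_size and surf.alignment describe the whole
 * allocation, and the *_offset fields give the sub-allocation layout computed
 * above.
 */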
2157
2158 /* This is meant to be used for disabling DCC. */
2159 void ac_surface_zero_dcc_fields(struct radeon_surf *surf)
2160 {
2161 surf->dcc_offset = 0;
2162 surf->display_dcc_offset = 0;
2163 surf->dcc_retile_map_offset = 0;
2164 }
2165
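/* eg_tile_split() decodes the TILE_SPLIT field of the legacy tiling flags
 * into a tile split size in bytes (64..4096), and eg_tile_split_rev() below
 * encodes it back, e.g. eg_tile_split(3) == 512 and eg_tile_split_rev(512) == 3.
 */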
2166 static unsigned eg_tile_split(unsigned tile_split)
2167 {
2168 switch (tile_split) {
2169 case 0:
2170 tile_split = 64;
2171 break;
2172 case 1:
2173 tile_split = 128;
2174 break;
2175 case 2:
2176 tile_split = 256;
2177 break;
2178 case 3:
2179 tile_split = 512;
2180 break;
2181 default:
2182 case 4:
2183 tile_split = 1024;
2184 break;
2185 case 5:
2186 tile_split = 2048;
2187 break;
2188 case 6:
2189 tile_split = 4096;
2190 break;
2191 }
2192 return tile_split;
2193 }
2194
2195 static unsigned eg_tile_split_rev(unsigned eg_tile_split)
2196 {
2197 switch (eg_tile_split) {
2198 case 64:
2199 return 0;
2200 case 128:
2201 return 1;
2202 case 256:
2203 return 2;
2204 case 512:
2205 return 3;
2206 default:
2207 case 1024:
2208 return 4;
2209 case 2048:
2210 return 5;
2211 case 4096:
2212 return 6;
2213 }
2214 }
2215
2216 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_SHIFT 45
2217 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_MASK 0x3
2218
2219 /* This should be called before ac_compute_surface. */
2220 void ac_surface_set_bo_metadata(const struct radeon_info *info, struct radeon_surf *surf,
2221 uint64_t tiling_flags, enum radeon_surf_mode *mode)
2222 {
2223 bool scanout;
2224
2225 if (info->chip_class >= GFX9) {
2226 surf->u.gfx9.surf.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2227 surf->u.gfx9.dcc.independent_64B_blocks =
2228 AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_64B);
2229 surf->u.gfx9.dcc.independent_128B_blocks =
2230 AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_128B);
2231 surf->u.gfx9.dcc.max_compressed_block_size =
2232 AMDGPU_TILING_GET(tiling_flags, DCC_MAX_COMPRESSED_BLOCK_SIZE);
2233 surf->u.gfx9.display_dcc_pitch_max = AMDGPU_TILING_GET(tiling_flags, DCC_PITCH_MAX);
2234 scanout = AMDGPU_TILING_GET(tiling_flags, SCANOUT);
2235 *mode =
2236 surf->u.gfx9.surf.swizzle_mode > 0 ? RADEON_SURF_MODE_2D : RADEON_SURF_MODE_LINEAR_ALIGNED;
2237 } else {
2238 surf->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2239 surf->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2240 surf->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2241 surf->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
2242 surf->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2243 surf->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2244 scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
2245
2246 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
2247 *mode = RADEON_SURF_MODE_2D;
2248 else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
2249 *mode = RADEON_SURF_MODE_1D;
2250 else
2251 *mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
2252 }
2253
2254 if (scanout)
2255 surf->flags |= RADEON_SURF_SCANOUT;
2256 else
2257 surf->flags &= ~RADEON_SURF_SCANOUT;
2258 }
2259
2260 void ac_surface_get_bo_metadata(const struct radeon_info *info, struct radeon_surf *surf,
2261 uint64_t *tiling_flags)
2262 {
2263 *tiling_flags = 0;
2264
2265 if (info->chip_class >= GFX9) {
2266 uint64_t dcc_offset = 0;
2267
2268 if (surf->dcc_offset) {
2269 dcc_offset = surf->display_dcc_offset ? surf->display_dcc_offset : surf->dcc_offset;
2270 assert((dcc_offset >> 8) != 0 && (dcc_offset >> 8) < (1 << 24));
2271 }
2272
2273 *tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, surf->u.gfx9.surf.swizzle_mode);
2274 *tiling_flags |= AMDGPU_TILING_SET(DCC_OFFSET_256B, dcc_offset >> 8);
2275 *tiling_flags |= AMDGPU_TILING_SET(DCC_PITCH_MAX, surf->u.gfx9.display_dcc_pitch_max);
2276 *tiling_flags |=
2277 AMDGPU_TILING_SET(DCC_INDEPENDENT_64B, surf->u.gfx9.dcc.independent_64B_blocks);
2278 *tiling_flags |=
2279 AMDGPU_TILING_SET(DCC_INDEPENDENT_128B, surf->u.gfx9.dcc.independent_128B_blocks);
2280 *tiling_flags |= AMDGPU_TILING_SET(DCC_MAX_COMPRESSED_BLOCK_SIZE,
2281 surf->u.gfx9.dcc.max_compressed_block_size);
2282 *tiling_flags |= AMDGPU_TILING_SET(SCANOUT, (surf->flags & RADEON_SURF_SCANOUT) != 0);
2283 } else {
2284 if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D)
2285 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
2286 else if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D)
2287 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
2288 else
2289 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
2290
2291 *tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, surf->u.legacy.pipe_config);
2292 *tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(surf->u.legacy.bankw));
2293 *tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(surf->u.legacy.bankh));
2294 if (surf->u.legacy.tile_split)
2295 *tiling_flags |=
2296 AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(surf->u.legacy.tile_split));
2297 *tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(surf->u.legacy.mtilea));
2298 *tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(surf->u.legacy.num_banks) - 1);
2299
2300 if (surf->flags & RADEON_SURF_SCANOUT)
2301 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
2302 else
2303 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
2304 }
2305 }
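/* Note that this packing is the inverse of ac_surface_set_bo_metadata() above;
 * e.g. NUM_BANKS stores log2(num_banks) - 1 here, which the setter decodes as
 * num_banks = 2 << NUM_BANKS.
 */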
2306
2307 static uint32_t ac_get_umd_metadata_word1(const struct radeon_info *info)
2308 {
2309 return (ATI_VENDOR_ID << 16) | info->pci_id;
2310 }
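/* Example (hypothetical pci_id, and assuming ATI_VENDOR_ID is the usual PCI
 * vendor ID 0x1002): with pci_id = 0x731f this yields
 * (0x1002 << 16) | 0x731f = 0x1002731f, i.e. the vendor ID in the upper
 * 16 bits and the device ID in the lower 16 bits.
 */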
2311
2312 /* This should be called after ac_compute_surface. */
2313 bool ac_surface_set_umd_metadata(const struct radeon_info *info, struct radeon_surf *surf,
2314 unsigned num_storage_samples, unsigned num_mipmap_levels,
2315 unsigned size_metadata, uint32_t metadata[64])
2316 {
2317 uint32_t *desc = &metadata[2];
2318 uint64_t offset;
2319
2320 if (info->chip_class >= GFX9)
2321 offset = surf->u.gfx9.surf_offset;
2322 else
2323 offset = surf->u.legacy.level[0].offset;
2324
2325 if (offset || /* Non-zero planes ignore metadata. */
2326 size_metadata < 10 * 4 || /* at least 2(header) + 8(desc) dwords */
2327 metadata[0] == 0 || /* invalid version number */
2328 metadata[1] != ac_get_umd_metadata_word1(info)) /* invalid PCI ID */ {
2329 /* Disable DCC because it might not be enabled. */
2330 ac_surface_zero_dcc_fields(surf);
2331
2332 /* Don't report an error if the texture comes from an incompatible driver,
2333 * even though the import might not work correctly.
2334 */
2335 return true;
2336 }
2337
2338 /* Validate that sample counts and the number of mipmap levels match. */
2339 unsigned desc_last_level = G_008F1C_LAST_LEVEL(desc[3]);
2340 unsigned type = G_008F1C_TYPE(desc[3]);
2341
2342 if (type == V_008F1C_SQ_RSRC_IMG_2D_MSAA || type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) {
2343 unsigned log_samples = util_logbase2(MAX2(1, num_storage_samples));
2344
2345 if (desc_last_level != log_samples) {
2346 fprintf(stderr,
2347 "amdgpu: invalid MSAA texture import, "
2348 "metadata has log2(samples) = %u, the caller set %u\n",
2349 desc_last_level, log_samples);
2350 return false;
2351 }
2352 } else {
2353 if (desc_last_level != num_mipmap_levels - 1) {
2354 fprintf(stderr,
2355 "amdgpu: invalid mipmapped texture import, "
2356 "metadata has last_level = %u, the caller set %u\n",
2357 desc_last_level, num_mipmap_levels - 1);
2358 return false;
2359 }
2360 }
2361
2362 if (info->chip_class >= GFX8 && G_008F28_COMPRESSION_EN(desc[6])) {
2363 /* Read DCC information. */
2364 switch (info->chip_class) {
2365 case GFX8:
2366 surf->dcc_offset = (uint64_t)desc[7] << 8;
2367 break;
2368
2369 case GFX9:
2370 surf->dcc_offset =
2371 ((uint64_t)desc[7] << 8) | ((uint64_t)G_008F24_META_DATA_ADDRESS(desc[5]) << 40);
2372 surf->u.gfx9.dcc.pipe_aligned = G_008F24_META_PIPE_ALIGNED(desc[5]);
2373 surf->u.gfx9.dcc.rb_aligned = G_008F24_META_RB_ALIGNED(desc[5]);
2374
2375 /* If DCC is unaligned, this can only be a displayable image. */
2376 if (!surf->u.gfx9.dcc.pipe_aligned && !surf->u.gfx9.dcc.rb_aligned)
2377 assert(surf->is_displayable);
2378 break;
2379
2380 case GFX10:
2381 case GFX10_3:
2382 surf->dcc_offset =
2383 ((uint64_t)G_00A018_META_DATA_ADDRESS_LO(desc[6]) << 8) | ((uint64_t)desc[7] << 16);
2384 surf->u.gfx9.dcc.pipe_aligned = G_00A018_META_PIPE_ALIGNED(desc[6]);
2385 break;
2386
2387 default:
2388 assert(0);
2389 return false;
2390 }
2391 } else {
2392 /* Disable DCC. dcc_offset is always set by texture_from_handle
2393 * and must be cleared here.
2394 */
2395 ac_surface_zero_dcc_fields(surf);
2396 }
2397
2398 return true;
2399 }
2400
2401 void ac_surface_get_umd_metadata(const struct radeon_info *info, struct radeon_surf *surf,
2402 unsigned num_mipmap_levels, uint32_t desc[8],
2403 unsigned *size_metadata, uint32_t metadata[64])
2404 {
2405 /* Clear the base address and set the relative DCC offset. */
2406 desc[0] = 0;
2407 desc[1] &= C_008F14_BASE_ADDRESS_HI;
2408
2409 switch (info->chip_class) {
2410 case GFX6:
2411 case GFX7:
2412 break;
2413 case GFX8:
2414 desc[7] = surf->dcc_offset >> 8;
2415 break;
2416 case GFX9:
2417 desc[7] = surf->dcc_offset >> 8;
2418 desc[5] &= C_008F24_META_DATA_ADDRESS;
2419 desc[5] |= S_008F24_META_DATA_ADDRESS(surf->dcc_offset >> 40);
2420 break;
2421 case GFX10:
2422 case GFX10_3:
2423 desc[6] &= C_00A018_META_DATA_ADDRESS_LO;
2424 desc[6] |= S_00A018_META_DATA_ADDRESS_LO(surf->dcc_offset >> 8);
2425 desc[7] = surf->dcc_offset >> 16;
2426 break;
2427 default:
2428 assert(0);
2429 }
2430
2431 /* Metadata image format version 1:
2432 * [0] = 1 (metadata format identifier)
2433 * [1] = (VENDOR_ID << 16) | PCI_ID
2434 * [2:9] = image descriptor for the whole resource
2435 * [2] is always 0, because the base address is cleared
2436 * [9] is the DCC offset bits [39:8] from the beginning of
2437 * the buffer
2438 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
2439 */
2440
2441 metadata[0] = 1; /* metadata image format version 1 */
2442
2443 /* Tiling modes are ambiguous without a PCI ID. */
2444 metadata[1] = ac_get_umd_metadata_word1(info);
2445
2446 /* Dwords [2:9] contain the image descriptor. */
2447 memcpy(&metadata[2], desc, 8 * 4);
2448 *size_metadata = 10 * 4;
2449
2450 /* Dwords [10:..] contain the mipmap level offsets. */
2451 if (info->chip_class <= GFX8) {
2452 for (unsigned i = 0; i < num_mipmap_levels; i++)
2453 metadata[10 + i] = surf->u.legacy.level[i].offset >> 8;
2454
2455 *size_metadata += num_mipmap_levels * 4;
2456 }
2457 }
2458
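/* Overrides the main surface offset/pitch (e.g. for imported buffers whose
 * layout is dictated by the exporter) and rebases all metadata offsets so
 * they stay relative to the same buffer start.
 */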
2459 void ac_surface_override_offset_stride(const struct radeon_info *info, struct radeon_surf *surf,
2460 unsigned num_mipmap_levels, uint64_t offset, unsigned pitch)
2461 {
2462 if (info->chip_class >= GFX9) {
2463 if (pitch) {
2464 surf->u.gfx9.surf_pitch = pitch;
2465 if (num_mipmap_levels == 1)
2466 surf->u.gfx9.surf.epitch = pitch - 1;
2467 surf->u.gfx9.surf_slice_size = (uint64_t)pitch * surf->u.gfx9.surf_height * surf->bpe;
2468 }
2469 surf->u.gfx9.surf_offset = offset;
2470 if (surf->u.gfx9.stencil_offset)
2471 surf->u.gfx9.stencil_offset += offset;
2472 } else {
2473 if (pitch) {
2474 surf->u.legacy.level[0].nblk_x = pitch;
2475 surf->u.legacy.level[0].slice_size_dw =
2476 ((uint64_t)pitch * surf->u.legacy.level[0].nblk_y * surf->bpe) / 4;
2477 }
2478
2479 if (offset) {
2480 for (unsigned i = 0; i < ARRAY_SIZE(surf->u.legacy.level); ++i)
2481 surf->u.legacy.level[i].offset += offset;
2482 }
2483 }
2484
2485 if (surf->htile_offset)
2486 surf->htile_offset += offset;
2487 if (surf->fmask_offset)
2488 surf->fmask_offset += offset;
2489 if (surf->cmask_offset)
2490 surf->cmask_offset += offset;
2491 if (surf->dcc_offset)
2492 surf->dcc_offset += offset;
2493 if (surf->display_dcc_offset)
2494 surf->display_dcc_offset += offset;
2495 if (surf->dcc_retile_map_offset)
2496 surf->dcc_retile_map_offset += offset;
2497 }