/* [mesa.git] src/gallium/drivers/radeonsi/si_texture.c */
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "radeonsi/si_pipe.h"
#include "radeonsi/si_query.h"
#include "util/u_format.h"
#include "util/u_log.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include "util/u_resource.h"
#include "util/u_surface.h"
#include "util/os_time.h"
#include <errno.h>
#include <inttypes.h>
#include "state_tracker/drm_driver.h"
#include "amd/common/sid.h"

static enum radeon_surf_mode
si_choose_tiling(struct si_screen *sscreen,
		 const struct pipe_resource *templ);


bool si_prepare_for_dma_blit(struct si_context *sctx,
			     struct r600_texture *rdst,
			     unsigned dst_level, unsigned dstx,
			     unsigned dsty, unsigned dstz,
			     struct r600_texture *rsrc,
			     unsigned src_level,
			     const struct pipe_box *src_box)
{
	if (!sctx->b.dma_cs)
		return false;

	if (rdst->surface.bpe != rsrc->surface.bpe)
		return false;

	/* MSAA: SDMA can't blit multisample resources. */
	if (rsrc->resource.b.b.nr_samples > 1 ||
	    rdst->resource.b.b.nr_samples > 1)
		return false;

	/* Depth-stencil surfaces:
	 * When dst is linear, the DB->CB copy preserves HTILE.
	 * When dst is tiled, the 3D path must be used to update HTILE.
	 */
	if (rsrc->is_depth || rdst->is_depth)
		return false;

	/* DCC as:
	 * src: Use the 3D path. DCC decompression is expensive.
	 * dst: Use the 3D path to compress the pixels with DCC.
	 */
	if (vi_dcc_enabled(rsrc, src_level) ||
	    vi_dcc_enabled(rdst, dst_level))
		return false;

	/* CMASK as:
	 * src: Both texture and SDMA paths need decompression. Use SDMA.
	 * dst: If overwriting the whole texture, discard CMASK and use
	 *      SDMA. Otherwise, use the 3D path.
	 */
	if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
		/* The CMASK clear is only enabled for the first level. */
		assert(dst_level == 0);
		if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
						      dstx, dsty, dstz, src_box->width,
						      src_box->height, src_box->depth))
			return false;

		si_texture_discard_cmask(sctx->screen, rdst);
	}

	/* All requirements are met. Prepare textures for SDMA. */
	if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
		sctx->b.b.flush_resource(&sctx->b.b, &rsrc->resource.b.b);

	assert(!(rsrc->dirty_level_mask & (1 << src_level)));
	assert(!(rdst->dirty_level_mask & (1 << dst_level)));

	return true;
}
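
/* For illustration (a summary of the checks above, not an exhaustive
 * contract): a single-sample R8G8B8A8 -> R8G8B8A8 copy with no DCC on
 * either surface qualifies for the SDMA path, while any MSAA, depth, or
 * DCC-compressed source/destination falls back to the 3D (blit) path. */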

/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void si_copy_region_with_blit(struct pipe_context *pipe,
				     struct pipe_resource *dst,
				     unsigned dst_level,
				     unsigned dstx, unsigned dsty, unsigned dstz,
				     struct pipe_resource *src,
				     unsigned src_level,
				     const struct pipe_box *src_box)
{
	struct pipe_blit_info blit;

	memset(&blit, 0, sizeof(blit));
	blit.src.resource = src;
	blit.src.format = src->format;
	blit.src.level = src_level;
	blit.src.box = *src_box;
	blit.dst.resource = dst;
	blit.dst.format = dst->format;
	blit.dst.level = dst_level;
	blit.dst.box.x = dstx;
	blit.dst.box.y = dsty;
	blit.dst.box.z = dstz;
	blit.dst.box.width = src_box->width;
	blit.dst.box.height = src_box->height;
	blit.dst.box.depth = src_box->depth;
	blit.mask = util_format_get_mask(src->format) &
		    util_format_get_mask(dst->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	if (blit.mask) {
		pipe->blit(pipe, &blit);
	}
}

/* Copy from a full GPU texture to a transfer's staging one. */
static void si_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = &rtransfer->staging->b.b;
	struct pipe_resource *src = transfer->resource;

	if (src->nr_samples > 1) {
		si_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
					 src, transfer->level, &transfer->box);
		return;
	}

	sctx->b.dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
			 &transfer->box);
}

/* Copy from a transfer's staging texture to a full GPU one. */
static void si_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = transfer->resource;
	struct pipe_resource *src = &rtransfer->staging->b.b;
	struct pipe_box sbox;

	u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

	if (dst->nr_samples > 1) {
		si_copy_region_with_blit(ctx, dst, transfer->level,
					 transfer->box.x, transfer->box.y, transfer->box.z,
					 src, 0, &sbox);
		return;
	}

	sctx->b.dma_copy(ctx, dst, transfer->level,
			 transfer->box.x, transfer->box.y, transfer->box.z,
			 src, 0, &sbox);
}

static unsigned si_texture_get_offset(struct si_screen *sscreen,
				      struct r600_texture *rtex, unsigned level,
				      const struct pipe_box *box,
				      unsigned *stride,
				      unsigned *layer_stride)
{
	if (sscreen->info.chip_class >= GFX9) {
		*stride = rtex->surface.u.gfx9.surf_pitch * rtex->surface.bpe;
		*layer_stride = rtex->surface.u.gfx9.surf_slice_size;

		if (!box)
			return 0;

		/* Each texture is an array of slices. Each slice is an array
		 * of mipmap levels. */
		return box->z * rtex->surface.u.gfx9.surf_slice_size +
		       rtex->surface.u.gfx9.offset[level] +
		       (box->y / rtex->surface.blk_h *
			rtex->surface.u.gfx9.surf_pitch +
			box->x / rtex->surface.blk_w) * rtex->surface.bpe;
	} else {
		*stride = rtex->surface.u.legacy.level[level].nblk_x *
			  rtex->surface.bpe;
		assert((uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 <= UINT_MAX);
		*layer_stride = (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4;

		if (!box)
			return rtex->surface.u.legacy.level[level].offset;

		/* Each texture is an array of mipmap levels. Each level is
		 * an array of slices. */
		return rtex->surface.u.legacy.level[level].offset +
		       box->z * (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 +
		       (box->y / rtex->surface.blk_h *
			rtex->surface.u.legacy.level[level].nblk_x +
			box->x / rtex->surface.blk_w) * rtex->surface.bpe;
	}
}
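
/* Worked example for the legacy branch above (hypothetical numbers): with
 * bpe=4, blk_w=blk_h=1, nblk_x=256, level offset O and slice size
 * S = slice_size_dw*4, a box at (x=16, y=8, z=2) maps to
 * O + 2*S + (8*256 + 16)*4 = O + 2*S + 8256 bytes into the resource. */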

static int si_init_surface(struct si_screen *sscreen,
			   struct radeon_surf *surface,
			   const struct pipe_resource *ptex,
			   enum radeon_surf_mode array_mode,
			   unsigned pitch_in_bytes_override,
			   unsigned offset,
			   bool is_imported,
			   bool is_scanout,
			   bool is_flushed_depth,
			   bool tc_compatible_htile)
{
	const struct util_format_description *desc =
		util_format_description(ptex->format);
	bool is_depth, is_stencil;
	int r;
	unsigned i, bpe, flags = 0;

	is_depth = util_format_has_depth(desc);
	is_stencil = util_format_has_stencil(desc);

	if (!is_flushed_depth &&
	    ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
		bpe = 4; /* stencil is allocated separately on evergreen */
	} else {
		bpe = util_format_get_blocksize(ptex->format);
		assert(util_is_power_of_two_or_zero(bpe));
	}

	if (!is_flushed_depth && is_depth) {
		flags |= RADEON_SURF_ZBUFFER;

		if (tc_compatible_htile &&
		    (sscreen->info.chip_class >= GFX9 ||
		     array_mode == RADEON_SURF_MODE_2D)) {
			/* TC-compatible HTILE only supports Z32_FLOAT.
			 * GFX9 also supports Z16_UNORM.
			 * On VI, promote Z16 to Z32. DB->CB copies will convert
			 * the format for transfers.
			 */
			if (sscreen->info.chip_class == VI)
				bpe = 4;

			flags |= RADEON_SURF_TC_COMPATIBLE_HTILE;
		}

		if (is_stencil)
			flags |= RADEON_SURF_SBUFFER;
	}

	if (sscreen->info.chip_class >= VI &&
	    (ptex->flags & SI_RESOURCE_FLAG_DISABLE_DCC ||
	     ptex->format == PIPE_FORMAT_R9G9B9E5_FLOAT ||
	     /* DCC MSAA array textures are disallowed due to incomplete clear impl. */
	     (ptex->nr_samples >= 2 &&
	      (!sscreen->dcc_msaa_allowed || ptex->array_size > 1))))
		flags |= RADEON_SURF_DISABLE_DCC;

	if (ptex->bind & PIPE_BIND_SCANOUT || is_scanout) {
		/* This should catch bugs in gallium users setting incorrect flags. */
		assert(ptex->nr_samples <= 1 &&
		       ptex->array_size == 1 &&
		       ptex->depth0 == 1 &&
		       ptex->last_level == 0 &&
		       !(flags & RADEON_SURF_Z_OR_SBUFFER));

		flags |= RADEON_SURF_SCANOUT;
	}

	if (ptex->bind & PIPE_BIND_SHARED)
		flags |= RADEON_SURF_SHAREABLE;
	if (is_imported)
		flags |= RADEON_SURF_IMPORTED | RADEON_SURF_SHAREABLE;
	if (!(ptex->flags & SI_RESOURCE_FLAG_FORCE_TILING))
		flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE;

	r = sscreen->ws->surface_init(sscreen->ws, ptex, flags, bpe,
				      array_mode, surface);
	if (r) {
		return r;
	}

	unsigned pitch = pitch_in_bytes_override / bpe;

	if (sscreen->info.chip_class >= GFX9) {
		if (pitch) {
			surface->u.gfx9.surf_pitch = pitch;
			surface->u.gfx9.surf_slice_size =
				(uint64_t)pitch * surface->u.gfx9.surf_height * bpe;
		}
		surface->u.gfx9.surf_offset = offset;
	} else {
		if (pitch) {
			surface->u.legacy.level[0].nblk_x = pitch;
			surface->u.legacy.level[0].slice_size_dw =
				((uint64_t)pitch * surface->u.legacy.level[0].nblk_y * bpe) / 4;
		}
		if (offset) {
			for (i = 0; i < ARRAY_SIZE(surface->u.legacy.level); ++i)
				surface->u.legacy.level[i].offset += offset;
		}
	}
	return 0;
}
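
/* Note on the pitch override above: pitch_in_bytes_override is converted to
 * a pitch in elements by dividing by bpe. E.g. (hypothetical numbers) a
 * 2048-byte override with bpe=4 gives a 512-element pitch, and on GFX9 the
 * slice size is then recomputed as 512 * surf_height * 4 bytes. */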

static void si_texture_init_metadata(struct si_screen *sscreen,
				     struct r600_texture *rtex,
				     struct radeon_bo_metadata *metadata)
{
	struct radeon_surf *surface = &rtex->surface;

	memset(metadata, 0, sizeof(*metadata));

	if (sscreen->info.chip_class >= GFX9) {
		metadata->u.gfx9.swizzle_mode = surface->u.gfx9.surf.swizzle_mode;
	} else {
		metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
					       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
					       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config;
		metadata->u.legacy.bankw = surface->u.legacy.bankw;
		metadata->u.legacy.bankh = surface->u.legacy.bankh;
		metadata->u.legacy.tile_split = surface->u.legacy.tile_split;
		metadata->u.legacy.mtilea = surface->u.legacy.mtilea;
		metadata->u.legacy.num_banks = surface->u.legacy.num_banks;
		metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
		metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
	}
}

static void si_surface_import_metadata(struct si_screen *sscreen,
				       struct radeon_surf *surf,
				       struct radeon_bo_metadata *metadata,
				       enum radeon_surf_mode *array_mode,
				       bool *is_scanout)
{
	if (sscreen->info.chip_class >= GFX9) {
		if (metadata->u.gfx9.swizzle_mode > 0)
			*array_mode = RADEON_SURF_MODE_2D;
		else
			*array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

		*is_scanout = metadata->u.gfx9.swizzle_mode == 0 ||
			      metadata->u.gfx9.swizzle_mode % 4 == 2;

		surf->u.gfx9.surf.swizzle_mode = metadata->u.gfx9.swizzle_mode;
	} else {
		surf->u.legacy.pipe_config = metadata->u.legacy.pipe_config;
		surf->u.legacy.bankw = metadata->u.legacy.bankw;
		surf->u.legacy.bankh = metadata->u.legacy.bankh;
		surf->u.legacy.tile_split = metadata->u.legacy.tile_split;
		surf->u.legacy.mtilea = metadata->u.legacy.mtilea;
		surf->u.legacy.num_banks = metadata->u.legacy.num_banks;

		if (metadata->u.legacy.macrotile == RADEON_LAYOUT_TILED)
			*array_mode = RADEON_SURF_MODE_2D;
		else if (metadata->u.legacy.microtile == RADEON_LAYOUT_TILED)
			*array_mode = RADEON_SURF_MODE_1D;
		else
			*array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

		*is_scanout = metadata->u.legacy.scanout;
	}
}

void si_eliminate_fast_color_clear(struct si_context *sctx,
				   struct r600_texture *rtex)
{
	struct si_screen *sscreen = sctx->screen;
	struct pipe_context *ctx = &sctx->b.b;

	if (ctx == sscreen->aux_context)
		mtx_lock(&sscreen->aux_context_lock);

	unsigned n = sctx->b.num_decompress_calls;
	ctx->flush_resource(ctx, &rtex->resource.b.b);

	/* Flush only if any fast clear elimination took place. */
	if (n != sctx->b.num_decompress_calls)
		ctx->flush(ctx, NULL, 0);

	if (ctx == sscreen->aux_context)
		mtx_unlock(&sscreen->aux_context_lock);
}

void si_texture_discard_cmask(struct si_screen *sscreen,
			      struct r600_texture *rtex)
{
	if (!rtex->cmask.size)
		return;

	assert(rtex->resource.b.b.nr_samples <= 1);

	/* Disable CMASK. */
	memset(&rtex->cmask, 0, sizeof(rtex->cmask));
	rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;
	rtex->dirty_level_mask = 0;

	rtex->cb_color_info &= ~S_028C70_FAST_CLEAR(1);

	if (rtex->cmask_buffer != &rtex->resource)
		r600_resource_reference(&rtex->cmask_buffer, NULL);

	/* Notify all contexts about the change. */
	p_atomic_inc(&sscreen->dirty_tex_counter);
	p_atomic_inc(&sscreen->compressed_colortex_counter);
}

static bool si_can_disable_dcc(struct r600_texture *rtex)
{
	/* We can't disable DCC if it can be written by another process. */
	return rtex->dcc_offset &&
	       (!rtex->resource.b.is_shared ||
		!(rtex->resource.external_usage & PIPE_HANDLE_USAGE_WRITE));
}

static bool si_texture_discard_dcc(struct si_screen *sscreen,
				   struct r600_texture *rtex)
{
	if (!si_can_disable_dcc(rtex))
		return false;

	assert(rtex->dcc_separate_buffer == NULL);

	/* Disable DCC. */
	rtex->dcc_offset = 0;

	/* Notify all contexts about the change. */
	p_atomic_inc(&sscreen->dirty_tex_counter);
	return true;
}

/**
 * Disable DCC for the texture (first decompress, then discard the metadata).
 *
 * There is an unresolved multi-context synchronization issue between
 * screen::aux_context and the current context. If applications do this with
 * multiple contexts, it's already undefined behavior for them and we don't
 * have to worry about that. The scenario is:
 *
 * If context 1 disables DCC and context 2 has queued commands that write
 * to the texture via CB with DCC enabled, and the order of operations is
 * as follows:
 *   context 2 queues draw calls rendering to the texture, but doesn't flush
 *   context 1 disables DCC and flushes
 *   context 1 & 2 reset descriptors and FB state
 *   context 2 flushes (new compressed tiles written by the draw calls)
 *   context 1 & 2 read garbage, because DCC is disabled, yet there are
 *     compressed tiles
 *
 * \param sctx  the current context if you have one, or rscreen->aux_context
 *              if you don't.
 */
bool si_texture_disable_dcc(struct si_context *sctx,
			    struct r600_texture *rtex)
{
	struct si_screen *sscreen = sctx->screen;

	if (!si_can_disable_dcc(rtex))
		return false;

	if (&sctx->b.b == sscreen->aux_context)
		mtx_lock(&sscreen->aux_context_lock);

	/* Decompress DCC. */
	si_decompress_dcc(sctx, rtex);
	sctx->b.b.flush(&sctx->b.b, NULL, 0);

	if (&sctx->b.b == sscreen->aux_context)
		mtx_unlock(&sscreen->aux_context_lock);

	return si_texture_discard_dcc(sscreen, rtex);
}

static void si_reallocate_texture_inplace(struct si_context *sctx,
					  struct r600_texture *rtex,
					  unsigned new_bind_flag,
					  bool invalidate_storage)
{
	struct pipe_screen *screen = sctx->b.b.screen;
	struct r600_texture *new_tex;
	struct pipe_resource templ = rtex->resource.b.b;
	unsigned i;

	templ.bind |= new_bind_flag;

	if (rtex->resource.b.is_shared)
		return;

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		if (rtex->surface.is_linear)
			return;

		/* This fails with MSAA, depth, and compressed textures. */
		if (si_choose_tiling(sctx->screen, &templ) !=
		    RADEON_SURF_MODE_LINEAR_ALIGNED)
			return;
	}

	new_tex = (struct r600_texture*)screen->resource_create(screen, &templ);
	if (!new_tex)
		return;

	/* Copy the pixels to the new texture. */
	if (!invalidate_storage) {
		for (i = 0; i <= templ.last_level; i++) {
			struct pipe_box box;

			u_box_3d(0, 0, 0,
				 u_minify(templ.width0, i), u_minify(templ.height0, i),
				 util_num_layers(&templ, i), &box);

			sctx->b.dma_copy(&sctx->b.b, &new_tex->resource.b.b, i, 0, 0, 0,
					 &rtex->resource.b.b, i, &box);
		}
	}

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		si_texture_discard_cmask(sctx->screen, rtex);
		si_texture_discard_dcc(sctx->screen, rtex);
	}

	/* Replace the structure fields of rtex. */
	rtex->resource.b.b.bind = templ.bind;
	pb_reference(&rtex->resource.buf, new_tex->resource.buf);
	rtex->resource.gpu_address = new_tex->resource.gpu_address;
	rtex->resource.vram_usage = new_tex->resource.vram_usage;
	rtex->resource.gart_usage = new_tex->resource.gart_usage;
	rtex->resource.bo_size = new_tex->resource.bo_size;
	rtex->resource.bo_alignment = new_tex->resource.bo_alignment;
	rtex->resource.domains = new_tex->resource.domains;
	rtex->resource.flags = new_tex->resource.flags;
	rtex->size = new_tex->size;
	rtex->db_render_format = new_tex->db_render_format;
	rtex->db_compatible = new_tex->db_compatible;
	rtex->can_sample_z = new_tex->can_sample_z;
	rtex->can_sample_s = new_tex->can_sample_s;
	rtex->surface = new_tex->surface;
	rtex->fmask = new_tex->fmask;
	rtex->cmask = new_tex->cmask;
	rtex->cb_color_info = new_tex->cb_color_info;
	rtex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode;
	rtex->htile_offset = new_tex->htile_offset;
	rtex->tc_compatible_htile = new_tex->tc_compatible_htile;
	rtex->depth_cleared = new_tex->depth_cleared;
	rtex->stencil_cleared = new_tex->stencil_cleared;
	rtex->dcc_gather_statistics = new_tex->dcc_gather_statistics;
	rtex->framebuffers_bound = new_tex->framebuffers_bound;

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		assert(!rtex->htile_offset);
		assert(!rtex->cmask.size);
		assert(!rtex->fmask.size);
		assert(!rtex->dcc_offset);
		assert(!rtex->is_depth);
	}

	r600_texture_reference(&new_tex, NULL);

	p_atomic_inc(&sctx->screen->dirty_tex_counter);
}

static uint32_t si_get_bo_metadata_word1(struct si_screen *sscreen)
{
	return (ATI_VENDOR_ID << 16) | sscreen->info.pci_id;
}
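
/* Example: with ATI_VENDOR_ID = 0x1002 and a (hypothetical) pci_id of
 * 0x67DF, this returns (0x1002 << 16) | 0x67DF = 0x100267DF. */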

static void si_query_opaque_metadata(struct si_screen *sscreen,
				     struct r600_texture *rtex,
				     struct radeon_bo_metadata *md)
{
	struct pipe_resource *res = &rtex->resource.b.b;
	static const unsigned char swizzle[] = {
		PIPE_SWIZZLE_X,
		PIPE_SWIZZLE_Y,
		PIPE_SWIZZLE_Z,
		PIPE_SWIZZLE_W
	};
	uint32_t desc[8], i;
	bool is_array = util_texture_is_array(res->target);

	/* DRM 2.x.x doesn't support this. */
	if (sscreen->info.drm_major != 3)
		return;

	assert(rtex->dcc_separate_buffer == NULL);
	assert(rtex->fmask.size == 0);

	/* Metadata image format version 1:
	 * [0] = 1 (metadata format identifier)
	 * [1] = (VENDOR_ID << 16) | PCI_ID
	 * [2:9] = image descriptor for the whole resource
	 *         [2] is always 0, because the base address is cleared
	 *         [9] is the DCC offset bits [39:8] from the beginning of
	 *             the buffer
	 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
	 */

	md->metadata[0] = 1; /* metadata image format version 1 */

	/* TILE_MODE_INDEX is ambiguous without a PCI ID. */
	md->metadata[1] = si_get_bo_metadata_word1(sscreen);

	si_make_texture_descriptor(sscreen, rtex, true,
				   res->target, res->format,
				   swizzle, 0, res->last_level, 0,
				   is_array ? res->array_size - 1 : 0,
				   res->width0, res->height0, res->depth0,
				   desc, NULL);

	si_set_mutable_tex_desc_fields(sscreen, rtex, &rtex->surface.u.legacy.level[0],
				       0, 0, rtex->surface.blk_w, false, desc);

	/* Clear the base address and set the relative DCC offset. */
	desc[0] = 0;
	desc[1] &= C_008F14_BASE_ADDRESS_HI;
	desc[7] = rtex->dcc_offset >> 8;

	/* Dwords [2:9] contain the image descriptor. */
	memcpy(&md->metadata[2], desc, sizeof(desc));
	md->size_metadata = 10 * 4;

	/* Dwords [10:..] contain the mipmap level offsets. */
	if (sscreen->info.chip_class <= VI) {
		for (i = 0; i <= res->last_level; i++)
			md->metadata[10+i] = rtex->surface.u.legacy.level[i].offset >> 8;

		md->size_metadata += (1 + res->last_level) * 4;
	}
}
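
/* A sketch of the resulting layout for a texture with last_level = 2 on VI
 * (the device id is hypothetical):
 *   metadata[0]      = 1            (format version)
 *   metadata[1]      = 0x100267DF   (vendor/device id)
 *   metadata[2..9]   = desc[0..7]   (image descriptor, base address cleared)
 *   metadata[10..12] = level offsets >> 8 for levels 0..2
 * giving size_metadata = 10*4 + 3*4 = 52 bytes. */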

static void si_apply_opaque_metadata(struct si_screen *sscreen,
				     struct r600_texture *rtex,
				     struct radeon_bo_metadata *md)
{
	uint32_t *desc = &md->metadata[2];

	if (sscreen->info.chip_class < VI)
		return;

	/* Return if DCC is enabled. The texture should be set up with it
	 * already.
	 */
	if (md->size_metadata >= 10 * 4 && /* at least 2(header) + 8(desc) dwords */
	    md->metadata[0] != 0 &&
	    md->metadata[1] == si_get_bo_metadata_word1(sscreen) &&
	    G_008F28_COMPRESSION_EN(desc[6])) {
		rtex->dcc_offset = (uint64_t)desc[7] << 8;
		return;
	}

	/* Disable DCC. These are always set by texture_from_handle and must
	 * be cleared here.
	 */
	rtex->dcc_offset = 0;
}

static boolean si_texture_get_handle(struct pipe_screen* screen,
				     struct pipe_context *ctx,
				     struct pipe_resource *resource,
				     struct winsys_handle *whandle,
				     unsigned usage)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct si_context *sctx;
	struct r600_resource *res = (struct r600_resource*)resource;
	struct r600_texture *rtex = (struct r600_texture*)resource;
	struct radeon_bo_metadata metadata;
	bool update_metadata = false;
	unsigned stride, offset, slice_size;
	bool flush = false;

	ctx = threaded_context_unwrap_sync(ctx);
	sctx = (struct si_context*)(ctx ? ctx : sscreen->aux_context);

	if (resource->target != PIPE_BUFFER) {
		/* This is not supported now, but it might be required for OpenCL
		 * interop in the future.
		 */
		if (resource->nr_samples > 1 || rtex->is_depth)
			return false;

		/* Move a suballocated texture into a non-suballocated allocation. */
		if (sscreen->ws->buffer_is_suballocated(res->buf) ||
		    rtex->surface.tile_swizzle ||
		    (rtex->resource.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
		     sscreen->info.has_local_buffers &&
		     whandle->type != DRM_API_HANDLE_TYPE_KMS)) {
			assert(!res->b.is_shared);
			si_reallocate_texture_inplace(sctx, rtex,
						      PIPE_BIND_SHARED, false);
			flush = true;
			assert(res->b.b.bind & PIPE_BIND_SHARED);
			assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
			assert(!(res->flags & RADEON_FLAG_NO_INTERPROCESS_SHARING));
			assert(rtex->surface.tile_swizzle == 0);
		}

		/* Since shader image stores don't support DCC on VI,
		 * disable it for external clients that want write
		 * access.
		 */
		if (usage & PIPE_HANDLE_USAGE_WRITE && rtex->dcc_offset) {
			if (si_texture_disable_dcc(sctx, rtex)) {
				update_metadata = true;
				/* si_texture_disable_dcc flushes the context */
				flush = false;
			}
		}

		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
		    (rtex->cmask.size || rtex->dcc_offset)) {
			/* Eliminate fast clear (both CMASK and DCC) */
			si_eliminate_fast_color_clear(sctx, rtex);
			/* eliminate_fast_color_clear flushes the context */
			flush = false;

			/* Disable CMASK if flush_resource isn't going
			 * to be called.
			 */
			if (rtex->cmask.size)
				si_texture_discard_cmask(sscreen, rtex);
		}

		/* Set metadata. */
		if (!res->b.is_shared || update_metadata) {
			si_texture_init_metadata(sscreen, rtex, &metadata);
			si_query_opaque_metadata(sscreen, rtex, &metadata);

			sscreen->ws->buffer_set_metadata(res->buf, &metadata);
		}

		if (sscreen->info.chip_class >= GFX9) {
			offset = rtex->surface.u.gfx9.surf_offset;
			stride = rtex->surface.u.gfx9.surf_pitch *
				 rtex->surface.bpe;
			slice_size = rtex->surface.u.gfx9.surf_slice_size;
		} else {
			offset = rtex->surface.u.legacy.level[0].offset;
			stride = rtex->surface.u.legacy.level[0].nblk_x *
				 rtex->surface.bpe;
			slice_size = (uint64_t)rtex->surface.u.legacy.level[0].slice_size_dw * 4;
		}
	} else {
		/* Buffer exports are for the OpenCL interop. */
		/* Move a suballocated buffer into a non-suballocated allocation. */
		if (sscreen->ws->buffer_is_suballocated(res->buf) ||
		    /* A DMABUF export always fails if the BO is local. */
		    (rtex->resource.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
		     sscreen->info.has_local_buffers)) {
			assert(!res->b.is_shared);

			/* Allocate a new buffer with PIPE_BIND_SHARED. */
			struct pipe_resource templ = res->b.b;
			templ.bind |= PIPE_BIND_SHARED;

			struct pipe_resource *newb =
				screen->resource_create(screen, &templ);
			if (!newb)
				return false;

			/* Copy the old buffer contents to the new one. */
			struct pipe_box box;
			u_box_1d(0, newb->width0, &box);
			sctx->b.b.resource_copy_region(&sctx->b.b, newb, 0, 0, 0, 0,
						       &res->b.b, 0, &box);
			flush = true;
			/* Move the new buffer storage to the old pipe_resource. */
			si_replace_buffer_storage(&sctx->b.b, &res->b.b, newb);
			pipe_resource_reference(&newb, NULL);

			assert(res->b.b.bind & PIPE_BIND_SHARED);
			assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
		}

		/* Buffers */
		offset = 0;
		stride = 0;
		slice_size = 0;
	}

	if (flush)
		sctx->b.b.flush(&sctx->b.b, NULL, 0);

	if (res->b.is_shared) {
		/* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
		 * doesn't set it.
		 */
		res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
	} else {
		res->b.is_shared = true;
		res->external_usage = usage;
	}

	return sscreen->ws->buffer_get_handle(res->buf, stride, offset,
					      slice_size, whandle);
}

static void si_texture_destroy(struct pipe_screen *screen,
			       struct pipe_resource *ptex)
{
	struct r600_texture *rtex = (struct r600_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;

	r600_texture_reference(&rtex->flushed_depth_texture, NULL);

	if (rtex->cmask_buffer != &rtex->resource) {
		r600_resource_reference(&rtex->cmask_buffer, NULL);
	}
	pb_reference(&resource->buf, NULL);
	r600_resource_reference(&rtex->dcc_separate_buffer, NULL);
	r600_resource_reference(&rtex->last_dcc_separate_buffer, NULL);
	FREE(rtex);
}

static const struct u_resource_vtbl si_texture_vtbl;

/* The number of samples can be specified independently of the texture. */
void si_texture_get_fmask_info(struct si_screen *sscreen,
			       struct r600_texture *rtex,
			       unsigned nr_samples,
			       struct r600_fmask_info *out)
{
	/* FMASK is allocated like an ordinary texture. */
	struct pipe_resource templ = rtex->resource.b.b;
	struct radeon_surf fmask = {};
	unsigned flags, bpe;

	memset(out, 0, sizeof(*out));

	if (sscreen->info.chip_class >= GFX9) {
		out->alignment = rtex->surface.u.gfx9.fmask_alignment;
		out->size = rtex->surface.u.gfx9.fmask_size;
		out->tile_swizzle = rtex->surface.u.gfx9.fmask_tile_swizzle;
		return;
	}

	templ.nr_samples = 1;
	flags = rtex->surface.flags | RADEON_SURF_FMASK;

	switch (nr_samples) {
	case 2:
	case 4:
		bpe = 1;
		break;
	case 8:
		bpe = 4;
		break;
	default:
		PRINT_ERR("Invalid sample count for FMASK allocation.\n");
		return;
	}

	if (sscreen->ws->surface_init(sscreen->ws, &templ, flags, bpe,
				      RADEON_SURF_MODE_2D, &fmask)) {
		PRINT_ERR("Got error in surface_init while allocating FMASK.\n");
		return;
	}

	assert(fmask.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);

	out->slice_tile_max = (fmask.u.legacy.level[0].nblk_x * fmask.u.legacy.level[0].nblk_y) / 64;
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->tile_mode_index = fmask.u.legacy.tiling_index[0];
	out->pitch_in_pixels = fmask.u.legacy.level[0].nblk_x;
	out->bank_height = fmask.u.legacy.bankh;
	out->tile_swizzle = fmask.tile_swizzle;
	out->alignment = MAX2(256, fmask.surf_alignment);
	out->size = fmask.surf_size;
}

static void si_texture_allocate_fmask(struct si_screen *sscreen,
				      struct r600_texture *rtex)
{
	si_texture_get_fmask_info(sscreen, rtex,
				  rtex->resource.b.b.nr_samples, &rtex->fmask);

	rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
	rtex->size = rtex->fmask.offset + rtex->fmask.size;
}

void si_texture_get_cmask_info(struct si_screen *sscreen,
			       struct r600_texture *rtex,
			       struct r600_cmask_info *out)
{
	unsigned pipe_interleave_bytes = sscreen->info.pipe_interleave_bytes;
	unsigned num_pipes = sscreen->info.num_tile_pipes;
	unsigned cl_width, cl_height;

	if (sscreen->info.chip_class >= GFX9) {
		out->alignment = rtex->surface.u.gfx9.cmask_alignment;
		out->size = rtex->surface.u.gfx9.cmask_size;
		return;
	}

	switch (num_pipes) {
	case 2:
		cl_width = 32;
		cl_height = 16;
		break;
	case 4:
		cl_width = 32;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 32;
		break;
	case 16: /* Hawaii */
		cl_width = 64;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	unsigned base_align = num_pipes * pipe_interleave_bytes;

	unsigned width = align(rtex->resource.b.b.width0, cl_width*8);
	unsigned height = align(rtex->resource.b.b.height0, cl_height*8);
	unsigned slice_elements = (width * height) / (8*8);

	/* Each element of CMASK is a nibble. */
	unsigned slice_bytes = slice_elements / 2;

	out->slice_tile_max = (width * height) / (128*128);
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->alignment = MAX2(256, base_align);
	out->size = util_num_layers(&rtex->resource.b.b, 0) *
		    align(slice_bytes, base_align);
}
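
/* Worked example (hypothetical 512x512 texture, num_pipes=4, 256-byte pipe
 * interleave): cl_width=32 and cl_height=32, so width = align(512, 256) = 512
 * and height = align(512, 256) = 512; slice_elements = (512*512)/64 = 4096
 * nibbles = 2048 bytes; base_align = 4*256 = 1024, so a single layer takes
 * align(2048, 1024) = 2048 bytes of CMASK. */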

static void si_texture_allocate_cmask(struct si_screen *sscreen,
				      struct r600_texture *rtex)
{
	si_texture_get_cmask_info(sscreen, rtex, &rtex->cmask);

	rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);
	rtex->size = rtex->cmask.offset + rtex->cmask.size;

	rtex->cb_color_info |= S_028C70_FAST_CLEAR(1);
}

static void si_texture_get_htile_size(struct si_screen *sscreen,
				      struct r600_texture *rtex)
{
	unsigned cl_width, cl_height, width, height;
	unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
	unsigned num_pipes = sscreen->info.num_tile_pipes;

	assert(sscreen->info.chip_class <= VI);

	rtex->surface.htile_size = 0;

	/* HTILE is broken with 1D tiling on old kernels and CIK. */
	if (sscreen->info.chip_class >= CIK &&
	    rtex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
	    sscreen->info.drm_major == 2 && sscreen->info.drm_minor < 38)
		return;

	/* Overalign HTILE on P2 configs to work around GPU hangs in
	 * piglit/depthstencil-render-miplevels 585.
	 *
	 * This has been confirmed to help Kabini & Stoney, where the hangs
	 * are always reproducible. I think I have seen the test hang
	 * on Carrizo too, though it was very rare there.
	 */
	if (sscreen->info.chip_class >= CIK && num_pipes < 4)
		num_pipes = 4;

	switch (num_pipes) {
	case 1:
		cl_width = 32;
		cl_height = 16;
		break;
	case 2:
		cl_width = 32;
		cl_height = 32;
		break;
	case 4:
		cl_width = 64;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 64;
		break;
	case 16:
		cl_width = 128;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	width = align(rtex->resource.b.b.width0, cl_width * 8);
	height = align(rtex->resource.b.b.height0, cl_height * 8);

	slice_elements = (width * height) / (8 * 8);
	slice_bytes = slice_elements * 4;

	pipe_interleave_bytes = sscreen->info.pipe_interleave_bytes;
	base_align = num_pipes * pipe_interleave_bytes;

	rtex->surface.htile_alignment = base_align;
	rtex->surface.htile_size =
		util_num_layers(&rtex->resource.b.b, 0) *
		align(slice_bytes, base_align);
}
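
/* Worked example (hypothetical 1920x1080 depth buffer, num_pipes=8, 256-byte
 * pipe interleave): cl_width=64 and cl_height=64, so width = align(1920, 512)
 * = 2048 and height = align(1080, 512) = 1536; slice_elements =
 * (2048*1536)/64 = 49152, slice_bytes = 49152*4 = 196608; base_align =
 * 8*256 = 2048, so one layer takes align(196608, 2048) = 196608 bytes. */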

static void si_texture_allocate_htile(struct si_screen *sscreen,
				      struct r600_texture *rtex)
{
	if (sscreen->info.chip_class <= VI && !rtex->tc_compatible_htile)
		si_texture_get_htile_size(sscreen, rtex);

	if (!rtex->surface.htile_size)
		return;

	rtex->htile_offset = align(rtex->size, rtex->surface.htile_alignment);
	rtex->size = rtex->htile_offset + rtex->surface.htile_size;
}

void si_print_texture_info(struct si_screen *sscreen,
			   struct r600_texture *rtex, struct u_log_context *log)
{
	int i;

	/* Common parameters. */
	u_log_printf(log, " Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
		"blk_h=%u, array_size=%u, last_level=%u, "
		"bpe=%u, nsamples=%u, flags=0x%x, %s\n",
		rtex->resource.b.b.width0, rtex->resource.b.b.height0,
		rtex->resource.b.b.depth0, rtex->surface.blk_w,
		rtex->surface.blk_h,
		rtex->resource.b.b.array_size, rtex->resource.b.b.last_level,
		rtex->surface.bpe, rtex->resource.b.b.nr_samples,
		rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));

	if (sscreen->info.chip_class >= GFX9) {
		u_log_printf(log, " Surf: size=%"PRIu64", slice_size=%"PRIu64", "
			"alignment=%u, swmode=%u, epitch=%u, pitch=%u\n",
			rtex->surface.surf_size,
			rtex->surface.u.gfx9.surf_slice_size,
			rtex->surface.surf_alignment,
			rtex->surface.u.gfx9.surf.swizzle_mode,
			rtex->surface.u.gfx9.surf.epitch,
			rtex->surface.u.gfx9.surf_pitch);

		if (rtex->fmask.size) {
			u_log_printf(log, " FMASK: offset=%"PRIu64", size=%"PRIu64", "
				"alignment=%u, swmode=%u, epitch=%u\n",
				rtex->fmask.offset,
				rtex->surface.u.gfx9.fmask_size,
				rtex->surface.u.gfx9.fmask_alignment,
				rtex->surface.u.gfx9.fmask.swizzle_mode,
				rtex->surface.u.gfx9.fmask.epitch);
		}

		if (rtex->cmask.size) {
			u_log_printf(log, " CMask: offset=%"PRIu64", size=%"PRIu64", "
				"alignment=%u, rb_aligned=%u, pipe_aligned=%u\n",
				rtex->cmask.offset,
				rtex->surface.u.gfx9.cmask_size,
				rtex->surface.u.gfx9.cmask_alignment,
				rtex->surface.u.gfx9.cmask.rb_aligned,
				rtex->surface.u.gfx9.cmask.pipe_aligned);
		}

		if (rtex->htile_offset) {
			u_log_printf(log, " HTile: offset=%"PRIu64", size=%u, alignment=%u, "
				"rb_aligned=%u, pipe_aligned=%u\n",
				rtex->htile_offset,
				rtex->surface.htile_size,
				rtex->surface.htile_alignment,
				rtex->surface.u.gfx9.htile.rb_aligned,
				rtex->surface.u.gfx9.htile.pipe_aligned);
		}

		if (rtex->dcc_offset) {
			u_log_printf(log, " DCC: offset=%"PRIu64", size=%u, "
				"alignment=%u, pitch_max=%u, num_dcc_levels=%u\n",
				rtex->dcc_offset, rtex->surface.dcc_size,
				rtex->surface.dcc_alignment,
				rtex->surface.u.gfx9.dcc_pitch_max,
				rtex->surface.num_dcc_levels);
		}

		if (rtex->surface.u.gfx9.stencil_offset) {
			u_log_printf(log, " Stencil: offset=%"PRIu64", swmode=%u, epitch=%u\n",
				rtex->surface.u.gfx9.stencil_offset,
				rtex->surface.u.gfx9.stencil.swizzle_mode,
				rtex->surface.u.gfx9.stencil.epitch);
		}
		return;
	}

	u_log_printf(log, " Layout: size=%"PRIu64", alignment=%u, bankw=%u, "
		"bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
		rtex->surface.surf_size, rtex->surface.surf_alignment, rtex->surface.u.legacy.bankw,
		rtex->surface.u.legacy.bankh, rtex->surface.u.legacy.num_banks, rtex->surface.u.legacy.mtilea,
		rtex->surface.u.legacy.tile_split, rtex->surface.u.legacy.pipe_config,
		(rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);

	if (rtex->fmask.size)
		u_log_printf(log, " FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
			"bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
			rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
			rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
			rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);

	if (rtex->cmask.size)
		u_log_printf(log, " CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
			"slice_tile_max=%u\n",
			rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
			rtex->cmask.slice_tile_max);

	if (rtex->htile_offset)
		u_log_printf(log, " HTile: offset=%"PRIu64", size=%u, "
			"alignment=%u, TC_compatible = %u\n",
			rtex->htile_offset, rtex->surface.htile_size,
			rtex->surface.htile_alignment,
			rtex->tc_compatible_htile);

	if (rtex->dcc_offset) {
		u_log_printf(log, " DCC: offset=%"PRIu64", size=%u, alignment=%u\n",
			rtex->dcc_offset, rtex->surface.dcc_size,
			rtex->surface.dcc_alignment);
		for (i = 0; i <= rtex->resource.b.b.last_level; i++)
			u_log_printf(log, "  DCCLevel[%i]: enabled=%u, offset=%u, "
				"fast_clear_size=%u\n",
				i, i < rtex->surface.num_dcc_levels,
				rtex->surface.u.legacy.level[i].dcc_offset,
				rtex->surface.u.legacy.level[i].dcc_fast_clear_size);
	}

	for (i = 0; i <= rtex->resource.b.b.last_level; i++)
		u_log_printf(log, " Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
			"npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
			"mode=%u, tiling_index = %u\n",
			i, rtex->surface.u.legacy.level[i].offset,
			(uint64_t)rtex->surface.u.legacy.level[i].slice_size_dw * 4,
			u_minify(rtex->resource.b.b.width0, i),
			u_minify(rtex->resource.b.b.height0, i),
			u_minify(rtex->resource.b.b.depth0, i),
			rtex->surface.u.legacy.level[i].nblk_x,
			rtex->surface.u.legacy.level[i].nblk_y,
			rtex->surface.u.legacy.level[i].mode,
			rtex->surface.u.legacy.tiling_index[i]);

	if (rtex->surface.has_stencil) {
		u_log_printf(log, " StencilLayout: tilesplit=%u\n",
			rtex->surface.u.legacy.stencil_tile_split);
		for (i = 0; i <= rtex->resource.b.b.last_level; i++) {
			u_log_printf(log, " StencilLevel[%i]: offset=%"PRIu64", "
				"slice_size=%"PRIu64", npix_x=%u, "
				"npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
				"mode=%u, tiling_index = %u\n",
				i, rtex->surface.u.legacy.stencil_level[i].offset,
				(uint64_t)rtex->surface.u.legacy.stencil_level[i].slice_size_dw * 4,
				u_minify(rtex->resource.b.b.width0, i),
				u_minify(rtex->resource.b.b.height0, i),
				u_minify(rtex->resource.b.b.depth0, i),
				rtex->surface.u.legacy.stencil_level[i].nblk_x,
				rtex->surface.u.legacy.stencil_level[i].nblk_y,
				rtex->surface.u.legacy.stencil_level[i].mode,
				rtex->surface.u.legacy.stencil_tiling_index[i]);
		}
	}
}

/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
si_texture_create_object(struct pipe_screen *screen,
			 const struct pipe_resource *base,
			 struct pb_buffer *buf,
			 struct radeon_surf *surface)
{
	struct r600_texture *rtex;
	struct r600_resource *resource;
	struct si_screen *sscreen = (struct si_screen*)screen;

	rtex = CALLOC_STRUCT(r600_texture);
	if (!rtex)
		return NULL;

	resource = &rtex->resource;
	resource->b.b = *base;
	resource->b.b.next = NULL;
	resource->b.vtbl = &si_texture_vtbl;
	pipe_reference_init(&resource->b.b.reference, 1);
	resource->b.b.screen = screen;

	/* don't include stencil-only formats which we don't support for rendering */
	rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));

	rtex->surface = *surface;
	rtex->size = rtex->surface.surf_size;

	rtex->tc_compatible_htile = rtex->surface.htile_size != 0 &&
				    (rtex->surface.flags &
				     RADEON_SURF_TC_COMPATIBLE_HTILE);

	/* TC-compatible HTILE:
	 * - VI only supports Z32_FLOAT.
	 * - GFX9 only supports Z32_FLOAT and Z16_UNORM. */
	if (rtex->tc_compatible_htile) {
		if (sscreen->info.chip_class >= GFX9 &&
		    base->format == PIPE_FORMAT_Z16_UNORM)
			rtex->db_render_format = base->format;
		else {
			rtex->db_render_format = PIPE_FORMAT_Z32_FLOAT;
			rtex->upgraded_depth = base->format != PIPE_FORMAT_Z32_FLOAT &&
					       base->format != PIPE_FORMAT_Z32_FLOAT_S8X24_UINT;
		}
	} else {
		rtex->db_render_format = base->format;
	}

	/* Applies to GCN. */
	rtex->last_msaa_resolve_target_micro_mode = rtex->surface.micro_tile_mode;

	/* Disable separate DCC at the beginning. DRI2 doesn't reuse buffers
	 * between frames, so the only thing that can enable separate DCC
	 * with DRI2 is multiple slow clears within a frame.
	 */
	rtex->ps_draw_ratio = 0;

	if (rtex->is_depth) {
		if (sscreen->info.chip_class >= GFX9) {
			rtex->can_sample_z = true;
			rtex->can_sample_s = true;
		} else {
			rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted;
			rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted;
		}

		if (!(base->flags & (SI_RESOURCE_FLAG_TRANSFER |
				     SI_RESOURCE_FLAG_FLUSHED_DEPTH))) {
			rtex->db_compatible = true;

			if (!(sscreen->debug_flags & DBG(NO_HYPERZ)))
				si_texture_allocate_htile(sscreen, rtex);
		}
	} else {
		if (base->nr_samples > 1 &&
		    !buf &&
		    !(sscreen->debug_flags & DBG(NO_FMASK))) {
			si_texture_allocate_fmask(sscreen, rtex);
			si_texture_allocate_cmask(sscreen, rtex);
			rtex->cmask_buffer = &rtex->resource;

			if (!rtex->fmask.size || !rtex->cmask.size) {
				FREE(rtex);
				return NULL;
			}
		}

		/* Shared textures must always set up DCC here.
		 * If it's not present, it will be disabled by
		 * apply_opaque_metadata later.
		 */
		if (rtex->surface.dcc_size &&
		    (buf || !(sscreen->debug_flags & DBG(NO_DCC))) &&
		    !(rtex->surface.flags & RADEON_SURF_SCANOUT)) {
			/* Reserve space for the DCC buffer. */
			rtex->dcc_offset = align64(rtex->size, rtex->surface.dcc_alignment);
			rtex->size = rtex->dcc_offset + rtex->surface.dcc_size;
		}
	}

	/* Now create the backing buffer. */
	if (!buf) {
		si_init_resource_fields(sscreen, resource, rtex->size,
					rtex->surface.surf_alignment);

		if (!si_alloc_resource(sscreen, resource)) {
			FREE(rtex);
			return NULL;
		}
	} else {
		resource->buf = buf;
		resource->gpu_address = sscreen->ws->buffer_get_virtual_address(resource->buf);
		resource->bo_size = buf->size;
		resource->bo_alignment = buf->alignment;
		resource->domains = sscreen->ws->buffer_get_initial_domain(resource->buf);
		if (resource->domains & RADEON_DOMAIN_VRAM)
			resource->vram_usage = buf->size;
		else if (resource->domains & RADEON_DOMAIN_GTT)
			resource->gart_usage = buf->size;
	}

	if (rtex->cmask.size) {
		/* Initialize the cmask to 0xCC (= compressed state). */
		si_screen_clear_buffer(sscreen, &rtex->cmask_buffer->b.b,
				       rtex->cmask.offset, rtex->cmask.size,
				       0xCCCCCCCC);
	}
	if (rtex->htile_offset) {
		uint32_t clear_value = 0;

		if (sscreen->info.chip_class >= GFX9 || rtex->tc_compatible_htile)
			clear_value = 0x0000030F;

		si_screen_clear_buffer(sscreen, &rtex->resource.b.b,
				       rtex->htile_offset,
				       rtex->surface.htile_size,
				       clear_value);
	}
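
	/* Background on the clear values above: 0x0000030F is believed to be
	 * the "fully expanded" HTILE encoding (depth and stencil marked
	 * uncompressed), which TC-compatible HTILE and GFX9 require as the
	 * initial state; plain 0 is only valid for the legacy HTILE layout. */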

	/* Initialize DCC only if the texture is not being imported. */
	if (!buf && rtex->dcc_offset) {
		si_screen_clear_buffer(sscreen, &rtex->resource.b.b,
				       rtex->dcc_offset,
				       rtex->surface.dcc_size,
				       0xFFFFFFFF);
	}

	/* Initialize the CMASK base register value. */
	rtex->cmask.base_address_reg =
		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

	if (sscreen->debug_flags & DBG(VM)) {
		fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
			rtex->resource.gpu_address,
			rtex->resource.gpu_address + rtex->resource.buf->size,
			base->width0, base->height0, util_num_layers(base, 0), base->last_level+1,
			base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
	}

	if (sscreen->debug_flags & DBG(TEX)) {
		puts("Texture:");
		struct u_log_context log;
		u_log_context_init(&log);
		si_print_texture_info(sscreen, rtex, &log);
		u_log_new_page_print(&log, stdout);
		fflush(stdout);
		u_log_context_destroy(&log);
	}

	return rtex;
}

static enum radeon_surf_mode
si_choose_tiling(struct si_screen *sscreen,
		 const struct pipe_resource *templ)
{
	const struct util_format_description *desc = util_format_description(templ->format);
	bool force_tiling = templ->flags & SI_RESOURCE_FLAG_FORCE_TILING;
	bool is_depth_stencil = util_format_is_depth_or_stencil(templ->format) &&
				!(templ->flags & SI_RESOURCE_FLAG_FLUSHED_DEPTH);

	/* MSAA resources must be 2D tiled. */
	if (templ->nr_samples > 1)
		return RADEON_SURF_MODE_2D;

	/* Transfer resources should be linear. */
	if (templ->flags & SI_RESOURCE_FLAG_TRANSFER)
		return RADEON_SURF_MODE_LINEAR_ALIGNED;

	/* Avoid Z/S decompress blits by forcing TC-compatible HTILE on VI,
	 * which requires 2D tiling.
	 */
	if (sscreen->info.chip_class == VI &&
	    is_depth_stencil &&
	    (templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY))
		return RADEON_SURF_MODE_2D;

	/* Handle common candidates for the linear mode.
	 * Compressed textures and DB surfaces must always be tiled.
	 */
	if (!force_tiling &&
	    !is_depth_stencil &&
	    !util_format_is_compressed(templ->format)) {
		if (sscreen->debug_flags & DBG(NO_TILING))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
		if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Cursors are linear on SI.
		 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
		if (templ->bind & PIPE_BIND_CURSOR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		if (templ->bind & PIPE_BIND_LINEAR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures with a very small height are recommended to be linear. */
		if (templ->target == PIPE_TEXTURE_1D ||
		    templ->target == PIPE_TEXTURE_1D_ARRAY ||
		    /* Only very thin and long 2D textures should benefit from
		     * linear_aligned. */
		    (templ->width0 > 8 && templ->height0 <= 2))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures likely to be mapped often. */
		if (templ->usage == PIPE_USAGE_STAGING ||
		    templ->usage == PIPE_USAGE_STREAM)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Make small textures 1D tiled. */
	if (templ->width0 <= 16 || templ->height0 <= 16 ||
	    (sscreen->debug_flags & DBG(NO_2D_TILING)))
		return RADEON_SURF_MODE_1D;

	/* The allocator will switch to 1D if needed. */
	return RADEON_SURF_MODE_2D;
}
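
/* For illustration, a few outcomes of the rules above: a 1024x1024 BCn
 * (compressed) texture is always tiled, a non-depth uncompressed
 * PIPE_USAGE_STAGING texture comes out LINEAR_ALIGNED, and a 16x16 color
 * target drops to 1D tiling via the small-texture rule. */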
1438
1439 struct pipe_resource *si_texture_create(struct pipe_screen *screen,
1440 const struct pipe_resource *templ)
1441 {
1442 struct si_screen *sscreen = (struct si_screen*)screen;
1443 struct radeon_surf surface = {0};
1444 bool is_flushed_depth = templ->flags & SI_RESOURCE_FLAG_FLUSHED_DEPTH;
1445 bool tc_compatible_htile =
1446 sscreen->info.chip_class >= VI &&
1447 (templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY) &&
1448 !(sscreen->debug_flags & DBG(NO_HYPERZ)) &&
1449 !is_flushed_depth &&
1450 templ->nr_samples <= 1 && /* TC-compat HTILE is less efficient with MSAA */
1451 util_format_is_depth_or_stencil(templ->format);
1452
1453 int r;
1454
1455 r = si_init_surface(sscreen, &surface, templ,
1456 si_choose_tiling(sscreen, templ), 0, 0,
1457 false, false, is_flushed_depth,
1458 tc_compatible_htile);
1459 if (r) {
1460 return NULL;
1461 }
1462
1463 return (struct pipe_resource *)
1464 si_texture_create_object(screen, templ, NULL, &surface);
1465 }
1466
1467 static struct pipe_resource *si_texture_from_handle(struct pipe_screen *screen,
1468 const struct pipe_resource *templ,
1469 struct winsys_handle *whandle,
1470 unsigned usage)
1471 {
1472 struct si_screen *sscreen = (struct si_screen*)screen;
1473 struct pb_buffer *buf = NULL;
1474 unsigned stride = 0, offset = 0;
1475 enum radeon_surf_mode array_mode;
1476 struct radeon_surf surface = {};
1477 int r;
1478 struct radeon_bo_metadata metadata = {};
1479 struct r600_texture *rtex;
1480 bool is_scanout;
1481
1482 /* Support only 2D textures without mipmaps */
1483 if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
1484 templ->depth0 != 1 || templ->last_level != 0)
1485 return NULL;
1486
1487 buf = sscreen->ws->buffer_from_handle(sscreen->ws, whandle, &stride, &offset);
1488 if (!buf)
1489 return NULL;
1490
1491 sscreen->ws->buffer_get_metadata(buf, &metadata);
1492 si_surface_import_metadata(sscreen, &surface, &metadata,
1493 &array_mode, &is_scanout);
1494
1495 r = si_init_surface(sscreen, &surface, templ, array_mode, stride,
1496 offset, true, is_scanout, false, false);
1497 if (r) {
1498 return NULL;
1499 }
1500
1501 rtex = si_texture_create_object(screen, templ, buf, &surface);
1502 if (!rtex)
1503 return NULL;
1504
1505 rtex->resource.b.is_shared = true;
1506 rtex->resource.external_usage = usage;
1507
1508 si_apply_opaque_metadata(sscreen, rtex, &metadata);
1509
1510 assert(rtex->surface.tile_swizzle == 0);
1511 return &rtex->resource.b.b;
1512 }
1513
1514 bool si_init_flushed_depth_texture(struct pipe_context *ctx,
1515 struct pipe_resource *texture,
1516 struct r600_texture **staging)
1517 {
1518 struct r600_texture *rtex = (struct r600_texture*)texture;
1519 struct pipe_resource resource;
1520 struct r600_texture **flushed_depth_texture = staging ?
1521 staging : &rtex->flushed_depth_texture;
1522 enum pipe_format pipe_format = texture->format;
1523
1524 if (!staging) {
1525 if (rtex->flushed_depth_texture)
1526 return true; /* it's ready */
1527
1528 if (!rtex->can_sample_z && rtex->can_sample_s) {
1529 switch (pipe_format) {
1530 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
1531 /* Save memory by not allocating the S plane. */
1532 pipe_format = PIPE_FORMAT_Z32_FLOAT;
1533 break;
1534 case PIPE_FORMAT_Z24_UNORM_S8_UINT:
1535 case PIPE_FORMAT_S8_UINT_Z24_UNORM:
1536 /* Save memory bandwidth by not copying the
1537 * stencil part during flush.
1538 *
1539 * This potentially increases memory bandwidth
1540 * if an application uses both Z and S texturing
1541 * simultaneously (a flushed Z24S8 texture
1542 * would be stored compactly), but how often
1543 * does that really happen?
1544 */
1545 pipe_format = PIPE_FORMAT_Z24X8_UNORM;
1546 break;
1547 default:;
1548 }
1549 } else if (!rtex->can_sample_s && rtex->can_sample_z) {
1550 assert(util_format_has_stencil(util_format_description(pipe_format)));
1551
1552 /* DB->CB copies to an 8bpp surface don't work. */
1553 pipe_format = PIPE_FORMAT_X24S8_UINT;
1554 }
1555 }
1556
1557 memset(&resource, 0, sizeof(resource));
1558 resource.target = texture->target;
1559 resource.format = pipe_format;
1560 resource.width0 = texture->width0;
1561 resource.height0 = texture->height0;
1562 resource.depth0 = texture->depth0;
1563 resource.array_size = texture->array_size;
1564 resource.last_level = texture->last_level;
1565 resource.nr_samples = texture->nr_samples;
1566 resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
1567 resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
1568 resource.flags = texture->flags | SI_RESOURCE_FLAG_FLUSHED_DEPTH;
1569
1570 if (staging)
1571 resource.flags |= SI_RESOURCE_FLAG_TRANSFER;
1572
1573 *flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
1574 if (*flushed_depth_texture == NULL) {
1575 PRINT_ERR("failed to create temporary texture to hold flushed depth\n");
1576 return false;
1577 }
1578 return true;
1579 }
1580
1581 /**
1582 * Initialize the pipe_resource descriptor to be of the same size as the box,
1583 * which is supposed to hold a subregion of the texture "orig" at the given
1584 * mipmap level.
1585 */
1586 static void si_init_temp_resource_from_box(struct pipe_resource *res,
1587 struct pipe_resource *orig,
1588 const struct pipe_box *box,
1589 unsigned level, unsigned flags)
1590 {
1591 memset(res, 0, sizeof(*res));
1592 res->format = orig->format;
1593 res->width0 = box->width;
1594 res->height0 = box->height;
1595 res->depth0 = 1;
1596 res->array_size = 1;
1597 res->usage = flags & SI_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
1598 res->flags = flags;
1599
1600 /* We must set the correct texture target and dimensions for a 3D box. */
1601 if (box->depth > 1 && util_max_layer(orig, level) > 0) {
1602 res->target = PIPE_TEXTURE_2D_ARRAY;
1603 res->array_size = box->depth;
1604 } else {
1605 res->target = PIPE_TEXTURE_2D;
1606 }
1607 }
1608
1609 static bool si_can_invalidate_texture(struct si_screen *sscreen,
1610 struct r600_texture *rtex,
1611 unsigned transfer_usage,
1612 const struct pipe_box *box)
1613 {
1614 return !rtex->resource.b.is_shared &&
1615 !(transfer_usage & PIPE_TRANSFER_READ) &&
1616 rtex->resource.b.b.last_level == 0 &&
1617 util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
1618 box->x, box->y, box->z,
1619 box->width, box->height,
1620 box->depth);
1621 }
1622
1623 static void si_texture_invalidate_storage(struct si_context *sctx,
1624 struct r600_texture *rtex)
1625 {
1626 struct si_screen *sscreen = sctx->screen;
1627
1628 /* There is no point in discarding depth and tiled buffers. */
1629 assert(!rtex->is_depth);
1630 assert(rtex->surface.is_linear);
1631
1632 /* Reallocate the buffer in the same pipe_resource. */
1633 si_alloc_resource(sscreen, &rtex->resource);
1634
1635 /* Initialize the CMASK base address (needed even without CMASK). */
1636 rtex->cmask.base_address_reg =
1637 (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
1638
1639 p_atomic_inc(&sscreen->dirty_tex_counter);
1640
1641 sctx->b.num_alloc_tex_transfer_bytes += rtex->size;
1642 }
1643
1644 static void *si_texture_transfer_map(struct pipe_context *ctx,
1645 struct pipe_resource *texture,
1646 unsigned level,
1647 unsigned usage,
1648 const struct pipe_box *box,
1649 struct pipe_transfer **ptransfer)
1650 {
1651 struct si_context *sctx = (struct si_context*)ctx;
1652 struct r600_texture *rtex = (struct r600_texture*)texture;
1653 struct r600_transfer *trans;
1654 struct r600_resource *buf;
1655 unsigned offset = 0;
1656 char *map;
1657 bool use_staging_texture = false;
1658
1659 assert(!(texture->flags & SI_RESOURCE_FLAG_TRANSFER));
1660 assert(box->width && box->height && box->depth);
1661
1662 /* Depth textures use staging unconditionally. */
1663 if (!rtex->is_depth) {
1664 /* Degrade the tile mode if we get too many transfers on APUs.
1665 * On dGPUs, the staging texture is always faster.
1666 * Only count uploads that are at least 4x4 pixels large.
1667 */
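		/* The counter test fires exactly when the 10th level-0 transfer
		 * occurs, so the reallocation below happens at most once per
		 * texture. */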
1668 if (!sctx->screen->info.has_dedicated_vram &&
1669 level == 0 &&
1670 box->width >= 4 && box->height >= 4 &&
1671 p_atomic_inc_return(&rtex->num_level0_transfers) == 10) {
1672 bool can_invalidate =
1673 si_can_invalidate_texture(sctx->screen, rtex,
1674 usage, box);
1675
1676 si_reallocate_texture_inplace(sctx, rtex,
1677 PIPE_BIND_LINEAR,
1678 can_invalidate);
1679 }
1680
1681 /* Tiled textures need to be converted into a linear texture for CPU
1682 * access. The staging texture is always linear and is placed in GART.
1683 *
1684 * Reading from VRAM or GTT WC is slow, always use the staging
1685 * texture in this case.
1686 *
1687 * Use the staging texture for uploads if the underlying BO
1688 * is busy.
1689 */
1690 if (!rtex->surface.is_linear)
1691 use_staging_texture = true;
1692 else if (usage & PIPE_TRANSFER_READ)
1693 use_staging_texture =
1694 rtex->resource.domains & RADEON_DOMAIN_VRAM ||
1695 rtex->resource.flags & RADEON_FLAG_GTT_WC;
1696 /* Write & linear only: */
1697 else if (si_rings_is_buffer_referenced(sctx, rtex->resource.buf,
1698 RADEON_USAGE_READWRITE) ||
1699 !sctx->b.ws->buffer_wait(rtex->resource.buf, 0,
1700 RADEON_USAGE_READWRITE)) {
1701 /* It's busy. */
1702 if (si_can_invalidate_texture(sctx->screen, rtex,
1703 usage, box))
1704 si_texture_invalidate_storage(sctx, rtex);
1705 else
1706 use_staging_texture = true;
1707 }
1708 }
1709
1710 trans = CALLOC_STRUCT(r600_transfer);
1711 if (!trans)
1712 return NULL;
1713 pipe_resource_reference(&trans->b.b.resource, texture);
1714 trans->b.b.level = level;
1715 trans->b.b.usage = usage;
1716 trans->b.b.box = *box;
1717
1718 if (rtex->is_depth) {
1719 struct r600_texture *staging_depth;
1720
1721 if (rtex->resource.b.b.nr_samples > 1) {
1722 /* MSAA depth buffers need to be converted to single sample buffers.
1723 *
1724 * Mapping MSAA depth buffers can occur if ReadPixels is called
1725 * with a multisample GLX visual.
1726 *
1727 * First downsample the depth buffer to a temporary texture,
1728 * then decompress the temporary one to staging.
1729 *
1730 			 * Only the region being mapped is transferred.
1731 */
1732 struct pipe_resource resource;
1733
1734 si_init_temp_resource_from_box(&resource, texture, box, level, 0);
1735
1736 if (!si_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
1737 PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
1738 FREE(trans);
1739 return NULL;
1740 }
1741
1742 if (usage & PIPE_TRANSFER_READ) {
1743 struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
1744 if (!temp) {
1745 PRINT_ERR("failed to create a temporary depth texture\n");
1746 FREE(trans);
1747 return NULL;
1748 }
1749
1750 si_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
1751 si_blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
1752 0, 0, 0, box->depth, 0, 0);
1753 pipe_resource_reference(&temp, NULL);
1754 }
1755
1756 /* Just get the strides. */
1757 si_texture_get_offset(sctx->screen, staging_depth, level, NULL,
1758 &trans->b.b.stride,
1759 &trans->b.b.layer_stride);
1760 } else {
1761 /* XXX: only readback the rectangle which is being mapped? */
1762 /* XXX: when discard is true, no need to read back from depth texture */
1763 if (!si_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
1764 PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
1765 FREE(trans);
1766 return NULL;
1767 }
1768
1769 si_blit_decompress_depth(ctx, rtex, staging_depth,
1770 level, level,
1771 box->z, box->z + box->depth - 1,
1772 0, 0);
1773
1774 offset = si_texture_get_offset(sctx->screen, staging_depth,
1775 level, box,
1776 &trans->b.b.stride,
1777 &trans->b.b.layer_stride);
1778 }
1779
1780 trans->staging = (struct r600_resource*)staging_depth;
1781 buf = trans->staging;
1782 } else if (use_staging_texture) {
1783 struct pipe_resource resource;
1784 struct r600_texture *staging;
1785
1786 si_init_temp_resource_from_box(&resource, texture, box, level,
1787 SI_RESOURCE_FLAG_TRANSFER);
1788 resource.usage = (usage & PIPE_TRANSFER_READ) ?
1789 PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
1790
1791 /* Create the temporary texture. */
1792 staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
1793 if (!staging) {
1794 PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
1795 FREE(trans);
1796 return NULL;
1797 }
1798 trans->staging = &staging->resource;
1799
1800 /* Just get the strides. */
1801 si_texture_get_offset(sctx->screen, staging, 0, NULL,
1802 &trans->b.b.stride,
1803 &trans->b.b.layer_stride);
1804
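		/* For reads, fill the staging texture from the tiled texture
		 * first. For writes, the staging texture was just created and
		 * is idle, so it can be mapped unsynchronized. */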
1805 if (usage & PIPE_TRANSFER_READ)
1806 si_copy_to_staging_texture(ctx, trans);
1807 else
1808 usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
1809
1810 buf = trans->staging;
1811 } else {
1812 /* the resource is mapped directly */
1813 offset = si_texture_get_offset(sctx->screen, rtex, level, box,
1814 &trans->b.b.stride,
1815 &trans->b.b.layer_stride);
1816 buf = &rtex->resource;
1817 }
1818
1819 if (!(map = si_buffer_map_sync_with_rings(sctx, buf, usage))) {
1820 r600_resource_reference(&trans->staging, NULL);
1821 FREE(trans);
1822 return NULL;
1823 }
1824
1825 *ptransfer = &trans->b.b;
1826 return map + offset;
1827 }
1828
1829 static void si_texture_transfer_unmap(struct pipe_context *ctx,
1830 struct pipe_transfer* transfer)
1831 {
1832 struct si_context *sctx = (struct si_context*)ctx;
1833 struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
1834 struct pipe_resource *texture = transfer->resource;
1835 struct r600_texture *rtex = (struct r600_texture*)texture;
1836
1837 if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
1838 if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
1839 ctx->resource_copy_region(ctx, texture, transfer->level,
1840 transfer->box.x, transfer->box.y, transfer->box.z,
1841 &rtransfer->staging->b.b, transfer->level,
1842 &transfer->box);
1843 } else {
1844 si_copy_from_staging_texture(ctx, rtransfer);
1845 }
1846 }
1847
1848 if (rtransfer->staging) {
1849 sctx->b.num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size;
1850 r600_resource_reference(&rtransfer->staging, NULL);
1851 }
1852
1853 /* Heuristic for {upload, draw, upload, draw, ..}:
1854 *
1855 * Flush the gfx IB if we've allocated too much texture storage.
1856 *
1857 	 * The idea is twofold: we don't want to build IBs that use too much
1858 	 * memory and put pressure on the kernel memory manager, and we want
1859 	 * temporary and invalidated buffers to go idle ASAP to decrease the
1860 	 * total memory usage and make them reusable. The actual memory usage
1861 	 * will be slightly higher than computed here because of the buffer
1862 	 * cache in the winsys.
1863 *
1864 * The result is that the kernel memory manager is never a bottleneck.
1865 */
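	/* E.g. with a 4 GiB GART, the gfx IB is flushed once transfers have
	 * allocated more than 1 GiB of temporary texture storage. */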
1866 if (sctx->b.num_alloc_tex_transfer_bytes > sctx->screen->info.gart_size / 4) {
1867 si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
1868 sctx->b.num_alloc_tex_transfer_bytes = 0;
1869 }
1870
1871 pipe_resource_reference(&transfer->resource, NULL);
1872 FREE(transfer);
1873 }
1874
1875 static const struct u_resource_vtbl si_texture_vtbl =
1876 {
1877 NULL, /* get_handle */
1878 si_texture_destroy, /* resource_destroy */
1879 si_texture_transfer_map, /* transfer_map */
1880 u_default_transfer_flush_region, /* transfer_flush_region */
1881 si_texture_transfer_unmap, /* transfer_unmap */
1882 };
1883
1884 /* DCC channel type categories within which formats can be reinterpreted
1885 * while keeping the same DCC encoding. The swizzle must also match. */
1886 enum dcc_channel_type {
1887 dcc_channel_float,
1888 /* uint and sint can be merged if we never use TC-compatible DCC clear
1889 * encoding with the clear value of 1. */
1890 dcc_channel_uint,
1891 dcc_channel_sint,
1892 dcc_channel_uint_10_10_10_2,
1893 dcc_channel_incompatible,
1894 };
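/* E.g. R16G16B16A16_FLOAT maps to dcc_channel_float, R8G8B8A8_UNORM to
 * dcc_channel_uint, and R10G10B10A2_UNORM to dcc_channel_uint_10_10_10_2. */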
1895
1896 /* Return the type of DCC encoding. */
1897 static enum dcc_channel_type
1898 vi_get_dcc_channel_type(const struct util_format_description *desc)
1899 {
1900 int i;
1901
1902 /* Find the first non-void channel. */
1903 for (i = 0; i < desc->nr_channels; i++)
1904 if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID)
1905 break;
1906 if (i == desc->nr_channels)
1907 return dcc_channel_incompatible;
1908
1909 switch (desc->channel[i].size) {
1910 case 32:
1911 case 16:
1912 case 8:
1913 if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT)
1914 return dcc_channel_float;
1915 if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED)
1916 return dcc_channel_uint;
1917 return dcc_channel_sint;
1918 case 10:
1919 return dcc_channel_uint_10_10_10_2;
1920 default:
1921 return dcc_channel_incompatible;
1922 }
1923 }
1924
1925 /* Return whether it's allowed to reinterpret one format as another with DCC enabled. */
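/* E.g. R32_FLOAT can't be reinterpreted as R32_UINT (float vs uint
 * encodings), while R8G8B8A8_UNORM and R8G8B8A8_SRGB share both the
 * dcc_channel_uint encoding and the swizzle, so they are compatible. */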
1926 bool vi_dcc_formats_compatible(enum pipe_format format1,
1927 enum pipe_format format2)
1928 {
1929 const struct util_format_description *desc1, *desc2;
1930 enum dcc_channel_type type1, type2;
1931 int i;
1932
1933 if (format1 == format2)
1934 return true;
1935
1936 desc1 = util_format_description(format1);
1937 desc2 = util_format_description(format2);
1938
1939 if (desc1->nr_channels != desc2->nr_channels)
1940 return false;
1941
1942 /* Swizzles must be the same. */
1943 for (i = 0; i < desc1->nr_channels; i++)
1944 if (desc1->swizzle[i] <= PIPE_SWIZZLE_W &&
1945 desc2->swizzle[i] <= PIPE_SWIZZLE_W &&
1946 desc1->swizzle[i] != desc2->swizzle[i])
1947 return false;
1948
1949 type1 = vi_get_dcc_channel_type(desc1);
1950 type2 = vi_get_dcc_channel_type(desc2);
1951
1952 return type1 != dcc_channel_incompatible &&
1953 type2 != dcc_channel_incompatible &&
1954 type1 == type2;
1955 }
1956
1957 bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex,
1958 unsigned level,
1959 enum pipe_format view_format)
1960 {
1961 struct r600_texture *rtex = (struct r600_texture *)tex;
1962
1963 return vi_dcc_enabled(rtex, level) &&
1964 !vi_dcc_formats_compatible(tex->format, view_format);
1965 }
1966
1967 /* This can't be merged with the above function, because
1968 * vi_dcc_formats_compatible should be called only when DCC is enabled. */
1969 void vi_disable_dcc_if_incompatible_format(struct si_context *sctx,
1970 struct pipe_resource *tex,
1971 unsigned level,
1972 enum pipe_format view_format)
1973 {
1974 struct r600_texture *rtex = (struct r600_texture *)tex;
1975
1976 if (vi_dcc_formats_are_incompatible(tex, level, view_format))
1977 if (!si_texture_disable_dcc(sctx, (struct r600_texture*)tex))
1978 si_decompress_dcc(sctx, rtex);
1979 }
1980
1981 struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
1982 struct pipe_resource *texture,
1983 const struct pipe_surface *templ,
1984 unsigned width0, unsigned height0,
1985 unsigned width, unsigned height)
1986 {
1987 struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
1988
1989 if (!surface)
1990 return NULL;
1991
1992 assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
1993 assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));
1994
1995 pipe_reference_init(&surface->base.reference, 1);
1996 pipe_resource_reference(&surface->base.texture, texture);
1997 surface->base.context = pipe;
1998 surface->base.format = templ->format;
1999 surface->base.width = width;
2000 surface->base.height = height;
2001 surface->base.u = templ->u;
2002
2003 surface->width0 = width0;
2004 surface->height0 = height0;
2005
2006 surface->dcc_incompatible =
2007 texture->target != PIPE_BUFFER &&
2008 vi_dcc_formats_are_incompatible(texture, templ->u.tex.level,
2009 templ->format);
2010 return &surface->base;
2011 }
2012
2013 static struct pipe_surface *si_create_surface(struct pipe_context *pipe,
2014 struct pipe_resource *tex,
2015 const struct pipe_surface *templ)
2016 {
2017 unsigned level = templ->u.tex.level;
2018 unsigned width = u_minify(tex->width0, level);
2019 unsigned height = u_minify(tex->height0, level);
2020 unsigned width0 = tex->width0;
2021 unsigned height0 = tex->height0;
2022
2023 if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
2024 const struct util_format_description *tex_desc
2025 = util_format_description(tex->format);
2026 const struct util_format_description *templ_desc
2027 = util_format_description(templ->format);
2028
2029 assert(tex_desc->block.bits == templ_desc->block.bits);
2030
2031 /* Adjust size of surface if and only if the block width or
2032 * height is changed. */
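		/* E.g. viewing a BC1 texture (4x4 pixel blocks, 64 bits) through
		 * a 1x1-block 64-bit format such as R32G32_UINT: the view
		 * dimensions are converted from pixels to block counts. */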
2033 if (tex_desc->block.width != templ_desc->block.width ||
2034 tex_desc->block.height != templ_desc->block.height) {
2035 unsigned nblks_x = util_format_get_nblocksx(tex->format, width);
2036 unsigned nblks_y = util_format_get_nblocksy(tex->format, height);
2037
2038 width = nblks_x * templ_desc->block.width;
2039 height = nblks_y * templ_desc->block.height;
2040
2041 width0 = util_format_get_nblocksx(tex->format, width0);
2042 height0 = util_format_get_nblocksy(tex->format, height0);
2043 }
2044 }
2045
2046 return si_create_surface_custom(pipe, tex, templ,
2047 width0, height0,
2048 width, height);
2049 }
2050
2051 static void si_surface_destroy(struct pipe_context *pipe,
2052 struct pipe_surface *surface)
2053 {
2054 pipe_resource_reference(&surface->texture, NULL);
2055 FREE(surface);
2056 }
2057
2058 unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap)
2059 {
2060 const struct util_format_description *desc = util_format_description(format);
2061
2062 #define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)
2063
2064 if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
2065 return V_028C70_SWAP_STD;
2066
2067 if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
2068 return ~0U;
2069
2070 switch (desc->nr_channels) {
2071 case 1:
2072 if (HAS_SWIZZLE(0,X))
2073 return V_028C70_SWAP_STD; /* X___ */
2074 else if (HAS_SWIZZLE(3,X))
2075 return V_028C70_SWAP_ALT_REV; /* ___X */
2076 break;
2077 case 2:
2078 if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
2079 (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
2080 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
2081 return V_028C70_SWAP_STD; /* XY__ */
2082 else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
2083 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
2084 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
2085 /* YX__ */
2086 return (do_endian_swap ? V_028C70_SWAP_STD : V_028C70_SWAP_STD_REV);
2087 else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
2088 return V_028C70_SWAP_ALT; /* X__Y */
2089 else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
2090 return V_028C70_SWAP_ALT_REV; /* Y__X */
2091 break;
2092 case 3:
2093 if (HAS_SWIZZLE(0,X))
2094 return (do_endian_swap ? V_028C70_SWAP_STD_REV : V_028C70_SWAP_STD);
2095 else if (HAS_SWIZZLE(0,Z))
2096 return V_028C70_SWAP_STD_REV; /* ZYX */
2097 break;
2098 case 4:
2099 	/* Check the middle channels; the 1st and 4th channels can be NONE. */
2100 if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
2101 return V_028C70_SWAP_STD; /* XYZW */
2102 } else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
2103 return V_028C70_SWAP_STD_REV; /* WZYX */
2104 } else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
2105 return V_028C70_SWAP_ALT; /* ZYXW */
2106 } else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
2107 /* YZWX */
2108 if (desc->is_array)
2109 return V_028C70_SWAP_ALT_REV;
2110 else
2111 return (do_endian_swap ? V_028C70_SWAP_ALT : V_028C70_SWAP_ALT_REV);
2112 }
2113 break;
2114 }
2115 return ~0U;
2116 }
2117
2118 /* PIPELINE_STAT-BASED DCC ENABLEMENT FOR DISPLAYABLE SURFACES */
2119
2120 static void vi_dcc_clean_up_context_slot(struct si_context *sctx,
2121 int slot)
2122 {
2123 int i;
2124
2125 if (sctx->b.dcc_stats[slot].query_active)
2126 vi_separate_dcc_stop_query(sctx,
2127 sctx->b.dcc_stats[slot].tex);
2128
2129 for (i = 0; i < ARRAY_SIZE(sctx->b.dcc_stats[slot].ps_stats); i++)
2130 if (sctx->b.dcc_stats[slot].ps_stats[i]) {
2131 sctx->b.b.destroy_query(&sctx->b.b,
2132 sctx->b.dcc_stats[slot].ps_stats[i]);
2133 sctx->b.dcc_stats[slot].ps_stats[i] = NULL;
2134 }
2135
2136 r600_texture_reference(&sctx->b.dcc_stats[slot].tex, NULL);
2137 }
2138
2139 /**
2140 * Return the per-context slot where DCC statistics queries for the texture live.
2141 */
2142 static unsigned vi_get_context_dcc_stats_index(struct si_context *sctx,
2143 struct r600_texture *tex)
2144 {
2145 int i, empty_slot = -1;
2146
2147 /* Remove zombie textures (textures kept alive by this array only). */
2148 for (i = 0; i < ARRAY_SIZE(sctx->b.dcc_stats); i++)
2149 if (sctx->b.dcc_stats[i].tex &&
2150 sctx->b.dcc_stats[i].tex->resource.b.b.reference.count == 1)
2151 vi_dcc_clean_up_context_slot(sctx, i);
2152
2153 /* Find the texture. */
2154 for (i = 0; i < ARRAY_SIZE(sctx->b.dcc_stats); i++) {
2155 /* Return if found. */
2156 if (sctx->b.dcc_stats[i].tex == tex) {
2157 sctx->b.dcc_stats[i].last_use_timestamp = os_time_get();
2158 return i;
2159 }
2160
2161 /* Record the first seen empty slot. */
2162 if (empty_slot == -1 && !sctx->b.dcc_stats[i].tex)
2163 empty_slot = i;
2164 }
2165
2166 /* Not found. Remove the oldest member to make space in the array. */
2167 if (empty_slot == -1) {
2168 int oldest_slot = 0;
2169
2170 /* Find the oldest slot. */
2171 for (i = 1; i < ARRAY_SIZE(sctx->b.dcc_stats); i++)
2172 if (sctx->b.dcc_stats[oldest_slot].last_use_timestamp >
2173 sctx->b.dcc_stats[i].last_use_timestamp)
2174 oldest_slot = i;
2175
2176 /* Clean up the oldest slot. */
2177 vi_dcc_clean_up_context_slot(sctx, oldest_slot);
2178 empty_slot = oldest_slot;
2179 }
2180
2181 /* Add the texture to the new slot. */
2182 r600_texture_reference(&sctx->b.dcc_stats[empty_slot].tex, tex);
2183 sctx->b.dcc_stats[empty_slot].last_use_timestamp = os_time_get();
2184 return empty_slot;
2185 }
2186
2187 static struct pipe_query *
2188 vi_create_resuming_pipestats_query(struct si_context *sctx)
2189 {
2190 struct si_query_hw *query = (struct si_query_hw*)
2191 sctx->b.b.create_query(&sctx->b.b, PIPE_QUERY_PIPELINE_STATISTICS, 0);
2192
2193 query->flags |= SI_QUERY_HW_FLAG_BEGIN_RESUMES;
2194 return (struct pipe_query*)query;
2195 }
2196
2197 /**
2198 * Called when binding a color buffer.
2199 */
2200 void vi_separate_dcc_start_query(struct si_context *sctx,
2201 struct r600_texture *tex)
2202 {
2203 unsigned i = vi_get_context_dcc_stats_index(sctx, tex);
2204
2205 assert(!sctx->b.dcc_stats[i].query_active);
2206
2207 if (!sctx->b.dcc_stats[i].ps_stats[0])
2208 sctx->b.dcc_stats[i].ps_stats[0] = vi_create_resuming_pipestats_query(sctx);
2209
2210 /* begin or resume the query */
2211 sctx->b.b.begin_query(&sctx->b.b, sctx->b.dcc_stats[i].ps_stats[0]);
2212 sctx->b.dcc_stats[i].query_active = true;
2213 }
2214
2215 /**
2216 * Called when unbinding a color buffer.
2217 */
2218 void vi_separate_dcc_stop_query(struct si_context *sctx,
2219 struct r600_texture *tex)
2220 {
2221 unsigned i = vi_get_context_dcc_stats_index(sctx, tex);
2222
2223 assert(sctx->b.dcc_stats[i].query_active);
2224 assert(sctx->b.dcc_stats[i].ps_stats[0]);
2225
2226 /* pause or end the query */
2227 sctx->b.b.end_query(&sctx->b.b, sctx->b.dcc_stats[i].ps_stats[0]);
2228 sctx->b.dcc_stats[i].query_active = false;
2229 }
2230
2231 static bool vi_should_enable_separate_dcc(struct r600_texture *tex)
2232 {
2233 	/* Require at least 5 fullscreen draws (or slow clears) per frame
2234 	 * to enable DCC. */
2235 return tex->ps_draw_ratio + tex->num_slow_clears >= 5;
2236 }
2237
2238 /* Called by fast clear. */
2239 void vi_separate_dcc_try_enable(struct si_context *sctx,
2240 struct r600_texture *tex)
2241 {
2242 /* The intent is to use this with shared displayable back buffers,
2243 	 * but it's not strictly limited to them.
2244 */
2245 if (!tex->resource.b.is_shared ||
2246 !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) ||
2247 tex->resource.b.b.target != PIPE_TEXTURE_2D ||
2248 tex->resource.b.b.last_level > 0 ||
2249 !tex->surface.dcc_size)
2250 return;
2251
2252 if (tex->dcc_offset)
2253 return; /* already enabled */
2254
2255 /* Enable the DCC stat gathering. */
2256 if (!tex->dcc_gather_statistics) {
2257 tex->dcc_gather_statistics = true;
2258 vi_separate_dcc_start_query(sctx, tex);
2259 }
2260
2261 if (!vi_should_enable_separate_dcc(tex))
2262 return; /* stats show that DCC decompression is too expensive */
2263
2264 assert(tex->surface.num_dcc_levels);
2265 assert(!tex->dcc_separate_buffer);
2266
2267 si_texture_discard_cmask(sctx->screen, tex);
2268
2269 /* Get a DCC buffer. */
2270 if (tex->last_dcc_separate_buffer) {
2271 assert(tex->dcc_gather_statistics);
2272 assert(!tex->dcc_separate_buffer);
2273 tex->dcc_separate_buffer = tex->last_dcc_separate_buffer;
2274 tex->last_dcc_separate_buffer = NULL;
2275 } else {
2276 tex->dcc_separate_buffer = (struct r600_resource*)
2277 si_aligned_buffer_create(sctx->b.b.screen,
2278 SI_RESOURCE_FLAG_UNMAPPABLE,
2279 PIPE_USAGE_DEFAULT,
2280 tex->surface.dcc_size,
2281 tex->surface.dcc_alignment);
2282 if (!tex->dcc_separate_buffer)
2283 return;
2284 }
2285
2286 /* dcc_offset is the absolute GPUVM address. */
2287 tex->dcc_offset = tex->dcc_separate_buffer->gpu_address;
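	/* Normally dcc_offset is relative to the texture's own buffer; with
	 * a separate DCC buffer it holds the absolute address instead. */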
2288
2289 	/* No need to flag anything here, since this is called by fast clear,
2290 	 * which already dirties the framebuffer state.
2291 	 */
2292 }
2293
2294 /**
2295 * Called by pipe_context::flush_resource, the place where DCC decompression
2296 * takes place.
2297 */
2298 void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
2299 struct r600_texture *tex)
2300 {
2301 struct si_context *sctx = (struct si_context*)ctx;
2302 struct pipe_query *tmp;
2303 unsigned i = vi_get_context_dcc_stats_index(sctx, tex);
2304 bool query_active = sctx->b.dcc_stats[i].query_active;
2305 bool disable = false;
2306
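	/* ps_stats acts as a queue of 3 queries: [0] is currently recording,
	 * and [2] was begun two calls ago (typically two frames), so reading
	 * its result below is unlikely to stall. */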
2307 if (sctx->b.dcc_stats[i].ps_stats[2]) {
2308 union pipe_query_result result;
2309
2310 /* Read the results. */
2311 ctx->get_query_result(ctx, sctx->b.dcc_stats[i].ps_stats[2],
2312 true, &result);
2313 si_query_hw_reset_buffers(sctx,
2314 (struct si_query_hw*)
2315 sctx->b.dcc_stats[i].ps_stats[2]);
2316
2317 /* Compute the approximate number of fullscreen draws. */
2318 tex->ps_draw_ratio =
2319 result.pipeline_statistics.ps_invocations /
2320 (tex->resource.b.b.width0 * tex->resource.b.b.height0);
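		/* E.g. at 1920x1080, ~4.15M ps_invocations correspond to a
		 * ratio of 2 fullscreen draws. */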
2321 sctx->b.last_tex_ps_draw_ratio = tex->ps_draw_ratio;
2322
2323 disable = tex->dcc_separate_buffer &&
2324 !vi_should_enable_separate_dcc(tex);
2325 }
2326
2327 tex->num_slow_clears = 0;
2328
2329 /* stop the statistics query for ps_stats[0] */
2330 if (query_active)
2331 vi_separate_dcc_stop_query(sctx, tex);
2332
2333 /* Move the queries in the queue by one. */
2334 tmp = sctx->b.dcc_stats[i].ps_stats[2];
2335 sctx->b.dcc_stats[i].ps_stats[2] = sctx->b.dcc_stats[i].ps_stats[1];
2336 sctx->b.dcc_stats[i].ps_stats[1] = sctx->b.dcc_stats[i].ps_stats[0];
2337 sctx->b.dcc_stats[i].ps_stats[0] = tmp;
2338
2339 /* create and start a new query as ps_stats[0] */
2340 if (query_active)
2341 vi_separate_dcc_start_query(sctx, tex);
2342
2343 if (disable) {
2344 assert(!tex->last_dcc_separate_buffer);
2345 tex->last_dcc_separate_buffer = tex->dcc_separate_buffer;
2346 tex->dcc_separate_buffer = NULL;
2347 tex->dcc_offset = 0;
2348 		/* No need to flag anything here, since this is called after
2349 		 * decompression, which re-sets the framebuffer state.
2350 		 */
2351 }
2352 }
2353
2354 static struct pipe_memory_object *
2355 si_memobj_from_handle(struct pipe_screen *screen,
2356 struct winsys_handle *whandle,
2357 bool dedicated)
2358 {
2359 struct si_screen *sscreen = (struct si_screen*)screen;
2360 struct r600_memory_object *memobj = CALLOC_STRUCT(r600_memory_object);
2361 struct pb_buffer *buf = NULL;
2362 uint32_t stride, offset;
2363
2364 if (!memobj)
2365 return NULL;
2366
2367 buf = sscreen->ws->buffer_from_handle(sscreen->ws, whandle,
2368 &stride, &offset);
2369 if (!buf) {
2370 free(memobj);
2371 return NULL;
2372 }
2373
2374 memobj->b.dedicated = dedicated;
2375 memobj->buf = buf;
2376 memobj->stride = stride;
2377 memobj->offset = offset;
2378
2379 return (struct pipe_memory_object *)memobj;
2380
2381 }
2382
2383 static void
2384 si_memobj_destroy(struct pipe_screen *screen,
2385 struct pipe_memory_object *_memobj)
2386 {
2387 struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
2388
2389 pb_reference(&memobj->buf, NULL);
2390 free(memobj);
2391 }
2392
2393 static struct pipe_resource *
2394 si_texture_from_memobj(struct pipe_screen *screen,
2395 const struct pipe_resource *templ,
2396 struct pipe_memory_object *_memobj,
2397 uint64_t offset)
2398 {
2399 int r;
2400 struct si_screen *sscreen = (struct si_screen*)screen;
2401 struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
2402 struct r600_texture *rtex;
2403 struct radeon_surf surface = {};
2404 struct radeon_bo_metadata metadata = {};
2405 enum radeon_surf_mode array_mode;
2406 bool is_scanout;
2407 struct pb_buffer *buf = NULL;
2408
2409 if (memobj->b.dedicated) {
2410 sscreen->ws->buffer_get_metadata(memobj->buf, &metadata);
2411 si_surface_import_metadata(sscreen, &surface, &metadata,
2412 &array_mode, &is_scanout);
2413 } else {
2414 		/**
2415 		 * The BO metadata is unset for non-dedicated images, so we fall
2416 		 * back to linear. See the answer to question 5 of the
2417 		 * VK_KHX_external_memory spec for some details.
2418 		 *
2419 		 * This case may not work if the surface pitch isn't
2420 		 * correctly aligned by default.
2421 		 *
2422 		 * Supporting it correctly would require multi-image
2423 		 * metadata to be synchronized between radv and radeonsi. The
2424 		 * semantics of associating multiple image metadata with a memory
2425 		 * object on the Vulkan export side are not concretely defined
2426 		 * either.
2427 		 *
2428 		 * All the use cases we are aware of at the moment for memory
2429 		 * objects use dedicated allocations, so let's keep the initial
2430 		 * implementation simple.
2431 		 *
2432 		 * A possible alternative is to attempt to reconstruct the
2433 		 * tiling information when the TexParameter TEXTURE_TILING_EXT
2434 		 * is set.
2435 		 */
2436 array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
2437 is_scanout = false;
2438
2439 }
2440
2441 r = si_init_surface(sscreen, &surface, templ,
2442 array_mode, memobj->stride,
2443 offset, true, is_scanout,
2444 false, false);
2445 if (r)
2446 return NULL;
2447
2448 rtex = si_texture_create_object(screen, templ, memobj->buf, &surface);
2449 if (!rtex)
2450 return NULL;
2451
2452 	/* si_texture_create_object doesn't increment the refcount of
2453 	 * memobj->buf, so increment it here.
2454 	 */
2455 pb_reference(&buf, memobj->buf);
2456
2457 rtex->resource.b.is_shared = true;
2458 rtex->resource.external_usage = PIPE_HANDLE_USAGE_READ_WRITE;
2459
2460 si_apply_opaque_metadata(sscreen, rtex, &metadata);
2461
2462 return &rtex->resource.b.b;
2463 }
2464
2465 static bool si_check_resource_capability(struct pipe_screen *screen,
2466 struct pipe_resource *resource,
2467 unsigned bind)
2468 {
2469 struct r600_texture *tex = (struct r600_texture*)resource;
2470
2471 /* Buffers only support the linear flag. */
2472 if (resource->target == PIPE_BUFFER)
2473 return (bind & ~PIPE_BIND_LINEAR) == 0;
2474
2475 if (bind & PIPE_BIND_LINEAR && !tex->surface.is_linear)
2476 return false;
2477
2478 if (bind & PIPE_BIND_SCANOUT && !tex->surface.is_displayable)
2479 return false;
2480
2481 /* TODO: PIPE_BIND_CURSOR - do we care? */
2482 return true;
2483 }
2484
2485 void si_init_screen_texture_functions(struct si_screen *sscreen)
2486 {
2487 sscreen->b.resource_from_handle = si_texture_from_handle;
2488 sscreen->b.resource_get_handle = si_texture_get_handle;
2489 sscreen->b.resource_from_memobj = si_texture_from_memobj;
2490 sscreen->b.memobj_create_from_handle = si_memobj_from_handle;
2491 sscreen->b.memobj_destroy = si_memobj_destroy;
2492 sscreen->b.check_resource_capability = si_check_resource_capability;
2493 }
2494
2495 void si_init_context_texture_functions(struct si_context *sctx)
2496 {
2497 sctx->b.b.create_surface = si_create_surface;
2498 sctx->b.b.surface_destroy = si_surface_destroy;
2499 }