ac/surface: compute tile swizzle for GFX9
[mesa.git] src/gallium/drivers/radeon/r600_texture.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "radeonsi/si_pipe.h"
25 #include "r600_cs.h"
26 #include "r600_query.h"
27 #include "util/u_format.h"
28 #include "util/u_log.h"
29 #include "util/u_memory.h"
30 #include "util/u_pack_color.h"
31 #include "util/u_resource.h"
32 #include "util/u_surface.h"
33 #include "util/os_time.h"
34 #include <errno.h>
35 #include <inttypes.h>
36 #include "state_tracker/drm_driver.h"
37 #include "amd/common/sid.h"
38
39 static enum radeon_surf_mode
40 r600_choose_tiling(struct si_screen *sscreen,
41 const struct pipe_resource *templ);
42
43
44 bool si_prepare_for_dma_blit(struct r600_common_context *rctx,
45 struct r600_texture *rdst,
46 unsigned dst_level, unsigned dstx,
47 unsigned dsty, unsigned dstz,
48 struct r600_texture *rsrc,
49 unsigned src_level,
50 const struct pipe_box *src_box)
51 {
52 if (!rctx->dma.cs)
53 return false;
54
55 if (rdst->surface.bpe != rsrc->surface.bpe)
56 return false;
57
58 /* MSAA: Blits don't exist in the real world. */
59 if (rsrc->resource.b.b.nr_samples > 1 ||
60 rdst->resource.b.b.nr_samples > 1)
61 return false;
62
63 /* Depth-stencil surfaces:
64 * When dst is linear, the DB->CB copy preserves HTILE.
65 * When dst is tiled, the 3D path must be used to update HTILE.
66 */
67 if (rsrc->is_depth || rdst->is_depth)
68 return false;
69
70 /* DCC as:
71 * src: Use the 3D path. DCC decompression is expensive.
72 * dst: Use the 3D path to compress the pixels with DCC.
73 */
74 if (vi_dcc_enabled(rsrc, src_level) ||
75 vi_dcc_enabled(rdst, dst_level))
76 return false;
77
78 /* CMASK as:
79 * src: Both texture and SDMA paths need decompression. Use SDMA.
80 * dst: If overwriting the whole texture, discard CMASK and use
81 * SDMA. Otherwise, use the 3D path.
82 */
83 if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
84 /* The CMASK clear is only enabled for the first level. */
85 assert(dst_level == 0);
86 if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
87 dstx, dsty, dstz, src_box->width,
88 src_box->height, src_box->depth))
89 return false;
90
91 si_texture_discard_cmask(rctx->screen, rdst);
92 }
93
94 /* All requirements are met. Prepare textures for SDMA. */
95 if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
96 rctx->b.flush_resource(&rctx->b, &rsrc->resource.b.b);
97
98 assert(!(rsrc->dirty_level_mask & (1 << src_level)));
99 assert(!(rdst->dirty_level_mask & (1 << dst_level)));
100
101 return true;
102 }
103
104 /* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
105 static void r600_copy_region_with_blit(struct pipe_context *pipe,
106 struct pipe_resource *dst,
107 unsigned dst_level,
108 unsigned dstx, unsigned dsty, unsigned dstz,
109 struct pipe_resource *src,
110 unsigned src_level,
111 const struct pipe_box *src_box)
112 {
113 struct pipe_blit_info blit;
114
115 memset(&blit, 0, sizeof(blit));
116 blit.src.resource = src;
117 blit.src.format = src->format;
118 blit.src.level = src_level;
119 blit.src.box = *src_box;
120 blit.dst.resource = dst;
121 blit.dst.format = dst->format;
122 blit.dst.level = dst_level;
123 blit.dst.box.x = dstx;
124 blit.dst.box.y = dsty;
125 blit.dst.box.z = dstz;
126 blit.dst.box.width = src_box->width;
127 blit.dst.box.height = src_box->height;
128 blit.dst.box.depth = src_box->depth;
129 blit.mask = util_format_get_mask(src->format) &
130 util_format_get_mask(dst->format);
131 blit.filter = PIPE_TEX_FILTER_NEAREST;
132
133 if (blit.mask) {
134 pipe->blit(pipe, &blit);
135 }
136 }
137
138 /* Copy from a full GPU texture to a transfer's staging one. */
139 static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
140 {
141 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
142 struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
143 struct pipe_resource *dst = &rtransfer->staging->b.b;
144 struct pipe_resource *src = transfer->resource;
145
146 if (src->nr_samples > 1) {
147 r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
148 src, transfer->level, &transfer->box);
149 return;
150 }
151
152 rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
153 &transfer->box);
154 }
155
156 /* Copy from a transfer's staging texture to a full GPU one. */
157 static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
158 {
159 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
160 struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
161 struct pipe_resource *dst = transfer->resource;
162 struct pipe_resource *src = &rtransfer->staging->b.b;
163 struct pipe_box sbox;
164
165 u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);
166
167 if (dst->nr_samples > 1) {
168 r600_copy_region_with_blit(ctx, dst, transfer->level,
169 transfer->box.x, transfer->box.y, transfer->box.z,
170 src, 0, &sbox);
171 return;
172 }
173
174 rctx->dma_copy(ctx, dst, transfer->level,
175 transfer->box.x, transfer->box.y, transfer->box.z,
176 src, 0, &sbox);
177 }
178
179 static unsigned r600_texture_get_offset(struct si_screen *sscreen,
180 struct r600_texture *rtex, unsigned level,
181 const struct pipe_box *box,
182 unsigned *stride,
183 unsigned *layer_stride)
184 {
185 if (sscreen->info.chip_class >= GFX9) {
186 *stride = rtex->surface.u.gfx9.surf_pitch * rtex->surface.bpe;
187 *layer_stride = rtex->surface.u.gfx9.surf_slice_size;
188
189 if (!box)
190 return 0;
191
192 /* Each texture is an array of slices. Each slice is an array
193 * of mipmap levels. */
194 return box->z * rtex->surface.u.gfx9.surf_slice_size +
195 rtex->surface.u.gfx9.offset[level] +
196 (box->y / rtex->surface.blk_h *
197 rtex->surface.u.gfx9.surf_pitch +
198 box->x / rtex->surface.blk_w) * rtex->surface.bpe;
199 } else {
200 *stride = rtex->surface.u.legacy.level[level].nblk_x *
201 rtex->surface.bpe;
202 assert((uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 <= UINT_MAX);
203 *layer_stride = (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4;
204
205 if (!box)
206 return rtex->surface.u.legacy.level[level].offset;
207
208 /* Each texture is an array of mipmap levels. Each level is
209 * an array of slices. */
210 return rtex->surface.u.legacy.level[level].offset +
211 box->z * (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 +
212 (box->y / rtex->surface.blk_h *
213 rtex->surface.u.legacy.level[level].nblk_x +
214 box->x / rtex->surface.blk_w) * rtex->surface.bpe;
215 }
216 }
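/* Illustrative arithmetic only (not used by the driver): on the legacy
 * (pre-GFX9) path, for an RGBA8 level 0 with bpe = 4, blk_w = blk_h = 1,
 * nblk_x = 256 and level offset 0, mapping box (x=16, y=8, z=0) yields
 *   *stride       = 256 * 4            = 1024 bytes
 *   return offset = (8 * 256 + 16) * 4 = 8256 bytes
 * The GFX9 path uses the same idea, except the per-level offset comes from
 * surface.u.gfx9.offset[] and slices are indexed before mip levels.
 */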
217
218 static int r600_init_surface(struct si_screen *sscreen,
219 struct radeon_surf *surface,
220 const struct pipe_resource *ptex,
221 enum radeon_surf_mode array_mode,
222 unsigned pitch_in_bytes_override,
223 unsigned offset,
224 bool is_imported,
225 bool is_scanout,
226 bool is_flushed_depth,
227 bool tc_compatible_htile)
228 {
229 const struct util_format_description *desc =
230 util_format_description(ptex->format);
231 bool is_depth, is_stencil;
232 int r;
233 unsigned i, bpe, flags = 0;
234
235 is_depth = util_format_has_depth(desc);
236 is_stencil = util_format_has_stencil(desc);
237
238 if (!is_flushed_depth &&
239 ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
240 bpe = 4; /* stencil is allocated separately on evergreen */
241 } else {
242 bpe = util_format_get_blocksize(ptex->format);
243 assert(util_is_power_of_two(bpe));
244 }
245
246 if (!is_flushed_depth && is_depth) {
247 flags |= RADEON_SURF_ZBUFFER;
248
249 if (tc_compatible_htile &&
250 (sscreen->info.chip_class >= GFX9 ||
251 array_mode == RADEON_SURF_MODE_2D)) {
252 /* TC-compatible HTILE only supports Z32_FLOAT.
253 * GFX9 also supports Z16_UNORM.
254 * On VI, promote Z16 to Z32. DB->CB copies will convert
255 * the format for transfers.
256 */
257 if (sscreen->info.chip_class == VI)
258 bpe = 4;
259
260 flags |= RADEON_SURF_TC_COMPATIBLE_HTILE;
261 }
262
263 if (is_stencil)
264 flags |= RADEON_SURF_SBUFFER;
265 }
266
267 if (sscreen->info.chip_class >= VI &&
268 (ptex->flags & R600_RESOURCE_FLAG_DISABLE_DCC ||
269 ptex->format == PIPE_FORMAT_R9G9B9E5_FLOAT ||
270 /* DCC MSAA array textures are disallowed due to incomplete clear impl. */
271 (ptex->nr_samples >= 2 &&
272 (!sscreen->dcc_msaa_allowed || ptex->array_size > 1))))
273 flags |= RADEON_SURF_DISABLE_DCC;
274
275 if (ptex->bind & PIPE_BIND_SCANOUT || is_scanout) {
276 /* This should catch bugs in gallium users setting incorrect flags. */
277 assert(ptex->nr_samples <= 1 &&
278 ptex->array_size == 1 &&
279 ptex->depth0 == 1 &&
280 ptex->last_level == 0 &&
281 !(flags & RADEON_SURF_Z_OR_SBUFFER));
282
283 flags |= RADEON_SURF_SCANOUT;
284 }
285
286 if (ptex->bind & PIPE_BIND_SHARED)
287 flags |= RADEON_SURF_SHAREABLE;
288 if (is_imported)
289 flags |= RADEON_SURF_IMPORTED | RADEON_SURF_SHAREABLE;
290 if (!(ptex->flags & R600_RESOURCE_FLAG_FORCE_TILING))
291 flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE;
292
293 r = sscreen->ws->surface_init(sscreen->ws, ptex, flags, bpe,
294 array_mode, surface);
295 if (r) {
296 return r;
297 }
298
299 unsigned pitch = pitch_in_bytes_override / bpe;
300
301 if (sscreen->info.chip_class >= GFX9) {
302 if (pitch) {
303 surface->u.gfx9.surf_pitch = pitch;
304 surface->u.gfx9.surf_slice_size =
305 (uint64_t)pitch * surface->u.gfx9.surf_height * bpe;
306 }
307 surface->u.gfx9.surf_offset = offset;
308 } else {
309 if (pitch) {
310 surface->u.legacy.level[0].nblk_x = pitch;
311 surface->u.legacy.level[0].slice_size_dw =
312 ((uint64_t)pitch * surface->u.legacy.level[0].nblk_y * bpe) / 4;
313 }
314 if (offset) {
315 for (i = 0; i < ARRAY_SIZE(surface->u.legacy.level); ++i)
316 surface->u.legacy.level[i].offset += offset;
317 }
318 }
319 return 0;
320 }
321
322 static void r600_texture_init_metadata(struct si_screen *sscreen,
323 struct r600_texture *rtex,
324 struct radeon_bo_metadata *metadata)
325 {
326 struct radeon_surf *surface = &rtex->surface;
327
328 memset(metadata, 0, sizeof(*metadata));
329
330 if (sscreen->info.chip_class >= GFX9) {
331 metadata->u.gfx9.swizzle_mode = surface->u.gfx9.surf.swizzle_mode;
332 } else {
333 metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
334 RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
335 metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
336 RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
337 metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config;
338 metadata->u.legacy.bankw = surface->u.legacy.bankw;
339 metadata->u.legacy.bankh = surface->u.legacy.bankh;
340 metadata->u.legacy.tile_split = surface->u.legacy.tile_split;
341 metadata->u.legacy.mtilea = surface->u.legacy.mtilea;
342 metadata->u.legacy.num_banks = surface->u.legacy.num_banks;
343 metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
344 metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
345 }
346 }
347
348 static void r600_surface_import_metadata(struct si_screen *sscreen,
349 struct radeon_surf *surf,
350 struct radeon_bo_metadata *metadata,
351 enum radeon_surf_mode *array_mode,
352 bool *is_scanout)
353 {
354 if (sscreen->info.chip_class >= GFX9) {
355 if (metadata->u.gfx9.swizzle_mode > 0)
356 *array_mode = RADEON_SURF_MODE_2D;
357 else
358 *array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
359
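		/* Note (explanatory): GFX9 addrlib swizzle modes are grouped in
		 * fours (_Z/_S/_D/_R variants per block size), so "mode % 4 == 2"
		 * appears to select the display-friendly _D modes, and mode 0 is
		 * linear; both are treated as scanout-capable on import.
		 */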
360 *is_scanout = metadata->u.gfx9.swizzle_mode == 0 ||
361 metadata->u.gfx9.swizzle_mode % 4 == 2;
362
363 surf->u.gfx9.surf.swizzle_mode = metadata->u.gfx9.swizzle_mode;
364 } else {
365 surf->u.legacy.pipe_config = metadata->u.legacy.pipe_config;
366 surf->u.legacy.bankw = metadata->u.legacy.bankw;
367 surf->u.legacy.bankh = metadata->u.legacy.bankh;
368 surf->u.legacy.tile_split = metadata->u.legacy.tile_split;
369 surf->u.legacy.mtilea = metadata->u.legacy.mtilea;
370 surf->u.legacy.num_banks = metadata->u.legacy.num_banks;
371
372 if (metadata->u.legacy.macrotile == RADEON_LAYOUT_TILED)
373 *array_mode = RADEON_SURF_MODE_2D;
374 else if (metadata->u.legacy.microtile == RADEON_LAYOUT_TILED)
375 *array_mode = RADEON_SURF_MODE_1D;
376 else
377 *array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
378
379 *is_scanout = metadata->u.legacy.scanout;
380 }
381 }
382
383 void si_eliminate_fast_color_clear(struct r600_common_context *rctx,
384 struct r600_texture *rtex)
385 {
386 struct si_screen *sscreen = rctx->screen;
387 struct pipe_context *ctx = &rctx->b;
388
389 if (ctx == sscreen->aux_context)
390 mtx_lock(&sscreen->aux_context_lock);
391
392 unsigned n = rctx->num_decompress_calls;
393 ctx->flush_resource(ctx, &rtex->resource.b.b);
394
395 /* Flush only if any fast clear elimination took place. */
396 if (n != rctx->num_decompress_calls)
397 ctx->flush(ctx, NULL, 0);
398
399 if (ctx == sscreen->aux_context)
400 mtx_unlock(&sscreen->aux_context_lock);
401 }
402
403 void si_texture_discard_cmask(struct si_screen *sscreen,
404 struct r600_texture *rtex)
405 {
406 if (!rtex->cmask.size)
407 return;
408
409 assert(rtex->resource.b.b.nr_samples <= 1);
410
411 /* Disable CMASK. */
412 memset(&rtex->cmask, 0, sizeof(rtex->cmask));
413 rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;
414 rtex->dirty_level_mask = 0;
415
416 rtex->cb_color_info &= ~S_028C70_FAST_CLEAR(1);
417
418 if (rtex->cmask_buffer != &rtex->resource)
419 r600_resource_reference(&rtex->cmask_buffer, NULL);
420
421 /* Notify all contexts about the change. */
422 p_atomic_inc(&sscreen->dirty_tex_counter);
423 p_atomic_inc(&sscreen->compressed_colortex_counter);
424 }
425
426 static bool r600_can_disable_dcc(struct r600_texture *rtex)
427 {
428 /* We can't disable DCC if it can be written by another process. */
429 return rtex->dcc_offset &&
430 (!rtex->resource.b.is_shared ||
431 !(rtex->resource.external_usage & PIPE_HANDLE_USAGE_WRITE));
432 }
433
434 static bool r600_texture_discard_dcc(struct si_screen *sscreen,
435 struct r600_texture *rtex)
436 {
437 if (!r600_can_disable_dcc(rtex))
438 return false;
439
440 assert(rtex->dcc_separate_buffer == NULL);
441
442 /* Disable DCC. */
443 rtex->dcc_offset = 0;
444
445 /* Notify all contexts about the change. */
446 p_atomic_inc(&sscreen->dirty_tex_counter);
447 return true;
448 }
449
450 /**
451 * Disable DCC for the texture. (first decompress, then discard metadata).
452 *
453  * There is an unresolved multi-context synchronization issue between
454 * screen::aux_context and the current context. If applications do this with
455 * multiple contexts, it's already undefined behavior for them and we don't
456 * have to worry about that. The scenario is:
457 *
458 * If context 1 disables DCC and context 2 has queued commands that write
459 * to the texture via CB with DCC enabled, and the order of operations is
460 * as follows:
461 * context 2 queues draw calls rendering to the texture, but doesn't flush
462 * context 1 disables DCC and flushes
463 * context 1 & 2 reset descriptors and FB state
464 * context 2 flushes (new compressed tiles written by the draw calls)
465 * context 1 & 2 read garbage, because DCC is disabled, yet there are
466  *   compressed tiles
467 *
468  * \param rctx  the current context if you have one, or sscreen->aux_context
469 * if you don't.
470 */
471 bool si_texture_disable_dcc(struct r600_common_context *rctx,
472 struct r600_texture *rtex)
473 {
474 struct si_screen *sscreen = rctx->screen;
475
476 if (!r600_can_disable_dcc(rtex))
477 return false;
478
479 if (&rctx->b == sscreen->aux_context)
480 mtx_lock(&sscreen->aux_context_lock);
481
482 /* Decompress DCC. */
483 rctx->decompress_dcc(&rctx->b, rtex);
484 rctx->b.flush(&rctx->b, NULL, 0);
485
486 if (&rctx->b == sscreen->aux_context)
487 mtx_unlock(&sscreen->aux_context_lock);
488
489 return r600_texture_discard_dcc(sscreen, rtex);
490 }
491
492 static void r600_reallocate_texture_inplace(struct r600_common_context *rctx,
493 struct r600_texture *rtex,
494 unsigned new_bind_flag,
495 bool invalidate_storage)
496 {
497 struct pipe_screen *screen = rctx->b.screen;
498 struct r600_texture *new_tex;
499 struct pipe_resource templ = rtex->resource.b.b;
500 unsigned i;
501
502 templ.bind |= new_bind_flag;
503
504 if (rtex->resource.b.is_shared)
505 return;
506
507 if (new_bind_flag == PIPE_BIND_LINEAR) {
508 if (rtex->surface.is_linear)
509 return;
510
511 /* This fails with MSAA, depth, and compressed textures. */
512 if (r600_choose_tiling(rctx->screen, &templ) !=
513 RADEON_SURF_MODE_LINEAR_ALIGNED)
514 return;
515 }
516
517 new_tex = (struct r600_texture*)screen->resource_create(screen, &templ);
518 if (!new_tex)
519 return;
520
521 /* Copy the pixels to the new texture. */
522 if (!invalidate_storage) {
523 for (i = 0; i <= templ.last_level; i++) {
524 struct pipe_box box;
525
526 u_box_3d(0, 0, 0,
527 u_minify(templ.width0, i), u_minify(templ.height0, i),
528 util_num_layers(&templ, i), &box);
529
530 rctx->dma_copy(&rctx->b, &new_tex->resource.b.b, i, 0, 0, 0,
531 &rtex->resource.b.b, i, &box);
532 }
533 }
534
535 if (new_bind_flag == PIPE_BIND_LINEAR) {
536 si_texture_discard_cmask(rctx->screen, rtex);
537 r600_texture_discard_dcc(rctx->screen, rtex);
538 }
539
540 /* Replace the structure fields of rtex. */
541 rtex->resource.b.b.bind = templ.bind;
542 pb_reference(&rtex->resource.buf, new_tex->resource.buf);
543 rtex->resource.gpu_address = new_tex->resource.gpu_address;
544 rtex->resource.vram_usage = new_tex->resource.vram_usage;
545 rtex->resource.gart_usage = new_tex->resource.gart_usage;
546 rtex->resource.bo_size = new_tex->resource.bo_size;
547 rtex->resource.bo_alignment = new_tex->resource.bo_alignment;
548 rtex->resource.domains = new_tex->resource.domains;
549 rtex->resource.flags = new_tex->resource.flags;
550 rtex->size = new_tex->size;
551 rtex->db_render_format = new_tex->db_render_format;
552 rtex->db_compatible = new_tex->db_compatible;
553 rtex->can_sample_z = new_tex->can_sample_z;
554 rtex->can_sample_s = new_tex->can_sample_s;
555 rtex->surface = new_tex->surface;
556 rtex->fmask = new_tex->fmask;
557 rtex->cmask = new_tex->cmask;
558 rtex->cb_color_info = new_tex->cb_color_info;
559 rtex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode;
560 rtex->htile_offset = new_tex->htile_offset;
561 rtex->tc_compatible_htile = new_tex->tc_compatible_htile;
562 rtex->depth_cleared = new_tex->depth_cleared;
563 rtex->stencil_cleared = new_tex->stencil_cleared;
564 rtex->dcc_gather_statistics = new_tex->dcc_gather_statistics;
565 rtex->framebuffers_bound = new_tex->framebuffers_bound;
566
567 if (new_bind_flag == PIPE_BIND_LINEAR) {
568 assert(!rtex->htile_offset);
569 assert(!rtex->cmask.size);
570 assert(!rtex->fmask.size);
571 assert(!rtex->dcc_offset);
572 assert(!rtex->is_depth);
573 }
574
575 r600_texture_reference(&new_tex, NULL);
576
577 p_atomic_inc(&rctx->screen->dirty_tex_counter);
578 }
579
580 static uint32_t si_get_bo_metadata_word1(struct si_screen *sscreen)
581 {
582 return (ATI_VENDOR_ID << 16) | sscreen->info.pci_id;
583 }
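/* Illustrative example: with ATI_VENDOR_ID = 0x1002 and a (hypothetical)
 * PCI ID of 0x67DF, this word is (0x1002 << 16) | 0x67DF = 0x100267DF,
 * letting a consumer identify which GPU generated the opaque metadata below.
 */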
584
585 static void si_query_opaque_metadata(struct si_screen *sscreen,
586 struct r600_texture *rtex,
587 struct radeon_bo_metadata *md)
588 {
589 struct pipe_resource *res = &rtex->resource.b.b;
590 static const unsigned char swizzle[] = {
591 PIPE_SWIZZLE_X,
592 PIPE_SWIZZLE_Y,
593 PIPE_SWIZZLE_Z,
594 PIPE_SWIZZLE_W
595 };
596 uint32_t desc[8], i;
597 bool is_array = util_texture_is_array(res->target);
598
599 /* DRM 2.x.x doesn't support this. */
600 if (sscreen->info.drm_major != 3)
601 return;
602
603 assert(rtex->dcc_separate_buffer == NULL);
604 assert(rtex->fmask.size == 0);
605
606 	/* Metadata image format version 1:
607 * [0] = 1 (metadata format identifier)
608 * [1] = (VENDOR_ID << 16) | PCI_ID
609 * [2:9] = image descriptor for the whole resource
610 * [2] is always 0, because the base address is cleared
611 * [9] is the DCC offset bits [39:8] from the beginning of
612 * the buffer
613 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
614 */
615
616 md->metadata[0] = 1; /* metadata image format version 1 */
617
618 /* TILE_MODE_INDEX is ambiguous without a PCI ID. */
619 md->metadata[1] = si_get_bo_metadata_word1(sscreen);
620
621 si_make_texture_descriptor(sscreen, rtex, true,
622 res->target, res->format,
623 swizzle, 0, res->last_level, 0,
624 is_array ? res->array_size - 1 : 0,
625 res->width0, res->height0, res->depth0,
626 desc, NULL);
627
628 si_set_mutable_tex_desc_fields(sscreen, rtex, &rtex->surface.u.legacy.level[0],
629 0, 0, rtex->surface.blk_w, false, desc);
630
631 /* Clear the base address and set the relative DCC offset. */
632 desc[0] = 0;
633 desc[1] &= C_008F14_BASE_ADDRESS_HI;
634 desc[7] = rtex->dcc_offset >> 8;
635
636 /* Dwords [2:9] contain the image descriptor. */
637 memcpy(&md->metadata[2], desc, sizeof(desc));
638 md->size_metadata = 10 * 4;
639
640 /* Dwords [10:..] contain the mipmap level offsets. */
641 if (sscreen->info.chip_class <= VI) {
642 for (i = 0; i <= res->last_level; i++)
643 md->metadata[10+i] = rtex->surface.u.legacy.level[i].offset >> 8;
644
645 md->size_metadata += (1 + res->last_level) * 4;
646 }
647 }
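/* Size sketch (illustrative): the fixed part is 2 header dwords + 8 descriptor
 * dwords = 40 bytes; on <= VI a texture with last_level = 4 adds 5 mip-offset
 * dwords, giving size_metadata = 10*4 + 5*4 = 60 bytes.
 */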
648
649 static void si_apply_opaque_metadata(struct si_screen *sscreen,
650 struct r600_texture *rtex,
651 struct radeon_bo_metadata *md)
652 {
653 uint32_t *desc = &md->metadata[2];
654
655 if (sscreen->info.chip_class < VI)
656 return;
657
658 /* Return if DCC is enabled. The texture should be set up with it
659 * already.
660 */
661 if (md->size_metadata >= 10 * 4 && /* at least 2(header) + 8(desc) dwords */
662 md->metadata[0] != 0 &&
663 md->metadata[1] == si_get_bo_metadata_word1(sscreen) &&
664 G_008F28_COMPRESSION_EN(desc[6])) {
665 rtex->dcc_offset = (uint64_t)desc[7] << 8;
666 return;
667 }
668
669 /* Disable DCC. These are always set by texture_from_handle and must
670 * be cleared here.
671 */
672 rtex->dcc_offset = 0;
673 }
674
675 static boolean r600_texture_get_handle(struct pipe_screen* screen,
676 struct pipe_context *ctx,
677 struct pipe_resource *resource,
678 struct winsys_handle *whandle,
679 unsigned usage)
680 {
681 struct si_screen *sscreen = (struct si_screen*)screen;
682 struct r600_common_context *rctx;
683 struct r600_resource *res = (struct r600_resource*)resource;
684 struct r600_texture *rtex = (struct r600_texture*)resource;
685 struct radeon_bo_metadata metadata;
686 bool update_metadata = false;
687 unsigned stride, offset, slice_size;
688 bool flush = false;
689
690 ctx = threaded_context_unwrap_sync(ctx);
691 rctx = (struct r600_common_context*)(ctx ? ctx : sscreen->aux_context);
692
693 if (resource->target != PIPE_BUFFER) {
694 /* This is not supported now, but it might be required for OpenCL
695 * interop in the future.
696 */
697 if (resource->nr_samples > 1 || rtex->is_depth)
698 return false;
699
700 /* Move a suballocated texture into a non-suballocated allocation. */
701 if (sscreen->ws->buffer_is_suballocated(res->buf) ||
702 rtex->surface.tile_swizzle ||
703 (rtex->resource.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
704 whandle->type != DRM_API_HANDLE_TYPE_KMS)) {
705 assert(!res->b.is_shared);
706 r600_reallocate_texture_inplace(rctx, rtex,
707 PIPE_BIND_SHARED, false);
708 flush = true;
709 assert(res->b.b.bind & PIPE_BIND_SHARED);
710 assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
711 assert(!(res->flags & RADEON_FLAG_NO_INTERPROCESS_SHARING));
712 assert(rtex->surface.tile_swizzle == 0);
713 }
714
715 /* Since shader image stores don't support DCC on VI,
716 * disable it for external clients that want write
717 * access.
718 */
719 if (usage & PIPE_HANDLE_USAGE_WRITE && rtex->dcc_offset) {
720 if (si_texture_disable_dcc(rctx, rtex)) {
721 update_metadata = true;
722 /* si_texture_disable_dcc flushes the context */
723 flush = false;
724 }
725 }
726
727 if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
728 (rtex->cmask.size || rtex->dcc_offset)) {
729 /* Eliminate fast clear (both CMASK and DCC) */
730 si_eliminate_fast_color_clear(rctx, rtex);
731 /* eliminate_fast_color_clear flushes the context */
732 flush = false;
733
734 /* Disable CMASK if flush_resource isn't going
735 * to be called.
736 */
737 if (rtex->cmask.size)
738 si_texture_discard_cmask(sscreen, rtex);
739 }
740
741 /* Set metadata. */
742 if (!res->b.is_shared || update_metadata) {
743 r600_texture_init_metadata(sscreen, rtex, &metadata);
744 si_query_opaque_metadata(sscreen, rtex, &metadata);
745
746 sscreen->ws->buffer_set_metadata(res->buf, &metadata);
747 }
748
749 if (sscreen->info.chip_class >= GFX9) {
750 offset = rtex->surface.u.gfx9.surf_offset;
751 stride = rtex->surface.u.gfx9.surf_pitch *
752 rtex->surface.bpe;
753 slice_size = rtex->surface.u.gfx9.surf_slice_size;
754 } else {
755 offset = rtex->surface.u.legacy.level[0].offset;
756 stride = rtex->surface.u.legacy.level[0].nblk_x *
757 rtex->surface.bpe;
758 slice_size = (uint64_t)rtex->surface.u.legacy.level[0].slice_size_dw * 4;
759 }
760 } else {
761 /* Buffer exports are for the OpenCL interop. */
762 /* Move a suballocated buffer into a non-suballocated allocation. */
763 if (sscreen->ws->buffer_is_suballocated(res->buf) ||
764 /* A DMABUF export always fails if the BO is local. */
765 rtex->resource.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING) {
766 assert(!res->b.is_shared);
767
768 /* Allocate a new buffer with PIPE_BIND_SHARED. */
769 struct pipe_resource templ = res->b.b;
770 templ.bind |= PIPE_BIND_SHARED;
771
772 struct pipe_resource *newb =
773 screen->resource_create(screen, &templ);
774 if (!newb)
775 return false;
776
777 /* Copy the old buffer contents to the new one. */
778 struct pipe_box box;
779 u_box_1d(0, newb->width0, &box);
780 rctx->b.resource_copy_region(&rctx->b, newb, 0, 0, 0, 0,
781 &res->b.b, 0, &box);
782 flush = true;
783 /* Move the new buffer storage to the old pipe_resource. */
784 si_replace_buffer_storage(&rctx->b, &res->b.b, newb);
785 pipe_resource_reference(&newb, NULL);
786
787 assert(res->b.b.bind & PIPE_BIND_SHARED);
788 assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
789 }
790
791 /* Buffers */
792 offset = 0;
793 stride = 0;
794 slice_size = 0;
795 }
796
797 if (flush)
798 rctx->b.flush(&rctx->b, NULL, 0);
799
800 if (res->b.is_shared) {
801 /* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
802 * doesn't set it.
803 */
804 res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
805 if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
806 res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
807 } else {
808 res->b.is_shared = true;
809 res->external_usage = usage;
810 }
811
812 return sscreen->ws->buffer_get_handle(res->buf, stride, offset,
813 slice_size, whandle);
814 }
815
816 static void r600_texture_destroy(struct pipe_screen *screen,
817 struct pipe_resource *ptex)
818 {
819 struct r600_texture *rtex = (struct r600_texture*)ptex;
820 struct r600_resource *resource = &rtex->resource;
821
822 r600_texture_reference(&rtex->flushed_depth_texture, NULL);
823
824 if (rtex->cmask_buffer != &rtex->resource) {
825 r600_resource_reference(&rtex->cmask_buffer, NULL);
826 }
827 pb_reference(&resource->buf, NULL);
828 r600_resource_reference(&rtex->dcc_separate_buffer, NULL);
829 r600_resource_reference(&rtex->last_dcc_separate_buffer, NULL);
830 FREE(rtex);
831 }
832
833 static const struct u_resource_vtbl r600_texture_vtbl;
834
835 /* The number of samples can be specified independently of the texture. */
836 void si_texture_get_fmask_info(struct si_screen *sscreen,
837 struct r600_texture *rtex,
838 unsigned nr_samples,
839 struct r600_fmask_info *out)
840 {
841 /* FMASK is allocated like an ordinary texture. */
842 struct pipe_resource templ = rtex->resource.b.b;
843 struct radeon_surf fmask = {};
844 unsigned flags, bpe;
845
846 memset(out, 0, sizeof(*out));
847
848 if (sscreen->info.chip_class >= GFX9) {
849 out->alignment = rtex->surface.u.gfx9.fmask_alignment;
850 out->size = rtex->surface.u.gfx9.fmask_size;
851 out->tile_swizzle = rtex->surface.u.gfx9.fmask_tile_swizzle;
852 return;
853 }
854
855 templ.nr_samples = 1;
856 flags = rtex->surface.flags | RADEON_SURF_FMASK;
857
858 switch (nr_samples) {
859 case 2:
860 case 4:
861 bpe = 1;
862 break;
863 case 8:
864 bpe = 4;
865 break;
866 default:
867 R600_ERR("Invalid sample count for FMASK allocation.\n");
868 return;
869 }
870
871 if (sscreen->ws->surface_init(sscreen->ws, &templ, flags, bpe,
872 RADEON_SURF_MODE_2D, &fmask)) {
873 R600_ERR("Got error in surface_init while allocating FMASK.\n");
874 return;
875 }
876
877 assert(fmask.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
878
879 out->slice_tile_max = (fmask.u.legacy.level[0].nblk_x * fmask.u.legacy.level[0].nblk_y) / 64;
880 if (out->slice_tile_max)
881 out->slice_tile_max -= 1;
882
883 out->tile_mode_index = fmask.u.legacy.tiling_index[0];
884 out->pitch_in_pixels = fmask.u.legacy.level[0].nblk_x;
885 out->bank_height = fmask.u.legacy.bankh;
886 out->tile_swizzle = fmask.tile_swizzle;
887 out->alignment = MAX2(256, fmask.surf_alignment);
888 out->size = fmask.surf_size;
889 }
890
891 static void r600_texture_allocate_fmask(struct si_screen *sscreen,
892 struct r600_texture *rtex)
893 {
894 si_texture_get_fmask_info(sscreen, rtex,
895 rtex->resource.b.b.nr_samples, &rtex->fmask);
896
897 rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
898 rtex->size = rtex->fmask.offset + rtex->fmask.size;
899 }
900
901 void si_texture_get_cmask_info(struct si_screen *sscreen,
902 struct r600_texture *rtex,
903 struct r600_cmask_info *out)
904 {
905 unsigned pipe_interleave_bytes = sscreen->info.pipe_interleave_bytes;
906 unsigned num_pipes = sscreen->info.num_tile_pipes;
907 unsigned cl_width, cl_height;
908
909 if (sscreen->info.chip_class >= GFX9) {
910 out->alignment = rtex->surface.u.gfx9.cmask_alignment;
911 out->size = rtex->surface.u.gfx9.cmask_size;
912 return;
913 }
914
915 switch (num_pipes) {
916 case 2:
917 cl_width = 32;
918 cl_height = 16;
919 break;
920 case 4:
921 cl_width = 32;
922 cl_height = 32;
923 break;
924 case 8:
925 cl_width = 64;
926 cl_height = 32;
927 break;
928 case 16: /* Hawaii */
929 cl_width = 64;
930 cl_height = 64;
931 break;
932 default:
933 assert(0);
934 return;
935 }
936
937 unsigned base_align = num_pipes * pipe_interleave_bytes;
938
939 unsigned width = align(rtex->resource.b.b.width0, cl_width*8);
940 unsigned height = align(rtex->resource.b.b.height0, cl_height*8);
941 unsigned slice_elements = (width * height) / (8*8);
942
943 /* Each element of CMASK is a nibble. */
944 unsigned slice_bytes = slice_elements / 2;
945
946 out->slice_tile_max = (width * height) / (128*128);
947 if (out->slice_tile_max)
948 out->slice_tile_max -= 1;
949
950 out->alignment = MAX2(256, base_align);
951 out->size = util_num_layers(&rtex->resource.b.b, 0) *
952 align(slice_bytes, base_align);
953 }
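/* Worked example (illustrative, assuming num_pipes = 4 and
 * pipe_interleave_bytes = 256): for a 1920x1080 single-layer texture,
 *   width  = align(1920, 32*8) = 2048,  height = align(1080, 32*8) = 1152
 *   slice_elements = 2048*1152/64 = 36864, slice_bytes = 18432
 *   slice_tile_max = 2048*1152/16384 - 1 = 143
 *   size = align(18432, 4*256) = 18432 bytes per layer
 */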
954
955 static void r600_texture_allocate_cmask(struct si_screen *sscreen,
956 struct r600_texture *rtex)
957 {
958 si_texture_get_cmask_info(sscreen, rtex, &rtex->cmask);
959
960 rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);
961 rtex->size = rtex->cmask.offset + rtex->cmask.size;
962
963 rtex->cb_color_info |= S_028C70_FAST_CLEAR(1);
964 }
965
966 static void r600_texture_get_htile_size(struct si_screen *sscreen,
967 struct r600_texture *rtex)
968 {
969 unsigned cl_width, cl_height, width, height;
970 unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
971 unsigned num_pipes = sscreen->info.num_tile_pipes;
972
973 assert(sscreen->info.chip_class <= VI);
974
975 rtex->surface.htile_size = 0;
976
977 /* HTILE is broken with 1D tiling on old kernels and CIK. */
978 if (sscreen->info.chip_class >= CIK &&
979 rtex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
980 sscreen->info.drm_major == 2 && sscreen->info.drm_minor < 38)
981 return;
982
983 /* Overalign HTILE on P2 configs to work around GPU hangs in
984 * piglit/depthstencil-render-miplevels 585.
985 *
986 * This has been confirmed to help Kabini & Stoney, where the hangs
987 * are always reproducible. I think I have seen the test hang
988 * on Carrizo too, though it was very rare there.
989 */
990 if (sscreen->info.chip_class >= CIK && num_pipes < 4)
991 num_pipes = 4;
992
993 switch (num_pipes) {
994 case 1:
995 cl_width = 32;
996 cl_height = 16;
997 break;
998 case 2:
999 cl_width = 32;
1000 cl_height = 32;
1001 break;
1002 case 4:
1003 cl_width = 64;
1004 cl_height = 32;
1005 break;
1006 case 8:
1007 cl_width = 64;
1008 cl_height = 64;
1009 break;
1010 case 16:
1011 cl_width = 128;
1012 cl_height = 64;
1013 break;
1014 default:
1015 assert(0);
1016 return;
1017 }
1018
1019 width = align(rtex->resource.b.b.width0, cl_width * 8);
1020 height = align(rtex->resource.b.b.height0, cl_height * 8);
1021
1022 slice_elements = (width * height) / (8 * 8);
1023 slice_bytes = slice_elements * 4;
1024
1025 pipe_interleave_bytes = sscreen->info.pipe_interleave_bytes;
1026 base_align = num_pipes * pipe_interleave_bytes;
1027
1028 rtex->surface.htile_alignment = base_align;
1029 rtex->surface.htile_size =
1030 util_num_layers(&rtex->resource.b.b, 0) *
1031 align(slice_bytes, base_align);
1032 }
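/* Worked example (illustrative, assuming num_pipes = 4 and
 * pipe_interleave_bytes = 256): for a 1920x1080 single-layer depth buffer,
 *   width  = align(1920, 64*8) = 2048,  height = align(1080, 32*8) = 1152
 *   slice_elements = 2048*1152/64 = 36864, slice_bytes = 36864*4 = 147456
 *   htile_size = align(147456, 4*256) = 147456 bytes (144 KiB)
 */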
1033
1034 static void r600_texture_allocate_htile(struct si_screen *sscreen,
1035 struct r600_texture *rtex)
1036 {
1037 if (sscreen->info.chip_class <= VI && !rtex->tc_compatible_htile)
1038 r600_texture_get_htile_size(sscreen, rtex);
1039
1040 if (!rtex->surface.htile_size)
1041 return;
1042
1043 rtex->htile_offset = align(rtex->size, rtex->surface.htile_alignment);
1044 rtex->size = rtex->htile_offset + rtex->surface.htile_size;
1045 }
1046
1047 void si_print_texture_info(struct si_screen *sscreen,
1048 struct r600_texture *rtex, struct u_log_context *log)
1049 {
1050 int i;
1051
1052 /* Common parameters. */
1053 u_log_printf(log, " Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
1054 "blk_h=%u, array_size=%u, last_level=%u, "
1055 "bpe=%u, nsamples=%u, flags=0x%x, %s\n",
1056 rtex->resource.b.b.width0, rtex->resource.b.b.height0,
1057 rtex->resource.b.b.depth0, rtex->surface.blk_w,
1058 rtex->surface.blk_h,
1059 rtex->resource.b.b.array_size, rtex->resource.b.b.last_level,
1060 rtex->surface.bpe, rtex->resource.b.b.nr_samples,
1061 rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));
1062
1063 if (sscreen->info.chip_class >= GFX9) {
1064 u_log_printf(log, " Surf: size=%"PRIu64", slice_size=%"PRIu64", "
1065 "alignment=%u, swmode=%u, epitch=%u, pitch=%u\n",
1066 rtex->surface.surf_size,
1067 rtex->surface.u.gfx9.surf_slice_size,
1068 rtex->surface.surf_alignment,
1069 rtex->surface.u.gfx9.surf.swizzle_mode,
1070 rtex->surface.u.gfx9.surf.epitch,
1071 rtex->surface.u.gfx9.surf_pitch);
1072
1073 if (rtex->fmask.size) {
1074 u_log_printf(log, " FMASK: offset=%"PRIu64", size=%"PRIu64", "
1075 "alignment=%u, swmode=%u, epitch=%u\n",
1076 rtex->fmask.offset,
1077 rtex->surface.u.gfx9.fmask_size,
1078 rtex->surface.u.gfx9.fmask_alignment,
1079 rtex->surface.u.gfx9.fmask.swizzle_mode,
1080 rtex->surface.u.gfx9.fmask.epitch);
1081 }
1082
1083 if (rtex->cmask.size) {
1084 u_log_printf(log, " CMask: offset=%"PRIu64", size=%"PRIu64", "
1085 "alignment=%u, rb_aligned=%u, pipe_aligned=%u\n",
1086 rtex->cmask.offset,
1087 rtex->surface.u.gfx9.cmask_size,
1088 rtex->surface.u.gfx9.cmask_alignment,
1089 rtex->surface.u.gfx9.cmask.rb_aligned,
1090 rtex->surface.u.gfx9.cmask.pipe_aligned);
1091 }
1092
1093 if (rtex->htile_offset) {
1094 u_log_printf(log, " HTile: offset=%"PRIu64", size=%u, alignment=%u, "
1095 "rb_aligned=%u, pipe_aligned=%u\n",
1096 rtex->htile_offset,
1097 rtex->surface.htile_size,
1098 rtex->surface.htile_alignment,
1099 rtex->surface.u.gfx9.htile.rb_aligned,
1100 rtex->surface.u.gfx9.htile.pipe_aligned);
1101 }
1102
1103 if (rtex->dcc_offset) {
1104 u_log_printf(log, " DCC: offset=%"PRIu64", size=%u, "
1105 "alignment=%u, pitch_max=%u, num_dcc_levels=%u\n",
1106 rtex->dcc_offset, rtex->surface.dcc_size,
1107 rtex->surface.dcc_alignment,
1108 rtex->surface.u.gfx9.dcc_pitch_max,
1109 rtex->surface.num_dcc_levels);
1110 }
1111
1112 if (rtex->surface.u.gfx9.stencil_offset) {
1113 u_log_printf(log, " Stencil: offset=%"PRIu64", swmode=%u, epitch=%u\n",
1114 rtex->surface.u.gfx9.stencil_offset,
1115 rtex->surface.u.gfx9.stencil.swizzle_mode,
1116 rtex->surface.u.gfx9.stencil.epitch);
1117 }
1118 return;
1119 }
1120
1121 u_log_printf(log, " Layout: size=%"PRIu64", alignment=%u, bankw=%u, "
1122 "bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
1123 rtex->surface.surf_size, rtex->surface.surf_alignment, rtex->surface.u.legacy.bankw,
1124 rtex->surface.u.legacy.bankh, rtex->surface.u.legacy.num_banks, rtex->surface.u.legacy.mtilea,
1125 rtex->surface.u.legacy.tile_split, rtex->surface.u.legacy.pipe_config,
1126 (rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);
1127
1128 if (rtex->fmask.size)
1129 u_log_printf(log, " FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
1130 "bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
1131 rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
1132 rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
1133 rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);
1134
1135 if (rtex->cmask.size)
1136 u_log_printf(log, " CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
1137 "slice_tile_max=%u\n",
1138 rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
1139 rtex->cmask.slice_tile_max);
1140
1141 if (rtex->htile_offset)
1142 u_log_printf(log, " HTile: offset=%"PRIu64", size=%u, "
1143 "alignment=%u, TC_compatible = %u\n",
1144 rtex->htile_offset, rtex->surface.htile_size,
1145 rtex->surface.htile_alignment,
1146 rtex->tc_compatible_htile);
1147
1148 if (rtex->dcc_offset) {
1149 u_log_printf(log, " DCC: offset=%"PRIu64", size=%u, alignment=%u\n",
1150 rtex->dcc_offset, rtex->surface.dcc_size,
1151 rtex->surface.dcc_alignment);
1152 for (i = 0; i <= rtex->resource.b.b.last_level; i++)
1153 u_log_printf(log, " DCCLevel[%i]: enabled=%u, offset=%u, "
1154 "fast_clear_size=%u\n",
1155 i, i < rtex->surface.num_dcc_levels,
1156 rtex->surface.u.legacy.level[i].dcc_offset,
1157 rtex->surface.u.legacy.level[i].dcc_fast_clear_size);
1158 }
1159
1160 for (i = 0; i <= rtex->resource.b.b.last_level; i++)
1161 u_log_printf(log, " Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
1162 "npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
1163 "mode=%u, tiling_index = %u\n",
1164 i, rtex->surface.u.legacy.level[i].offset,
1165 (uint64_t)rtex->surface.u.legacy.level[i].slice_size_dw * 4,
1166 u_minify(rtex->resource.b.b.width0, i),
1167 u_minify(rtex->resource.b.b.height0, i),
1168 u_minify(rtex->resource.b.b.depth0, i),
1169 rtex->surface.u.legacy.level[i].nblk_x,
1170 rtex->surface.u.legacy.level[i].nblk_y,
1171 rtex->surface.u.legacy.level[i].mode,
1172 rtex->surface.u.legacy.tiling_index[i]);
1173
1174 if (rtex->surface.has_stencil) {
1175 u_log_printf(log, " StencilLayout: tilesplit=%u\n",
1176 rtex->surface.u.legacy.stencil_tile_split);
1177 for (i = 0; i <= rtex->resource.b.b.last_level; i++) {
1178 u_log_printf(log, " StencilLevel[%i]: offset=%"PRIu64", "
1179 "slice_size=%"PRIu64", npix_x=%u, "
1180 "npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
1181 "mode=%u, tiling_index = %u\n",
1182 i, rtex->surface.u.legacy.stencil_level[i].offset,
1183 (uint64_t)rtex->surface.u.legacy.stencil_level[i].slice_size_dw * 4,
1184 u_minify(rtex->resource.b.b.width0, i),
1185 u_minify(rtex->resource.b.b.height0, i),
1186 u_minify(rtex->resource.b.b.depth0, i),
1187 rtex->surface.u.legacy.stencil_level[i].nblk_x,
1188 rtex->surface.u.legacy.stencil_level[i].nblk_y,
1189 rtex->surface.u.legacy.stencil_level[i].mode,
1190 rtex->surface.u.legacy.stencil_tiling_index[i]);
1191 }
1192 }
1193 }
1194
1195 /* Common processing for r600_texture_create and r600_texture_from_handle */
1196 static struct r600_texture *
1197 r600_texture_create_object(struct pipe_screen *screen,
1198 const struct pipe_resource *base,
1199 struct pb_buffer *buf,
1200 struct radeon_surf *surface)
1201 {
1202 struct r600_texture *rtex;
1203 struct r600_resource *resource;
1204 struct si_screen *sscreen = (struct si_screen*)screen;
1205
1206 rtex = CALLOC_STRUCT(r600_texture);
1207 if (!rtex)
1208 return NULL;
1209
1210 resource = &rtex->resource;
1211 resource->b.b = *base;
1212 resource->b.b.next = NULL;
1213 resource->b.vtbl = &r600_texture_vtbl;
1214 pipe_reference_init(&resource->b.b.reference, 1);
1215 resource->b.b.screen = screen;
1216
1217 /* don't include stencil-only formats which we don't support for rendering */
1218 rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));
1219
1220 rtex->surface = *surface;
1221 rtex->size = rtex->surface.surf_size;
1222
1223 rtex->tc_compatible_htile = rtex->surface.htile_size != 0 &&
1224 (rtex->surface.flags &
1225 RADEON_SURF_TC_COMPATIBLE_HTILE);
1226
1227 /* TC-compatible HTILE:
1228 * - VI only supports Z32_FLOAT.
1229 * - GFX9 only supports Z32_FLOAT and Z16_UNORM. */
1230 if (rtex->tc_compatible_htile) {
1231 if (sscreen->info.chip_class >= GFX9 &&
1232 base->format == PIPE_FORMAT_Z16_UNORM)
1233 rtex->db_render_format = base->format;
1234 else {
1235 rtex->db_render_format = PIPE_FORMAT_Z32_FLOAT;
1236 rtex->upgraded_depth = base->format != PIPE_FORMAT_Z32_FLOAT &&
1237 base->format != PIPE_FORMAT_Z32_FLOAT_S8X24_UINT;
1238 }
1239 } else {
1240 rtex->db_render_format = base->format;
1241 }
1242
1243 /* Applies to GCN. */
1244 rtex->last_msaa_resolve_target_micro_mode = rtex->surface.micro_tile_mode;
1245
1246 /* Disable separate DCC at the beginning. DRI2 doesn't reuse buffers
1247 * between frames, so the only thing that can enable separate DCC
1248 * with DRI2 is multiple slow clears within a frame.
1249 */
1250 rtex->ps_draw_ratio = 0;
1251
1252 if (rtex->is_depth) {
1253 if (sscreen->info.chip_class >= GFX9) {
1254 rtex->can_sample_z = true;
1255 rtex->can_sample_s = true;
1256 } else {
1257 rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted;
1258 rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted;
1259 }
1260
1261 if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
1262 R600_RESOURCE_FLAG_FLUSHED_DEPTH))) {
1263 rtex->db_compatible = true;
1264
1265 if (!(sscreen->debug_flags & DBG(NO_HYPERZ)))
1266 r600_texture_allocate_htile(sscreen, rtex);
1267 }
1268 } else {
1269 if (base->nr_samples > 1) {
1270 if (!buf) {
1271 r600_texture_allocate_fmask(sscreen, rtex);
1272 r600_texture_allocate_cmask(sscreen, rtex);
1273 rtex->cmask_buffer = &rtex->resource;
1274 }
1275 if (!rtex->fmask.size || !rtex->cmask.size) {
1276 FREE(rtex);
1277 return NULL;
1278 }
1279 }
1280
1281 /* Shared textures must always set up DCC here.
1282 * If it's not present, it will be disabled by
1283 * apply_opaque_metadata later.
1284 */
1285 if (rtex->surface.dcc_size &&
1286 (buf || !(sscreen->debug_flags & DBG(NO_DCC))) &&
1287 !(rtex->surface.flags & RADEON_SURF_SCANOUT)) {
1288 /* Reserve space for the DCC buffer. */
1289 rtex->dcc_offset = align64(rtex->size, rtex->surface.dcc_alignment);
1290 rtex->size = rtex->dcc_offset + rtex->surface.dcc_size;
1291 }
1292 }
1293
1294 /* Now create the backing buffer. */
1295 if (!buf) {
1296 si_init_resource_fields(sscreen, resource, rtex->size,
1297 rtex->surface.surf_alignment);
1298
1299 if (!si_alloc_resource(sscreen, resource)) {
1300 FREE(rtex);
1301 return NULL;
1302 }
1303 } else {
1304 resource->buf = buf;
1305 resource->gpu_address = sscreen->ws->buffer_get_virtual_address(resource->buf);
1306 resource->bo_size = buf->size;
1307 resource->bo_alignment = buf->alignment;
1308 resource->domains = sscreen->ws->buffer_get_initial_domain(resource->buf);
1309 if (resource->domains & RADEON_DOMAIN_VRAM)
1310 resource->vram_usage = buf->size;
1311 else if (resource->domains & RADEON_DOMAIN_GTT)
1312 resource->gart_usage = buf->size;
1313 }
1314
1315 if (rtex->cmask.size) {
1316 /* Initialize the cmask to 0xCC (= compressed state). */
1317 si_screen_clear_buffer(sscreen, &rtex->cmask_buffer->b.b,
1318 rtex->cmask.offset, rtex->cmask.size,
1319 0xCCCCCCCC);
1320 }
1321 if (rtex->htile_offset) {
1322 uint32_t clear_value = 0;
1323
1324 if (sscreen->info.chip_class >= GFX9 || rtex->tc_compatible_htile)
1325 clear_value = 0x0000030F;
1326
1327 si_screen_clear_buffer(sscreen, &rtex->resource.b.b,
1328 rtex->htile_offset,
1329 rtex->surface.htile_size,
1330 clear_value);
1331 }
1332
1333 /* Initialize DCC only if the texture is not being imported. */
1334 if (!buf && rtex->dcc_offset) {
1335 si_screen_clear_buffer(sscreen, &rtex->resource.b.b,
1336 rtex->dcc_offset,
1337 rtex->surface.dcc_size,
1338 0xFFFFFFFF);
1339 }
1340
1341 /* Initialize the CMASK base register value. */
1342 rtex->cmask.base_address_reg =
1343 (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
1344
1345 if (sscreen->debug_flags & DBG(VM)) {
1346 fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
1347 rtex->resource.gpu_address,
1348 rtex->resource.gpu_address + rtex->resource.buf->size,
1349 base->width0, base->height0, util_num_layers(base, 0), base->last_level+1,
1350 base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
1351 }
1352
1353 if (sscreen->debug_flags & DBG(TEX)) {
1354 puts("Texture:");
1355 struct u_log_context log;
1356 u_log_context_init(&log);
1357 si_print_texture_info(sscreen, rtex, &log);
1358 u_log_new_page_print(&log, stdout);
1359 fflush(stdout);
1360 u_log_context_destroy(&log);
1361 }
1362
1363 return rtex;
1364 }
1365
1366 static enum radeon_surf_mode
1367 r600_choose_tiling(struct si_screen *sscreen,
1368 const struct pipe_resource *templ)
1369 {
1370 const struct util_format_description *desc = util_format_description(templ->format);
1371 bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING;
1372 bool is_depth_stencil = util_format_is_depth_or_stencil(templ->format) &&
1373 !(templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);
1374
1375 /* MSAA resources must be 2D tiled. */
1376 if (templ->nr_samples > 1)
1377 return RADEON_SURF_MODE_2D;
1378
1379 /* Transfer resources should be linear. */
1380 if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
1381 return RADEON_SURF_MODE_LINEAR_ALIGNED;
1382
1383 /* Avoid Z/S decompress blits by forcing TC-compatible HTILE on VI,
1384 * which requires 2D tiling.
1385 */
1386 if (sscreen->info.chip_class == VI &&
1387 is_depth_stencil &&
1388 (templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY))
1389 return RADEON_SURF_MODE_2D;
1390
1391 /* Handle common candidates for the linear mode.
1392 * Compressed textures and DB surfaces must always be tiled.
1393 */
1394 if (!force_tiling &&
1395 !is_depth_stencil &&
1396 !util_format_is_compressed(templ->format)) {
1397 if (sscreen->debug_flags & DBG(NO_TILING))
1398 return RADEON_SURF_MODE_LINEAR_ALIGNED;
1399
1400 /* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
1401 if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
1402 return RADEON_SURF_MODE_LINEAR_ALIGNED;
1403
1404 /* Cursors are linear on SI.
1405 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
1406 if (templ->bind & PIPE_BIND_CURSOR)
1407 return RADEON_SURF_MODE_LINEAR_ALIGNED;
1408
1409 if (templ->bind & PIPE_BIND_LINEAR)
1410 return RADEON_SURF_MODE_LINEAR_ALIGNED;
1411
1412 /* Textures with a very small height are recommended to be linear. */
1413 if (templ->target == PIPE_TEXTURE_1D ||
1414 templ->target == PIPE_TEXTURE_1D_ARRAY ||
1415 /* Only very thin and long 2D textures should benefit from
1416 * linear_aligned. */
1417 (templ->width0 > 8 && templ->height0 <= 2))
1418 return RADEON_SURF_MODE_LINEAR_ALIGNED;
1419
1420 /* Textures likely to be mapped often. */
1421 if (templ->usage == PIPE_USAGE_STAGING ||
1422 templ->usage == PIPE_USAGE_STREAM)
1423 return RADEON_SURF_MODE_LINEAR_ALIGNED;
1424 }
1425
1426 /* Make small textures 1D tiled. */
1427 if (templ->width0 <= 16 || templ->height0 <= 16 ||
1428 (sscreen->debug_flags & DBG(NO_2D_TILING)))
1429 return RADEON_SURF_MODE_1D;
1430
1431 /* The allocator will switch to 1D if needed. */
1432 return RADEON_SURF_MODE_2D;
1433 }
1434
1435 struct pipe_resource *si_texture_create(struct pipe_screen *screen,
1436 const struct pipe_resource *templ)
1437 {
1438 struct si_screen *sscreen = (struct si_screen*)screen;
1439 struct radeon_surf surface = {0};
1440 bool is_flushed_depth = templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH;
1441 bool tc_compatible_htile =
1442 sscreen->info.chip_class >= VI &&
1443 (templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY) &&
1444 !(sscreen->debug_flags & DBG(NO_HYPERZ)) &&
1445 !is_flushed_depth &&
1446 templ->nr_samples <= 1 && /* TC-compat HTILE is less efficient with MSAA */
1447 util_format_is_depth_or_stencil(templ->format);
1448
1449 int r;
1450
1451 r = r600_init_surface(sscreen, &surface, templ,
1452 r600_choose_tiling(sscreen, templ), 0, 0,
1453 false, false, is_flushed_depth,
1454 tc_compatible_htile);
1455 if (r) {
1456 return NULL;
1457 }
1458
1459 return (struct pipe_resource *)
1460 r600_texture_create_object(screen, templ, NULL, &surface);
1461 }
1462
1463 static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
1464 const struct pipe_resource *templ,
1465 struct winsys_handle *whandle,
1466 unsigned usage)
1467 {
1468 struct si_screen *sscreen = (struct si_screen*)screen;
1469 struct pb_buffer *buf = NULL;
1470 unsigned stride = 0, offset = 0;
1471 enum radeon_surf_mode array_mode;
1472 struct radeon_surf surface = {};
1473 int r;
1474 struct radeon_bo_metadata metadata = {};
1475 struct r600_texture *rtex;
1476 bool is_scanout;
1477
1478 /* Support only 2D textures without mipmaps */
1479 if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
1480 templ->depth0 != 1 || templ->last_level != 0)
1481 return NULL;
1482
1483 buf = sscreen->ws->buffer_from_handle(sscreen->ws, whandle, &stride, &offset);
1484 if (!buf)
1485 return NULL;
1486
1487 sscreen->ws->buffer_get_metadata(buf, &metadata);
1488 r600_surface_import_metadata(sscreen, &surface, &metadata,
1489 &array_mode, &is_scanout);
1490
1491 r = r600_init_surface(sscreen, &surface, templ, array_mode, stride,
1492 offset, true, is_scanout, false, false);
1493 if (r) {
1494 return NULL;
1495 }
1496
1497 rtex = r600_texture_create_object(screen, templ, buf, &surface);
1498 if (!rtex)
1499 return NULL;
1500
1501 rtex->resource.b.is_shared = true;
1502 rtex->resource.external_usage = usage;
1503
1504 si_apply_opaque_metadata(sscreen, rtex, &metadata);
1505
1506 assert(rtex->surface.tile_swizzle == 0);
1507 return &rtex->resource.b.b;
1508 }
1509
1510 bool si_init_flushed_depth_texture(struct pipe_context *ctx,
1511 struct pipe_resource *texture,
1512 struct r600_texture **staging)
1513 {
1514 struct r600_texture *rtex = (struct r600_texture*)texture;
1515 struct pipe_resource resource;
1516 struct r600_texture **flushed_depth_texture = staging ?
1517 staging : &rtex->flushed_depth_texture;
1518 enum pipe_format pipe_format = texture->format;
1519
1520 if (!staging) {
1521 if (rtex->flushed_depth_texture)
1522 return true; /* it's ready */
1523
1524 if (!rtex->can_sample_z && rtex->can_sample_s) {
1525 switch (pipe_format) {
1526 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
1527 /* Save memory by not allocating the S plane. */
1528 pipe_format = PIPE_FORMAT_Z32_FLOAT;
1529 break;
1530 case PIPE_FORMAT_Z24_UNORM_S8_UINT:
1531 case PIPE_FORMAT_S8_UINT_Z24_UNORM:
1532 /* Save memory bandwidth by not copying the
1533 * stencil part during flush.
1534 *
1535 * This potentially increases memory bandwidth
1536 * if an application uses both Z and S texturing
1537 * simultaneously (a flushed Z24S8 texture
1538 * would be stored compactly), but how often
1539 * does that really happen?
1540 */
1541 pipe_format = PIPE_FORMAT_Z24X8_UNORM;
1542 break;
1543 default:;
1544 }
1545 } else if (!rtex->can_sample_s && rtex->can_sample_z) {
1546 assert(util_format_has_stencil(util_format_description(pipe_format)));
1547
1548 /* DB->CB copies to an 8bpp surface don't work. */
1549 pipe_format = PIPE_FORMAT_X24S8_UINT;
1550 }
1551 }
1552
1553 memset(&resource, 0, sizeof(resource));
1554 resource.target = texture->target;
1555 resource.format = pipe_format;
1556 resource.width0 = texture->width0;
1557 resource.height0 = texture->height0;
1558 resource.depth0 = texture->depth0;
1559 resource.array_size = texture->array_size;
1560 resource.last_level = texture->last_level;
1561 resource.nr_samples = texture->nr_samples;
1562 resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
1563 resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
1564 resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;
1565
1566 if (staging)
1567 resource.flags |= R600_RESOURCE_FLAG_TRANSFER;
1568
1569 *flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
1570 if (*flushed_depth_texture == NULL) {
1571 R600_ERR("failed to create temporary texture to hold flushed depth\n");
1572 return false;
1573 }
1574 return true;
1575 }
1576
1577 /**
1578 * Initialize the pipe_resource descriptor to be of the same size as the box,
1579 * which is supposed to hold a subregion of the texture "orig" at the given
1580 * mipmap level.
1581 */
1582 static void r600_init_temp_resource_from_box(struct pipe_resource *res,
1583 struct pipe_resource *orig,
1584 const struct pipe_box *box,
1585 unsigned level, unsigned flags)
1586 {
1587 memset(res, 0, sizeof(*res));
1588 res->format = orig->format;
1589 res->width0 = box->width;
1590 res->height0 = box->height;
1591 res->depth0 = 1;
1592 res->array_size = 1;
1593 res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
1594 res->flags = flags;
1595
1596 /* We must set the correct texture target and dimensions for a 3D box. */
1597 if (box->depth > 1 && util_max_layer(orig, level) > 0) {
1598 res->target = PIPE_TEXTURE_2D_ARRAY;
1599 res->array_size = box->depth;
1600 } else {
1601 res->target = PIPE_TEXTURE_2D;
1602 }
1603 }
1604
1605 static bool r600_can_invalidate_texture(struct si_screen *sscreen,
1606 struct r600_texture *rtex,
1607 unsigned transfer_usage,
1608 const struct pipe_box *box)
1609 {
1610 return !rtex->resource.b.is_shared &&
1611 !(transfer_usage & PIPE_TRANSFER_READ) &&
1612 rtex->resource.b.b.last_level == 0 &&
1613 util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
1614 box->x, box->y, box->z,
1615 box->width, box->height,
1616 box->depth);
1617 }
1618
1619 static void r600_texture_invalidate_storage(struct r600_common_context *rctx,
1620 struct r600_texture *rtex)
1621 {
1622 struct si_screen *sscreen = rctx->screen;
1623
1624 /* There is no point in discarding depth and tiled buffers. */
1625 assert(!rtex->is_depth);
1626 assert(rtex->surface.is_linear);
1627
1628 /* Reallocate the buffer in the same pipe_resource. */
1629 si_alloc_resource(sscreen, &rtex->resource);
1630
1631 /* Initialize the CMASK base address (needed even without CMASK). */
1632 rtex->cmask.base_address_reg =
1633 (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
1634
1635 p_atomic_inc(&sscreen->dirty_tex_counter);
1636
1637 rctx->num_alloc_tex_transfer_bytes += rtex->size;
1638 }
1639
1640 static void *r600_texture_transfer_map(struct pipe_context *ctx,
1641 struct pipe_resource *texture,
1642 unsigned level,
1643 unsigned usage,
1644 const struct pipe_box *box,
1645 struct pipe_transfer **ptransfer)
1646 {
1647 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
1648 struct r600_texture *rtex = (struct r600_texture*)texture;
1649 struct r600_transfer *trans;
1650 struct r600_resource *buf;
1651 unsigned offset = 0;
1652 char *map;
1653 bool use_staging_texture = false;
1654
1655 assert(!(texture->flags & R600_RESOURCE_FLAG_TRANSFER));
1656 assert(box->width && box->height && box->depth);
1657
1658 /* Depth textures use staging unconditionally. */
1659 if (!rtex->is_depth) {
1660 /* Degrade the tile mode if we get too many transfers on APUs.
1661 * On dGPUs, the staging texture is always faster.
1662 * Only count uploads that are at least 4x4 pixels large.
1663 */
1664 if (!rctx->screen->info.has_dedicated_vram &&
1665 level == 0 &&
1666 box->width >= 4 && box->height >= 4 &&
1667 p_atomic_inc_return(&rtex->num_level0_transfers) == 10) {
1668 bool can_invalidate =
1669 r600_can_invalidate_texture(rctx->screen, rtex,
1670 usage, box);
1671
1672 r600_reallocate_texture_inplace(rctx, rtex,
1673 PIPE_BIND_LINEAR,
1674 can_invalidate);
1675 }
1676
1677 /* Tiled textures need to be converted into a linear texture for CPU
1678 * access. The staging texture is always linear and is placed in GART.
1679 *
1680 * Reading from VRAM or GTT WC is slow, always use the staging
1681 * texture in this case.
1682 *
1683 * Use the staging texture for uploads if the underlying BO
1684 * is busy.
1685 */
1686 if (!rtex->surface.is_linear)
1687 use_staging_texture = true;
1688 else if (usage & PIPE_TRANSFER_READ)
1689 use_staging_texture =
1690 rtex->resource.domains & RADEON_DOMAIN_VRAM ||
1691 rtex->resource.flags & RADEON_FLAG_GTT_WC;
1692 /* Write & linear only: */
1693 else if (si_rings_is_buffer_referenced(rctx, rtex->resource.buf,
1694 RADEON_USAGE_READWRITE) ||
1695 !rctx->ws->buffer_wait(rtex->resource.buf, 0,
1696 RADEON_USAGE_READWRITE)) {
1697 /* It's busy. */
1698 if (r600_can_invalidate_texture(rctx->screen, rtex,
1699 usage, box))
1700 r600_texture_invalidate_storage(rctx, rtex);
1701 else
1702 use_staging_texture = true;
1703 }
1704 }
1705
1706 trans = CALLOC_STRUCT(r600_transfer);
1707 if (!trans)
1708 return NULL;
1709 pipe_resource_reference(&trans->b.b.resource, texture);
1710 trans->b.b.level = level;
1711 trans->b.b.usage = usage;
1712 trans->b.b.box = *box;
1713
1714 if (rtex->is_depth) {
1715 struct r600_texture *staging_depth;
1716
1717 if (rtex->resource.b.b.nr_samples > 1) {
1718 /* MSAA depth buffers need to be converted to single sample buffers.
1719 *
1720 * Mapping MSAA depth buffers can occur if ReadPixels is called
1721 * with a multisample GLX visual.
1722 *
1723 * First downsample the depth buffer to a temporary texture,
1724 * then decompress the temporary one to staging.
1725 *
1726 			 * Only the region being mapped is transferred.
1727 */
1728 struct pipe_resource resource;
1729
1730 r600_init_temp_resource_from_box(&resource, texture, box, level, 0);
1731
1732 if (!si_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
1733 R600_ERR("failed to create temporary texture to hold untiled copy\n");
1734 FREE(trans);
1735 return NULL;
1736 }
1737
1738 if (usage & PIPE_TRANSFER_READ) {
1739 struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
1740 if (!temp) {
1741 R600_ERR("failed to create a temporary depth texture\n");
1742 FREE(trans);
1743 return NULL;
1744 }
1745
1746 r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
1747 rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
1748 0, 0, 0, box->depth, 0, 0);
1749 pipe_resource_reference(&temp, NULL);
1750 }
1751
1752 /* Just get the strides. */
1753 r600_texture_get_offset(rctx->screen, staging_depth, level, NULL,
1754 &trans->b.b.stride,
1755 &trans->b.b.layer_stride);
1756 } else {
1757 /* XXX: only readback the rectangle which is being mapped? */
1758 /* XXX: when discard is true, no need to read back from depth texture */
1759 if (!si_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
1760 R600_ERR("failed to create temporary texture to hold untiled copy\n");
1761 FREE(trans);
1762 return NULL;
1763 }
1764
1765 rctx->blit_decompress_depth(ctx, rtex, staging_depth,
1766 level, level,
1767 box->z, box->z + box->depth - 1,
1768 0, 0);
1769
1770 offset = r600_texture_get_offset(rctx->screen, staging_depth,
1771 level, box,
1772 &trans->b.b.stride,
1773 &trans->b.b.layer_stride);
1774 }
1775
1776 trans->staging = (struct r600_resource*)staging_depth;
1777 buf = trans->staging;
1778 } else if (use_staging_texture) {
1779 struct pipe_resource resource;
1780 struct r600_texture *staging;
1781
1782 r600_init_temp_resource_from_box(&resource, texture, box, level,
1783 R600_RESOURCE_FLAG_TRANSFER);
1784 resource.usage = (usage & PIPE_TRANSFER_READ) ?
1785 PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
1786
1787 /* Create the temporary texture. */
1788 staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
1789 if (!staging) {
1790 R600_ERR("failed to create temporary texture to hold untiled copy\n");
1791 FREE(trans);
1792 return NULL;
1793 }
1794 trans->staging = &staging->resource;
1795
1796 /* Just get the strides. */
1797 r600_texture_get_offset(rctx->screen, staging, 0, NULL,
1798 &trans->b.b.stride,
1799 &trans->b.b.layer_stride);
1800
1801 if (usage & PIPE_TRANSFER_READ)
1802 r600_copy_to_staging_texture(ctx, trans);
1803 else
1804 usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
1805
1806 buf = trans->staging;
1807 } else {
1808 /* the resource is mapped directly */
1809 offset = r600_texture_get_offset(rctx->screen, rtex, level, box,
1810 &trans->b.b.stride,
1811 &trans->b.b.layer_stride);
1812 buf = &rtex->resource;
1813 }
1814
1815 if (!(map = si_buffer_map_sync_with_rings(rctx, buf, usage))) {
1816 r600_resource_reference(&trans->staging, NULL);
1817 FREE(trans);
1818 return NULL;
1819 }
1820
1821 *ptransfer = &trans->b.b;
1822 return map + offset;
1823 }
1824
1825 static void r600_texture_transfer_unmap(struct pipe_context *ctx,
1826 struct pipe_transfer* transfer)
1827 {
1828 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
1829 struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
1830 struct pipe_resource *texture = transfer->resource;
1831 struct r600_texture *rtex = (struct r600_texture*)texture;
1832
1833 if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
1834 if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
1835 ctx->resource_copy_region(ctx, texture, transfer->level,
1836 transfer->box.x, transfer->box.y, transfer->box.z,
1837 &rtransfer->staging->b.b, transfer->level,
1838 &transfer->box);
1839 } else {
1840 r600_copy_from_staging_texture(ctx, rtransfer);
1841 }
1842 }
1843
1844 if (rtransfer->staging) {
1845 rctx->num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size;
1846 r600_resource_reference(&rtransfer->staging, NULL);
1847 }
1848
1849 /* Heuristic for {upload, draw, upload, draw, ..}:
1850 *
1851 * Flush the gfx IB if we've allocated too much texture storage.
1852 *
1853 * The idea is that we don't want to build IBs that use too much
1854 * memory and put pressure on the kernel memory manager and we also
1855 * want to make temporary and invalidated buffers go idle ASAP to
1856 * decrease the total memory usage or make them reusable. The memory
1857 * usage will be slightly higher than given here because of the buffer
1858 * cache in the winsys.
1859 *
1860 * The result is that the kernel memory manager is never a bottleneck.
1861 */
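	/* Editor's note - illustrative figure, not part of the original file:
	 * with a 4 GiB GART aperture, the check below flushes the gfx IB
	 * after roughly 1 GiB of staging-texture allocations.
	 */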
1862 if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
1863 rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
1864 rctx->num_alloc_tex_transfer_bytes = 0;
1865 }
1866
1867 pipe_resource_reference(&transfer->resource, NULL);
1868 FREE(transfer);
1869 }
1870
1871 static const struct u_resource_vtbl r600_texture_vtbl =
1872 {
1873 NULL, /* get_handle */
1874 r600_texture_destroy, /* resource_destroy */
1875 r600_texture_transfer_map, /* transfer_map */
1876 u_default_transfer_flush_region, /* transfer_flush_region */
1877 r600_texture_transfer_unmap, /* transfer_unmap */
1878 };
1879
1880 /* DCC channel type categories within which formats can be reinterpreted
1881 * while keeping the same DCC encoding. The swizzle must also match. */
1882 enum dcc_channel_type {
1883 dcc_channel_float32,
1884 dcc_channel_uint32,
1885 dcc_channel_sint32,
1886 dcc_channel_float16,
1887 dcc_channel_uint16,
1888 dcc_channel_sint16,
1889 dcc_channel_uint_10_10_10_2,
1890 dcc_channel_uint8,
1891 dcc_channel_sint8,
1892 dcc_channel_incompatible,
1893 };
1894
1895 /* Return the type of DCC encoding. */
1896 static enum dcc_channel_type
1897 vi_get_dcc_channel_type(const struct util_format_description *desc)
1898 {
1899 int i;
1900
1901 /* Find the first non-void channel. */
1902 for (i = 0; i < desc->nr_channels; i++)
1903 if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID)
1904 break;
1905 if (i == desc->nr_channels)
1906 return dcc_channel_incompatible;
1907
1908 switch (desc->channel[i].size) {
1909 case 32:
1910 if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT)
1911 return dcc_channel_float32;
1912 if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED)
1913 return dcc_channel_uint32;
1914 return dcc_channel_sint32;
1915 case 16:
1916 if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT)
1917 return dcc_channel_float16;
1918 if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED)
1919 return dcc_channel_uint16;
1920 return dcc_channel_sint16;
1921 case 10:
1922 return dcc_channel_uint_10_10_10_2;
1923 case 8:
1924 if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED)
1925 return dcc_channel_uint8;
1926 return dcc_channel_sint8;
1927 default:
1928 return dcc_channel_incompatible;
1929 }
1930 }
1931
1932 /* Return whether it's allowed to reinterpret one format as another with DCC enabled. */
1933 bool vi_dcc_formats_compatible(enum pipe_format format1,
1934 enum pipe_format format2)
1935 {
1936 const struct util_format_description *desc1, *desc2;
1937 enum dcc_channel_type type1, type2;
1938 int i;
1939
1940 if (format1 == format2)
1941 return true;
1942
1943 desc1 = util_format_description(format1);
1944 desc2 = util_format_description(format2);
1945
1946 if (desc1->nr_channels != desc2->nr_channels)
1947 return false;
1948
1949 /* Swizzles must be the same. */
1950 for (i = 0; i < desc1->nr_channels; i++)
1951 if (desc1->swizzle[i] <= PIPE_SWIZZLE_W &&
1952 desc2->swizzle[i] <= PIPE_SWIZZLE_W &&
1953 desc1->swizzle[i] != desc2->swizzle[i])
1954 return false;
1955
1956 type1 = vi_get_dcc_channel_type(desc1);
1957 type2 = vi_get_dcc_channel_type(desc2);
1958
1959 return type1 != dcc_channel_incompatible &&
1960 type2 != dcc_channel_incompatible &&
1961 type1 == type2;
1962 }
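/* Editor's note - illustrative examples, not part of the original file;
 * the results follow from vi_get_dcc_channel_type() above:
 *   vi_dcc_formats_compatible(PIPE_FORMAT_R8G8B8A8_UNORM,
 *                             PIPE_FORMAT_R8G8B8A8_SRGB)   -> true
 *     (same swizzle, both 8-bit unsigned channels)
 *   vi_dcc_formats_compatible(PIPE_FORMAT_R8G8B8A8_UNORM,
 *                             PIPE_FORMAT_B8G8R8A8_UNORM)  -> false
 *     (channel swizzles differ)
 *   vi_dcc_formats_compatible(PIPE_FORMAT_R32_FLOAT,
 *                             PIPE_FORMAT_R32_UINT)        -> false
 *     (float32 vs. uint32 channel categories)
 */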
1963
1964 bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex,
1965 unsigned level,
1966 enum pipe_format view_format)
1967 {
1968 struct r600_texture *rtex = (struct r600_texture *)tex;
1969
1970 return vi_dcc_enabled(rtex, level) &&
1971 !vi_dcc_formats_compatible(tex->format, view_format);
1972 }
1973
1974 /* This can't be merged with the above function, because
1975 * vi_dcc_formats_compatible should be called only when DCC is enabled. */
1976 void vi_disable_dcc_if_incompatible_format(struct r600_common_context *rctx,
1977 struct pipe_resource *tex,
1978 unsigned level,
1979 enum pipe_format view_format)
1980 {
1981 struct r600_texture *rtex = (struct r600_texture *)tex;
1982
1983 if (vi_dcc_formats_are_incompatible(tex, level, view_format))
1984 if (!si_texture_disable_dcc(rctx, (struct r600_texture*)tex))
1985 rctx->decompress_dcc(&rctx->b, rtex);
1986 }
1987
1988 struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
1989 struct pipe_resource *texture,
1990 const struct pipe_surface *templ,
1991 unsigned width0, unsigned height0,
1992 unsigned width, unsigned height)
1993 {
1994 struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
1995
1996 if (!surface)
1997 return NULL;
1998
1999 assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
2000 assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));
2001
2002 pipe_reference_init(&surface->base.reference, 1);
2003 pipe_resource_reference(&surface->base.texture, texture);
2004 surface->base.context = pipe;
2005 surface->base.format = templ->format;
2006 surface->base.width = width;
2007 surface->base.height = height;
2008 surface->base.u = templ->u;
2009
2010 surface->width0 = width0;
2011 surface->height0 = height0;
2012
2013 surface->dcc_incompatible =
2014 texture->target != PIPE_BUFFER &&
2015 vi_dcc_formats_are_incompatible(texture, templ->u.tex.level,
2016 templ->format);
2017 return &surface->base;
2018 }
2019
2020 static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
2021 struct pipe_resource *tex,
2022 const struct pipe_surface *templ)
2023 {
2024 unsigned level = templ->u.tex.level;
2025 unsigned width = u_minify(tex->width0, level);
2026 unsigned height = u_minify(tex->height0, level);
2027 unsigned width0 = tex->width0;
2028 unsigned height0 = tex->height0;
2029
2030 if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
2031 const struct util_format_description *tex_desc
2032 = util_format_description(tex->format);
2033 const struct util_format_description *templ_desc
2034 = util_format_description(templ->format);
2035
2036 assert(tex_desc->block.bits == templ_desc->block.bits);
2037
2038 /* Adjust size of surface if and only if the block width or
2039 * height is changed. */
2040 if (tex_desc->block.width != templ_desc->block.width ||
2041 tex_desc->block.height != templ_desc->block.height) {
2042 unsigned nblks_x = util_format_get_nblocksx(tex->format, width);
2043 unsigned nblks_y = util_format_get_nblocksy(tex->format, height);
2044
2045 width = nblks_x * templ_desc->block.width;
2046 height = nblks_y * templ_desc->block.height;
2047
2048 width0 = util_format_get_nblocksx(tex->format, width0);
2049 height0 = util_format_get_nblocksy(tex->format, height0);
2050 }
2051 }
2052
2053 return si_create_surface_custom(pipe, tex, templ,
2054 width0, height0,
2055 width, height);
2056 }
2057
2058 static void r600_surface_destroy(struct pipe_context *pipe,
2059 struct pipe_surface *surface)
2060 {
2061 pipe_resource_reference(&surface->texture, NULL);
2062 FREE(surface);
2063 }
2064
2065 unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap)
2066 {
2067 const struct util_format_description *desc = util_format_description(format);
2068
2069 #define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)
2070
2071 if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
2072 return V_028C70_SWAP_STD;
2073
2074 if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
2075 return ~0U;
2076
2077 switch (desc->nr_channels) {
2078 case 1:
2079 if (HAS_SWIZZLE(0,X))
2080 return V_028C70_SWAP_STD; /* X___ */
2081 else if (HAS_SWIZZLE(3,X))
2082 return V_028C70_SWAP_ALT_REV; /* ___X */
2083 break;
2084 case 2:
2085 if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
2086 (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
2087 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
2088 return V_028C70_SWAP_STD; /* XY__ */
2089 else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
2090 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
2091 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
2092 /* YX__ */
2093 return (do_endian_swap ? V_028C70_SWAP_STD : V_028C70_SWAP_STD_REV);
2094 else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
2095 return V_028C70_SWAP_ALT; /* X__Y */
2096 else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
2097 return V_028C70_SWAP_ALT_REV; /* Y__X */
2098 break;
2099 case 3:
2100 if (HAS_SWIZZLE(0,X))
2101 return (do_endian_swap ? V_028C70_SWAP_STD_REV : V_028C70_SWAP_STD);
2102 else if (HAS_SWIZZLE(0,Z))
2103 return V_028C70_SWAP_STD_REV; /* ZYX */
2104 break;
2105 case 4:
2106 		/* check the middle channels, the 1st and 4th channels can be NONE */
2107 if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
2108 return V_028C70_SWAP_STD; /* XYZW */
2109 } else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
2110 return V_028C70_SWAP_STD_REV; /* WZYX */
2111 } else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
2112 return V_028C70_SWAP_ALT; /* ZYXW */
2113 } else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
2114 /* YZWX */
2115 if (desc->is_array)
2116 return V_028C70_SWAP_ALT_REV;
2117 else
2118 return (do_endian_swap ? V_028C70_SWAP_ALT : V_028C70_SWAP_ALT_REV);
2119 }
2120 break;
2121 }
2122 return ~0U;
2123 }
2124
2125 /* PIPELINE_STAT-BASED DCC ENABLEMENT FOR DISPLAYABLE SURFACES */
2126
2127 static void vi_dcc_clean_up_context_slot(struct r600_common_context *rctx,
2128 int slot)
2129 {
2130 int i;
2131
2132 if (rctx->dcc_stats[slot].query_active)
2133 vi_separate_dcc_stop_query(&rctx->b,
2134 rctx->dcc_stats[slot].tex);
2135
2136 for (i = 0; i < ARRAY_SIZE(rctx->dcc_stats[slot].ps_stats); i++)
2137 if (rctx->dcc_stats[slot].ps_stats[i]) {
2138 rctx->b.destroy_query(&rctx->b,
2139 rctx->dcc_stats[slot].ps_stats[i]);
2140 rctx->dcc_stats[slot].ps_stats[i] = NULL;
2141 }
2142
2143 r600_texture_reference(&rctx->dcc_stats[slot].tex, NULL);
2144 }
2145
2146 /**
2147 * Return the per-context slot where DCC statistics queries for the texture live.
2148 */
2149 static unsigned vi_get_context_dcc_stats_index(struct r600_common_context *rctx,
2150 struct r600_texture *tex)
2151 {
2152 int i, empty_slot = -1;
2153
2154 /* Remove zombie textures (textures kept alive by this array only). */
2155 for (i = 0; i < ARRAY_SIZE(rctx->dcc_stats); i++)
2156 if (rctx->dcc_stats[i].tex &&
2157 rctx->dcc_stats[i].tex->resource.b.b.reference.count == 1)
2158 vi_dcc_clean_up_context_slot(rctx, i);
2159
2160 /* Find the texture. */
2161 for (i = 0; i < ARRAY_SIZE(rctx->dcc_stats); i++) {
2162 /* Return if found. */
2163 if (rctx->dcc_stats[i].tex == tex) {
2164 rctx->dcc_stats[i].last_use_timestamp = os_time_get();
2165 return i;
2166 }
2167
2168 /* Record the first seen empty slot. */
2169 if (empty_slot == -1 && !rctx->dcc_stats[i].tex)
2170 empty_slot = i;
2171 }
2172
2173 /* Not found. Remove the oldest member to make space in the array. */
2174 if (empty_slot == -1) {
2175 int oldest_slot = 0;
2176
2177 /* Find the oldest slot. */
2178 for (i = 1; i < ARRAY_SIZE(rctx->dcc_stats); i++)
2179 if (rctx->dcc_stats[oldest_slot].last_use_timestamp >
2180 rctx->dcc_stats[i].last_use_timestamp)
2181 oldest_slot = i;
2182
2183 /* Clean up the oldest slot. */
2184 vi_dcc_clean_up_context_slot(rctx, oldest_slot);
2185 empty_slot = oldest_slot;
2186 }
2187
2188 /* Add the texture to the new slot. */
2189 r600_texture_reference(&rctx->dcc_stats[empty_slot].tex, tex);
2190 rctx->dcc_stats[empty_slot].last_use_timestamp = os_time_get();
2191 return empty_slot;
2192 }
2193
2194 static struct pipe_query *
2195 vi_create_resuming_pipestats_query(struct pipe_context *ctx)
2196 {
2197 struct r600_query_hw *query = (struct r600_query_hw*)
2198 ctx->create_query(ctx, PIPE_QUERY_PIPELINE_STATISTICS, 0);
2199
2200 query->flags |= R600_QUERY_HW_FLAG_BEGIN_RESUMES;
2201 return (struct pipe_query*)query;
2202 }
2203
2204 /**
2205 * Called when binding a color buffer.
2206 */
2207 void vi_separate_dcc_start_query(struct pipe_context *ctx,
2208 struct r600_texture *tex)
2209 {
2210 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
2211 unsigned i = vi_get_context_dcc_stats_index(rctx, tex);
2212
2213 assert(!rctx->dcc_stats[i].query_active);
2214
2215 if (!rctx->dcc_stats[i].ps_stats[0])
2216 rctx->dcc_stats[i].ps_stats[0] = vi_create_resuming_pipestats_query(ctx);
2217
2218 /* begin or resume the query */
2219 ctx->begin_query(ctx, rctx->dcc_stats[i].ps_stats[0]);
2220 rctx->dcc_stats[i].query_active = true;
2221 }
2222
2223 /**
2224 * Called when unbinding a color buffer.
2225 */
2226 void vi_separate_dcc_stop_query(struct pipe_context *ctx,
2227 struct r600_texture *tex)
2228 {
2229 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
2230 unsigned i = vi_get_context_dcc_stats_index(rctx, tex);
2231
2232 assert(rctx->dcc_stats[i].query_active);
2233 assert(rctx->dcc_stats[i].ps_stats[0]);
2234
2235 /* pause or end the query */
2236 ctx->end_query(ctx, rctx->dcc_stats[i].ps_stats[0]);
2237 rctx->dcc_stats[i].query_active = false;
2238 }
2239
2240 static bool vi_should_enable_separate_dcc(struct r600_texture *tex)
2241 {
2242 /* The minimum number of fullscreen draws per frame that is required
2243 * to enable DCC. */
2244 return tex->ps_draw_ratio + tex->num_slow_clears >= 5;
2245 }
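/* Editor's note - illustrative arithmetic, not part of the original file:
 * ps_draw_ratio is computed in vi_separate_dcc_process_and_reset_stats()
 * as ps_invocations / (width0 * height0). For a 1920x1080 surface
 * (2,073,600 pixels), about 10.4 million PS invocations per measuring
 * period correspond to ~5 fullscreen draws, which meets the threshold
 * above when there are no slow clears.
 */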
2246
2247 /* Called by fast clear. */
2248 void vi_separate_dcc_try_enable(struct r600_common_context *rctx,
2249 struct r600_texture *tex)
2250 {
2251 /* The intent is to use this with shared displayable back buffers,
2252 * but it's not strictly limited only to them.
2253 */
2254 if (!tex->resource.b.is_shared ||
2255 !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) ||
2256 tex->resource.b.b.target != PIPE_TEXTURE_2D ||
2257 tex->resource.b.b.last_level > 0 ||
2258 !tex->surface.dcc_size)
2259 return;
2260
2261 if (tex->dcc_offset)
2262 return; /* already enabled */
2263
2264 /* Enable the DCC stat gathering. */
2265 if (!tex->dcc_gather_statistics) {
2266 tex->dcc_gather_statistics = true;
2267 vi_separate_dcc_start_query(&rctx->b, tex);
2268 }
2269
2270 if (!vi_should_enable_separate_dcc(tex))
2271 return; /* stats show that DCC decompression is too expensive */
2272
2273 assert(tex->surface.num_dcc_levels);
2274 assert(!tex->dcc_separate_buffer);
2275
2276 si_texture_discard_cmask(rctx->screen, tex);
2277
2278 /* Get a DCC buffer. */
2279 if (tex->last_dcc_separate_buffer) {
2280 assert(tex->dcc_gather_statistics);
2281 assert(!tex->dcc_separate_buffer);
2282 tex->dcc_separate_buffer = tex->last_dcc_separate_buffer;
2283 tex->last_dcc_separate_buffer = NULL;
2284 } else {
2285 tex->dcc_separate_buffer = (struct r600_resource*)
2286 si_aligned_buffer_create(rctx->b.screen,
2287 R600_RESOURCE_FLAG_UNMAPPABLE,
2288 PIPE_USAGE_DEFAULT,
2289 tex->surface.dcc_size,
2290 tex->surface.dcc_alignment);
2291 if (!tex->dcc_separate_buffer)
2292 return;
2293 }
2294
2295 /* dcc_offset is the absolute GPUVM address. */
2296 tex->dcc_offset = tex->dcc_separate_buffer->gpu_address;
2297
2298 	/* No need to flag anything since this is called by the fast clear
2299 	 * path, which already flags framebuffer state.
2300 	 */
2301 }
2302
2303 /**
2304 * Called by pipe_context::flush_resource, the place where DCC decompression
2305 * takes place.
2306 */
2307 void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
2308 struct r600_texture *tex)
2309 {
2310 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
2311 struct pipe_query *tmp;
2312 unsigned i = vi_get_context_dcc_stats_index(rctx, tex);
2313 bool query_active = rctx->dcc_stats[i].query_active;
2314 bool disable = false;
2315
2316 if (rctx->dcc_stats[i].ps_stats[2]) {
2317 union pipe_query_result result;
2318
2319 /* Read the results. */
2320 ctx->get_query_result(ctx, rctx->dcc_stats[i].ps_stats[2],
2321 true, &result);
2322 si_query_hw_reset_buffers(rctx,
2323 (struct r600_query_hw*)
2324 rctx->dcc_stats[i].ps_stats[2]);
2325
2326 /* Compute the approximate number of fullscreen draws. */
2327 tex->ps_draw_ratio =
2328 result.pipeline_statistics.ps_invocations /
2329 (tex->resource.b.b.width0 * tex->resource.b.b.height0);
2330 rctx->last_tex_ps_draw_ratio = tex->ps_draw_ratio;
2331
2332 disable = tex->dcc_separate_buffer &&
2333 !vi_should_enable_separate_dcc(tex);
2334 }
2335
2336 tex->num_slow_clears = 0;
2337
2338 /* stop the statistics query for ps_stats[0] */
2339 if (query_active)
2340 vi_separate_dcc_stop_query(ctx, tex);
2341
2342 /* Move the queries in the queue by one. */
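	/* Editor's note, not part of the original file: ps_stats[0] is the
	 * query currently accumulating, ps_stats[1] covers the previous
	 * period, and ps_stats[2] is the oldest one whose result was read
	 * above. The rotation below recycles the oldest query object, so
	 * the enable/disable decision is based on statistics from roughly
	 * two flush_resource calls ago.
	 */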
2343 tmp = rctx->dcc_stats[i].ps_stats[2];
2344 rctx->dcc_stats[i].ps_stats[2] = rctx->dcc_stats[i].ps_stats[1];
2345 rctx->dcc_stats[i].ps_stats[1] = rctx->dcc_stats[i].ps_stats[0];
2346 rctx->dcc_stats[i].ps_stats[0] = tmp;
2347
2348 /* create and start a new query as ps_stats[0] */
2349 if (query_active)
2350 vi_separate_dcc_start_query(ctx, tex);
2351
2352 if (disable) {
2353 assert(!tex->last_dcc_separate_buffer);
2354 tex->last_dcc_separate_buffer = tex->dcc_separate_buffer;
2355 tex->dcc_separate_buffer = NULL;
2356 tex->dcc_offset = 0;
2357 		/* No need to flag anything since this is called after
2358 		 * decompression, which re-sets framebuffer state.
2359 		 */
2360 }
2361 }
2362
2363 static struct pipe_memory_object *
2364 r600_memobj_from_handle(struct pipe_screen *screen,
2365 struct winsys_handle *whandle,
2366 bool dedicated)
2367 {
2368 struct si_screen *sscreen = (struct si_screen*)screen;
2369 struct r600_memory_object *memobj = CALLOC_STRUCT(r600_memory_object);
2370 struct pb_buffer *buf = NULL;
2371 uint32_t stride, offset;
2372
2373 if (!memobj)
2374 return NULL;
2375
2376 buf = sscreen->ws->buffer_from_handle(sscreen->ws, whandle,
2377 &stride, &offset);
2378 if (!buf) {
2379 free(memobj);
2380 return NULL;
2381 }
2382
2383 memobj->b.dedicated = dedicated;
2384 memobj->buf = buf;
2385 memobj->stride = stride;
2386 memobj->offset = offset;
2387
2388 return (struct pipe_memory_object *)memobj;
2389
2390 }
2391
2392 static void
2393 r600_memobj_destroy(struct pipe_screen *screen,
2394 struct pipe_memory_object *_memobj)
2395 {
2396 struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
2397
2398 pb_reference(&memobj->buf, NULL);
2399 free(memobj);
2400 }
2401
2402 static struct pipe_resource *
2403 r600_texture_from_memobj(struct pipe_screen *screen,
2404 const struct pipe_resource *templ,
2405 struct pipe_memory_object *_memobj,
2406 uint64_t offset)
2407 {
2408 int r;
2409 struct si_screen *sscreen = (struct si_screen*)screen;
2410 struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
2411 struct r600_texture *rtex;
2412 struct radeon_surf surface = {};
2413 struct radeon_bo_metadata metadata = {};
2414 enum radeon_surf_mode array_mode;
2415 bool is_scanout;
2416 struct pb_buffer *buf = NULL;
2417
2418 if (memobj->b.dedicated) {
2419 sscreen->ws->buffer_get_metadata(memobj->buf, &metadata);
2420 r600_surface_import_metadata(sscreen, &surface, &metadata,
2421 &array_mode, &is_scanout);
2422 } else {
2423 /**
2424 * The bo metadata is unset for un-dedicated images. So we fall
2425 * back to linear. See answer to question 5 of the
2426 * VK_KHX_external_memory spec for some details.
2427 *
2428 * It is possible that this case isn't going to work if the
2429 * surface pitch isn't correctly aligned by default.
2430 *
2431 * In order to support it correctly we require multi-image
2432 		 * metadata to be synchronized between radv and radeonsi. The
2433 * semantics of associating multiple image metadata to a memory
2434 * object on the vulkan export side are not concretely defined
2435 * either.
2436 *
2437 * All the use cases we are aware of at the moment for memory
2438 		 * objects use dedicated allocations. So let's keep the initial
2439 * implementation simple.
2440 *
2441 * A possible alternative is to attempt to reconstruct the
2442 * tiling information when the TexParameter TEXTURE_TILING_EXT
2443 * is set.
2444 */
2445 array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
2446 is_scanout = false;
2447
2448 }
2449
2450 r = r600_init_surface(sscreen, &surface, templ,
2451 array_mode, memobj->stride,
2452 offset, true, is_scanout,
2453 false, false);
2454 if (r)
2455 return NULL;
2456
2457 rtex = r600_texture_create_object(screen, templ, memobj->buf, &surface);
2458 if (!rtex)
2459 return NULL;
2460
2461 /* r600_texture_create_object doesn't increment refcount of
2462 * memobj->buf, so increment it here.
2463 */
2464 pb_reference(&buf, memobj->buf);
2465
2466 rtex->resource.b.is_shared = true;
2467 rtex->resource.external_usage = PIPE_HANDLE_USAGE_READ_WRITE;
2468
2469 si_apply_opaque_metadata(sscreen, rtex, &metadata);
2470
2471 return &rtex->resource.b.b;
2472 }
2473
2474 static bool si_check_resource_capability(struct pipe_screen *screen,
2475 struct pipe_resource *resource,
2476 unsigned bind)
2477 {
2478 struct r600_texture *tex = (struct r600_texture*)resource;
2479
2480 /* Buffers only support the linear flag. */
2481 if (resource->target == PIPE_BUFFER)
2482 return (bind & ~PIPE_BIND_LINEAR) == 0;
2483
2484 if (bind & PIPE_BIND_LINEAR && !tex->surface.is_linear)
2485 return false;
2486
2487 if (bind & PIPE_BIND_SCANOUT && !tex->surface.is_displayable)
2488 return false;
2489
2490 /* TODO: PIPE_BIND_CURSOR - do we care? */
2491 return true;
2492 }
2493
2494 void si_init_screen_texture_functions(struct si_screen *sscreen)
2495 {
2496 sscreen->b.resource_from_handle = r600_texture_from_handle;
2497 sscreen->b.resource_get_handle = r600_texture_get_handle;
2498 sscreen->b.resource_from_memobj = r600_texture_from_memobj;
2499 sscreen->b.memobj_create_from_handle = r600_memobj_from_handle;
2500 sscreen->b.memobj_destroy = r600_memobj_destroy;
2501 sscreen->b.check_resource_capability = si_check_resource_capability;
2502 }
2503
2504 void si_init_context_texture_functions(struct r600_common_context *rctx)
2505 {
2506 rctx->b.create_surface = r600_create_surface;
2507 rctx->b.surface_destroy = r600_surface_destroy;
2508 }