/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "si_pipe.h"
#include "util/u_format.h"
#include "util/u_log.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include "util/u_resource.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"
#include "util/os_time.h"
#include <errno.h>
#include <inttypes.h>
#include "state_tracker/drm_driver.h"
#include "sid.h"
static enum radeon_surf_mode
si_choose_tiling(struct si_screen *sscreen,
		 const struct pipe_resource *templ,
		 bool tc_compatible_htile);
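
/* Check whether a copy can go through the SDMA engine and, if so, prepare
 * both textures for it (discarding or flushing CMASK state as needed).
 * Returns false when the caller must take the 3D blit path instead. */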
bool si_prepare_for_dma_blit(struct si_context *sctx,
			     struct si_texture *dst,
			     unsigned dst_level, unsigned dstx,
			     unsigned dsty, unsigned dstz,
			     struct si_texture *src,
			     unsigned src_level,
			     const struct pipe_box *src_box)
{
	if (!sctx->dma_cs)
		return false;

	if (dst->surface.bpe != src->surface.bpe)
		return false;

	/* MSAA: Blits don't exist in the real world. */
	if (src->buffer.b.b.nr_samples > 1 ||
	    dst->buffer.b.b.nr_samples > 1)
		return false;

	/* Depth-stencil surfaces:
	 *   When dst is linear, the DB->CB copy preserves HTILE.
	 *   When dst is tiled, the 3D path must be used to update HTILE.
	 */
	if (src->is_depth || dst->is_depth)
		return false;

	/* DCC as:
	 *   src: Use the 3D path. DCC decompression is expensive.
	 *   dst: Use the 3D path to compress the pixels with DCC.
	 */
	if (vi_dcc_enabled(src, src_level) ||
	    vi_dcc_enabled(dst, dst_level))
		return false;

	/* CMASK as:
	 *   src: Both texture and SDMA paths need decompression. Use SDMA.
	 *   dst: If overwriting the whole texture, discard CMASK and use
	 *        SDMA. Otherwise, use the 3D path.
	 */
	if (dst->cmask_buffer && dst->dirty_level_mask & (1 << dst_level)) {
		/* The CMASK clear is only enabled for the first level. */
		assert(dst_level == 0);
		if (!util_texrange_covers_whole_level(&dst->buffer.b.b, dst_level,
						      dstx, dsty, dstz, src_box->width,
						      src_box->height, src_box->depth))
			return false;

		si_texture_discard_cmask(sctx->screen, dst);
	}

	/* All requirements are met. Prepare textures for SDMA. */
	if (src->cmask_buffer && src->dirty_level_mask & (1 << src_level))
		sctx->b.flush_resource(&sctx->b, &src->buffer.b.b);

	assert(!(src->dirty_level_mask & (1 << src_level)));
	assert(!(dst->dirty_level_mask & (1 << dst_level)));

	return true;
}
/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void si_copy_region_with_blit(struct pipe_context *pipe,
				     struct pipe_resource *dst,
				     unsigned dst_level,
				     unsigned dstx, unsigned dsty, unsigned dstz,
				     struct pipe_resource *src,
				     unsigned src_level,
				     const struct pipe_box *src_box)
{
	struct pipe_blit_info blit;

	memset(&blit, 0, sizeof(blit));
	blit.src.resource = src;
	blit.src.format = src->format;
	blit.src.level = src_level;
	blit.src.box = *src_box;
	blit.dst.resource = dst;
	blit.dst.format = dst->format;
	blit.dst.level = dst_level;
	blit.dst.box.x = dstx;
	blit.dst.box.y = dsty;
	blit.dst.box.z = dstz;
	blit.dst.box.width = src_box->width;
	blit.dst.box.height = src_box->height;
	blit.dst.box.depth = src_box->depth;
	blit.mask = util_format_get_mask(src->format) &
		    util_format_get_mask(dst->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	if (blit.mask) {
		pipe->blit(pipe, &blit);
	}
}
/* Copy from a full GPU texture to a transfer's staging one. */
static void si_copy_to_staging_texture(struct pipe_context *ctx, struct si_transfer *stransfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)stransfer;
	struct pipe_resource *dst = &stransfer->staging->b.b;
	struct pipe_resource *src = transfer->resource;

	if (src->nr_samples > 1) {
		si_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
					 src, transfer->level, &transfer->box);
		return;
	}

	sctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
		       &transfer->box);
}
/* Copy from a transfer's staging texture to a full GPU one. */
static void si_copy_from_staging_texture(struct pipe_context *ctx, struct si_transfer *stransfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)stransfer;
	struct pipe_resource *dst = transfer->resource;
	struct pipe_resource *src = &stransfer->staging->b.b;
	struct pipe_box sbox;

	u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

	if (dst->nr_samples > 1) {
		si_copy_region_with_blit(ctx, dst, transfer->level,
					 transfer->box.x, transfer->box.y, transfer->box.z,
					 src, 0, &sbox);
		return;
	}

	if (util_format_is_compressed(dst->format)) {
		sbox.width = util_format_get_nblocksx(dst->format, sbox.width);
		sbox.height = util_format_get_nblocksy(dst->format, sbox.height);
	}

	sctx->dma_copy(ctx, dst, transfer->level,
		       transfer->box.x, transfer->box.y, transfer->box.z,
		       src, 0, &sbox);
}
static unsigned si_texture_get_offset(struct si_screen *sscreen,
				      struct si_texture *tex, unsigned level,
				      const struct pipe_box *box,
				      unsigned *stride,
				      unsigned *layer_stride)
{
	if (sscreen->info.chip_class >= GFX9) {
		*stride = tex->surface.u.gfx9.surf_pitch * tex->surface.bpe;
		*layer_stride = tex->surface.u.gfx9.surf_slice_size;

		if (!box)
			return 0;

		/* Each texture is an array of slices. Each slice is an array
		 * of mipmap levels. */
		return box->z * tex->surface.u.gfx9.surf_slice_size +
		       tex->surface.u.gfx9.offset[level] +
		       (box->y / tex->surface.blk_h *
			tex->surface.u.gfx9.surf_pitch +
			box->x / tex->surface.blk_w) * tex->surface.bpe;
	} else {
		*stride = tex->surface.u.legacy.level[level].nblk_x *
			  tex->surface.bpe;
		assert((uint64_t)tex->surface.u.legacy.level[level].slice_size_dw * 4 <= UINT_MAX);
		*layer_stride = (uint64_t)tex->surface.u.legacy.level[level].slice_size_dw * 4;

		if (!box)
			return tex->surface.u.legacy.level[level].offset;

		/* Each texture is an array of mipmap levels. Each level is
		 * an array of slices. */
		return tex->surface.u.legacy.level[level].offset +
		       box->z * (uint64_t)tex->surface.u.legacy.level[level].slice_size_dw * 4 +
		       (box->y / tex->surface.blk_h *
			tex->surface.u.legacy.level[level].nblk_x +
			box->x / tex->surface.blk_w) * tex->surface.bpe;
	}
}
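
/* Worked example for the legacy path (hypothetical numbers): a 32bpp
 * texture with blk_w = blk_h = 1, level[2].offset = 0x20000,
 * level[2].nblk_x = 256 and level[2].slice_size_dw = 0x10000 maps the
 * texel at box (x=10, y=4, z=1) to
 *   0x20000 + 1 * 0x40000 + (4 * 256 + 10) * 4 = 0x61028 bytes
 * from the start of the resource. */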
static int si_init_surface(struct si_screen *sscreen,
			   struct radeon_surf *surface,
			   const struct pipe_resource *ptex,
			   enum radeon_surf_mode array_mode,
			   unsigned pitch_in_bytes_override,
			   unsigned offset,
			   bool is_imported,
			   bool is_scanout,
			   bool is_flushed_depth,
			   bool tc_compatible_htile)
{
	const struct util_format_description *desc =
		util_format_description(ptex->format);
	bool is_depth, is_stencil;
	int r;
	unsigned i, bpe, flags = 0;

	is_depth = util_format_has_depth(desc);
	is_stencil = util_format_has_stencil(desc);

	if (!is_flushed_depth &&
	    ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
		bpe = 4; /* stencil is allocated separately */
	} else {
		bpe = util_format_get_blocksize(ptex->format);
		assert(util_is_power_of_two_or_zero(bpe));
	}

	if (!is_flushed_depth && is_depth) {
		flags |= RADEON_SURF_ZBUFFER;

		if (tc_compatible_htile &&
		    (sscreen->info.chip_class >= GFX9 ||
		     array_mode == RADEON_SURF_MODE_2D)) {
			/* TC-compatible HTILE only supports Z32_FLOAT.
			 * GFX9 also supports Z16_UNORM.
			 * On GFX8, promote Z16 to Z32. DB->CB copies will convert
			 * the format for transfers.
			 */
			if (sscreen->info.chip_class == GFX8)
				bpe = 4;

			flags |= RADEON_SURF_TC_COMPATIBLE_HTILE;
		}

		if (is_stencil)
			flags |= RADEON_SURF_SBUFFER;
	}

	if (sscreen->info.chip_class >= GFX8 &&
	    (ptex->flags & SI_RESOURCE_FLAG_DISABLE_DCC ||
	     ptex->format == PIPE_FORMAT_R9G9B9E5_FLOAT ||
	     (ptex->nr_samples >= 2 && !sscreen->dcc_msaa_allowed)))
		flags |= RADEON_SURF_DISABLE_DCC;

	/* Stoney: 128bpp MSAA textures randomly fail piglit tests with DCC. */
	if (sscreen->info.family == CHIP_STONEY &&
	    bpe == 16 && ptex->nr_samples >= 2)
		flags |= RADEON_SURF_DISABLE_DCC;

	/* GFX8: DCC clear for 4x and 8x MSAA array textures unimplemented. */
	if (sscreen->info.chip_class == GFX8 &&
	    ptex->nr_storage_samples >= 4 &&
	    ptex->array_size > 1)
		flags |= RADEON_SURF_DISABLE_DCC;

	/* GFX9: DCC clear for 4x and 8x MSAA textures unimplemented. */
	if (sscreen->info.chip_class >= GFX9 &&
	    ptex->nr_storage_samples >= 4)
		flags |= RADEON_SURF_DISABLE_DCC;

	if (ptex->bind & PIPE_BIND_SCANOUT || is_scanout) {
		/* This should catch bugs in gallium users setting incorrect flags. */
		assert(ptex->nr_samples <= 1 &&
		       ptex->array_size == 1 &&
		       ptex->depth0 == 1 &&
		       ptex->last_level == 0 &&
		       !(flags & RADEON_SURF_Z_OR_SBUFFER));

		flags |= RADEON_SURF_SCANOUT;
	}

	if (ptex->bind & PIPE_BIND_SHARED)
		flags |= RADEON_SURF_SHAREABLE;
	if (is_imported)
		flags |= RADEON_SURF_IMPORTED | RADEON_SURF_SHAREABLE;
	if (!(ptex->flags & SI_RESOURCE_FLAG_FORCE_MSAA_TILING))
		flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE;

	r = sscreen->ws->surface_init(sscreen->ws, ptex, flags, bpe,
				      array_mode, surface);
	if (r) {
		return r;
	}

	unsigned pitch = pitch_in_bytes_override / bpe;

	if (sscreen->info.chip_class >= GFX9) {
		if (pitch) {
			surface->u.gfx9.surf_pitch = pitch;
			surface->u.gfx9.surf_slice_size =
				(uint64_t)pitch * surface->u.gfx9.surf_height * bpe;
		}
		surface->u.gfx9.surf_offset = offset;
	} else {
		if (pitch) {
			surface->u.legacy.level[0].nblk_x = pitch;
			surface->u.legacy.level[0].slice_size_dw =
				((uint64_t)pitch * surface->u.legacy.level[0].nblk_y * bpe) / 4;
		}

		if (offset) {
			for (i = 0; i < ARRAY_SIZE(surface->u.legacy.level); ++i)
				surface->u.legacy.level[i].offset += offset;
		}
	}

	return 0;
}
static void si_get_display_metadata(struct si_screen *sscreen,
				    struct radeon_surf *surf,
				    struct radeon_bo_metadata *metadata,
				    enum radeon_surf_mode *array_mode,
				    bool *is_scanout)
{
	if (sscreen->info.chip_class >= GFX9) {
		if (metadata->u.gfx9.swizzle_mode > 0)
			*array_mode = RADEON_SURF_MODE_2D;
		else
			*array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

		*is_scanout = metadata->u.gfx9.swizzle_mode == 0 ||
			      metadata->u.gfx9.swizzle_mode % 4 == 2;

		surf->u.gfx9.surf.swizzle_mode = metadata->u.gfx9.swizzle_mode;

		if (metadata->u.gfx9.dcc_offset_256B) {
			surf->u.gfx9.display_dcc_pitch_max = metadata->u.gfx9.dcc_pitch_max;
			assert(metadata->u.gfx9.dcc_independent_64B == 1);
		}
	} else {
		surf->u.legacy.pipe_config = metadata->u.legacy.pipe_config;
		surf->u.legacy.bankw = metadata->u.legacy.bankw;
		surf->u.legacy.bankh = metadata->u.legacy.bankh;
		surf->u.legacy.tile_split = metadata->u.legacy.tile_split;
		surf->u.legacy.mtilea = metadata->u.legacy.mtilea;
		surf->u.legacy.num_banks = metadata->u.legacy.num_banks;

		if (metadata->u.legacy.macrotile == RADEON_LAYOUT_TILED)
			*array_mode = RADEON_SURF_MODE_2D;
		else if (metadata->u.legacy.microtile == RADEON_LAYOUT_TILED)
			*array_mode = RADEON_SURF_MODE_1D;
		else
			*array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

		*is_scanout = metadata->u.legacy.scanout;
	}
}
void si_eliminate_fast_color_clear(struct si_context *sctx,
				   struct si_texture *tex)
{
	struct si_screen *sscreen = sctx->screen;
	struct pipe_context *ctx = &sctx->b;

	if (ctx == sscreen->aux_context)
		mtx_lock(&sscreen->aux_context_lock);

	unsigned n = sctx->num_decompress_calls;
	ctx->flush_resource(ctx, &tex->buffer.b.b);

	/* Flush only if any fast clear elimination took place. */
	if (n != sctx->num_decompress_calls)
		ctx->flush(ctx, NULL, 0);

	if (ctx == sscreen->aux_context)
		mtx_unlock(&sscreen->aux_context_lock);
}
void si_texture_discard_cmask(struct si_screen *sscreen,
			      struct si_texture *tex)
{
	if (!tex->cmask_buffer)
		return;

	assert(tex->buffer.b.b.nr_samples <= 1);

	/* Disable CMASK. */
	tex->cmask_base_address_reg = tex->buffer.gpu_address >> 8;
	tex->dirty_level_mask = 0;

	tex->cb_color_info &= ~S_028C70_FAST_CLEAR(1);

	if (tex->cmask_buffer != &tex->buffer)
		si_resource_reference(&tex->cmask_buffer, NULL);

	tex->cmask_buffer = NULL;

	/* Notify all contexts about the change. */
	p_atomic_inc(&sscreen->dirty_tex_counter);
	p_atomic_inc(&sscreen->compressed_colortex_counter);
}
static bool si_can_disable_dcc(struct si_texture *tex)
{
	/* We can't disable DCC if it can be written by another process. */
	return tex->dcc_offset &&
	       (!tex->buffer.b.is_shared ||
		!(tex->buffer.external_usage & PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE));
}
static bool si_texture_discard_dcc(struct si_screen *sscreen,
				   struct si_texture *tex)
{
	if (!si_can_disable_dcc(tex)) {
		assert(tex->display_dcc_offset == 0);
		return false;
	}

	assert(tex->dcc_separate_buffer == NULL);

	/* Disable DCC. */
	tex->dcc_offset = 0;
	tex->display_dcc_offset = 0;
	tex->dcc_retile_map_offset = 0;

	/* Notify all contexts about the change. */
	p_atomic_inc(&sscreen->dirty_tex_counter);
	return true;
}
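
/* Note: unlike si_texture_disable_dcc below, the discard variant above only
 * drops the DCC metadata without decompressing first; callers must already
 * know that no compressed tiles can remain. */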
/**
 * Disable DCC for the texture. (first decompress, then discard metadata).
 *
 * There is an unresolved multi-context synchronization issue between
 * screen::aux_context and the current context. If applications do this with
 * multiple contexts, it's already undefined behavior for them and we don't
 * have to worry about that. The scenario is:
 *
 * If context 1 disables DCC and context 2 has queued commands that write
 * to the texture via CB with DCC enabled, and the order of operations is
 * as follows:
 *   context 2 queues draw calls rendering to the texture, but doesn't flush
 *   context 1 disables DCC and flushes
 *   context 1 & 2 reset descriptors and FB state
 *   context 2 flushes (new compressed tiles written by the draw calls)
 *   context 1 & 2 read garbage, because DCC is disabled, yet there are
 *   compressed tiles
 *
 * \param sctx  the current context if you have one, or sscreen->aux_context
 *              if you don't.
 */
bool si_texture_disable_dcc(struct si_context *sctx,
			    struct si_texture *tex)
{
	struct si_screen *sscreen = sctx->screen;

	if (!sctx->has_graphics)
		return si_texture_discard_dcc(sscreen, tex);

	if (!si_can_disable_dcc(tex))
		return false;

	if (&sctx->b == sscreen->aux_context)
		mtx_lock(&sscreen->aux_context_lock);

	/* Decompress DCC. */
	si_decompress_dcc(sctx, tex);
	sctx->b.flush(&sctx->b, NULL, 0);

	if (&sctx->b == sscreen->aux_context)
		mtx_unlock(&sscreen->aux_context_lock);

	return si_texture_discard_dcc(sscreen, tex);
}
static void si_reallocate_texture_inplace(struct si_context *sctx,
					  struct si_texture *tex,
					  unsigned new_bind_flag,
					  bool invalidate_storage)
{
	struct pipe_screen *screen = sctx->b.screen;
	struct si_texture *new_tex;
	struct pipe_resource templ = tex->buffer.b.b;
	unsigned i;

	templ.bind |= new_bind_flag;

	if (tex->buffer.b.is_shared)
		return;

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		if (tex->surface.is_linear)
			return;

		/* This fails with MSAA, depth, and compressed textures. */
		if (si_choose_tiling(sctx->screen, &templ, false) !=
		    RADEON_SURF_MODE_LINEAR_ALIGNED)
			return;
	}

	new_tex = (struct si_texture*)screen->resource_create(screen, &templ);
	if (!new_tex)
		return;

	/* Copy the pixels to the new texture. */
	if (!invalidate_storage) {
		for (i = 0; i <= templ.last_level; i++) {
			struct pipe_box box;

			u_box_3d(0, 0, 0,
				 u_minify(templ.width0, i), u_minify(templ.height0, i),
				 util_num_layers(&templ, i), &box);

			sctx->dma_copy(&sctx->b, &new_tex->buffer.b.b, i, 0, 0, 0,
				       &tex->buffer.b.b, i, &box);
		}
	}

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		si_texture_discard_cmask(sctx->screen, tex);
		si_texture_discard_dcc(sctx->screen, tex);
	}

	/* Replace the structure fields of tex. */
	tex->buffer.b.b.bind = templ.bind;
	pb_reference(&tex->buffer.buf, new_tex->buffer.buf);
	tex->buffer.gpu_address = new_tex->buffer.gpu_address;
	tex->buffer.vram_usage = new_tex->buffer.vram_usage;
	tex->buffer.gart_usage = new_tex->buffer.gart_usage;
	tex->buffer.bo_size = new_tex->buffer.bo_size;
	tex->buffer.bo_alignment = new_tex->buffer.bo_alignment;
	tex->buffer.domains = new_tex->buffer.domains;
	tex->buffer.flags = new_tex->buffer.flags;

	tex->surface = new_tex->surface;
	tex->size = new_tex->size;
	si_texture_reference(&tex->flushed_depth_texture,
			     new_tex->flushed_depth_texture);

	tex->fmask_offset = new_tex->fmask_offset;
	tex->cmask_offset = new_tex->cmask_offset;
	tex->cmask_base_address_reg = new_tex->cmask_base_address_reg;

	if (tex->cmask_buffer == &tex->buffer)
		tex->cmask_buffer = NULL;
	else
		si_resource_reference(&tex->cmask_buffer, NULL);

	if (new_tex->cmask_buffer == &new_tex->buffer)
		tex->cmask_buffer = &tex->buffer;
	else
		si_resource_reference(&tex->cmask_buffer, new_tex->cmask_buffer);

	tex->dcc_offset = new_tex->dcc_offset;
	tex->cb_color_info = new_tex->cb_color_info;
	memcpy(tex->color_clear_value, new_tex->color_clear_value,
	       sizeof(tex->color_clear_value));
	tex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode;

	tex->htile_offset = new_tex->htile_offset;
	tex->depth_clear_value = new_tex->depth_clear_value;
	tex->dirty_level_mask = new_tex->dirty_level_mask;
	tex->stencil_dirty_level_mask = new_tex->stencil_dirty_level_mask;
	tex->db_render_format = new_tex->db_render_format;
	tex->stencil_clear_value = new_tex->stencil_clear_value;
	tex->tc_compatible_htile = new_tex->tc_compatible_htile;
	tex->depth_cleared = new_tex->depth_cleared;
	tex->stencil_cleared = new_tex->stencil_cleared;
	tex->upgraded_depth = new_tex->upgraded_depth;
	tex->db_compatible = new_tex->db_compatible;
	tex->can_sample_z = new_tex->can_sample_z;
	tex->can_sample_s = new_tex->can_sample_s;

	tex->separate_dcc_dirty = new_tex->separate_dcc_dirty;
	tex->dcc_gather_statistics = new_tex->dcc_gather_statistics;
	si_resource_reference(&tex->dcc_separate_buffer,
			      new_tex->dcc_separate_buffer);
	si_resource_reference(&tex->last_dcc_separate_buffer,
			      new_tex->last_dcc_separate_buffer);

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		assert(!tex->htile_offset);
		assert(!tex->cmask_buffer);
		assert(!tex->surface.fmask_size);
		assert(!tex->dcc_offset);
		assert(!tex->is_depth);
	}

	si_texture_reference(&new_tex, NULL);

	p_atomic_inc(&sctx->screen->dirty_tex_counter);
}
static uint32_t si_get_bo_metadata_word1(struct si_screen *sscreen)
{
	return (ATI_VENDOR_ID << 16) | sscreen->info.pci_id;
}
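
/* Example: with ATI_VENDOR_ID = 0x1002 and a hypothetical PCI ID of 0x67DF,
 * this returns (0x1002 << 16) | 0x67DF = 0x100267DF. */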
static void si_set_tex_bo_metadata(struct si_screen *sscreen,
				   struct si_texture *tex)
{
	struct radeon_surf *surface = &tex->surface;
	struct pipe_resource *res = &tex->buffer.b.b;
	struct radeon_bo_metadata md;

	memset(&md, 0, sizeof(md));

	if (sscreen->info.chip_class >= GFX9) {
		md.u.gfx9.swizzle_mode = surface->u.gfx9.surf.swizzle_mode;

		if (tex->dcc_offset && !tex->dcc_separate_buffer) {
			uint64_t dcc_offset =
				tex->display_dcc_offset ? tex->display_dcc_offset
							: tex->dcc_offset;

			assert((dcc_offset >> 8) != 0 && (dcc_offset >> 8) < (1 << 24));
			md.u.gfx9.dcc_offset_256B = dcc_offset >> 8;
			md.u.gfx9.dcc_pitch_max = tex->surface.u.gfx9.display_dcc_pitch_max;
			md.u.gfx9.dcc_independent_64B = 1;
		}
	} else {
		md.u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
					RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		md.u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
					RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		md.u.legacy.pipe_config = surface->u.legacy.pipe_config;
		md.u.legacy.bankw = surface->u.legacy.bankw;
		md.u.legacy.bankh = surface->u.legacy.bankh;
		md.u.legacy.tile_split = surface->u.legacy.tile_split;
		md.u.legacy.mtilea = surface->u.legacy.mtilea;
		md.u.legacy.num_banks = surface->u.legacy.num_banks;
		md.u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
		md.u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
	}

	assert(tex->dcc_separate_buffer == NULL);
	assert(tex->surface.fmask_size == 0);

	/* Metadata image format version 1:
	 * [0] = 1 (metadata format identifier)
	 * [1] = (VENDOR_ID << 16) | PCI_ID
	 * [2:9] = image descriptor for the whole resource
	 *         [2] is always 0, because the base address is cleared
	 *         [9] is the DCC offset bits [39:8] from the beginning of
	 *             the buffer
	 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
	 */

	md.metadata[0] = 1; /* metadata image format version 1 */

	/* TILE_MODE_INDEX is ambiguous without a PCI ID. */
	md.metadata[1] = si_get_bo_metadata_word1(sscreen);

	static const unsigned char swizzle[] = {
		PIPE_SWIZZLE_X,
		PIPE_SWIZZLE_Y,
		PIPE_SWIZZLE_Z,
		PIPE_SWIZZLE_W
	};
	bool is_array = util_texture_is_array(res->target);
	uint32_t desc[8];

	si_make_texture_descriptor(sscreen, tex, true,
				   res->target, res->format,
				   swizzle, 0, res->last_level, 0,
				   is_array ? res->array_size - 1 : 0,
				   res->width0, res->height0, res->depth0,
				   desc, NULL);

	si_set_mutable_tex_desc_fields(sscreen, tex, &tex->surface.u.legacy.level[0],
				       0, 0, tex->surface.blk_w, false, desc);

	/* Clear the base address and set the relative DCC offset. */
	desc[0] = 0;
	desc[1] &= C_008F14_BASE_ADDRESS_HI;
	desc[7] = tex->dcc_offset >> 8;

	/* Dwords [2:9] contain the image descriptor. */
	memcpy(&md.metadata[2], desc, sizeof(desc));
	md.size_metadata = 10 * 4;

	/* Dwords [10:..] contain the mipmap level offsets. */
	if (sscreen->info.chip_class <= GFX8) {
		for (unsigned i = 0; i <= res->last_level; i++)
			md.metadata[10+i] = tex->surface.u.legacy.level[i].offset >> 8;

		md.size_metadata += (1 + res->last_level) * 4;
	}

	sscreen->ws->buffer_set_metadata(tex->buffer.buf, &md);
}
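
/* Example of the resulting size (hypothetical texture with last_level = 3
 * on GFX8): 10 header+descriptor dwords plus 4 level-offset dwords, i.e.
 * md.size_metadata = 10*4 + 4*4 = 56 bytes. */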
static void si_get_opaque_metadata(struct si_screen *sscreen,
				   struct si_texture *tex,
				   struct radeon_bo_metadata *md)
{
	uint32_t *desc = &md->metadata[2];

	if (sscreen->info.chip_class < GFX8)
		return;

	/* Return if DCC is enabled. The texture should be set up with it
	 * already.
	 */
	if (md->size_metadata >= 10 * 4 && /* at least 2(header) + 8(desc) dwords */
	    md->metadata[0] != 0 &&
	    md->metadata[1] == si_get_bo_metadata_word1(sscreen) &&
	    G_008F28_COMPRESSION_EN(desc[6])) {
		tex->dcc_offset = (uint64_t)desc[7] << 8;

		if (sscreen->info.chip_class >= GFX9) {
			/* Fix up parameters for displayable DCC. Some state
			 * trackers don't set the SCANOUT flag when importing
			 * displayable images, so we have to recover the correct
			 * parameters here.
			 */
			tex->surface.u.gfx9.dcc.pipe_aligned =
				G_008F24_META_PIPE_ALIGNED(desc[5]);
			tex->surface.u.gfx9.dcc.rb_aligned =
				G_008F24_META_RB_ALIGNED(desc[5]);

			/* If DCC is unaligned, this can only be a displayable image. */
			if (!tex->surface.u.gfx9.dcc.pipe_aligned &&
			    !tex->surface.u.gfx9.dcc.rb_aligned)
				tex->surface.is_displayable = true;
		}
		return;
	}

	/* Disable DCC. These are always set by texture_from_handle and must
	 * be cleared here.
	 */
	tex->dcc_offset = 0;
}
static bool si_has_displayable_dcc(struct si_texture *tex)
{
	struct si_screen *sscreen = (struct si_screen*)tex->buffer.b.b.screen;

	if (sscreen->info.chip_class <= GFX8)
		return false;

	/* This needs a cache flush before scanout.
	 * (it can't be scanned out and rendered to simultaneously)
	 */
	if (sscreen->info.use_display_dcc_unaligned &&
	    tex->dcc_offset &&
	    !tex->surface.u.gfx9.dcc.pipe_aligned &&
	    !tex->surface.u.gfx9.dcc.rb_aligned)
		return true;

	/* This needs an explicit flush (flush_resource). */
	if (sscreen->info.use_display_dcc_with_retile_blit &&
	    tex->display_dcc_offset)
		return true;

	return false;
}
static void si_texture_get_info(struct pipe_screen *screen,
				struct pipe_resource *resource,
				unsigned *pstride,
				unsigned *poffset)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct si_texture *tex = (struct si_texture*)resource;
	unsigned stride = 0;
	unsigned offset = 0;

	if (!sscreen || !tex)
		return;

	if (resource->target != PIPE_BUFFER) {
		if (sscreen->info.chip_class >= GFX9) {
			offset = tex->surface.u.gfx9.surf_offset;
			stride = tex->surface.u.gfx9.surf_pitch *
				 tex->surface.bpe;
		} else {
			offset = tex->surface.u.legacy.level[0].offset;
			stride = tex->surface.u.legacy.level[0].nblk_x *
				 tex->surface.bpe;
		}
	}

	if (pstride)
		*pstride = stride;
	if (poffset)
		*poffset = offset;
}
static boolean si_texture_get_handle(struct pipe_screen *screen,
				     struct pipe_context *ctx,
				     struct pipe_resource *resource,
				     struct winsys_handle *whandle,
				     unsigned usage)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct si_context *sctx;
	struct si_resource *res = si_resource(resource);
	struct si_texture *tex = (struct si_texture*)resource;
	bool update_metadata = false;
	unsigned stride, offset, slice_size;
	bool flush = false;

	ctx = threaded_context_unwrap_sync(ctx);
	sctx = (struct si_context*)(ctx ? ctx : sscreen->aux_context);

	if (resource->target != PIPE_BUFFER) {
		/* This is not supported now, but it might be required for OpenCL
		 * interop in the future.
		 */
		if (resource->nr_samples > 1 || tex->is_depth)
			return false;

		/* Move a suballocated texture into a non-suballocated allocation. */
		if (sscreen->ws->buffer_is_suballocated(res->buf) ||
		    tex->surface.tile_swizzle ||
		    (tex->buffer.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
		     sscreen->info.has_local_buffers &&
		     whandle->type != WINSYS_HANDLE_TYPE_KMS)) {
			assert(!res->b.is_shared);
			si_reallocate_texture_inplace(sctx, tex,
						      PIPE_BIND_SHARED, false);
			flush = true;
			assert(res->b.b.bind & PIPE_BIND_SHARED);
			assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
			assert(!(res->flags & RADEON_FLAG_NO_INTERPROCESS_SHARING));
			assert(tex->surface.tile_swizzle == 0);
		}

		/* Since shader image stores don't support DCC on GFX8,
		 * disable it for external clients that want write
		 * access.
		 */
		if ((usage & PIPE_HANDLE_USAGE_SHADER_WRITE && tex->dcc_offset) ||
		    /* Displayable DCC requires an explicit flush. */
		    (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
		     si_has_displayable_dcc(tex))) {
			if (si_texture_disable_dcc(sctx, tex)) {
				update_metadata = true;
				/* si_texture_disable_dcc flushes the context */
				flush = false;
			}
		}

		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
		    (tex->cmask_buffer || tex->dcc_offset)) {
			/* Eliminate fast clear (both CMASK and DCC) */
			si_eliminate_fast_color_clear(sctx, tex);
			/* eliminate_fast_color_clear flushes the context */
			flush = false;

			/* Disable CMASK if flush_resource isn't going
			 * to be called.
			 */
			if (tex->cmask_buffer)
				si_texture_discard_cmask(sscreen, tex);
		}

		/* Set metadata. */
		if (!res->b.is_shared || update_metadata)
			si_set_tex_bo_metadata(sscreen, tex);

		if (sscreen->info.chip_class >= GFX9) {
			slice_size = tex->surface.u.gfx9.surf_slice_size;
		} else {
			slice_size = (uint64_t)tex->surface.u.legacy.level[0].slice_size_dw * 4;
		}
	} else {
		/* Buffer exports are for the OpenCL interop. */
		/* Move a suballocated buffer into a non-suballocated allocation. */
		if (sscreen->ws->buffer_is_suballocated(res->buf) ||
		    /* A DMABUF export always fails if the BO is local. */
		    (tex->buffer.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
		     sscreen->info.has_local_buffers)) {
			assert(!res->b.is_shared);

			/* Allocate a new buffer with PIPE_BIND_SHARED. */
			struct pipe_resource templ = res->b.b;
			templ.bind |= PIPE_BIND_SHARED;

			struct pipe_resource *newb =
				screen->resource_create(screen, &templ);
			if (!newb)
				return false;

			/* Copy the old buffer contents to the new one. */
			struct pipe_box box;
			u_box_1d(0, newb->width0, &box);
			sctx->b.resource_copy_region(&sctx->b, newb, 0, 0, 0, 0,
						     &res->b.b, 0, &box);
			flush = true;
			/* Move the new buffer storage to the old pipe_resource. */
			si_replace_buffer_storage(&sctx->b, &res->b.b, newb);
			pipe_resource_reference(&newb, NULL);

			assert(res->b.b.bind & PIPE_BIND_SHARED);
			assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
		}

		slice_size = 0;
	}

	si_texture_get_info(screen, resource, &stride, &offset);

	if (flush)
		sctx->b.flush(&sctx->b, NULL, 0);

	if (res->b.is_shared) {
		/* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
		 * doesn't set it.
		 */
		res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
	} else {
		res->b.is_shared = true;
		res->external_usage = usage;
	}

	return sscreen->ws->buffer_get_handle(res->buf, stride, offset,
					      slice_size, whandle);
}
static void si_texture_destroy(struct pipe_screen *screen,
			       struct pipe_resource *ptex)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct si_texture *tex = (struct si_texture*)ptex;
	struct si_resource *resource = &tex->buffer;

	if (sscreen->info.chip_class >= GFX9)
		free(tex->surface.u.gfx9.dcc_retile_map);

	si_texture_reference(&tex->flushed_depth_texture, NULL);

	if (tex->cmask_buffer != &tex->buffer) {
		si_resource_reference(&tex->cmask_buffer, NULL);
	}
	pb_reference(&resource->buf, NULL);
	si_resource_reference(&tex->dcc_separate_buffer, NULL);
	si_resource_reference(&tex->last_dcc_separate_buffer, NULL);
	FREE(tex);
}
static const struct u_resource_vtbl si_texture_vtbl;
static void si_texture_get_htile_size(struct si_screen *sscreen,
				      struct si_texture *tex)
{
	unsigned cl_width, cl_height, width, height;
	unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
	unsigned num_pipes = sscreen->info.num_tile_pipes;

	assert(sscreen->info.chip_class <= GFX8);

	tex->surface.htile_size = 0;

	if (tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
	    !sscreen->info.htile_cmask_support_1d_tiling)
		return;

	/* Overalign HTILE on P2 configs to work around GPU hangs in
	 * piglit/depthstencil-render-miplevels 585.
	 *
	 * This has been confirmed to help Kabini & Stoney, where the hangs
	 * are always reproducible. I think I have seen the test hang
	 * on Carrizo too, though it was very rare there.
	 */
	if (sscreen->info.chip_class >= GFX7 && num_pipes < 4)
		num_pipes = 4;

	switch (num_pipes) {
	case 1:
		cl_width = 32;
		cl_height = 16;
		break;
	case 2:
		cl_width = 32;
		cl_height = 32;
		break;
	case 4:
		cl_width = 64;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 64;
		break;
	case 16:
		cl_width = 128;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	width = align(tex->surface.u.legacy.level[0].nblk_x, cl_width * 8);
	height = align(tex->surface.u.legacy.level[0].nblk_y, cl_height * 8);

	slice_elements = (width * height) / (8 * 8);
	slice_bytes = slice_elements * 4;

	pipe_interleave_bytes = sscreen->info.pipe_interleave_bytes;
	base_align = num_pipes * pipe_interleave_bytes;

	tex->surface.htile_alignment = base_align;
	tex->surface.htile_size =
		util_num_layers(&tex->buffer.b.b, 0) *
		align(slice_bytes, base_align);
}
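
/* Worked example (hypothetical 4-pipe config: cl_width = 64, cl_height = 32,
 * pipe_interleave_bytes = 256): a 2048x2048 single-layer depth surface gets
 * width = height = 2048, slice_elements = 2048*2048/64 = 65536 HTILE dwords
 * (one per 8x8 pixel block), slice_bytes = 256 KiB, base_align = 4*256 =
 * 1024, giving htile_size = align(256 KiB, 1024) = 256 KiB. */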
static void si_texture_allocate_htile(struct si_screen *sscreen,
				      struct si_texture *tex)
{
	if (sscreen->info.chip_class <= GFX8 && !tex->tc_compatible_htile)
		si_texture_get_htile_size(sscreen, tex);

	if (!tex->surface.htile_size)
		return;

	tex->htile_offset = align(tex->size, tex->surface.htile_alignment);
	tex->size = tex->htile_offset + tex->surface.htile_size;
}
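
/* For example (hypothetical values): with tex->size = 0x50200 and
 * htile_alignment = 0x800, HTILE is placed at align(0x50200, 0x800) =
 * 0x50800 and tex->size grows to 0x50800 + htile_size. */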
void si_print_texture_info(struct si_screen *sscreen,
			   struct si_texture *tex, struct u_log_context *log)
{
	int i;

	/* Common parameters. */
	u_log_printf(log, " Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
		"blk_h=%u, array_size=%u, last_level=%u, "
		"bpe=%u, nsamples=%u, flags=0x%x, %s\n",
		tex->buffer.b.b.width0, tex->buffer.b.b.height0,
		tex->buffer.b.b.depth0, tex->surface.blk_w,
		tex->surface.blk_h,
		tex->buffer.b.b.array_size, tex->buffer.b.b.last_level,
		tex->surface.bpe, tex->buffer.b.b.nr_samples,
		tex->surface.flags, util_format_short_name(tex->buffer.b.b.format));

	if (sscreen->info.chip_class >= GFX9) {
		u_log_printf(log, " Surf: size=%"PRIu64", slice_size=%"PRIu64", "
			"alignment=%u, swmode=%u, epitch=%u, pitch=%u\n",
			tex->surface.surf_size,
			tex->surface.u.gfx9.surf_slice_size,
			tex->surface.surf_alignment,
			tex->surface.u.gfx9.surf.swizzle_mode,
			tex->surface.u.gfx9.surf.epitch,
			tex->surface.u.gfx9.surf_pitch);

		if (tex->surface.fmask_size) {
			u_log_printf(log, " FMASK: offset=%"PRIu64", size=%"PRIu64", "
				"alignment=%u, swmode=%u, epitch=%u\n",
				tex->fmask_offset,
				tex->surface.fmask_size,
				tex->surface.fmask_alignment,
				tex->surface.u.gfx9.fmask.swizzle_mode,
				tex->surface.u.gfx9.fmask.epitch);
		}

		if (tex->cmask_buffer) {
			u_log_printf(log, " CMask: offset=%"PRIu64", size=%u, "
				"alignment=%u, rb_aligned=%u, pipe_aligned=%u\n",
				tex->cmask_offset,
				tex->surface.cmask_size,
				tex->surface.cmask_alignment,
				tex->surface.u.gfx9.cmask.rb_aligned,
				tex->surface.u.gfx9.cmask.pipe_aligned);
		}

		if (tex->htile_offset) {
			u_log_printf(log, " HTile: offset=%"PRIu64", size=%u, alignment=%u, "
				"rb_aligned=%u, pipe_aligned=%u\n",
				tex->htile_offset,
				tex->surface.htile_size,
				tex->surface.htile_alignment,
				tex->surface.u.gfx9.htile.rb_aligned,
				tex->surface.u.gfx9.htile.pipe_aligned);
		}

		if (tex->dcc_offset) {
			u_log_printf(log, " DCC: offset=%"PRIu64", size=%u, "
				"alignment=%u, pitch_max=%u, num_dcc_levels=%u\n",
				tex->dcc_offset, tex->surface.dcc_size,
				tex->surface.dcc_alignment,
				tex->surface.u.gfx9.display_dcc_pitch_max,
				tex->surface.num_dcc_levels);
		}

		if (tex->surface.u.gfx9.stencil_offset) {
			u_log_printf(log, " Stencil: offset=%"PRIu64", swmode=%u, epitch=%u\n",
				tex->surface.u.gfx9.stencil_offset,
				tex->surface.u.gfx9.stencil.swizzle_mode,
				tex->surface.u.gfx9.stencil.epitch);
		}
		return;
	}

	u_log_printf(log, " Layout: size=%"PRIu64", alignment=%u, bankw=%u, "
		"bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
		tex->surface.surf_size, tex->surface.surf_alignment, tex->surface.u.legacy.bankw,
		tex->surface.u.legacy.bankh, tex->surface.u.legacy.num_banks, tex->surface.u.legacy.mtilea,
		tex->surface.u.legacy.tile_split, tex->surface.u.legacy.pipe_config,
		(tex->surface.flags & RADEON_SURF_SCANOUT) != 0);

	if (tex->surface.fmask_size)
		u_log_printf(log, " FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
			"bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
			tex->fmask_offset, tex->surface.fmask_size, tex->surface.fmask_alignment,
			tex->surface.u.legacy.fmask.pitch_in_pixels,
			tex->surface.u.legacy.fmask.bankh,
			tex->surface.u.legacy.fmask.slice_tile_max,
			tex->surface.u.legacy.fmask.tiling_index);

	if (tex->cmask_buffer)
		u_log_printf(log, " CMask: offset=%"PRIu64", size=%u, alignment=%u, "
			"slice_tile_max=%u\n",
			tex->cmask_offset, tex->surface.cmask_size, tex->surface.cmask_alignment,
			tex->surface.u.legacy.cmask_slice_tile_max);

	if (tex->htile_offset)
		u_log_printf(log, " HTile: offset=%"PRIu64", size=%u, "
			"alignment=%u, TC_compatible = %u\n",
			tex->htile_offset, tex->surface.htile_size,
			tex->surface.htile_alignment,
			tex->tc_compatible_htile);

	if (tex->dcc_offset) {
		u_log_printf(log, " DCC: offset=%"PRIu64", size=%u, alignment=%u\n",
			tex->dcc_offset, tex->surface.dcc_size,
			tex->surface.dcc_alignment);
		for (i = 0; i <= tex->buffer.b.b.last_level; i++)
			u_log_printf(log, " DCCLevel[%i]: enabled=%u, offset=%u, "
				"fast_clear_size=%u\n",
				i, i < tex->surface.num_dcc_levels,
				tex->surface.u.legacy.level[i].dcc_offset,
				tex->surface.u.legacy.level[i].dcc_fast_clear_size);
	}

	for (i = 0; i <= tex->buffer.b.b.last_level; i++)
		u_log_printf(log, " Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
			"npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
			"mode=%u, tiling_index = %u\n",
			i, tex->surface.u.legacy.level[i].offset,
			(uint64_t)tex->surface.u.legacy.level[i].slice_size_dw * 4,
			u_minify(tex->buffer.b.b.width0, i),
			u_minify(tex->buffer.b.b.height0, i),
			u_minify(tex->buffer.b.b.depth0, i),
			tex->surface.u.legacy.level[i].nblk_x,
			tex->surface.u.legacy.level[i].nblk_y,
			tex->surface.u.legacy.level[i].mode,
			tex->surface.u.legacy.tiling_index[i]);

	if (tex->surface.has_stencil) {
		u_log_printf(log, " StencilLayout: tilesplit=%u\n",
			tex->surface.u.legacy.stencil_tile_split);
		for (i = 0; i <= tex->buffer.b.b.last_level; i++) {
			u_log_printf(log, " StencilLevel[%i]: offset=%"PRIu64", "
				"slice_size=%"PRIu64", npix_x=%u, "
				"npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
				"mode=%u, tiling_index = %u\n",
				i, tex->surface.u.legacy.stencil_level[i].offset,
				(uint64_t)tex->surface.u.legacy.stencil_level[i].slice_size_dw * 4,
				u_minify(tex->buffer.b.b.width0, i),
				u_minify(tex->buffer.b.b.height0, i),
				u_minify(tex->buffer.b.b.depth0, i),
				tex->surface.u.legacy.stencil_level[i].nblk_x,
				tex->surface.u.legacy.stencil_level[i].nblk_y,
				tex->surface.u.legacy.stencil_level[i].mode,
				tex->surface.u.legacy.stencil_tiling_index[i]);
		}
	}
}
/* Common processing for si_texture_create and si_texture_from_handle */
static struct si_texture *
si_texture_create_object(struct pipe_screen *screen,
			 const struct pipe_resource *base,
			 struct pb_buffer *buf,
			 struct radeon_surf *surface)
{
	struct si_texture *tex;
	struct si_resource *resource;
	struct si_screen *sscreen = (struct si_screen*)screen;

	tex = CALLOC_STRUCT(si_texture);
	if (!tex)
		goto error;

	resource = &tex->buffer;
	resource->b.b = *base;
	resource->b.b.next = NULL;
	resource->b.vtbl = &si_texture_vtbl;
	pipe_reference_init(&resource->b.b.reference, 1);
	resource->b.b.screen = screen;

	/* don't include stencil-only formats which we don't support for rendering */
	tex->is_depth = util_format_has_depth(util_format_description(tex->buffer.b.b.format));

	tex->surface = *surface;
	tex->size = tex->surface.surf_size;

	tex->tc_compatible_htile = tex->surface.htile_size != 0 &&
				   (tex->surface.flags &
				    RADEON_SURF_TC_COMPATIBLE_HTILE);

	/* TC-compatible HTILE:
	 * - GFX8 only supports Z32_FLOAT.
	 * - GFX9 only supports Z32_FLOAT and Z16_UNORM. */
	if (tex->tc_compatible_htile) {
		if (sscreen->info.chip_class >= GFX9 &&
		    base->format == PIPE_FORMAT_Z16_UNORM)
			tex->db_render_format = base->format;
		else {
			tex->db_render_format = PIPE_FORMAT_Z32_FLOAT;
			tex->upgraded_depth = base->format != PIPE_FORMAT_Z32_FLOAT &&
					      base->format != PIPE_FORMAT_Z32_FLOAT_S8X24_UINT;
		}
	} else {
		tex->db_render_format = base->format;
	}

	/* Applies to GCN. */
	tex->last_msaa_resolve_target_micro_mode = tex->surface.micro_tile_mode;

	/* Disable separate DCC at the beginning. DRI2 doesn't reuse buffers
	 * between frames, so the only thing that can enable separate DCC
	 * with DRI2 is multiple slow clears within a frame.
	 */
	tex->ps_draw_ratio = 0;

	if (tex->is_depth) {
		if (sscreen->info.chip_class >= GFX9) {
			tex->can_sample_z = true;
			tex->can_sample_s = true;
		} else {
			tex->can_sample_z = !tex->surface.u.legacy.depth_adjusted;
			tex->can_sample_s = !tex->surface.u.legacy.stencil_adjusted;
		}

		if (!(base->flags & (SI_RESOURCE_FLAG_TRANSFER |
				     SI_RESOURCE_FLAG_FLUSHED_DEPTH))) {
			tex->db_compatible = true;

			if (!(sscreen->debug_flags & DBG(NO_HYPERZ)))
				si_texture_allocate_htile(sscreen, tex);
		}
	} else {
		if (base->nr_samples > 1 &&
		    !buf &&
		    !(sscreen->debug_flags & DBG(NO_FMASK))) {
			/* Allocate FMASK. */
			tex->fmask_offset = align64(tex->size,
						    tex->surface.fmask_alignment);
			tex->size = tex->fmask_offset + tex->surface.fmask_size;

			/* Allocate CMASK. */
			tex->cmask_offset = align64(tex->size, tex->surface.cmask_alignment);
			tex->size = tex->cmask_offset + tex->surface.cmask_size;
			tex->cb_color_info |= S_028C70_FAST_CLEAR(1);
			tex->cmask_buffer = &tex->buffer;

			if (!tex->surface.fmask_size || !tex->surface.cmask_size)
				goto error;
		}

		/* Shared textures must always set up DCC here.
		 * If it's not present, it will be disabled by
		 * apply_opaque_metadata later.
		 */
		if (tex->surface.dcc_size &&
		    (buf || !(sscreen->debug_flags & DBG(NO_DCC))) &&
		    (sscreen->info.use_display_dcc_unaligned ||
		     sscreen->info.use_display_dcc_with_retile_blit ||
		     !(tex->surface.flags & RADEON_SURF_SCANOUT))) {
			/* Add space for the DCC buffer. */
			tex->dcc_offset = align64(tex->size, tex->surface.dcc_alignment);
			tex->size = tex->dcc_offset + tex->surface.dcc_size;

			if (sscreen->info.chip_class >= GFX9 &&
			    tex->surface.u.gfx9.dcc_retile_num_elements) {
				/* Add space for the displayable DCC buffer. */
				tex->display_dcc_offset =
					align64(tex->size, tex->surface.u.gfx9.display_dcc_alignment);
				tex->size = tex->display_dcc_offset +
					    tex->surface.u.gfx9.display_dcc_size;

				/* Add space for the DCC retile buffer. (16-bit or 32-bit elements) */
				tex->dcc_retile_map_offset =
					align64(tex->size, sscreen->info.tcc_cache_line_size);

				if (tex->surface.u.gfx9.dcc_retile_use_uint16) {
					tex->size = tex->dcc_retile_map_offset +
						    tex->surface.u.gfx9.dcc_retile_num_elements * 2;
				} else {
					tex->size = tex->dcc_retile_map_offset +
						    tex->surface.u.gfx9.dcc_retile_num_elements * 4;
				}
			}
		}
	}

	/* Now create the backing buffer. */
	if (!buf) {
		si_init_resource_fields(sscreen, resource, tex->size,
					tex->surface.surf_alignment);

		if (!si_alloc_resource(sscreen, resource))
			goto error;
	} else {
		resource->buf = buf;
		resource->gpu_address = sscreen->ws->buffer_get_virtual_address(resource->buf);
		resource->bo_size = buf->size;
		resource->bo_alignment = buf->alignment;
		resource->domains = sscreen->ws->buffer_get_initial_domain(resource->buf);
		if (resource->domains & RADEON_DOMAIN_VRAM)
			resource->vram_usage = buf->size;
		else if (resource->domains & RADEON_DOMAIN_GTT)
			resource->gart_usage = buf->size;
	}

	if (tex->cmask_buffer) {
		/* Initialize the cmask to 0xCC (= compressed state). */
		si_screen_clear_buffer(sscreen, &tex->cmask_buffer->b.b,
				       tex->cmask_offset, tex->surface.cmask_size,
				       0xCCCCCCCC);
	}

	if (tex->htile_offset) {
		uint32_t clear_value = 0;

		if (sscreen->info.chip_class >= GFX9 || tex->tc_compatible_htile)
			clear_value = 0x0000030F;

		si_screen_clear_buffer(sscreen, &tex->buffer.b.b,
				       tex->htile_offset,
				       tex->surface.htile_size,
				       clear_value);
	}

	/* Initialize DCC only if the texture is not being imported. */
	if (!buf && tex->dcc_offset) {
		/* Clear DCC to black for all tiles with DCC enabled.
		 *
		 * This fixes corruption in 3DMark Slingshot Extreme, which
		 * uses uninitialized textures, causing corruption.
		 */
		if (tex->surface.num_dcc_levels == tex->buffer.b.b.last_level + 1 &&
		    tex->buffer.b.b.nr_samples <= 2) {
			/* Simple case - all tiles have DCC enabled. */
			si_screen_clear_buffer(sscreen, &tex->buffer.b.b,
					       tex->dcc_offset,
					       tex->surface.dcc_size,
					       DCC_CLEAR_COLOR_0000);
		} else if (sscreen->info.chip_class >= GFX9) {
			/* Clear to uncompressed. Clearing this to black is complicated. */
			si_screen_clear_buffer(sscreen, &tex->buffer.b.b,
					       tex->dcc_offset,
					       tex->surface.dcc_size,
					       DCC_UNCOMPRESSED);
		} else {
			/* GFX8: Initialize mipmap levels and multisamples separately. */
			if (tex->buffer.b.b.nr_samples >= 2) {
				/* Clearing this to black is complicated. */
				si_screen_clear_buffer(sscreen, &tex->buffer.b.b,
						       tex->dcc_offset,
						       tex->surface.dcc_size,
						       DCC_UNCOMPRESSED);
			} else {
				/* Clear the enabled mipmap levels to black. */
				unsigned size = 0;

				for (unsigned i = 0; i < tex->surface.num_dcc_levels; i++) {
					if (!tex->surface.u.legacy.level[i].dcc_fast_clear_size)
						break;

					size = tex->surface.u.legacy.level[i].dcc_offset +
					       tex->surface.u.legacy.level[i].dcc_fast_clear_size;
				}

				/* Mipmap levels with DCC. */
				if (size) {
					si_screen_clear_buffer(sscreen, &tex->buffer.b.b,
							       tex->dcc_offset, size,
							       DCC_CLEAR_COLOR_0000);
				}
				/* Mipmap levels without DCC. */
				if (size != tex->surface.dcc_size) {
					si_screen_clear_buffer(sscreen, &tex->buffer.b.b,
							       tex->dcc_offset + size,
							       tex->surface.dcc_size - size,
							       DCC_UNCOMPRESSED);
				}
			}
		}

		/* Upload the DCC retile map. */
		if (tex->dcc_retile_map_offset) {
			/* Use a staging buffer for the upload, because
			 * the buffer backing the texture is unmappable.
			 */
			bool use_uint16 = tex->surface.u.gfx9.dcc_retile_use_uint16;
			unsigned num_elements = tex->surface.u.gfx9.dcc_retile_num_elements;
			struct si_resource *buf =
				si_aligned_buffer_create(screen, 0, PIPE_USAGE_STREAM,
							 num_elements * (use_uint16 ? 2 : 4),
							 sscreen->info.tcc_cache_line_size);
			uint32_t *ui = (uint32_t*)sscreen->ws->buffer_map(buf->buf, NULL,
									  PIPE_TRANSFER_WRITE);
			uint16_t *us = (uint16_t*)ui;

			/* Upload the retile map into the staging buffer. */
			if (use_uint16) {
				for (unsigned i = 0; i < num_elements; i++)
					us[i] = tex->surface.u.gfx9.dcc_retile_map[i];
			} else {
				for (unsigned i = 0; i < num_elements; i++)
					ui[i] = tex->surface.u.gfx9.dcc_retile_map[i];
			}

			/* Copy the staging buffer to the buffer backing the texture. */
			struct si_context *sctx = (struct si_context*)sscreen->aux_context;
			struct pipe_box box;
			u_box_1d(0, buf->b.b.width0, &box);

			assert(tex->dcc_retile_map_offset <= UINT_MAX);
			mtx_lock(&sscreen->aux_context_lock);
			sctx->dma_copy(&sctx->b, &tex->buffer.b.b, 0,
				       tex->dcc_retile_map_offset, 0, 0,
				       &buf->b.b, 0, &box);
			sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
			mtx_unlock(&sscreen->aux_context_lock);

			si_resource_reference(&buf, NULL);
		}
	}

	/* Initialize the CMASK base register value. */
	tex->cmask_base_address_reg =
		(tex->buffer.gpu_address + tex->cmask_offset) >> 8;

	if (sscreen->debug_flags & DBG(VM)) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
			tex->buffer.gpu_address,
			tex->buffer.gpu_address + tex->buffer.buf->size,
			base->width0, base->height0, util_num_layers(base, 0), base->last_level+1,
			base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
	}

	if (sscreen->debug_flags & DBG(TEX)) {
		puts("Texture:");
		struct u_log_context log;
		u_log_context_init(&log);
		si_print_texture_info(sscreen, tex, &log);
		u_log_new_page_print(&log, stdout);
		fflush(stdout);
		u_log_context_destroy(&log);
	}

	return tex;

error:
	FREE(tex);
	if (sscreen->info.chip_class >= GFX9)
		free(surface->u.gfx9.dcc_retile_map);
	return NULL;
}
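
/* Pick a surface mode for a new resource. Hard constraints come first
 * (MSAA and GFX8 TC-compatible HTILE force 2D tiling, transfer resources
 * force linear), then common linear candidates, then small textures get
 * 1D tiling, and everything else defaults to 2D tiling. */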
static enum radeon_surf_mode
si_choose_tiling(struct si_screen *sscreen,
		 const struct pipe_resource *templ, bool tc_compatible_htile)
{
	const struct util_format_description *desc = util_format_description(templ->format);
	bool force_tiling = templ->flags & SI_RESOURCE_FLAG_FORCE_MSAA_TILING;
	bool is_depth_stencil = util_format_is_depth_or_stencil(templ->format) &&
				!(templ->flags & SI_RESOURCE_FLAG_FLUSHED_DEPTH);

	/* MSAA resources must be 2D tiled. */
	if (templ->nr_samples > 1)
		return RADEON_SURF_MODE_2D;

	/* Transfer resources should be linear. */
	if (templ->flags & SI_RESOURCE_FLAG_TRANSFER)
		return RADEON_SURF_MODE_LINEAR_ALIGNED;

	/* Avoid Z/S decompress blits by forcing TC-compatible HTILE on GFX8,
	 * which requires 2D tiling.
	 */
	if (sscreen->info.chip_class == GFX8 && tc_compatible_htile)
		return RADEON_SURF_MODE_2D;

	/* Handle common candidates for the linear mode.
	 * Compressed textures and DB surfaces must always be tiled.
	 */
	if (!force_tiling &&
	    !is_depth_stencil &&
	    !util_format_is_compressed(templ->format)) {
		if (sscreen->debug_flags & DBG(NO_TILING))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Tiling doesn't work with the 422 (SUBSAMPLED) formats. */
		if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Cursors are linear on AMD GCN.
		 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
		if (templ->bind & PIPE_BIND_CURSOR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		if (templ->bind & PIPE_BIND_LINEAR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures with a very small height are recommended to be linear. */
		if (templ->target == PIPE_TEXTURE_1D ||
		    templ->target == PIPE_TEXTURE_1D_ARRAY ||
		    /* Only very thin and long 2D textures should benefit from
		     * linear_aligned. */
		    (templ->width0 > 8 && templ->height0 <= 2))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures likely to be mapped often. */
		if (templ->usage == PIPE_USAGE_STAGING ||
		    templ->usage == PIPE_USAGE_STREAM)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Make small textures 1D tiled. */
	if (templ->width0 <= 16 || templ->height0 <= 16 ||
	    (sscreen->debug_flags & DBG(NO_2D_TILING)))
		return RADEON_SURF_MODE_1D;

	/* The allocator will switch to 1D if needed. */
	return RADEON_SURF_MODE_2D;
}
struct pipe_resource *si_texture_create(struct pipe_screen *screen,
					const struct pipe_resource *templ)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	bool is_zs = util_format_is_depth_or_stencil(templ->format);

	if (templ->nr_samples >= 2) {
		/* This is hackish (overwriting the const pipe_resource template),
		 * but should be harmless and state trackers can also see
		 * the overridden number of samples in the created pipe_resource.
		 */
		if (is_zs && sscreen->eqaa_force_z_samples) {
			((struct pipe_resource*)templ)->nr_samples =
			((struct pipe_resource*)templ)->nr_storage_samples =
				sscreen->eqaa_force_z_samples;
		} else if (!is_zs && sscreen->eqaa_force_color_samples) {
			((struct pipe_resource*)templ)->nr_samples =
				sscreen->eqaa_force_coverage_samples;
			((struct pipe_resource*)templ)->nr_storage_samples =
				sscreen->eqaa_force_color_samples;
		}
	}

	struct radeon_surf surface = {0};
	bool is_flushed_depth = templ->flags & SI_RESOURCE_FLAG_FLUSHED_DEPTH;
	bool tc_compatible_htile =
		sscreen->info.chip_class >= GFX8 &&
		/* There are issues with TC-compatible HTILE on Tonga (and
		 * Iceland is the same design), and documented bug workarounds
		 * don't help. For example, this fails:
		 *   piglit/bin/tex-miplevel-selection 'texture()' 2DShadow -auto
		 */
		sscreen->info.family != CHIP_TONGA &&
		sscreen->info.family != CHIP_ICELAND &&
		(templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY) &&
		!(sscreen->debug_flags & DBG(NO_HYPERZ)) &&
		!is_flushed_depth &&
		templ->nr_samples <= 1 && /* TC-compat HTILE is less efficient with MSAA */
		is_zs;
	int r;

	r = si_init_surface(sscreen, &surface, templ,
			    si_choose_tiling(sscreen, templ, tc_compatible_htile),
			    0, 0, false, false, is_flushed_depth,
			    tc_compatible_htile);
	if (r) {
		return NULL;
	}

	return (struct pipe_resource *)
	       si_texture_create_object(screen, templ, NULL, &surface);
}
static struct pipe_resource *si_texture_from_winsys_buffer(struct si_screen *sscreen,
							   const struct pipe_resource *templ,
							   struct pb_buffer *buf,
							   unsigned stride,
							   unsigned offset,
							   unsigned usage,
							   bool dedicated)
{
	enum radeon_surf_mode array_mode;
	struct radeon_surf surface = {};
	struct radeon_bo_metadata metadata = {};
	struct si_texture *tex;
	bool is_scanout;
	int r;

	if (dedicated) {
		sscreen->ws->buffer_get_metadata(buf, &metadata);
		si_get_display_metadata(sscreen, &surface, &metadata,
					&array_mode, &is_scanout);
	} else {
		/**
		 * The bo metadata is unset for un-dedicated images. So we fall
		 * back to linear. See answer to question 5 of the
		 * VK_KHX_external_memory spec for some details.
		 *
		 * It is possible that this case isn't going to work if the
		 * surface pitch isn't correctly aligned by default.
		 *
		 * In order to support it correctly we require multi-image
		 * metadata to be synchronized between radv and radeonsi. The
		 * semantics of associating multiple image metadata to a memory
		 * object on the vulkan export side are not concretely defined
		 * either.
		 *
		 * All the use cases we are aware of at the moment for memory
		 * objects use dedicated allocations. So let's keep the initial
		 * implementation simple.
		 *
		 * A possible alternative is to attempt to reconstruct the
		 * tiling information when the TexParameter TEXTURE_TILING_EXT
		 * is set.
		 */
		array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
		is_scanout = false;
	}

	r = si_init_surface(sscreen, &surface, templ,
			    array_mode, stride, offset, true, is_scanout,
			    false, false);
	if (r)
		return NULL;

	tex = si_texture_create_object(&sscreen->b, templ, buf, &surface);
	if (!tex)
		return NULL;

	tex->buffer.b.is_shared = true;
	tex->buffer.external_usage = usage;

	si_get_opaque_metadata(sscreen, tex, &metadata);

	/* Displayable DCC requires an explicit flush. */
	if (dedicated &&
	    !(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
	    si_has_displayable_dcc(tex)) {
		/* TODO: do we need to decompress DCC? */
		if (si_texture_discard_dcc(sscreen, tex)) {
			/* Update BO metadata after disabling DCC. */
			si_set_tex_bo_metadata(sscreen, tex);
		}
	}

	assert(tex->surface.tile_swizzle == 0);
	return &tex->buffer.b.b;
}
static struct pipe_resource *si_texture_from_handle(struct pipe_screen *screen,
						    const struct pipe_resource *templ,
						    struct winsys_handle *whandle,
						    unsigned usage)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct pb_buffer *buf = NULL;
	unsigned stride = 0, offset = 0;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	    templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	buf = sscreen->ws->buffer_from_handle(sscreen->ws, whandle,
					      sscreen->info.max_alignment,
					      &stride, &offset);
	if (!buf)
		return NULL;

	return si_texture_from_winsys_buffer(sscreen, templ, buf, stride,
					     offset, usage, true);
}
bool si_init_flushed_depth_texture(struct pipe_context *ctx,
				   struct pipe_resource *texture,
				   struct si_texture **staging)
{
	struct si_texture *tex = (struct si_texture *)texture;
	struct pipe_resource resource;
	struct si_texture **flushed_depth_texture = staging ?
			staging : &tex->flushed_depth_texture;
	enum pipe_format pipe_format = texture->format;

	if (!staging) {
		if (tex->flushed_depth_texture)
			return true; /* it's ready */

		if (!tex->can_sample_z && tex->can_sample_s) {
			switch (pipe_format) {
			case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
				/* Save memory by not allocating the S plane. */
				pipe_format = PIPE_FORMAT_Z32_FLOAT;
				break;
			case PIPE_FORMAT_Z24_UNORM_S8_UINT:
			case PIPE_FORMAT_S8_UINT_Z24_UNORM:
				/* Save memory bandwidth by not copying the
				 * stencil part during flush.
				 *
				 * This potentially increases memory bandwidth
				 * if an application uses both Z and S texturing
				 * simultaneously (a flushed Z24S8 texture
				 * would be stored compactly), but how often
				 * does that really happen?
				 */
				pipe_format = PIPE_FORMAT_Z24X8_UNORM;
				break;
			default:;
			}
		} else if (!tex->can_sample_s && tex->can_sample_z) {
			assert(util_format_has_stencil(util_format_description(pipe_format)));

			/* DB->CB copies to an 8bpp surface don't work. */
			pipe_format = PIPE_FORMAT_X24S8_UINT;
		}
	}

	memset(&resource, 0, sizeof(resource));
	resource.target = texture->target;
	resource.format = pipe_format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
	resource.flags = texture->flags | SI_RESOURCE_FLAG_FLUSHED_DEPTH;

	if (staging)
		resource.flags |= SI_RESOURCE_FLAG_TRANSFER;

	*flushed_depth_texture = (struct si_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (*flushed_depth_texture == NULL) {
		PRINT_ERR("failed to create temporary texture to hold flushed depth\n");
		return false;
	}
	return true;
}
/* Initialize the pipe_resource descriptor to be of the same size as the box,
 * which is supposed to hold a subregion of the texture "orig" at the given
 * mipmap level.
 */
static void si_init_temp_resource_from_box(struct pipe_resource *res,
					   struct pipe_resource *orig,
					   const struct pipe_box *box,
					   unsigned level, unsigned flags)
{
	memset(res, 0, sizeof(*res));
	res->format = orig->format;
	res->width0 = box->width;
	res->height0 = box->height;
	res->depth0 = 1;
	res->array_size = 1;
	res->usage = flags & SI_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	res->flags = flags;

	if (flags & SI_RESOURCE_FLAG_TRANSFER &&
	    util_format_is_compressed(orig->format)) {
		/* Transfer resources are allocated with linear tiling, which is
		 * not supported for compressed formats.
		 */
		unsigned blocksize =
			util_format_get_blocksize(orig->format);

		if (blocksize == 8) {
			res->format = PIPE_FORMAT_R16G16B16A16_UINT;
		} else {
			assert(blocksize == 16);
			res->format = PIPE_FORMAT_R32G32B32A32_UINT;
		}

		res->width0 = util_format_get_nblocksx(orig->format, box->width);
		res->height0 = util_format_get_nblocksy(orig->format, box->height);
	}

	/* We must set the correct texture target and dimensions for a 3D box. */
	if (box->depth > 1 && util_max_layer(orig, level) > 0) {
		res->target = PIPE_TEXTURE_2D_ARRAY;
		res->array_size = box->depth;
	} else {
		res->target = PIPE_TEXTURE_2D;
	}
}
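/* Worked example for the compressed-format path above (values illustrative):
 * a 64x64 box of a BC1/DXT1 texture has 8-byte 4x4 blocks, so the staging
 * resource becomes PIPE_FORMAT_R16G16B16A16_UINT (also 8 bytes per element)
 * with width0 = 64/4 = 16 and height0 = 64/4 = 16 -- one uncompressed texel
 * per compressed block, preserving the byte layout in linear memory.
 */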
static bool si_can_invalidate_texture(struct si_screen *sscreen,
				      struct si_texture *tex,
				      unsigned transfer_usage,
				      const struct pipe_box *box)
{
	return !tex->buffer.b.is_shared &&
		!(transfer_usage & PIPE_TRANSFER_READ) &&
		tex->buffer.b.b.last_level == 0 &&
		util_texrange_covers_whole_level(&tex->buffer.b.b, 0,
						 box->x, box->y, box->z,
						 box->width, box->height,
						 box->depth);
}
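/* Illustrative example: a write-only transfer that rewrites all of level 0
 * of a non-shared, mipmap-less texture (e.g. a full-size glTexSubImage2D)
 * satisfies all four conditions above, so the old storage can be thrown
 * away instead of synchronized against the GPU.
 */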
static void si_texture_invalidate_storage(struct si_context *sctx,
					  struct si_texture *tex)
{
	struct si_screen *sscreen = sctx->screen;

	/* There is no point in discarding depth and tiled buffers. */
	assert(!tex->is_depth);
	assert(tex->surface.is_linear);

	/* Reallocate the buffer in the same pipe_resource. */
	si_alloc_resource(sscreen, &tex->buffer);

	/* Initialize the CMASK base address (needed even without CMASK). */
	tex->cmask_base_address_reg =
		(tex->buffer.gpu_address + tex->cmask_offset) >> 8;

	p_atomic_inc(&sscreen->dirty_tex_counter);

	sctx->num_alloc_tex_transfer_bytes += tex->size;
}
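/* Note on the shift above: the hardware registers address CMASK in 256-byte
 * units, so the base address is stored pre-shifted right by 8 bits.
 */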
static void *si_texture_transfer_map(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     unsigned level,
				     unsigned usage,
				     const struct pipe_box *box,
				     struct pipe_transfer **ptransfer)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_texture *tex = (struct si_texture *)texture;
	struct si_transfer *trans;
	struct si_resource *buf;
	unsigned offset = 0;
	char *map;
	bool use_staging_texture = false;

	assert(!(texture->flags & SI_RESOURCE_FLAG_TRANSFER));
	assert(box->width && box->height && box->depth);

	/* Depth textures use staging unconditionally. */
	if (!tex->is_depth) {
		/* Degrade the tile mode if we get too many transfers on APUs.
		 * On dGPUs, the staging texture is always faster.
		 * Only count uploads that are at least 4x4 pixels large.
		 */
		if (!sctx->screen->info.has_dedicated_vram &&
		    level == 0 &&
		    box->width >= 4 && box->height >= 4 &&
		    p_atomic_inc_return(&tex->num_level0_transfers) == 10) {
			bool can_invalidate =
				si_can_invalidate_texture(sctx->screen, tex,
							  usage, box);

			si_reallocate_texture_inplace(sctx, tex,
						      PIPE_BIND_LINEAR,
						      can_invalidate);
		}

		/* Tiled textures need to be converted into a linear texture for CPU
		 * access. The staging texture is always linear and is placed in GART.
		 *
		 * Reading from VRAM or GTT WC is slow, always use the staging
		 * texture in this case.
		 *
		 * Use the staging texture for uploads if the underlying BO
		 * is busy.
		 */
		if (!tex->surface.is_linear)
			use_staging_texture = true;
		else if (usage & PIPE_TRANSFER_READ)
			use_staging_texture =
				tex->buffer.domains & RADEON_DOMAIN_VRAM ||
				tex->buffer.flags & RADEON_FLAG_GTT_WC;
		/* Write & linear only: */
		else if (si_rings_is_buffer_referenced(sctx, tex->buffer.buf,
						       RADEON_USAGE_READWRITE) ||
			 !sctx->ws->buffer_wait(tex->buffer.buf, 0,
						RADEON_USAGE_READWRITE)) {
			/* It's busy. */
			if (si_can_invalidate_texture(sctx->screen, tex,
						      usage, box))
				si_texture_invalidate_storage(sctx, tex);
			else
				use_staging_texture = true;
		}
	}

	trans = CALLOC_STRUCT(si_transfer);
	if (!trans)
		return NULL;
	pipe_resource_reference(&trans->b.b.resource, texture);
	trans->b.b.level = level;
	trans->b.b.usage = usage;
	trans->b.b.box = *box;

	if (tex->is_depth) {
		struct si_texture *staging_depth;

		if (tex->buffer.b.b.nr_samples > 1) {
			/* MSAA depth buffers need to be converted to single sample buffers.
			 *
			 * Mapping MSAA depth buffers can occur if ReadPixels is called
			 * with a multisample GLX visual.
			 *
			 * First downsample the depth buffer to a temporary texture,
			 * then decompress the temporary one to staging.
			 *
			 * Only the region being mapped is transferred.
			 */
			struct pipe_resource resource;

			si_init_temp_resource_from_box(&resource, texture, box, level, 0);

			if (!si_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
				PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
				goto fail_trans;
			}

			if (usage & PIPE_TRANSFER_READ) {
				struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
				if (!temp) {
					PRINT_ERR("failed to create a temporary depth texture\n");
					goto fail_trans;
				}

				si_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
				si_blit_decompress_depth(ctx, (struct si_texture *)temp, staging_depth,
							 0, 0, 0, box->depth, 0, 0);
				pipe_resource_reference(&temp, NULL);
			}

			/* Just get the strides. */
			si_texture_get_offset(sctx->screen, staging_depth, level, NULL,
					      &trans->b.b.stride,
					      &trans->b.b.layer_stride);
		} else {
			/* XXX: only readback the rectangle which is being mapped? */
			/* XXX: when discard is true, no need to read back from depth texture */
			if (!si_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
				PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
				goto fail_trans;
			}

			si_blit_decompress_depth(ctx, tex, staging_depth,
						 level, level,
						 box->z, box->z + box->depth - 1,
						 0, 0);

			offset = si_texture_get_offset(sctx->screen, staging_depth,
						       level, box,
						       &trans->b.b.stride,
						       &trans->b.b.layer_stride);
		}

		trans->staging = &staging_depth->buffer;
		buf = trans->staging;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct si_texture *staging;

		si_init_temp_resource_from_box(&resource, texture, box, level,
					       SI_RESOURCE_FLAG_TRANSFER);
		resource.usage = (usage & PIPE_TRANSFER_READ) ?
			PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;

		/* Create the temporary texture. */
		staging = (struct si_texture *)ctx->screen->resource_create(ctx->screen, &resource);
		if (!staging) {
			PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
			goto fail_trans;
		}
		trans->staging = &staging->buffer;

		/* Just get the strides. */
		si_texture_get_offset(sctx->screen, staging, 0, NULL,
				      &trans->b.b.stride,
				      &trans->b.b.layer_stride);

		if (usage & PIPE_TRANSFER_READ)
			si_copy_to_staging_texture(ctx, trans);
		else
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

		buf = trans->staging;
	} else {
		/* the resource is mapped directly */
		offset = si_texture_get_offset(sctx->screen, tex, level, box,
					       &trans->b.b.stride,
					       &trans->b.b.layer_stride);
		buf = &tex->buffer;
	}

	/* Always unmap texture CPU mappings on 32-bit architectures, so that
	 * we don't run out of the CPU address space.
	 */
	if (sizeof(void*) == 4)
		usage |= RADEON_TRANSFER_TEMPORARY;

	if (!(map = si_buffer_map_sync_with_rings(sctx, buf, usage)))
		goto fail_trans;

	*ptransfer = &trans->b.b;
	return map + offset;

fail_trans:
	si_resource_reference(&trans->staging, NULL);
	pipe_resource_reference(&trans->b.b.resource, NULL);
	FREE(trans);
	return NULL;
}
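#if 0	/* Illustrative sketch, not built: how a gallium frontend typically
	 * drives the transfer_map/transfer_unmap pair implemented here.
	 * "pipe", "tex", "box", "data", "src_pitch" and "bpp" are
	 * hypothetical caller state, not names from this file. */
	struct pipe_transfer *t;
	uint8_t *p = pipe->transfer_map(pipe, tex, 0 /* level */,
					PIPE_TRANSFER_WRITE, &box, &t);
	if (p) {
		for (unsigned y = 0; y < box.height; y++)
			memcpy(p + y * t->stride, data + y * src_pitch,
			       box.width * bpp);
		pipe->transfer_unmap(pipe, t); /* copies staging back if used */
	}
#endif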
static void si_texture_transfer_unmap(struct pipe_context *ctx,
				      struct pipe_transfer *transfer)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_transfer *stransfer = (struct si_transfer *)transfer;
	struct pipe_resource *texture = transfer->resource;
	struct si_texture *tex = (struct si_texture *)texture;

	/* Always unmap texture CPU mappings on 32-bit architectures, so that
	 * we don't run out of the CPU address space.
	 */
	if (sizeof(void*) == 4) {
		struct si_resource *buf =
			stransfer->staging ? stransfer->staging : &tex->buffer;

		sctx->ws->buffer_unmap(buf->buf);
	}

	if ((transfer->usage & PIPE_TRANSFER_WRITE) && stransfer->staging) {
		if (tex->is_depth && tex->buffer.b.b.nr_samples <= 1) {
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &stransfer->staging->b.b, transfer->level,
						  &transfer->box);
		} else {
			si_copy_from_staging_texture(ctx, stransfer);
		}
	}

	if (stransfer->staging) {
		sctx->num_alloc_tex_transfer_bytes += stransfer->staging->buf->size;
		si_resource_reference(&stransfer->staging, NULL);
	}

	/* Heuristic for {upload, draw, upload, draw, ..}:
	 *
	 * Flush the gfx IB if we've allocated too much texture storage.
	 *
	 * The idea is that we don't want to build IBs that use too much
	 * memory and put pressure on the kernel memory manager and we also
	 * want to make temporary and invalidated buffers go idle ASAP to
	 * decrease the total memory usage or make them reusable. The memory
	 * usage will be slightly higher than given here because of the buffer
	 * cache in the winsys.
	 *
	 * The result is that the kernel memory manager is never a bottleneck.
	 */
	if (sctx->num_alloc_tex_transfer_bytes > sctx->screen->info.gart_size / 4) {
		si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
		sctx->num_alloc_tex_transfer_bytes = 0;
	}

	pipe_resource_reference(&transfer->resource, NULL);
	FREE(transfer);
}
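/* Example of the flush heuristic above (hypothetical numbers): with a 4 GiB
 * GART, the gfx IB is flushed once more than 1 GiB of staging/invalidated
 * texture storage has accumulated since the last flush.
 */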
static const struct u_resource_vtbl si_texture_vtbl =
{
	NULL,				/* get_handle */
	si_texture_destroy,		/* resource_destroy */
	si_texture_transfer_map,	/* transfer_map */
	u_default_transfer_flush_region, /* transfer_flush_region */
	si_texture_transfer_unmap,	/* transfer_unmap */
};
/* Return whether it's allowed to reinterpret one format as another with DCC
 * enabled.
 */
bool vi_dcc_formats_compatible(enum pipe_format format1,
			       enum pipe_format format2)
{
	const struct util_format_description *desc1, *desc2;

	/* No format change - exit early. */
	if (format1 == format2)
		return true;

	format1 = si_simplify_cb_format(format1);
	format2 = si_simplify_cb_format(format2);

	/* Check again after format adjustments. */
	if (format1 == format2)
		return true;

	desc1 = util_format_description(format1);
	desc2 = util_format_description(format2);

	if (desc1->layout != UTIL_FORMAT_LAYOUT_PLAIN ||
	    desc2->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return false;

	/* Float and non-float are totally incompatible. */
	if ((desc1->channel[0].type == UTIL_FORMAT_TYPE_FLOAT) !=
	    (desc2->channel[0].type == UTIL_FORMAT_TYPE_FLOAT))
		return false;

	/* Channel sizes must match across DCC formats.
	 * Comparing just the first 2 channels should be enough.
	 */
	if (desc1->channel[0].size != desc2->channel[0].size ||
	    (desc1->nr_channels >= 2 &&
	     desc1->channel[1].size != desc2->channel[1].size))
		return false;

	/* Everything below is not needed if the driver never uses the DCC
	 * clear code with the value of 1.
	 */

	/* If the clear values are all 1 or all 0, this constraint can be
	 * ignored. */
	if (vi_alpha_is_on_msb(format1) != vi_alpha_is_on_msb(format2))
		return false;

	/* Channel types must match if the clear value of 1 is used.
	 * The type categories are only float, signed, unsigned.
	 * NORM and INT are always compatible.
	 */
	if (desc1->channel[0].type != desc2->channel[0].type ||
	    (desc1->nr_channels >= 2 &&
	     desc1->channel[1].type != desc2->channel[1].type))
		return false;

	return true;
}
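/* Examples of the rules above (illustrative): reinterpreting
 * R8G8B8A8_UNORM as R8G8B8A8_SRGB is compatible (same layout, channel sizes
 * and types), while R32_FLOAT vs. R32_UINT is rejected by the float check,
 * and R16G16_UNORM vs. R8G8B8A8_UNORM by the channel-size check.
 */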
bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex,
				     unsigned level,
				     enum pipe_format view_format)
{
	struct si_texture *stex = (struct si_texture *)tex;

	return vi_dcc_enabled(stex, level) &&
	       !vi_dcc_formats_compatible(tex->format, view_format);
}
/* This can't be merged with the above function, because
 * vi_dcc_formats_compatible should be called only when DCC is enabled. */
void vi_disable_dcc_if_incompatible_format(struct si_context *sctx,
					   struct pipe_resource *tex,
					   unsigned level,
					   enum pipe_format view_format)
{
	struct si_texture *stex = (struct si_texture *)tex;

	if (vi_dcc_formats_are_incompatible(tex, level, view_format))
		if (!si_texture_disable_dcc(sctx, stex))
			si_decompress_dcc(sctx, stex);
}
struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
					      struct pipe_resource *texture,
					      const struct pipe_surface *templ,
					      unsigned width0, unsigned height0,
					      unsigned width, unsigned height)
{
	struct si_surface *surface = CALLOC_STRUCT(si_surface);

	if (!surface)
		return NULL;

	assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
	assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));

	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.context = pipe;
	surface->base.format = templ->format;
	surface->base.width = width;
	surface->base.height = height;
	surface->base.u = templ->u;

	surface->width0 = width0;
	surface->height0 = height0;

	surface->dcc_incompatible =
		texture->target != PIPE_BUFFER &&
		vi_dcc_formats_are_incompatible(texture, templ->u.tex.level,
						templ->format);
	return &surface->base;
}
static struct pipe_surface *si_create_surface(struct pipe_context *pipe,
					      struct pipe_resource *tex,
					      const struct pipe_surface *templ)
{
	unsigned level = templ->u.tex.level;
	unsigned width = u_minify(tex->width0, level);
	unsigned height = u_minify(tex->height0, level);
	unsigned width0 = tex->width0;
	unsigned height0 = tex->height0;

	if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
		const struct util_format_description *tex_desc
			= util_format_description(tex->format);
		const struct util_format_description *templ_desc
			= util_format_description(templ->format);

		assert(tex_desc->block.bits == templ_desc->block.bits);

		/* Adjust size of surface if and only if the block width or
		 * height is changed. */
		if (tex_desc->block.width != templ_desc->block.width ||
		    tex_desc->block.height != templ_desc->block.height) {
			unsigned nblks_x = util_format_get_nblocksx(tex->format, width);
			unsigned nblks_y = util_format_get_nblocksy(tex->format, height);

			width = nblks_x * templ_desc->block.width;
			height = nblks_y * templ_desc->block.height;

			width0 = util_format_get_nblocksx(tex->format, width0);
			height0 = util_format_get_nblocksy(tex->format, height0);
		}
	}

	return si_create_surface_custom(pipe, tex, templ,
					width0, height0,
					width, height);
}
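/* Worked example for the block-size adjustment above (illustrative): viewing
 * a 64x64 BC1 texture (4x4 blocks, 64 bits each) through an
 * R16G16B16A16_UINT template (1x1 blocks, also 64 bits) yields
 * nblks_x = nblks_y = 16, so the surface is created as 16x16 -- one "texel"
 * per compressed block.
 */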
static void si_surface_destroy(struct pipe_context *pipe,
			       struct pipe_surface *surface)
{
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}
unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);

#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_028C70_SWAP_STD;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return ~0U;

	switch (desc->nr_channels) {
	case 1:
		if (HAS_SWIZZLE(0,X))
			return V_028C70_SWAP_STD; /* X___ */
		else if (HAS_SWIZZLE(3,X))
			return V_028C70_SWAP_ALT_REV; /* ___X */
		break;
	case 2:
		if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
		    (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
		    (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
			return V_028C70_SWAP_STD; /* XY__ */
		else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
			 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
			 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
			/* YX__ */
			return (do_endian_swap ? V_028C70_SWAP_STD : V_028C70_SWAP_STD_REV);
		else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
			return V_028C70_SWAP_ALT; /* X__Y */
		else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
			return V_028C70_SWAP_ALT_REV; /* Y__X */
		break;
	case 3:
		if (HAS_SWIZZLE(0,X))
			return (do_endian_swap ? V_028C70_SWAP_STD_REV : V_028C70_SWAP_STD);
		else if (HAS_SWIZZLE(0,Z))
			return V_028C70_SWAP_STD_REV; /* ZYX */
		break;
	case 4:
		/* check the middle channels, the 1st and 4th channel can be NONE */
		if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
			return V_028C70_SWAP_STD; /* XYZW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
			return V_028C70_SWAP_STD_REV; /* WZYX */
		} else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
			return V_028C70_SWAP_ALT; /* ZYXW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
			/* YZWX */
			if (desc->is_array)
				return V_028C70_SWAP_ALT_REV;
			else
				return (do_endian_swap ? V_028C70_SWAP_ALT : V_028C70_SWAP_ALT_REV);
		}
		break;
	}
	return ~0U;
}
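/* Examples of the mapping above (little-endian, do_endian_swap = false):
 * R8G8B8A8 (XYZW) -> SWAP_STD, B8G8R8A8 (ZYXW) -> SWAP_ALT,
 * A8R8G8B8 (YZWX) -> SWAP_ALT_REV, A8B8G8R8 (WZYX) -> SWAP_STD_REV.
 */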
/* PIPELINE_STAT-BASED DCC ENABLEMENT FOR DISPLAYABLE SURFACES */

static void vi_dcc_clean_up_context_slot(struct si_context *sctx,
					 int slot)
{
	int i;

	if (sctx->dcc_stats[slot].query_active)
		vi_separate_dcc_stop_query(sctx,
					   sctx->dcc_stats[slot].tex);

	for (i = 0; i < ARRAY_SIZE(sctx->dcc_stats[slot].ps_stats); i++)
		if (sctx->dcc_stats[slot].ps_stats[i]) {
			sctx->b.destroy_query(&sctx->b,
					      sctx->dcc_stats[slot].ps_stats[i]);
			sctx->dcc_stats[slot].ps_stats[i] = NULL;
		}

	si_texture_reference(&sctx->dcc_stats[slot].tex, NULL);
}
/**
 * Return the per-context slot where DCC statistics queries for the texture live.
 */
static unsigned vi_get_context_dcc_stats_index(struct si_context *sctx,
					       struct si_texture *tex)
{
	int i, empty_slot = -1;

	/* Remove zombie textures (textures kept alive by this array only). */
	for (i = 0; i < ARRAY_SIZE(sctx->dcc_stats); i++)
		if (sctx->dcc_stats[i].tex &&
		    sctx->dcc_stats[i].tex->buffer.b.b.reference.count == 1)
			vi_dcc_clean_up_context_slot(sctx, i);

	/* Find the texture. */
	for (i = 0; i < ARRAY_SIZE(sctx->dcc_stats); i++) {
		/* Return if found. */
		if (sctx->dcc_stats[i].tex == tex) {
			sctx->dcc_stats[i].last_use_timestamp = os_time_get();
			return i;
		}

		/* Record the first seen empty slot. */
		if (empty_slot == -1 && !sctx->dcc_stats[i].tex)
			empty_slot = i;
	}

	/* Not found. Remove the oldest member to make space in the array. */
	if (empty_slot == -1) {
		int oldest_slot = 0;

		/* Find the oldest slot. */
		for (i = 1; i < ARRAY_SIZE(sctx->dcc_stats); i++)
			if (sctx->dcc_stats[oldest_slot].last_use_timestamp >
			    sctx->dcc_stats[i].last_use_timestamp)
				oldest_slot = i;

		/* Clean up the oldest slot. */
		vi_dcc_clean_up_context_slot(sctx, oldest_slot);
		empty_slot = oldest_slot;
	}

	/* Add the texture to the new slot. */
	si_texture_reference(&sctx->dcc_stats[empty_slot].tex, tex);
	sctx->dcc_stats[empty_slot].last_use_timestamp = os_time_get();
	return empty_slot;
}
static struct pipe_query *
vi_create_resuming_pipestats_query(struct si_context *sctx)
{
	struct si_query_hw *query = (struct si_query_hw*)
		sctx->b.create_query(&sctx->b, PIPE_QUERY_PIPELINE_STATISTICS, 0);

	query->flags |= SI_QUERY_HW_FLAG_BEGIN_RESUMES;
	return (struct pipe_query*)query;
}
/**
 * Called when binding a color buffer.
 */
void vi_separate_dcc_start_query(struct si_context *sctx,
				 struct si_texture *tex)
{
	unsigned i = vi_get_context_dcc_stats_index(sctx, tex);

	assert(!sctx->dcc_stats[i].query_active);

	if (!sctx->dcc_stats[i].ps_stats[0])
		sctx->dcc_stats[i].ps_stats[0] = vi_create_resuming_pipestats_query(sctx);

	/* begin or resume the query */
	sctx->b.begin_query(&sctx->b, sctx->dcc_stats[i].ps_stats[0]);
	sctx->dcc_stats[i].query_active = true;
}
/**
 * Called when unbinding a color buffer.
 */
void vi_separate_dcc_stop_query(struct si_context *sctx,
				struct si_texture *tex)
{
	unsigned i = vi_get_context_dcc_stats_index(sctx, tex);

	assert(sctx->dcc_stats[i].query_active);
	assert(sctx->dcc_stats[i].ps_stats[0]);

	/* pause or end the query */
	sctx->b.end_query(&sctx->b, sctx->dcc_stats[i].ps_stats[0]);
	sctx->dcc_stats[i].query_active = false;
}
static bool vi_should_enable_separate_dcc(struct si_texture *tex)
{
	/* The minimum number of fullscreen draws per frame that is required
	 * to enable DCC. */
	return tex->ps_draw_ratio + tex->num_slow_clears >= 5;
}
/* Called by fast clear. */
void vi_separate_dcc_try_enable(struct si_context *sctx,
				struct si_texture *tex)
{
	/* The intent is to use this with shared displayable back buffers,
	 * but it's not strictly limited only to them.
	 */
	if (!tex->buffer.b.is_shared ||
	    !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) ||
	    tex->buffer.b.b.target != PIPE_TEXTURE_2D ||
	    tex->buffer.b.b.last_level > 0 ||
	    !tex->surface.dcc_size ||
	    sctx->screen->debug_flags & DBG(NO_DCC) ||
	    sctx->screen->debug_flags & DBG(NO_DCC_FB))
		return;

	assert(sctx->chip_class >= GFX8);

	if (tex->dcc_offset)
		return; /* already enabled */

	/* Enable the DCC stat gathering. */
	if (!tex->dcc_gather_statistics) {
		tex->dcc_gather_statistics = true;
		vi_separate_dcc_start_query(sctx, tex);
	}

	if (!vi_should_enable_separate_dcc(tex))
		return; /* stats show that DCC decompression is too expensive */

	assert(tex->surface.num_dcc_levels);
	assert(!tex->dcc_separate_buffer);

	si_texture_discard_cmask(sctx->screen, tex);

	/* Get a DCC buffer. */
	if (tex->last_dcc_separate_buffer) {
		assert(tex->dcc_gather_statistics);
		assert(!tex->dcc_separate_buffer);
		tex->dcc_separate_buffer = tex->last_dcc_separate_buffer;
		tex->last_dcc_separate_buffer = NULL;
	} else {
		tex->dcc_separate_buffer =
			si_aligned_buffer_create(sctx->b.screen,
						 SI_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT,
						 tex->surface.dcc_size,
						 tex->surface.dcc_alignment);
		if (!tex->dcc_separate_buffer)
			return;
	}

	/* dcc_offset is the absolute GPUVM address. */
	tex->dcc_offset = tex->dcc_separate_buffer->gpu_address;

	/* no need to flag anything since this is called by fast clear that
	 * flags framebuffer state
	 */
}
/**
 * Called by pipe_context::flush_resource, the place where DCC decompression
 * takes place.
 */
void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
					     struct si_texture *tex)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct pipe_query *tmp;
	unsigned i = vi_get_context_dcc_stats_index(sctx, tex);
	bool query_active = sctx->dcc_stats[i].query_active;
	bool disable = false;

	if (sctx->dcc_stats[i].ps_stats[2]) {
		union pipe_query_result result;

		/* Read the results. */
		struct pipe_query *query = sctx->dcc_stats[i].ps_stats[2];
		ctx->get_query_result(ctx, query,
				      true, &result);
		si_query_buffer_reset(sctx, &((struct si_query_hw*)query)->buffer);

		/* Compute the approximate number of fullscreen draws. */
		tex->ps_draw_ratio =
			result.pipeline_statistics.ps_invocations /
			(tex->buffer.b.b.width0 * tex->buffer.b.b.height0);
		sctx->last_tex_ps_draw_ratio = tex->ps_draw_ratio;

		disable = tex->dcc_separate_buffer &&
			  !vi_should_enable_separate_dcc(tex);
	}

	tex->num_slow_clears = 0;

	/* stop the statistics query for ps_stats[0] */
	if (query_active)
		vi_separate_dcc_stop_query(sctx, tex);

	/* Move the queries in the queue by one. */
	tmp = sctx->dcc_stats[i].ps_stats[2];
	sctx->dcc_stats[i].ps_stats[2] = sctx->dcc_stats[i].ps_stats[1];
	sctx->dcc_stats[i].ps_stats[1] = sctx->dcc_stats[i].ps_stats[0];
	sctx->dcc_stats[i].ps_stats[0] = tmp;

	/* create and start a new query as ps_stats[0] */
	if (query_active)
		vi_separate_dcc_start_query(sctx, tex);

	if (disable) {
		assert(!tex->last_dcc_separate_buffer);
		tex->last_dcc_separate_buffer = tex->dcc_separate_buffer;
		tex->dcc_separate_buffer = NULL;
		tex->dcc_offset = 0;
		/* no need to flag anything since this is called after
		 * decompression that re-sets framebuffer state
		 */
	}
}
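/* Note on the ps_stats queue above: queries rotate through three slots, so
 * the result consumed from ps_stats[2] describes activity from roughly two
 * flushes earlier -- the enable/disable decision intentionally lags by that
 * much rather than reacting to a single frame.
 */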
static struct pipe_memory_object *
si_memobj_from_handle(struct pipe_screen *screen,
		      struct winsys_handle *whandle,
		      bool dedicated)
{
	struct si_screen *sscreen = (struct si_screen *)screen;
	struct si_memory_object *memobj = CALLOC_STRUCT(si_memory_object);
	struct pb_buffer *buf = NULL;
	uint32_t stride, offset;

	if (!memobj)
		return NULL;

	buf = sscreen->ws->buffer_from_handle(sscreen->ws, whandle,
					      sscreen->info.max_alignment,
					      &stride, &offset);
	if (!buf) {
		free(memobj);
		return NULL;
	}

	memobj->b.dedicated = dedicated;
	memobj->buf = buf;
	memobj->stride = stride;

	return (struct pipe_memory_object *)memobj;
}
static void
si_memobj_destroy(struct pipe_screen *screen,
		  struct pipe_memory_object *_memobj)
{
	struct si_memory_object *memobj = (struct si_memory_object *)_memobj;

	pb_reference(&memobj->buf, NULL);
	free(memobj);
}
static struct pipe_resource *
si_texture_from_memobj(struct pipe_screen *screen,
		       const struct pipe_resource *templ,
		       struct pipe_memory_object *_memobj,
		       uint64_t offset)
{
	struct si_screen *sscreen = (struct si_screen *)screen;
	struct si_memory_object *memobj = (struct si_memory_object *)_memobj;
	struct pipe_resource *tex =
		si_texture_from_winsys_buffer(sscreen, templ, memobj->buf,
					      memobj->stride, offset,
					      PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE |
					      PIPE_HANDLE_USAGE_SHADER_WRITE,
					      memobj->b.dedicated);
	if (!tex)
		return NULL;

	/* si_texture_from_winsys_buffer doesn't increment refcount of
	 * memobj->buf, so increment it here.
	 */
	struct pb_buffer *buf = NULL;
	pb_reference(&buf, memobj->buf);
	return tex;
}
static bool si_check_resource_capability(struct pipe_screen *screen,
					 struct pipe_resource *resource,
					 unsigned bind)
{
	struct si_texture *tex = (struct si_texture *)resource;

	/* Buffers only support the linear flag. */
	if (resource->target == PIPE_BUFFER)
		return (bind & ~PIPE_BIND_LINEAR) == 0;

	if (bind & PIPE_BIND_LINEAR && !tex->surface.is_linear)
		return false;

	if (bind & PIPE_BIND_SCANOUT && !tex->surface.is_displayable)
		return false;

	/* TODO: PIPE_BIND_CURSOR - do we care? */
	return true;
}
void si_init_screen_texture_functions(struct si_screen *sscreen)
{
	sscreen->b.resource_from_handle = si_texture_from_handle;
	sscreen->b.resource_get_handle = si_texture_get_handle;
	sscreen->b.resource_get_info = si_texture_get_info;
	sscreen->b.resource_from_memobj = si_texture_from_memobj;
	sscreen->b.memobj_create_from_handle = si_memobj_from_handle;
	sscreen->b.memobj_destroy = si_memobj_destroy;
	sscreen->b.check_resource_capability = si_check_resource_capability;
}
void si_init_context_texture_functions(struct si_context *sctx)
{
	sctx->b.create_surface = si_create_surface;
	sctx->b.surface_destroy = si_surface_destroy;
}