/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "radeonsi/si_pipe.h"
#include "radeonsi/si_query.h"
#include "util/u_format.h"
#include "util/u_log.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include "util/u_resource.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"
#include "util/os_time.h"

#include "state_tracker/drm_driver.h"
#include "amd/common/sid.h"
static enum radeon_surf_mode
si_choose_tiling(struct si_screen *sscreen,
		 const struct pipe_resource *templ, bool tc_compatible_htile);
bool si_prepare_for_dma_blit(struct si_context *sctx,
			     struct r600_texture *rdst,
			     unsigned dst_level, unsigned dstx,
			     unsigned dsty, unsigned dstz,
			     struct r600_texture *rsrc,
			     unsigned src_level,
			     const struct pipe_box *src_box)
{
	if (rdst->surface.bpe != rsrc->surface.bpe)
		return false;

	/* MSAA: Blits don't exist in the real world. */
	if (rsrc->resource.b.b.nr_samples > 1 ||
	    rdst->resource.b.b.nr_samples > 1)
		return false;

	/* Depth-stencil surfaces:
	 * When dst is linear, the DB->CB copy preserves HTILE.
	 * When dst is tiled, the 3D path must be used to update HTILE.
	 */
	if (rsrc->is_depth || rdst->is_depth)
		return false;

	/* DCC:
	 * src: Use the 3D path. DCC decompression is expensive.
	 * dst: Use the 3D path to compress the pixels with DCC.
	 */
	if (vi_dcc_enabled(rsrc, src_level) ||
	    vi_dcc_enabled(rdst, dst_level))
		return false;

	/* CMASK:
	 * src: Both texture and SDMA paths need decompression. Use SDMA.
	 * dst: If overwriting the whole texture, discard CMASK and use
	 *      SDMA. Otherwise, use the 3D path.
	 */
	if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
		/* The CMASK clear is only enabled for the first level. */
		assert(dst_level == 0);
		if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
						      dstx, dsty, dstz, src_box->width,
						      src_box->height, src_box->depth))
			return false;

		si_texture_discard_cmask(sctx->screen, rdst);
	}

	/* All requirements are met. Prepare textures for SDMA. */
	if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
		sctx->b.flush_resource(&sctx->b, &rsrc->resource.b.b);

	assert(!(rsrc->dirty_level_mask & (1 << src_level)));
	assert(!(rdst->dirty_level_mask & (1 << dst_level)));

	return true;
}
/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void si_copy_region_with_blit(struct pipe_context *pipe,
				     struct pipe_resource *dst,
				     unsigned dst_level,
				     unsigned dstx, unsigned dsty, unsigned dstz,
				     struct pipe_resource *src,
				     unsigned src_level,
				     const struct pipe_box *src_box)
{
	struct pipe_blit_info blit;

	memset(&blit, 0, sizeof(blit));
	blit.src.resource = src;
	blit.src.format = src->format;
	blit.src.level = src_level;
	blit.src.box = *src_box;
	blit.dst.resource = dst;
	blit.dst.format = dst->format;
	blit.dst.level = dst_level;
	blit.dst.box.x = dstx;
	blit.dst.box.y = dsty;
	blit.dst.box.z = dstz;
	blit.dst.box.width = src_box->width;
	blit.dst.box.height = src_box->height;
	blit.dst.box.depth = src_box->depth;
	blit.mask = util_format_get_mask(src->format) &
		    util_format_get_mask(dst->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	pipe->blit(pipe, &blit);
}
/* Copy from a full GPU texture to a transfer's staging one. */
static void si_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = &rtransfer->staging->b.b;
	struct pipe_resource *src = transfer->resource;

	if (src->nr_samples > 1) {
		si_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
					 src, transfer->level, &transfer->box);
		return;
	}

	sctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
		       &transfer->box);
}
/* Copy from a transfer's staging texture to a full GPU one. */
static void si_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = transfer->resource;
	struct pipe_resource *src = &rtransfer->staging->b.b;
	struct pipe_box sbox;

	u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

	if (dst->nr_samples > 1) {
		si_copy_region_with_blit(ctx, dst, transfer->level,
					 transfer->box.x, transfer->box.y, transfer->box.z,
					 src, 0, &sbox);
		return;
	}

	sctx->dma_copy(ctx, dst, transfer->level,
		       transfer->box.x, transfer->box.y, transfer->box.z,
		       src, 0, &sbox);
}
static unsigned si_texture_get_offset(struct si_screen *sscreen,
				      struct r600_texture *rtex, unsigned level,
				      const struct pipe_box *box,
				      unsigned *stride,
				      unsigned *layer_stride)
{
	if (sscreen->info.chip_class >= GFX9) {
		*stride = rtex->surface.u.gfx9.surf_pitch * rtex->surface.bpe;
		*layer_stride = rtex->surface.u.gfx9.surf_slice_size;

		if (!box)
			return 0;

		/* Each texture is an array of slices. Each slice is an array
		 * of mipmap levels. */
		return box->z * rtex->surface.u.gfx9.surf_slice_size +
		       rtex->surface.u.gfx9.offset[level] +
		       (box->y / rtex->surface.blk_h *
			rtex->surface.u.gfx9.surf_pitch +
			box->x / rtex->surface.blk_w) * rtex->surface.bpe;
	} else {
		*stride = rtex->surface.u.legacy.level[level].nblk_x *
			  rtex->surface.bpe;
		assert((uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 <= UINT_MAX);
		*layer_stride = (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4;

		if (!box)
			return rtex->surface.u.legacy.level[level].offset;

		/* Each texture is an array of mipmap levels. Each level is
		 * an array of slices. */
		return rtex->surface.u.legacy.level[level].offset +
		       box->z * (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 +
		       (box->y / rtex->surface.blk_h *
			rtex->surface.u.legacy.level[level].nblk_x +
			box->x / rtex->surface.blk_w) * rtex->surface.bpe;
	}
}
static int si_init_surface(struct si_screen *sscreen,
			   struct radeon_surf *surface,
			   const struct pipe_resource *ptex,
			   enum radeon_surf_mode array_mode,
			   unsigned pitch_in_bytes_override,
			   unsigned offset,
			   bool is_imported,
			   bool is_scanout,
			   bool is_flushed_depth,
			   bool tc_compatible_htile)
{
	const struct util_format_description *desc =
		util_format_description(ptex->format);
	bool is_depth, is_stencil;
	int r;
	unsigned i, bpe, flags = 0;

	is_depth = util_format_has_depth(desc);
	is_stencil = util_format_has_stencil(desc);

	if (!is_flushed_depth &&
	    ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
		bpe = 4; /* stencil is allocated separately on evergreen */
	} else {
		bpe = util_format_get_blocksize(ptex->format);
		assert(util_is_power_of_two_or_zero(bpe));
	}

	if (!is_flushed_depth && is_depth) {
		flags |= RADEON_SURF_ZBUFFER;

		if (tc_compatible_htile &&
		    (sscreen->info.chip_class >= GFX9 ||
		     array_mode == RADEON_SURF_MODE_2D)) {
			/* TC-compatible HTILE only supports Z32_FLOAT.
			 * GFX9 also supports Z16_UNORM.
			 * On VI, promote Z16 to Z32. DB->CB copies will convert
			 * the format for transfers.
			 */
			if (sscreen->info.chip_class == VI)
				bpe = 4;

			flags |= RADEON_SURF_TC_COMPATIBLE_HTILE;
		}

		if (is_stencil)
			flags |= RADEON_SURF_SBUFFER;
	}

	if (sscreen->info.chip_class >= VI &&
	    (ptex->flags & SI_RESOURCE_FLAG_DISABLE_DCC ||
	     ptex->format == PIPE_FORMAT_R9G9B9E5_FLOAT ||
	     (ptex->nr_samples >= 2 && !sscreen->dcc_msaa_allowed)))
		flags |= RADEON_SURF_DISABLE_DCC;

	/* VI: DCC clear for 4x and 8x MSAA array textures unimplemented. */
	if (sscreen->info.chip_class == VI &&
	    ptex->nr_samples >= 4 &&
	    ptex->array_size > 1)
		flags |= RADEON_SURF_DISABLE_DCC;

	/* GFX9: DCC clear for 4x and 8x MSAA textures unimplemented. */
	if (sscreen->info.chip_class >= GFX9 &&
	    ptex->nr_samples >= 4)
		flags |= RADEON_SURF_DISABLE_DCC;

	if (ptex->bind & PIPE_BIND_SCANOUT || is_scanout) {
		/* This should catch bugs in gallium users setting incorrect flags. */
		assert(ptex->nr_samples <= 1 &&
		       ptex->array_size == 1 &&
		       ptex->last_level == 0 &&
		       !(flags & RADEON_SURF_Z_OR_SBUFFER));

		flags |= RADEON_SURF_SCANOUT;
	}

	if (ptex->bind & PIPE_BIND_SHARED)
		flags |= RADEON_SURF_SHAREABLE;
	if (is_imported)
		flags |= RADEON_SURF_IMPORTED | RADEON_SURF_SHAREABLE;
	if (!(ptex->flags & SI_RESOURCE_FLAG_FORCE_TILING))
		flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE;

	r = sscreen->ws->surface_init(sscreen->ws, ptex, flags, bpe,
				      array_mode, surface);
	if (r)
		return r;

	unsigned pitch = pitch_in_bytes_override / bpe;

	if (sscreen->info.chip_class >= GFX9) {
		surface->u.gfx9.surf_pitch = pitch;
		surface->u.gfx9.surf_slice_size =
			(uint64_t)pitch * surface->u.gfx9.surf_height * bpe;
		surface->u.gfx9.surf_offset = offset;
	} else {
		surface->u.legacy.level[0].nblk_x = pitch;
		surface->u.legacy.level[0].slice_size_dw =
			((uint64_t)pitch * surface->u.legacy.level[0].nblk_y * bpe) / 4;

		for (i = 0; i < ARRAY_SIZE(surface->u.legacy.level); ++i)
			surface->u.legacy.level[i].offset += offset;
	}
	return 0;
}
static void si_texture_init_metadata(struct si_screen *sscreen,
				     struct r600_texture *rtex,
				     struct radeon_bo_metadata *metadata)
{
	struct radeon_surf *surface = &rtex->surface;

	memset(metadata, 0, sizeof(*metadata));

	if (sscreen->info.chip_class >= GFX9) {
		metadata->u.gfx9.swizzle_mode = surface->u.gfx9.surf.swizzle_mode;
	} else {
		metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
					       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
					       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config;
		metadata->u.legacy.bankw = surface->u.legacy.bankw;
		metadata->u.legacy.bankh = surface->u.legacy.bankh;
		metadata->u.legacy.tile_split = surface->u.legacy.tile_split;
		metadata->u.legacy.mtilea = surface->u.legacy.mtilea;
		metadata->u.legacy.num_banks = surface->u.legacy.num_banks;
		metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
		metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
	}
}
static void si_surface_import_metadata(struct si_screen *sscreen,
					struct radeon_surf *surf,
					struct radeon_bo_metadata *metadata,
					enum radeon_surf_mode *array_mode,
					bool *is_scanout)
{
	if (sscreen->info.chip_class >= GFX9) {
		if (metadata->u.gfx9.swizzle_mode > 0)
			*array_mode = RADEON_SURF_MODE_2D;
		else
			*array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

		*is_scanout = metadata->u.gfx9.swizzle_mode == 0 ||
			      metadata->u.gfx9.swizzle_mode % 4 == 2;

		surf->u.gfx9.surf.swizzle_mode = metadata->u.gfx9.swizzle_mode;
	} else {
		surf->u.legacy.pipe_config = metadata->u.legacy.pipe_config;
		surf->u.legacy.bankw = metadata->u.legacy.bankw;
		surf->u.legacy.bankh = metadata->u.legacy.bankh;
		surf->u.legacy.tile_split = metadata->u.legacy.tile_split;
		surf->u.legacy.mtilea = metadata->u.legacy.mtilea;
		surf->u.legacy.num_banks = metadata->u.legacy.num_banks;

		if (metadata->u.legacy.macrotile == RADEON_LAYOUT_TILED)
			*array_mode = RADEON_SURF_MODE_2D;
		else if (metadata->u.legacy.microtile == RADEON_LAYOUT_TILED)
			*array_mode = RADEON_SURF_MODE_1D;
		else
			*array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

		*is_scanout = metadata->u.legacy.scanout;
	}
}
void si_eliminate_fast_color_clear(struct si_context *sctx,
				   struct r600_texture *rtex)
{
	struct si_screen *sscreen = sctx->screen;
	struct pipe_context *ctx = &sctx->b;

	if (ctx == sscreen->aux_context)
		mtx_lock(&sscreen->aux_context_lock);

	unsigned n = sctx->num_decompress_calls;
	ctx->flush_resource(ctx, &rtex->resource.b.b);

	/* Flush only if any fast clear elimination took place. */
	if (n != sctx->num_decompress_calls)
		ctx->flush(ctx, NULL, 0);

	if (ctx == sscreen->aux_context)
		mtx_unlock(&sscreen->aux_context_lock);
}
void si_texture_discard_cmask(struct si_screen *sscreen,
			      struct r600_texture *rtex)
{
	if (!rtex->cmask.size)
		return;

	assert(rtex->resource.b.b.nr_samples <= 1);

	memset(&rtex->cmask, 0, sizeof(rtex->cmask));
	rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;
	rtex->dirty_level_mask = 0;

	rtex->cb_color_info &= ~S_028C70_FAST_CLEAR(1);

	if (rtex->cmask_buffer != &rtex->resource)
		r600_resource_reference(&rtex->cmask_buffer, NULL);

	/* Notify all contexts about the change. */
	p_atomic_inc(&sscreen->dirty_tex_counter);
	p_atomic_inc(&sscreen->compressed_colortex_counter);
}
static bool si_can_disable_dcc(struct r600_texture *rtex)
{
	/* We can't disable DCC if it can be written by another process. */
	return rtex->dcc_offset &&
	       (!rtex->resource.b.is_shared ||
		!(rtex->resource.external_usage & PIPE_HANDLE_USAGE_WRITE));
}
static bool si_texture_discard_dcc(struct si_screen *sscreen,
				   struct r600_texture *rtex)
{
	if (!si_can_disable_dcc(rtex))
		return false;

	assert(rtex->dcc_separate_buffer == NULL);

	rtex->dcc_offset = 0;

	/* Notify all contexts about the change. */
	p_atomic_inc(&sscreen->dirty_tex_counter);
	return true;
}
/**
 * Disable DCC for the texture (first decompress, then discard metadata).
 *
 * There is an unresolved multi-context synchronization issue between
 * screen::aux_context and the current context. If applications do this with
 * multiple contexts, it's already undefined behavior for them and we don't
 * have to worry about that. The scenario is:
 *
 * If context 1 disables DCC and context 2 has queued commands that write
 * to the texture via CB with DCC enabled, and the order of operations is
 * as follows:
 *   context 2 queues draw calls rendering to the texture, but doesn't flush
 *   context 1 disables DCC and flushes
 *   context 1 & 2 reset descriptors and FB state
 *   context 2 flushes (new compressed tiles written by the draw calls)
 *   context 1 & 2 read garbage, because DCC is disabled, yet there are
 *   compressed tiles
 *
 * \param sctx  the current context if you have one, or rscreen->aux_context
 *              if you don't.
 */
bool si_texture_disable_dcc(struct si_context *sctx,
			    struct r600_texture *rtex)
{
	struct si_screen *sscreen = sctx->screen;

	if (!si_can_disable_dcc(rtex))
		return false;

	if (&sctx->b == sscreen->aux_context)
		mtx_lock(&sscreen->aux_context_lock);

	/* Decompress DCC. */
	si_decompress_dcc(sctx, rtex);
	sctx->b.flush(&sctx->b, NULL, 0);

	if (&sctx->b == sscreen->aux_context)
		mtx_unlock(&sscreen->aux_context_lock);

	return si_texture_discard_dcc(sscreen, rtex);
}
static void si_reallocate_texture_inplace(struct si_context *sctx,
					  struct r600_texture *rtex,
					  unsigned new_bind_flag,
					  bool invalidate_storage)
{
	struct pipe_screen *screen = sctx->b.screen;
	struct r600_texture *new_tex;
	struct pipe_resource templ = rtex->resource.b.b;
	unsigned i;

	templ.bind |= new_bind_flag;

	if (rtex->resource.b.is_shared)
		return;

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		if (rtex->surface.is_linear)
			return;

		/* This fails with MSAA, depth, and compressed textures. */
		if (si_choose_tiling(sctx->screen, &templ, false) !=
		    RADEON_SURF_MODE_LINEAR_ALIGNED)
			return;
	}

	new_tex = (struct r600_texture*)screen->resource_create(screen, &templ);
	if (!new_tex)
		return;

	/* Copy the pixels to the new texture. */
	if (!invalidate_storage) {
		for (i = 0; i <= templ.last_level; i++) {
			struct pipe_box box;

			u_box_3d(0, 0, 0,
				 u_minify(templ.width0, i), u_minify(templ.height0, i),
				 util_num_layers(&templ, i), &box);

			sctx->dma_copy(&sctx->b, &new_tex->resource.b.b, i, 0, 0, 0,
				       &rtex->resource.b.b, i, &box);
		}
	}

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		si_texture_discard_cmask(sctx->screen, rtex);
		si_texture_discard_dcc(sctx->screen, rtex);
	}

	/* Replace the structure fields of rtex. */
	rtex->resource.b.b.bind = templ.bind;
	pb_reference(&rtex->resource.buf, new_tex->resource.buf);
	rtex->resource.gpu_address = new_tex->resource.gpu_address;
	rtex->resource.vram_usage = new_tex->resource.vram_usage;
	rtex->resource.gart_usage = new_tex->resource.gart_usage;
	rtex->resource.bo_size = new_tex->resource.bo_size;
	rtex->resource.bo_alignment = new_tex->resource.bo_alignment;
	rtex->resource.domains = new_tex->resource.domains;
	rtex->resource.flags = new_tex->resource.flags;
	rtex->size = new_tex->size;
	rtex->db_render_format = new_tex->db_render_format;
	rtex->db_compatible = new_tex->db_compatible;
	rtex->can_sample_z = new_tex->can_sample_z;
	rtex->can_sample_s = new_tex->can_sample_s;
	rtex->surface = new_tex->surface;
	rtex->fmask = new_tex->fmask;
	rtex->cmask = new_tex->cmask;
	rtex->cb_color_info = new_tex->cb_color_info;
	rtex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode;
	rtex->htile_offset = new_tex->htile_offset;
	rtex->tc_compatible_htile = new_tex->tc_compatible_htile;
	rtex->depth_cleared = new_tex->depth_cleared;
	rtex->stencil_cleared = new_tex->stencil_cleared;
	rtex->dcc_gather_statistics = new_tex->dcc_gather_statistics;
	rtex->framebuffers_bound = new_tex->framebuffers_bound;

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		assert(!rtex->htile_offset);
		assert(!rtex->cmask.size);
		assert(!rtex->fmask.size);
		assert(!rtex->dcc_offset);
		assert(!rtex->is_depth);
	}

	r600_texture_reference(&new_tex, NULL);

	p_atomic_inc(&sctx->screen->dirty_tex_counter);
}
static uint32_t si_get_bo_metadata_word1(struct si_screen *sscreen)
{
	return (ATI_VENDOR_ID << 16) | sscreen->info.pci_id;
}
static void si_query_opaque_metadata(struct si_screen *sscreen,
				     struct r600_texture *rtex,
				     struct radeon_bo_metadata *md)
{
	struct pipe_resource *res = &rtex->resource.b.b;
	static const unsigned char swizzle[] = {
		PIPE_SWIZZLE_X,
		PIPE_SWIZZLE_Y,
		PIPE_SWIZZLE_Z,
		PIPE_SWIZZLE_W
	};
	uint32_t desc[8], i;
	bool is_array = util_texture_is_array(res->target);

	/* DRM 2.x.x doesn't support this. */
	if (sscreen->info.drm_major != 3)
		return;

	assert(rtex->dcc_separate_buffer == NULL);
	assert(rtex->fmask.size == 0);

	/* Metadata image format format version 1:
	 * [0] = 1 (metadata format identifier)
	 * [1] = (VENDOR_ID << 16) | PCI_ID
	 * [2:9] = image descriptor for the whole resource
	 *         [2] is always 0, because the base address is cleared
	 *         [9] is the DCC offset bits [39:8] from the beginning of
	 *             the buffer
	 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
	 */

	md->metadata[0] = 1; /* metadata image format version 1 */

	/* TILE_MODE_INDEX is ambiguous without a PCI ID. */
	md->metadata[1] = si_get_bo_metadata_word1(sscreen);

	si_make_texture_descriptor(sscreen, rtex, true,
				   res->target, res->format,
				   swizzle, 0, res->last_level, 0,
				   is_array ? res->array_size - 1 : 0,
				   res->width0, res->height0, res->depth0,
				   desc, NULL);

	si_set_mutable_tex_desc_fields(sscreen, rtex, &rtex->surface.u.legacy.level[0],
				       0, 0, rtex->surface.blk_w, false, desc);

	/* Clear the base address and set the relative DCC offset. */
	desc[0] = 0;
	desc[1] &= C_008F14_BASE_ADDRESS_HI;
	desc[7] = rtex->dcc_offset >> 8;

	/* Dwords [2:9] contain the image descriptor. */
	memcpy(&md->metadata[2], desc, sizeof(desc));
	md->size_metadata = 10 * 4;

	/* Dwords [10:..] contain the mipmap level offsets. */
	if (sscreen->info.chip_class <= VI) {
		for (i = 0; i <= res->last_level; i++)
			md->metadata[10+i] = rtex->surface.u.legacy.level[i].offset >> 8;

		md->size_metadata += (1 + res->last_level) * 4;
	}
}
static void si_apply_opaque_metadata(struct si_screen *sscreen,
				     struct r600_texture *rtex,
				     struct radeon_bo_metadata *md)
{
	uint32_t *desc = &md->metadata[2];

	if (sscreen->info.chip_class < VI)
		return;

	/* Return if DCC is enabled. The texture should be set up with it
	 * already.
	 */
	if (md->size_metadata >= 10 * 4 && /* at least 2(header) + 8(desc) dwords */
	    md->metadata[0] != 0 &&
	    md->metadata[1] == si_get_bo_metadata_word1(sscreen) &&
	    G_008F28_COMPRESSION_EN(desc[6])) {
		rtex->dcc_offset = (uint64_t)desc[7] << 8;
		return;
	}

	/* Disable DCC. These are always set by texture_from_handle and must
	 * be cleared here.
	 */
	rtex->dcc_offset = 0;
}
static boolean si_texture_get_handle(struct pipe_screen* screen,
				     struct pipe_context *ctx,
				     struct pipe_resource *resource,
				     struct winsys_handle *whandle,
				     unsigned usage)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct si_context *sctx;
	struct r600_resource *res = r600_resource(resource);
	struct r600_texture *rtex = (struct r600_texture*)resource;
	struct radeon_bo_metadata metadata;
	bool update_metadata = false;
	unsigned stride, offset, slice_size;
	bool flush = false;

	ctx = threaded_context_unwrap_sync(ctx);
	sctx = (struct si_context*)(ctx ? ctx : sscreen->aux_context);

	if (resource->target != PIPE_BUFFER) {
		/* This is not supported now, but it might be required for OpenCL
		 * interop in the future.
		 */
		if (resource->nr_samples > 1 || rtex->is_depth)
			return false;

		/* Move a suballocated texture into a non-suballocated allocation. */
		if (sscreen->ws->buffer_is_suballocated(res->buf) ||
		    rtex->surface.tile_swizzle ||
		    (rtex->resource.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
		     sscreen->info.has_local_buffers &&
		     whandle->type != DRM_API_HANDLE_TYPE_KMS)) {
			assert(!res->b.is_shared);
			si_reallocate_texture_inplace(sctx, rtex,
						      PIPE_BIND_SHARED, false);
			flush = true;
			assert(res->b.b.bind & PIPE_BIND_SHARED);
			assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
			assert(!(res->flags & RADEON_FLAG_NO_INTERPROCESS_SHARING));
			assert(rtex->surface.tile_swizzle == 0);
		}

		/* Since shader image stores don't support DCC on VI,
		 * disable it for external clients that want write
		 * access.
		 */
		if (usage & PIPE_HANDLE_USAGE_WRITE && rtex->dcc_offset) {
			if (si_texture_disable_dcc(sctx, rtex)) {
				update_metadata = true;
				/* si_texture_disable_dcc flushes the context */
				flush = false;
			}
		}

		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
		    (rtex->cmask.size || rtex->dcc_offset)) {
			/* Eliminate fast clear (both CMASK and DCC) */
			si_eliminate_fast_color_clear(sctx, rtex);
			/* eliminate_fast_color_clear flushes the context */
			flush = false;

			/* Disable CMASK if flush_resource isn't going
			 * to be called.
			 */
			if (rtex->cmask.size)
				si_texture_discard_cmask(sscreen, rtex);
		}

		/* Set metadata. */
		if (!res->b.is_shared || update_metadata) {
			si_texture_init_metadata(sscreen, rtex, &metadata);
			si_query_opaque_metadata(sscreen, rtex, &metadata);

			sscreen->ws->buffer_set_metadata(res->buf, &metadata);
		}

		if (sscreen->info.chip_class >= GFX9) {
			offset = rtex->surface.u.gfx9.surf_offset;
			stride = rtex->surface.u.gfx9.surf_pitch *
				 rtex->surface.bpe;
			slice_size = rtex->surface.u.gfx9.surf_slice_size;
		} else {
			offset = rtex->surface.u.legacy.level[0].offset;
			stride = rtex->surface.u.legacy.level[0].nblk_x *
				 rtex->surface.bpe;
			slice_size = (uint64_t)rtex->surface.u.legacy.level[0].slice_size_dw * 4;
		}
	} else {
		/* Buffer exports are for the OpenCL interop. */
		/* Move a suballocated buffer into a non-suballocated allocation. */
		if (sscreen->ws->buffer_is_suballocated(res->buf) ||
		    /* A DMABUF export always fails if the BO is local. */
		    (rtex->resource.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
		     sscreen->info.has_local_buffers)) {
			assert(!res->b.is_shared);

			/* Allocate a new buffer with PIPE_BIND_SHARED. */
			struct pipe_resource templ = res->b.b;
			templ.bind |= PIPE_BIND_SHARED;

			struct pipe_resource *newb =
				screen->resource_create(screen, &templ);
			if (!newb)
				return false;

			/* Copy the old buffer contents to the new one. */
			struct pipe_box box;
			u_box_1d(0, newb->width0, &box);
			sctx->b.resource_copy_region(&sctx->b, newb, 0, 0, 0, 0,
						     &res->b.b, 0, &box);
			flush = true;
			/* Move the new buffer storage to the old pipe_resource. */
			si_replace_buffer_storage(&sctx->b, &res->b.b, newb);
			pipe_resource_reference(&newb, NULL);

			assert(res->b.b.bind & PIPE_BIND_SHARED);
			assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
		}

		/* Buffers */
		offset = 0;
		stride = 0;
		slice_size = 0;
	}

	if (flush)
		sctx->b.flush(&sctx->b, NULL, 0);

	if (res->b.is_shared) {
		/* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
		 * doesn't set it.
		 */
		res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
	} else {
		res->b.is_shared = true;
		res->external_usage = usage;
	}

	return sscreen->ws->buffer_get_handle(res->buf, stride, offset,
					      slice_size, whandle);
}
static void si_texture_destroy(struct pipe_screen *screen,
			       struct pipe_resource *ptex)
{
	struct r600_texture *rtex = (struct r600_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;

	r600_texture_reference(&rtex->flushed_depth_texture, NULL);

	if (rtex->cmask_buffer != &rtex->resource)
		r600_resource_reference(&rtex->cmask_buffer, NULL);

	pb_reference(&resource->buf, NULL);
	r600_resource_reference(&rtex->dcc_separate_buffer, NULL);
	r600_resource_reference(&rtex->last_dcc_separate_buffer, NULL);
	FREE(rtex);
}
static const struct u_resource_vtbl si_texture_vtbl;
/* The number of samples can be specified independently of the texture. */
void si_texture_get_fmask_info(struct si_screen *sscreen,
			       struct r600_texture *rtex,
			       unsigned nr_samples,
			       struct r600_fmask_info *out)
{
	/* FMASK is allocated like an ordinary texture. */
	struct pipe_resource templ = rtex->resource.b.b;
	struct radeon_surf fmask = {};
	unsigned flags, bpe;

	memset(out, 0, sizeof(*out));

	if (sscreen->info.chip_class >= GFX9) {
		out->alignment = rtex->surface.u.gfx9.fmask_alignment;
		out->size = rtex->surface.u.gfx9.fmask_size;
		out->tile_swizzle = rtex->surface.u.gfx9.fmask_tile_swizzle;
		return;
	}

	templ.nr_samples = 1;
	flags = rtex->surface.flags | RADEON_SURF_FMASK;

	switch (nr_samples) {
	case 2:
	case 4:
		bpe = 1;
		break;
	case 8:
		bpe = 4;
		break;
	default:
		PRINT_ERR("Invalid sample count for FMASK allocation.\n");
		return;
	}

	if (sscreen->ws->surface_init(sscreen->ws, &templ, flags, bpe,
				      RADEON_SURF_MODE_2D, &fmask)) {
		PRINT_ERR("Got error in surface_init while allocating FMASK.\n");
		return;
	}

	assert(fmask.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);

	out->slice_tile_max = (fmask.u.legacy.level[0].nblk_x * fmask.u.legacy.level[0].nblk_y) / 64;
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->tile_mode_index = fmask.u.legacy.tiling_index[0];
	out->pitch_in_pixels = fmask.u.legacy.level[0].nblk_x;
	out->bank_height = fmask.u.legacy.bankh;
	out->tile_swizzle = fmask.tile_swizzle;
	out->alignment = MAX2(256, fmask.surf_alignment);
	out->size = fmask.surf_size;
}
static void si_texture_allocate_fmask(struct si_screen *sscreen,
				      struct r600_texture *rtex)
{
	si_texture_get_fmask_info(sscreen, rtex,
				  rtex->resource.b.b.nr_samples, &rtex->fmask);

	rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
	rtex->size = rtex->fmask.offset + rtex->fmask.size;
}
void si_texture_get_cmask_info(struct si_screen *sscreen,
			       struct r600_texture *rtex,
			       struct r600_cmask_info *out)
{
	unsigned pipe_interleave_bytes = sscreen->info.pipe_interleave_bytes;
	unsigned num_pipes = sscreen->info.num_tile_pipes;
	unsigned cl_width, cl_height;

	if (sscreen->info.chip_class >= GFX9) {
		out->alignment = rtex->surface.u.gfx9.cmask_alignment;
		out->size = rtex->surface.u.gfx9.cmask_size;
		return;
	}

	switch (num_pipes) {
	case 2:
		cl_width = 32;
		cl_height = 16;
		break;
	case 4:
		cl_width = 32;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 32;
		break;
	case 16: /* Hawaii */
		cl_width = 64;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	unsigned base_align = num_pipes * pipe_interleave_bytes;

	unsigned width = align(rtex->resource.b.b.width0, cl_width*8);
	unsigned height = align(rtex->resource.b.b.height0, cl_height*8);
	unsigned slice_elements = (width * height) / (8*8);

	/* Each element of CMASK is a nibble. */
	unsigned slice_bytes = slice_elements / 2;

	out->slice_tile_max = (width * height) / (128*128);
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->alignment = MAX2(256, base_align);
	out->size = util_num_layers(&rtex->resource.b.b, 0) *
		    align(slice_bytes, base_align);
}
static void si_texture_allocate_cmask(struct si_screen *sscreen,
				      struct r600_texture *rtex)
{
	si_texture_get_cmask_info(sscreen, rtex, &rtex->cmask);

	rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);
	rtex->size = rtex->cmask.offset + rtex->cmask.size;

	rtex->cb_color_info |= S_028C70_FAST_CLEAR(1);
}
static void si_texture_get_htile_size(struct si_screen *sscreen,
				      struct r600_texture *rtex)
{
	unsigned cl_width, cl_height, width, height;
	unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
	unsigned num_pipes = sscreen->info.num_tile_pipes;

	assert(sscreen->info.chip_class <= VI);

	rtex->surface.htile_size = 0;

	/* HTILE is broken with 1D tiling on old kernels and CIK. */
	if (sscreen->info.chip_class >= CIK &&
	    rtex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
	    sscreen->info.drm_major == 2 && sscreen->info.drm_minor < 38)
		return;

	/* Overalign HTILE on P2 configs to work around GPU hangs in
	 * piglit/depthstencil-render-miplevels 585.
	 *
	 * This has been confirmed to help Kabini & Stoney, where the hangs
	 * are always reproducible. I think I have seen the test hang
	 * on Carrizo too, though it was very rare there.
	 */
	if (sscreen->info.chip_class >= CIK && num_pipes < 4)
		num_pipes = 4;

	switch (num_pipes) {
	case 1:
		cl_width = 32;
		cl_height = 16;
		break;
	case 2:
		cl_width = 32;
		cl_height = 32;
		break;
	case 4:
		cl_width = 64;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 64;
		break;
	case 16:
		cl_width = 128;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	width = align(rtex->resource.b.b.width0, cl_width * 8);
	height = align(rtex->resource.b.b.height0, cl_height * 8);

	slice_elements = (width * height) / (8 * 8);
	slice_bytes = slice_elements * 4;

	pipe_interleave_bytes = sscreen->info.pipe_interleave_bytes;
	base_align = num_pipes * pipe_interleave_bytes;

	rtex->surface.htile_alignment = base_align;
	rtex->surface.htile_size =
		util_num_layers(&rtex->resource.b.b, 0) *
		align(slice_bytes, base_align);
}
static void si_texture_allocate_htile(struct si_screen *sscreen,
				      struct r600_texture *rtex)
{
	if (sscreen->info.chip_class <= VI && !rtex->tc_compatible_htile)
		si_texture_get_htile_size(sscreen, rtex);

	if (!rtex->surface.htile_size)
		return;

	rtex->htile_offset = align(rtex->size, rtex->surface.htile_alignment);
	rtex->size = rtex->htile_offset + rtex->surface.htile_size;
}
void si_print_texture_info(struct si_screen *sscreen,
			   struct r600_texture *rtex, struct u_log_context *log)
{
	int i;

	/* Common parameters. */
	u_log_printf(log, "  Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
		"blk_h=%u, array_size=%u, last_level=%u, "
		"bpe=%u, nsamples=%u, flags=0x%x, %s\n",
		rtex->resource.b.b.width0, rtex->resource.b.b.height0,
		rtex->resource.b.b.depth0, rtex->surface.blk_w,
		rtex->surface.blk_h,
		rtex->resource.b.b.array_size, rtex->resource.b.b.last_level,
		rtex->surface.bpe, rtex->resource.b.b.nr_samples,
		rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));

	if (sscreen->info.chip_class >= GFX9) {
		u_log_printf(log, "  Surf: size=%"PRIu64", slice_size=%"PRIu64", "
			"alignment=%u, swmode=%u, epitch=%u, pitch=%u\n",
			rtex->surface.surf_size,
			rtex->surface.u.gfx9.surf_slice_size,
			rtex->surface.surf_alignment,
			rtex->surface.u.gfx9.surf.swizzle_mode,
			rtex->surface.u.gfx9.surf.epitch,
			rtex->surface.u.gfx9.surf_pitch);

		if (rtex->fmask.size) {
			u_log_printf(log, "  FMASK: offset=%"PRIu64", size=%"PRIu64", "
				"alignment=%u, swmode=%u, epitch=%u\n",
				rtex->fmask.offset,
				rtex->surface.u.gfx9.fmask_size,
				rtex->surface.u.gfx9.fmask_alignment,
				rtex->surface.u.gfx9.fmask.swizzle_mode,
				rtex->surface.u.gfx9.fmask.epitch);
		}

		if (rtex->cmask.size) {
			u_log_printf(log, "  CMask: offset=%"PRIu64", size=%"PRIu64", "
				"alignment=%u, rb_aligned=%u, pipe_aligned=%u\n",
				rtex->cmask.offset,
				rtex->surface.u.gfx9.cmask_size,
				rtex->surface.u.gfx9.cmask_alignment,
				rtex->surface.u.gfx9.cmask.rb_aligned,
				rtex->surface.u.gfx9.cmask.pipe_aligned);
		}

		if (rtex->htile_offset) {
			u_log_printf(log, "  HTile: offset=%"PRIu64", size=%u, alignment=%u, "
				"rb_aligned=%u, pipe_aligned=%u\n",
				rtex->htile_offset,
				rtex->surface.htile_size,
				rtex->surface.htile_alignment,
				rtex->surface.u.gfx9.htile.rb_aligned,
				rtex->surface.u.gfx9.htile.pipe_aligned);
		}

		if (rtex->dcc_offset) {
			u_log_printf(log, "  DCC: offset=%"PRIu64", size=%u, "
				"alignment=%u, pitch_max=%u, num_dcc_levels=%u\n",
				rtex->dcc_offset, rtex->surface.dcc_size,
				rtex->surface.dcc_alignment,
				rtex->surface.u.gfx9.dcc_pitch_max,
				rtex->surface.num_dcc_levels);
		}

		if (rtex->surface.u.gfx9.stencil_offset) {
			u_log_printf(log, "  Stencil: offset=%"PRIu64", swmode=%u, epitch=%u\n",
				rtex->surface.u.gfx9.stencil_offset,
				rtex->surface.u.gfx9.stencil.swizzle_mode,
				rtex->surface.u.gfx9.stencil.epitch);
		}
		return;
	}

	u_log_printf(log, "  Layout: size=%"PRIu64", alignment=%u, bankw=%u, "
		"bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
		rtex->surface.surf_size, rtex->surface.surf_alignment, rtex->surface.u.legacy.bankw,
		rtex->surface.u.legacy.bankh, rtex->surface.u.legacy.num_banks, rtex->surface.u.legacy.mtilea,
		rtex->surface.u.legacy.tile_split, rtex->surface.u.legacy.pipe_config,
		(rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);

	if (rtex->fmask.size)
		u_log_printf(log, "  FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
			"bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
			rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
			rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
			rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);

	if (rtex->cmask.size)
		u_log_printf(log, "  CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
			"slice_tile_max=%u\n",
			rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
			rtex->cmask.slice_tile_max);

	if (rtex->htile_offset)
		u_log_printf(log, "  HTile: offset=%"PRIu64", size=%u, "
			"alignment=%u, TC_compatible = %u\n",
			rtex->htile_offset, rtex->surface.htile_size,
			rtex->surface.htile_alignment,
			rtex->tc_compatible_htile);

	if (rtex->dcc_offset) {
		u_log_printf(log, "  DCC: offset=%"PRIu64", size=%u, alignment=%u\n",
			rtex->dcc_offset, rtex->surface.dcc_size,
			rtex->surface.dcc_alignment);
		for (i = 0; i <= rtex->resource.b.b.last_level; i++)
			u_log_printf(log, "  DCCLevel[%i]: enabled=%u, offset=%u, "
				"fast_clear_size=%u\n",
				i, i < rtex->surface.num_dcc_levels,
				rtex->surface.u.legacy.level[i].dcc_offset,
				rtex->surface.u.legacy.level[i].dcc_fast_clear_size);
	}

	for (i = 0; i <= rtex->resource.b.b.last_level; i++)
		u_log_printf(log, "  Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
			"npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
			"mode=%u, tiling_index = %u\n",
			i, rtex->surface.u.legacy.level[i].offset,
			(uint64_t)rtex->surface.u.legacy.level[i].slice_size_dw * 4,
			u_minify(rtex->resource.b.b.width0, i),
			u_minify(rtex->resource.b.b.height0, i),
			u_minify(rtex->resource.b.b.depth0, i),
			rtex->surface.u.legacy.level[i].nblk_x,
			rtex->surface.u.legacy.level[i].nblk_y,
			rtex->surface.u.legacy.level[i].mode,
			rtex->surface.u.legacy.tiling_index[i]);

	if (rtex->surface.has_stencil) {
		u_log_printf(log, "  StencilLayout: tilesplit=%u\n",
			rtex->surface.u.legacy.stencil_tile_split);
		for (i = 0; i <= rtex->resource.b.b.last_level; i++) {
			u_log_printf(log, "  StencilLevel[%i]: offset=%"PRIu64", "
				"slice_size=%"PRIu64", npix_x=%u, "
				"npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
				"mode=%u, tiling_index = %u\n",
				i, rtex->surface.u.legacy.stencil_level[i].offset,
				(uint64_t)rtex->surface.u.legacy.stencil_level[i].slice_size_dw * 4,
				u_minify(rtex->resource.b.b.width0, i),
				u_minify(rtex->resource.b.b.height0, i),
				u_minify(rtex->resource.b.b.depth0, i),
				rtex->surface.u.legacy.stencil_level[i].nblk_x,
				rtex->surface.u.legacy.stencil_level[i].nblk_y,
				rtex->surface.u.legacy.stencil_level[i].mode,
				rtex->surface.u.legacy.stencil_tiling_index[i]);
		}
	}
}
/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
si_texture_create_object(struct pipe_screen *screen,
			 const struct pipe_resource *base,
			 struct pb_buffer *buf,
			 struct radeon_surf *surface)
{
	struct r600_texture *rtex;
	struct r600_resource *resource;
	struct si_screen *sscreen = (struct si_screen*)screen;

	rtex = CALLOC_STRUCT(r600_texture);
	if (!rtex)
		return NULL;

	resource = &rtex->resource;
	resource->b.b = *base;
	resource->b.b.next = NULL;
	resource->b.vtbl = &si_texture_vtbl;
	pipe_reference_init(&resource->b.b.reference, 1);
	resource->b.b.screen = screen;

	/* don't include stencil-only formats which we don't support for rendering */
	rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));

	rtex->surface = *surface;
	rtex->size = rtex->surface.surf_size;

	rtex->tc_compatible_htile = rtex->surface.htile_size != 0 &&
				    (rtex->surface.flags &
				     RADEON_SURF_TC_COMPATIBLE_HTILE);

	/* TC-compatible HTILE:
	 * - VI only supports Z32_FLOAT.
	 * - GFX9 only supports Z32_FLOAT and Z16_UNORM. */
	if (rtex->tc_compatible_htile) {
		if (sscreen->info.chip_class >= GFX9 &&
		    base->format == PIPE_FORMAT_Z16_UNORM)
			rtex->db_render_format = base->format;
		else {
			rtex->db_render_format = PIPE_FORMAT_Z32_FLOAT;
			rtex->upgraded_depth = base->format != PIPE_FORMAT_Z32_FLOAT &&
					       base->format != PIPE_FORMAT_Z32_FLOAT_S8X24_UINT;
		}
	} else {
		rtex->db_render_format = base->format;
	}

	/* Applies to GCN. */
	rtex->last_msaa_resolve_target_micro_mode = rtex->surface.micro_tile_mode;

	/* Disable separate DCC at the beginning. DRI2 doesn't reuse buffers
	 * between frames, so the only thing that can enable separate DCC
	 * with DRI2 is multiple slow clears within a frame.
	 */
	rtex->ps_draw_ratio = 0;

	if (rtex->is_depth) {
		if (sscreen->info.chip_class >= GFX9) {
			rtex->can_sample_z = true;
			rtex->can_sample_s = true;
		} else {
			rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted;
			rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted;
		}

		if (!(base->flags & (SI_RESOURCE_FLAG_TRANSFER |
				     SI_RESOURCE_FLAG_FLUSHED_DEPTH))) {
			rtex->db_compatible = true;

			if (!(sscreen->debug_flags & DBG(NO_HYPERZ)))
				si_texture_allocate_htile(sscreen, rtex);
		}
	} else {
		if (base->nr_samples > 1 &&
		    !(sscreen->debug_flags & DBG(NO_FMASK))) {
			si_texture_allocate_fmask(sscreen, rtex);
			si_texture_allocate_cmask(sscreen, rtex);
			rtex->cmask_buffer = &rtex->resource;

			if (!rtex->fmask.size || !rtex->cmask.size) {
				FREE(rtex);
				return NULL;
			}
		}

		/* Shared textures must always set up DCC here.
		 * If it's not present, it will be disabled by
		 * apply_opaque_metadata later.
		 */
		if (rtex->surface.dcc_size &&
		    (buf || !(sscreen->debug_flags & DBG(NO_DCC))) &&
		    !(rtex->surface.flags & RADEON_SURF_SCANOUT)) {
			/* Reserve space for the DCC buffer. */
			rtex->dcc_offset = align64(rtex->size, rtex->surface.dcc_alignment);
			rtex->size = rtex->dcc_offset + rtex->surface.dcc_size;
		}
	}

	/* Now create the backing buffer. */
	if (!buf) {
		si_init_resource_fields(sscreen, resource, rtex->size,
					rtex->surface.surf_alignment);

		if (!si_alloc_resource(sscreen, resource)) {
			FREE(rtex);
			return NULL;
		}
	} else {
		resource->buf = buf;
		resource->gpu_address = sscreen->ws->buffer_get_virtual_address(resource->buf);
		resource->bo_size = buf->size;
		resource->bo_alignment = buf->alignment;
		resource->domains = sscreen->ws->buffer_get_initial_domain(resource->buf);
		if (resource->domains & RADEON_DOMAIN_VRAM)
			resource->vram_usage = buf->size;
		else if (resource->domains & RADEON_DOMAIN_GTT)
			resource->gart_usage = buf->size;
	}

	if (rtex->cmask.size) {
		/* Initialize the cmask to 0xCC (= compressed state). */
		si_screen_clear_buffer(sscreen, &rtex->cmask_buffer->b.b,
				       rtex->cmask.offset, rtex->cmask.size,
				       0xCCCCCCCC);
	}
	if (rtex->htile_offset) {
		uint32_t clear_value = 0;

		if (sscreen->info.chip_class >= GFX9 || rtex->tc_compatible_htile)
			clear_value = 0x0000030F;

		si_screen_clear_buffer(sscreen, &rtex->resource.b.b,
				       rtex->htile_offset,
				       rtex->surface.htile_size,
				       clear_value);
	}

	/* Initialize DCC only if the texture is not being imported. */
	if (!buf && rtex->dcc_offset) {
		si_screen_clear_buffer(sscreen, &rtex->resource.b.b,
				       rtex->dcc_offset,
				       rtex->surface.dcc_size,
				       0xFFFFFFFF);
	}

	/* Initialize the CMASK base register value. */
	rtex->cmask.base_address_reg =
		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

	if (sscreen->debug_flags & DBG(VM)) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
			rtex->resource.gpu_address,
			rtex->resource.gpu_address + rtex->resource.buf->size,
			base->width0, base->height0, util_num_layers(base, 0), base->last_level+1,
			base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
	}

	if (sscreen->debug_flags & DBG(TEX)) {
		struct u_log_context log;
		u_log_context_init(&log);
		si_print_texture_info(sscreen, rtex, &log);
		u_log_new_page_print(&log, stdout);
		u_log_context_destroy(&log);
	}

	return rtex;
}
static enum radeon_surf_mode
si_choose_tiling(struct si_screen *sscreen,
		 const struct pipe_resource *templ, bool tc_compatible_htile)
{
	const struct util_format_description *desc = util_format_description(templ->format);
	bool force_tiling = templ->flags & SI_RESOURCE_FLAG_FORCE_TILING;
	bool is_depth_stencil = util_format_is_depth_or_stencil(templ->format) &&
				!(templ->flags & SI_RESOURCE_FLAG_FLUSHED_DEPTH);

	/* MSAA resources must be 2D tiled. */
	if (templ->nr_samples > 1)
		return RADEON_SURF_MODE_2D;

	/* Transfer resources should be linear. */
	if (templ->flags & SI_RESOURCE_FLAG_TRANSFER)
		return RADEON_SURF_MODE_LINEAR_ALIGNED;

	/* Avoid Z/S decompress blits by forcing TC-compatible HTILE on VI,
	 * which requires 2D tiling.
	 */
	if (sscreen->info.chip_class == VI && tc_compatible_htile)
		return RADEON_SURF_MODE_2D;

	/* Handle common candidates for the linear mode.
	 * Compressed textures and DB surfaces must always be tiled.
	 */
	if (!force_tiling &&
	    !is_depth_stencil &&
	    !util_format_is_compressed(templ->format)) {
		if (sscreen->debug_flags & DBG(NO_TILING))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Tiling doesn't work with the 422 (SUBSAMPLED) formats. */
		if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Cursors are linear on SI.
		 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
		if (templ->bind & PIPE_BIND_CURSOR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		if (templ->bind & PIPE_BIND_LINEAR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures with a very small height are recommended to be linear. */
		if (templ->target == PIPE_TEXTURE_1D ||
		    templ->target == PIPE_TEXTURE_1D_ARRAY ||
		    /* Only very thin and long 2D textures should benefit from
		     * linear_aligned. */
		    (templ->width0 > 8 && templ->height0 <= 2))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures likely to be mapped often. */
		if (templ->usage == PIPE_USAGE_STAGING ||
		    templ->usage == PIPE_USAGE_STREAM)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Make small textures 1D tiled. */
	if (templ->width0 <= 16 || templ->height0 <= 16 ||
	    (sscreen->debug_flags & DBG(NO_2D_TILING)))
		return RADEON_SURF_MODE_1D;

	/* The allocator will switch to 1D if needed. */
	return RADEON_SURF_MODE_2D;
}
struct pipe_resource *si_texture_create(struct pipe_screen *screen,
					const struct pipe_resource *templ)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct radeon_surf surface = {0};
	bool is_flushed_depth = templ->flags & SI_RESOURCE_FLAG_FLUSHED_DEPTH;
	bool tc_compatible_htile =
		sscreen->info.chip_class >= VI &&
		/* There are issues with TC-compatible HTILE on Tonga (and
		 * Iceland is the same design), and documented bug workarounds
		 * don't help. For example, this fails:
		 *   piglit/bin/tex-miplevel-selection 'texture()' 2DShadow -auto
		 */
		sscreen->info.family != CHIP_TONGA &&
		sscreen->info.family != CHIP_ICELAND &&
		(templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY) &&
		!(sscreen->debug_flags & DBG(NO_HYPERZ)) &&
		!is_flushed_depth &&
		templ->nr_samples <= 1 && /* TC-compat HTILE is less efficient with MSAA */
		util_format_is_depth_or_stencil(templ->format);
	int r;

	r = si_init_surface(sscreen, &surface, templ,
			    si_choose_tiling(sscreen, templ, tc_compatible_htile),
			    0, 0, false, false, is_flushed_depth,
			    tc_compatible_htile);
	if (r)
		return NULL;

	return (struct pipe_resource *)
	       si_texture_create_object(screen, templ, NULL, &surface);
}
static struct pipe_resource *si_texture_from_handle(struct pipe_screen *screen,
						    const struct pipe_resource *templ,
						    struct winsys_handle *whandle,
						    unsigned usage)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct pb_buffer *buf = NULL;
	unsigned stride = 0, offset = 0;
	enum radeon_surf_mode array_mode;
	struct radeon_surf surface = {};
	int r;
	struct radeon_bo_metadata metadata = {};
	struct r600_texture *rtex;
	bool is_scanout;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	    templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	buf = sscreen->ws->buffer_from_handle(sscreen->ws, whandle, &stride, &offset);
	if (!buf)
		return NULL;

	sscreen->ws->buffer_get_metadata(buf, &metadata);
	si_surface_import_metadata(sscreen, &surface, &metadata,
				   &array_mode, &is_scanout);

	r = si_init_surface(sscreen, &surface, templ, array_mode, stride,
			    offset, true, is_scanout, false, false);
	if (r)
		return NULL;

	rtex = si_texture_create_object(screen, templ, buf, &surface);
	if (!rtex)
		return NULL;

	rtex->resource.b.is_shared = true;
	rtex->resource.external_usage = usage;

	si_apply_opaque_metadata(sscreen, rtex, &metadata);

	assert(rtex->surface.tile_swizzle == 0);
	return &rtex->resource.b.b;
}
bool si_init_flushed_depth_texture(struct pipe_context *ctx,
				   struct pipe_resource *texture,
				   struct r600_texture **staging)
{
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct pipe_resource resource;
	struct r600_texture **flushed_depth_texture = staging ?
			staging : &rtex->flushed_depth_texture;
	enum pipe_format pipe_format = texture->format;

	if (rtex->flushed_depth_texture)
		return true; /* it's ready */

	if (!rtex->can_sample_z && rtex->can_sample_s) {
		switch (pipe_format) {
		case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
			/* Save memory by not allocating the S plane. */
			pipe_format = PIPE_FORMAT_Z32_FLOAT;
			break;
		case PIPE_FORMAT_Z24_UNORM_S8_UINT:
		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
			/* Save memory bandwidth by not copying the
			 * stencil part during flush.
			 *
			 * This potentially increases memory bandwidth
			 * if an application uses both Z and S texturing
			 * simultaneously (a flushed Z24S8 texture
			 * would be stored compactly), but how often
			 * does that really happen?
			 */
			pipe_format = PIPE_FORMAT_Z24X8_UNORM;
			break;
		default:;
		}
	} else if (!rtex->can_sample_s && rtex->can_sample_z) {
		assert(util_format_has_stencil(util_format_description(pipe_format)));

		/* DB->CB copies to an 8bpp surface don't work. */
		pipe_format = PIPE_FORMAT_X24S8_UINT;
	}

	memset(&resource, 0, sizeof(resource));
	resource.target = texture->target;
	resource.format = pipe_format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
	resource.flags = texture->flags | SI_RESOURCE_FLAG_FLUSHED_DEPTH;

	if (staging)
		resource.flags |= SI_RESOURCE_FLAG_TRANSFER;

	*flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (*flushed_depth_texture == NULL) {
		PRINT_ERR("failed to create temporary texture to hold flushed depth\n");
		return false;
	}
	return true;
}
/* Initialize the pipe_resource descriptor to be of the same size as the box,
 * which is supposed to hold a subregion of the texture "orig" at the given
 * mipmap level.
 */
static void si_init_temp_resource_from_box(struct pipe_resource *res,
					   struct pipe_resource *orig,
					   const struct pipe_box *box,
					   unsigned level, unsigned flags)
{
	memset(res, 0, sizeof(*res));
	res->format = orig->format;
	res->width0 = box->width;
	res->height0 = box->height;
	res->depth0 = 1;
	res->array_size = 1;
	res->usage = flags & SI_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	res->flags = flags;

	/* We must set the correct texture target and dimensions for a 3D box. */
	if (box->depth > 1 && util_max_layer(orig, level) > 0) {
		res->target = PIPE_TEXTURE_2D_ARRAY;
		res->array_size = box->depth;
	} else {
		res->target = PIPE_TEXTURE_2D;
	}
}
static bool si_can_invalidate_texture(struct si_screen *sscreen,
				      struct r600_texture *rtex,
				      unsigned transfer_usage,
				      const struct pipe_box *box)
{
	return !rtex->resource.b.is_shared &&
	       !(transfer_usage & PIPE_TRANSFER_READ) &&
	       rtex->resource.b.b.last_level == 0 &&
	       util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
						box->x, box->y, box->z,
						box->width, box->height,
						box->depth);
}
*sctx
,
1639 struct r600_texture
*rtex
)
1641 struct si_screen
*sscreen
= sctx
->screen
;
1643 /* There is no point in discarding depth and tiled buffers. */
1644 assert(!rtex
->is_depth
);
1645 assert(rtex
->surface
.is_linear
);
1647 /* Reallocate the buffer in the same pipe_resource. */
1648 si_alloc_resource(sscreen
, &rtex
->resource
);
1650 /* Initialize the CMASK base address (needed even without CMASK). */
1651 rtex
->cmask
.base_address_reg
=
1652 (rtex
->resource
.gpu_address
+ rtex
->cmask
.offset
) >> 8;
1654 p_atomic_inc(&sscreen
->dirty_tex_counter
);
1656 sctx
->num_alloc_tex_transfer_bytes
+= rtex
->size
;
static void *si_texture_transfer_map(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     unsigned level,
				     unsigned usage,
				     const struct pipe_box *box,
				     struct pipe_transfer **ptransfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct r600_transfer *trans;
	struct r600_resource *buf;
	unsigned offset = 0;
	char *map;
	bool use_staging_texture = false;

	assert(!(texture->flags & SI_RESOURCE_FLAG_TRANSFER));
	assert(box->width && box->height && box->depth);

	/* Depth textures use staging unconditionally. */
	if (!rtex->is_depth) {
		/* Degrade the tile mode if we get too many transfers on APUs.
		 * On dGPUs, the staging texture is always faster.
		 * Only count uploads that are at least 4x4 pixels large.
		 */
		if (!sctx->screen->info.has_dedicated_vram &&
		    level == 0 &&
		    box->width >= 4 && box->height >= 4 &&
		    p_atomic_inc_return(&rtex->num_level0_transfers) == 10) {
			bool can_invalidate =
				si_can_invalidate_texture(sctx->screen, rtex,
							  usage, box);

			si_reallocate_texture_inplace(sctx, rtex,
						      PIPE_BIND_LINEAR,
						      can_invalidate);
		}

		/* Tiled textures need to be converted into a linear texture for CPU
		 * access. The staging texture is always linear and is placed in GART.
		 *
		 * Reading from VRAM or GTT WC is slow, always use the staging
		 * texture in this case.
		 *
		 * Use the staging texture for uploads if the underlying BO
		 * is busy.
		 */
		if (!rtex->surface.is_linear)
			use_staging_texture = true;
		else if (usage & PIPE_TRANSFER_READ)
			use_staging_texture =
				rtex->resource.domains & RADEON_DOMAIN_VRAM ||
				rtex->resource.flags & RADEON_FLAG_GTT_WC;
		/* Write & linear only: */
		else if (si_rings_is_buffer_referenced(sctx, rtex->resource.buf,
						       RADEON_USAGE_READWRITE) ||
			 !sctx->ws->buffer_wait(rtex->resource.buf, 0,
						RADEON_USAGE_READWRITE)) {
			/* It's busy. */
			if (si_can_invalidate_texture(sctx->screen, rtex,
						      usage, box))
				si_texture_invalidate_storage(sctx, rtex);
			else
				use_staging_texture = true;
		}
	}

	trans = CALLOC_STRUCT(r600_transfer);
	if (!trans)
		return NULL;
	pipe_resource_reference(&trans->b.b.resource, texture);
	trans->b.b.level = level;
	trans->b.b.usage = usage;
	trans->b.b.box = *box;

	if (rtex->is_depth) {
		struct r600_texture *staging_depth;

		if (rtex->resource.b.b.nr_samples > 1) {
			/* MSAA depth buffers need to be converted to single sample buffers.
			 *
			 * Mapping MSAA depth buffers can occur if ReadPixels is called
			 * with a multisample GLX visual.
			 *
			 * First downsample the depth buffer to a temporary texture,
			 * then decompress the temporary one to staging.
			 *
			 * Only the region being mapped is transferred.
			 */
			struct pipe_resource resource;

			si_init_temp_resource_from_box(&resource, texture, box, level, 0);

			if (!si_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
				PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
				goto fail_trans;
			}

			if (usage & PIPE_TRANSFER_READ) {
				struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);

				if (!temp) {
					PRINT_ERR("failed to create a temporary depth texture\n");
					goto fail_trans;
				}

				si_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
				si_blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
							 0, 0, 0, box->depth, 0, 0);
				pipe_resource_reference(&temp, NULL);
			}

			/* Just get the strides. */
			si_texture_get_offset(sctx->screen, staging_depth, level, NULL,
					      &trans->b.b.stride,
					      &trans->b.b.layer_stride);
		} else {
			/* XXX: only readback the rectangle which is being mapped? */
			/* XXX: when discard is true, no need to read back from depth texture */
			if (!si_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
				PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
				goto fail_trans;
			}

			si_blit_decompress_depth(ctx, rtex, staging_depth,
						 level, level,
						 box->z, box->z + box->depth - 1,
						 0, 0);

			offset = si_texture_get_offset(sctx->screen, staging_depth,
						       level, box,
						       &trans->b.b.stride,
						       &trans->b.b.layer_stride);
		}

		trans->staging = &staging_depth->resource;
		buf = trans->staging;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct r600_texture *staging;

		si_init_temp_resource_from_box(&resource, texture, box, level,
					       SI_RESOURCE_FLAG_TRANSFER);
		resource.usage = (usage & PIPE_TRANSFER_READ) ?
			PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;

		/* Create the temporary texture. */
		staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
		if (!staging) {
			PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
			goto fail_trans;
		}
		trans->staging = &staging->resource;

		/* Just get the strides. */
		si_texture_get_offset(sctx->screen, staging, 0, NULL,
				      &trans->b.b.stride,
				      &trans->b.b.layer_stride);

		if (usage & PIPE_TRANSFER_READ)
			si_copy_to_staging_texture(ctx, trans);
		else
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

		buf = trans->staging;
	} else {
		/* the resource is mapped directly */
		offset = si_texture_get_offset(sctx->screen, rtex, level, box,
					       &trans->b.b.stride,
					       &trans->b.b.layer_stride);
		buf = &rtex->resource;
	}

	if (!(map = si_buffer_map_sync_with_rings(sctx, buf, usage)))
		goto fail_trans;

	*ptransfer = &trans->b.b;
	return map + offset;

fail_trans:
	r600_resource_reference(&trans->staging, NULL);
	pipe_resource_reference(&trans->b.b.resource, NULL);
	FREE(trans);
	return NULL;
}
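
/* Unmap a texture transfer: if the transfer wrote through a staging
 * texture, copy the staging contents back into the real resource, then
 * release the staging resource and possibly flush the gfx IB (see the
 * heuristic below).
 */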
static void si_texture_transfer_unmap(struct pipe_context *ctx,
				      struct pipe_transfer *transfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct pipe_resource *texture = transfer->resource;
	struct r600_texture *rtex = (struct r600_texture*)texture;

	if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
		if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &rtransfer->staging->b.b, transfer->level,
						  &transfer->box);
		} else {
			si_copy_from_staging_texture(ctx, rtransfer);
		}
	}

	if (rtransfer->staging) {
		sctx->num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size;
		r600_resource_reference(&rtransfer->staging, NULL);
	}

	/* Heuristic for {upload, draw, upload, draw, ..}:
	 *
	 * Flush the gfx IB if we've allocated too much texture storage.
	 *
	 * The idea is that we don't want to build IBs that use too much
	 * memory and put pressure on the kernel memory manager and we also
	 * want to make temporary and invalidated buffers go idle ASAP to
	 * decrease the total memory usage or make them reusable. The memory
	 * usage will be slightly higher than given here because of the buffer
	 * cache in the winsys.
	 *
	 * The result is that the kernel memory manager is never a bottleneck.
	 */
	if (sctx->num_alloc_tex_transfer_bytes > sctx->screen->info.gart_size / 4) {
		si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
		sctx->num_alloc_tex_transfer_bytes = 0;
	}

	pipe_resource_reference(&transfer->resource, NULL);
	FREE(transfer);
}
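
/* Texture callbacks dispatched through the common u_resource vtable. */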
static const struct u_resource_vtbl si_texture_vtbl =
{
	NULL,				/* get_handle */
	si_texture_destroy,		/* resource_destroy */
	si_texture_transfer_map,	/* transfer_map */
	u_default_transfer_flush_region, /* transfer_flush_region */
	si_texture_transfer_unmap,	/* transfer_unmap */
};

/* Return whether it's allowed to reinterpret one format as another with DCC
 * enabled.
 */
bool vi_dcc_formats_compatible(enum pipe_format format1,
			       enum pipe_format format2)
{
	const struct util_format_description *desc1, *desc2;

	/* No format change - exit early. */
	if (format1 == format2)
		return true;

	format1 = si_simplify_cb_format(format1);
	format2 = si_simplify_cb_format(format2);

	/* Check again after format adjustments. */
	if (format1 == format2)
		return true;

	desc1 = util_format_description(format1);
	desc2 = util_format_description(format2);

	if (desc1->layout != UTIL_FORMAT_LAYOUT_PLAIN ||
	    desc2->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return false;

	/* Float and non-float are totally incompatible. */
	if ((desc1->channel[0].type == UTIL_FORMAT_TYPE_FLOAT) !=
	    (desc2->channel[0].type == UTIL_FORMAT_TYPE_FLOAT))
		return false;

	/* Channel sizes must match across DCC formats.
	 * Comparing just the first 2 channels should be enough.
	 */
	if (desc1->channel[0].size != desc2->channel[0].size ||
	    (desc1->nr_channels >= 2 &&
	     desc1->channel[1].size != desc2->channel[1].size))
		return false;

	/* Everything below is not needed if the driver never uses the DCC
	 * clear code with the value of 1.
	 */

	/* If the clear values are all 1 or all 0, this constraint can be
	 * ignored. */
	if (vi_alpha_is_on_msb(format1) != vi_alpha_is_on_msb(format2))
		return false;

	/* Channel types must match if the clear value of 1 is used.
	 * The type categories are only float, signed, unsigned.
	 * NORM and INT are always compatible.
	 */
	if (desc1->channel[0].type != desc2->channel[0].type ||
	    (desc1->nr_channels >= 2 &&
	     desc1->channel[1].type != desc2->channel[1].type))
		return false;

	return true;
}

bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex,
				     unsigned level,
				     enum pipe_format view_format)
{
	struct r600_texture *rtex = (struct r600_texture *)tex;

	return vi_dcc_enabled(rtex, level) &&
	       !vi_dcc_formats_compatible(tex->format, view_format);
}

/* This can't be merged with the above function, because
 * vi_dcc_formats_compatible should be called only when DCC is enabled. */
void vi_disable_dcc_if_incompatible_format(struct si_context *sctx,
					   struct pipe_resource *tex,
					   unsigned level,
					   enum pipe_format view_format)
{
	struct r600_texture *rtex = (struct r600_texture *)tex;

	if (vi_dcc_formats_are_incompatible(tex, level, view_format))
		if (!si_texture_disable_dcc(sctx, (struct r600_texture*)tex))
			si_decompress_dcc(sctx, rtex);
}
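
/* Create a pipe_surface with explicitly given dimensions. width0/height0 are
 * the base (level 0) dimensions and width/height the dimensions of the
 * selected mip level; si_create_surface passes block-adjusted values here
 * when the view format has a different block size than the resource format.
 */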
struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
					      struct pipe_resource *texture,
					      const struct pipe_surface *templ,
					      unsigned width0, unsigned height0,
					      unsigned width, unsigned height)
{
	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);

	if (!surface)
		return NULL;

	assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
	assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));

	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.context = pipe;
	surface->base.format = templ->format;
	surface->base.width = width;
	surface->base.height = height;
	surface->base.u = templ->u;

	surface->width0 = width0;
	surface->height0 = height0;

	surface->dcc_incompatible =
		texture->target != PIPE_BUFFER &&
		vi_dcc_formats_are_incompatible(texture, templ->u.tex.level,
						templ->format);
	return &surface->base;
}
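
/* The standard pipe_context::create_surface hook. It rescales the surface
 * dimensions when the view format uses a different block size than the
 * resource format (e.g. a compressed texture viewed as an uncompressed
 * format).
 */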
static struct pipe_surface *si_create_surface(struct pipe_context *pipe,
					      struct pipe_resource *tex,
					      const struct pipe_surface *templ)
{
	unsigned level = templ->u.tex.level;
	unsigned width = u_minify(tex->width0, level);
	unsigned height = u_minify(tex->height0, level);
	unsigned width0 = tex->width0;
	unsigned height0 = tex->height0;

	if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
		const struct util_format_description *tex_desc
			= util_format_description(tex->format);
		const struct util_format_description *templ_desc
			= util_format_description(templ->format);

		assert(tex_desc->block.bits == templ_desc->block.bits);

		/* Adjust size of surface if and only if the block width or
		 * height is changed. */
		if (tex_desc->block.width != templ_desc->block.width ||
		    tex_desc->block.height != templ_desc->block.height) {
			unsigned nblks_x = util_format_get_nblocksx(tex->format, width);
			unsigned nblks_y = util_format_get_nblocksy(tex->format, height);

			width = nblks_x * templ_desc->block.width;
			height = nblks_y * templ_desc->block.height;

			width0 = util_format_get_nblocksx(tex->format, width0);
			height0 = util_format_get_nblocksy(tex->format, height0);
		}
	}

	return si_create_surface_custom(pipe, tex, templ,
					width0, height0,
					width, height);
}

static void si_surface_destroy(struct pipe_context *pipe,
			       struct pipe_surface *surface)
{
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}
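
/* Translate the channel order of a gallium format into the corresponding
 * hardware component swap setting (V_028C70_SWAP_*), optionally accounting
 * for byte swapping on big-endian CPUs via do_endian_swap.
 */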
unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);

#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_028C70_SWAP_STD;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return ~0U;

	switch (desc->nr_channels) {
	case 1:
		if (HAS_SWIZZLE(0,X))
			return V_028C70_SWAP_STD; /* X___ */
		else if (HAS_SWIZZLE(3,X))
			return V_028C70_SWAP_ALT_REV; /* ___X */
		break;
	case 2:
		if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
		    (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
		    (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
			return V_028C70_SWAP_STD; /* XY__ */
		else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
			 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
			 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
			/* YX__ */
			return (do_endian_swap ? V_028C70_SWAP_STD : V_028C70_SWAP_STD_REV);
		else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
			return V_028C70_SWAP_ALT; /* X__Y */
		else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
			return V_028C70_SWAP_ALT_REV; /* Y__X */
		break;
	case 3:
		if (HAS_SWIZZLE(0,X))
			return (do_endian_swap ? V_028C70_SWAP_STD_REV : V_028C70_SWAP_STD);
		else if (HAS_SWIZZLE(0,Z))
			return V_028C70_SWAP_STD_REV; /* ZYX */
		break;
	case 4:
		/* check the middle channels, the 1st and 4th channel can be NONE */
		if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
			return V_028C70_SWAP_STD; /* XYZW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
			return V_028C70_SWAP_STD_REV; /* WZYX */
		} else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
			return V_028C70_SWAP_ALT; /* ZYXW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
			/* YZWX */
			if (desc->is_array)
				return V_028C70_SWAP_ALT_REV;
			else
				return (do_endian_swap ? V_028C70_SWAP_ALT : V_028C70_SWAP_ALT_REV);
		}
		break;
	}
	return ~0U;
}

/* PIPELINE_STAT-BASED DCC ENABLEMENT FOR DISPLAYABLE SURFACES */
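
/* Each context keeps a small array of dcc_stats slots, one per recently used
 * displayable texture. Every slot rotates three pipeline-statistics queries:
 * the result of the oldest one approximates the number of fullscreen draws
 * per frame (ps_invocations / (width * height)), which
 * vi_should_enable_separate_dcc() compares against a threshold to decide
 * whether keeping DCC enabled is worth the decompression cost.
 */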
static void vi_dcc_clean_up_context_slot(struct si_context *sctx,
					 int slot)
{
	int i;

	if (sctx->dcc_stats[slot].query_active)
		vi_separate_dcc_stop_query(sctx,
					   sctx->dcc_stats[slot].tex);

	for (i = 0; i < ARRAY_SIZE(sctx->dcc_stats[slot].ps_stats); i++)
		if (sctx->dcc_stats[slot].ps_stats[i]) {
			sctx->b.destroy_query(&sctx->b,
					      sctx->dcc_stats[slot].ps_stats[i]);
			sctx->dcc_stats[slot].ps_stats[i] = NULL;
		}

	r600_texture_reference(&sctx->dcc_stats[slot].tex, NULL);
}

/**
 * Return the per-context slot where DCC statistics queries for the texture live.
 */
static unsigned vi_get_context_dcc_stats_index(struct si_context *sctx,
					       struct r600_texture *tex)
{
	int i, empty_slot = -1;

	/* Remove zombie textures (textures kept alive by this array only). */
	for (i = 0; i < ARRAY_SIZE(sctx->dcc_stats); i++)
		if (sctx->dcc_stats[i].tex &&
		    sctx->dcc_stats[i].tex->resource.b.b.reference.count == 1)
			vi_dcc_clean_up_context_slot(sctx, i);

	/* Find the texture. */
	for (i = 0; i < ARRAY_SIZE(sctx->dcc_stats); i++) {
		/* Return if found. */
		if (sctx->dcc_stats[i].tex == tex) {
			sctx->dcc_stats[i].last_use_timestamp = os_time_get();
			return i;
		}

		/* Record the first seen empty slot. */
		if (empty_slot == -1 && !sctx->dcc_stats[i].tex)
			empty_slot = i;
	}

	/* Not found. Remove the oldest member to make space in the array. */
	if (empty_slot == -1) {
		int oldest_slot = 0;

		/* Find the oldest slot. */
		for (i = 1; i < ARRAY_SIZE(sctx->dcc_stats); i++)
			if (sctx->dcc_stats[oldest_slot].last_use_timestamp >
			    sctx->dcc_stats[i].last_use_timestamp)
				oldest_slot = i;

		/* Clean up the oldest slot. */
		vi_dcc_clean_up_context_slot(sctx, oldest_slot);
		empty_slot = oldest_slot;
	}

	/* Add the texture to the new slot. */
	r600_texture_reference(&sctx->dcc_stats[empty_slot].tex, tex);
	sctx->dcc_stats[empty_slot].last_use_timestamp = os_time_get();
	return empty_slot;
}
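
/* Create a pipeline-statistics query whose begin_query resumes accumulation
 * instead of discarding previous results (SI_QUERY_HW_FLAG_BEGIN_RESUMES),
 * so the query can be paused and resumed across color buffer rebinds.
 */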
static struct pipe_query *
vi_create_resuming_pipestats_query(struct si_context *sctx)
{
	struct si_query_hw *query = (struct si_query_hw*)
		sctx->b.create_query(&sctx->b, PIPE_QUERY_PIPELINE_STATISTICS, 0);

	query->flags |= SI_QUERY_HW_FLAG_BEGIN_RESUMES;
	return (struct pipe_query*)query;
}

/**
 * Called when binding a color buffer.
 */
void vi_separate_dcc_start_query(struct si_context *sctx,
				 struct r600_texture *tex)
{
	unsigned i = vi_get_context_dcc_stats_index(sctx, tex);

	assert(!sctx->dcc_stats[i].query_active);

	if (!sctx->dcc_stats[i].ps_stats[0])
		sctx->dcc_stats[i].ps_stats[0] = vi_create_resuming_pipestats_query(sctx);

	/* begin or resume the query */
	sctx->b.begin_query(&sctx->b, sctx->dcc_stats[i].ps_stats[0]);
	sctx->dcc_stats[i].query_active = true;
}

/**
 * Called when unbinding a color buffer.
 */
void vi_separate_dcc_stop_query(struct si_context *sctx,
				struct r600_texture *tex)
{
	unsigned i = vi_get_context_dcc_stats_index(sctx, tex);

	assert(sctx->dcc_stats[i].query_active);
	assert(sctx->dcc_stats[i].ps_stats[0]);

	/* pause or end the query */
	sctx->b.end_query(&sctx->b, sctx->dcc_stats[i].ps_stats[0]);
	sctx->dcc_stats[i].query_active = false;
}

static bool vi_should_enable_separate_dcc(struct r600_texture *tex)
{
	/* The minimum number of fullscreen draws per frame that is required
	 * to enable DCC. */
	return tex->ps_draw_ratio + tex->num_slow_clears >= 5;
}

/* Called by fast clear. */
void vi_separate_dcc_try_enable(struct si_context *sctx,
				struct r600_texture *tex)
{
	/* The intent is to use this with shared displayable back buffers,
	 * but it's not strictly limited only to them.
	 */
	if (!tex->resource.b.is_shared ||
	    !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) ||
	    tex->resource.b.b.target != PIPE_TEXTURE_2D ||
	    tex->resource.b.b.last_level > 0 ||
	    !tex->surface.dcc_size)
		return;

	if (tex->dcc_offset)
		return; /* already enabled */

	/* Enable the DCC stat gathering. */
	if (!tex->dcc_gather_statistics) {
		tex->dcc_gather_statistics = true;
		vi_separate_dcc_start_query(sctx, tex);
	}

	if (!vi_should_enable_separate_dcc(tex))
		return; /* stats show that DCC decompression is too expensive */

	assert(tex->surface.num_dcc_levels);
	assert(!tex->dcc_separate_buffer);

	si_texture_discard_cmask(sctx->screen, tex);

	/* Get a DCC buffer. */
	if (tex->last_dcc_separate_buffer) {
		assert(tex->dcc_gather_statistics);
		assert(!tex->dcc_separate_buffer);
		tex->dcc_separate_buffer = tex->last_dcc_separate_buffer;
		tex->last_dcc_separate_buffer = NULL;
	} else {
		tex->dcc_separate_buffer =
			si_aligned_buffer_create(sctx->b.screen,
						 SI_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT,
						 tex->surface.dcc_size,
						 tex->surface.dcc_alignment);
		if (!tex->dcc_separate_buffer)
			return;
	}

	/* dcc_offset is the absolute GPUVM address. */
	tex->dcc_offset = tex->dcc_separate_buffer->gpu_address;

	/* no need to flag anything since this is called by fast clear that
	 * flags framebuffer state
	 */
}

/**
 * Called by pipe_context::flush_resource, the place where DCC decompression
 * takes place.
 */
void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
					     struct r600_texture *tex)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct pipe_query *tmp;
	unsigned i = vi_get_context_dcc_stats_index(sctx, tex);
	bool query_active = sctx->dcc_stats[i].query_active;
	bool disable = false;

	if (sctx->dcc_stats[i].ps_stats[2]) {
		union pipe_query_result result;

		/* Read the results. */
		ctx->get_query_result(ctx, sctx->dcc_stats[i].ps_stats[2],
				      true, &result);
		si_query_hw_reset_buffers(sctx,
					  (struct si_query_hw*)
					  sctx->dcc_stats[i].ps_stats[2]);

		/* Compute the approximate number of fullscreen draws. */
		tex->ps_draw_ratio =
			result.pipeline_statistics.ps_invocations /
			(tex->resource.b.b.width0 * tex->resource.b.b.height0);
		sctx->last_tex_ps_draw_ratio = tex->ps_draw_ratio;

		disable = tex->dcc_separate_buffer &&
			  !vi_should_enable_separate_dcc(tex);
	}

	tex->num_slow_clears = 0;

	/* stop the statistics query for ps_stats[0] */
	if (query_active)
		vi_separate_dcc_stop_query(sctx, tex);

	/* Move the queries in the queue by one. */
	tmp = sctx->dcc_stats[i].ps_stats[2];
	sctx->dcc_stats[i].ps_stats[2] = sctx->dcc_stats[i].ps_stats[1];
	sctx->dcc_stats[i].ps_stats[1] = sctx->dcc_stats[i].ps_stats[0];
	sctx->dcc_stats[i].ps_stats[0] = tmp;

	/* create and start a new query as ps_stats[0] */
	if (query_active)
		vi_separate_dcc_start_query(sctx, tex);

	if (disable) {
		assert(!tex->last_dcc_separate_buffer);
		tex->last_dcc_separate_buffer = tex->dcc_separate_buffer;
		tex->dcc_separate_buffer = NULL;
		tex->dcc_offset = 0;
		/* no need to flag anything since this is called after
		 * decompression that re-sets framebuffer state
		 */
	}
}
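
/* Import a winsys buffer handle as a pipe_memory_object (the gallium path
 * used for external memory imports, e.g. EXT_memory_object).
 */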
static struct pipe_memory_object *
si_memobj_from_handle(struct pipe_screen *screen,
		      struct winsys_handle *whandle,
		      bool dedicated)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct r600_memory_object *memobj = CALLOC_STRUCT(r600_memory_object);
	struct pb_buffer *buf = NULL;
	uint32_t stride, offset;

	if (!memobj)
		return NULL;

	buf = sscreen->ws->buffer_from_handle(sscreen->ws, whandle,
					      &stride, &offset);
	if (!buf) {
		free(memobj);
		return NULL;
	}

	memobj->b.dedicated = dedicated;
	memobj->buf = buf;
	memobj->stride = stride;
	memobj->offset = offset;

	return (struct pipe_memory_object *)memobj;
}

static void
si_memobj_destroy(struct pipe_screen *screen,
		  struct pipe_memory_object *_memobj)
{
	struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;

	pb_reference(&memobj->buf, NULL);
	free(memobj);
}

static struct pipe_resource *
si_texture_from_memobj(struct pipe_screen *screen,
		       const struct pipe_resource *templ,
		       struct pipe_memory_object *_memobj,
		       uint64_t offset)
{
	int r;
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
	struct r600_texture *rtex;
	struct radeon_surf surface = {};
	struct radeon_bo_metadata metadata = {};
	enum radeon_surf_mode array_mode;
	bool is_scanout;
	struct pb_buffer *buf = NULL;

	if (memobj->b.dedicated) {
		sscreen->ws->buffer_get_metadata(memobj->buf, &metadata);
		si_surface_import_metadata(sscreen, &surface, &metadata,
					   &array_mode, &is_scanout);
	} else {
		/**
		 * The bo metadata is unset for un-dedicated images. So we fall
		 * back to linear. See answer to question 5 of the
		 * VK_KHX_external_memory spec for some details.
		 *
		 * It is possible that this case isn't going to work if the
		 * surface pitch isn't correctly aligned by default.
		 *
		 * In order to support it correctly we require multi-image
		 * metadata to be synchronized between radv and radeonsi. The
		 * semantics of associating multiple image metadata to a memory
		 * object on the vulkan export side are not concretely defined
		 * either.
		 *
		 * All the use cases we are aware of at the moment for memory
		 * objects use dedicated allocations. So let's keep the initial
		 * implementation simple.
		 *
		 * A possible alternative is to attempt to reconstruct the
		 * tiling information when the TexParameter TEXTURE_TILING_EXT
		 * is set.
		 */
		array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
		is_scanout = false;
	}

	r = si_init_surface(sscreen, &surface, templ,
			    array_mode, memobj->stride,
			    offset, true, is_scanout,
			    false, false);
	if (r)
		return NULL;

	rtex = si_texture_create_object(screen, templ, memobj->buf, &surface);
	if (!rtex)
		return NULL;

	/* r600_texture_create_object doesn't increment refcount of
	 * memobj->buf, so increment it here.
	 */
	pb_reference(&buf, memobj->buf);

	rtex->resource.b.is_shared = true;
	rtex->resource.external_usage = PIPE_HANDLE_USAGE_READ_WRITE;

	si_apply_opaque_metadata(sscreen, rtex, &metadata);

	return &rtex->resource.b.b;
}
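
/* Report whether an existing resource already satisfies the given bind
 * flags; only PIPE_BIND_LINEAR and PIPE_BIND_SCANOUT are meaningful here.
 */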
static bool si_check_resource_capability(struct pipe_screen *screen,
					 struct pipe_resource *resource,
					 unsigned bind)
{
	struct r600_texture *tex = (struct r600_texture*)resource;

	/* Buffers only support the linear flag. */
	if (resource->target == PIPE_BUFFER)
		return (bind & ~PIPE_BIND_LINEAR) == 0;

	if (bind & PIPE_BIND_LINEAR && !tex->surface.is_linear)
		return false;

	if (bind & PIPE_BIND_SCANOUT && !tex->surface.is_displayable)
		return false;

	/* TODO: PIPE_BIND_CURSOR - do we care? */
	return true;
}

void si_init_screen_texture_functions(struct si_screen *sscreen)
{
	sscreen->b.resource_from_handle = si_texture_from_handle;
	sscreen->b.resource_get_handle = si_texture_get_handle;
	sscreen->b.resource_from_memobj = si_texture_from_memobj;
	sscreen->b.memobj_create_from_handle = si_memobj_from_handle;
	sscreen->b.memobj_destroy = si_memobj_destroy;
	sscreen->b.check_resource_capability = si_check_resource_capability;
}

void si_init_context_texture_functions(struct si_context *sctx)
{
	sctx->b.create_surface = si_create_surface;
	sctx->b.surface_destroy = si_surface_destroy;
}