/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe_common.h"
#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include <errno.h>
#include <inttypes.h>
static void r600_texture_discard_dcc(struct r600_common_screen *rscreen,
				     struct r600_texture *rtex);
static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex);
bool r600_prepare_for_dma_blit(struct r600_common_context *rctx,
			       struct r600_texture *rdst,
			       unsigned dst_level, unsigned dstx,
			       unsigned dsty, unsigned dstz,
			       struct r600_texture *rsrc,
			       unsigned src_level,
			       const struct pipe_box *src_box)
{
	if (util_format_get_blocksizebits(rdst->resource.b.b.format) !=
	    util_format_get_blocksizebits(rsrc->resource.b.b.format))
		return false;

	/* MSAA: Blits don't exist in the real world. */
	if (rsrc->resource.b.b.nr_samples > 1 ||
	    rdst->resource.b.b.nr_samples > 1)
		return false;

	/* Depth-stencil surfaces:
	 *   When dst is linear, the DB->CB copy preserves HTILE.
	 *   When dst is tiled, the 3D path must be used to update HTILE.
	 */
	if (rsrc->is_depth || rdst->is_depth)
		return false;

	/* DCC:
	 *   src: Use the 3D path. DCC decompression is expensive.
	 *   dst: If overwriting the whole texture, discard DCC and use SDMA.
	 *        Otherwise, use the 3D path.
	 */
	if (rdst->dcc_offset) {
		/* We can't discard DCC if the texture has been exported. */
		if (rdst->resource.is_shared ||
		    !util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
						      dstx, dsty, dstz, src_box->width,
						      src_box->height, src_box->depth))
			return false;

		r600_texture_discard_dcc(rctx->screen, rdst);
	}

	/* CMASK:
	 *   src: Both texture and SDMA paths need decompression. Use SDMA.
	 *   dst: If overwriting the whole texture, discard CMASK and use
	 *        SDMA. Otherwise, use the 3D path.
	 */
	if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
		if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
						      dstx, dsty, dstz, src_box->width,
						      src_box->height, src_box->depth))
			return false;

		r600_texture_discard_cmask(rctx->screen, rdst);
	}

	/* All requirements are met. Prepare textures for SDMA. */
	if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
		rctx->b.flush_resource(&rctx->b, &rsrc->resource.b.b);

	assert(!(rsrc->dirty_level_mask & (1 << src_level)));
	assert(!(rdst->dirty_level_mask & (1 << dst_level)));

	return true;
}
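/* Usage sketch (illustrative only, not a call site in this file): an SDMA
 * copy path would gate itself on this check before emitting a DMA packet:
 *
 *	if (r600_prepare_for_dma_blit(rctx, rdst, dst_level, dstx, dsty, dstz,
 *				      rsrc, src_level, src_box))
 *		emit_sdma_copy(...);	// hypothetical helper
 *	else
 *		... fall back to the 3D (blit) path ...
 */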
/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void r600_copy_region_with_blit(struct pipe_context *pipe,
				       struct pipe_resource *dst,
				       unsigned dst_level,
				       unsigned dstx, unsigned dsty, unsigned dstz,
				       struct pipe_resource *src,
				       unsigned src_level,
				       const struct pipe_box *src_box)
{
	struct pipe_blit_info blit;

	memset(&blit, 0, sizeof(blit));
	blit.src.resource = src;
	blit.src.format = src->format;
	blit.src.level = src_level;
	blit.src.box = *src_box;
	blit.dst.resource = dst;
	blit.dst.format = dst->format;
	blit.dst.level = dst_level;
	blit.dst.box.x = dstx;
	blit.dst.box.y = dsty;
	blit.dst.box.z = dstz;
	blit.dst.box.width = src_box->width;
	blit.dst.box.height = src_box->height;
	blit.dst.box.depth = src_box->depth;
	blit.mask = util_format_get_mask(src->format) &
		    util_format_get_mask(dst->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	if (blit.mask) {
		pipe->blit(pipe, &blit);
	}
}
/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = &rtransfer->staging->b.b;
	struct pipe_resource *src = transfer->resource;

	if (src->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
					   src, transfer->level, &transfer->box);
		return;
	}

	rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
		       &transfer->box);
}
/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = transfer->resource;
	struct pipe_resource *src = &rtransfer->staging->b.b;
	struct pipe_box sbox;

	u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

	if (dst->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, transfer->level,
					   transfer->box.x, transfer->box.y, transfer->box.z,
					   src, 0, &sbox);
		return;
	}

	rctx->dma_copy(ctx, dst, transfer->level,
		       transfer->box.x, transfer->box.y, transfer->box.z,
		       src, 0, &sbox);
}
static unsigned r600_texture_get_offset(struct r600_texture *rtex, unsigned level,
					const struct pipe_box *box)
{
	enum pipe_format format = rtex->resource.b.b.format;

	return rtex->surface.level[level].offset +
	       box->z * rtex->surface.level[level].slice_size +
	       box->y / util_format_get_blockheight(format) * rtex->surface.level[level].pitch_bytes +
	       box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
}
static int r600_init_surface(struct r600_common_screen *rscreen,
			     struct radeon_surf *surface,
			     const struct pipe_resource *ptex,
			     unsigned array_mode,
			     bool is_flushed_depth)
{
	const struct util_format_description *desc =
		util_format_description(ptex->format);
	bool is_depth, is_stencil;

	is_depth = util_format_has_depth(desc);
	is_stencil = util_format_has_stencil(desc);

	surface->npix_x = ptex->width0;
	surface->npix_y = ptex->height0;
	surface->npix_z = ptex->depth0;
	surface->blk_w = util_format_get_blockwidth(ptex->format);
	surface->blk_h = util_format_get_blockheight(ptex->format);
	surface->blk_d = 1;
	surface->array_size = 1;
	surface->last_level = ptex->last_level;

	if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
	    ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
		surface->bpe = 4; /* stencil is allocated separately on evergreen */
	} else {
		surface->bpe = util_format_get_blocksize(ptex->format);
		/* align bytes per element to a dword */
		if (surface->bpe == 3) {
			surface->bpe = 4;
		}
	}

	surface->nsamples = ptex->nr_samples ? ptex->nr_samples : 1;
	surface->flags = RADEON_SURF_SET(array_mode, MODE);

	switch (ptex->target) {
	case PIPE_TEXTURE_1D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
		break;
	case PIPE_TEXTURE_RECT:
	case PIPE_TEXTURE_2D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
		break;
	case PIPE_TEXTURE_3D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
		break;
	case PIPE_TEXTURE_1D_ARRAY:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_CUBE_ARRAY: /* cube array layout like 2d array */
		assert(ptex->array_size % 6 == 0);
		/* fall through */
	case PIPE_TEXTURE_2D_ARRAY:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_CUBE:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_CUBEMAP, TYPE);
		break;
	default:
		return -EINVAL;
	}

	if (ptex->bind & PIPE_BIND_SCANOUT) {
		surface->flags |= RADEON_SURF_SCANOUT;
	}

	if (!is_flushed_depth && is_depth) {
		surface->flags |= RADEON_SURF_ZBUFFER;

		if (is_stencil) {
			surface->flags |= RADEON_SURF_SBUFFER |
					  RADEON_SURF_HAS_SBUFFER_MIPTREE;
		}
	}
	if (rscreen->chip_class >= SI) {
		surface->flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	}
	return 0;
}
static int r600_setup_surface(struct pipe_screen *screen,
			      struct r600_texture *rtex,
			      unsigned pitch_in_bytes_override,
			      unsigned offset)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned i;
	int r;

	r = rscreen->ws->surface_init(rscreen->ws, &rtex->surface);
	if (r)
		return r;

	rtex->size = rtex->surface.bo_size;

	if (pitch_in_bytes_override && pitch_in_bytes_override != rtex->surface.level[0].pitch_bytes) {
		/* The old DDX on Evergreen overestimated the pitch alignment
		 * for 1D surfaces (which have only one level), so honor the
		 * overridden pitch here.
		 */
		rtex->surface.level[0].nblk_x = pitch_in_bytes_override / rtex->surface.bpe;
		rtex->surface.level[0].pitch_bytes = pitch_in_bytes_override;
		rtex->surface.level[0].slice_size = pitch_in_bytes_override * rtex->surface.level[0].nblk_y;
	}

	if (offset) {
		for (i = 0; i < ARRAY_SIZE(rtex->surface.level); ++i)
			rtex->surface.level[i].offset += offset;
	}
	return 0;
}
static void r600_texture_init_metadata(struct r600_texture *rtex,
				       struct radeon_bo_metadata *metadata)
{
	struct radeon_surf *surface = &rtex->surface;

	memset(metadata, 0, sizeof(*metadata));
	metadata->microtile = surface->level[0].mode >= RADEON_SURF_MODE_1D ?
			      RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
	metadata->macrotile = surface->level[0].mode >= RADEON_SURF_MODE_2D ?
			      RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
	metadata->pipe_config = surface->pipe_config;
	metadata->bankw = surface->bankw;
	metadata->bankh = surface->bankh;
	metadata->tile_split = surface->tile_split;
	metadata->mtilea = surface->mtilea;
	metadata->num_banks = surface->num_banks;
	metadata->stride = surface->level[0].pitch_bytes;
	metadata->scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
}
static void r600_dirty_all_framebuffer_states(struct r600_common_screen *rscreen)
{
	p_atomic_inc(&rscreen->dirty_fb_counter);
}
static void r600_eliminate_fast_color_clear(struct r600_common_screen *rscreen,
					    struct r600_texture *rtex)
{
	struct pipe_context *ctx = rscreen->aux_context;

	pipe_mutex_lock(rscreen->aux_context_lock);
	ctx->flush_resource(ctx, &rtex->resource.b.b);
	ctx->flush(ctx, NULL, 0);
	pipe_mutex_unlock(rscreen->aux_context_lock);
}
static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex)
{
	if (!rtex->cmask.size)
		return;

	assert(rtex->resource.b.b.nr_samples <= 1);

	/* Disable CMASK. */
	memset(&rtex->cmask, 0, sizeof(rtex->cmask));
	rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info &= ~SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1);

	if (rtex->cmask_buffer != &rtex->resource)
		pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);

	/* Notify all contexts about the change. */
	r600_dirty_all_framebuffer_states(rscreen);
	p_atomic_inc(&rscreen->compressed_colortex_counter);
}
static void r600_texture_discard_dcc(struct r600_common_screen *rscreen,
				     struct r600_texture *rtex)
{
	rtex->dcc_offset = 0;
	rtex->cb_color_info &= ~VI_S_028C70_DCC_ENABLE(1);

	/* Notify all contexts about the change. */
	r600_dirty_all_framebuffer_states(rscreen);
}
void r600_texture_disable_dcc(struct r600_common_screen *rscreen,
			      struct r600_texture *rtex)
{
	struct r600_common_context *rctx =
		(struct r600_common_context *)rscreen->aux_context;

	if (!rtex->dcc_offset)
		return;

	/* Decompress DCC. */
	pipe_mutex_lock(rscreen->aux_context_lock);
	rctx->decompress_dcc(&rctx->b, rtex);
	rctx->b.flush(&rctx->b, NULL, 0);
	pipe_mutex_unlock(rscreen->aux_context_lock);

	r600_texture_discard_dcc(rscreen, rtex);
}
static boolean r600_texture_get_handle(struct pipe_screen* screen,
				       struct pipe_resource *resource,
				       struct winsys_handle *whandle,
				       unsigned usage)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_resource *res = (struct r600_resource*)resource;
	struct r600_texture *rtex = (struct r600_texture*)resource;
	struct radeon_bo_metadata metadata;
	bool update_metadata = false;

	/* This is not supported now, but it might be required for OpenCL
	 * interop in the future.
	 */
	if (resource->target != PIPE_BUFFER &&
	    (resource->nr_samples > 1 || rtex->is_depth))
		return false;

	if (resource->target != PIPE_BUFFER) {
		/* Since shader image stores don't support DCC on VI,
		 * disable it for external clients that want write
		 * access.
		 */
		if (usage & PIPE_HANDLE_USAGE_WRITE && rtex->dcc_offset) {
			r600_texture_disable_dcc(rscreen, rtex);
			update_metadata = true;
		}

		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
		    rtex->cmask.size) {
			/* Eliminate fast clear (both CMASK and DCC) */
			r600_eliminate_fast_color_clear(rscreen, rtex);

			/* Disable CMASK if flush_resource isn't going
			 * to be called.
			 */
			r600_texture_discard_cmask(rscreen, rtex);
			update_metadata = true;
		}
	}

	if (!res->is_shared || update_metadata) {
		r600_texture_init_metadata(rtex, &metadata);
		if (rscreen->query_opaque_metadata)
			rscreen->query_opaque_metadata(rscreen, rtex,
						       &metadata);

		rscreen->ws->buffer_set_metadata(res->buf, &metadata);
	}

	if (res->is_shared) {
		/* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
		 * doesn't set it.
		 */
		res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
	} else {
		res->is_shared = true;
		res->external_usage = usage;
	}

	return rscreen->ws->buffer_get_handle(res->buf,
					      rtex->surface.level[0].pitch_bytes,
					      rtex->surface.level[0].offset,
					      rtex->surface.level[0].slice_size,
					      whandle);
}
static void r600_texture_destroy(struct pipe_screen *screen,
				 struct pipe_resource *ptex)
{
	struct r600_texture *rtex = (struct r600_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;

	if (rtex->flushed_depth_texture)
		pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);

	pipe_resource_reference((struct pipe_resource**)&rtex->htile_buffer, NULL);
	if (rtex->cmask_buffer != &rtex->resource) {
		pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);
	}
	pb_reference(&resource->buf, NULL);
	FREE(rtex);
}
static const struct u_resource_vtbl r600_texture_vtbl;
/* The number of samples can be specified independently of the texture. */
void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 unsigned nr_samples,
				 struct r600_fmask_info *out)
{
	/* FMASK is allocated like an ordinary texture. */
	struct radeon_surf fmask = rtex->surface;

	memset(out, 0, sizeof(*out));

	fmask.bo_alignment = 0;
	fmask.bo_size = 0;
	fmask.nsamples = 1;
	fmask.flags |= RADEON_SURF_FMASK;

	/* Force 2D tiling if it wasn't set. This may occur when creating
	 * FMASK for MSAA resolve on R6xx. On R6xx, the single-sample
	 * destination buffer must have an FMASK too. */
	fmask.flags = RADEON_SURF_CLR(fmask.flags, MODE);
	fmask.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);

	if (rscreen->chip_class >= SI) {
		fmask.flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	}

	switch (nr_samples) {
	case 2:
	case 4:
		fmask.bpe = 1;
		if (rscreen->chip_class <= CAYMAN) {
			fmask.bankh = 4;
		}
		break;
	case 8:
		fmask.bpe = 4;
		break;
	default:
		R600_ERR("Invalid sample count for FMASK allocation.\n");
		return;
	}

	/* Overallocate FMASK on R600-R700 to fix colorbuffer corruption.
	 * This can be fixed by writing a separate FMASK allocator specifically
	 * for R600-R700 asics. */
	if (rscreen->chip_class <= R700) {
		fmask.bpe *= 2;
	}

	if (rscreen->ws->surface_init(rscreen->ws, &fmask)) {
		R600_ERR("Got error in surface_init while allocating FMASK.\n");
		return;
	}

	assert(fmask.level[0].mode == RADEON_SURF_MODE_2D);

	out->slice_tile_max = (fmask.level[0].nblk_x * fmask.level[0].nblk_y) / 64;
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->tile_mode_index = fmask.tiling_index[0];
	out->pitch_in_pixels = fmask.level[0].nblk_x;
	out->bank_height = fmask.bankh;
	out->alignment = MAX2(256, fmask.bo_alignment);
	out->size = fmask.bo_size;
}
static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	r600_texture_get_fmask_info(rscreen, rtex,
				    rtex->resource.b.b.nr_samples, &rtex->fmask);

	rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
	rtex->size = rtex->fmask.offset + rtex->fmask.size;
}
void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 struct r600_cmask_info *out)
{
	unsigned cmask_tile_width = 8;
	unsigned cmask_tile_height = 8;
	unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
	unsigned element_bits = 4;
	unsigned cmask_cache_bits = 1024;
	unsigned num_pipes = rscreen->info.num_tile_pipes;
	unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;

	unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
	unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
	unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
	unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
	unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;

	unsigned pitch_elements = align(rtex->surface.npix_x, macro_tile_width);
	unsigned height = align(rtex->surface.npix_y, macro_tile_height);

	unsigned base_align = num_pipes * pipe_interleave_bytes;
	unsigned slice_bytes =
		((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;

	assert(macro_tile_width % 128 == 0);
	assert(macro_tile_height % 128 == 0);

	out->pitch = pitch_elements;
	out->height = height;
	out->xalign = macro_tile_width;
	out->yalign = macro_tile_height;
	out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
	out->alignment = MAX2(256, base_align);
	out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		    align(slice_bytes, base_align);
}
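/* Worked example (illustrative values, not taken from real hardware info):
 * with num_tile_pipes=2 and pipe_interleave_bytes=256,
 * elements_per_macro_tile = (1024/4)*2 = 512 and
 * pixels_per_macro_tile = 512*64 = 32768, giving a 256x128 macro tile
 * (both asserts hold). A 1920x1080 surface is then padded to 2048x1152,
 * and one CMASK slice is ((2048*1152*4+7)/8)/64 = 18432 bytes, aligned
 * to base_align = 512.
 */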
static void si_texture_get_cmask_info(struct r600_common_screen *rscreen,
				      struct r600_texture *rtex,
				      struct r600_cmask_info *out)
{
	unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
	unsigned num_pipes = rscreen->info.num_tile_pipes;
	unsigned cl_width, cl_height;

	switch (num_pipes) {
	case 2:
		cl_width = 32;
		cl_height = 16;
		break;
	case 4:
		cl_width = 32;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 32;
		break;
	case 16: /* Hawaii */
		cl_width = 64;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	unsigned base_align = num_pipes * pipe_interleave_bytes;

	unsigned width = align(rtex->surface.npix_x, cl_width*8);
	unsigned height = align(rtex->surface.npix_y, cl_height*8);

	unsigned slice_elements = (width * height) / (8*8);

	/* Each element of CMASK is a nibble. */
	unsigned slice_bytes = slice_elements / 2;

	out->pitch = width;
	out->height = height;
	out->xalign = cl_width * 8;
	out->yalign = cl_height * 8;
	out->slice_tile_max = (width * height) / (128*128);
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->alignment = MAX2(256, base_align);
	out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		    align(slice_bytes, base_align);
}
static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	if (rscreen->chip_class >= SI) {
		si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	} else {
		r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	}

	rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);
	rtex->size = rtex->cmask.offset + rtex->cmask.size;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
}
static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
					      struct r600_texture *rtex)
{
	if (rtex->cmask_buffer)
		return;

	assert(rtex->cmask.size == 0);

	if (rscreen->chip_class >= SI) {
		si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	} else {
		r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	}

	rtex->cmask_buffer = (struct r600_resource *)
		pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_DEFAULT, rtex->cmask.size);
	if (rtex->cmask_buffer == NULL) {
		rtex->cmask.size = 0;
		return;
	}

	/* update colorbuffer state bits */
	rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);

	p_atomic_inc(&rscreen->compressed_colortex_counter);
}
static unsigned r600_texture_get_htile_size(struct r600_common_screen *rscreen,
					    struct r600_texture *rtex)
{
	unsigned cl_width, cl_height, width, height;
	unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
	unsigned num_pipes = rscreen->info.num_tile_pipes;

	if (rscreen->chip_class <= EVERGREEN &&
	    rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 26)
		return 0;

	/* HW bug on R6xx. */
	if (rscreen->chip_class == R600 &&
	    (rtex->surface.level[0].npix_x > 7680 ||
	     rtex->surface.level[0].npix_y > 7680))
		return 0;

	/* HTILE is broken with 1D tiling on old kernels and CIK. */
	if (rscreen->chip_class >= CIK &&
	    rtex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
	    rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 38)
		return 0;

	/* Overalign HTILE on P2 configs to work around GPU hangs in
	 * piglit/depthstencil-render-miplevels 585.
	 *
	 * This has been confirmed to help Kabini & Stoney, where the hangs
	 * are always reproducible. I think I have seen the test hang
	 * on Carrizo too, though it was very rare there.
	 */
	if (rscreen->chip_class >= CIK && num_pipes < 4)
		num_pipes = 4;

	switch (num_pipes) {
	case 1:
		cl_width = 32;
		cl_height = 16;
		break;
	case 2:
		cl_width = 32;
		cl_height = 32;
		break;
	case 4:
		cl_width = 64;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 64;
		break;
	case 16:
		cl_width = 128;
		cl_height = 64;
		break;
	default:
		assert(0);
		return 0;
	}

	width = align(rtex->surface.npix_x, cl_width * 8);
	height = align(rtex->surface.npix_y, cl_height * 8);

	slice_elements = (width * height) / (8 * 8);
	slice_bytes = slice_elements * 4;

	pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
	base_align = num_pipes * pipe_interleave_bytes;

	rtex->htile.pitch = width;
	rtex->htile.height = height;
	rtex->htile.xalign = cl_width * 8;
	rtex->htile.yalign = cl_height * 8;

	return (util_max_layer(&rtex->resource.b.b, 0) + 1) *
	       align(slice_bytes, base_align);
}
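/* Worked example (illustrative values): with num_pipes=4 (cl_width=64,
 * cl_height=32), a 1920x1080 depth surface is padded to 2048x1280;
 * slice_elements = 2048*1280/64 = 40960, so one HTILE slice is
 * 40960*4 = 163840 bytes, aligned to num_pipes*pipe_interleave_bytes.
 */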
static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	unsigned htile_size = r600_texture_get_htile_size(rscreen, rtex);

	if (!htile_size)
		return;

	rtex->htile_buffer = (struct r600_resource*)
		pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_DEFAULT, htile_size);
	if (rtex->htile_buffer == NULL) {
		/* this is not a fatal error as we can still keep rendering
		 * without htile buffer */
		R600_ERR("Failed to create buffer object for htile buffer.\n");
	} else {
		r600_screen_clear_buffer(rscreen, &rtex->htile_buffer->b.b, 0,
					 htile_size, 0, R600_COHERENCY_NONE);
	}
}
void r600_print_texture_info(struct r600_texture *rtex, FILE *f)
{
	int i;

	fprintf(f, "  Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
		"blk_h=%u, blk_d=%u, array_size=%u, last_level=%u, "
		"bpe=%u, nsamples=%u, flags=0x%x, %s\n",
		rtex->surface.npix_x, rtex->surface.npix_y,
		rtex->surface.npix_z, rtex->surface.blk_w,
		rtex->surface.blk_h, rtex->surface.blk_d,
		rtex->surface.array_size, rtex->surface.last_level,
		rtex->surface.bpe, rtex->surface.nsamples,
		rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));

	fprintf(f, "  Layout: size=%"PRIu64", alignment=%"PRIu64", bankw=%u, "
		"bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
		rtex->surface.bo_size, rtex->surface.bo_alignment, rtex->surface.bankw,
		rtex->surface.bankh, rtex->surface.num_banks, rtex->surface.mtilea,
		rtex->surface.tile_split, rtex->surface.pipe_config,
		(rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);

	if (rtex->fmask.size)
		fprintf(f, "  FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
			"bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
			rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
			rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
			rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);

	if (rtex->cmask.size)
		fprintf(f, "  CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch=%u, "
			"height=%u, xalign=%u, yalign=%u, slice_tile_max=%u\n",
			rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
			rtex->cmask.pitch, rtex->cmask.height, rtex->cmask.xalign,
			rtex->cmask.yalign, rtex->cmask.slice_tile_max);

	if (rtex->htile_buffer)
		fprintf(f, "  HTile: size=%u, alignment=%u, pitch=%u, height=%u, "
			"xalign=%u, yalign=%u\n",
			rtex->htile_buffer->b.b.width0,
			rtex->htile_buffer->buf->alignment, rtex->htile.pitch,
			rtex->htile.height, rtex->htile.xalign, rtex->htile.yalign);

	if (rtex->dcc_offset) {
		fprintf(f, "  DCC: offset=%"PRIu64", size=%"PRIu64", alignment=%"PRIu64"\n",
			rtex->dcc_offset, rtex->surface.dcc_size,
			rtex->surface.dcc_alignment);
		for (i = 0; i <= rtex->surface.last_level; i++)
			fprintf(f, "  DCCLevel[%i]: offset=%"PRIu64"\n",
				i, rtex->surface.level[i].dcc_offset);
	}

	for (i = 0; i <= rtex->surface.last_level; i++)
		fprintf(f, "  Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
			"npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
			"nblk_z=%u, pitch_bytes=%u, mode=%u\n",
			i, rtex->surface.level[i].offset,
			rtex->surface.level[i].slice_size,
			u_minify(rtex->resource.b.b.width0, i),
			u_minify(rtex->resource.b.b.height0, i),
			u_minify(rtex->resource.b.b.depth0, i),
			rtex->surface.level[i].nblk_x,
			rtex->surface.level[i].nblk_y,
			rtex->surface.level[i].nblk_z,
			rtex->surface.level[i].pitch_bytes,
			rtex->surface.level[i].mode);

	if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
		for (i = 0; i <= rtex->surface.last_level; i++) {
			fprintf(f, "  StencilLayout: tilesplit=%u\n",
				rtex->surface.stencil_tile_split);
			fprintf(f, "  StencilLevel[%i]: offset=%"PRIu64", "
				"slice_size=%"PRIu64", npix_x=%u, "
				"npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
				"nblk_z=%u, pitch_bytes=%u, mode=%u\n",
				i, rtex->surface.stencil_level[i].offset,
				rtex->surface.stencil_level[i].slice_size,
				u_minify(rtex->resource.b.b.width0, i),
				u_minify(rtex->resource.b.b.height0, i),
				u_minify(rtex->resource.b.b.depth0, i),
				rtex->surface.stencil_level[i].nblk_x,
				rtex->surface.stencil_level[i].nblk_y,
				rtex->surface.stencil_level[i].nblk_z,
				rtex->surface.stencil_level[i].pitch_bytes,
				rtex->surface.stencil_level[i].mode);
		}
	}
}
/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
r600_texture_create_object(struct pipe_screen *screen,
			   const struct pipe_resource *base,
			   unsigned pitch_in_bytes_override,
			   unsigned offset,
			   struct pb_buffer *buf,
			   struct radeon_surf *surface)
{
	struct r600_texture *rtex;
	struct r600_resource *resource;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	rtex = CALLOC_STRUCT(r600_texture);
	if (!rtex)
		return NULL;

	resource = &rtex->resource;
	resource->b.b = *base;
	resource->b.vtbl = &r600_texture_vtbl;
	pipe_reference_init(&resource->b.b.reference, 1);
	resource->b.b.screen = screen;

	/* don't include stencil-only formats which we don't support for rendering */
	rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));

	rtex->surface = *surface;
	if (r600_setup_surface(screen, rtex, pitch_in_bytes_override, offset)) {
		FREE(rtex);
		return NULL;
	}

	/* Tiled depth textures utilize the non-displayable tile order.
	 * This must be done after r600_setup_surface.
	 * Applies to R600-Cayman. */
	rtex->non_disp_tiling = rtex->is_depth && rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D;

	if (rtex->is_depth) {
		if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
				     R600_RESOURCE_FLAG_FLUSHED_DEPTH)) &&
		    !(rscreen->debug_flags & DBG_NO_HYPERZ)) {
			r600_texture_allocate_htile(rscreen, rtex);
		}
	} else {
		if (base->nr_samples > 1) {
			if (!buf) {
				r600_texture_allocate_fmask(rscreen, rtex);
				r600_texture_allocate_cmask(rscreen, rtex);
				rtex->cmask_buffer = &rtex->resource;
			}
			if (!rtex->fmask.size || !rtex->cmask.size) {
				FREE(rtex);
				return NULL;
			}
		}

		if (!buf && rtex->surface.dcc_size &&
		    !(rscreen->debug_flags & DBG_NO_DCC)) {
			/* Reserve space for the DCC buffer. */
			rtex->dcc_offset = align64(rtex->size, rtex->surface.dcc_alignment);
			rtex->size = rtex->dcc_offset + rtex->surface.dcc_size;
			rtex->cb_color_info |= VI_S_028C70_DCC_ENABLE(1);
		}
	}

	/* Now create the backing buffer. */
	if (!buf) {
		if (!r600_init_resource(rscreen, resource, rtex->size,
					rtex->surface.bo_alignment)) {
			FREE(rtex);
			return NULL;
		}
	} else {
		resource->buf = buf;
		resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->buf);
		resource->domains = rscreen->ws->buffer_get_initial_domain(resource->buf);
	}

	if (rtex->cmask.size) {
		/* Initialize the cmask to 0xCC (= compressed state). */
		r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
					 rtex->cmask.offset, rtex->cmask.size,
					 0xCCCCCCCC, R600_COHERENCY_NONE);
	}
	if (rtex->dcc_offset) {
		r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
					 rtex->dcc_offset,
					 rtex->surface.dcc_size,
					 0xFFFFFFFF, R600_COHERENCY_NONE);
	}

	/* Initialize the CMASK base register value. */
	rtex->cmask.base_address_reg =
		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

	if (rscreen->debug_flags & DBG_VM) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
			rtex->resource.gpu_address,
			rtex->resource.gpu_address + rtex->resource.buf->size,
			base->width0, base->height0, util_max_layer(base, 0)+1, base->last_level+1,
			base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
	}

	if (rscreen->debug_flags & DBG_TEX) {
		r600_print_texture_info(rtex, stdout);
	}

	return rtex;
}
static unsigned r600_choose_tiling(struct r600_common_screen *rscreen,
				   const struct pipe_resource *templ)
{
	const struct util_format_description *desc = util_format_description(templ->format);
	bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING;

	/* MSAA resources must be 2D tiled. */
	if (templ->nr_samples > 1)
		return RADEON_SURF_MODE_2D;

	/* Transfer resources should be linear. */
	if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
		return RADEON_SURF_MODE_LINEAR_ALIGNED;

	/* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */
	if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN &&
	    (templ->bind & PIPE_BIND_COMPUTE_RESOURCE) &&
	    (templ->target == PIPE_TEXTURE_2D ||
	     templ->target == PIPE_TEXTURE_3D))
		force_tiling = true;

	/* Handle common candidates for the linear mode.
	 * Compressed textures and DB surfaces must always be tiled.
	 */
	if (!force_tiling && !util_format_is_compressed(templ->format) &&
	    (!util_format_is_depth_or_stencil(templ->format) ||
	     templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH)) {
		if (rscreen->debug_flags & DBG_NO_TILING)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
		if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Cursors are linear on SI.
		 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
		if (rscreen->chip_class >= SI &&
		    (templ->bind & PIPE_BIND_CURSOR))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		if (templ->bind & PIPE_BIND_LINEAR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures with a very small height are recommended to be linear. */
		if (templ->target == PIPE_TEXTURE_1D ||
		    templ->target == PIPE_TEXTURE_1D_ARRAY ||
		    templ->height0 <= 4)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures likely to be mapped often. */
		if (templ->usage == PIPE_USAGE_STAGING ||
		    templ->usage == PIPE_USAGE_STREAM)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Make small textures 1D tiled. */
	if (templ->width0 <= 16 || templ->height0 <= 16 ||
	    (rscreen->debug_flags & DBG_NO_2D_TILING))
		return RADEON_SURF_MODE_1D;

	/* The allocator will switch to 1D if needed. */
	return RADEON_SURF_MODE_2D;
}
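/* Example outcomes of the rules above (for ordinary uncompressed color
 * formats, derived from the checks, not an exhaustive table): a 4-sample
 * color buffer -> MODE_2D; a PIPE_USAGE_STAGING readback texture ->
 * MODE_LINEAR_ALIGNED; a 16x16 mipmap-less UI texture -> MODE_1D; a large
 * static sampler texture -> MODE_2D.
 */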
struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
					  const struct pipe_resource *templ)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_surf surface = {0};
	int r;

	r = r600_init_surface(rscreen, &surface, templ,
			      r600_choose_tiling(rscreen, templ),
			      templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);
	if (r)
		return NULL;

	r = rscreen->ws->surface_best(rscreen->ws, &surface);
	if (r)
		return NULL;

	return (struct pipe_resource *)r600_texture_create_object(screen, templ, 0,
								  0, NULL, &surface);
}
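/* Usage sketch (hypothetical caller, not part of this file): a state tracker
 * typically reaches this function through the screen's resource_create hook,
 * which is wired up elsewhere in the driver:
 *
 *	struct pipe_resource templ = {0};
 *	templ.target = PIPE_TEXTURE_2D;
 *	templ.format = PIPE_FORMAT_R8G8B8A8_UNORM;
 *	templ.width0 = 1024;
 *	templ.height0 = 768;
 *	templ.depth0 = 1;
 *	templ.array_size = 1;
 *	templ.bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
 *	struct pipe_resource *tex = screen->resource_create(screen, &templ);
 */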
static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
						      const struct pipe_resource *templ,
						      struct winsys_handle *whandle,
						      unsigned usage)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct pb_buffer *buf = NULL;
	unsigned stride = 0, offset = 0;
	unsigned array_mode;
	struct radeon_surf surface;
	int r;
	struct radeon_bo_metadata metadata = {};
	struct r600_texture *rtex;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	    templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride, &offset);
	if (!buf)
		return NULL;

	rscreen->ws->buffer_get_metadata(buf, &metadata);

	surface.pipe_config = metadata.pipe_config;
	surface.bankw = metadata.bankw;
	surface.bankh = metadata.bankh;
	surface.tile_split = metadata.tile_split;
	surface.mtilea = metadata.mtilea;
	surface.num_banks = metadata.num_banks;

	if (metadata.macrotile == RADEON_LAYOUT_TILED)
		array_mode = RADEON_SURF_MODE_2D;
	else if (metadata.microtile == RADEON_LAYOUT_TILED)
		array_mode = RADEON_SURF_MODE_1D;
	else
		array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

	r = r600_init_surface(rscreen, &surface, templ, array_mode, false);
	if (r)
		return NULL;

	if (metadata.scanout)
		surface.flags |= RADEON_SURF_SCANOUT;

	rtex = r600_texture_create_object(screen, templ, stride,
					  offset, buf, &surface);
	if (!rtex)
		return NULL;

	rtex->resource.is_shared = true;
	rtex->resource.external_usage = usage;
	return &rtex->resource.b.b;
}
bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     struct r600_texture **staging)
{
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct pipe_resource resource;
	struct r600_texture **flushed_depth_texture = staging ?
			staging : &rtex->flushed_depth_texture;

	if (!staging && rtex->flushed_depth_texture)
		return true; /* it's ready */

	resource.target = texture->target;
	resource.format = texture->format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
	resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;

	if (staging)
		resource.flags |= R600_RESOURCE_FLAG_TRANSFER;

	*flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (*flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold flushed depth\n");
		return false;
	}

	(*flushed_depth_texture)->is_flushing_texture = TRUE;
	(*flushed_depth_texture)->non_disp_tiling = false;
	return true;
}
/**
 * Initialize the pipe_resource descriptor to be of the same size as the box,
 * which is supposed to hold a subregion of the texture "orig" at the given
 * mipmap level.
 */
static void r600_init_temp_resource_from_box(struct pipe_resource *res,
					     struct pipe_resource *orig,
					     const struct pipe_box *box,
					     unsigned level, unsigned flags)
{
	memset(res, 0, sizeof(*res));
	res->format = orig->format;
	res->width0 = box->width;
	res->height0 = box->height;
	res->depth0 = 1;
	res->array_size = 1;
	res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	res->flags = flags;

	/* We must set the correct texture target and dimensions for a 3D box. */
	if (box->depth > 1 && util_max_layer(orig, level) > 0) {
		res->target = PIPE_TEXTURE_2D_ARRAY;
		res->array_size = box->depth;
	} else {
		res->target = PIPE_TEXTURE_2D;
	}
}
static void *r600_texture_transfer_map(struct pipe_context *ctx,
				       struct pipe_resource *texture,
				       unsigned level,
				       unsigned usage,
				       const struct pipe_box *box,
				       struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct r600_transfer *trans;
	boolean use_staging_texture = FALSE;
	struct r600_resource *buf;
	unsigned offset = 0;
	char *map;

	assert(!(texture->flags & R600_RESOURCE_FLAG_TRANSFER));

	/* We cannot map a tiled texture directly because the data is
	 * in a different order, therefore we do detiling using a blit.
	 *
	 * Also, use a temporary in GTT memory for read transfers, as
	 * the CPU is much happier reading out of cached system memory
	 * than uncached VRAM.
	 */
	if (rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
		use_staging_texture = TRUE;
	} else if ((usage & PIPE_TRANSFER_READ) &&
		   rtex->resource.domains & RADEON_DOMAIN_VRAM) {
		/* Untiled buffers in VRAM, which is slow for CPU reads */
		use_staging_texture = TRUE;
	} else if (!(usage & PIPE_TRANSFER_READ) &&
		   (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(rtex->resource.buf, 0, RADEON_USAGE_READWRITE))) {
		/* Use a staging texture for uploads if the underlying BO is busy. */
		use_staging_texture = TRUE;
	}

	trans = CALLOC_STRUCT(r600_transfer);
	if (!trans)
		return NULL;

	trans->transfer.resource = texture;
	trans->transfer.level = level;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;

	if (rtex->is_depth) {
		struct r600_texture *staging_depth;

		if (rtex->resource.b.b.nr_samples > 1) {
			/* MSAA depth buffers need to be converted to single sample buffers.
			 *
			 * Mapping MSAA depth buffers can occur if ReadPixels is called
			 * with a multisample GLX visual.
			 *
			 * First downsample the depth buffer to a temporary texture,
			 * then decompress the temporary one to staging.
			 *
			 * Only the region being mapped is transferred.
			 */
			struct pipe_resource resource;

			r600_init_temp_resource_from_box(&resource, texture, box, level, 0);

			if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			if (usage & PIPE_TRANSFER_READ) {
				struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
				if (!temp) {
					R600_ERR("failed to create a temporary depth texture\n");
					FREE(trans);
					return NULL;
				}

				r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
				rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
							    0, 0, 0, box->depth, 0, 0);
				pipe_resource_reference(&temp, NULL);
			}
		} else {
			/* XXX: only readback the rectangle which is being mapped? */
			/* XXX: when discard is true, no need to read back from depth texture */
			if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			rctx->blit_decompress_depth(ctx, rtex, staging_depth,
						    level, level,
						    box->z, box->z + box->depth - 1,
						    0, 0);

			offset = r600_texture_get_offset(staging_depth, level, box);
		}

		trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = staging_depth->surface.level[level].slice_size;
		trans->staging = (struct r600_resource*)staging_depth;
		buf = trans->staging;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct r600_texture *staging;

		r600_init_temp_resource_from_box(&resource, texture, box, level,
						 R600_RESOURCE_FLAG_TRANSFER);
		resource.usage = (usage & PIPE_TRANSFER_READ) ?
			PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;

		/* Create the temporary texture. */
		staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
		if (!staging) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			FREE(trans);
			return NULL;
		}
		trans->staging = &staging->resource;
		trans->transfer.stride = staging->surface.level[0].pitch_bytes;
		trans->transfer.layer_stride = staging->surface.level[0].slice_size;

		if (usage & PIPE_TRANSFER_READ)
			r600_copy_to_staging_texture(ctx, trans);
		else
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

		buf = trans->staging;
	} else {
		/* the resource is mapped directly */
		trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
		offset = r600_texture_get_offset(rtex, level, box);
		buf = &rtex->resource;
	}

	if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
		pipe_resource_reference((struct pipe_resource**)&trans->staging, NULL);
		FREE(trans);
		return NULL;
	}

	*ptransfer = &trans->transfer;
	return map + offset;
}
static void r600_texture_transfer_unmap(struct pipe_context *ctx,
					struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct pipe_resource *texture = transfer->resource;
	struct r600_texture *rtex = (struct r600_texture*)texture;

	if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
		if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &rtransfer->staging->b.b, transfer->level,
						  &transfer->box);
		} else {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
	}

	if (rtransfer->staging)
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);

	FREE(transfer);
}
static const struct u_resource_vtbl r600_texture_vtbl =
{
	NULL,				/* get_handle */
	r600_texture_destroy,		/* resource_destroy */
	r600_texture_transfer_map,	/* transfer_map */
	u_default_transfer_flush_region, /* transfer_flush_region */
	r600_texture_transfer_unmap,	/* transfer_unmap */
	NULL				/* transfer_inline_write */
};
struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
						struct pipe_resource *texture,
						const struct pipe_surface *templ,
						unsigned width, unsigned height)
{
	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);

	if (!surface)
		return NULL;

	assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
	assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));

	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.context = pipe;
	surface->base.format = templ->format;
	surface->base.width = width;
	surface->base.height = height;
	surface->base.u = templ->u;
	return &surface->base;
}
static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
						struct pipe_resource *tex,
						const struct pipe_surface *templ)
{
	unsigned level = templ->u.tex.level;
	unsigned width = u_minify(tex->width0, level);
	unsigned height = u_minify(tex->height0, level);

	if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
		const struct util_format_description *tex_desc
			= util_format_description(tex->format);
		const struct util_format_description *templ_desc
			= util_format_description(templ->format);

		assert(tex_desc->block.bits == templ_desc->block.bits);

		/* Adjust size of surface if and only if the block width or
		 * height is changed. */
		if (tex_desc->block.width != templ_desc->block.width ||
		    tex_desc->block.height != templ_desc->block.height) {
			unsigned nblks_x = util_format_get_nblocksx(tex->format, width);
			unsigned nblks_y = util_format_get_nblocksy(tex->format, height);

			width = nblks_x * templ_desc->block.width;
			height = nblks_y * templ_desc->block.height;
		}
	}

	return r600_create_surface_custom(pipe, tex, templ, width, height);
}
static void r600_surface_destroy(struct pipe_context *pipe,
				 struct pipe_surface *surface)
{
	struct r600_surface *surf = (struct r600_surface*)surface;
	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask, NULL);
	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask, NULL);
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}
unsigned r600_translate_colorswap(enum pipe_format format, bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);

#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)
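/* For example, for PIPE_FORMAT_B8G8R8A8_UNORM (swizzle ZYXW),
 * HAS_SWIZZLE(1,Y) expands to (desc->swizzle[1] == PIPE_SWIZZLE_Y), which
 * is true, and HAS_SWIZZLE(2,X) is also true, selecting V_0280A0_SWAP_ALT
 * in the 4-channel case below. */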
	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_SWAP_STD;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return ~0U;

	switch (desc->nr_channels) {
	case 1:
		if (HAS_SWIZZLE(0,X))
			return V_0280A0_SWAP_STD; /* X___ */
		else if (HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* ___X */
		break;
	case 2:
		if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
		    (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
		    (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
			return V_0280A0_SWAP_STD; /* XY__ */
		else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
			 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
			 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
			/* YX__ */
			return (do_endian_swap ? V_0280A0_SWAP_STD : V_0280A0_SWAP_STD_REV);
		else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
			return V_0280A0_SWAP_ALT; /* X__Y */
		else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* Y__X */
		break;
	case 3:
		if (HAS_SWIZZLE(0,X))
			return (do_endian_swap ? V_0280A0_SWAP_STD_REV : V_0280A0_SWAP_STD);
		else if (HAS_SWIZZLE(0,Z))
			return V_0280A0_SWAP_STD_REV; /* ZYX */
		break;
	case 4:
		/* check the middle channels, the 1st and 4th channel can be NONE */
		if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
			return V_0280A0_SWAP_STD; /* XYZW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
			return V_0280A0_SWAP_STD_REV; /* WZYX */
		} else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
			return V_0280A0_SWAP_ALT; /* ZYXW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
			/* YZWX */
			if (desc->is_array)
				return V_0280A0_SWAP_ALT_REV;
			else
				return (do_endian_swap ? V_0280A0_SWAP_ALT : V_0280A0_SWAP_ALT_REV);
		}
		break;
	}
	return ~0U;
}
static void evergreen_set_clear_color(struct r600_texture *rtex,
				      enum pipe_format surface_format,
				      const union pipe_color_union *color)
{
	union util_color uc;

	memset(&uc, 0, sizeof(uc));

	if (util_format_is_pure_uint(surface_format)) {
		util_format_write_4ui(surface_format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
	} else if (util_format_is_pure_sint(surface_format)) {
		util_format_write_4i(surface_format, color->i, 0, &uc, 0, 0, 0, 1, 1);
	} else {
		util_pack_color(color->f, surface_format, &uc);
	}

	memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t));
}
static void vi_get_fast_clear_parameters(enum pipe_format surface_format,
					 const union pipe_color_union *color,
					 uint32_t* reset_value,
					 bool* clear_words_needed)
{
	bool values[4] = {};
	int i;
	bool main_value = false;
	bool extra_value = false;
	int extra_channel;
	const struct util_format_description *desc = util_format_description(surface_format);

	*clear_words_needed = true;
	*reset_value = 0x20202020U;

	/* If we want to clear without needing a fast clear eliminate step, we
	 * can set each channel to 0 or 1 (or 0/max for integer formats). We
	 * have two sets of flags, one for the last or first channel (extra)
	 * and one for the other channels (main).
	 */

	if (surface_format == PIPE_FORMAT_R11G11B10_FLOAT ||
	    surface_format == PIPE_FORMAT_B5G6R5_UNORM ||
	    surface_format == PIPE_FORMAT_B5G6R5_SRGB) {
		extra_channel = -1;
	} else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
		if (r600_translate_colorswap(surface_format, FALSE) <= 1)
			extra_channel = desc->nr_channels - 1;
		else
			extra_channel = 0;
	} else
		return;

	for (i = 0; i < 4; ++i) {
		int index = desc->swizzle[i] - PIPE_SWIZZLE_X;

		if (desc->swizzle[i] < PIPE_SWIZZLE_X ||
		    desc->swizzle[i] > PIPE_SWIZZLE_W)
			continue;

		if (util_format_is_pure_sint(surface_format)) {
			values[i] = color->i[i] != 0;
			if (color->i[i] != 0 && color->i[i] != INT32_MAX)
				return;
		} else if (util_format_is_pure_uint(surface_format)) {
			values[i] = color->ui[i] != 0U;
			if (color->ui[i] != 0U && color->ui[i] != UINT32_MAX)
				return;
		} else {
			values[i] = color->f[i] != 0.0F;
			if (color->f[i] != 0.0F && color->f[i] != 1.0F)
				return;
		}

		if (index == extra_channel)
			extra_value = values[i];
		else
			main_value = values[i];
	}

	for (int i = 0; i < 4; ++i)
		if (values[i] != main_value &&
		    desc->swizzle[i] - PIPE_SWIZZLE_X != extra_channel &&
		    desc->swizzle[i] >= PIPE_SWIZZLE_X &&
		    desc->swizzle[i] <= PIPE_SWIZZLE_W)
			return;

	*clear_words_needed = false;
	if (main_value)
		*reset_value |= 0x80808080U;

	if (extra_value)
		*reset_value |= 0x40404040U;
}
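/* Illustration (derived from the logic above): the value written over the
 * DCC buffer is built per byte from 0x20, plus 0x80 when the main channels
 * clear to 1/max and 0x40 when the extra channel does. E.g. clearing an
 * RGBA8 surface to (1, 1, 1, 0) yields *reset_value = 0xA0A0A0A0 with
 * *clear_words_needed = false. */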
void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
				   struct pipe_framebuffer_state *fb,
				   struct r600_atom *fb_state,
				   unsigned *buffers, unsigned *dirty_cbufs,
				   const union pipe_color_union *color)
{
	int i;

	/* This function is broken in BE, so just disable this path for now */
#ifdef PIPE_ARCH_BIG_ENDIAN
	return;
#endif

	if (rctx->render_cond)
		return;

	for (i = 0; i < fb->nr_cbufs; i++) {
		struct r600_texture *tex;
		unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;

		if (!fb->cbufs[i])
			continue;

		/* if this colorbuffer is not being cleared */
		if (!(*buffers & clear_bit))
			continue;

		tex = (struct r600_texture *)fb->cbufs[i]->texture;

		/* 128-bit formats are unsupported */
		if (util_format_get_blocksizebits(fb->cbufs[i]->format) > 64) {
			continue;
		}

		/* the clear is allowed if all layers are bound */
		if (fb->cbufs[i]->u.tex.first_layer != 0 ||
		    fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
			continue;
		}

		/* cannot clear mipmapped textures */
		if (fb->cbufs[i]->texture->last_level != 0) {
			continue;
		}

		/* only supported on tiled surfaces */
		if (tex->surface.level[0].mode < RADEON_SURF_MODE_1D) {
			continue;
		}

		/* shared textures can't use fast clear without an explicit flush,
		 * because there is no way to communicate the clear color among
		 * all clients
		 */
		if (tex->resource.is_shared &&
		    !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			continue;

		/* fast color clear with 1D tiling doesn't work on old kernels and CIK */
		if (tex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
		    rctx->chip_class >= CIK &&
		    rctx->screen->info.drm_major == 2 &&
		    rctx->screen->info.drm_minor < 38) {
			continue;
		}

		if (tex->dcc_offset) {
			uint32_t reset_value;
			bool clear_words_needed;

			if (rctx->screen->debug_flags & DBG_NO_DCC_CLEAR)
				continue;

			vi_get_fast_clear_parameters(fb->cbufs[i]->format, color, &reset_value, &clear_words_needed);

			rctx->clear_buffer(&rctx->b, &tex->resource.b.b,
					   tex->dcc_offset, tex->surface.dcc_size,
					   reset_value, R600_COHERENCY_CB_META);

			if (clear_words_needed)
				tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
		} else {
			/* Stoney/RB+ doesn't work with CMASK fast clear. */
			if (rctx->family == CHIP_STONEY)
				continue;

			/* ensure CMASK is enabled */
			r600_texture_alloc_cmask_separate(rctx->screen, tex);
			if (tex->cmask.size == 0) {
				continue;
			}

			/* Do the fast clear. */
			rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b,
					   tex->cmask.offset, tex->cmask.size, 0,
					   R600_COHERENCY_CB_META);

			tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
		}

		evergreen_set_clear_color(tex, fb->cbufs[i]->format, color);

		if (dirty_cbufs)
			*dirty_cbufs |= 1 << i;
		rctx->set_atom_dirty(rctx, fb_state, true);
		*buffers &= ~clear_bit;
	}
}
void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.resource_from_handle = r600_texture_from_handle;
	rscreen->b.resource_get_handle = r600_texture_get_handle;
}
void r600_init_context_texture_functions(struct r600_common_context *rctx)
{
	rctx->b.create_surface = r600_create_surface;
	rctx->b.surface_destroy = r600_surface_destroy;
}