/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe_common.h"
#include "util/u_format.h"
#include "util/u_memory.h"
#include <errno.h>    /* EINVAL for the unsupported-target error path */
#include <inttypes.h> /* PRIu64/PRIx64 in the debug printfs */
#include <math.h>     /* sqrt() in the CMASK macrotile math */
/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void r600_copy_region_with_blit(struct pipe_context *pipe,
				       struct pipe_resource *dst,
				       unsigned dst_level,
				       unsigned dstx, unsigned dsty, unsigned dstz,
				       struct pipe_resource *src,
				       unsigned src_level,
				       const struct pipe_box *src_box)
{
	struct pipe_blit_info blit;

	memset(&blit, 0, sizeof(blit));
	blit.src.resource = src;
	blit.src.format = src->format;
	blit.src.level = src_level;
	blit.src.box = *src_box;
	blit.dst.resource = dst;
	blit.dst.format = dst->format;
	blit.dst.level = dst_level;
	blit.dst.box.x = dstx;
	blit.dst.box.y = dsty;
	blit.dst.box.z = dstz;
	blit.dst.box.width = src_box->width;
	blit.dst.box.height = src_box->height;
	blit.dst.box.depth = src_box->depth;
	blit.mask = util_format_get_mask(src->format) &
		    util_format_get_mask(dst->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	pipe->blit(pipe, &blit);
}
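/* Note: blit.mask above is the intersection of the components present in the
 * source and destination formats, so a blit between mismatched formats copies
 * only the shared components; e.g. PIPE_FORMAT_Z24_UNORM_S8_UINT to
 * PIPE_FORMAT_Z24X8_UNORM would copy depth only. */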
/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = &rtransfer->staging->b.b;
	struct pipe_resource *src = transfer->resource;

	if (src->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
					   src, transfer->level, &transfer->box);
		return;
	}

	if (!rctx->dma_copy(ctx, dst, 0, 0, 0, 0,
			    src, transfer->level,
			    &transfer->box)) {
		ctx->resource_copy_region(ctx, dst, 0, 0, 0, 0,
					  src, transfer->level, &transfer->box);
	}
}
/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = transfer->resource;
	struct pipe_resource *src = &rtransfer->staging->b.b;
	struct pipe_box sbox;

	u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

	if (dst->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, transfer->level,
					   transfer->box.x, transfer->box.y, transfer->box.z,
					   src, 0, &sbox);
		return;
	}

	if (!rctx->dma_copy(ctx, dst, transfer->level,
			    transfer->box.x, transfer->box.y, transfer->box.z,
			    src, 0, &sbox)) {
		ctx->resource_copy_region(ctx, dst, transfer->level,
					  transfer->box.x, transfer->box.y, transfer->box.z,
					  src, 0, &sbox);
	}
}
static unsigned r600_texture_get_offset(struct r600_texture *rtex, unsigned level,
					const struct pipe_box *box)
{
	enum pipe_format format = rtex->resource.b.b.format;

	return rtex->surface.level[level].offset +
	       box->z * rtex->surface.level[level].slice_size +
	       box->y / util_format_get_blockheight(format) * rtex->surface.level[level].pitch_bytes +
	       box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
}
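/* Worked example for r600_texture_get_offset(): for a 4x4-block format with
 * 8 bytes per block (e.g. DXT1), a box at x=8, y=12 maps to
 * level offset + box->z * slice_size + (12/4) * pitch_bytes + (8/4) * 8.
 * Box coordinates are in pixels; the result addresses whole blocks. */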
static int r600_init_surface(struct r600_common_screen *rscreen,
			     struct radeon_surface *surface,
			     const struct pipe_resource *ptex,
			     unsigned array_mode,
			     bool is_flushed_depth)
{
	const struct util_format_description *desc =
		util_format_description(ptex->format);
	bool is_depth, is_stencil;

	is_depth = util_format_has_depth(desc);
	is_stencil = util_format_has_stencil(desc);

	surface->npix_x = ptex->width0;
	surface->npix_y = ptex->height0;
	surface->npix_z = ptex->depth0;
	surface->blk_w = util_format_get_blockwidth(ptex->format);
	surface->blk_h = util_format_get_blockheight(ptex->format);
	surface->blk_d = 1;
	surface->array_size = 1;
	surface->last_level = ptex->last_level;

	if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
	    ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
		surface->bpe = 4; /* stencil is allocated separately on evergreen */
	} else {
		surface->bpe = util_format_get_blocksize(ptex->format);
		/* align byte per element on dword */
		if (surface->bpe == 3) {
			surface->bpe = 4;
		}
	}

	surface->nsamples = ptex->nr_samples ? ptex->nr_samples : 1;
	surface->flags = RADEON_SURF_SET(array_mode, MODE);

	switch (ptex->target) {
	case PIPE_TEXTURE_1D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
		break;
	case PIPE_TEXTURE_RECT:
	case PIPE_TEXTURE_2D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
		break;
	case PIPE_TEXTURE_3D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
		break;
	case PIPE_TEXTURE_1D_ARRAY:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_2D_ARRAY:
	case PIPE_TEXTURE_CUBE_ARRAY: /* cube array layout like 2d array */
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_CUBE:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_CUBEMAP, TYPE);
		break;
	default:
		return -EINVAL;
	}

	if (ptex->bind & PIPE_BIND_SCANOUT) {
		surface->flags |= RADEON_SURF_SCANOUT;
	}

	if (!is_flushed_depth && is_depth) {
		surface->flags |= RADEON_SURF_ZBUFFER;

		if (is_stencil) {
			surface->flags |= RADEON_SURF_SBUFFER |
					  RADEON_SURF_HAS_SBUFFER_MIPTREE;
		}
	}
	if (rscreen->chip_class >= SI) {
		surface->flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	}
	return 0;
}
static int r600_setup_surface(struct pipe_screen *screen,
			      struct r600_texture *rtex,
			      unsigned pitch_in_bytes_override)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	int r;

	r = rscreen->ws->surface_init(rscreen->ws, &rtex->surface);
	if (r) {
		return r;
	}

	rtex->size = rtex->surface.bo_size;

	if (pitch_in_bytes_override && pitch_in_bytes_override != rtex->surface.level[0].pitch_bytes) {
		/* The old DDX on Evergreen overestimates the alignment for 1D
		 * surfaces with only 1 level; honor its pitch instead. */
		rtex->surface.level[0].nblk_x = pitch_in_bytes_override / rtex->surface.bpe;
		rtex->surface.level[0].pitch_bytes = pitch_in_bytes_override;
		rtex->surface.level[0].slice_size = pitch_in_bytes_override * rtex->surface.level[0].nblk_y;
		if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
			rtex->surface.stencil_offset =
			rtex->surface.stencil_level[0].offset = rtex->surface.level[0].slice_size;
		}
	}
	return 0;
}
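/* Note on the override path above: only level 0 is rewritten; e.g. a
 * 1024-byte pitch override on a surface with bpe = 4 yields
 * nblk_x = 1024 / 4 = 256 blocks per row, and slice_size is rescaled to
 * keep the layout consistent. */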
static boolean r600_texture_get_handle(struct pipe_screen* screen,
				       struct pipe_resource *ptex,
				       struct winsys_handle *whandle)
{
	struct r600_texture *rtex = (struct r600_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;
	struct radeon_surface *surface = &rtex->surface;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	rscreen->ws->buffer_set_tiling(resource->buf,
				       NULL,
				       surface->level[0].mode >= RADEON_SURF_MODE_1D ?
				       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
				       surface->level[0].mode >= RADEON_SURF_MODE_2D ?
				       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
				       surface->bankw, surface->bankh,
				       surface->tile_split,
				       surface->stencil_tile_split,
				       surface->mtilea,
				       surface->level[0].pitch_bytes,
				       (surface->flags & RADEON_SURF_SCANOUT) != 0);

	return rscreen->ws->buffer_get_handle(resource->buf,
					      surface->level[0].pitch_bytes, whandle);
}
static void r600_texture_destroy(struct pipe_screen *screen,
				 struct pipe_resource *ptex)
{
	struct r600_texture *rtex = (struct r600_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;

	if (rtex->flushed_depth_texture)
		pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);

	pipe_resource_reference((struct pipe_resource**)&rtex->htile_buffer, NULL);
	if (rtex->cmask_buffer != &rtex->resource) {
		pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);
	}
	pb_reference(&resource->buf, NULL);
	FREE(rtex);
}
static const struct u_resource_vtbl r600_texture_vtbl;
/* The number of samples can be specified independently of the texture. */
void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 unsigned nr_samples,
				 struct r600_fmask_info *out)
{
	/* FMASK is allocated like an ordinary texture. */
	struct radeon_surface fmask = rtex->surface;

	memset(out, 0, sizeof(*out));

	fmask.bo_alignment = 0;
	fmask.bo_size = 0;
	fmask.nsamples = 1;
	fmask.flags |= RADEON_SURF_FMASK;

	if (rscreen->chip_class >= SI) {
		fmask.flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	}

	switch (nr_samples) {
	case 2:
	case 4:
		fmask.bpe = 1;
		if (rscreen->chip_class <= CAYMAN) {
			fmask.bankh = 4;
		}
		break;
	case 8:
		fmask.bpe = 4;
		break;
	default:
		R600_ERR("Invalid sample count for FMASK allocation.\n");
		return;
	}

	/* Overallocate FMASK on R600-R700 to fix colorbuffer corruption.
	 * This can be fixed by writing a separate FMASK allocator specifically
	 * for R600-R700 asics. */
	if (rscreen->chip_class <= R700) {
		fmask.bpe *= 2;
	}

	if (rscreen->ws->surface_init(rscreen->ws, &fmask)) {
		R600_ERR("Got error in surface_init while allocating FMASK.\n");
		return;
	}

	assert(fmask.level[0].mode == RADEON_SURF_MODE_2D);

	out->slice_tile_max = (fmask.level[0].nblk_x * fmask.level[0].nblk_y) / 64;
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->tile_mode_index = fmask.tiling_index[0];
	out->pitch = fmask.level[0].nblk_x;
	out->bank_height = fmask.bankh;
	out->alignment = MAX2(256, fmask.bo_alignment);
	out->size = fmask.bo_size;
}
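/* slice_tile_max above counts 8x8 tiles per FMASK slice
 * (nblk_x * nblk_y / 64) and is stored off-by-one, presumably to match the
 * hardware's TILE_MAX-style register fields. */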
static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	r600_texture_get_fmask_info(rscreen, rtex,
				    rtex->resource.b.b.nr_samples, &rtex->fmask);

	rtex->fmask.offset = align(rtex->size, rtex->fmask.alignment);
	rtex->size = rtex->fmask.offset + rtex->fmask.size;
}
void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 struct r600_cmask_info *out)
{
	unsigned cmask_tile_width = 8;
	unsigned cmask_tile_height = 8;
	unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
	unsigned element_bits = 4;
	unsigned cmask_cache_bits = 1024;
	unsigned num_pipes = rscreen->tiling_info.num_channels;
	unsigned pipe_interleave_bytes = rscreen->tiling_info.group_bytes;

	unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
	unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
	unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
	unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
	unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;

	unsigned pitch_elements = align(rtex->surface.npix_x, macro_tile_width);
	unsigned height = align(rtex->surface.npix_y, macro_tile_height);

	unsigned base_align = num_pipes * pipe_interleave_bytes;
	unsigned slice_bytes =
		((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;

	assert(macro_tile_width % 128 == 0);
	assert(macro_tile_height % 128 == 0);

	out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
	out->alignment = MAX2(256, base_align);
	out->size = rtex->surface.array_size * align(slice_bytes, base_align);
}
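/* Worked example for the macrotile math above, assuming num_pipes = 8:
 * elements_per_macro_tile = (1024 / 4) * 8 = 2048, pixels_per_macro_tile =
 * 2048 * 64 = 131072, sqrt ~= 362, so macro_tile_width = 512 (next power of
 * two) and macro_tile_height = 131072 / 512 = 256; both satisfy the
 * 128-alignment asserts. */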
static void si_texture_get_cmask_info(struct r600_common_screen *rscreen,
				      struct r600_texture *rtex,
				      struct r600_cmask_info *out)
{
	unsigned pipe_interleave_bytes = rscreen->tiling_info.group_bytes;
	unsigned num_pipes = rscreen->tiling_info.num_channels;
	unsigned cl_width, cl_height;

	switch (num_pipes) {
	case 2:
		cl_width = 32;
		cl_height = 16;
		break;
	case 4:
		cl_width = 32;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 32;
		break;
	case 16: /* Hawaii */
		cl_width = 64;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	unsigned base_align = num_pipes * pipe_interleave_bytes;

	unsigned width = align(rtex->surface.npix_x, cl_width*8);
	unsigned height = align(rtex->surface.npix_y, cl_height*8);
	unsigned slice_elements = (width * height) / (8*8);

	/* Each element of CMASK is a nibble. */
	unsigned slice_bytes = slice_elements / 2;

	out->slice_tile_max = (width * height) / (128*128);
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->alignment = MAX2(256, base_align);
	out->size = rtex->surface.array_size * align(slice_bytes, base_align);
}
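/* Worked example, assuming the reconstructed 2-pipe case above (cl_width = 32,
 * cl_height = 16): a 1920x1080 surface aligns to 2048x1152, so
 * slice_elements = 2048 * 1152 / 64 = 36864 and slice_bytes = 18432,
 * i.e. one nibble per 8x8-pixel tile. */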
static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	if (rscreen->chip_class >= SI) {
		si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	} else {
		r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	}

	rtex->cmask.offset = align(rtex->size, rtex->cmask.alignment);
	rtex->size = rtex->cmask.offset + rtex->cmask.size;
}
void r600_texture_init_cmask(struct r600_common_screen *rscreen,
			     struct r600_texture *rtex)
{
	assert(rtex->cmask.size == 0);

	r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);

	rtex->cmask_buffer = (struct r600_resource *)
		pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_DEFAULT, rtex->cmask.size);
	if (rtex->cmask_buffer == NULL) {
		rtex->cmask.size = 0;
	}
}
static unsigned si_texture_htile_alloc_size(struct r600_common_screen *rscreen,
					    struct r600_texture *rtex)
{
	unsigned cl_width, cl_height, width, height;
	unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
	unsigned num_pipes = rscreen->tiling_info.num_channels;

	/* HTILE doesn't work with 1D tiling (there's massive corruption
	 * in glxgears). */
	if (rtex->surface.level[0].mode != RADEON_SURF_MODE_2D)
		return 0;

	switch (num_pipes) {
	case 2:
		cl_width = 32;
		cl_height = 32;
		break;
	case 4:
		cl_width = 64;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 64;
		break;
	case 16:
		cl_width = 128;
		cl_height = 64;
		break;
	default:
		assert(0);
		return 0;
	}

	width = align(rtex->surface.npix_x, cl_width * 8);
	height = align(rtex->surface.npix_y, cl_height * 8);

	slice_elements = (width * height) / (8 * 8);
	slice_bytes = slice_elements * 4;

	pipe_interleave_bytes = rscreen->tiling_info.group_bytes;
	base_align = num_pipes * pipe_interleave_bytes;

	return rtex->surface.array_size * align(slice_bytes, base_align);
}
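/* Each HTILE element computed above is 4 bytes and covers an 8x8-pixel tile:
 * slice_elements = (width * height) / (8 * 8), slice_bytes = slice_elements * 4. */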
static unsigned r600_texture_htile_alloc_size(struct r600_common_screen *rscreen,
					      struct r600_texture *rtex)
{
	unsigned sw = rtex->surface.level[0].nblk_x * rtex->surface.blk_w;
	unsigned sh = rtex->surface.level[0].nblk_y * rtex->surface.blk_h;
	unsigned npipes = rscreen->info.r600_num_tile_pipes;
	unsigned htile_size;

	/* XXX also use it for other texture targets */
	if (rscreen->info.drm_minor < 26 ||
	    rtex->resource.b.b.target != PIPE_TEXTURE_2D ||
	    rtex->surface.level[0].nblk_x < 32 ||
	    rtex->surface.level[0].nblk_y < 32) {
		return 0;
	}

	/* this alignment and htile size only apply to linear htile buffer */
	sw = align(sw, 16 << 3);
	sh = align(sh, npipes << 3);
	htile_size = (sw >> 3) * (sh >> 3) * 4;
	/* must be aligned with 2K * npipes */
	htile_size = align(htile_size, (2 << 10) * npipes);
	return htile_size;
}
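/* Worked example for r600_texture_htile_alloc_size(): a 256x256 surface with
 * npipes = 8 keeps sw = sh = 256 after alignment, so htile_size =
 * 32 * 32 * 4 = 4096 bytes, rounded up to the 2K * npipes alignment,
 * i.e. 16384 bytes. */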
static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	unsigned htile_size;

	if (rscreen->chip_class >= SI) {
		htile_size = si_texture_htile_alloc_size(rscreen, rtex);
	} else {
		htile_size = r600_texture_htile_alloc_size(rscreen, rtex);
	}

	if (!htile_size)
		return;

	/* XXX don't allocate it separately */
	rtex->htile_buffer = (struct r600_resource*)
		pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_DEFAULT, htile_size);
	if (rtex->htile_buffer == NULL) {
		/* this is not a fatal error as we can still keep rendering
		 * without htile buffer */
		R600_ERR("Failed to create buffer object for htile buffer.\n");
	} else {
		r600_screen_clear_buffer(rscreen, &rtex->htile_buffer->b.b, 0, htile_size, 0);
	}
}
/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
r600_texture_create_object(struct pipe_screen *screen,
			   const struct pipe_resource *base,
			   unsigned pitch_in_bytes_override,
			   struct pb_buffer *buf,
			   struct radeon_surface *surface)
{
	struct r600_texture *rtex;
	struct r600_resource *resource;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	rtex = CALLOC_STRUCT(r600_texture);
	if (rtex == NULL)
		return NULL;

	resource = &rtex->resource;
	resource->b.b = *base;
	resource->b.vtbl = &r600_texture_vtbl;
	pipe_reference_init(&resource->b.b.reference, 1);
	resource->b.b.screen = screen;
	rtex->pitch_override = pitch_in_bytes_override;

	/* don't include stencil-only formats which we don't support for rendering */
	rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));

	rtex->surface = *surface;
	if (r600_setup_surface(screen, rtex, pitch_in_bytes_override)) {
		FREE(rtex);
		return NULL;
	}

	/* Tiled depth textures utilize the non-displayable tile order.
	 * This must be done after r600_setup_surface.
	 * Applies to R600-Cayman. */
	rtex->non_disp_tiling = rtex->is_depth && rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D;

	if (rtex->is_depth) {
		if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
				     R600_RESOURCE_FLAG_FLUSHED_DEPTH)) &&
		    (rscreen->debug_flags & DBG_HYPERZ)) {
			r600_texture_allocate_htile(rscreen, rtex);
		}
	} else {
		if (base->nr_samples > 1) {
			if (!buf) {
				r600_texture_allocate_fmask(rscreen, rtex);
				r600_texture_allocate_cmask(rscreen, rtex);
				rtex->cmask_buffer = &rtex->resource;
			}
			if (!rtex->fmask.size || !rtex->cmask.size) {
				FREE(rtex);
				return NULL;
			}
		}
	}

	/* Now create the backing buffer. */
	if (!buf) {
		if (!r600_init_resource(rscreen, resource, rtex->size,
					rtex->surface.bo_alignment, FALSE)) {
			FREE(rtex);
			return NULL;
		}
	} else {
		resource->buf = buf;
		resource->cs_buf = rscreen->ws->buffer_get_cs_handle(buf);
		resource->domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
	}

	if (rtex->cmask.size) {
		/* Initialize the cmask to 0xCC (= compressed state). */
		r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
					 rtex->cmask.offset, rtex->cmask.size, 0xCCCCCCCC);
	}

	if (rscreen->debug_flags & DBG_VM) {
		fprintf(stderr, "VM start=0x%"PRIx64" end=0x%"PRIx64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
			r600_resource_va(screen, &rtex->resource.b.b),
			r600_resource_va(screen, &rtex->resource.b.b) + rtex->resource.buf->size,
			base->width0, base->height0, util_max_layer(base, 0)+1, base->last_level+1,
			base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
	}

	if (rscreen->debug_flags & DBG_TEX ||
	    (rtex->resource.b.b.last_level > 0 && rscreen->debug_flags & DBG_TEXMIP)) {
		printf("Texture: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
		       "blk_h=%u, blk_d=%u, array_size=%u, last_level=%u, "
		       "bpe=%u, nsamples=%u, flags=0x%x, %s\n",
		       rtex->surface.npix_x, rtex->surface.npix_y,
		       rtex->surface.npix_z, rtex->surface.blk_w,
		       rtex->surface.blk_h, rtex->surface.blk_d,
		       rtex->surface.array_size, rtex->surface.last_level,
		       rtex->surface.bpe, rtex->surface.nsamples,
		       rtex->surface.flags, util_format_short_name(base->format));
		for (int i = 0; i <= rtex->surface.last_level; i++) {
			printf("  L %i: offset=%"PRIu64", slice_size=%"PRIu64", npix_x=%u, "
			       "npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
			       "nblk_z=%u, pitch_bytes=%u, mode=%u\n",
			       i, rtex->surface.level[i].offset,
			       rtex->surface.level[i].slice_size,
			       u_minify(rtex->resource.b.b.width0, i),
			       u_minify(rtex->resource.b.b.height0, i),
			       u_minify(rtex->resource.b.b.depth0, i),
			       rtex->surface.level[i].nblk_x,
			       rtex->surface.level[i].nblk_y,
			       rtex->surface.level[i].nblk_z,
			       rtex->surface.level[i].pitch_bytes,
			       rtex->surface.level[i].mode);
		}
		if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
			for (int i = 0; i <= rtex->surface.last_level; i++) {
				printf("  S %i: offset=%"PRIu64", slice_size=%"PRIu64", npix_x=%u, "
				       "npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
				       "nblk_z=%u, pitch_bytes=%u, mode=%u\n",
				       i, rtex->surface.stencil_level[i].offset,
				       rtex->surface.stencil_level[i].slice_size,
				       u_minify(rtex->resource.b.b.width0, i),
				       u_minify(rtex->resource.b.b.height0, i),
				       u_minify(rtex->resource.b.b.depth0, i),
				       rtex->surface.stencil_level[i].nblk_x,
				       rtex->surface.stencil_level[i].nblk_y,
				       rtex->surface.stencil_level[i].nblk_z,
				       rtex->surface.stencil_level[i].pitch_bytes,
				       rtex->surface.stencil_level[i].mode);
			}
		}
	}

	return rtex;
}
static unsigned r600_choose_tiling(struct r600_common_screen *rscreen,
				   const struct pipe_resource *templ)
{
	const struct util_format_description *desc = util_format_description(templ->format);

	/* MSAA resources must be 2D tiled. */
	if (templ->nr_samples > 1)
		return RADEON_SURF_MODE_2D;

	/* Transfer resources should be linear. */
	if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
		return RADEON_SURF_MODE_LINEAR_ALIGNED;

	/* Handle common candidates for the linear mode.
	 * Compressed textures must always be tiled. */
	if (!(templ->flags & R600_RESOURCE_FLAG_FORCE_TILING) &&
	    !util_format_is_compressed(templ->format)) {
		/* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600-Cayman. */
		if (rscreen->chip_class <= CAYMAN &&
		    desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Cursors are linear on SI.
		 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
		if (rscreen->chip_class >= SI &&
		    (templ->bind & PIPE_BIND_CURSOR))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		if (templ->bind & PIPE_BIND_LINEAR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures with a very small height are recommended to be linear. */
		if (templ->target == PIPE_TEXTURE_1D ||
		    templ->target == PIPE_TEXTURE_1D_ARRAY ||
		    templ->height0 <= 4)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures likely to be mapped often. */
		if (templ->usage == PIPE_USAGE_STAGING ||
		    templ->usage == PIPE_USAGE_STREAM)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Make small textures 1D tiled. */
	if (templ->width0 <= 16 || templ->height0 <= 16)
		return RADEON_SURF_MODE_1D;

	/* The allocator will switch to 1D if needed. */
	return RADEON_SURF_MODE_2D;
}
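/* The checks in r600_choose_tiling() are ordered from hard constraints
 * (MSAA must be 2D-tiled, transfers must be linear) through per-chip linear
 * exceptions to size heuristics; 2D tiling is the default, and the allocator
 * may still demote it to 1D. */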
struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
					  const struct pipe_resource *templ)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_surface surface = {0};
	int r;

	r = r600_init_surface(rscreen, &surface, templ,
			      r600_choose_tiling(rscreen, templ),
			      templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);
	if (r) {
		return NULL;
	}
	r = rscreen->ws->surface_best(rscreen->ws, &surface);
	if (r) {
		return NULL;
	}

	return (struct pipe_resource *)r600_texture_create_object(screen, templ,
								  0, NULL, &surface);
}
static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
						      const struct pipe_resource *templ,
						      struct winsys_handle *whandle)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct pb_buffer *buf = NULL;
	unsigned stride = 0;
	unsigned array_mode;
	enum radeon_bo_layout micro, macro;
	struct radeon_surface surface;
	bool scanout;
	int r;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	    templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride);
	if (!buf)
		return NULL;

	rscreen->ws->buffer_get_tiling(buf, &micro, &macro,
				       &surface.bankw, &surface.bankh,
				       &surface.tile_split,
				       &surface.stencil_tile_split,
				       &surface.mtilea, &scanout);

	if (macro == RADEON_LAYOUT_TILED)
		array_mode = RADEON_SURF_MODE_2D;
	else if (micro == RADEON_LAYOUT_TILED)
		array_mode = RADEON_SURF_MODE_1D;
	else
		array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

	r = r600_init_surface(rscreen, &surface, templ, array_mode, false);
	if (r) {
		return NULL;
	}

	if (scanout)
		surface.flags |= RADEON_SURF_SCANOUT;

	return (struct pipe_resource *)r600_texture_create_object(screen, templ,
								  stride, buf, &surface);
}
bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     struct r600_texture **staging)
{
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct pipe_resource resource;
	struct r600_texture **flushed_depth_texture = staging ?
			staging : &rtex->flushed_depth_texture;

	if (!staging && rtex->flushed_depth_texture)
		return true; /* it's ready */

	resource.target = texture->target;
	resource.format = texture->format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
	resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;

	if (staging)
		resource.flags |= R600_RESOURCE_FLAG_TRANSFER;

	*flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (*flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold flushed depth\n");
		return false;
	}

	(*flushed_depth_texture)->is_flushing_texture = TRUE;
	(*flushed_depth_texture)->non_disp_tiling = false;
	return true;
}
/*
 * Initialize the pipe_resource descriptor to be of the same size as the box,
 * which is supposed to hold a subregion of the texture "orig" at the given
 * mipmap level.
 */
static void r600_init_temp_resource_from_box(struct pipe_resource *res,
					     struct pipe_resource *orig,
					     const struct pipe_box *box,
					     unsigned level, unsigned flags)
{
	memset(res, 0, sizeof(*res));
	res->format = orig->format;
	res->width0 = box->width;
	res->height0 = box->height;
	res->depth0 = 1;
	res->array_size = 1;
	res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	res->flags = flags;

	/* We must set the correct texture target and dimensions for a 3D box. */
	if (box->depth > 1 && util_max_layer(orig, level) > 0)
		res->target = orig->target;
	else
		res->target = PIPE_TEXTURE_2D;

	switch (res->target) {
	case PIPE_TEXTURE_1D_ARRAY:
	case PIPE_TEXTURE_2D_ARRAY:
	case PIPE_TEXTURE_CUBE_ARRAY:
		res->array_size = box->depth;
		break;
	case PIPE_TEXTURE_3D:
		res->depth0 = box->depth;
		break;
	default:
		break;
	}
}
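/* For example, mapping a 64x64x4 box of a 3D texture yields a temporary
 * PIPE_TEXTURE_3D with depth0 = 4, the same box of a 2D array texture yields
 * array_size = 4, and a single-layer box always becomes a plain 2D texture. */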
static void *r600_texture_transfer_map(struct pipe_context *ctx,
				       struct pipe_resource *texture,
				       unsigned level,
				       unsigned usage,
				       const struct pipe_box *box,
				       struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct r600_transfer *trans;
	boolean use_staging_texture = FALSE;
	struct r600_resource *buf;
	unsigned offset = 0;
	char *map;

	/* We cannot map a tiled texture directly because the data is
	 * in a different order, therefore we do detiling using a blit.
	 *
	 * Also, use a temporary in GTT memory for read transfers, as
	 * the CPU is much happier reading out of cached system memory
	 * than uncached VRAM.
	 */
	if (rtex->surface.level[level].mode >= RADEON_SURF_MODE_1D)
		use_staging_texture = TRUE;

	/* Untiled buffers in VRAM, which is slow for CPU reads and writes */
	if (!(usage & PIPE_TRANSFER_MAP_DIRECTLY) &&
	    (rtex->resource.domains == RADEON_DOMAIN_VRAM)) {
		use_staging_texture = TRUE;
	}

	/* Use a staging texture for uploads if the underlying BO is busy. */
	if (!(usage & PIPE_TRANSFER_READ) &&
	    (r600_rings_is_buffer_referenced(rctx, rtex->resource.cs_buf, RADEON_USAGE_READWRITE) ||
	     rctx->ws->buffer_is_busy(rtex->resource.buf, RADEON_USAGE_READWRITE))) {
		use_staging_texture = TRUE;
	}

	if (texture->flags & R600_RESOURCE_FLAG_TRANSFER) {
		use_staging_texture = FALSE;
	}

	if (use_staging_texture && (usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
		return NULL;
	}

	trans = CALLOC_STRUCT(r600_transfer);
	if (trans == NULL)
		return NULL;
	trans->transfer.resource = texture;
	trans->transfer.level = level;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;

	if (rtex->is_depth) {
		struct r600_texture *staging_depth;

		if (rtex->resource.b.b.nr_samples > 1) {
			/* MSAA depth buffers need to be converted to single sample buffers.
			 *
			 * Mapping MSAA depth buffers can occur if ReadPixels is called
			 * with a multisample GLX visual.
			 *
			 * First downsample the depth buffer to a temporary texture,
			 * then decompress the temporary one to staging.
			 *
			 * Only the region being mapped is transferred.
			 */
			struct pipe_resource resource;

			r600_init_temp_resource_from_box(&resource, texture, box, level, 0);

			if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			if (usage & PIPE_TRANSFER_READ) {
				struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);

				r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
				rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
							    0, 0, 0, box->depth, 0, 0);
				pipe_resource_reference((struct pipe_resource **)&temp, NULL);
			}
		} else {
			/* XXX: only readback the rectangle which is being mapped? */
			/* XXX: when discard is true, no need to read back from depth texture */
			if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			rctx->blit_decompress_depth(ctx, rtex, staging_depth,
						    level, level,
						    box->z, box->z + box->depth - 1,
						    0, 0);

			offset = r600_texture_get_offset(staging_depth, level, box);
		}

		trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = staging_depth->surface.level[level].slice_size;
		trans->staging = (struct r600_resource*)staging_depth;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct r600_texture *staging;

		r600_init_temp_resource_from_box(&resource, texture, box, level,
						 R600_RESOURCE_FLAG_TRANSFER);

		/* Create the temporary texture. */
		staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
		if (staging == NULL) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			FREE(trans);
			return NULL;
		}

		trans->staging = &staging->resource;
		trans->transfer.stride = staging->surface.level[0].pitch_bytes;
		trans->transfer.layer_stride = staging->surface.level[0].slice_size;
		if (usage & PIPE_TRANSFER_READ) {
			r600_copy_to_staging_texture(ctx, trans);
		}
	} else {
		/* the resource is mapped directly */
		trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
		offset = r600_texture_get_offset(rtex, level, box);
	}

	if (trans->staging) {
		buf = trans->staging;
	} else {
		buf = &rtex->resource;
	}

	if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
		pipe_resource_reference((struct pipe_resource **)&trans->staging, NULL);
		FREE(trans);
		return NULL;
	}

	*ptransfer = &trans->transfer;
	return map + offset;
}
static void r600_texture_transfer_unmap(struct pipe_context *ctx,
					struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct radeon_winsys_cs_handle *buf;
	struct pipe_resource *texture = transfer->resource;
	struct r600_texture *rtex = (struct r600_texture*)texture;

	if (rtransfer->staging) {
		buf = rtransfer->staging->cs_buf;
	} else {
		buf = r600_resource(transfer->resource)->cs_buf;
	}
	rctx->ws->buffer_unmap(buf);

	if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
		if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &rtransfer->staging->b.b, transfer->level,
						  &transfer->box);
		} else {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
	}

	if (rtransfer->staging)
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);

	FREE(transfer);
}
static const struct u_resource_vtbl r600_texture_vtbl =
{
	NULL,				/* get_handle */
	r600_texture_destroy,		/* resource_destroy */
	r600_texture_transfer_map,	/* transfer_map */
	NULL,				/* transfer_flush_region */
	r600_texture_transfer_unmap,	/* transfer_unmap */
	NULL				/* transfer_inline_write */
};
struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
						struct pipe_resource *texture,
						const struct pipe_surface *templ,
						unsigned width, unsigned height)
{
	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);

	if (surface == NULL)
		return NULL;

	assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
	assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));

	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.context = pipe;
	surface->base.format = templ->format;
	surface->base.width = width;
	surface->base.height = height;
	surface->base.u = templ->u;
	return &surface->base;
}
static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
						struct pipe_resource *tex,
						const struct pipe_surface *templ)
{
	unsigned level = templ->u.tex.level;

	return r600_create_surface_custom(pipe, tex, templ,
					  u_minify(tex->width0, level),
					  u_minify(tex->height0, level));
}
static void r600_surface_destroy(struct pipe_context *pipe,
				 struct pipe_surface *surface)
{
	struct r600_surface *surf = (struct r600_surface*)surface;
	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask, NULL);
	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask, NULL);
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}
void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.resource_from_handle = r600_texture_from_handle;
	rscreen->b.resource_get_handle = r600_texture_get_handle;
}

void r600_init_context_texture_functions(struct r600_common_context *rctx)
{
	rctx->b.create_surface = r600_create_surface;
	rctx->b.surface_destroy = r600_surface_destroy;
}