/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe_common.h"
#include "r600_cs.h"
#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include <errno.h>
#include <inttypes.h>
/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void r600_copy_region_with_blit(struct pipe_context *pipe,
				       struct pipe_resource *dst,
				       unsigned dst_level,
				       unsigned dstx, unsigned dsty, unsigned dstz,
				       struct pipe_resource *src,
				       unsigned src_level,
				       const struct pipe_box *src_box)
{
	struct pipe_blit_info blit;

	memset(&blit, 0, sizeof(blit));
	blit.src.resource = src;
	blit.src.format = src->format;
	blit.src.level = src_level;
	blit.src.box = *src_box;
	blit.dst.resource = dst;
	blit.dst.format = dst->format;
	blit.dst.level = dst_level;
	blit.dst.box.x = dstx;
	blit.dst.box.y = dsty;
	blit.dst.box.z = dstz;
	blit.dst.box.width = src_box->width;
	blit.dst.box.height = src_box->height;
	blit.dst.box.depth = src_box->depth;
	blit.mask = util_format_get_mask(src->format) &
		    util_format_get_mask(dst->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	if (blit.mask) {
		pipe->blit(pipe, &blit);
	}
}
/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = &rtransfer->staging->b.b;
	struct pipe_resource *src = transfer->resource;

	if (src->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
					   src, transfer->level, &transfer->box);
		return;
	}

	rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
		       &transfer->box);
}
/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = transfer->resource;
	struct pipe_resource *src = &rtransfer->staging->b.b;
	struct pipe_box sbox;

	u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

	if (dst->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, transfer->level,
					   transfer->box.x, transfer->box.y, transfer->box.z,
					   src, 0, &sbox);
		return;
	}

	rctx->dma_copy(ctx, dst, transfer->level,
		       transfer->box.x, transfer->box.y, transfer->box.z,
		       src, 0, &sbox);
}
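/*
 * Note on the two staging helpers above: both prefer the asynchronous DMA
 * engine (rctx->dma_copy) and only fall back to a full blit when the
 * GPU-side resource is multisampled, presumably because the staging texture
 * is always single-sampled and only the blit path can up/downsample.
 */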
static unsigned r600_texture_get_offset(struct r600_texture *rtex, unsigned level,
					const struct pipe_box *box)
{
	enum pipe_format format = rtex->resource.b.b.format;

	return rtex->surface.level[level].offset +
	       box->z * rtex->surface.level[level].slice_size +
	       box->y / util_format_get_blockheight(format) * rtex->surface.level[level].pitch_bytes +
	       box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
}
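/*
 * Worked example of the offset math above, with illustrative numbers: for
 * an uncompressed 32bpp format (1x1 block, blocksize 4), level offset 0,
 * slice_size 1048576 and pitch_bytes 4096, the box (x=16, y=8, z=2) maps to
 *   0 + 2*1048576 + 8*4096 + 16*4 = 2129984 bytes.
 * For block-compressed formats, x and y are first divided by the block
 * dimensions, so the returned offset always addresses a whole block.
 */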
static int r600_init_surface(struct r600_common_screen *rscreen,
			     struct radeon_surface *surface,
			     const struct pipe_resource *ptex,
			     unsigned array_mode,
			     bool is_flushed_depth)
{
	const struct util_format_description *desc =
		util_format_description(ptex->format);
	bool is_depth, is_stencil;

	is_depth = util_format_has_depth(desc);
	is_stencil = util_format_has_stencil(desc);

	surface->npix_x = ptex->width0;
	surface->npix_y = ptex->height0;
	surface->npix_z = ptex->depth0;
	surface->blk_w = util_format_get_blockwidth(ptex->format);
	surface->blk_h = util_format_get_blockheight(ptex->format);
	surface->blk_d = 1;
	surface->array_size = 1;
	surface->last_level = ptex->last_level;

	if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
	    ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
		surface->bpe = 4; /* stencil is allocated separately on evergreen */
	} else {
		surface->bpe = util_format_get_blocksize(ptex->format);
		/* align byte per element on dword */
		if (surface->bpe == 3) {
			surface->bpe = 4;
		}
	}

	surface->nsamples = ptex->nr_samples ? ptex->nr_samples : 1;
	surface->flags = RADEON_SURF_SET(array_mode, MODE);

	switch (ptex->target) {
	case PIPE_TEXTURE_1D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
		break;
	case PIPE_TEXTURE_RECT:
	case PIPE_TEXTURE_2D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
		break;
	case PIPE_TEXTURE_3D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
		break;
	case PIPE_TEXTURE_1D_ARRAY:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_2D_ARRAY:
	case PIPE_TEXTURE_CUBE_ARRAY: /* cube array layout like 2d array */
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_CUBE:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_CUBEMAP, TYPE);
		break;
	case PIPE_BUFFER:
	default:
		return -EINVAL;
	}

	if (ptex->bind & PIPE_BIND_SCANOUT) {
		surface->flags |= RADEON_SURF_SCANOUT;
	}

	if (!is_flushed_depth && is_depth) {
		surface->flags |= RADEON_SURF_ZBUFFER;

		if (is_stencil) {
			surface->flags |= RADEON_SURF_SBUFFER |
					  RADEON_SURF_HAS_SBUFFER_MIPTREE;
		}
	}
	if (rscreen->chip_class >= SI) {
		surface->flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	}
	return 0;
}
static int r600_setup_surface(struct pipe_screen *screen,
			      struct r600_texture *rtex,
			      unsigned pitch_in_bytes_override)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	int r;

	r = rscreen->ws->surface_init(rscreen->ws, &rtex->surface);
	if (r) {
		return r;
	}

	rtex->size = rtex->surface.bo_size;

	if (pitch_in_bytes_override && pitch_in_bytes_override != rtex->surface.level[0].pitch_bytes) {
		/* old ddx on evergreen overestimates alignment for 1d, only 1 level
		 * for those
		 */
		rtex->surface.level[0].nblk_x = pitch_in_bytes_override / rtex->surface.bpe;
		rtex->surface.level[0].pitch_bytes = pitch_in_bytes_override;
		rtex->surface.level[0].slice_size = pitch_in_bytes_override * rtex->surface.level[0].nblk_y;
		if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
			rtex->surface.stencil_offset =
			rtex->surface.stencil_level[0].offset = rtex->surface.level[0].slice_size;
		}
	}
	return 0;
}
static boolean r600_texture_get_handle(struct pipe_screen* screen,
				       struct pipe_resource *ptex,
				       struct winsys_handle *whandle)
{
	struct r600_texture *rtex = (struct r600_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;
	struct radeon_surface *surface = &rtex->surface;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	rscreen->ws->buffer_set_tiling(resource->buf,
				       NULL,
				       surface->level[0].mode >= RADEON_SURF_MODE_1D ?
				       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
				       surface->level[0].mode >= RADEON_SURF_MODE_2D ?
				       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
				       surface->bankw, surface->bankh,
				       surface->tile_split,
				       surface->stencil_tile_split,
				       surface->mtilea,
				       surface->level[0].pitch_bytes,
				       (surface->flags & RADEON_SURF_SCANOUT) != 0);

	return rscreen->ws->buffer_get_handle(resource->buf,
					      surface->level[0].pitch_bytes, whandle);
}
static void r600_texture_destroy(struct pipe_screen *screen,
				 struct pipe_resource *ptex)
{
	struct r600_texture *rtex = (struct r600_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;

	if (rtex->flushed_depth_texture)
		pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);

	pipe_resource_reference((struct pipe_resource**)&rtex->htile_buffer, NULL);
	if (rtex->cmask_buffer != &rtex->resource) {
		pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);
	}
	pb_reference(&resource->buf, NULL);
	FREE(rtex);
}
static const struct u_resource_vtbl r600_texture_vtbl;
/* The number of samples can be specified independently of the texture. */
void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 unsigned nr_samples,
				 struct r600_fmask_info *out)
{
	/* FMASK is allocated like an ordinary texture. */
	struct radeon_surface fmask = rtex->surface;

	memset(out, 0, sizeof(*out));

	fmask.bo_alignment = 0;
	fmask.bo_size = 0;
	fmask.nsamples = 1;
	fmask.flags |= RADEON_SURF_FMASK;

	/* Force 2D tiling if it wasn't set. This may occur when creating
	 * FMASK for MSAA resolve on R6xx. On R6xx, the single-sample
	 * destination buffer must have an FMASK too. */
	fmask.flags = RADEON_SURF_CLR(fmask.flags, MODE);
	fmask.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);

	if (rscreen->chip_class >= SI) {
		fmask.flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	}

	switch (nr_samples) {
	case 2:
	case 4:
		fmask.bpe = 1;
		if (rscreen->chip_class <= CAYMAN) {
			fmask.bankh = 4;
		}
		break;
	case 8:
		fmask.bpe = 4;
		break;
	default:
		R600_ERR("Invalid sample count for FMASK allocation.\n");
		return;
	}

	/* Overallocate FMASK on R600-R700 to fix colorbuffer corruption.
	 * This can be fixed by writing a separate FMASK allocator specifically
	 * for R600-R700 asics. */
	if (rscreen->chip_class <= R700) {
		fmask.bpe *= 2;
	}

	if (rscreen->ws->surface_init(rscreen->ws, &fmask)) {
		R600_ERR("Got error in surface_init while allocating FMASK.\n");
		return;
	}

	assert(fmask.level[0].mode == RADEON_SURF_MODE_2D);

	out->slice_tile_max = (fmask.level[0].nblk_x * fmask.level[0].nblk_y) / 64;
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->tile_mode_index = fmask.tiling_index[0];
	out->pitch = fmask.level[0].nblk_x;
	out->bank_height = fmask.bankh;
	out->alignment = MAX2(256, fmask.bo_alignment);
	out->size = fmask.bo_size;
}
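/*
 * Background: FMASK (the "fragment mask") accompanies compressed MSAA
 * color buffers and records, per pixel, which stored color fragment each
 * sample refers to. It is laid out like an ordinary 2D-tiled single-sample
 * texture, which is why the function above clones the color surface
 * description and re-runs surface_init on it.
 */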
static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	r600_texture_get_fmask_info(rscreen, rtex,
				    rtex->resource.b.b.nr_samples, &rtex->fmask);

	rtex->fmask.offset = align(rtex->size, rtex->fmask.alignment);
	rtex->size = rtex->fmask.offset + rtex->fmask.size;
}
void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 struct r600_cmask_info *out)
{
	unsigned cmask_tile_width = 8;
	unsigned cmask_tile_height = 8;
	unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
	unsigned element_bits = 4;
	unsigned cmask_cache_bits = 1024;
	unsigned num_pipes = rscreen->tiling_info.num_channels;
	unsigned pipe_interleave_bytes = rscreen->tiling_info.group_bytes;

	unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
	unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
	unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
	unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
	unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;

	unsigned pitch_elements = align(rtex->surface.npix_x, macro_tile_width);
	unsigned height = align(rtex->surface.npix_y, macro_tile_height);

	unsigned base_align = num_pipes * pipe_interleave_bytes;
	unsigned slice_bytes =
		((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;

	assert(macro_tile_width % 128 == 0);
	assert(macro_tile_height % 128 == 0);

	out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
	out->alignment = MAX2(256, base_align);
	out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		    align(slice_bytes, base_align);
}
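/*
 * Illustrative numbers for the math above, assuming num_pipes=4 and
 * group_bytes=256: elements_per_macro_tile = (1024/4)*4 = 1024 and
 * pixels_per_macro_tile = 1024*64 = 65536, i.e. a 256x256-pixel macro
 * tile. A 1920x1080 surface is padded to 2048x1280, giving
 * slice_bytes = (2048*1280*4/8)/64 = 20480 and
 * slice_tile_max = 2048*1280/(128*128) - 1 = 159.
 */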
static void si_texture_get_cmask_info(struct r600_common_screen *rscreen,
				      struct r600_texture *rtex,
				      struct r600_cmask_info *out)
{
	unsigned pipe_interleave_bytes = rscreen->tiling_info.group_bytes;
	unsigned num_pipes = rscreen->tiling_info.num_channels;
	unsigned cl_width, cl_height;

	switch (num_pipes) {
	case 2:
		cl_width = 32;
		cl_height = 16;
		break;
	case 4:
		cl_width = 32;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 32;
		break;
	case 16: /* Hawaii */
		cl_width = 64;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	unsigned base_align = num_pipes * pipe_interleave_bytes;

	unsigned width = align(rtex->surface.npix_x, cl_width*8);
	unsigned height = align(rtex->surface.npix_y, cl_height*8);
	unsigned slice_elements = (width * height) / (8*8);

	/* Each element of CMASK is a nibble. */
	unsigned slice_bytes = slice_elements / 2;

	out->slice_tile_max = (width * height) / (128*128);
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->alignment = MAX2(256, base_align);
	out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		    align(slice_bytes, base_align);
}
static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	if (rscreen->chip_class >= SI) {
		si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	} else {
		r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	}

	rtex->cmask.offset = align(rtex->size, rtex->cmask.alignment);
	rtex->size = rtex->cmask.offset + rtex->cmask.size;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
}
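/*
 * Layout note: the FMASK and CMASK allocators above do not create separate
 * buffers; they append the metadata to the texture's own backing buffer by
 * bumping rtex->size, placing each block at an offset aligned to that
 * block's own alignment requirement. Only r600_texture_alloc_cmask_separate
 * below allocates a standalone buffer.
 */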
static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
					      struct r600_texture *rtex)
{
	if (rtex->cmask_buffer)
		return;

	assert(rtex->cmask.size == 0);

	if (rscreen->chip_class >= SI) {
		si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	} else {
		r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	}

	rtex->cmask_buffer = (struct r600_resource *)
		pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_DEFAULT, rtex->cmask.size);
	if (rtex->cmask_buffer == NULL) {
		rtex->cmask.size = 0;
		return;
	}

	/* update colorbuffer state bits */
	rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
}
static unsigned r600_texture_get_htile_size(struct r600_common_screen *rscreen,
					    struct r600_texture *rtex)
{
	unsigned cl_width, cl_height, width, height;
	unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
	unsigned num_pipes = rscreen->tiling_info.num_channels;

	if (rscreen->chip_class <= EVERGREEN &&
	    rscreen->info.drm_minor < 26)
		return 0;

	/* HW bug on R6xx. */
	if (rscreen->chip_class == R600 &&
	    (rtex->surface.level[0].npix_x > 7680 ||
	     rtex->surface.level[0].npix_y > 7680))
		return 0;

	/* HTILE is broken with 1D tiling on old kernels and CIK. */
	if (rscreen->chip_class >= CIK &&
	    rtex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
	    rscreen->info.drm_minor < 38)
		return 0;

	switch (num_pipes) {
	case 1:
		cl_width = 32;
		cl_height = 16;
		break;
	case 2:
		cl_width = 32;
		cl_height = 32;
		break;
	case 4:
		cl_width = 64;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 64;
		break;
	case 16:
		cl_width = 128;
		cl_height = 64;
		break;
	default:
		return 0;
	}

	width = align(rtex->surface.npix_x, cl_width * 8);
	height = align(rtex->surface.npix_y, cl_height * 8);

	slice_elements = (width * height) / (8 * 8);
	slice_bytes = slice_elements * 4;

	pipe_interleave_bytes = rscreen->tiling_info.group_bytes;
	base_align = num_pipes * pipe_interleave_bytes;

	return (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		align(slice_bytes, base_align);
}
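/*
 * Size intuition for the function above: HTILE stores one 32-bit word of
 * HiZ metadata per 8x8 pixel block (hence slice_elements * 4), after the
 * surface is padded to the pipe-configuration-dependent tile of
 * cl_width*8 x cl_height*8 pixels. For example, a 1024x1024 slice that
 * needs no padding is 16384 blocks = 65536 bytes before base alignment.
 */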
static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	unsigned htile_size = r600_texture_get_htile_size(rscreen, rtex);

	if (!htile_size)
		return;

	rtex->htile_buffer = (struct r600_resource*)
			     pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
						PIPE_USAGE_DEFAULT, htile_size);
	if (rtex->htile_buffer == NULL) {
		/* this is not a fatal error as we can still keep rendering
		 * without htile buffer */
		R600_ERR("Failed to create buffer object for htile buffer.\n");
	} else {
		r600_screen_clear_buffer(rscreen, &rtex->htile_buffer->b.b, 0, htile_size, 0);
	}
}
/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
r600_texture_create_object(struct pipe_screen *screen,
			   const struct pipe_resource *base,
			   unsigned pitch_in_bytes_override,
			   struct pb_buffer *buf,
			   struct radeon_surface *surface)
{
	struct r600_texture *rtex;
	struct r600_resource *resource;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	rtex = CALLOC_STRUCT(r600_texture);
	if (rtex == NULL)
		return NULL;

	resource = &rtex->resource;
	resource->b.b = *base;
	resource->b.vtbl = &r600_texture_vtbl;
	pipe_reference_init(&resource->b.b.reference, 1);
	resource->b.b.screen = screen;
	rtex->pitch_override = pitch_in_bytes_override;

	/* don't include stencil-only formats which we don't support for rendering */
	rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));

	rtex->surface = *surface;
	if (r600_setup_surface(screen, rtex, pitch_in_bytes_override)) {
		FREE(rtex);
		return NULL;
	}

	/* Tiled depth textures utilize the non-displayable tile order.
	 * This must be done after r600_setup_surface.
	 * Applies to R600-Cayman. */
	rtex->non_disp_tiling = rtex->is_depth && rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D;

	if (rtex->is_depth) {
		if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
				     R600_RESOURCE_FLAG_FLUSHED_DEPTH)) &&
		    !(rscreen->debug_flags & DBG_NO_HYPERZ)) {

			r600_texture_allocate_htile(rscreen, rtex);
		}
	} else {
		if (base->nr_samples > 1) {
			if (!buf) {
				r600_texture_allocate_fmask(rscreen, rtex);
				r600_texture_allocate_cmask(rscreen, rtex);
				rtex->cmask_buffer = &rtex->resource;
			}
			if (!rtex->fmask.size || !rtex->cmask.size) {
				FREE(rtex);
				return NULL;
			}
		}
	}

	/* Now create the backing buffer. */
	if (!buf) {
		if (!r600_init_resource(rscreen, resource, rtex->size,
					rtex->surface.bo_alignment, TRUE)) {
			FREE(rtex);
			return NULL;
		}
	} else {
		resource->buf = buf;
		resource->cs_buf = rscreen->ws->buffer_get_cs_handle(buf);
		resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->cs_buf);
		resource->domains = rscreen->ws->buffer_get_initial_domain(resource->cs_buf);
	}

	if (rtex->cmask.size) {
		/* Initialize the cmask to 0xCC (= compressed state). */
		r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
					 rtex->cmask.offset, rtex->cmask.size, 0xCCCCCCCC);
	}

	/* Initialize the CMASK base register value. */
	rtex->cmask.base_address_reg =
		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

	if (rscreen->debug_flags & DBG_VM) {
		fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
			rtex->resource.gpu_address,
			rtex->resource.gpu_address + rtex->resource.buf->size,
			base->width0, base->height0, util_max_layer(base, 0)+1, base->last_level+1,
			base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
	}

	if (rscreen->debug_flags & DBG_TEX ||
	    (rtex->resource.b.b.last_level > 0 && rscreen->debug_flags & DBG_TEXMIP)) {
		printf("Texture: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
		       "blk_h=%u, blk_d=%u, array_size=%u, last_level=%u, "
		       "bpe=%u, nsamples=%u, flags=0x%x, %s\n",
		       rtex->surface.npix_x, rtex->surface.npix_y,
		       rtex->surface.npix_z, rtex->surface.blk_w,
		       rtex->surface.blk_h, rtex->surface.blk_d,
		       rtex->surface.array_size, rtex->surface.last_level,
		       rtex->surface.bpe, rtex->surface.nsamples,
		       rtex->surface.flags, util_format_short_name(base->format));
		for (int i = 0; i <= rtex->surface.last_level; i++) {
			printf("  L %i: offset=%"PRIu64", slice_size=%"PRIu64", npix_x=%u, "
			       "npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
			       "nblk_z=%u, pitch_bytes=%u, mode=%u\n",
			       i, rtex->surface.level[i].offset,
			       rtex->surface.level[i].slice_size,
			       u_minify(rtex->resource.b.b.width0, i),
			       u_minify(rtex->resource.b.b.height0, i),
			       u_minify(rtex->resource.b.b.depth0, i),
			       rtex->surface.level[i].nblk_x,
			       rtex->surface.level[i].nblk_y,
			       rtex->surface.level[i].nblk_z,
			       rtex->surface.level[i].pitch_bytes,
			       rtex->surface.level[i].mode);
		}
		if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
			for (int i = 0; i <= rtex->surface.last_level; i++) {
				printf("  S %i: offset=%"PRIu64", slice_size=%"PRIu64", npix_x=%u, "
				       "npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
				       "nblk_z=%u, pitch_bytes=%u, mode=%u\n",
				       i, rtex->surface.stencil_level[i].offset,
				       rtex->surface.stencil_level[i].slice_size,
				       u_minify(rtex->resource.b.b.width0, i),
				       u_minify(rtex->resource.b.b.height0, i),
				       u_minify(rtex->resource.b.b.depth0, i),
				       rtex->surface.stencil_level[i].nblk_x,
				       rtex->surface.stencil_level[i].nblk_y,
				       rtex->surface.stencil_level[i].nblk_z,
				       rtex->surface.stencil_level[i].pitch_bytes,
				       rtex->surface.stencil_level[i].mode);
			}
		}
	}

	return rtex;
}
static unsigned r600_choose_tiling(struct r600_common_screen *rscreen,
				   const struct pipe_resource *templ)
{
	const struct util_format_description *desc = util_format_description(templ->format);

	/* MSAA resources must be 2D tiled. */
	if (templ->nr_samples > 1)
		return RADEON_SURF_MODE_2D;

	/* Transfer resources should be linear. */
	if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
		return RADEON_SURF_MODE_LINEAR_ALIGNED;

	/* Handle common candidates for the linear mode.
	 * Compressed textures must always be tiled. */
	if (!(templ->flags & R600_RESOURCE_FLAG_FORCE_TILING) &&
	    !util_format_is_compressed(templ->format)) {
		/* Not everything can be linear, so we cannot enforce it
		 * for all textures. */
		if ((rscreen->debug_flags & DBG_NO_TILING) &&
		    (!util_format_is_depth_or_stencil(templ->format) ||
		     !(templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH)))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
		if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Cursors are linear on SI.
		 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
		if (rscreen->chip_class >= SI &&
		    (templ->bind & PIPE_BIND_CURSOR))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		if (templ->bind & PIPE_BIND_LINEAR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures with a very small height are recommended to be linear. */
		if (templ->target == PIPE_TEXTURE_1D ||
		    templ->target == PIPE_TEXTURE_1D_ARRAY ||
		    templ->height0 <= 4)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures likely to be mapped often. */
		if (templ->usage == PIPE_USAGE_STAGING ||
		    templ->usage == PIPE_USAGE_STREAM)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Make small textures 1D tiled. */
	if (templ->width0 <= 16 || templ->height0 <= 16 ||
	    (rscreen->debug_flags & DBG_NO_2D_TILING))
		return RADEON_SURF_MODE_1D;

	/* The allocator will switch to 1D if needed. */
	return RADEON_SURF_MODE_2D;
}
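/*
 * The ordering of the checks above is significant: MSAA and explicit
 * transfer resources are decided unconditionally first, the linear
 * candidates are only considered for uncompressed formats without
 * FORCE_TILING, and anything that falls through defaults to 2D tiling,
 * with 1D tiling reserved for small surfaces and DBG_NO_2D_TILING.
 */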
struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
					  const struct pipe_resource *templ)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_surface surface = {0};
	int r;

	r = r600_init_surface(rscreen, &surface, templ,
			      r600_choose_tiling(rscreen, templ),
			      templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);
	if (r) {
		return NULL;
	}
	r = rscreen->ws->surface_best(rscreen->ws, &surface);
	if (r) {
		return NULL;
	}
	return (struct pipe_resource *)r600_texture_create_object(screen, templ,
								  0, NULL, &surface);
}
static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
						      const struct pipe_resource *templ,
						      struct winsys_handle *whandle)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct pb_buffer *buf = NULL;
	unsigned stride = 0;
	unsigned array_mode;
	enum radeon_bo_layout micro, macro;
	struct radeon_surface surface;
	bool scanout;
	int r;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	      templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride);
	if (!buf)
		return NULL;

	rscreen->ws->buffer_get_tiling(buf, &micro, &macro,
				       &surface.bankw, &surface.bankh,
				       &surface.tile_split,
				       &surface.stencil_tile_split,
				       &surface.mtilea, &scanout);

	if (macro == RADEON_LAYOUT_TILED)
		array_mode = RADEON_SURF_MODE_2D;
	else if (micro == RADEON_LAYOUT_TILED)
		array_mode = RADEON_SURF_MODE_1D;
	else
		array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

	r = r600_init_surface(rscreen, &surface, templ, array_mode, false);
	if (r) {
		return NULL;
	}

	if (scanout)
		surface.flags |= RADEON_SURF_SCANOUT;

	return (struct pipe_resource *)r600_texture_create_object(screen, templ,
								  stride, buf, &surface);
}
bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     struct r600_texture **staging)
{
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct pipe_resource resource;
	struct r600_texture **flushed_depth_texture = staging ?
			staging : &rtex->flushed_depth_texture;

	if (!staging && rtex->flushed_depth_texture)
		return true; /* it's ready */

	resource.target = texture->target;
	resource.format = texture->format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
	resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;

	if (staging)
		resource.flags |= R600_RESOURCE_FLAG_TRANSFER;

	*flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (*flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold flushed depth\n");
		return false;
	}

	(*flushed_depth_texture)->is_flushing_texture = TRUE;
	(*flushed_depth_texture)->non_disp_tiling = false;
	return true;
}
/*
 * Initialize the pipe_resource descriptor to be of the same size as the box,
 * which is supposed to hold a subregion of the texture "orig" at the given
 * mipmap level.
 */
static void r600_init_temp_resource_from_box(struct pipe_resource *res,
					     struct pipe_resource *orig,
					     const struct pipe_box *box,
					     unsigned level, unsigned flags)
{
	memset(res, 0, sizeof(*res));
	res->format = orig->format;
	res->width0 = box->width;
	res->height0 = box->height;
	res->depth0 = 1;
	res->array_size = 1;
	res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	res->flags = flags;

	/* We must set the correct texture target and dimensions for a 3D box. */
	if (box->depth > 1 && util_max_layer(orig, level) > 0)
		res->target = orig->target;
	else
		res->target = PIPE_TEXTURE_2D;

	switch (res->target) {
	case PIPE_TEXTURE_1D_ARRAY:
	case PIPE_TEXTURE_2D_ARRAY:
	case PIPE_TEXTURE_CUBE_ARRAY:
		res->array_size = box->depth;
		break;
	case PIPE_TEXTURE_3D:
		res->depth0 = box->depth;
		break;
	default:;
	}
}
static void *r600_texture_transfer_map(struct pipe_context *ctx,
				       struct pipe_resource *texture,
				       unsigned level,
				       unsigned usage,
				       const struct pipe_box *box,
				       struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct r600_transfer *trans;
	boolean use_staging_texture = FALSE;
	struct r600_resource *buf;
	unsigned offset = 0;
	char *map;

	/* We cannot map a tiled texture directly because the data is
	 * in a different order, therefore we do detiling using a blit.
	 *
	 * Also, use a temporary in GTT memory for read transfers, as
	 * the CPU is much happier reading out of cached system memory
	 * than uncached VRAM.
	 */
	if (rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
		use_staging_texture = TRUE;
	} else if ((usage & PIPE_TRANSFER_READ) && !(usage & PIPE_TRANSFER_MAP_DIRECTLY) &&
		   (rtex->resource.domains == RADEON_DOMAIN_VRAM)) {
		/* Untiled buffers in VRAM, which is slow for CPU reads */
		use_staging_texture = TRUE;
	} else if (!(usage & PIPE_TRANSFER_READ) &&
		   (r600_rings_is_buffer_referenced(rctx, rtex->resource.cs_buf, RADEON_USAGE_READWRITE) ||
		    rctx->ws->buffer_is_busy(rtex->resource.buf, RADEON_USAGE_READWRITE))) {
		/* Use a staging texture for uploads if the underlying BO is busy. */
		use_staging_texture = TRUE;
	}

	if (texture->flags & R600_RESOURCE_FLAG_TRANSFER) {
		use_staging_texture = FALSE;
	}

	if (use_staging_texture && (usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
		return NULL;
	}

	trans = CALLOC_STRUCT(r600_transfer);
	if (trans == NULL)
		return NULL;
	trans->transfer.resource = texture;
	trans->transfer.level = level;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;

	if (rtex->is_depth) {
		struct r600_texture *staging_depth;

		if (rtex->resource.b.b.nr_samples > 1) {
			/* MSAA depth buffers need to be converted to single sample buffers.
			 *
			 * Mapping MSAA depth buffers can occur if ReadPixels is called
			 * with a multisample GLX visual.
			 *
			 * First downsample the depth buffer to a temporary texture,
			 * then decompress the temporary one to staging.
			 *
			 * Only the region being mapped is transfered.
			 */
			struct pipe_resource resource;

			r600_init_temp_resource_from_box(&resource, texture, box, level, 0);

			if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			if (usage & PIPE_TRANSFER_READ) {
				struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);

				r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
				rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
							    0, 0, 0, box->depth, 0, 0);
				pipe_resource_reference((struct pipe_resource**)&temp, NULL);
			}
		}
		else {
			/* XXX: only readback the rectangle which is being mapped? */
			/* XXX: when discard is true, no need to read back from depth texture */
			if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			rctx->blit_decompress_depth(ctx, rtex, staging_depth,
						    level, level,
						    box->z, box->z + box->depth - 1,
						    0, 0);

			offset = r600_texture_get_offset(staging_depth, level, box);
		}

		trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = staging_depth->surface.level[level].slice_size;
		trans->staging = (struct r600_resource*)staging_depth;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct r600_texture *staging;

		r600_init_temp_resource_from_box(&resource, texture, box, level,
						 R600_RESOURCE_FLAG_TRANSFER);
		resource.usage = (usage & PIPE_TRANSFER_READ) ?
			PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;

		/* Create the temporary texture. */
		staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
		if (staging == NULL) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			FREE(trans);
			return NULL;
		}

		trans->staging = &staging->resource;
		trans->transfer.stride = staging->surface.level[0].pitch_bytes;
		trans->transfer.layer_stride = staging->surface.level[0].slice_size;
		if (usage & PIPE_TRANSFER_READ) {
			r600_copy_to_staging_texture(ctx, trans);
		}
	} else {
		/* the resource is mapped directly */
		trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
		offset = r600_texture_get_offset(rtex, level, box);
	}

	if (trans->staging) {
		buf = trans->staging;
		if (!rtex->is_depth && !(usage & PIPE_TRANSFER_READ))
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	} else {
		buf = &rtex->resource;
	}

	if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
		pipe_resource_reference((struct pipe_resource**)&trans->staging, NULL);
		FREE(trans);
		return NULL;
	}

	*ptransfer = &trans->transfer;
	return map + offset;
}
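/*
 * Usage note, per the Gallium transfer interface: the pointer returned
 * above is only valid until transfer_unmap, and when a staging resource
 * was used, writes through it only reach the real texture once
 * transfer_unmap copies the staging data back. Every successful
 * transfer_map must therefore be paired with a transfer_unmap below.
 */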
static void r600_texture_transfer_unmap(struct pipe_context *ctx,
					struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct radeon_winsys_cs_handle *buf;
	struct pipe_resource *texture = transfer->resource;
	struct r600_texture *rtex = (struct r600_texture*)texture;

	if (rtransfer->staging) {
		buf = rtransfer->staging->cs_buf;
	} else {
		buf = r600_resource(transfer->resource)->cs_buf;
	}
	rctx->ws->buffer_unmap(buf);

	if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
		if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &rtransfer->staging->b.b, transfer->level,
						  &transfer->box);
		} else {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
	}

	if (rtransfer->staging)
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);

	FREE(transfer);
}
static const struct u_resource_vtbl r600_texture_vtbl =
{
	NULL,				/* get_handle */
	r600_texture_destroy,		/* resource_destroy */
	r600_texture_transfer_map,	/* transfer_map */
	NULL,				/* transfer_flush_region */
	r600_texture_transfer_unmap,	/* transfer_unmap */
	NULL				/* transfer_inline_write */
};
struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
						struct pipe_resource *texture,
						const struct pipe_surface *templ,
						unsigned width, unsigned height)
{
	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);

	if (surface == NULL)
		return NULL;

	assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
	assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));

	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.context = pipe;
	surface->base.format = templ->format;
	surface->base.width = width;
	surface->base.height = height;
	surface->base.u = templ->u;
	return &surface->base;
}
static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
						struct pipe_resource *tex,
						const struct pipe_surface *templ)
{
	unsigned level = templ->u.tex.level;

	return r600_create_surface_custom(pipe, tex, templ,
					  u_minify(tex->width0, level),
					  u_minify(tex->height0, level));
}
static void r600_surface_destroy(struct pipe_context *pipe,
				 struct pipe_surface *surface)
{
	struct r600_surface *surf = (struct r600_surface*)surface;
	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask, NULL);
	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask, NULL);
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}
unsigned r600_translate_colorswap(enum pipe_format format)
{
	const struct util_format_description *desc = util_format_description(format);

#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == UTIL_FORMAT_SWIZZLE_##swz)

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_SWAP_STD;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return ~0U;

	switch (desc->nr_channels) {
	case 1:
		if (HAS_SWIZZLE(0,X))
			return V_0280A0_SWAP_STD; /* X___ */
		else if (HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* ___X */
		break;
	case 2:
		if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
		    (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
		    (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
			return V_0280A0_SWAP_STD; /* XY__ */
		else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
			 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
			 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
			return V_0280A0_SWAP_STD_REV; /* YX__ */
		else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
			return V_0280A0_SWAP_ALT; /* X__Y */
		else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* Y__X */
		break;
	case 3:
		if (HAS_SWIZZLE(0,X))
			return V_0280A0_SWAP_STD; /* XYZ */
		else if (HAS_SWIZZLE(0,Z))
			return V_0280A0_SWAP_STD_REV; /* ZYX */
		break;
	case 4:
		/* check the middle channels, the 1st and 4th channel can be NONE */
		if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z))
			return V_0280A0_SWAP_STD; /* XYZW */
		else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y))
			return V_0280A0_SWAP_STD_REV; /* WZYX */
		else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X))
			return V_0280A0_SWAP_ALT; /* ZYXW */
		else if (HAS_SWIZZLE(1,X) && HAS_SWIZZLE(2,Y))
			return V_0280A0_SWAP_ALT_REV; /* WXYZ */
		break;
	}
	return ~0U; /* Unable to determine the swap. */
}
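/*
 * Example mapping (derived from the util_format swizzle tables):
 * PIPE_FORMAT_R8G8B8A8_UNORM has swizzle XYZW and yields V_0280A0_SWAP_STD,
 * while PIPE_FORMAT_B8G8R8A8_UNORM has swizzle ZYXW and yields
 * V_0280A0_SWAP_ALT, matching what the CB hardware expects for BGRA
 * surfaces.
 */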
static void evergreen_set_clear_color(struct r600_texture *rtex,
				      enum pipe_format surface_format,
				      const union pipe_color_union *color)
{
	union util_color uc;

	memset(&uc, 0, sizeof(uc));

	if (util_format_is_pure_uint(surface_format)) {
		util_format_write_4ui(surface_format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
	} else if (util_format_is_pure_sint(surface_format)) {
		util_format_write_4i(surface_format, color->i, 0, &uc, 0, 0, 0, 1, 1);
	} else {
		util_pack_color(color->f, surface_format, &uc);
	}

	memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t));
}
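/*
 * The 2 * sizeof(uint32_t) copy above reflects that the hardware exposes
 * the fast-clear value as a pair of 32-bit clear words (presumably the
 * CB_COLOR*_CLEAR_WORD registers); formats wider than 64 bits per pixel
 * cannot be represented, which is why evergreen_do_fast_color_clear below
 * rejects them.
 */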
void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
				   struct pipe_framebuffer_state *fb,
				   struct r600_atom *fb_state,
				   unsigned *buffers,
				   const union pipe_color_union *color)
{
	int i;

	if (rctx->current_render_cond)
		return;

	for (i = 0; i < fb->nr_cbufs; i++) {
		struct r600_texture *tex;
		unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;

		if (!fb->cbufs[i])
			continue;

		/* if this colorbuffer is not being cleared */
		if (!(*buffers & clear_bit))
			continue;

		tex = (struct r600_texture *)fb->cbufs[i]->texture;

		/* 128-bit formats are unsupported */
		if (util_format_get_blocksizebits(fb->cbufs[i]->format) > 64) {
			continue;
		}

		/* the clear is allowed if all layers are bound */
		if (fb->cbufs[i]->u.tex.first_layer != 0 ||
		    fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
			continue;
		}

		/* cannot clear mipmapped textures */
		if (fb->cbufs[i]->texture->last_level != 0) {
			continue;
		}

		/* only supported on tiled surfaces */
		if (tex->surface.level[0].mode < RADEON_SURF_MODE_1D) {
			continue;
		}

		/* fast color clear with 1D tiling doesn't work on old kernels and CIK */
		if (tex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
		    rctx->chip_class >= CIK && rctx->screen->info.drm_minor < 38) {
			continue;
		}

		/* ensure CMASK is enabled */
		r600_texture_alloc_cmask_separate(rctx->screen, tex);
		if (tex->cmask.size == 0) {
			continue;
		}

		/* Do the fast clear. */
		evergreen_set_clear_color(tex, fb->cbufs[i]->format, color);
		rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b,
				   tex->cmask.offset, tex->cmask.size, 0);

		tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
		fb_state->dirty = true;
		*buffers &= ~clear_bit;
	}
}
void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.resource_from_handle = r600_texture_from_handle;
	rscreen->b.resource_get_handle = r600_texture_get_handle;
}
void r600_init_context_texture_functions(struct r600_common_context *rctx)
{
	rctx->b.create_surface = r600_create_surface;
	rctx->b.surface_destroy = r600_surface_destroy;
}