/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe_common.h"
#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
/* Assumed standard includes (not visible in the damaged original): errno.h
 * for the -EINVAL return and inttypes.h for the PRIu64/PRIX64 macros used
 * by the debug printers below. */
#include <errno.h>
#include <inttypes.h>

static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex);
static unsigned r600_choose_tiling(struct r600_common_screen *rscreen,
				   const struct pipe_resource *templ);

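/*
 * Whether a copy can go through the SDMA engine instead of the 3D path.
 * A summary of the checks below (derived from the code, not normative):
 * both textures must have the same bits per block, neither may be
 * multisampled, depth/stencil, or DCC-compressed at the affected level,
 * and a pending CMASK fast clear on the destination must either be
 * discardable (whole-level overwrite) or the copy falls back to 3D.
 */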
bool r600_prepare_for_dma_blit(struct r600_common_context *rctx,
			       struct r600_texture *rdst,
			       unsigned dst_level, unsigned dstx,
			       unsigned dsty, unsigned dstz,
			       struct r600_texture *rsrc,
			       unsigned src_level,
			       const struct pipe_box *src_box)
{
	if (!rctx->dma.cs)
		return false;

	if (util_format_get_blocksizebits(rdst->resource.b.b.format) !=
	    util_format_get_blocksizebits(rsrc->resource.b.b.format))
		return false;

	/* MSAA: Blits don't exist in the real world. */
	if (rsrc->resource.b.b.nr_samples > 1 ||
	    rdst->resource.b.b.nr_samples > 1)
		return false;

	/* Depth-stencil surfaces:
	 *   When dst is linear, the DB->CB copy preserves HTILE.
	 *   When dst is tiled, the 3D path must be used to update HTILE.
	 */
	if (rsrc->is_depth || rdst->is_depth)
		return false;

	/* DCC as:
	 *   src: Use the 3D path. DCC decompression is expensive.
	 *   dst: Use the 3D path to compress the pixels with DCC.
	 */
	if ((rsrc->dcc_offset && rsrc->surface.level[src_level].dcc_enabled) ||
	    (rdst->dcc_offset && rdst->surface.level[dst_level].dcc_enabled))
		return false;

	/* CMASK as:
	 *   src: Both texture and SDMA paths need decompression. Use SDMA.
	 *   dst: If overwriting the whole texture, discard CMASK and use
	 *        SDMA. Otherwise, use the 3D path.
	 */
	if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
		/* The CMASK clear is only enabled for the first level. */
		assert(dst_level == 0);
		if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
						      dstx, dsty, dstz, src_box->width,
						      src_box->height, src_box->depth))
			return false;

		r600_texture_discard_cmask(rctx->screen, rdst);
	}

	/* All requirements are met. Prepare textures for SDMA. */
	if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
		rctx->b.flush_resource(&rctx->b, &rsrc->resource.b.b);

	assert(!(rsrc->dirty_level_mask & (1 << src_level)));
	assert(!(rdst->dirty_level_mask & (1 << dst_level)));

	return true;
}

/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void r600_copy_region_with_blit(struct pipe_context *pipe,
				       struct pipe_resource *dst,
				       unsigned dst_level,
				       unsigned dstx, unsigned dsty, unsigned dstz,
				       struct pipe_resource *src,
				       unsigned src_level,
				       const struct pipe_box *src_box)
{
	struct pipe_blit_info blit;

	memset(&blit, 0, sizeof(blit));
	blit.src.resource = src;
	blit.src.format = src->format;
	blit.src.level = src_level;
	blit.src.box = *src_box;
	blit.dst.resource = dst;
	blit.dst.format = dst->format;
	blit.dst.level = dst_level;
	blit.dst.box.x = dstx;
	blit.dst.box.y = dsty;
	blit.dst.box.z = dstz;
	blit.dst.box.width = src_box->width;
	blit.dst.box.height = src_box->height;
	blit.dst.box.depth = src_box->depth;
	blit.mask = util_format_get_mask(src->format) &
		    util_format_get_mask(dst->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	if (blit.mask) {
		pipe->blit(pipe, &blit);
	}
}

/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = &rtransfer->staging->b.b;
	struct pipe_resource *src = transfer->resource;

	if (src->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
					   src, transfer->level, &transfer->box);
		return;
	}

	rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
		       &transfer->box);
}

/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = transfer->resource;
	struct pipe_resource *src = &rtransfer->staging->b.b;
	struct pipe_box sbox;

	u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

	if (dst->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, transfer->level,
					   transfer->box.x, transfer->box.y, transfer->box.z,
					   src, 0, &sbox);
		return;
	}

	rctx->dma_copy(ctx, dst, transfer->level,
		       transfer->box.x, transfer->box.y, transfer->box.z,
		       src, 0, &sbox);
}

static unsigned r600_texture_get_offset(struct r600_texture *rtex, unsigned level,
					const struct pipe_box *box)
{
	enum pipe_format format = rtex->resource.b.b.format;

	return rtex->surface.level[level].offset +
	       box->z * rtex->surface.level[level].slice_size +
	       box->y / util_format_get_blockheight(format) * rtex->surface.level[level].pitch_bytes +
	       box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
}

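/*
 * Illustrative example for r600_texture_get_offset() above (hypothetical
 * numbers, not from the original source): for a compressed format with
 * 4x4 blocks and 8 bytes per block, a box at (x=8, y=8, z=1) in level L
 * yields
 *   level[L].offset
 *   + 1 * level[L].slice_size          (third dimension)
 *   + (8 / 4) * level[L].pitch_bytes   (two rows of blocks down)
 *   + (8 / 4) * 8                      (two blocks into the row)
 */
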
static int r600_init_surface(struct r600_common_screen *rscreen,
			     struct radeon_surf *surface,
			     const struct pipe_resource *ptex,
			     unsigned array_mode,
			     bool is_flushed_depth)
{
	const struct util_format_description *desc =
		util_format_description(ptex->format);
	bool is_depth, is_stencil;

	is_depth = util_format_has_depth(desc);
	is_stencil = util_format_has_stencil(desc);

	surface->npix_x = ptex->width0;
	surface->npix_y = ptex->height0;
	surface->npix_z = ptex->depth0;
	surface->blk_w = util_format_get_blockwidth(ptex->format);
	surface->blk_h = util_format_get_blockheight(ptex->format);
	surface->blk_d = 1;
	surface->array_size = 1;
	surface->last_level = ptex->last_level;

	if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
	    ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
		surface->bpe = 4; /* stencil is allocated separately on evergreen */
	} else {
		surface->bpe = util_format_get_blocksize(ptex->format);
		/* align byte per element on dword */
		if (surface->bpe == 3) {
			surface->bpe = 4;
		}
	}

	surface->nsamples = ptex->nr_samples ? ptex->nr_samples : 1;
	surface->flags = RADEON_SURF_SET(array_mode, MODE);

	switch (ptex->target) {
	case PIPE_TEXTURE_1D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
		break;
	case PIPE_TEXTURE_RECT:
	case PIPE_TEXTURE_2D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
		break;
	case PIPE_TEXTURE_3D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
		break;
	case PIPE_TEXTURE_1D_ARRAY:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_CUBE_ARRAY: /* cube array layout like 2d array */
		assert(ptex->array_size % 6 == 0);
		/* fall through */
	case PIPE_TEXTURE_2D_ARRAY:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_CUBE:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_CUBEMAP, TYPE);
		break;
	case PIPE_BUFFER:
	default:
		return -EINVAL;
	}

	if (!is_flushed_depth && is_depth) {
		surface->flags |= RADEON_SURF_ZBUFFER;

		if (is_stencil) {
			surface->flags |= RADEON_SURF_SBUFFER |
					  RADEON_SURF_HAS_SBUFFER_MIPTREE;
		}
	}
	if (rscreen->chip_class >= SI) {
		surface->flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	}

	if (rscreen->chip_class >= VI &&
	    (ptex->flags & R600_RESOURCE_FLAG_DISABLE_DCC ||
	     ptex->format == PIPE_FORMAT_R9G9B9E5_FLOAT))
		surface->flags |= RADEON_SURF_DISABLE_DCC;

	if (ptex->bind & PIPE_BIND_SCANOUT) {
		/* This should catch bugs in gallium users setting incorrect flags. */
		assert(surface->nsamples == 1 &&
		       surface->array_size == 1 &&
		       surface->npix_z == 1 &&
		       surface->last_level == 0 &&
		       !(surface->flags & RADEON_SURF_Z_OR_SBUFFER));

		surface->flags |= RADEON_SURF_SCANOUT;
	}
	return 0;
}

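/*
 * Note on the pitch override handled below: older DDX drivers on Evergreen
 * over-estimated the alignment of single-level 1D surfaces, so an imported
 * pitch may differ from what surface_init computed. E.g. (hypothetical
 * numbers) a 1280-byte override with bpe = 4 re-derives nblk_x = 320 and
 * rescales slice_size accordingly.
 */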
static int r600_setup_surface(struct pipe_screen *screen,
			      struct r600_texture *rtex,
			      unsigned pitch_in_bytes_override,
			      unsigned offset)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned i;
	int r;

	r = rscreen->ws->surface_init(rscreen->ws, &rtex->surface);
	if (r) {
		return r;
	}

	rtex->size = rtex->surface.bo_size;

	if (pitch_in_bytes_override && pitch_in_bytes_override != rtex->surface.level[0].pitch_bytes) {
		/* old ddx on evergreen over estimate alignment for 1d, only 1 level
		 * for those
		 */
		rtex->surface.level[0].nblk_x = pitch_in_bytes_override / rtex->surface.bpe;
		rtex->surface.level[0].pitch_bytes = pitch_in_bytes_override;
		rtex->surface.level[0].slice_size = pitch_in_bytes_override * rtex->surface.level[0].nblk_y;
	}

	if (offset) {
		for (i = 0; i < ARRAY_SIZE(rtex->surface.level); ++i)
			rtex->surface.level[i].offset += offset;
	}
	return 0;
}

static void r600_texture_init_metadata(struct r600_texture *rtex,
				       struct radeon_bo_metadata *metadata)
{
	struct radeon_surf *surface = &rtex->surface;

	memset(metadata, 0, sizeof(*metadata));
	metadata->microtile = surface->level[0].mode >= RADEON_SURF_MODE_1D ?
				   RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
	metadata->macrotile = surface->level[0].mode >= RADEON_SURF_MODE_2D ?
				   RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
	metadata->pipe_config = surface->pipe_config;
	metadata->bankw = surface->bankw;
	metadata->bankh = surface->bankh;
	metadata->tile_split = surface->tile_split;
	metadata->mtilea = surface->mtilea;
	metadata->num_banks = surface->num_banks;
	metadata->stride = surface->level[0].pitch_bytes;
	metadata->scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
}

static void r600_dirty_all_framebuffer_states(struct r600_common_screen *rscreen)
{
	p_atomic_inc(&rscreen->dirty_fb_counter);
}

static void r600_eliminate_fast_color_clear(struct r600_common_screen *rscreen,
					    struct r600_texture *rtex)
{
	struct pipe_context *ctx = rscreen->aux_context;

	pipe_mutex_lock(rscreen->aux_context_lock);
	ctx->flush_resource(ctx, &rtex->resource.b.b);
	ctx->flush(ctx, NULL, 0);
	pipe_mutex_unlock(rscreen->aux_context_lock);
}

static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex)
{
	if (!rtex->cmask.size)
		return;

	assert(rtex->resource.b.b.nr_samples <= 1);

	/* Disable CMASK. */
	memset(&rtex->cmask, 0, sizeof(rtex->cmask));
	rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info &= ~SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1);

	if (rtex->cmask_buffer != &rtex->resource)
		r600_resource_reference(&rtex->cmask_buffer, NULL);

	/* Notify all contexts about the change. */
	r600_dirty_all_framebuffer_states(rscreen);
	p_atomic_inc(&rscreen->compressed_colortex_counter);
}

static bool r600_can_disable_dcc(struct r600_texture *rtex)
{
	/* We can't disable DCC if it can be written by another process. */
	return rtex->dcc_offset &&
	       (!rtex->resource.is_shared ||
		!(rtex->resource.external_usage & PIPE_HANDLE_USAGE_WRITE));
}

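/*
 * Two ways of getting rid of DCC, as implemented below:
 *  - "discard" simply forgets the DCC offset, which is only safe when the
 *    texture contents are invalid or have just been decompressed;
 *  - "disable" first decompresses DCC on the screen's aux context, then
 *    discards it, so the pixel data stays valid.
 */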
static bool r600_texture_discard_dcc(struct r600_common_screen *rscreen,
				     struct r600_texture *rtex)
{
	if (!r600_can_disable_dcc(rtex))
		return false;

	/* Disable DCC. */
	rtex->dcc_offset = 0;

	/* Notify all contexts about the change. */
	r600_dirty_all_framebuffer_states(rscreen);
	return true;
}

bool r600_texture_disable_dcc(struct r600_common_screen *rscreen,
			      struct r600_texture *rtex)
{
	struct r600_common_context *rctx =
		(struct r600_common_context *)rscreen->aux_context;

	if (!r600_can_disable_dcc(rtex))
		return false;

	/* Decompress DCC. */
	pipe_mutex_lock(rscreen->aux_context_lock);
	rctx->decompress_dcc(&rctx->b, rtex);
	rctx->b.flush(&rctx->b, NULL, 0);
	pipe_mutex_unlock(rscreen->aux_context_lock);

	return r600_texture_discard_dcc(rscreen, rtex);
}

static void r600_degrade_tile_mode_to_linear(struct r600_common_context *rctx,
					     struct r600_texture *rtex,
					     bool invalidate_storage)
{
	struct pipe_screen *screen = rctx->b.screen;
	struct r600_texture *new_tex;
	struct pipe_resource templ = rtex->resource.b.b;
	unsigned i;

	templ.bind |= PIPE_BIND_LINEAR;

	/* r600g doesn't react to dirty_tex_descriptor_counter */
	if (rctx->chip_class < SI)
		return;

	if (rtex->resource.is_shared ||
	    rtex->surface.level[0].mode == RADEON_SURF_MODE_LINEAR_ALIGNED)
		return;

	/* This fails with MSAA, depth, and compressed textures. */
	if (r600_choose_tiling(rctx->screen, &templ) !=
	    RADEON_SURF_MODE_LINEAR_ALIGNED)
		return;

	new_tex = (struct r600_texture*)screen->resource_create(screen, &templ);
	if (!new_tex)
		return;

	/* Copy the pixels to the new texture. */
	if (!invalidate_storage) {
		for (i = 0; i <= templ.last_level; i++) {
			struct pipe_box box;

			u_box_3d(0, 0, 0,
				 u_minify(templ.width0, i), u_minify(templ.height0, i),
				 util_max_layer(&templ, i) + 1, &box);

			rctx->dma_copy(&rctx->b, &new_tex->resource.b.b, i, 0, 0, 0,
				       &rtex->resource.b.b, i, &box);
		}
	}

	r600_texture_discard_cmask(rctx->screen, rtex);
	r600_texture_discard_dcc(rctx->screen, rtex);

	/* Replace the structure fields of rtex. */
	rtex->resource.b.b.bind = templ.bind;
	pb_reference(&rtex->resource.buf, new_tex->resource.buf);
	rtex->resource.gpu_address = new_tex->resource.gpu_address;
	rtex->resource.domains = new_tex->resource.domains;
	rtex->size = new_tex->size;
	rtex->surface = new_tex->surface;
	rtex->non_disp_tiling = new_tex->non_disp_tiling;
	rtex->cb_color_info = new_tex->cb_color_info;
	rtex->cmask = new_tex->cmask; /* needed even without CMASK */

	assert(!rtex->htile_buffer);
	assert(!rtex->cmask.size);
	assert(!rtex->fmask.size);
	assert(!rtex->dcc_offset);
	assert(!rtex->is_depth);

	pipe_resource_reference((struct pipe_resource**)&new_tex, NULL);

	r600_dirty_all_framebuffer_states(rctx->screen);
	p_atomic_inc(&rctx->screen->dirty_tex_descriptor_counter);
}

static boolean r600_texture_get_handle(struct pipe_screen* screen,
				       struct pipe_resource *resource,
				       struct winsys_handle *whandle,
				       unsigned usage)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_resource *res = (struct r600_resource*)resource;
	struct r600_texture *rtex = (struct r600_texture*)resource;
	struct radeon_bo_metadata metadata;
	bool update_metadata = false;

	/* This is not supported now, but it might be required for OpenCL
	 * interop in the future.
	 */
	if (resource->target != PIPE_BUFFER &&
	    (resource->nr_samples > 1 || rtex->is_depth))
		return false;

	if (resource->target != PIPE_BUFFER) {
		/* Since shader image stores don't support DCC on VI,
		 * disable it for external clients that want write
		 * access.
		 */
		if (usage & PIPE_HANDLE_USAGE_WRITE && rtex->dcc_offset) {
			if (r600_texture_disable_dcc(rscreen, rtex))
				update_metadata = true;
		}

		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
		    (rtex->cmask.size || rtex->dcc_offset)) {
			/* Eliminate fast clear (both CMASK and DCC) */
			r600_eliminate_fast_color_clear(rscreen, rtex);

			/* Disable CMASK if flush_resource isn't going
			 * to be called.
			 */
			r600_texture_discard_cmask(rscreen, rtex);
		}

		if (!res->is_shared || update_metadata) {
			r600_texture_init_metadata(rtex, &metadata);
			if (rscreen->query_opaque_metadata)
				rscreen->query_opaque_metadata(rscreen, rtex,
							       &metadata);

			rscreen->ws->buffer_set_metadata(res->buf, &metadata);
		}
	}

	if (res->is_shared) {
		/* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
		 * doesn't set it.
		 */
		res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
	} else {
		res->is_shared = true;
		res->external_usage = usage;
	}

	return rscreen->ws->buffer_get_handle(res->buf,
					      rtex->surface.level[0].pitch_bytes,
					      rtex->surface.level[0].offset,
					      rtex->surface.level[0].slice_size,
					      whandle);
}

*screen
,
554 struct pipe_resource
*ptex
)
556 struct r600_texture
*rtex
= (struct r600_texture
*)ptex
;
557 struct r600_resource
*resource
= &rtex
->resource
;
559 if (rtex
->flushed_depth_texture
)
560 pipe_resource_reference((struct pipe_resource
**)&rtex
->flushed_depth_texture
, NULL
);
562 r600_resource_reference(&rtex
->htile_buffer
, NULL
);
563 if (rtex
->cmask_buffer
!= &rtex
->resource
) {
564 r600_resource_reference(&rtex
->cmask_buffer
, NULL
);
566 pb_reference(&resource
->buf
, NULL
);
static const struct u_resource_vtbl r600_texture_vtbl;

/* The number of samples can be specified independently of the texture. */
void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 unsigned nr_samples,
				 struct r600_fmask_info *out)
{
	/* FMASK is allocated like an ordinary texture. */
	struct radeon_surf fmask = rtex->surface;

	memset(out, 0, sizeof(*out));

	fmask.bo_alignment = 0;
	fmask.bo_size = 0;
	fmask.nsamples = 1;
	fmask.flags |= RADEON_SURF_FMASK;

	/* Force 2D tiling if it wasn't set. This may occur when creating
	 * FMASK for MSAA resolve on R6xx. On R6xx, the single-sample
	 * destination buffer must have an FMASK too. */
	fmask.flags = RADEON_SURF_CLR(fmask.flags, MODE);
	fmask.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);

	if (rscreen->chip_class >= SI) {
		fmask.flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	}

	switch (nr_samples) {
	case 2:
	case 4:
		fmask.bpe = 1;
		if (rscreen->chip_class <= CAYMAN) {
			fmask.bankh = 4;
		}
		break;
	case 8:
		fmask.bpe = 4;
		break;
	default:
		R600_ERR("Invalid sample count for FMASK allocation.\n");
		return;
	}

	/* Overallocate FMASK on R600-R700 to fix colorbuffer corruption.
	 * This can be fixed by writing a separate FMASK allocator specifically
	 * for R600-R700 asics. */
	if (rscreen->chip_class <= R700) {
		fmask.bpe *= 2;
	}

	if (rscreen->ws->surface_init(rscreen->ws, &fmask)) {
		R600_ERR("Got error in surface_init while allocating FMASK.\n");
		return;
	}

	assert(fmask.level[0].mode == RADEON_SURF_MODE_2D);

	out->slice_tile_max = (fmask.level[0].nblk_x * fmask.level[0].nblk_y) / 64;
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->tile_mode_index = fmask.tiling_index[0];
	out->pitch_in_pixels = fmask.level[0].nblk_x;
	out->bank_height = fmask.bankh;
	out->alignment = MAX2(256, fmask.bo_alignment);
	out->size = fmask.bo_size;
}

static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	r600_texture_get_fmask_info(rscreen, rtex,
				    rtex->resource.b.b.nr_samples, &rtex->fmask);

	rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
	rtex->size = rtex->fmask.offset + rtex->fmask.size;
}

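/*
 * Worked example for the CMASK macro-tile math below (hypothetical config
 * with num_pipes = 4): elements_per_macro_tile = (1024 / 4) * 4 = 1024,
 * pixels_per_macro_tile = 1024 * 64 = 65536, sqrt = 256, so the macro tile
 * is 256x256 pixels and the surface is padded to that granularity before
 * the per-slice byte count is derived.
 */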
void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 struct r600_cmask_info *out)
{
	unsigned cmask_tile_width = 8;
	unsigned cmask_tile_height = 8;
	unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
	unsigned element_bits = 4;
	unsigned cmask_cache_bits = 1024;
	unsigned num_pipes = rscreen->info.num_tile_pipes;
	unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;

	unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
	unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
	unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
	unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
	unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;

	unsigned pitch_elements = align(rtex->surface.npix_x, macro_tile_width);
	unsigned height = align(rtex->surface.npix_y, macro_tile_height);

	unsigned base_align = num_pipes * pipe_interleave_bytes;
	unsigned slice_bytes =
		((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;

	assert(macro_tile_width % 128 == 0);
	assert(macro_tile_height % 128 == 0);

	out->pitch = pitch_elements;
	out->height = height;
	out->xalign = macro_tile_width;
	out->yalign = macro_tile_height;
	out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
	out->alignment = MAX2(256, base_align);
	out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		    align(slice_bytes, base_align);
}

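/*
 * On SI+, each 8x8-pixel tile is tracked by one 4-bit CMASK element, hence
 * slice_elements = (width * height) / 64 and slice_bytes = slice_elements / 2
 * in the function below.
 */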
static void si_texture_get_cmask_info(struct r600_common_screen *rscreen,
				      struct r600_texture *rtex,
				      struct r600_cmask_info *out)
{
	unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
	unsigned num_pipes = rscreen->info.num_tile_pipes;
	unsigned cl_width, cl_height;

	switch (num_pipes) {
	case 2:
		cl_width = 32;
		cl_height = 16;
		break;
	case 4:
		cl_width = 32;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 32;
		break;
	case 16: /* Hawaii */
		cl_width = 64;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	unsigned base_align = num_pipes * pipe_interleave_bytes;

	unsigned width = align(rtex->surface.npix_x, cl_width*8);
	unsigned height = align(rtex->surface.npix_y, cl_height*8);

	unsigned slice_elements = (width * height) / (8*8);

	/* Each element of CMASK is a nibble. */
	unsigned slice_bytes = slice_elements / 2;

	out->pitch = width;
	out->height = height;
	out->xalign = cl_width * 8;
	out->yalign = cl_height * 8;
	out->slice_tile_max = (width * height) / (128*128);
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->alignment = MAX2(256, base_align);
	out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		    align(slice_bytes, base_align);
}

static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	if (rscreen->chip_class >= SI) {
		si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	} else {
		r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	}

	rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);
	rtex->size = rtex->cmask.offset + rtex->cmask.size;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
}

static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
					      struct r600_texture *rtex)
{
	if (rtex->cmask_buffer)
		return;

	assert(rtex->cmask.size == 0);

	if (rscreen->chip_class >= SI) {
		si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	} else {
		r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	}

	rtex->cmask_buffer = (struct r600_resource *)
		pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_DEFAULT, rtex->cmask.size);
	if (rtex->cmask_buffer == NULL) {
		rtex->cmask.size = 0;
		return;
	}

	/* update colorbuffer state bits */
	rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);

	p_atomic_inc(&rscreen->compressed_colortex_counter);
}

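/*
 * HTILE stores one 32-bit word per 8x8-pixel tile, so the slice size
 * computed below is (width * height) / 64 * 4 bytes, padded to the
 * pipe-interleave alignment.
 */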
static unsigned r600_texture_get_htile_size(struct r600_common_screen *rscreen,
					    struct r600_texture *rtex)
{
	unsigned cl_width, cl_height, width, height;
	unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
	unsigned num_pipes = rscreen->info.num_tile_pipes;

	if (rscreen->chip_class <= EVERGREEN &&
	    rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 26)
		return 0;

	/* HW bug on R6xx. */
	if (rscreen->chip_class == R600 &&
	    (rtex->surface.level[0].npix_x > 7680 ||
	     rtex->surface.level[0].npix_y > 7680))
		return 0;

	/* HTILE is broken with 1D tiling on old kernels and CIK. */
	if (rscreen->chip_class >= CIK &&
	    rtex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
	    rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 38)
		return 0;

	/* Overalign HTILE on P2 configs to work around GPU hangs in
	 * piglit/depthstencil-render-miplevels 585.
	 *
	 * This has been confirmed to help Kabini & Stoney, where the hangs
	 * are always reproducible. I think I have seen the test hang
	 * on Carrizo too, though it was very rare there.
	 */
	if (rscreen->chip_class >= CIK && num_pipes < 4)
		num_pipes = 4;

	switch (num_pipes) {
	case 1:
		cl_width = 32;
		cl_height = 16;
		break;
	case 2:
		cl_width = 32;
		cl_height = 32;
		break;
	case 4:
		cl_width = 64;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 64;
		break;
	case 16:
		cl_width = 128;
		cl_height = 64;
		break;
	default:
		assert(0);
		return 0;
	}

	width = align(rtex->surface.npix_x, cl_width * 8);
	height = align(rtex->surface.npix_y, cl_height * 8);

	slice_elements = (width * height) / (8 * 8);
	slice_bytes = slice_elements * 4;

	pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
	base_align = num_pipes * pipe_interleave_bytes;

	rtex->htile.pitch = width;
	rtex->htile.height = height;
	rtex->htile.xalign = cl_width * 8;
	rtex->htile.yalign = cl_height * 8;

	return (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		align(slice_bytes, base_align);
}

static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	unsigned htile_size = r600_texture_get_htile_size(rscreen, rtex);

	if (!htile_size)
		return;

	rtex->htile_buffer = (struct r600_resource*)
		pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_DEFAULT, htile_size);
	if (rtex->htile_buffer == NULL) {
		/* this is not a fatal error as we can still keep rendering
		 * without htile buffer */
		R600_ERR("Failed to create buffer object for htile buffer.\n");
	} else {
		r600_screen_clear_buffer(rscreen, &rtex->htile_buffer->b.b, 0,
					 htile_size, 0, R600_COHERENCY_NONE);
	}
}

void r600_print_texture_info(struct r600_texture *rtex, FILE *f)
{
	int i;

	fprintf(f, "  Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
		"blk_h=%u, blk_d=%u, array_size=%u, last_level=%u, "
		"bpe=%u, nsamples=%u, flags=0x%x, %s\n",
		rtex->surface.npix_x, rtex->surface.npix_y,
		rtex->surface.npix_z, rtex->surface.blk_w,
		rtex->surface.blk_h, rtex->surface.blk_d,
		rtex->surface.array_size, rtex->surface.last_level,
		rtex->surface.bpe, rtex->surface.nsamples,
		rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));

	fprintf(f, "  Layout: size=%"PRIu64", alignment=%"PRIu64", bankw=%u, "
		"bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
		rtex->surface.bo_size, rtex->surface.bo_alignment, rtex->surface.bankw,
		rtex->surface.bankh, rtex->surface.num_banks, rtex->surface.mtilea,
		rtex->surface.tile_split, rtex->surface.pipe_config,
		(rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);

	if (rtex->fmask.size)
		fprintf(f, "  FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
			"bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
			rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
			rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
			rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);

	if (rtex->cmask.size)
		fprintf(f, "  CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch=%u, "
			"height=%u, xalign=%u, yalign=%u, slice_tile_max=%u\n",
			rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
			rtex->cmask.pitch, rtex->cmask.height, rtex->cmask.xalign,
			rtex->cmask.yalign, rtex->cmask.slice_tile_max);

	if (rtex->htile_buffer)
		fprintf(f, "  HTile: size=%u, alignment=%u, pitch=%u, height=%u, "
			"xalign=%u, yalign=%u\n",
			rtex->htile_buffer->b.b.width0,
			rtex->htile_buffer->buf->alignment, rtex->htile.pitch,
			rtex->htile.height, rtex->htile.xalign, rtex->htile.yalign);

	if (rtex->dcc_offset) {
		fprintf(f, "  DCC: offset=%"PRIu64", size=%"PRIu64", alignment=%"PRIu64"\n",
			rtex->dcc_offset, rtex->surface.dcc_size,
			rtex->surface.dcc_alignment);
		for (i = 0; i <= rtex->surface.last_level; i++)
			fprintf(f, "  DCCLevel[%i]: enabled=%u, offset=%"PRIu64", "
				"fast_clear_size=%"PRIu64"\n",
				i, rtex->surface.level[i].dcc_enabled,
				rtex->surface.level[i].dcc_offset,
				rtex->surface.level[i].dcc_fast_clear_size);
	}

	for (i = 0; i <= rtex->surface.last_level; i++)
		fprintf(f, "  Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
			"npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
			"nblk_z=%u, pitch_bytes=%u, mode=%u\n",
			i, rtex->surface.level[i].offset,
			rtex->surface.level[i].slice_size,
			u_minify(rtex->resource.b.b.width0, i),
			u_minify(rtex->resource.b.b.height0, i),
			u_minify(rtex->resource.b.b.depth0, i),
			rtex->surface.level[i].nblk_x,
			rtex->surface.level[i].nblk_y,
			rtex->surface.level[i].nblk_z,
			rtex->surface.level[i].pitch_bytes,
			rtex->surface.level[i].mode);

	if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
		for (i = 0; i <= rtex->surface.last_level; i++) {
			fprintf(f, "  StencilLayout: tilesplit=%u\n",
				rtex->surface.stencil_tile_split);
			fprintf(f, "  StencilLevel[%i]: offset=%"PRIu64", "
				"slice_size=%"PRIu64", npix_x=%u, "
				"npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
				"nblk_z=%u, pitch_bytes=%u, mode=%u\n",
				i, rtex->surface.stencil_level[i].offset,
				rtex->surface.stencil_level[i].slice_size,
				u_minify(rtex->resource.b.b.width0, i),
				u_minify(rtex->resource.b.b.height0, i),
				u_minify(rtex->resource.b.b.depth0, i),
				rtex->surface.stencil_level[i].nblk_x,
				rtex->surface.stencil_level[i].nblk_y,
				rtex->surface.stencil_level[i].nblk_z,
				rtex->surface.stencil_level[i].pitch_bytes,
				rtex->surface.stencil_level[i].mode);
		}
	}
}

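/*
 * Allocation layout produced by the function below: the mipmapped
 * color/depth surface is placed first, then FMASK, CMASK and DCC are
 * appended to the same buffer, each aligned with align64(), with
 * rtex->size growing as each region is reserved.
 */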
/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
r600_texture_create_object(struct pipe_screen *screen,
			   const struct pipe_resource *base,
			   unsigned pitch_in_bytes_override,
			   unsigned offset,
			   struct pb_buffer *buf,
			   struct radeon_surf *surface)
{
	struct r600_texture *rtex;
	struct r600_resource *resource;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	rtex = CALLOC_STRUCT(r600_texture);
	if (!rtex)
		return NULL;

	resource = &rtex->resource;
	resource->b.b = *base;
	resource->b.vtbl = &r600_texture_vtbl;
	pipe_reference_init(&resource->b.b.reference, 1);
	resource->b.b.screen = screen;

	/* don't include stencil-only formats which we don't support for rendering */
	rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));

	rtex->surface = *surface;
	if (r600_setup_surface(screen, rtex, pitch_in_bytes_override, offset)) {
		FREE(rtex);
		return NULL;
	}

	/* Tiled depth textures utilize the non-displayable tile order.
	 * This must be done after r600_setup_surface.
	 * Applies to R600-Cayman. */
	rtex->non_disp_tiling = rtex->is_depth && rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D;
	/* Applies to GCN. */
	rtex->last_msaa_resolve_target_micro_mode = rtex->surface.micro_tile_mode;

	if (rtex->is_depth) {
		if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
				     R600_RESOURCE_FLAG_FLUSHED_DEPTH)) &&
		    !(rscreen->debug_flags & DBG_NO_HYPERZ)) {

			r600_texture_allocate_htile(rscreen, rtex);
		}
	} else {
		if (base->nr_samples > 1) {
			if (!buf) {
				r600_texture_allocate_fmask(rscreen, rtex);
				r600_texture_allocate_cmask(rscreen, rtex);
				rtex->cmask_buffer = &rtex->resource;
			}
			if (!rtex->fmask.size || !rtex->cmask.size) {
				FREE(rtex);
				return NULL;
			}
		}

		/* Shared textures must always set up DCC here.
		 * If it's not present, it will be disabled by
		 * apply_opaque_metadata later.
		 */
		if (rtex->surface.dcc_size &&
		    (buf || !(rscreen->debug_flags & DBG_NO_DCC))) {
			/* Reserve space for the DCC buffer. */
			rtex->dcc_offset = align64(rtex->size, rtex->surface.dcc_alignment);
			rtex->size = rtex->dcc_offset + rtex->surface.dcc_size;
		}
	}

	/* Now create the backing buffer. */
	if (!buf) {
		if (!r600_init_resource(rscreen, resource, rtex->size,
					rtex->surface.bo_alignment)) {
			FREE(rtex);
			return NULL;
		}
	} else {
		resource->buf = buf;
		resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->buf);
		resource->domains = rscreen->ws->buffer_get_initial_domain(resource->buf);
	}

	if (rtex->cmask.size) {
		/* Initialize the cmask to 0xCC (= compressed state). */
		r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
					 rtex->cmask.offset, rtex->cmask.size,
					 0xCCCCCCCC, R600_COHERENCY_NONE);
	}

	/* Initialize DCC only if the texture is not being imported. */
	if (!buf && rtex->dcc_offset) {
		r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
					 rtex->dcc_offset,
					 rtex->surface.dcc_size,
					 0xFFFFFFFF, R600_COHERENCY_NONE);
	}

	/* Initialize the CMASK base register value. */
	rtex->cmask.base_address_reg =
		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

	if (rscreen->debug_flags & DBG_VM) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
			rtex->resource.gpu_address,
			rtex->resource.gpu_address + rtex->resource.buf->size,
			base->width0, base->height0, util_max_layer(base, 0)+1, base->last_level+1,
			base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
	}

	if (rscreen->debug_flags & DBG_TEX) {
		puts("Texture:");
		r600_print_texture_info(rtex, stdout);
	}

	return rtex;
}

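/*
 * Tiling selection order below: MSAA forces 2D tiling, transfers force
 * linear, then a set of linear-friendly cases (422 formats, cursors,
 * PIPE_BIND_LINEAR, short or often-mapped textures) is handled, small
 * textures fall back to 1D tiling, and everything else defaults to 2D.
 */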
static unsigned r600_choose_tiling(struct r600_common_screen *rscreen,
				   const struct pipe_resource *templ)
{
	const struct util_format_description *desc = util_format_description(templ->format);
	bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING;

	/* MSAA resources must be 2D tiled. */
	if (templ->nr_samples > 1)
		return RADEON_SURF_MODE_2D;

	/* Transfer resources should be linear. */
	if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
		return RADEON_SURF_MODE_LINEAR_ALIGNED;

	/* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */
	if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN &&
	    (templ->bind & PIPE_BIND_COMPUTE_RESOURCE) &&
	    (templ->target == PIPE_TEXTURE_2D ||
	     templ->target == PIPE_TEXTURE_3D))
		force_tiling = true;

	/* Handle common candidates for the linear mode.
	 * Compressed textures and DB surfaces must always be tiled.
	 */
	if (!force_tiling && !util_format_is_compressed(templ->format) &&
	    (!util_format_is_depth_or_stencil(templ->format) ||
	     templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH)) {
		if (rscreen->debug_flags & DBG_NO_TILING)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
		if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Cursors are linear on SI.
		 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
		if (rscreen->chip_class >= SI &&
		    (templ->bind & PIPE_BIND_CURSOR))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		if (templ->bind & PIPE_BIND_LINEAR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures with a very small height are recommended to be linear. */
		if (templ->target == PIPE_TEXTURE_1D ||
		    templ->target == PIPE_TEXTURE_1D_ARRAY ||
		    templ->height0 <= 4)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures likely to be mapped often. */
		if (templ->usage == PIPE_USAGE_STAGING ||
		    templ->usage == PIPE_USAGE_STREAM)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Make small textures 1D tiled. */
	if (templ->width0 <= 16 || templ->height0 <= 16 ||
	    (rscreen->debug_flags & DBG_NO_2D_TILING))
		return RADEON_SURF_MODE_1D;

	/* The allocator will switch to 1D if needed. */
	return RADEON_SURF_MODE_2D;
}

struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
					  const struct pipe_resource *templ)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_surf surface = {0};
	int r;

	r = r600_init_surface(rscreen, &surface, templ,
			      r600_choose_tiling(rscreen, templ),
			      templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);
	if (r) {
		return NULL;
	}
	r = rscreen->ws->surface_best(rscreen->ws, &surface);
	if (r) {
		return NULL;
	}

	return (struct pipe_resource *)r600_texture_create_object(screen, templ, 0,
								   0, NULL, &surface);
}

static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
						      const struct pipe_resource *templ,
						      struct winsys_handle *whandle,
						      unsigned usage)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct pb_buffer *buf = NULL;
	unsigned stride = 0, offset = 0;
	unsigned array_mode;
	struct radeon_surf surface;
	int r;
	struct radeon_bo_metadata metadata = {};
	struct r600_texture *rtex;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	      templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride, &offset);
	if (!buf)
		return NULL;

	rscreen->ws->buffer_get_metadata(buf, &metadata);

	surface.pipe_config = metadata.pipe_config;
	surface.bankw = metadata.bankw;
	surface.bankh = metadata.bankh;
	surface.tile_split = metadata.tile_split;
	surface.mtilea = metadata.mtilea;
	surface.num_banks = metadata.num_banks;

	if (metadata.macrotile == RADEON_LAYOUT_TILED)
		array_mode = RADEON_SURF_MODE_2D;
	else if (metadata.microtile == RADEON_LAYOUT_TILED)
		array_mode = RADEON_SURF_MODE_1D;
	else
		array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

	r = r600_init_surface(rscreen, &surface, templ, array_mode, false);
	if (r)
		return NULL;

	if (metadata.scanout)
		surface.flags |= RADEON_SURF_SCANOUT;

	rtex = r600_texture_create_object(screen, templ, stride,
					  offset, buf, &surface);
	if (!rtex)
		return NULL;

	rtex->resource.is_shared = true;
	rtex->resource.external_usage = usage;

	if (rscreen->apply_opaque_metadata)
		rscreen->apply_opaque_metadata(rscreen, rtex, &metadata);

	return &rtex->resource.b.b;
}

bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     struct r600_texture **staging)
{
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct pipe_resource resource;
	struct r600_texture **flushed_depth_texture = staging ?
			staging : &rtex->flushed_depth_texture;

	if (!staging && rtex->flushed_depth_texture)
		return true; /* it's ready */

	resource.target = texture->target;
	resource.format = texture->format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
	resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;

	if (staging)
		resource.flags |= R600_RESOURCE_FLAG_TRANSFER;

	*flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (*flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold flushed depth\n");
		return false;
	}

	(*flushed_depth_texture)->is_flushing_texture = true;
	(*flushed_depth_texture)->non_disp_tiling = false;
	return true;
}

/**
 * Initialize the pipe_resource descriptor to be of the same size as the box,
 * which is supposed to hold a subregion of the texture "orig" at the given
 * mipmap level.
 */
static void r600_init_temp_resource_from_box(struct pipe_resource *res,
					     struct pipe_resource *orig,
					     const struct pipe_box *box,
					     unsigned level, unsigned flags)
{
	memset(res, 0, sizeof(*res));
	res->format = orig->format;
	res->width0 = box->width;
	res->height0 = box->height;
	res->depth0 = 1;
	res->array_size = 1;
	res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	res->flags = flags;

	/* We must set the correct texture target and dimensions for a 3D box. */
	if (box->depth > 1 && util_max_layer(orig, level) > 0) {
		res->target = PIPE_TEXTURE_2D_ARRAY;
		res->array_size = box->depth;
	} else {
		res->target = PIPE_TEXTURE_2D;
	}
}

static bool r600_can_invalidate_texture(struct r600_common_screen *rscreen,
					struct r600_texture *rtex,
					unsigned transfer_usage,
					const struct pipe_box *box)
{
	/* r600g doesn't react to dirty_tex_descriptor_counter */
	return rscreen->chip_class >= SI &&
		!rtex->resource.is_shared &&
		!(transfer_usage & PIPE_TRANSFER_READ) &&
		rtex->resource.b.b.last_level == 0 &&
		util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
						 box->x, box->y, box->z,
						 box->width, box->height,
						 box->depth);
}

static void r600_texture_invalidate_storage(struct r600_common_context *rctx,
					    struct r600_texture *rtex)
{
	struct r600_common_screen *rscreen = rctx->screen;

	/* There is no point in discarding depth and tiled buffers. */
	assert(!rtex->is_depth);
	assert(rtex->surface.level[0].mode == RADEON_SURF_MODE_LINEAR_ALIGNED);

	/* Reallocate the buffer in the same pipe_resource. */
	r600_init_resource(rscreen, &rtex->resource, rtex->size,
			   rtex->surface.bo_alignment);

	/* Initialize the CMASK base address (needed even without CMASK). */
	rtex->cmask.base_address_reg =
		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

	r600_dirty_all_framebuffer_states(rscreen);
	p_atomic_inc(&rscreen->dirty_tex_descriptor_counter);

	rctx->num_alloc_tex_transfer_bytes += rtex->size;
}

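/*
 * Mapping strategy used below: depth textures are always decompressed to
 * a staging texture; tiled or VRAM-resident color textures are read
 * through staging; linear writes map directly unless the buffer is busy,
 * in which case the storage is invalidated (when possible) or staged.
 */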
static void *r600_texture_transfer_map(struct pipe_context *ctx,
				       struct pipe_resource *texture,
				       unsigned level,
				       unsigned usage,
				       const struct pipe_box *box,
				       struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct r600_transfer *trans;
	struct r600_resource *buf;
	unsigned offset = 0;
	char *map;
	bool use_staging_texture = false;

	assert(!(texture->flags & R600_RESOURCE_FLAG_TRANSFER));

	/* Depth textures use staging unconditionally. */
	if (!rtex->is_depth) {
		/* Degrade the tile mode if we get too many transfers on APUs.
		 * On dGPUs, the staging texture is always faster.
		 * Only count uploads that are at least 4x4 pixels large.
		 */
		if (!rctx->screen->info.has_dedicated_vram &&
		    level == 0 &&
		    box->width >= 4 && box->height >= 4 &&
		    p_atomic_inc_return(&rtex->num_level0_transfers) == 10) {
			bool can_invalidate =
				r600_can_invalidate_texture(rctx->screen, rtex,
							    usage, box);

			r600_degrade_tile_mode_to_linear(rctx, rtex,
							 can_invalidate);
		}

		/* Tiled textures need to be converted into a linear texture for CPU
		 * access. The staging texture is always linear and is placed in GART.
		 *
		 * Reading from VRAM is slow, always use the staging texture in
		 * this case.
		 *
		 * Use the staging texture for uploads if the underlying BO
		 * is busy.
		 */
		if (rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D)
			use_staging_texture = true;
		else if (usage & PIPE_TRANSFER_READ)
			use_staging_texture = (rtex->resource.domains &
					       RADEON_DOMAIN_VRAM) != 0;
		/* Write & linear only: */
		else if (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf,
							 RADEON_USAGE_READWRITE) ||
			 !rctx->ws->buffer_wait(rtex->resource.buf, 0,
						RADEON_USAGE_READWRITE)) {
			/* It's busy. */
			if (r600_can_invalidate_texture(rctx->screen, rtex,
							usage, box))
				r600_texture_invalidate_storage(rctx, rtex);
			else
				use_staging_texture = true;
		}
	}

	trans = CALLOC_STRUCT(r600_transfer);
	if (!trans)
		return NULL;
	trans->transfer.resource = texture;
	trans->transfer.level = level;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;

	if (rtex->is_depth) {
		struct r600_texture *staging_depth;

		if (rtex->resource.b.b.nr_samples > 1) {
			/* MSAA depth buffers need to be converted to single sample buffers.
			 *
			 * Mapping MSAA depth buffers can occur if ReadPixels is called
			 * with a multisample GLX visual.
			 *
			 * First downsample the depth buffer to a temporary texture,
			 * then decompress the temporary one to staging.
			 *
			 * Only the region being mapped is transfered.
			 */
			struct pipe_resource resource;

			r600_init_temp_resource_from_box(&resource, texture, box, level, 0);

			if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			if (usage & PIPE_TRANSFER_READ) {
				struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
				if (!temp) {
					R600_ERR("failed to create a temporary depth texture\n");
					FREE(trans);
					return NULL;
				}

				r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
				rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
							    0, 0, 0, box->depth, 0, 0);
				pipe_resource_reference(&temp, NULL);
			}
		} else {
			/* XXX: only readback the rectangle which is being mapped? */
			/* XXX: when discard is true, no need to read back from depth texture */
			if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			rctx->blit_decompress_depth(ctx, rtex, staging_depth,
						    level, level,
						    box->z, box->z + box->depth - 1,
						    0, 0);

			offset = r600_texture_get_offset(staging_depth, level, box);
		}

		trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = staging_depth->surface.level[level].slice_size;
		trans->staging = (struct r600_resource*)staging_depth;
		buf = trans->staging;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct r600_texture *staging;

		r600_init_temp_resource_from_box(&resource, texture, box, level,
						 R600_RESOURCE_FLAG_TRANSFER);
		resource.usage = (usage & PIPE_TRANSFER_READ) ?
			PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;

		/* Create the temporary texture. */
		staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
		if (!staging) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			FREE(trans);
			return NULL;
		}
		trans->staging = &staging->resource;
		trans->transfer.stride = staging->surface.level[0].pitch_bytes;
		trans->transfer.layer_stride = staging->surface.level[0].slice_size;

		if (usage & PIPE_TRANSFER_READ)
			r600_copy_to_staging_texture(ctx, trans);
		else
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

		buf = trans->staging;
	} else {
		/* the resource is mapped directly */
		trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
		offset = r600_texture_get_offset(rtex, level, box);
		buf = &rtex->resource;
	}

	if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
		r600_resource_reference(&trans->staging, NULL);
		FREE(trans);
		return NULL;
	}

	*ptransfer = &trans->transfer;
	return map + offset;
}

static void r600_texture_transfer_unmap(struct pipe_context *ctx,
					struct pipe_transfer* transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct pipe_resource *texture = transfer->resource;
	struct r600_texture *rtex = (struct r600_texture*)texture;

	if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
		if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &rtransfer->staging->b.b, transfer->level,
						  &transfer->box);
		} else {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
	}

	if (rtransfer->staging) {
		rctx->num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size;
		r600_resource_reference(&rtransfer->staging, NULL);
	}

	/* Heuristic for {upload, draw, upload, draw, ..}:
	 *
	 * Flush the gfx IB if we've allocated too much texture storage.
	 *
	 * The idea is that we don't want to build IBs that use too much
	 * memory and put pressure on the kernel memory manager and we also
	 * want to make temporary and invalidated buffers go idle ASAP to
	 * decrease the total memory usage or make them reusable. The memory
	 * usage will be slightly higher than given here because of the buffer
	 * cache in the winsys.
	 *
	 * The result is that the kernel memory manager is never a bottleneck.
	 */
	if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
		rctx->gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
		rctx->num_alloc_tex_transfer_bytes = 0;
	}

	FREE(transfer);
}

static const struct u_resource_vtbl r600_texture_vtbl =
{
	NULL,				/* get_handle */
	r600_texture_destroy,		/* resource_destroy */
	r600_texture_transfer_map,	/* transfer_map */
	u_default_transfer_flush_region, /* transfer_flush_region */
	r600_texture_transfer_unmap,	/* transfer_unmap */
	NULL				/* transfer_inline_write */
};

struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
						struct pipe_resource *texture,
						const struct pipe_surface *templ,
						unsigned width, unsigned height)
{
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);

	if (!surface)
		return NULL;

	assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
	assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));

	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.context = pipe;
	surface->base.format = templ->format;
	surface->base.width = width;
	surface->base.height = height;
	surface->base.u = templ->u;
	surface->level_info = &rtex->surface.level[templ->u.tex.level];
	return &surface->base;
}

static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
						struct pipe_resource *tex,
						const struct pipe_surface *templ)
{
	unsigned level = templ->u.tex.level;
	unsigned width = u_minify(tex->width0, level);
	unsigned height = u_minify(tex->height0, level);

	if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
		const struct util_format_description *tex_desc
			= util_format_description(tex->format);
		const struct util_format_description *templ_desc
			= util_format_description(templ->format);

		assert(tex_desc->block.bits == templ_desc->block.bits);

		/* Adjust size of surface if and only if the block width or
		 * height is changed. */
		if (tex_desc->block.width != templ_desc->block.width ||
		    tex_desc->block.height != templ_desc->block.height) {
			unsigned nblks_x = util_format_get_nblocksx(tex->format, width);
			unsigned nblks_y = util_format_get_nblocksy(tex->format, height);

			width = nblks_x * templ_desc->block.width;
			height = nblks_y * templ_desc->block.height;
		}
	}

	return r600_create_surface_custom(pipe, tex, templ, width, height);
}

static void r600_surface_destroy(struct pipe_context *pipe,
				 struct pipe_surface *surface)
{
	struct r600_surface *surf = (struct r600_surface*)surface;
	r600_resource_reference(&surf->cb_buffer_fmask, NULL);
	r600_resource_reference(&surf->cb_buffer_cmask, NULL);
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}

unsigned r600_translate_colorswap(enum pipe_format format, bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);

#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_SWAP_STD;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return ~0U;

	switch (desc->nr_channels) {
	case 1:
		if (HAS_SWIZZLE(0,X))
			return V_0280A0_SWAP_STD; /* X___ */
		else if (HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* ___X */
		break;
	case 2:
		if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
		    (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
		    (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
			return V_0280A0_SWAP_STD; /* XY__ */
		else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
			 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
			 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
			/* YX__ */
			return (do_endian_swap ? V_0280A0_SWAP_STD : V_0280A0_SWAP_STD_REV);
		else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
			return V_0280A0_SWAP_ALT; /* X__Y */
		else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* Y__X */
		break;
	case 3:
		if (HAS_SWIZZLE(0,X))
			return (do_endian_swap ? V_0280A0_SWAP_STD_REV : V_0280A0_SWAP_STD);
		else if (HAS_SWIZZLE(0,Z))
			return V_0280A0_SWAP_STD_REV; /* ZYX */
		break;
	case 4:
		/* check the middle channels, the 1st and 4th channel can be NONE */
		if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
			return V_0280A0_SWAP_STD; /* XYZW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
			return V_0280A0_SWAP_STD_REV; /* WZYX */
		} else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
			return V_0280A0_SWAP_ALT; /* ZYXW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
			/* YZWX */
			if (desc->is_array)
				return V_0280A0_SWAP_ALT_REV;
			else
				return (do_endian_swap ? V_0280A0_SWAP_ALT : V_0280A0_SWAP_ALT_REV);
		}
		break;
	}
	return ~0U;
}

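/*
 * The packed clear color is truncated to two dwords below; this appears to
 * match the pair of per-colorbuffer clear-word registers on Evergreen-class
 * hardware, so formats wider than 64 bits per pixel cannot be fast-cleared
 * this way.
 */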
static void evergreen_set_clear_color(struct r600_texture *rtex,
				      enum pipe_format surface_format,
				      const union pipe_color_union *color)
{
	union util_color uc;

	memset(&uc, 0, sizeof(uc));

	if (util_format_is_pure_uint(surface_format)) {
		util_format_write_4ui(surface_format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
	} else if (util_format_is_pure_sint(surface_format)) {
		util_format_write_4i(surface_format, color->i, 0, &uc, 0, 0, 0, 1, 1);
	} else {
		util_pack_color(color->f, surface_format, &uc);
	}

	memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t));
}

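/*
 * Note on the constants below (interpretation, not from the original
 * comments): *reset_value is a per-byte DCC code replicated into every
 * byte of the DCC buffer; the 0x80/0x40 bits appear to select whether the
 * main and extra channels clear to 1 rather than 0. When the chosen color
 * is not exactly 0/1 (or 0/max for integers) per channel, the function
 * returns early and a fast-clear-eliminate pass stays required.
 */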
static void vi_get_fast_clear_parameters(enum pipe_format surface_format,
					 const union pipe_color_union *color,
					 uint32_t* reset_value,
					 bool* clear_words_needed)
{
	bool values[4] = {};
	int i;
	bool main_value = false;
	bool extra_value = false;
	int extra_channel;
	const struct util_format_description *desc = util_format_description(surface_format);

	*clear_words_needed = true;
	*reset_value = 0x20202020U;

	/* If we want to clear without needing a fast clear eliminate step, we
	 * can set each channel to 0 or 1 (or 0/max for integer formats). We
	 * have two sets of flags, one for the last or first channel(extra) and
	 * one for the other channels(main).
	 */

	if (surface_format == PIPE_FORMAT_R11G11B10_FLOAT ||
	    surface_format == PIPE_FORMAT_B5G6R5_UNORM ||
	    surface_format == PIPE_FORMAT_B5G6R5_SRGB) {
		extra_channel = -1;
	} else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
		if (r600_translate_colorswap(surface_format, false) <= 1)
			extra_channel = desc->nr_channels - 1;
		else
			extra_channel = 0;
	} else
		return;

	for (i = 0; i < 4; ++i) {
		int index = desc->swizzle[i] - PIPE_SWIZZLE_X;

		if (desc->swizzle[i] < PIPE_SWIZZLE_X ||
		    desc->swizzle[i] > PIPE_SWIZZLE_W)
			continue;

		if (util_format_is_pure_sint(surface_format)) {
			values[i] = color->i[i] != 0;
			if (color->i[i] != 0 && color->i[i] != INT32_MAX)
				return;
		} else if (util_format_is_pure_uint(surface_format)) {
			values[i] = color->ui[i] != 0U;
			if (color->ui[i] != 0U && color->ui[i] != UINT32_MAX)
				return;
		} else {
			values[i] = color->f[i] != 0.0F;
			if (color->f[i] != 0.0F && color->f[i] != 1.0F)
				return;
		}

		if (index == extra_channel)
			extra_value = values[i];
		else
			main_value = values[i];
	}

	for (int i = 0; i < 4; ++i)
		if (values[i] != main_value &&
		    desc->swizzle[i] - PIPE_SWIZZLE_X != extra_channel &&
		    desc->swizzle[i] >= PIPE_SWIZZLE_X &&
		    desc->swizzle[i] <= PIPE_SWIZZLE_W)
			return;

	*clear_words_needed = false;
	if (main_value)
		*reset_value |= 0x80808080U;

	if (extra_value)
		*reset_value |= 0x40404040U;
}
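
/* Worked example (added commentary): clearing an R8G8B8A8_UNORM surface to
 * (1, 1, 1, 0) yields main_value = true (R/G/B) and extra_value = false (A),
 * so reset_value becomes 0x20202020 | 0x80808080 = 0xa0a0a0a0 and
 * clear_words_needed is false: the clear can skip the DCC fast clear
 * eliminate pass. A color such as (0.5, 0, 0, 1) returns early, keeping
 * reset_value = 0x20202020, which does require an eliminate pass before the
 * surface can be read.
 */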
void vi_dcc_clear_level(struct r600_common_context *rctx,
			struct r600_texture *rtex,
			unsigned level, unsigned clear_value)
{
	struct pipe_resource *dcc_buffer = &rtex->resource.b.b;
	uint64_t dcc_offset = rtex->dcc_offset +
			      rtex->surface.level[level].dcc_offset;

	assert(rtex->dcc_offset && rtex->surface.level[level].dcc_enabled);

	rctx->clear_buffer(&rctx->b, dcc_buffer, dcc_offset,
			   rtex->surface.level[level].dcc_fast_clear_size,
			   clear_value, R600_COHERENCY_CB_META);
}
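
/* Design note (added commentary): R600_COHERENCY_CB_META makes the buffer
 * clear flush and invalidate the CB metadata caches, so the color block
 * cannot read stale DCC data after the clear.
 */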
/* Set the same micro tile mode as the destination of the last MSAA resolve.
 * This allows hitting the MSAA resolve fast path, which requires that both
 * src and dst micro tile modes match.
 */
static void si_set_optimal_micro_tile_mode(struct r600_common_screen *rscreen,
					   struct r600_texture *rtex)
{
	if (rtex->resource.is_shared ||
	    rtex->surface.nsamples <= 1 ||
	    rtex->surface.micro_tile_mode == rtex->last_msaa_resolve_target_micro_mode)
		return;

	assert(rtex->surface.level[0].mode == RADEON_SURF_MODE_2D);
	assert(rtex->surface.last_level == 0);

	/* These magic numbers were copied from addrlib. It doesn't use any
	 * definitions for them either. They are all 2D_TILED_THIN1 modes with
	 * different bpp and micro tile mode.
	 */
	if (rscreen->chip_class >= CIK) {
		switch (rtex->last_msaa_resolve_target_micro_mode) {
		case 0: /* displayable */
			rtex->surface.tiling_index[0] = 10;
			break;
		case 1: /* thin */
			rtex->surface.tiling_index[0] = 14;
			break;
		case 3: /* rotated */
			rtex->surface.tiling_index[0] = 28;
			break;
		default: /* depth, thick */
			assert(!"unexpected micro mode");
			return;
		}
	} else { /* SI */
		switch (rtex->last_msaa_resolve_target_micro_mode) {
		case 0: /* displayable */
			switch (rtex->surface.bpe) {
			case 8:
				rtex->surface.tiling_index[0] = 10;
				break;
			case 16:
				rtex->surface.tiling_index[0] = 11;
				break;
			default: /* 32, 64 */
				rtex->surface.tiling_index[0] = 12;
				break;
			}
			break;
		case 1: /* thin */
			switch (rtex->surface.bpe) {
			case 8:
				rtex->surface.tiling_index[0] = 14;
				break;
			case 16:
				rtex->surface.tiling_index[0] = 15;
				break;
			case 32:
				rtex->surface.tiling_index[0] = 16;
				break;
			default: /* 64, 128 */
				rtex->surface.tiling_index[0] = 17;
				break;
			}
			break;
		default: /* depth, thick */
			assert(!"unexpected micro mode");
			return;
		}
	}

	rtex->surface.micro_tile_mode = rtex->last_msaa_resolve_target_micro_mode;

	p_atomic_inc(&rscreen->dirty_fb_counter);
	p_atomic_inc(&rscreen->dirty_tex_descriptor_counter);
}
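
/* Added commentary: bumping these counters makes every context notice the
 * tiling change and re-emit framebuffer state and re-create texture
 * descriptors, which bake in the tiling index that was just rewritten above.
 */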
void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
				   struct pipe_framebuffer_state *fb,
				   struct r600_atom *fb_state,
				   unsigned *buffers, unsigned *dirty_cbufs,
				   const union pipe_color_union *color)
{
	int i;

	/* This function is broken in BE, so just disable this path for now */
#ifdef PIPE_ARCH_BIG_ENDIAN
	return;
#endif

	if (rctx->render_cond)
		return;

	for (i = 0; i < fb->nr_cbufs; i++) {
		struct r600_texture *tex;
		unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;

		if (!fb->cbufs[i])
			continue;

		/* if this colorbuffer is not being cleared */
		if (!(*buffers & clear_bit))
			continue;

		tex = (struct r600_texture *)fb->cbufs[i]->texture;

		/* 128-bit formats are unsupported */
		if (util_format_get_blocksizebits(fb->cbufs[i]->format) > 64) {
			continue;
		}

		/* the clear is allowed if all layers are bound */
		if (fb->cbufs[i]->u.tex.first_layer != 0 ||
		    fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
			continue;
		}

		/* cannot clear mipmapped textures */
		if (fb->cbufs[i]->texture->last_level != 0) {
			continue;
		}

		/* only supported on tiled surfaces */
		if (tex->surface.level[0].mode < RADEON_SURF_MODE_1D) {
			continue;
		}

		/* shared textures can't use fast clear without an explicit flush,
		 * because there is no way to communicate the clear color among
		 * all clients
		 */
		if (tex->resource.is_shared &&
		    !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			continue;

		/* fast color clear with 1D tiling doesn't work on old kernels and CIK */
		if (tex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
		    rctx->chip_class >= CIK &&
		    rctx->screen->info.drm_major == 2 &&
		    rctx->screen->info.drm_minor < 38) {
			continue;
		}

		if (tex->dcc_offset && tex->surface.level[0].dcc_enabled) {
			uint32_t reset_value;
			bool clear_words_needed;

			if (rctx->screen->debug_flags & DBG_NO_DCC_CLEAR)
				continue;

			/* We can change the micro tile mode before a full clear. */
			if (rctx->screen->chip_class >= SI)
				si_set_optimal_micro_tile_mode(rctx->screen, tex);

			vi_get_fast_clear_parameters(fb->cbufs[i]->format, color,
						     &reset_value, &clear_words_needed);
			vi_dcc_clear_level(rctx, tex, 0, reset_value);

			if (clear_words_needed)
				tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
		} else {
			/* Stoney/RB+ doesn't work with CMASK fast clear. */
			if (rctx->family == CHIP_STONEY)
				continue;

			/* ensure CMASK is enabled */
			r600_texture_alloc_cmask_separate(rctx->screen, tex);
			if (tex->cmask.size == 0) {
				continue;
			}

			/* We can change the micro tile mode before a full clear. */
			if (rctx->screen->chip_class >= SI)
				si_set_optimal_micro_tile_mode(rctx->screen, tex);

			/* Do the fast clear. */
			rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b,
					   tex->cmask.offset, tex->cmask.size, 0,
					   R600_COHERENCY_CB_META);

			tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
		}

		evergreen_set_clear_color(tex, fb->cbufs[i]->format, color);

		if (dirty_cbufs)
			*dirty_cbufs |= 1 << i;
		rctx->set_atom_dirty(rctx, fb_state, true);
		*buffers &= ~clear_bit;
	}
}
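
/* Usage note (added commentary): callers invoke this before the generic
 * clear path; any PIPE_CLEAR_COLOR* bit still set in *buffers on return was
 * not fast-cleared and must be handled by the regular clear code.
 */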
void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.resource_from_handle = r600_texture_from_handle;
	rscreen->b.resource_get_handle = r600_texture_get_handle;
}
void r600_init_context_texture_functions(struct r600_common_context *rctx)
{
	rctx->b.create_surface = r600_create_surface;
	rctx->b.surface_destroy = r600_surface_destroy;
}