/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe_common.h"
#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include <inttypes.h>
static void r600_texture_discard_dcc(struct r600_common_screen *rscreen,
				     struct r600_texture *rtex);
static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex);
bool r600_prepare_for_dma_blit(struct r600_common_context *rctx,
			       struct r600_texture *rdst,
			       unsigned dst_level, unsigned dstx,
			       unsigned dsty, unsigned dstz,
			       struct r600_texture *rsrc,
			       unsigned src_level,
			       const struct pipe_box *src_box)
{
	if (util_format_get_blocksizebits(rdst->resource.b.b.format) !=
	    util_format_get_blocksizebits(rsrc->resource.b.b.format))
		return false;

	/* MSAA: Blits don't exist in the real world. */
	if (rsrc->resource.b.b.nr_samples > 1 ||
	    rdst->resource.b.b.nr_samples > 1)
		return false;

	/* Depth-stencil surfaces:
	 * When dst is linear, the DB->CB copy preserves HTILE.
	 * When dst is tiled, the 3D path must be used to update HTILE.
	 */
	if (rsrc->is_depth || rdst->is_depth)
		return false;

	/* src: Use the 3D path. DCC decompression is expensive.
	 * dst: If overwriting the whole texture, discard DCC and use SDMA.
	 *      Otherwise, use the 3D path.
	 */
	if (rdst->dcc_offset) {
		/* We can't discard DCC if the texture has been exported.
		 * We can only discard DCC for the entire texture.
		 */
		if (rdst->resource.is_shared ||
		    rdst->resource.b.b.last_level > 0 ||
		    !util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
						      dstx, dsty, dstz, src_box->width,
						      src_box->height, src_box->depth))
			return false;

		r600_texture_discard_dcc(rctx->screen, rdst);
	}

	/* src: Both texture and SDMA paths need decompression. Use SDMA.
	 * dst: If overwriting the whole texture, discard CMASK and use
	 *      SDMA. Otherwise, use the 3D path.
	 */
	if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
		/* The CMASK clear is only enabled for the first level. */
		assert(dst_level == 0);
		if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
						      dstx, dsty, dstz, src_box->width,
						      src_box->height, src_box->depth))
			return false;

		r600_texture_discard_cmask(rctx->screen, rdst);
	}

	/* All requirements are met. Prepare textures for SDMA. */
	if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
		rctx->b.flush_resource(&rctx->b, &rsrc->resource.b.b);

	assert(!(rsrc->dirty_level_mask & (1 << src_level)));
	assert(!(rdst->dirty_level_mask & (1 << dst_level)));

	return true;
}
/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void r600_copy_region_with_blit(struct pipe_context *pipe,
				       struct pipe_resource *dst,
				       unsigned dst_level,
				       unsigned dstx, unsigned dsty, unsigned dstz,
				       struct pipe_resource *src,
				       unsigned src_level,
				       const struct pipe_box *src_box)
{
	struct pipe_blit_info blit;

	memset(&blit, 0, sizeof(blit));
	blit.src.resource = src;
	blit.src.format = src->format;
	blit.src.level = src_level;
	blit.src.box = *src_box;
	blit.dst.resource = dst;
	blit.dst.format = dst->format;
	blit.dst.level = dst_level;
	blit.dst.box.x = dstx;
	blit.dst.box.y = dsty;
	blit.dst.box.z = dstz;
	blit.dst.box.width = src_box->width;
	blit.dst.box.height = src_box->height;
	blit.dst.box.depth = src_box->depth;
	blit.mask = util_format_get_mask(src->format) &
		    util_format_get_mask(dst->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	pipe->blit(pipe, &blit);
}
/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = &rtransfer->staging->b.b;
	struct pipe_resource *src = transfer->resource;

	if (src->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
					   src, transfer->level, &transfer->box);
		return;
	}

	rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
		       &transfer->box);
}
/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = transfer->resource;
	struct pipe_resource *src = &rtransfer->staging->b.b;
	struct pipe_box sbox;

	u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

	if (dst->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, transfer->level,
					   transfer->box.x, transfer->box.y, transfer->box.z,
					   src, 0, &sbox);
		return;
	}

	rctx->dma_copy(ctx, dst, transfer->level,
		       transfer->box.x, transfer->box.y, transfer->box.z,
		       src, 0, &sbox);
}
static unsigned r600_texture_get_offset(struct r600_texture *rtex, unsigned level,
					const struct pipe_box *box)
{
	enum pipe_format format = rtex->resource.b.b.format;

	return rtex->surface.level[level].offset +
	       box->z * rtex->surface.level[level].slice_size +
	       box->y / util_format_get_blockheight(format) * rtex->surface.level[level].pitch_bytes +
	       box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
}
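
/* Worked example (illustrative only, not from the original source): for a
 * BC1/DXT1 texture (4x4 pixel blocks, 8 bytes per block), a box starting at
 * (x=8, y=12, z=0) in level 0 maps to
 *   level[0].offset
 *   + 0 * level[0].slice_size              (first slice)
 *   + (12 / 4) * level[0].pitch_bytes      (three block rows down)
 *   + (8 / 4) * 8                          (two blocks of 8 bytes across)
 * i.e. pixel coordinates are converted to block coordinates before being
 * turned into a byte offset.
 */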
static int r600_init_surface(struct r600_common_screen *rscreen,
			     struct radeon_surf *surface,
			     const struct pipe_resource *ptex,
			     unsigned array_mode,
			     bool is_flushed_depth)
{
	const struct util_format_description *desc =
		util_format_description(ptex->format);
	bool is_depth, is_stencil;

	is_depth = util_format_has_depth(desc);
	is_stencil = util_format_has_stencil(desc);

	surface->npix_x = ptex->width0;
	surface->npix_y = ptex->height0;
	surface->npix_z = ptex->depth0;
	surface->blk_w = util_format_get_blockwidth(ptex->format);
	surface->blk_h = util_format_get_blockheight(ptex->format);
	surface->blk_d = 1;
	surface->array_size = 1;
	surface->last_level = ptex->last_level;

	if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
	    ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
		surface->bpe = 4; /* stencil is allocated separately on evergreen */
	} else {
		surface->bpe = util_format_get_blocksize(ptex->format);
		/* align byte per element on dword */
		if (surface->bpe == 3) {
			surface->bpe = 4;
		}
	}

	surface->nsamples = ptex->nr_samples ? ptex->nr_samples : 1;
	surface->flags = RADEON_SURF_SET(array_mode, MODE);

	switch (ptex->target) {
	case PIPE_TEXTURE_1D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
		break;
	case PIPE_TEXTURE_RECT:
	case PIPE_TEXTURE_2D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
		break;
	case PIPE_TEXTURE_3D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
		break;
	case PIPE_TEXTURE_1D_ARRAY:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_CUBE_ARRAY: /* cube array layout like 2d array */
		assert(ptex->array_size % 6 == 0);
	case PIPE_TEXTURE_2D_ARRAY:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_CUBE:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_CUBEMAP, TYPE);
		break;
	}

	if (ptex->bind & PIPE_BIND_SCANOUT) {
		surface->flags |= RADEON_SURF_SCANOUT;
	}

	if (!is_flushed_depth && is_depth) {
		surface->flags |= RADEON_SURF_ZBUFFER;

		if (is_stencil) {
			surface->flags |= RADEON_SURF_SBUFFER |
					  RADEON_SURF_HAS_SBUFFER_MIPTREE;
		}
	}
	if (rscreen->chip_class >= SI) {
		surface->flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	}
	return 0;
}
static int r600_setup_surface(struct pipe_screen *screen,
			      struct r600_texture *rtex,
			      unsigned pitch_in_bytes_override,
			      unsigned offset)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned i;
	int r;

	r = rscreen->ws->surface_init(rscreen->ws, &rtex->surface);
	if (r) {
		return r;
	}

	rtex->size = rtex->surface.bo_size;

	if (pitch_in_bytes_override && pitch_in_bytes_override != rtex->surface.level[0].pitch_bytes) {
		/* old ddx on evergreen over estimate alignment for 1d, only 1 level */
		rtex->surface.level[0].nblk_x = pitch_in_bytes_override / rtex->surface.bpe;
		rtex->surface.level[0].pitch_bytes = pitch_in_bytes_override;
		rtex->surface.level[0].slice_size = pitch_in_bytes_override * rtex->surface.level[0].nblk_y;
	}

	for (i = 0; i < ARRAY_SIZE(rtex->surface.level); ++i)
		rtex->surface.level[i].offset += offset;

	return 0;
}
static void r600_texture_init_metadata(struct r600_texture *rtex,
				       struct radeon_bo_metadata *metadata)
{
	struct radeon_surf *surface = &rtex->surface;

	memset(metadata, 0, sizeof(*metadata));
	metadata->microtile = surface->level[0].mode >= RADEON_SURF_MODE_1D ?
				   RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
	metadata->macrotile = surface->level[0].mode >= RADEON_SURF_MODE_2D ?
				   RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
	metadata->pipe_config = surface->pipe_config;
	metadata->bankw = surface->bankw;
	metadata->bankh = surface->bankh;
	metadata->tile_split = surface->tile_split;
	metadata->mtilea = surface->mtilea;
	metadata->num_banks = surface->num_banks;
	metadata->stride = surface->level[0].pitch_bytes;
	metadata->scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
}
static void r600_dirty_all_framebuffer_states(struct r600_common_screen *rscreen)
{
	p_atomic_inc(&rscreen->dirty_fb_counter);
}
static void r600_eliminate_fast_color_clear(struct r600_common_screen *rscreen,
					    struct r600_texture *rtex)
{
	struct pipe_context *ctx = rscreen->aux_context;

	pipe_mutex_lock(rscreen->aux_context_lock);
	ctx->flush_resource(ctx, &rtex->resource.b.b);
	ctx->flush(ctx, NULL, 0);
	pipe_mutex_unlock(rscreen->aux_context_lock);
}
static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex)
{
	if (!rtex->cmask.size)
		return;

	assert(rtex->resource.b.b.nr_samples <= 1);

	memset(&rtex->cmask, 0, sizeof(rtex->cmask));
	rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info &= ~SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1);

	if (rtex->cmask_buffer != &rtex->resource)
		pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);

	/* Notify all contexts about the change. */
	r600_dirty_all_framebuffer_states(rscreen);
	p_atomic_inc(&rscreen->compressed_colortex_counter);
}
static void r600_texture_discard_dcc(struct r600_common_screen *rscreen,
				     struct r600_texture *rtex)
{
	rtex->dcc_offset = 0;
	rtex->cb_color_info &= ~VI_S_028C70_DCC_ENABLE(1);

	/* Notify all contexts about the change. */
	r600_dirty_all_framebuffer_states(rscreen);
}
void r600_texture_disable_dcc(struct r600_common_screen *rscreen,
			      struct r600_texture *rtex)
{
	struct r600_common_context *rctx =
		(struct r600_common_context *)rscreen->aux_context;

	if (!rtex->dcc_offset)
		return;

	/* Decompress DCC. */
	pipe_mutex_lock(rscreen->aux_context_lock);
	rctx->decompress_dcc(&rctx->b, rtex);
	rctx->b.flush(&rctx->b, NULL, 0);
	pipe_mutex_unlock(rscreen->aux_context_lock);

	r600_texture_discard_dcc(rscreen, rtex);
}
static boolean r600_texture_get_handle(struct pipe_screen* screen,
				       struct pipe_resource *resource,
				       struct winsys_handle *whandle,
				       unsigned usage)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_resource *res = (struct r600_resource*)resource;
	struct r600_texture *rtex = (struct r600_texture*)resource;
	struct radeon_bo_metadata metadata;
	bool update_metadata = false;

	/* This is not supported now, but it might be required for OpenCL
	 * interop in the future.
	 */
	if (resource->target != PIPE_BUFFER &&
	    (resource->nr_samples > 1 || rtex->is_depth))
		return false;

	if (resource->target != PIPE_BUFFER) {
		/* Since shader image stores don't support DCC on VI,
		 * disable it for external clients that want write
		 * access.
		 */
		if (usage & PIPE_HANDLE_USAGE_WRITE && rtex->dcc_offset) {
			r600_texture_disable_dcc(rscreen, rtex);
			update_metadata = true;
		}

		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
		    rtex->cmask.size) {
			/* Eliminate fast clear (both CMASK and DCC) */
			r600_eliminate_fast_color_clear(rscreen, rtex);

			/* Disable CMASK if flush_resource isn't going
			 * to be called.
			 */
			r600_texture_discard_cmask(rscreen, rtex);
			update_metadata = true;
		}
	}

	if (!res->is_shared || update_metadata) {
		r600_texture_init_metadata(rtex, &metadata);
		if (rscreen->query_opaque_metadata)
			rscreen->query_opaque_metadata(rscreen, rtex,
						       &metadata);

		rscreen->ws->buffer_set_metadata(res->buf, &metadata);
	}

	if (res->is_shared) {
		/* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
		 * doesn't set it.
		 */
		res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
	} else {
		res->is_shared = true;
		res->external_usage = usage;
	}

	return rscreen->ws->buffer_get_handle(res->buf,
					      rtex->surface.level[0].pitch_bytes,
					      rtex->surface.level[0].offset,
					      rtex->surface.level[0].slice_size,
					      whandle);
}
static void r600_texture_destroy(struct pipe_screen *screen,
				 struct pipe_resource *ptex)
{
	struct r600_texture *rtex = (struct r600_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;

	if (rtex->flushed_depth_texture)
		pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);

	pipe_resource_reference((struct pipe_resource**)&rtex->htile_buffer, NULL);
	if (rtex->cmask_buffer != &rtex->resource) {
		pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);
	}
	pb_reference(&resource->buf, NULL);
	FREE(rtex);
}
static const struct u_resource_vtbl r600_texture_vtbl;
/* The number of samples can be specified independently of the texture. */
void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 unsigned nr_samples,
				 struct r600_fmask_info *out)
{
	/* FMASK is allocated like an ordinary texture. */
	struct radeon_surf fmask = rtex->surface;

	memset(out, 0, sizeof(*out));

	fmask.bo_alignment = 0;
	fmask.flags |= RADEON_SURF_FMASK;

	/* Force 2D tiling if it wasn't set. This may occur when creating
	 * FMASK for MSAA resolve on R6xx. On R6xx, the single-sample
	 * destination buffer must have an FMASK too. */
	fmask.flags = RADEON_SURF_CLR(fmask.flags, MODE);
	fmask.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);

	if (rscreen->chip_class >= SI) {
		fmask.flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	}
	switch (nr_samples) {
	case 2:
	case 4:
		fmask.bpe = 1;
		if (rscreen->chip_class <= CAYMAN) {
			fmask.bankh = 4;
		}
		break;
	case 8:
		fmask.bpe = 4;
		break;
	default:
		R600_ERR("Invalid sample count for FMASK allocation.\n");
		return;
	}
	/* Overallocate FMASK on R600-R700 to fix colorbuffer corruption.
	 * This can be fixed by writing a separate FMASK allocator specifically
	 * for R600-R700 asics. */
	if (rscreen->chip_class <= R700) {
		fmask.bpe *= 2;
	}

	if (rscreen->ws->surface_init(rscreen->ws, &fmask)) {
		R600_ERR("Got error in surface_init while allocating FMASK.\n");
		return;
	}

	assert(fmask.level[0].mode == RADEON_SURF_MODE_2D);

	out->slice_tile_max = (fmask.level[0].nblk_x * fmask.level[0].nblk_y) / 64;
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->tile_mode_index = fmask.tiling_index[0];
	out->pitch_in_pixels = fmask.level[0].nblk_x;
	out->bank_height = fmask.bankh;
	out->alignment = MAX2(256, fmask.bo_alignment);
	out->size = fmask.bo_size;
}
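
/* Note (added for clarity): slice_tile_max is expressed in 8x8 pixel tiles,
 * which is why the block count is divided by 64, and the corresponding
 * hardware field expects "number of tiles - 1", hence the decrement when the
 * value is non-zero.
 */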
static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	r600_texture_get_fmask_info(rscreen, rtex,
				    rtex->resource.b.b.nr_samples, &rtex->fmask);

	rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
	rtex->size = rtex->fmask.offset + rtex->fmask.size;
}
void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 struct r600_cmask_info *out)
{
	unsigned cmask_tile_width = 8;
	unsigned cmask_tile_height = 8;
	unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
	unsigned element_bits = 4;
	unsigned cmask_cache_bits = 1024;
	unsigned num_pipes = rscreen->info.num_tile_pipes;
	unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;

	unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
	unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
	unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
	unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
	unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;

	unsigned pitch_elements = align(rtex->surface.npix_x, macro_tile_width);
	unsigned height = align(rtex->surface.npix_y, macro_tile_height);

	unsigned base_align = num_pipes * pipe_interleave_bytes;
	unsigned slice_bytes =
		((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;

	assert(macro_tile_width % 128 == 0);
	assert(macro_tile_height % 128 == 0);

	out->pitch = pitch_elements;
	out->height = height;
	out->xalign = macro_tile_width;
	out->yalign = macro_tile_height;
	out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
	out->alignment = MAX2(256, base_align);
	out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		    align(slice_bytes, base_align);
}
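
/* Rough sizing sketch (illustrative, assuming an 8-pipe configuration with a
 * 256-byte pipe interleave): elements_per_macro_tile = 1024/4 * 8 = 2048, so
 * pixels_per_macro_tile = 2048 * 64 = 131072 and the macro tile is 512x256
 * pixels. A 1920x1080 color buffer is padded to 2048x1280, giving
 * slice_bytes = 2048*1280*4/8/64 = 20480 bytes of CMASK per slice before the
 * base_align rounding.
 */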
static void si_texture_get_cmask_info(struct r600_common_screen *rscreen,
				      struct r600_texture *rtex,
				      struct r600_cmask_info *out)
{
	unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
	unsigned num_pipes = rscreen->info.num_tile_pipes;
	unsigned cl_width, cl_height;
	switch (num_pipes) {
	case 2:
		cl_width = 32;
		cl_height = 16;
		break;
	case 4:
		cl_width = 32;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 32;
		break;
	case 16: /* Hawaii */
		cl_width = 64;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}
	unsigned base_align = num_pipes * pipe_interleave_bytes;

	unsigned width = align(rtex->surface.npix_x, cl_width*8);
	unsigned height = align(rtex->surface.npix_y, cl_height*8);

	unsigned slice_elements = (width * height) / (8*8);

	/* Each element of CMASK is a nibble. */
	unsigned slice_bytes = slice_elements / 2;

	out->height = height;
	out->xalign = cl_width * 8;
	out->yalign = cl_height * 8;
	out->slice_tile_max = (width * height) / (128*128);
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->alignment = MAX2(256, base_align);
	out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		    align(slice_bytes, base_align);
}
static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	if (rscreen->chip_class >= SI) {
		si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	} else {
		r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	}

	rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);
	rtex->size = rtex->cmask.offset + rtex->cmask.size;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
}
static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
					      struct r600_texture *rtex)
{
	if (rtex->cmask_buffer)
		return;

	assert(rtex->cmask.size == 0);

	if (rscreen->chip_class >= SI) {
		si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	} else {
		r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	}

	rtex->cmask_buffer = (struct r600_resource *)
		pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_DEFAULT, rtex->cmask.size);
	if (rtex->cmask_buffer == NULL) {
		rtex->cmask.size = 0;
		return;
	}

	/* update colorbuffer state bits */
	rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);

	p_atomic_inc(&rscreen->compressed_colortex_counter);
}
static unsigned r600_texture_get_htile_size(struct r600_common_screen *rscreen,
					    struct r600_texture *rtex)
{
	unsigned cl_width, cl_height, width, height;
	unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
	unsigned num_pipes = rscreen->info.num_tile_pipes;

	if (rscreen->chip_class <= EVERGREEN &&
	    rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 26)
		return 0;

	/* HW bug on R6xx. */
	if (rscreen->chip_class == R600 &&
	    (rtex->surface.level[0].npix_x > 7680 ||
	     rtex->surface.level[0].npix_y > 7680))
		return 0;

	/* HTILE is broken with 1D tiling on old kernels and CIK. */
	if (rscreen->chip_class >= CIK &&
	    rtex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
	    rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 38)
		return 0;

	/* Overalign HTILE on P2 configs to work around GPU hangs in
	 * piglit/depthstencil-render-miplevels 585.
	 *
	 * This has been confirmed to help Kabini & Stoney, where the hangs
	 * are always reproducible. I think I have seen the test hang
	 * on Carrizo too, though it was very rare there.
	 */
	if (rscreen->chip_class >= CIK && num_pipes < 4)
		num_pipes = 4;
	switch (num_pipes) {
	case 1:
		cl_width = 32;
		cl_height = 16;
		break;
	case 2:
		cl_width = 32;
		cl_height = 32;
		break;
	case 4:
		cl_width = 64;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 64;
		break;
	case 16:
		cl_width = 128;
		cl_height = 64;
		break;
	default:
		assert(0);
		return 0;
	}

	width = align(rtex->surface.npix_x, cl_width * 8);
	height = align(rtex->surface.npix_y, cl_height * 8);

	slice_elements = (width * height) / (8 * 8);
	slice_bytes = slice_elements * 4;

	pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
	base_align = num_pipes * pipe_interleave_bytes;

	rtex->htile.pitch = width;
	rtex->htile.height = height;
	rtex->htile.xalign = cl_width * 8;
	rtex->htile.yalign = cl_height * 8;

	return (util_max_layer(&rtex->resource.b.b, 0) + 1) *
	       align(slice_bytes, base_align);
}
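
/* Illustrative sizing (assuming cl_width = 64 and cl_height = 64, i.e. an
 * 8-pipe configuration): a 1920x1080 depth buffer is padded to
 * align(1920, 512) x align(1080, 512) = 2048x1536, so slice_elements =
 * 2048*1536/64 = 49152 HTILE entries and slice_bytes = 49152 * 4 = 196608,
 * rounded up to a multiple of base_align = num_pipes * pipe_interleave_bytes.
 */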
static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	unsigned htile_size = r600_texture_get_htile_size(rscreen, rtex);

	if (!htile_size)
		return;

	rtex->htile_buffer = (struct r600_resource*)
			     pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
						PIPE_USAGE_DEFAULT, htile_size);
	if (rtex->htile_buffer == NULL) {
		/* this is not a fatal error as we can still keep rendering
		 * without htile buffer */
		R600_ERR("Failed to create buffer object for htile buffer.\n");
	} else {
		r600_screen_clear_buffer(rscreen, &rtex->htile_buffer->b.b, 0,
					 htile_size, 0, R600_COHERENCY_NONE);
	}
}
void r600_print_texture_info(struct r600_texture *rtex, FILE *f)
{
	int i;

	fprintf(f, " Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
		"blk_h=%u, blk_d=%u, array_size=%u, last_level=%u, "
		"bpe=%u, nsamples=%u, flags=0x%x, %s\n",
		rtex->surface.npix_x, rtex->surface.npix_y,
		rtex->surface.npix_z, rtex->surface.blk_w,
		rtex->surface.blk_h, rtex->surface.blk_d,
		rtex->surface.array_size, rtex->surface.last_level,
		rtex->surface.bpe, rtex->surface.nsamples,
		rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));

	fprintf(f, " Layout: size=%"PRIu64", alignment=%"PRIu64", bankw=%u, "
		"bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
		rtex->surface.bo_size, rtex->surface.bo_alignment, rtex->surface.bankw,
		rtex->surface.bankh, rtex->surface.num_banks, rtex->surface.mtilea,
		rtex->surface.tile_split, rtex->surface.pipe_config,
		(rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);

	if (rtex->fmask.size)
		fprintf(f, " FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
			"bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
			rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
			rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
			rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);

	if (rtex->cmask.size)
		fprintf(f, " CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch=%u, "
			"height=%u, xalign=%u, yalign=%u, slice_tile_max=%u\n",
			rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
			rtex->cmask.pitch, rtex->cmask.height, rtex->cmask.xalign,
			rtex->cmask.yalign, rtex->cmask.slice_tile_max);

	if (rtex->htile_buffer)
		fprintf(f, " HTile: size=%u, alignment=%u, pitch=%u, height=%u, "
			"xalign=%u, yalign=%u\n",
			rtex->htile_buffer->b.b.width0,
			rtex->htile_buffer->buf->alignment, rtex->htile.pitch,
			rtex->htile.height, rtex->htile.xalign, rtex->htile.yalign);

	if (rtex->dcc_offset) {
		fprintf(f, " DCC: offset=%"PRIu64", size=%"PRIu64", alignment=%"PRIu64"\n",
			rtex->dcc_offset, rtex->surface.dcc_size,
			rtex->surface.dcc_alignment);
		for (i = 0; i <= rtex->surface.last_level; i++)
			fprintf(f, " DCCLevel[%i]: offset=%"PRIu64"\n",
				i, rtex->surface.level[i].dcc_offset);
	}

	for (i = 0; i <= rtex->surface.last_level; i++)
		fprintf(f, " Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
			"npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
			"nblk_z=%u, pitch_bytes=%u, mode=%u\n",
			i, rtex->surface.level[i].offset,
			rtex->surface.level[i].slice_size,
			u_minify(rtex->resource.b.b.width0, i),
			u_minify(rtex->resource.b.b.height0, i),
			u_minify(rtex->resource.b.b.depth0, i),
			rtex->surface.level[i].nblk_x,
			rtex->surface.level[i].nblk_y,
			rtex->surface.level[i].nblk_z,
			rtex->surface.level[i].pitch_bytes,
			rtex->surface.level[i].mode);

	if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
		for (i = 0; i <= rtex->surface.last_level; i++) {
			fprintf(f, " StencilLayout: tilesplit=%u\n",
				rtex->surface.stencil_tile_split);
			fprintf(f, " StencilLevel[%i]: offset=%"PRIu64", "
				"slice_size=%"PRIu64", npix_x=%u, "
				"npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
				"nblk_z=%u, pitch_bytes=%u, mode=%u\n",
				i, rtex->surface.stencil_level[i].offset,
				rtex->surface.stencil_level[i].slice_size,
				u_minify(rtex->resource.b.b.width0, i),
				u_minify(rtex->resource.b.b.height0, i),
				u_minify(rtex->resource.b.b.depth0, i),
				rtex->surface.stencil_level[i].nblk_x,
				rtex->surface.stencil_level[i].nblk_y,
				rtex->surface.stencil_level[i].nblk_z,
				rtex->surface.stencil_level[i].pitch_bytes,
				rtex->surface.stencil_level[i].mode);
		}
	}
}
/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
r600_texture_create_object(struct pipe_screen *screen,
			   const struct pipe_resource *base,
			   unsigned pitch_in_bytes_override,
			   unsigned offset,
			   struct pb_buffer *buf,
			   struct radeon_surf *surface)
{
	struct r600_texture *rtex;
	struct r600_resource *resource;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	rtex = CALLOC_STRUCT(r600_texture);
	if (!rtex)
		return NULL;

	resource = &rtex->resource;
	resource->b.b = *base;
	resource->b.vtbl = &r600_texture_vtbl;
	pipe_reference_init(&resource->b.b.reference, 1);
	resource->b.b.screen = screen;

	/* don't include stencil-only formats which we don't support for rendering */
	rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));

	rtex->surface = *surface;
	if (r600_setup_surface(screen, rtex, pitch_in_bytes_override, offset)) {
		FREE(rtex);
		return NULL;
	}

	/* Tiled depth textures utilize the non-displayable tile order.
	 * This must be done after r600_setup_surface.
	 * Applies to R600-Cayman. */
	rtex->non_disp_tiling = rtex->is_depth && rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D;

	if (rtex->is_depth) {
		if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
				     R600_RESOURCE_FLAG_FLUSHED_DEPTH)) &&
		    !(rscreen->debug_flags & DBG_NO_HYPERZ)) {

			r600_texture_allocate_htile(rscreen, rtex);
		}
	} else {
		if (base->nr_samples > 1) {
			if (!buf) {
				r600_texture_allocate_fmask(rscreen, rtex);
				r600_texture_allocate_cmask(rscreen, rtex);
				rtex->cmask_buffer = &rtex->resource;
			}
			if (!rtex->fmask.size || !rtex->cmask.size) {
				FREE(rtex);
				return NULL;
			}
		}

		if (!buf && rtex->surface.dcc_size &&
		    !(rscreen->debug_flags & DBG_NO_DCC)) {
			/* Reserve space for the DCC buffer. */
			rtex->dcc_offset = align64(rtex->size, rtex->surface.dcc_alignment);
			rtex->size = rtex->dcc_offset + rtex->surface.dcc_size;
			rtex->cb_color_info |= VI_S_028C70_DCC_ENABLE(1);
		}
	}

	/* Now create the backing buffer. */
	if (!buf) {
		if (!r600_init_resource(rscreen, resource, rtex->size,
					rtex->surface.bo_alignment)) {
			FREE(rtex);
			return NULL;
		}
	} else {
		resource->buf = buf;
		resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->buf);
		resource->domains = rscreen->ws->buffer_get_initial_domain(resource->buf);
	}

	if (rtex->cmask.size) {
		/* Initialize the cmask to 0xCC (= compressed state). */
		r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
					 rtex->cmask.offset, rtex->cmask.size,
					 0xCCCCCCCC, R600_COHERENCY_NONE);
	}
	if (rtex->dcc_offset) {
		r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
					 rtex->dcc_offset,
					 rtex->surface.dcc_size,
					 0xFFFFFFFF, R600_COHERENCY_NONE);
	}

	/* Initialize the CMASK base register value. */
	rtex->cmask.base_address_reg =
		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

	if (rscreen->debug_flags & DBG_VM) {
		fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
			rtex->resource.gpu_address,
			rtex->resource.gpu_address + rtex->resource.buf->size,
			base->width0, base->height0, util_max_layer(base, 0)+1, base->last_level+1,
			base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
	}

	if (rscreen->debug_flags & DBG_TEX) {
		r600_print_texture_info(rtex, stdout);
	}

	return rtex;
}
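
/* Layout note (descriptive comment, not in the original file): everything
 * except HTILE shares the single backing buffer. rtex->size grows as pieces
 * are appended: the mip tree sized by surface_init, then FMASK and CMASK for
 * MSAA color, then the DCC region, each placed with align64() at its own
 * alignment. HTILE is the exception and lives in the separate htile_buffer.
 */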
static unsigned r600_choose_tiling(struct r600_common_screen *rscreen,
				   const struct pipe_resource *templ)
{
	const struct util_format_description *desc = util_format_description(templ->format);
	bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING;

	/* MSAA resources must be 2D tiled. */
	if (templ->nr_samples > 1)
		return RADEON_SURF_MODE_2D;

	/* Transfer resources should be linear. */
	if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
		return RADEON_SURF_MODE_LINEAR_ALIGNED;

	/* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */
	if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN &&
	    (templ->bind & PIPE_BIND_COMPUTE_RESOURCE) &&
	    (templ->target == PIPE_TEXTURE_2D ||
	     templ->target == PIPE_TEXTURE_3D))
		force_tiling = true;

	/* Handle common candidates for the linear mode.
	 * Compressed textures and DB surfaces must always be tiled.
	 */
	if (!force_tiling && !util_format_is_compressed(templ->format) &&
	    (!util_format_is_depth_or_stencil(templ->format) ||
	     templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH)) {
		if (rscreen->debug_flags & DBG_NO_TILING)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
		if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Cursors are linear on SI.
		 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
		if (rscreen->chip_class >= SI &&
		    (templ->bind & PIPE_BIND_CURSOR))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		if (templ->bind & PIPE_BIND_LINEAR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures with a very small height are recommended to be linear. */
		if (templ->target == PIPE_TEXTURE_1D ||
		    templ->target == PIPE_TEXTURE_1D_ARRAY ||
		    templ->height0 <= 4)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures likely to be mapped often. */
		if (templ->usage == PIPE_USAGE_STAGING ||
		    templ->usage == PIPE_USAGE_STREAM)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Make small textures 1D tiled. */
	if (templ->width0 <= 16 || templ->height0 <= 16 ||
	    (rscreen->debug_flags & DBG_NO_2D_TILING))
		return RADEON_SURF_MODE_1D;

	/* The allocator will switch to 1D if needed. */
	return RADEON_SURF_MODE_2D;
}
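
/* Illustrative outcomes of the ladder above (not exhaustive): MSAA surfaces
 * always get RADEON_SURF_MODE_2D; staging/stream textures, 1D textures and
 * PIPE_BIND_LINEAR resources typically come out RADEON_SURF_MODE_LINEAR_ALIGNED;
 * textures at most 16 pixels wide or high fall back to RADEON_SURF_MODE_1D;
 * everything else defaults to 2D tiling and lets the allocator demote it.
 */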
struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
					  const struct pipe_resource *templ)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_surf surface = {0};
	int r;

	r = r600_init_surface(rscreen, &surface, templ,
			      r600_choose_tiling(rscreen, templ),
			      templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);
	if (r) {
		return NULL;
	}

	r = rscreen->ws->surface_best(rscreen->ws, &surface);
	if (r) {
		return NULL;
	}

	return (struct pipe_resource *)r600_texture_create_object(screen, templ, 0,
								   0, NULL, &surface);
}
static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
						      const struct pipe_resource *templ,
						      struct winsys_handle *whandle,
						      unsigned usage)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct pb_buffer *buf = NULL;
	unsigned stride = 0, offset = 0;
	unsigned array_mode;
	struct radeon_surf surface;
	int r;
	struct radeon_bo_metadata metadata = {};
	struct r600_texture *rtex;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	    templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride, &offset);
	if (!buf)
		return NULL;

	rscreen->ws->buffer_get_metadata(buf, &metadata);

	surface.pipe_config = metadata.pipe_config;
	surface.bankw = metadata.bankw;
	surface.bankh = metadata.bankh;
	surface.tile_split = metadata.tile_split;
	surface.mtilea = metadata.mtilea;
	surface.num_banks = metadata.num_banks;

	if (metadata.macrotile == RADEON_LAYOUT_TILED)
		array_mode = RADEON_SURF_MODE_2D;
	else if (metadata.microtile == RADEON_LAYOUT_TILED)
		array_mode = RADEON_SURF_MODE_1D;
	else
		array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

	r = r600_init_surface(rscreen, &surface, templ, array_mode, false);
	if (r)
		return NULL;

	if (metadata.scanout)
		surface.flags |= RADEON_SURF_SCANOUT;

	rtex = r600_texture_create_object(screen, templ, stride,
					  offset, buf, &surface);
	if (!rtex)
		return NULL;

	rtex->resource.is_shared = true;
	rtex->resource.external_usage = usage;

	return &rtex->resource.b.b;
}
bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     struct r600_texture **staging)
{
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct pipe_resource resource;
	struct r600_texture **flushed_depth_texture = staging ?
			staging : &rtex->flushed_depth_texture;

	if (!staging && rtex->flushed_depth_texture)
		return true; /* it's ready */

	resource.target = texture->target;
	resource.format = texture->format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
	resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;

	if (staging)
		resource.flags |= R600_RESOURCE_FLAG_TRANSFER;

	*flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (*flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold flushed depth\n");
		return false;
	}

	(*flushed_depth_texture)->is_flushing_texture = TRUE;
	(*flushed_depth_texture)->non_disp_tiling = false;
	return true;
}
/*
 * Initialize the pipe_resource descriptor to be of the same size as the box,
 * which is supposed to hold a subregion of the texture "orig" at the given
 * mipmap level.
 */
static void r600_init_temp_resource_from_box(struct pipe_resource *res,
					     struct pipe_resource *orig,
					     const struct pipe_box *box,
					     unsigned level, unsigned flags)
{
	memset(res, 0, sizeof(*res));
	res->format = orig->format;
	res->width0 = box->width;
	res->height0 = box->height;
	res->depth0 = 1;
	res->array_size = 1;
	res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	res->flags = flags;

	/* We must set the correct texture target and dimensions for a 3D box. */
	if (box->depth > 1 && util_max_layer(orig, level) > 0) {
		res->target = PIPE_TEXTURE_2D_ARRAY;
		res->array_size = box->depth;
	} else {
		res->target = PIPE_TEXTURE_2D;
	}
}
static void *r600_texture_transfer_map(struct pipe_context *ctx,
				       struct pipe_resource *texture,
				       unsigned level,
				       unsigned usage,
				       const struct pipe_box *box,
				       struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct r600_transfer *trans;
	boolean use_staging_texture = FALSE;
	struct r600_resource *buf;
	unsigned offset = 0;
	char *map;

	assert(!(texture->flags & R600_RESOURCE_FLAG_TRANSFER));

	/* We cannot map a tiled texture directly because the data is
	 * in a different order, therefore we do detiling using a blit.
	 *
	 * Also, use a temporary in GTT memory for read transfers, as
	 * the CPU is much happier reading out of cached system memory
	 * than uncached VRAM.
	 */
	if (rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
		use_staging_texture = TRUE;
	} else if ((usage & PIPE_TRANSFER_READ) &&
		   rtex->resource.domains & RADEON_DOMAIN_VRAM) {
		/* Untiled buffers in VRAM, which is slow for CPU reads */
		use_staging_texture = TRUE;
	} else if (!(usage & PIPE_TRANSFER_READ) &&
		   (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(rtex->resource.buf, 0, RADEON_USAGE_READWRITE))) {
		/* Use a staging texture for uploads if the underlying BO is busy. */
		use_staging_texture = TRUE;
	}

	trans = CALLOC_STRUCT(r600_transfer);
	if (!trans)
		return NULL;
	trans->transfer.resource = texture;
	trans->transfer.level = level;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;

	if (rtex->is_depth) {
		struct r600_texture *staging_depth;

		if (rtex->resource.b.b.nr_samples > 1) {
			/* MSAA depth buffers need to be converted to single sample buffers.
			 *
			 * Mapping MSAA depth buffers can occur if ReadPixels is called
			 * with a multisample GLX visual.
			 *
			 * First downsample the depth buffer to a temporary texture,
			 * then decompress the temporary one to staging.
			 *
			 * Only the region being mapped is transfered.
			 */
			struct pipe_resource resource;

			r600_init_temp_resource_from_box(&resource, texture, box, level, 0);

			if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			if (usage & PIPE_TRANSFER_READ) {
				struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
				if (!temp) {
					R600_ERR("failed to create a temporary depth texture\n");
					FREE(trans);
					return NULL;
				}

				r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
				rctx->blit_decompress_depth(ctx, (struct r600_texture *)temp, staging_depth,
							    0, 0, 0, box->depth, 0, 0);
				pipe_resource_reference(&temp, NULL);
			}
		} else {
			/* XXX: only readback the rectangle which is being mapped? */
			/* XXX: when discard is true, no need to read back from depth texture */
			if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			rctx->blit_decompress_depth(ctx, rtex, staging_depth,
						    level, level,
						    box->z, box->z + box->depth - 1,
						    0, 0);

			offset = r600_texture_get_offset(staging_depth, level, box);
		}

		trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = staging_depth->surface.level[level].slice_size;
		trans->staging = (struct r600_resource*)staging_depth;
		buf = trans->staging;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct r600_texture *staging;

		r600_init_temp_resource_from_box(&resource, texture, box, level,
						 R600_RESOURCE_FLAG_TRANSFER);
		resource.usage = (usage & PIPE_TRANSFER_READ) ?
			PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;

		/* Create the temporary texture. */
		staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
		if (!staging) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			FREE(trans);
			return NULL;
		}
		trans->staging = &staging->resource;
		trans->transfer.stride = staging->surface.level[0].pitch_bytes;
		trans->transfer.layer_stride = staging->surface.level[0].slice_size;

		if (usage & PIPE_TRANSFER_READ)
			r600_copy_to_staging_texture(ctx, trans);
		else
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

		buf = trans->staging;
	} else {
		/* the resource is mapped directly */
		trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
		offset = r600_texture_get_offset(rtex, level, box);
		buf = &rtex->resource;
	}

	if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
		pipe_resource_reference((struct pipe_resource **)&trans->staging, NULL);
		FREE(trans);
		return NULL;
	}

	*ptransfer = &trans->transfer;
	return map + offset;
}
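
/* Summary of the mapping strategy above (descriptive comment): tiled surfaces
 * are always detiled through a staging texture; linear surfaces in VRAM still
 * get a GTT staging copy for reads, because CPU reads from uncached VRAM are
 * slow; and busy linear surfaces get a staging copy for writes so the CPU
 * never waits on the GPU. Only linear textures that are either read-mapped
 * outside VRAM or write-mapped while idle are mapped in place.
 */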
static void r600_texture_transfer_unmap(struct pipe_context *ctx,
					struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct pipe_resource *texture = transfer->resource;
	struct r600_texture *rtex = (struct r600_texture*)texture;

	if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
		if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &rtransfer->staging->b.b, transfer->level,
						  &transfer->box);
		} else {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
	}

	if (rtransfer->staging)
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);

	FREE(transfer);
}
static const struct u_resource_vtbl r600_texture_vtbl =
{
	NULL,				  /* get_handle */
	r600_texture_destroy,		  /* resource_destroy */
	r600_texture_transfer_map,	  /* transfer_map */
	u_default_transfer_flush_region,  /* transfer_flush_region */
	r600_texture_transfer_unmap,	  /* transfer_unmap */
	NULL				  /* transfer_inline_write */
};
struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
						struct pipe_resource *texture,
						const struct pipe_surface *templ,
						unsigned width, unsigned height)
{
	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);

	if (!surface)
		return NULL;

	assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
	assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));

	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.context = pipe;
	surface->base.format = templ->format;
	surface->base.width = width;
	surface->base.height = height;
	surface->base.u = templ->u;
	return &surface->base;
}
static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
						struct pipe_resource *tex,
						const struct pipe_surface *templ)
{
	unsigned level = templ->u.tex.level;
	unsigned width = u_minify(tex->width0, level);
	unsigned height = u_minify(tex->height0, level);

	if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
		const struct util_format_description *tex_desc
			= util_format_description(tex->format);
		const struct util_format_description *templ_desc
			= util_format_description(templ->format);

		assert(tex_desc->block.bits == templ_desc->block.bits);

		/* Adjust size of surface if and only if the block width or
		 * height is changed. */
		if (tex_desc->block.width != templ_desc->block.width ||
		    tex_desc->block.height != templ_desc->block.height) {
			unsigned nblks_x = util_format_get_nblocksx(tex->format, width);
			unsigned nblks_y = util_format_get_nblocksy(tex->format, height);

			width = nblks_x * templ_desc->block.width;
			height = nblks_y * templ_desc->block.height;
		}
	}

	return r600_create_surface_custom(pipe, tex, templ, width, height);
}
static void r600_surface_destroy(struct pipe_context *pipe,
				 struct pipe_surface *surface)
{
	struct r600_surface *surf = (struct r600_surface*)surface;
	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask, NULL);
	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask, NULL);
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}
unsigned r600_translate_colorswap(enum pipe_format format, bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);

#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_SWAP_STD;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return ~0U;

	switch (desc->nr_channels) {
	case 1:
		if (HAS_SWIZZLE(0,X))
			return V_0280A0_SWAP_STD; /* X___ */
		else if (HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* ___X */
		break;
	case 2:
		if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
		    (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
		    (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
			return V_0280A0_SWAP_STD; /* XY__ */
		else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
			 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
			 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
			return (do_endian_swap ? V_0280A0_SWAP_STD : V_0280A0_SWAP_STD_REV);
		else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
			return V_0280A0_SWAP_ALT; /* X__Y */
		else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* Y__X */
		break;
	case 3:
		if (HAS_SWIZZLE(0,X))
			return (do_endian_swap ? V_0280A0_SWAP_STD_REV : V_0280A0_SWAP_STD);
		else if (HAS_SWIZZLE(0,Z))
			return V_0280A0_SWAP_STD_REV; /* ZYX */
		break;
	case 4:
		/* check the middle channels, the 1st and 4th channel can be NONE */
		if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
			return V_0280A0_SWAP_STD; /* XYZW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
			return V_0280A0_SWAP_STD_REV; /* WZYX */
		} else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
			return V_0280A0_SWAP_ALT; /* ZYXW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
			/* YZWX */
			if (desc->is_array)
				return V_0280A0_SWAP_ALT_REV;
			else
				return (do_endian_swap ? V_0280A0_SWAP_ALT : V_0280A0_SWAP_ALT_REV);
		}
		break;
	}
	return ~0U;
}
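
/* Examples (illustrative, assuming little-endian, i.e. do_endian_swap = false):
 * PIPE_FORMAT_R8G8B8A8_UNORM (XYZW) maps to V_0280A0_SWAP_STD,
 * PIPE_FORMAT_B8G8R8A8_UNORM (ZYXW) to V_0280A0_SWAP_ALT, and
 * PIPE_FORMAT_A8B8G8R8_UNORM (WZYX) to V_0280A0_SWAP_STD_REV.
 */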
static void evergreen_set_clear_color(struct r600_texture *rtex,
				      enum pipe_format surface_format,
				      const union pipe_color_union *color)
{
	union util_color uc;

	memset(&uc, 0, sizeof(uc));

	if (util_format_is_pure_uint(surface_format)) {
		util_format_write_4ui(surface_format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
	} else if (util_format_is_pure_sint(surface_format)) {
		util_format_write_4i(surface_format, color->i, 0, &uc, 0, 0, 0, 1, 1);
	} else {
		util_pack_color(color->f, surface_format, &uc);
	}

	memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t));
}
static void vi_get_fast_clear_parameters(enum pipe_format surface_format,
					 const union pipe_color_union *color,
					 uint32_t* reset_value,
					 bool* clear_words_needed)
{
	bool values[4] = {};
	int i;
	bool main_value = false;
	bool extra_value = false;
	int extra_channel;
	const struct util_format_description *desc = util_format_description(surface_format);

	*clear_words_needed = true;
	*reset_value = 0x20202020U;

	/* If we want to clear without needing a fast clear eliminate step, we
	 * can set each channel to 0 or 1 (or 0/max for integer formats). We
	 * have two sets of flags, one for the last or first channel(extra) and
	 * one for the other channels(main).
	 */

	if (surface_format == PIPE_FORMAT_R11G11B10_FLOAT ||
	    surface_format == PIPE_FORMAT_B5G6R5_UNORM ||
	    surface_format == PIPE_FORMAT_B5G6R5_SRGB) {
		extra_channel = -1;
	} else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
		if(r600_translate_colorswap(surface_format, FALSE) <= 1)
			extra_channel = desc->nr_channels - 1;
		else
			extra_channel = 0;
	} else
		return;

	for (i = 0; i < 4; ++i) {
		int index = desc->swizzle[i] - PIPE_SWIZZLE_X;

		if (desc->swizzle[i] < PIPE_SWIZZLE_X ||
		    desc->swizzle[i] > PIPE_SWIZZLE_W)
			continue;

		if (util_format_is_pure_sint(surface_format)) {
			values[i] = color->i[i] != 0;
			if (color->i[i] != 0 && color->i[i] != INT32_MAX)
				return;
		} else if (util_format_is_pure_uint(surface_format)) {
			values[i] = color->ui[i] != 0U;
			if (color->ui[i] != 0U && color->ui[i] != UINT32_MAX)
				return;
		} else {
			values[i] = color->f[i] != 0.0F;
			if (color->f[i] != 0.0F && color->f[i] != 1.0F)
				return;
		}

		if (index == extra_channel)
			extra_value = values[i];
		else
			main_value = values[i];
	}

	for (int i = 0; i < 4; ++i)
		if (values[i] != main_value &&
		    desc->swizzle[i] - PIPE_SWIZZLE_X != extra_channel &&
		    desc->swizzle[i] >= PIPE_SWIZZLE_X &&
		    desc->swizzle[i] <= PIPE_SWIZZLE_W)
			return;

	*clear_words_needed = false;
	if (main_value)
		*reset_value |= 0x80808080U;

	if (extra_value)
		*reset_value |= 0x40404040U;
}
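
/* Reading of the constants above (added comment, my interpretation): the DCC
 * clear word is built per byte: 0x20 is the base "cleared" encoding, 0x80
 * marks the main channels as clearing to 1 (or the integer max) and 0x40 does
 * the same for the extra (first or last) channel. For example, an opaque black
 * clear (0, 0, 0, 1) on an RGBA surface yields 0x60606060 and leaves
 * *clear_words_needed false, so no later fast-clear eliminate is required.
 */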
void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
				   struct pipe_framebuffer_state *fb,
				   struct r600_atom *fb_state,
				   unsigned *buffers, unsigned *dirty_cbufs,
				   const union pipe_color_union *color)
{
	int i;

	/* This function is broken in BE, so just disable this path for now */
#ifdef PIPE_ARCH_BIG_ENDIAN
	return;
#endif

	if (rctx->render_cond)
		return;

	for (i = 0; i < fb->nr_cbufs; i++) {
		struct r600_texture *tex;
		unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;

		if (!fb->cbufs[i])
			continue;

		/* if this colorbuffer is not being cleared */
		if (!(*buffers & clear_bit))
			continue;

		tex = (struct r600_texture *)fb->cbufs[i]->texture;

		/* 128-bit formats are unusupported */
		if (util_format_get_blocksizebits(fb->cbufs[i]->format) > 64) {
			continue;
		}

		/* the clear is allowed if all layers are bound */
		if (fb->cbufs[i]->u.tex.first_layer != 0 ||
		    fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
			continue;
		}

		/* cannot clear mipmapped textures */
		if (fb->cbufs[i]->texture->last_level != 0) {
			continue;
		}

		/* only supported on tiled surfaces */
		if (tex->surface.level[0].mode < RADEON_SURF_MODE_1D) {
			continue;
		}

		/* shared textures can't use fast clear without an explicit flush,
		 * because there is no way to communicate the clear color among
		 * all clients
		 */
		if (tex->resource.is_shared &&
		    !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			continue;

		/* fast color clear with 1D tiling doesn't work on old kernels and CIK */
		if (tex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
		    rctx->chip_class >= CIK &&
		    rctx->screen->info.drm_major == 2 &&
		    rctx->screen->info.drm_minor < 38) {
			continue;
		}

		if (tex->dcc_offset) {
			uint32_t reset_value;
			bool clear_words_needed;

			if (rctx->screen->debug_flags & DBG_NO_DCC_CLEAR)
				continue;

			vi_get_fast_clear_parameters(fb->cbufs[i]->format, color, &reset_value, &clear_words_needed);

			rctx->clear_buffer(&rctx->b, &tex->resource.b.b,
					   tex->dcc_offset, tex->surface.dcc_size,
					   reset_value, R600_COHERENCY_CB_META);

			if (clear_words_needed)
				tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
		} else {
			/* Stoney/RB+ doesn't work with CMASK fast clear. */
			if (rctx->family == CHIP_STONEY)
				continue;

			/* ensure CMASK is enabled */
			r600_texture_alloc_cmask_separate(rctx->screen, tex);
			if (tex->cmask.size == 0) {
				continue;
			}

			/* Do the fast clear. */
			rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b,
					   tex->cmask.offset, tex->cmask.size, 0,
					   R600_COHERENCY_CB_META);

			tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
		}

		evergreen_set_clear_color(tex, fb->cbufs[i]->format, color);

		if (dirty_cbufs)
			*dirty_cbufs |= 1 << i;
		rctx->set_atom_dirty(rctx, fb_state, true);
		*buffers &= ~clear_bit;
	}
}
void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.resource_from_handle = r600_texture_from_handle;
	rscreen->b.resource_get_handle = r600_texture_get_handle;
}

void r600_init_context_texture_functions(struct r600_common_context *rctx)
{
	rctx->b.create_surface = r600_create_surface;
	rctx->b.surface_destroy = r600_surface_destroy;
}