/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe_common.h"
#include "r600_query.h"
#include "util/format/u_format.h"
#include "util/u_log.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include "util/u_surface.h"
#include "util/os_time.h"
#include "state_tracker/winsys_handle.h"
static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex);
static enum radeon_surf_mode
r600_choose_tiling(struct r600_common_screen *rscreen,
		   const struct pipe_resource *templ);
bool r600_prepare_for_dma_blit(struct r600_common_context *rctx,
			       struct r600_texture *rdst,
			       unsigned dst_level, unsigned dstx,
			       unsigned dsty, unsigned dstz,
			       struct r600_texture *rsrc,
			       unsigned src_level,
			       const struct pipe_box *src_box)
{
	if (rdst->surface.bpe != rsrc->surface.bpe)
		return false;

	/* MSAA: Blits don't exist in the real world. */
	if (rsrc->resource.b.b.nr_samples > 1 ||
	    rdst->resource.b.b.nr_samples > 1)
		return false;

	/* Depth-stencil surfaces:
	 *   When dst is linear, the DB->CB copy preserves HTILE.
	 *   When dst is tiled, the 3D path must be used to update HTILE.
	 */
	if (rsrc->is_depth || rdst->is_depth)
		return false;

	/* CMASK as:
	 *   src: Both texture and SDMA paths need decompression. Use SDMA.
	 *   dst: If overwriting the whole texture, discard CMASK and use
	 *        SDMA. Otherwise, use the 3D path.
	 */
	if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
		/* The CMASK clear is only enabled for the first level. */
		assert(dst_level == 0);
		if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
						      dstx, dsty, dstz, src_box->width,
						      src_box->height, src_box->depth))
			return false;

		r600_texture_discard_cmask(rctx->screen, rdst);
	}

	/* All requirements are met. Prepare textures for SDMA. */
	if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
		rctx->b.flush_resource(&rctx->b, &rsrc->resource.b.b);

	assert(!(rsrc->dirty_level_mask & (1 << src_level)));
	assert(!(rdst->dirty_level_mask & (1 << dst_level)));

	return true;
}
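/*
 * [Added note - not from the original source] This helper is a pure
 * precondition check for the SDMA copy path: equal bpe, no MSAA, no
 * depth/stencil, and no dirty CMASK left on the levels involved. A caller
 * such as the driver's dma_copy hook is assumed to fall back to the 3D blit
 * path whenever this returns false.
 */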
/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void r600_copy_region_with_blit(struct pipe_context *pipe,
				       struct pipe_resource *dst,
				       unsigned dst_level,
				       unsigned dstx, unsigned dsty, unsigned dstz,
				       struct pipe_resource *src,
				       unsigned src_level,
				       const struct pipe_box *src_box)
{
	struct pipe_blit_info blit;

	memset(&blit, 0, sizeof(blit));
	blit.src.resource = src;
	blit.src.format = src->format;
	blit.src.level = src_level;
	blit.src.box = *src_box;
	blit.dst.resource = dst;
	blit.dst.format = dst->format;
	blit.dst.level = dst_level;
	blit.dst.box.x = dstx;
	blit.dst.box.y = dsty;
	blit.dst.box.z = dstz;
	blit.dst.box.width = src_box->width;
	blit.dst.box.height = src_box->height;
	blit.dst.box.depth = src_box->depth;
	blit.mask = util_format_get_mask(src->format) &
		    util_format_get_mask(dst->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	pipe->blit(pipe, &blit);
}
/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = &rtransfer->staging->b.b;
	struct pipe_resource *src = transfer->resource;

	if (src->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
					   src, transfer->level, &transfer->box);
		return;
	}

	rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
		       &transfer->box);
}
/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = transfer->resource;
	struct pipe_resource *src = &rtransfer->staging->b.b;
	struct pipe_box sbox;

	u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

	if (dst->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, transfer->level,
					   transfer->box.x, transfer->box.y, transfer->box.z,
					   src, 0, &sbox);
		return;
	}

	rctx->dma_copy(ctx, dst, transfer->level,
		       transfer->box.x, transfer->box.y, transfer->box.z,
		       src, 0, &sbox);
}
static unsigned r600_texture_get_offset(struct r600_common_screen *rscreen,
					struct r600_texture *rtex, unsigned level,
					const struct pipe_box *box,
					unsigned *stride,
					unsigned *layer_stride)
{
	*stride = rtex->surface.u.legacy.level[level].nblk_x *
		  rtex->surface.bpe;
	assert((uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 <= UINT_MAX);
	*layer_stride = (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4;

	if (!box)
		return rtex->surface.u.legacy.level[level].offset;

	/* Each texture is an array of mipmap levels. Each level is
	 * an array of slices. */
	return rtex->surface.u.legacy.level[level].offset +
	       box->z * (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 +
	       (box->y / rtex->surface.blk_h *
		rtex->surface.u.legacy.level[level].nblk_x +
		box->x / rtex->surface.blk_w) * rtex->surface.bpe;
}
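/*
 * [Added note - worked example, numbers assumed] For a linear 256x256 level
 * with bpe = 4, blk_w = blk_h = 1, nblk_x = 256 and slice_size_dw * 4 =
 * 262144, mapping box (x=16, y=8, z=1) gives:
 *   offset = level_offset + 1 * 262144 + (8 * 256 + 16) * 4
 *          = level_offset + 262144 + 8256
 * The x/y coordinates are converted from pixels to blocks first, which only
 * matters for compressed formats where blk_w/blk_h > 1.
 */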
static int r600_init_surface(struct r600_common_screen *rscreen,
			     struct radeon_surf *surface,
			     const struct pipe_resource *ptex,
			     enum radeon_surf_mode array_mode,
			     unsigned pitch_in_bytes_override,
			     unsigned offset,
			     bool is_imported,
			     bool is_scanout,
			     bool is_flushed_depth)
{
	const struct util_format_description *desc =
		util_format_description(ptex->format);
	bool is_depth, is_stencil;
	int r;
	unsigned i, bpe, flags = 0;

	is_depth = util_format_has_depth(desc);
	is_stencil = util_format_has_stencil(desc);

	if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
	    ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
		bpe = 4; /* stencil is allocated separately on evergreen */
	} else {
		bpe = util_format_get_blocksize(ptex->format);
		assert(util_is_power_of_two_or_zero(bpe));
	}

	if (!is_flushed_depth && is_depth) {
		flags |= RADEON_SURF_ZBUFFER;

		if (is_stencil)
			flags |= RADEON_SURF_SBUFFER;
	}

	if (ptex->bind & PIPE_BIND_SCANOUT || is_scanout) {
		/* This should catch bugs in gallium users setting incorrect flags. */
		assert(ptex->nr_samples <= 1 &&
		       ptex->array_size == 1 &&
		       ptex->last_level == 0 &&
		       !(flags & RADEON_SURF_Z_OR_SBUFFER));

		flags |= RADEON_SURF_SCANOUT;
	}

	if (ptex->bind & PIPE_BIND_SHARED)
		flags |= RADEON_SURF_SHAREABLE;
	if (is_imported)
		flags |= RADEON_SURF_IMPORTED | RADEON_SURF_SHAREABLE;
	if (!(ptex->flags & R600_RESOURCE_FLAG_FORCE_TILING))
		flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE;

	r = rscreen->ws->surface_init(rscreen->ws, ptex,
				      flags, bpe, array_mode, surface);
	if (r)
		return r;

	if (pitch_in_bytes_override &&
	    pitch_in_bytes_override != surface->u.legacy.level[0].nblk_x * bpe) {
		/* old ddx on evergreen over estimate alignment for 1d, only 1 level
		 * for those
		 */
		surface->u.legacy.level[0].nblk_x = pitch_in_bytes_override / bpe;
		surface->u.legacy.level[0].slice_size_dw =
			((uint64_t)pitch_in_bytes_override * surface->u.legacy.level[0].nblk_y) / 4;
	}

	if (offset) {
		for (i = 0; i < ARRAY_SIZE(surface->u.legacy.level); ++i)
			surface->u.legacy.level[i].offset += offset;
	}

	return 0;
}
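/*
 * [Added note - illustrative] The pitch_in_bytes_override path above exists
 * for imports whose row pitch was chosen by an external allocator (e.g. an
 * old DDX). With bpe = 4 and an override of 4096 bytes, nblk_x becomes
 * 4096 / 4 = 1024 pixels per row, and slice_size_dw is recomputed from the
 * overridden pitch so that per-layer addressing stays consistent with the
 * imported layout.
 */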
static void r600_texture_init_metadata(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex,
				       struct radeon_bo_metadata *metadata)
{
	struct radeon_surf *surface = &rtex->surface;

	memset(metadata, 0, sizeof(*metadata));

	metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
		RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
	metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
		RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
	metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config;
	metadata->u.legacy.bankw = surface->u.legacy.bankw;
	metadata->u.legacy.bankh = surface->u.legacy.bankh;
	metadata->u.legacy.tile_split = surface->u.legacy.tile_split;
	metadata->u.legacy.mtilea = surface->u.legacy.mtilea;
	metadata->u.legacy.num_banks = surface->u.legacy.num_banks;
	metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
	metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
}
static void r600_surface_import_metadata(struct r600_common_screen *rscreen,
					 struct radeon_surf *surf,
					 struct radeon_bo_metadata *metadata,
					 enum radeon_surf_mode *array_mode,
					 bool *is_scanout)
{
	surf->u.legacy.pipe_config = metadata->u.legacy.pipe_config;
	surf->u.legacy.bankw = metadata->u.legacy.bankw;
	surf->u.legacy.bankh = metadata->u.legacy.bankh;
	surf->u.legacy.tile_split = metadata->u.legacy.tile_split;
	surf->u.legacy.mtilea = metadata->u.legacy.mtilea;
	surf->u.legacy.num_banks = metadata->u.legacy.num_banks;

	if (metadata->u.legacy.macrotile == RADEON_LAYOUT_TILED)
		*array_mode = RADEON_SURF_MODE_2D;
	else if (metadata->u.legacy.microtile == RADEON_LAYOUT_TILED)
		*array_mode = RADEON_SURF_MODE_1D;
	else
		*array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

	*is_scanout = metadata->u.legacy.scanout;
}
static void r600_eliminate_fast_color_clear(struct r600_common_context *rctx,
					    struct r600_texture *rtex)
{
	struct r600_common_screen *rscreen = rctx->screen;
	struct pipe_context *ctx = &rctx->b;

	if (ctx == rscreen->aux_context)
		mtx_lock(&rscreen->aux_context_lock);

	ctx->flush_resource(ctx, &rtex->resource.b.b);
	ctx->flush(ctx, NULL, 0);

	if (ctx == rscreen->aux_context)
		mtx_unlock(&rscreen->aux_context_lock);
}
static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex)
{
	if (!rtex->cmask.size)
		return;

	assert(rtex->resource.b.b.nr_samples <= 1);

	/* Disable CMASK. */
	memset(&rtex->cmask, 0, sizeof(rtex->cmask));
	rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;
	rtex->dirty_level_mask = 0;

	rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1);

	if (rtex->cmask_buffer != &rtex->resource)
		r600_resource_reference(&rtex->cmask_buffer, NULL);

	/* Notify all contexts about the change. */
	p_atomic_inc(&rscreen->dirty_tex_counter);
	p_atomic_inc(&rscreen->compressed_colortex_counter);
}
static void r600_reallocate_texture_inplace(struct r600_common_context *rctx,
					    struct r600_texture *rtex,
					    unsigned new_bind_flag,
					    bool invalidate_storage)
{
	struct pipe_screen *screen = rctx->b.screen;
	struct r600_texture *new_tex;
	struct pipe_resource templ = rtex->resource.b.b;
	unsigned i;

	templ.bind |= new_bind_flag;

	/* r600g doesn't react to dirty_tex_descriptor_counter */
	if (rctx->chip_class < GFX6)
		return;

	if (rtex->resource.b.is_shared)
		return;

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		if (rtex->surface.is_linear)
			return;

		/* This fails with MSAA, depth, and compressed textures. */
		if (r600_choose_tiling(rctx->screen, &templ) !=
		    RADEON_SURF_MODE_LINEAR_ALIGNED)
			return;
	}

	new_tex = (struct r600_texture*)screen->resource_create(screen, &templ);
	if (!new_tex)
		return;

	/* Copy the pixels to the new texture. */
	if (!invalidate_storage) {
		for (i = 0; i <= templ.last_level; i++) {
			struct pipe_box box;

			u_box_3d(0, 0, 0,
				 u_minify(templ.width0, i), u_minify(templ.height0, i),
				 util_num_layers(&templ, i), &box);

			rctx->dma_copy(&rctx->b, &new_tex->resource.b.b, i, 0, 0, 0,
				       &rtex->resource.b.b, i, &box);
		}
	}

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		r600_texture_discard_cmask(rctx->screen, rtex);
	}

	/* Replace the structure fields of rtex. */
	rtex->resource.b.b.bind = templ.bind;
	pb_reference(&rtex->resource.buf, new_tex->resource.buf);
	rtex->resource.gpu_address = new_tex->resource.gpu_address;
	rtex->resource.vram_usage = new_tex->resource.vram_usage;
	rtex->resource.gart_usage = new_tex->resource.gart_usage;
	rtex->resource.bo_size = new_tex->resource.bo_size;
	rtex->resource.bo_alignment = new_tex->resource.bo_alignment;
	rtex->resource.domains = new_tex->resource.domains;
	rtex->resource.flags = new_tex->resource.flags;
	rtex->size = new_tex->size;
	rtex->db_render_format = new_tex->db_render_format;
	rtex->db_compatible = new_tex->db_compatible;
	rtex->can_sample_z = new_tex->can_sample_z;
	rtex->can_sample_s = new_tex->can_sample_s;
	rtex->surface = new_tex->surface;
	rtex->fmask = new_tex->fmask;
	rtex->cmask = new_tex->cmask;
	rtex->cb_color_info = new_tex->cb_color_info;
	rtex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode;
	rtex->htile_offset = new_tex->htile_offset;
	rtex->depth_cleared = new_tex->depth_cleared;
	rtex->stencil_cleared = new_tex->stencil_cleared;
	rtex->non_disp_tiling = new_tex->non_disp_tiling;
	rtex->framebuffers_bound = new_tex->framebuffers_bound;

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		assert(!rtex->htile_offset);
		assert(!rtex->cmask.size);
		assert(!rtex->fmask.size);
		assert(!rtex->is_depth);
	}

	r600_texture_reference(&new_tex, NULL);

	p_atomic_inc(&rctx->screen->dirty_tex_counter);
}
static void r600_texture_get_info(struct pipe_screen *screen,
				  struct pipe_resource *resource,
				  unsigned *pstride,
				  unsigned *poffset)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_texture *rtex = (struct r600_texture*)resource;
	unsigned stride = 0;
	unsigned offset = 0;

	if (!rscreen || !rtex)
		return;

	if (resource->target != PIPE_BUFFER) {
		offset = rtex->surface.u.legacy.level[0].offset;
		stride = rtex->surface.u.legacy.level[0].nblk_x *
			 rtex->surface.bpe;
	}

	if (pstride)
		*pstride = stride;
	if (poffset)
		*poffset = offset;
}
static bool r600_texture_get_handle(struct pipe_screen *screen,
				    struct pipe_context *ctx,
				    struct pipe_resource *resource,
				    struct winsys_handle *whandle,
				    unsigned usage)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_common_context *rctx;
	struct r600_resource *res = (struct r600_resource*)resource;
	struct r600_texture *rtex = (struct r600_texture*)resource;
	struct radeon_bo_metadata metadata;
	bool update_metadata = false;
	unsigned stride, offset, slice_size;

	ctx = threaded_context_unwrap_sync(ctx);
	rctx = (struct r600_common_context*)(ctx ? ctx : rscreen->aux_context);

	if (resource->target != PIPE_BUFFER) {
		/* This is not supported now, but it might be required for OpenCL
		 * interop in the future.
		 */
		if (resource->nr_samples > 1 || rtex->is_depth)
			return false;

		/* Move a suballocated texture into a non-suballocated allocation. */
		if (rscreen->ws->buffer_is_suballocated(res->buf) ||
		    rtex->surface.tile_swizzle) {
			assert(!res->b.is_shared);
			r600_reallocate_texture_inplace(rctx, rtex,
							PIPE_BIND_SHARED, false);
			rctx->b.flush(&rctx->b, NULL, 0);
			assert(res->b.b.bind & PIPE_BIND_SHARED);
			assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
			assert(rtex->surface.tile_swizzle == 0);
		}

		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
		    rtex->cmask.size) {
			/* Eliminate fast clear (CMASK) */
			r600_eliminate_fast_color_clear(rctx, rtex);

			/* Disable CMASK if flush_resource isn't going
			 * to be called.
			 */
			if (rtex->cmask.size)
				r600_texture_discard_cmask(rscreen, rtex);
		}

		if (!res->b.is_shared || update_metadata) {
			r600_texture_init_metadata(rscreen, rtex, &metadata);

			rscreen->ws->buffer_set_metadata(res->buf, &metadata);
		}

		slice_size = (uint64_t)rtex->surface.u.legacy.level[0].slice_size_dw * 4;
	} else {
		/* Move a suballocated buffer into a non-suballocated allocation. */
		if (rscreen->ws->buffer_is_suballocated(res->buf)) {
			assert(!res->b.is_shared);

			/* Allocate a new buffer with PIPE_BIND_SHARED. */
			struct pipe_resource templ = res->b.b;
			templ.bind |= PIPE_BIND_SHARED;

			struct pipe_resource *newb =
				screen->resource_create(screen, &templ);
			if (!newb)
				return false;

			/* Copy the old buffer contents to the new one. */
			struct pipe_box box;
			u_box_1d(0, newb->width0, &box);
			rctx->b.resource_copy_region(&rctx->b, newb, 0, 0, 0, 0,
						     &res->b.b, 0, &box);
			/* Move the new buffer storage to the old pipe_resource. */
			r600_replace_buffer_storage(&rctx->b, &res->b.b, newb);
			pipe_resource_reference(&newb, NULL);

			assert(res->b.b.bind & PIPE_BIND_SHARED);
			assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
		}

		slice_size = 0;
	}

	r600_texture_get_info(screen, resource, &stride, &offset);

	if (res->b.is_shared) {
		/* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
		 * doesn't set it.
		 */
		res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
	} else {
		res->b.is_shared = true;
		res->external_usage = usage;
	}

	whandle->stride = stride;
	whandle->offset = offset + slice_size * whandle->layer;

	return rscreen->ws->buffer_get_handle(rscreen->ws, res->buf, whandle);
}
static void r600_texture_destroy(struct pipe_screen *screen,
				 struct pipe_resource *ptex)
{
	struct r600_texture *rtex = (struct r600_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;

	r600_texture_reference(&rtex->flushed_depth_texture, NULL);
	pipe_resource_reference((struct pipe_resource**)&resource->immed_buffer, NULL);

	if (rtex->cmask_buffer != &rtex->resource) {
		r600_resource_reference(&rtex->cmask_buffer, NULL);
	}
	pb_reference(&resource->buf, NULL);
	FREE(rtex);
}
;
597 /* The number of samples can be specified independently of the texture. */
598 void r600_texture_get_fmask_info(struct r600_common_screen
*rscreen
,
599 struct r600_texture
*rtex
,
601 struct r600_fmask_info
*out
)
603 /* FMASK is allocated like an ordinary texture. */
604 struct pipe_resource templ
= rtex
->resource
.b
.b
;
605 struct radeon_surf fmask
= {};
608 memset(out
, 0, sizeof(*out
));
610 templ
.nr_samples
= 1;
611 flags
= rtex
->surface
.flags
| RADEON_SURF_FMASK
;
613 /* Use the same parameters and tile mode. */
614 fmask
.u
.legacy
.bankw
= rtex
->surface
.u
.legacy
.bankw
;
615 fmask
.u
.legacy
.bankh
= rtex
->surface
.u
.legacy
.bankh
;
616 fmask
.u
.legacy
.mtilea
= rtex
->surface
.u
.legacy
.mtilea
;
617 fmask
.u
.legacy
.tile_split
= rtex
->surface
.u
.legacy
.tile_split
;
620 fmask
.u
.legacy
.bankh
= 4;
622 switch (nr_samples
) {
631 R600_ERR("Invalid sample count for FMASK allocation.\n");
635 /* Overallocate FMASK on R600-R700 to fix colorbuffer corruption.
636 * This can be fixed by writing a separate FMASK allocator specifically
637 * for R600-R700 asics. */
638 if (rscreen
->chip_class
<= R700
) {
642 if (rscreen
->ws
->surface_init(rscreen
->ws
, &templ
,
643 flags
, bpe
, RADEON_SURF_MODE_2D
, &fmask
)) {
644 R600_ERR("Got error in surface_init while allocating FMASK.\n");
648 assert(fmask
.u
.legacy
.level
[0].mode
== RADEON_SURF_MODE_2D
);
650 out
->slice_tile_max
= (fmask
.u
.legacy
.level
[0].nblk_x
* fmask
.u
.legacy
.level
[0].nblk_y
) / 64;
651 if (out
->slice_tile_max
)
652 out
->slice_tile_max
-= 1;
654 out
->tile_mode_index
= fmask
.u
.legacy
.tiling_index
[0];
655 out
->pitch_in_pixels
= fmask
.u
.legacy
.level
[0].nblk_x
;
656 out
->bank_height
= fmask
.u
.legacy
.bankh
;
657 out
->tile_swizzle
= fmask
.tile_swizzle
;
658 out
->alignment
= MAX2(256, fmask
.surf_alignment
);
659 out
->size
= fmask
.surf_size
;
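/*
 * [Added note - illustrative, relies on the reconstructed sample-count
 * switch above] For a 4-sample color buffer the FMASK element size is
 * 1 byte, doubled to 2 bytes on R600-R700 by the overallocation
 * workaround; the FMASK itself is then laid out as a single-sample
 * 2D-tiled surface with that element size.
 */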
static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	r600_texture_get_fmask_info(rscreen, rtex,
				    rtex->resource.b.b.nr_samples, &rtex->fmask);

	rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
	rtex->size = rtex->fmask.offset + rtex->fmask.size;
}
void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 struct r600_cmask_info *out)
{
	unsigned cmask_tile_width = 8;
	unsigned cmask_tile_height = 8;
	unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
	unsigned element_bits = 4;
	unsigned cmask_cache_bits = 1024;
	unsigned num_pipes = rscreen->info.num_tile_pipes;
	unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;

	unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
	unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
	unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
	unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
	unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;

	unsigned pitch_elements = align(rtex->resource.b.b.width0, macro_tile_width);
	unsigned height = align(rtex->resource.b.b.height0, macro_tile_height);

	unsigned base_align = num_pipes * pipe_interleave_bytes;
	unsigned slice_bytes =
		((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;

	assert(macro_tile_width % 128 == 0);
	assert(macro_tile_height % 128 == 0);

	out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
	out->alignment = MAX2(256, base_align);
	out->size = util_num_layers(&rtex->resource.b.b, 0) *
		    align(slice_bytes, base_align);
}
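/*
 * [Added note - worked example, assuming num_pipes = 2 and
 * pipe_interleave_bytes = 256] elements_per_macro_tile = (1024 / 4) * 2 =
 * 512, pixels_per_macro_tile = 512 * 64 = 32768, whose square root (~181)
 * rounds up to a macro_tile_width of 256, giving macro_tile_height =
 * 32768 / 256 = 128. A 1920x1080 color buffer is therefore padded to
 * 2048x1152 pixels before the per-slice CMASK size (4 bits per 8x8 tile)
 * is computed and aligned to num_pipes * pipe_interleave_bytes.
 */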
static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);

	rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);
	rtex->size = rtex->cmask.offset + rtex->cmask.size;

	rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
}
static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
					      struct r600_texture *rtex)
{
	if (rtex->cmask_buffer)
		return;

	assert(rtex->cmask.size == 0);

	r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);

	rtex->cmask_buffer = (struct r600_resource *)
		r600_aligned_buffer_create(&rscreen->b,
					   R600_RESOURCE_FLAG_UNMAPPABLE,
					   PIPE_USAGE_DEFAULT,
					   rtex->cmask.size,
					   rtex->cmask.alignment);
	if (rtex->cmask_buffer == NULL) {
		rtex->cmask.size = 0;
		return;
	}

	/* update colorbuffer state bits */
	rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;

	rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);

	p_atomic_inc(&rscreen->compressed_colortex_counter);
}
void eg_resource_alloc_immed(struct r600_common_screen *rscreen,
			     struct r600_resource *res,
			     unsigned immed_size)
{
	res->immed_buffer = (struct r600_resource *)
		pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_DEFAULT, immed_size);
}
static void r600_texture_get_htile_size(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	unsigned cl_width, cl_height, width, height;
	unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
	unsigned num_pipes = rscreen->info.num_tile_pipes;

	rtex->surface.htile_size = 0;

	if (rscreen->chip_class <= EVERGREEN &&
	    rscreen->info.drm_minor < 26)
		return;

	/* HW bug on R6xx. */
	if (rscreen->chip_class == R600 &&
	    (rtex->resource.b.b.width0 > 7680 ||
	     rtex->resource.b.b.height0 > 7680))
		return;

	width = align(rtex->surface.u.legacy.level[0].nblk_x, cl_width * 8);
	height = align(rtex->surface.u.legacy.level[0].nblk_y, cl_height * 8);

	slice_elements = (width * height) / (8 * 8);
	slice_bytes = slice_elements * 4;

	pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
	base_align = num_pipes * pipe_interleave_bytes;

	rtex->surface.htile_alignment = base_align;
	rtex->surface.htile_size =
		util_num_layers(&rtex->resource.b.b, 0) *
		align(slice_bytes, base_align);
}
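/*
 * [Added note - worked example, numbers assumed] HTILE stores 4 bytes per
 * 8x8 pixel block, so once nblk_x/nblk_y have been padded to the
 * cl_width/cl_height multiples above, a padded 2048x1152 depth surface
 * needs (2048 * 1152) / 64 * 4 = 147456 bytes per slice before the final
 * alignment to num_pipes * pipe_interleave_bytes.
 */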
static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	r600_texture_get_htile_size(rscreen, rtex);

	if (!rtex->surface.htile_size)
		return;

	rtex->htile_offset = align(rtex->size, rtex->surface.htile_alignment);
	rtex->size = rtex->htile_offset + rtex->surface.htile_size;
}
void r600_print_texture_info(struct r600_common_screen *rscreen,
			     struct r600_texture *rtex, struct u_log_context *log)
{
	int i;

	/* Common parameters. */
	u_log_printf(log, "  Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
		"blk_h=%u, array_size=%u, last_level=%u, "
		"bpe=%u, nsamples=%u, flags=0x%x, %s\n",
		rtex->resource.b.b.width0, rtex->resource.b.b.height0,
		rtex->resource.b.b.depth0, rtex->surface.blk_w,
		rtex->surface.blk_h,
		rtex->resource.b.b.array_size, rtex->resource.b.b.last_level,
		rtex->surface.bpe, rtex->resource.b.b.nr_samples,
		rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));

	u_log_printf(log, "  Layout: size=%"PRIu64", alignment=%u, bankw=%u, "
		"bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
		rtex->surface.surf_size, rtex->surface.surf_alignment, rtex->surface.u.legacy.bankw,
		rtex->surface.u.legacy.bankh, rtex->surface.u.legacy.num_banks, rtex->surface.u.legacy.mtilea,
		rtex->surface.u.legacy.tile_split, rtex->surface.u.legacy.pipe_config,
		(rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);

	if (rtex->fmask.size)
		u_log_printf(log, "  FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
			"bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
			rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
			rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
			rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);

	if (rtex->cmask.size)
		u_log_printf(log, "  CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
			"slice_tile_max=%u\n",
			rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
			rtex->cmask.slice_tile_max);

	if (rtex->htile_offset)
		u_log_printf(log, "  HTile: offset=%"PRIu64", size=%u "
			"alignment=%u\n",
			rtex->htile_offset, rtex->surface.htile_size,
			rtex->surface.htile_alignment);

	for (i = 0; i <= rtex->resource.b.b.last_level; i++)
		u_log_printf(log, "  Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
			"npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
			"mode=%u, tiling_index = %u\n",
			i, rtex->surface.u.legacy.level[i].offset,
			(uint64_t)rtex->surface.u.legacy.level[i].slice_size_dw * 4,
			u_minify(rtex->resource.b.b.width0, i),
			u_minify(rtex->resource.b.b.height0, i),
			u_minify(rtex->resource.b.b.depth0, i),
			rtex->surface.u.legacy.level[i].nblk_x,
			rtex->surface.u.legacy.level[i].nblk_y,
			rtex->surface.u.legacy.level[i].mode,
			rtex->surface.u.legacy.tiling_index[i]);

	if (rtex->surface.has_stencil) {
		u_log_printf(log, "  StencilLayout: tilesplit=%u\n",
			rtex->surface.u.legacy.stencil_tile_split);
		for (i = 0; i <= rtex->resource.b.b.last_level; i++) {
			u_log_printf(log, "  StencilLevel[%i]: offset=%"PRIu64", "
				"slice_size=%"PRIu64", npix_x=%u, "
				"npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
				"mode=%u, tiling_index = %u\n",
				i, rtex->surface.u.legacy.stencil_level[i].offset,
				(uint64_t)rtex->surface.u.legacy.stencil_level[i].slice_size_dw * 4,
				u_minify(rtex->resource.b.b.width0, i),
				u_minify(rtex->resource.b.b.height0, i),
				u_minify(rtex->resource.b.b.depth0, i),
				rtex->surface.u.legacy.stencil_level[i].nblk_x,
				rtex->surface.u.legacy.stencil_level[i].nblk_y,
				rtex->surface.u.legacy.stencil_level[i].mode,
				rtex->surface.u.legacy.stencil_tiling_index[i]);
		}
	}
}
/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
r600_texture_create_object(struct pipe_screen *screen,
			   const struct pipe_resource *base,
			   struct pb_buffer *buf,
			   struct radeon_surf *surface)
{
	struct r600_texture *rtex;
	struct r600_resource *resource;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	rtex = CALLOC_STRUCT(r600_texture);
	if (!rtex)
		return NULL;

	resource = &rtex->resource;
	resource->b.b = *base;
	resource->b.vtbl = &r600_texture_vtbl;
	pipe_reference_init(&resource->b.b.reference, 1);
	resource->b.b.screen = screen;

	/* don't include stencil-only formats which we don't support for rendering */
	rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));

	rtex->surface = *surface;
	rtex->size = rtex->surface.surf_size;
	rtex->db_render_format = base->format;

	/* Tiled depth textures utilize the non-displayable tile order.
	 * This must be done after r600_setup_surface.
	 * Applies to R600-Cayman. */
	rtex->non_disp_tiling = rtex->is_depth && rtex->surface.u.legacy.level[0].mode >= RADEON_SURF_MODE_1D;
	/* Applies to GCN. */
	rtex->last_msaa_resolve_target_micro_mode = rtex->surface.micro_tile_mode;

	if (rtex->is_depth) {
		if (base->flags & (R600_RESOURCE_FLAG_TRANSFER |
				   R600_RESOURCE_FLAG_FLUSHED_DEPTH) ||
		    rscreen->chip_class >= EVERGREEN) {
			rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted;
			rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted;
		} else {
			if (rtex->resource.b.b.nr_samples <= 1 &&
			    (rtex->resource.b.b.format == PIPE_FORMAT_Z16_UNORM ||
			     rtex->resource.b.b.format == PIPE_FORMAT_Z32_FLOAT))
				rtex->can_sample_z = true;
		}

		if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
				     R600_RESOURCE_FLAG_FLUSHED_DEPTH))) {
			rtex->db_compatible = true;

			if (!(rscreen->debug_flags & DBG_NO_HYPERZ))
				r600_texture_allocate_htile(rscreen, rtex);
		}
	} else {
		if (base->nr_samples > 1) {
			if (!buf) {
				r600_texture_allocate_fmask(rscreen, rtex);
				r600_texture_allocate_cmask(rscreen, rtex);
				rtex->cmask_buffer = &rtex->resource;
			}
			if (!rtex->fmask.size || !rtex->cmask.size) {
				FREE(rtex);
				return NULL;
			}
		}
	}

	/* Now create the backing buffer. */
	if (!buf) {
		r600_init_resource_fields(rscreen, resource, rtex->size,
					  rtex->surface.surf_alignment);

		if (!r600_alloc_resource(rscreen, resource)) {
			FREE(rtex);
			return NULL;
		}
	} else {
		resource->buf = buf;
		resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->buf);
		resource->bo_size = buf->size;
		resource->bo_alignment = buf->alignment;
		resource->domains = rscreen->ws->buffer_get_initial_domain(resource->buf);
		if (resource->domains & RADEON_DOMAIN_VRAM)
			resource->vram_usage = buf->size;
		else if (resource->domains & RADEON_DOMAIN_GTT)
			resource->gart_usage = buf->size;
	}

	if (rtex->cmask.size) {
		/* Initialize the cmask to 0xCC (= compressed state). */
		r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
					 rtex->cmask.offset, rtex->cmask.size,
					 0xCCCCCCCC);
	}
	if (rtex->htile_offset) {
		uint32_t clear_value = 0;

		r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
					 rtex->htile_offset,
					 rtex->surface.htile_size,
					 clear_value);
	}

	/* Initialize the CMASK base register value. */
	rtex->cmask.base_address_reg =
		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

	if (rscreen->debug_flags & DBG_VM) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
			rtex->resource.gpu_address,
			rtex->resource.gpu_address + rtex->resource.buf->size,
			base->width0, base->height0, util_num_layers(base, 0), base->last_level+1,
			base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
	}

	if (rscreen->debug_flags & DBG_TEX) {
		struct u_log_context log;
		u_log_context_init(&log);
		r600_print_texture_info(rscreen, rtex, &log);
		u_log_new_page_print(&log, stdout);
		u_log_context_destroy(&log);
	}

	return rtex;
}
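/*
 * [Added note] The allocation above packs everything into one buffer
 * object: rtex->size starts as the main surface size and is extended (with
 * alignment) by FMASK, CMASK and HTILE in the helpers above, so those
 * metadata blocks live at rtex->fmask.offset, rtex->cmask.offset and
 * rtex->htile_offset inside the same BO.
 */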
static enum radeon_surf_mode
r600_choose_tiling(struct r600_common_screen *rscreen,
		   const struct pipe_resource *templ)
{
	const struct util_format_description *desc = util_format_description(templ->format);
	bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING;
	bool is_depth_stencil = util_format_is_depth_or_stencil(templ->format) &&
				!(templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);

	/* MSAA resources must be 2D tiled. */
	if (templ->nr_samples > 1)
		return RADEON_SURF_MODE_2D;

	/* Transfer resources should be linear. */
	if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
		return RADEON_SURF_MODE_LINEAR_ALIGNED;

	/* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */
	if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN &&
	    (templ->bind & PIPE_BIND_COMPUTE_RESOURCE) &&
	    (templ->target == PIPE_TEXTURE_2D ||
	     templ->target == PIPE_TEXTURE_3D))
		force_tiling = true;

	/* Handle common candidates for the linear mode.
	 * Compressed textures and DB surfaces must always be tiled.
	 */
	if (!force_tiling &&
	    !is_depth_stencil &&
	    !util_format_is_compressed(templ->format)) {
		if (rscreen->debug_flags & DBG_NO_TILING)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
		if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		if (templ->bind & PIPE_BIND_LINEAR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* 1D textures should be linear - fixes image operations on 1d */
		if (templ->target == PIPE_TEXTURE_1D ||
		    templ->target == PIPE_TEXTURE_1D_ARRAY)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures likely to be mapped often. */
		if (templ->usage == PIPE_USAGE_STAGING ||
		    templ->usage == PIPE_USAGE_STREAM)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Make small textures 1D tiled. */
	if (templ->width0 <= 16 || templ->height0 <= 16 ||
	    (rscreen->debug_flags & DBG_NO_2D_TILING))
		return RADEON_SURF_MODE_1D;

	/* The allocator will switch to 1D if needed. */
	return RADEON_SURF_MODE_2D;
}
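/*
 * [Added note] Summary of the decision order above: MSAA -> 2D tiled;
 * transfer/staging/stream usage, PIPE_BIND_LINEAR, 1D targets, subsampled
 * (422) formats and DBG_NO_TILING -> linear (but never for depth/stencil or
 * compressed formats); very small surfaces (<= 16 pixels in either
 * dimension) or DBG_NO_2D_TILING -> 1D tiled; everything else -> 2D tiled.
 */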
struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
					  const struct pipe_resource *templ)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_surf surface = {0};
	bool is_flushed_depth = templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH;
	int r;

	r = r600_init_surface(rscreen, &surface, templ,
			      r600_choose_tiling(rscreen, templ), 0, 0,
			      false, false, is_flushed_depth);
	if (r)
		return NULL;

	return (struct pipe_resource *)
		r600_texture_create_object(screen, templ, NULL, &surface);
}
static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
						      const struct pipe_resource *templ,
						      struct winsys_handle *whandle,
						      unsigned usage)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct pb_buffer *buf = NULL;
	enum radeon_surf_mode array_mode;
	struct radeon_surf surface = {};
	int r;
	struct radeon_bo_metadata metadata = {};
	struct r600_texture *rtex;
	bool is_scanout;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	    templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle,
					      rscreen->info.max_alignment);
	if (!buf)
		return NULL;

	rscreen->ws->buffer_get_metadata(buf, &metadata);
	r600_surface_import_metadata(rscreen, &surface, &metadata,
				     &array_mode, &is_scanout);

	r = r600_init_surface(rscreen, &surface, templ, array_mode,
			      whandle->stride, whandle->offset,
			      true, is_scanout, false);
	if (r)
		return NULL;

	rtex = r600_texture_create_object(screen, templ, buf, &surface);
	if (!rtex)
		return NULL;

	rtex->resource.b.is_shared = true;
	rtex->resource.external_usage = usage;

	assert(rtex->surface.tile_swizzle == 0);
	return &rtex->resource.b.b;
}
bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     struct r600_texture **staging)
{
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct pipe_resource resource;
	struct r600_texture **flushed_depth_texture = staging ?
			staging : &rtex->flushed_depth_texture;
	enum pipe_format pipe_format = texture->format;

	if (rtex->flushed_depth_texture)
		return true; /* it's ready */

	if (!rtex->can_sample_z && rtex->can_sample_s) {
		switch (pipe_format) {
		case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
			/* Save memory by not allocating the S plane. */
			pipe_format = PIPE_FORMAT_Z32_FLOAT;
			break;
		case PIPE_FORMAT_Z24_UNORM_S8_UINT:
		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
			/* Save memory bandwidth by not copying the
			 * stencil part during flush.
			 *
			 * This potentially increases memory bandwidth
			 * if an application uses both Z and S texturing
			 * simultaneously (a flushed Z24S8 texture
			 * would be stored compactly), but how often
			 * does that really happen?
			 */
			pipe_format = PIPE_FORMAT_Z24X8_UNORM;
			break;
		default:
			break;
		}
	} else if (!rtex->can_sample_s && rtex->can_sample_z) {
		assert(util_format_has_stencil(util_format_description(pipe_format)));

		/* DB->CB copies to an 8bpp surface don't work. */
		pipe_format = PIPE_FORMAT_X24S8_UINT;
	}

	memset(&resource, 0, sizeof(resource));
	resource.target = texture->target;
	resource.format = pipe_format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
	resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;

	if (staging)
		resource.flags |= R600_RESOURCE_FLAG_TRANSFER;

	*flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (*flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold flushed depth\n");
		return false;
	}

	(*flushed_depth_texture)->non_disp_tiling = false;
	return true;
}
/*
 * Initialize the pipe_resource descriptor to be of the same size as the box,
 * which is supposed to hold a subregion of the texture "orig" at the given
 * mipmap level.
 */
static void r600_init_temp_resource_from_box(struct pipe_resource *res,
					     struct pipe_resource *orig,
					     const struct pipe_box *box,
					     unsigned level, unsigned flags)
{
	memset(res, 0, sizeof(*res));
	res->format = orig->format;
	res->width0 = box->width;
	res->height0 = box->height;
	res->array_size = 1;
	res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;

	/* We must set the correct texture target and dimensions for a 3D box. */
	if (box->depth > 1 && util_max_layer(orig, level) > 0) {
		res->target = PIPE_TEXTURE_2D_ARRAY;
		res->array_size = box->depth;
	} else {
		res->target = PIPE_TEXTURE_2D;
	}
}
static bool r600_can_invalidate_texture(struct r600_common_screen *rscreen,
					struct r600_texture *rtex,
					unsigned transfer_usage,
					const struct pipe_box *box)
{
	/* r600g doesn't react to dirty_tex_descriptor_counter */
	return rscreen->chip_class >= GFX6 &&
		!rtex->resource.b.is_shared &&
		!(transfer_usage & PIPE_TRANSFER_READ) &&
		rtex->resource.b.b.last_level == 0 &&
		util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
						 box->x, box->y, box->z,
						 box->width, box->height,
						 box->depth);
}
static void r600_texture_invalidate_storage(struct r600_common_context *rctx,
					    struct r600_texture *rtex)
{
	struct r600_common_screen *rscreen = rctx->screen;

	/* There is no point in discarding depth and tiled buffers. */
	assert(!rtex->is_depth);
	assert(rtex->surface.is_linear);

	/* Reallocate the buffer in the same pipe_resource. */
	r600_alloc_resource(rscreen, &rtex->resource);

	/* Initialize the CMASK base address (needed even without CMASK). */
	rtex->cmask.base_address_reg =
		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

	p_atomic_inc(&rscreen->dirty_tex_counter);

	rctx->num_alloc_tex_transfer_bytes += rtex->size;
}
static void *r600_texture_transfer_map(struct pipe_context *ctx,
				       struct pipe_resource *texture,
				       unsigned level,
				       unsigned usage,
				       const struct pipe_box *box,
				       struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct r600_transfer *trans;
	struct r600_resource *buf;
	unsigned offset = 0;
	char *map;
	bool use_staging_texture = false;

	assert(!(texture->flags & R600_RESOURCE_FLAG_TRANSFER));
	assert(box->width && box->height && box->depth);

	/* Depth textures use staging unconditionally. */
	if (!rtex->is_depth) {
		/* Degrade the tile mode if we get too many transfers on APUs.
		 * On dGPUs, the staging texture is always faster.
		 * Only count uploads that are at least 4x4 pixels large.
		 */
		if (!rctx->screen->info.has_dedicated_vram &&
		    box->width >= 4 && box->height >= 4 &&
		    p_atomic_inc_return(&rtex->num_level0_transfers) == 10) {
			bool can_invalidate =
				r600_can_invalidate_texture(rctx->screen, rtex,
							    usage, box);

			r600_reallocate_texture_inplace(rctx, rtex,
							PIPE_BIND_LINEAR,
							can_invalidate);
		}

		/* Tiled textures need to be converted into a linear texture for CPU
		 * access. The staging texture is always linear and is placed in GART.
		 *
		 * Reading from VRAM or GTT WC is slow, always use the staging
		 * texture in this case.
		 *
		 * Use the staging texture for uploads if the underlying BO
		 * is busy.
		 */
		if (!rtex->surface.is_linear)
			use_staging_texture = true;
		else if (usage & PIPE_TRANSFER_READ)
			use_staging_texture =
				rtex->resource.domains & RADEON_DOMAIN_VRAM ||
				rtex->resource.flags & RADEON_FLAG_GTT_WC;
		/* Write & linear only: */
		else if (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf,
							 RADEON_USAGE_READWRITE) ||
			 !rctx->ws->buffer_wait(rtex->resource.buf, 0,
						RADEON_USAGE_READWRITE)) {
			/* It's busy. */
			if (r600_can_invalidate_texture(rctx->screen, rtex,
							usage, box))
				r600_texture_invalidate_storage(rctx, rtex);
			else
				use_staging_texture = true;
		}
	}

	trans = CALLOC_STRUCT(r600_transfer);
	if (!trans)
		return NULL;
	pipe_resource_reference(&trans->b.b.resource, texture);
	trans->b.b.level = level;
	trans->b.b.usage = usage;
	trans->b.b.box = *box;

	if (rtex->is_depth) {
		struct r600_texture *staging_depth;

		if (rtex->resource.b.b.nr_samples > 1) {
			/* MSAA depth buffers need to be converted to single sample buffers.
			 *
			 * Mapping MSAA depth buffers can occur if ReadPixels is called
			 * with a multisample GLX visual.
			 *
			 * First downsample the depth buffer to a temporary texture,
			 * then decompress the temporary one to staging.
			 *
			 * Only the region being mapped is transfered.
			 */
			struct pipe_resource resource;

			r600_init_temp_resource_from_box(&resource, texture, box, level, 0);

			if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			if (usage & PIPE_TRANSFER_READ) {
				struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
				if (!temp) {
					R600_ERR("failed to create a temporary depth texture\n");
					FREE(trans);
					return NULL;
				}

				r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
				rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
							    0, 0, 0, box->depth, 0, 0);
				pipe_resource_reference(&temp, NULL);
			}

			/* Just get the strides. */
			r600_texture_get_offset(rctx->screen, staging_depth, level, NULL,
						&trans->b.b.stride,
						&trans->b.b.layer_stride);
		} else {
			/* XXX: only readback the rectangle which is being mapped? */
			/* XXX: when discard is true, no need to read back from depth texture */
			if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			rctx->blit_decompress_depth(ctx, rtex, staging_depth,
						    level, level,
						    box->z, box->z + box->depth - 1,
						    0, 0);

			offset = r600_texture_get_offset(rctx->screen, staging_depth,
							 level, box,
							 &trans->b.b.stride,
							 &trans->b.b.layer_stride);
		}

		trans->staging = (struct r600_resource*)staging_depth;
		buf = trans->staging;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct r600_texture *staging;

		r600_init_temp_resource_from_box(&resource, texture, box, level,
						 R600_RESOURCE_FLAG_TRANSFER);
		resource.usage = (usage & PIPE_TRANSFER_READ) ?
			PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;

		/* Create the temporary texture. */
		staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
		if (!staging) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			FREE(trans);
			return NULL;
		}
		trans->staging = &staging->resource;

		/* Just get the strides. */
		r600_texture_get_offset(rctx->screen, staging, 0, NULL,
					&trans->b.b.stride,
					&trans->b.b.layer_stride);

		if (usage & PIPE_TRANSFER_READ)
			r600_copy_to_staging_texture(ctx, trans);
		else
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

		buf = trans->staging;
	} else {
		/* the resource is mapped directly */
		offset = r600_texture_get_offset(rctx->screen, rtex, level, box,
						 &trans->b.b.stride,
						 &trans->b.b.layer_stride);
		buf = &rtex->resource;
	}

	if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
		r600_resource_reference(&trans->staging, NULL);
		FREE(trans);
		return NULL;
	}

	*ptransfer = &trans->b.b;
	return map + offset;
}
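/*
 * [Added note] Mapping strategy used above: depth textures always go
 * through a flushed-depth staging copy; tiled color textures, and reads
 * from VRAM or GTT-WC memory, use a linear staging texture; a busy linear
 * texture is either reallocated (storage invalidation) or staged; only an
 * idle linear texture is mapped directly at the computed offset.
 */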
static void r600_texture_transfer_unmap(struct pipe_context *ctx,
					struct pipe_transfer *transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct pipe_resource *texture = transfer->resource;
	struct r600_texture *rtex = (struct r600_texture*)texture;

	if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
		if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &rtransfer->staging->b.b, transfer->level,
						  &transfer->box);
		} else {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
	}

	if (rtransfer->staging) {
		rctx->num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size;
		r600_resource_reference(&rtransfer->staging, NULL);
	}

	/* Heuristic for {upload, draw, upload, draw, ..}:
	 *
	 * Flush the gfx IB if we've allocated too much texture storage.
	 *
	 * The idea is that we don't want to build IBs that use too much
	 * memory and put pressure on the kernel memory manager and we also
	 * want to make temporary and invalidated buffers go idle ASAP to
	 * decrease the total memory usage or make them reusable. The memory
	 * usage will be slightly higher than given here because of the buffer
	 * cache in the winsys.
	 *
	 * The result is that the kernel memory manager is never a bottleneck.
	 */
	if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
		rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
		rctx->num_alloc_tex_transfer_bytes = 0;
	}

	pipe_resource_reference(&transfer->resource, NULL);
	FREE(transfer);
}
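/*
 * [Added note - example numbers assumed] With a 256 MB GART, the heuristic
 * above flushes the gfx IB once more than 64 MB of staging allocations have
 * accumulated, so a long burst of texture uploads is split into several
 * submissions instead of one oversized IB.
 */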
static const struct u_resource_vtbl r600_texture_vtbl =
{
	NULL,				/* get_handle */
	r600_texture_destroy,		/* resource_destroy */
	r600_texture_transfer_map,	/* transfer_map */
	u_default_transfer_flush_region, /* transfer_flush_region */
	r600_texture_transfer_unmap,	/* transfer_unmap */
};
struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
						struct pipe_resource *texture,
						const struct pipe_surface *templ,
						unsigned width0, unsigned height0,
						unsigned width, unsigned height)
{
	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);

	if (!surface)
		return NULL;

	assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
	assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));

	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.context = pipe;
	surface->base.format = templ->format;
	surface->base.width = width;
	surface->base.height = height;
	surface->base.u = templ->u;

	surface->width0 = width0;
	surface->height0 = height0;

	return &surface->base;
}
static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
						struct pipe_resource *tex,
						const struct pipe_surface *templ)
{
	unsigned level = templ->u.tex.level;
	unsigned width = u_minify(tex->width0, level);
	unsigned height = u_minify(tex->height0, level);
	unsigned width0 = tex->width0;
	unsigned height0 = tex->height0;

	if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
		const struct util_format_description *tex_desc
			= util_format_description(tex->format);
		const struct util_format_description *templ_desc
			= util_format_description(templ->format);

		assert(tex_desc->block.bits == templ_desc->block.bits);

		/* Adjust size of surface if and only if the block width or
		 * height is changed. */
		if (tex_desc->block.width != templ_desc->block.width ||
		    tex_desc->block.height != templ_desc->block.height) {
			unsigned nblks_x = util_format_get_nblocksx(tex->format, width);
			unsigned nblks_y = util_format_get_nblocksy(tex->format, height);

			width = nblks_x * templ_desc->block.width;
			height = nblks_y * templ_desc->block.height;

			width0 = util_format_get_nblocksx(tex->format, width0);
			height0 = util_format_get_nblocksy(tex->format, height0);
		}
	}

	return r600_create_surface_custom(pipe, tex, templ,
					  width0, height0,
					  width, height);
}
*pipe
,
1600 struct pipe_surface
*surface
)
1602 struct r600_surface
*surf
= (struct r600_surface
*)surface
;
1603 r600_resource_reference(&surf
->cb_buffer_fmask
, NULL
);
1604 r600_resource_reference(&surf
->cb_buffer_cmask
, NULL
);
1605 pipe_resource_reference(&surface
->texture
, NULL
);
static void r600_clear_texture(struct pipe_context *pipe,
			       struct pipe_resource *tex,
			       unsigned level,
			       const struct pipe_box *box,
			       const void *data)
{
	struct pipe_screen *screen = pipe->screen;
	struct r600_texture *rtex = (struct r600_texture*)tex;
	struct pipe_surface tmpl = {{0}};
	struct pipe_surface *sf;
	const struct util_format_description *desc =
		util_format_description(tex->format);

	tmpl.format = tex->format;
	tmpl.u.tex.first_layer = box->z;
	tmpl.u.tex.last_layer = box->z + box->depth - 1;
	tmpl.u.tex.level = level;
	sf = pipe->create_surface(pipe, tex, &tmpl);
	if (!sf)
		return;

	if (rtex->is_depth) {
		unsigned clear;
		float depth;
		uint8_t stencil = 0;

		/* Depth is always present. */
		clear = PIPE_CLEAR_DEPTH;
		util_format_unpack_z_float(tex->format, &depth, data, 1);

		if (rtex->surface.has_stencil) {
			clear |= PIPE_CLEAR_STENCIL;
			util_format_unpack_s_8uint(tex->format, &stencil, data, 1);
		}

		pipe->clear_depth_stencil(pipe, sf, clear, depth, stencil,
					  box->x, box->y,
					  box->width, box->height, false);
	} else {
		union pipe_color_union color;

		util_format_unpack_rgba(tex->format, color.ui, data, 1);

		if (screen->is_format_supported(screen, tex->format,
						tex->target, 0, 0,
						PIPE_BIND_RENDER_TARGET)) {
			pipe->clear_render_target(pipe, sf, &color,
						  box->x, box->y,
						  box->width, box->height, false);
		} else {
			/* Software fallback - just for R9G9B9E5_FLOAT */
			util_clear_render_target(pipe, sf, &color,
						 box->x, box->y,
						 box->width, box->height);
		}
	}
	pipe_surface_reference(&sf, NULL);
}
unsigned r600_translate_colorswap(enum pipe_format format, bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);

#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_SWAP_STD;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return ~0U;

	switch (desc->nr_channels) {
	case 1:
		if (HAS_SWIZZLE(0,X))
			return V_0280A0_SWAP_STD; /* X___ */
		else if (HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* ___X */
		break;
	case 2:
		if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
		    (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
		    (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
			return V_0280A0_SWAP_STD; /* XY__ */
		else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
			 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
			 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
			/* YX__ */
			return (do_endian_swap ? V_0280A0_SWAP_STD : V_0280A0_SWAP_STD_REV);
		else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
			return V_0280A0_SWAP_ALT; /* X__Y */
		else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* Y__X */
		break;
	case 3:
		if (HAS_SWIZZLE(0,X))
			return (do_endian_swap ? V_0280A0_SWAP_STD_REV : V_0280A0_SWAP_STD);
		else if (HAS_SWIZZLE(0,Z))
			return V_0280A0_SWAP_STD_REV; /* ZYX */
		break;
	case 4:
		/* check the middle channels, the 1st and 4th channel can be NONE */
		if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
			return V_0280A0_SWAP_STD; /* XYZW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
			return V_0280A0_SWAP_STD_REV; /* WZYX */
		} else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
			return V_0280A0_SWAP_ALT; /* ZYXW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
			/* YZWX */
			if (desc->is_array)
				return V_0280A0_SWAP_ALT_REV;
			else
				return (do_endian_swap ? V_0280A0_SWAP_ALT : V_0280A0_SWAP_ALT_REV);
		}
		break;
	}
	return ~0U;
}
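/*
 * [Added note - illustrative] PIPE_FORMAT_R8G8B8A8_UNORM carries the
 * identity XYZW swizzle, so the 4-channel case above returns
 * V_0280A0_SWAP_STD, while a BGRA-style ZYXW swizzle maps to
 * V_0280A0_SWAP_ALT; do_endian_swap only swaps which of the paired
 * STD/STD_REV (or ALT/ALT_REV) values is returned for byte-swapped access.
 */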
/* FAST COLOR CLEAR */

static void evergreen_set_clear_color(struct r600_texture *rtex,
				      enum pipe_format surface_format,
				      const union pipe_color_union *color)
{
	union util_color uc;

	memset(&uc, 0, sizeof(uc));

	if (rtex->surface.bpe == 16) {
		/* DCC fast clear only:
		 *   CLEAR_WORD0 = R = G = B
		 *   CLEAR_WORD1 = A
		 */
		assert(color->ui[0] == color->ui[1] &&
		       color->ui[0] == color->ui[2]);
		uc.ui[0] = color->ui[0];
		uc.ui[1] = color->ui[3];
	} else {
		util_pack_color_union(surface_format, &uc, color);
	}

	memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t));
}
void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
				   struct pipe_framebuffer_state *fb,
				   struct r600_atom *fb_state,
				   unsigned *buffers, ubyte *dirty_cbufs,
				   const union pipe_color_union *color)
{
	int i;

	/* This function is broken in BE, so just disable this path for now */
#if UTIL_ARCH_BIG_ENDIAN
	return;
#endif

	if (rctx->render_cond)
		return;

	for (i = 0; i < fb->nr_cbufs; i++) {
		struct r600_texture *tex;
		unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;

		/* if this colorbuffer is not being cleared */
		if (!(*buffers & clear_bit))
			continue;

		tex = (struct r600_texture *)fb->cbufs[i]->texture;

		/* the clear is allowed if all layers are bound */
		if (fb->cbufs[i]->u.tex.first_layer != 0 ||
		    fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
			continue;
		}

		/* cannot clear mipmapped textures */
		if (fb->cbufs[i]->texture->last_level != 0) {
			continue;
		}

		/* only supported on tiled surfaces */
		if (tex->surface.is_linear) {
			continue;
		}

		/* shared textures can't use fast clear without an explicit flush,
		 * because there is no way to communicate the clear color among
		 * all clients
		 */
		if (tex->resource.b.is_shared &&
		    !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			continue;

		/* Use a slow clear for small surfaces where the cost of
		 * the eliminate pass can be higher than the benefit of fast
		 * clear. AMDGPU-pro does this, but the numbers may differ.
		 *
		 * This helps on both dGPUs and APUs, even small ones.
		 */
		if (tex->resource.b.b.nr_samples <= 1 &&
		    tex->resource.b.b.width0 * tex->resource.b.b.height0 <= 300 * 300)
			continue;

		/* 128-bit formats are unusupported */
		if (tex->surface.bpe > 8) {
			continue;
		}

		/* ensure CMASK is enabled */
		r600_texture_alloc_cmask_separate(rctx->screen, tex);
		if (tex->cmask.size == 0) {
			continue;
		}

		/* Do the fast clear. */
		rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b,
				   tex->cmask.offset, tex->cmask.size, 0,
				   R600_COHERENCY_CB_META);

		bool need_compressed_update = !tex->dirty_level_mask;

		tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;

		if (need_compressed_update)
			p_atomic_inc(&rctx->screen->compressed_colortex_counter);

		evergreen_set_clear_color(tex, fb->cbufs[i]->format, color);

		if (dirty_cbufs)
			*dirty_cbufs |= 1 << i;
		rctx->set_atom_dirty(rctx, fb_state, true);
		*buffers &= ~clear_bit;
	}
}
*
1852 r600_memobj_from_handle(struct pipe_screen
*screen
,
1853 struct winsys_handle
*whandle
,
1856 struct r600_common_screen
*rscreen
= (struct r600_common_screen
*)screen
;
1857 struct r600_memory_object
*memobj
= CALLOC_STRUCT(r600_memory_object
);
1858 struct pb_buffer
*buf
= NULL
;
1863 buf
= rscreen
->ws
->buffer_from_handle(rscreen
->ws
, whandle
,
1864 rscreen
->info
.max_alignment
);
1870 memobj
->b
.dedicated
= dedicated
;
1872 memobj
->stride
= whandle
->stride
;
1873 memobj
->offset
= whandle
->offset
;
1875 return (struct pipe_memory_object
*)memobj
;
static void
r600_memobj_destroy(struct pipe_screen *screen,
		    struct pipe_memory_object *_memobj)
{
	struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;

	pb_reference(&memobj->buf, NULL);
	free(memobj);
}
*
1890 r600_texture_from_memobj(struct pipe_screen
*screen
,
1891 const struct pipe_resource
*templ
,
1892 struct pipe_memory_object
*_memobj
,
1896 struct r600_common_screen
*rscreen
= (struct r600_common_screen
*)screen
;
1897 struct r600_memory_object
*memobj
= (struct r600_memory_object
*)_memobj
;
1898 struct r600_texture
*rtex
;
1899 struct radeon_surf surface
= {};
1900 struct radeon_bo_metadata metadata
= {};
1901 enum radeon_surf_mode array_mode
;
1903 struct pb_buffer
*buf
= NULL
;
1905 if (memobj
->b
.dedicated
) {
1906 rscreen
->ws
->buffer_get_metadata(memobj
->buf
, &metadata
);
1907 r600_surface_import_metadata(rscreen
, &surface
, &metadata
,
1908 &array_mode
, &is_scanout
);
1911 * The bo metadata is unset for un-dedicated images. So we fall
1912 * back to linear. See answer to question 5 of the
1913 * VK_KHX_external_memory spec for some details.
1915 * It is possible that this case isn't going to work if the
1916 * surface pitch isn't correctly aligned by default.
1918 * In order to support it correctly we require multi-image
1919 * metadata to be syncrhonized between radv and radeonsi. The
1920 * semantics of associating multiple image metadata to a memory
1921 * object on the vulkan export side are not concretely defined
1924 * All the use cases we are aware of at the moment for memory
1925 * objects use dedicated allocations. So lets keep the initial
1926 * implementation simple.
1928 * A possible alternative is to attempt to reconstruct the
1929 * tiling information when the TexParameter TEXTURE_TILING_EXT
1932 array_mode
= RADEON_SURF_MODE_LINEAR_ALIGNED
;
1937 r
= r600_init_surface(rscreen
, &surface
, templ
,
1938 array_mode
, memobj
->stride
,
1939 offset
, true, is_scanout
,
1944 rtex
= r600_texture_create_object(screen
, templ
, memobj
->buf
, &surface
);
1948 /* r600_texture_create_object doesn't increment refcount of
1949 * memobj->buf, so increment it here.
1951 pb_reference(&buf
, memobj
->buf
);
1953 rtex
->resource
.b
.is_shared
= true;
1954 rtex
->resource
.external_usage
= PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE
;
1956 return &rtex
->resource
.b
.b
;
void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.resource_from_handle = r600_texture_from_handle;
	rscreen->b.resource_get_handle = r600_texture_get_handle;
	rscreen->b.resource_get_info = r600_texture_get_info;
	rscreen->b.resource_from_memobj = r600_texture_from_memobj;
	rscreen->b.memobj_create_from_handle = r600_memobj_from_handle;
	rscreen->b.memobj_destroy = r600_memobj_destroy;
}
void r600_init_context_texture_functions(struct r600_common_context *rctx)
{
	rctx->b.create_surface = r600_create_surface;
	rctx->b.surface_destroy = r600_surface_destroy;
	rctx->b.clear_texture = r600_clear_texture;
}