/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

27 #include "r600_pipe_common.h"
29 #include "r600_query.h"
30 #include "util/u_format.h"
31 #include "util/u_log.h"
32 #include "util/u_memory.h"
33 #include "util/u_pack_color.h"
34 #include "util/u_surface.h"
35 #include "util/os_time.h"
static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex);
static enum radeon_surf_mode
r600_choose_tiling(struct r600_common_screen *rscreen,
		   const struct pipe_resource *templ);

bool r600_prepare_for_dma_blit(struct r600_common_context *rctx,
			       struct r600_texture *rdst,
			       unsigned dst_level, unsigned dstx,
			       unsigned dsty, unsigned dstz,
			       struct r600_texture *rsrc,
			       unsigned src_level,
			       const struct pipe_box *src_box)
{
	if (!rctx->dma.cs)
		return false;

	if (rdst->surface.bpe != rsrc->surface.bpe)
		return false;

	/* MSAA: Blits don't exist in the real world. */
	if (rsrc->resource.b.b.nr_samples > 1 ||
	    rdst->resource.b.b.nr_samples > 1)
		return false;

	/* Depth-stencil surfaces:
	 *   When dst is linear, the DB->CB copy preserves HTILE.
	 *   When dst is tiled, the 3D path must be used to update HTILE.
	 */
	if (rsrc->is_depth || rdst->is_depth)
		return false;

	/* CMASK as:
	 *   src: Both texture and SDMA paths need decompression. Use SDMA.
	 *   dst: If overwriting the whole texture, discard CMASK and use
	 *        SDMA. Otherwise, use the 3D path.
	 */
	if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
		/* The CMASK clear is only enabled for the first level. */
		assert(dst_level == 0);
		if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
						      dstx, dsty, dstz, src_box->width,
						      src_box->height, src_box->depth))
			return false;

		r600_texture_discard_cmask(rctx->screen, rdst);
	}

	/* All requirements are met. Prepare textures for SDMA. */
	if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
		rctx->b.flush_resource(&rctx->b, &rsrc->resource.b.b);

	assert(!(rsrc->dirty_level_mask & (1 << src_level)));
	assert(!(rdst->dirty_level_mask & (1 << dst_level)));

	return true;
}

/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void r600_copy_region_with_blit(struct pipe_context *pipe,
				       struct pipe_resource *dst,
				       unsigned dst_level,
				       unsigned dstx, unsigned dsty, unsigned dstz,
				       struct pipe_resource *src,
				       unsigned src_level,
				       const struct pipe_box *src_box)
{
	struct pipe_blit_info blit;

	memset(&blit, 0, sizeof(blit));
	blit.src.resource = src;
	blit.src.format = src->format;
	blit.src.level = src_level;
	blit.src.box = *src_box;
	blit.dst.resource = dst;
	blit.dst.format = dst->format;
	blit.dst.level = dst_level;
	blit.dst.box.x = dstx;
	blit.dst.box.y = dsty;
	blit.dst.box.z = dstz;
	blit.dst.box.width = src_box->width;
	blit.dst.box.height = src_box->height;
	blit.dst.box.depth = src_box->depth;
	blit.mask = util_format_get_mask(src->format) &
		    util_format_get_mask(dst->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	if (blit.mask) {
		pipe->blit(pipe, &blit);
	}
}

/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = &rtransfer->staging->b.b;
	struct pipe_resource *src = transfer->resource;

	if (src->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
					   src, transfer->level, &transfer->box);
		return;
	}

	rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
		       &transfer->box);
}

/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = transfer->resource;
	struct pipe_resource *src = &rtransfer->staging->b.b;
	struct pipe_box sbox;

	u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

	if (dst->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, transfer->level,
					   transfer->box.x, transfer->box.y, transfer->box.z,
					   src, 0, &sbox);
		return;
	}

	rctx->dma_copy(ctx, dst, transfer->level,
		       transfer->box.x, transfer->box.y, transfer->box.z,
		       src, 0, &sbox);
}

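/* Compute the byte offset of (level, box) within the texture, and return the
 * per-row and per-slice strides through the out parameters. With a NULL box,
 * only the level offset and the strides are reported. */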
static unsigned r600_texture_get_offset(struct r600_common_screen *rscreen,
					struct r600_texture *rtex, unsigned level,
					const struct pipe_box *box,
					unsigned *stride,
					unsigned *layer_stride)
{
	*stride = rtex->surface.u.legacy.level[level].nblk_x *
		  rtex->surface.bpe;
	assert((uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 <= UINT_MAX);
	*layer_stride = (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4;

	if (!box)
		return rtex->surface.u.legacy.level[level].offset;

	/* Each texture is an array of mipmap levels. Each level is
	 * an array of slices. */
	return rtex->surface.u.legacy.level[level].offset +
	       box->z * (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 +
	       (box->y / rtex->surface.blk_h *
		rtex->surface.u.legacy.level[level].nblk_x +
		box->x / rtex->surface.blk_w) * rtex->surface.bpe;
}

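/* Fill the radeon_surf descriptor for ptex: pick the block size and the
 * Z/S/scanout flags, let the winsys lay the surface out, then apply any
 * pitch and offset overrides that come with imported BOs. */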
static int r600_init_surface(struct r600_common_screen *rscreen,
			     struct radeon_surf *surface,
			     const struct pipe_resource *ptex,
			     enum radeon_surf_mode array_mode,
			     unsigned pitch_in_bytes_override,
			     unsigned offset,
			     bool is_imported,
			     bool is_scanout,
			     bool is_flushed_depth)
{
	const struct util_format_description *desc =
		util_format_description(ptex->format);
	bool is_depth, is_stencil;
	int r;
	unsigned i, bpe, flags = 0;

	is_depth = util_format_has_depth(desc);
	is_stencil = util_format_has_stencil(desc);

	if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
	    ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
		bpe = 4; /* stencil is allocated separately on evergreen */
	} else {
		bpe = util_format_get_blocksize(ptex->format);
		assert(util_is_power_of_two_or_zero(bpe));
	}

	if (!is_flushed_depth && is_depth) {
		flags |= RADEON_SURF_ZBUFFER;

		if (is_stencil)
			flags |= RADEON_SURF_SBUFFER;
	}

	if (ptex->bind & PIPE_BIND_SCANOUT || is_scanout) {
		/* This should catch bugs in gallium users setting incorrect flags. */
		assert(ptex->nr_samples <= 1 &&
		       ptex->array_size == 1 &&
		       ptex->depth0 == 1 &&
		       ptex->last_level == 0 &&
		       !(flags & RADEON_SURF_Z_OR_SBUFFER));

		flags |= RADEON_SURF_SCANOUT;
	}

	if (ptex->bind & PIPE_BIND_SHARED)
		flags |= RADEON_SURF_SHAREABLE;
	if (is_imported)
		flags |= RADEON_SURF_IMPORTED | RADEON_SURF_SHAREABLE;
	if (!(ptex->flags & R600_RESOURCE_FLAG_FORCE_TILING))
		flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE;

	r = rscreen->ws->surface_init(rscreen->ws, ptex,
				      flags, bpe, array_mode, surface);
	if (r) {
		return r;
	}

	if (pitch_in_bytes_override &&
	    pitch_in_bytes_override != surface->u.legacy.level[0].nblk_x * bpe) {
		/* The old ddx on evergreen overestimates alignment for 1d, only
		 * one level is used for those.
		 */
		surface->u.legacy.level[0].nblk_x = pitch_in_bytes_override / bpe;
		surface->u.legacy.level[0].slice_size_dw =
			((uint64_t)pitch_in_bytes_override * surface->u.legacy.level[0].nblk_y) / 4;
	}

	if (offset) {
		for (i = 0; i < ARRAY_SIZE(surface->u.legacy.level); ++i)
			surface->u.legacy.level[i].offset += offset;
	}

	return 0;
}

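/* Pack the tiling layout of rtex into BO metadata so that other processes
 * importing the BO can reconstruct the same layout. */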
static void r600_texture_init_metadata(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex,
				       struct radeon_bo_metadata *metadata)
{
	struct radeon_surf *surface = &rtex->surface;

	memset(metadata, 0, sizeof(*metadata));

	metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
		RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
	metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
		RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
	metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config;
	metadata->u.legacy.bankw = surface->u.legacy.bankw;
	metadata->u.legacy.bankh = surface->u.legacy.bankh;
	metadata->u.legacy.tile_split = surface->u.legacy.tile_split;
	metadata->u.legacy.mtilea = surface->u.legacy.mtilea;
	metadata->u.legacy.num_banks = surface->u.legacy.num_banks;
	metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
	metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
}

static void r600_surface_import_metadata(struct r600_common_screen *rscreen,
					 struct radeon_surf *surf,
					 struct radeon_bo_metadata *metadata,
					 enum radeon_surf_mode *array_mode,
					 bool *is_scanout)
{
	surf->u.legacy.pipe_config = metadata->u.legacy.pipe_config;
	surf->u.legacy.bankw = metadata->u.legacy.bankw;
	surf->u.legacy.bankh = metadata->u.legacy.bankh;
	surf->u.legacy.tile_split = metadata->u.legacy.tile_split;
	surf->u.legacy.mtilea = metadata->u.legacy.mtilea;
	surf->u.legacy.num_banks = metadata->u.legacy.num_banks;

	if (metadata->u.legacy.macrotile == RADEON_LAYOUT_TILED)
		*array_mode = RADEON_SURF_MODE_2D;
	else if (metadata->u.legacy.microtile == RADEON_LAYOUT_TILED)
		*array_mode = RADEON_SURF_MODE_1D;
	else
		*array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

	*is_scanout = metadata->u.legacy.scanout;
}

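/* Expand a fast color clear so the texture contents become readable without
 * the CMASK clear color; flushing on the aux context is done under its lock. */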
static void r600_eliminate_fast_color_clear(struct r600_common_context *rctx,
					    struct r600_texture *rtex)
{
	struct r600_common_screen *rscreen = rctx->screen;
	struct pipe_context *ctx = &rctx->b;

	if (ctx == rscreen->aux_context)
		mtx_lock(&rscreen->aux_context_lock);

	ctx->flush_resource(ctx, &rtex->resource.b.b);
	ctx->flush(ctx, NULL, 0);

	if (ctx == rscreen->aux_context)
		mtx_unlock(&rscreen->aux_context_lock);
}

static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex)
{
	if (!rtex->cmask.size)
		return;

	assert(rtex->resource.b.b.nr_samples <= 1);

	/* Disable CMASK. */
	memset(&rtex->cmask, 0, sizeof(rtex->cmask));
	rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;
	rtex->dirty_level_mask = 0;

	rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1);

	if (rtex->cmask_buffer != &rtex->resource)
		r600_resource_reference(&rtex->cmask_buffer, NULL);

	/* Notify all contexts about the change. */
	p_atomic_inc(&rscreen->dirty_tex_counter);
	p_atomic_inc(&rscreen->compressed_colortex_counter);
}

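/* Re-create the texture with new bind flags (e.g. PIPE_BIND_LINEAR or
 * PIPE_BIND_SHARED), optionally copy the old contents over, and transplant
 * the new storage into the existing pipe_resource. */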
static void r600_reallocate_texture_inplace(struct r600_common_context *rctx,
					    struct r600_texture *rtex,
					    unsigned new_bind_flag,
					    bool invalidate_storage)
{
	struct pipe_screen *screen = rctx->b.screen;
	struct r600_texture *new_tex;
	struct pipe_resource templ = rtex->resource.b.b;
	unsigned i;

	templ.bind |= new_bind_flag;

	/* r600g doesn't react to dirty_tex_descriptor_counter */
	if (rctx->chip_class < SI)
		return;

	if (rtex->resource.b.is_shared)
		return;

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		if (rtex->surface.is_linear)
			return;

		/* This fails with MSAA, depth, and compressed textures. */
		if (r600_choose_tiling(rctx->screen, &templ) !=
		    RADEON_SURF_MODE_LINEAR_ALIGNED)
			return;
	}

	new_tex = (struct r600_texture*)screen->resource_create(screen, &templ);
	if (!new_tex)
		return;

	/* Copy the pixels to the new texture. */
	if (!invalidate_storage) {
		for (i = 0; i <= templ.last_level; i++) {
			struct pipe_box box;

			u_box_3d(0, 0, 0,
				 u_minify(templ.width0, i), u_minify(templ.height0, i),
				 util_num_layers(&templ, i), &box);

			rctx->dma_copy(&rctx->b, &new_tex->resource.b.b, i, 0, 0, 0,
				       &rtex->resource.b.b, i, &box);
		}
	}

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		r600_texture_discard_cmask(rctx->screen, rtex);
	}

	/* Replace the structure fields of rtex. */
	rtex->resource.b.b.bind = templ.bind;
	pb_reference(&rtex->resource.buf, new_tex->resource.buf);
	rtex->resource.gpu_address = new_tex->resource.gpu_address;
	rtex->resource.vram_usage = new_tex->resource.vram_usage;
	rtex->resource.gart_usage = new_tex->resource.gart_usage;
	rtex->resource.bo_size = new_tex->resource.bo_size;
	rtex->resource.bo_alignment = new_tex->resource.bo_alignment;
	rtex->resource.domains = new_tex->resource.domains;
	rtex->resource.flags = new_tex->resource.flags;
	rtex->size = new_tex->size;
	rtex->db_render_format = new_tex->db_render_format;
	rtex->db_compatible = new_tex->db_compatible;
	rtex->can_sample_z = new_tex->can_sample_z;
	rtex->can_sample_s = new_tex->can_sample_s;
	rtex->surface = new_tex->surface;
	rtex->fmask = new_tex->fmask;
	rtex->cmask = new_tex->cmask;
	rtex->cb_color_info = new_tex->cb_color_info;
	rtex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode;
	rtex->htile_offset = new_tex->htile_offset;
	rtex->depth_cleared = new_tex->depth_cleared;
	rtex->stencil_cleared = new_tex->stencil_cleared;
	rtex->non_disp_tiling = new_tex->non_disp_tiling;
	rtex->framebuffers_bound = new_tex->framebuffers_bound;

	if (new_bind_flag == PIPE_BIND_LINEAR) {
		assert(!rtex->htile_offset);
		assert(!rtex->cmask.size);
		assert(!rtex->fmask.size);
		assert(!rtex->is_depth);
	}

	r600_texture_reference(&new_tex, NULL);

	p_atomic_inc(&rctx->screen->dirty_tex_counter);
}

static boolean r600_texture_get_handle(struct pipe_screen* screen,
				       struct pipe_context *ctx,
				       struct pipe_resource *resource,
				       struct winsys_handle *whandle,
				       unsigned usage)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_common_context *rctx;
	struct r600_resource *res = (struct r600_resource*)resource;
	struct r600_texture *rtex = (struct r600_texture*)resource;
	struct radeon_bo_metadata metadata;
	bool update_metadata = false;
	unsigned stride, offset, slice_size;

	ctx = threaded_context_unwrap_sync(ctx);
	rctx = (struct r600_common_context*)(ctx ? ctx : rscreen->aux_context);

	if (resource->target != PIPE_BUFFER) {
		/* This is not supported now, but it might be required for OpenCL
		 * interop in the future.
		 */
		if (resource->nr_samples > 1 || rtex->is_depth)
			return false;

		/* Move a suballocated texture into a non-suballocated allocation. */
		if (rscreen->ws->buffer_is_suballocated(res->buf) ||
		    rtex->surface.tile_swizzle) {
			assert(!res->b.is_shared);
			r600_reallocate_texture_inplace(rctx, rtex,
							PIPE_BIND_SHARED, false);
			rctx->b.flush(&rctx->b, NULL, 0);
			assert(res->b.b.bind & PIPE_BIND_SHARED);
			assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
			assert(rtex->surface.tile_swizzle == 0);
		}

		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
		    rtex->cmask.size) {
			/* Eliminate fast clear (CMASK) */
			r600_eliminate_fast_color_clear(rctx, rtex);

			/* Disable CMASK if flush_resource isn't going
			 * to be called.
			 */
			if (rtex->cmask.size)
				r600_texture_discard_cmask(rscreen, rtex);
		}

		/* Set metadata. */
		if (!res->b.is_shared || update_metadata) {
			r600_texture_init_metadata(rscreen, rtex, &metadata);
			if (rscreen->query_opaque_metadata)
				rscreen->query_opaque_metadata(rscreen, rtex,
							       &metadata);

			rscreen->ws->buffer_set_metadata(res->buf, &metadata);
		}

		offset = rtex->surface.u.legacy.level[0].offset;
		stride = rtex->surface.u.legacy.level[0].nblk_x *
			 rtex->surface.bpe;
		slice_size = (uint64_t)rtex->surface.u.legacy.level[0].slice_size_dw * 4;
	} else {
		/* Move a suballocated buffer into a non-suballocated allocation. */
		if (rscreen->ws->buffer_is_suballocated(res->buf)) {
			assert(!res->b.is_shared);

			/* Allocate a new buffer with PIPE_BIND_SHARED. */
			struct pipe_resource templ = res->b.b;
			templ.bind |= PIPE_BIND_SHARED;

			struct pipe_resource *newb =
				screen->resource_create(screen, &templ);
			if (!newb)
				return false;

			/* Copy the old buffer contents to the new one. */
			struct pipe_box box;
			u_box_1d(0, newb->width0, &box);
			rctx->b.resource_copy_region(&rctx->b, newb, 0, 0, 0, 0,
						     &res->b.b, 0, &box);
			/* Move the new buffer storage to the old pipe_resource. */
			r600_replace_buffer_storage(&rctx->b, &res->b.b, newb);
			pipe_resource_reference(&newb, NULL);

			assert(res->b.b.bind & PIPE_BIND_SHARED);
			assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
		}

		/* Buffers have no layout metadata. */
		offset = 0;
		stride = 0;
		slice_size = 0;
	}

	if (res->b.is_shared) {
		/* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
		 * doesn't set it.
		 */
		res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
	} else {
		res->b.is_shared = true;
		res->external_usage = usage;
	}

	return rscreen->ws->buffer_get_handle(res->buf, stride, offset,
					      slice_size, whandle);
}

static void r600_texture_destroy(struct pipe_screen *screen,
				 struct pipe_resource *ptex)
{
	struct r600_texture *rtex = (struct r600_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;

	r600_texture_reference(&rtex->flushed_depth_texture, NULL);
	pipe_resource_reference((struct pipe_resource**)&resource->immed_buffer, NULL);

	if (rtex->cmask_buffer != &rtex->resource) {
		r600_resource_reference(&rtex->cmask_buffer, NULL);
	}
	pb_reference(&resource->buf, NULL);
	FREE(rtex);
}

static const struct u_resource_vtbl r600_texture_vtbl;

/* The number of samples can be specified independently of the texture. */
void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 unsigned nr_samples,
				 struct r600_fmask_info *out)
{
	/* FMASK is allocated like an ordinary texture. */
	struct pipe_resource templ = rtex->resource.b.b;
	struct radeon_surf fmask = {};
	unsigned flags, bpe;

	memset(out, 0, sizeof(*out));

	templ.nr_samples = 1;
	flags = rtex->surface.flags | RADEON_SURF_FMASK;

	/* Use the same parameters and tile mode. */
	fmask.u.legacy.bankw = rtex->surface.u.legacy.bankw;
	fmask.u.legacy.bankh = rtex->surface.u.legacy.bankh;
	fmask.u.legacy.mtilea = rtex->surface.u.legacy.mtilea;
	fmask.u.legacy.tile_split = rtex->surface.u.legacy.tile_split;

	if (nr_samples <= 4)
		fmask.u.legacy.bankh = 4;

	switch (nr_samples) {
	case 2:
	case 4:
		bpe = 1;
		break;
	case 8:
		bpe = 4;
		break;
	default:
		R600_ERR("Invalid sample count for FMASK allocation.\n");
		return;
	}

	/* Overallocate FMASK on R600-R700 to fix colorbuffer corruption.
	 * This can be fixed by writing a separate FMASK allocator specifically
	 * for R600-R700 asics. */
	if (rscreen->chip_class <= R700) {
		bpe *= 2;
	}

	if (rscreen->ws->surface_init(rscreen->ws, &templ,
				      flags, bpe, RADEON_SURF_MODE_2D, &fmask)) {
		R600_ERR("Got error in surface_init while allocating FMASK.\n");
		return;
	}

	assert(fmask.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);

	out->slice_tile_max = (fmask.u.legacy.level[0].nblk_x * fmask.u.legacy.level[0].nblk_y) / 64;
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->tile_mode_index = fmask.u.legacy.tiling_index[0];
	out->pitch_in_pixels = fmask.u.legacy.level[0].nblk_x;
	out->bank_height = fmask.u.legacy.bankh;
	out->tile_swizzle = fmask.tile_swizzle;
	out->alignment = MAX2(256, fmask.surf_alignment);
	out->size = fmask.surf_size;
}

static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	r600_texture_get_fmask_info(rscreen, rtex,
				    rtex->resource.b.b.nr_samples, &rtex->fmask);

	rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
	rtex->size = rtex->fmask.offset + rtex->fmask.size;
}

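/* Compute the size and alignment of the CMASK metadata surface from the
 * macrotile dimensions implied by the pipe count and cache configuration. */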
void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 struct r600_cmask_info *out)
{
	unsigned cmask_tile_width = 8;
	unsigned cmask_tile_height = 8;
	unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
	unsigned element_bits = 4;
	unsigned cmask_cache_bits = 1024;
	unsigned num_pipes = rscreen->info.num_tile_pipes;
	unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;

	unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
	unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
	unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
	unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
	unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;

	unsigned pitch_elements = align(rtex->resource.b.b.width0, macro_tile_width);
	unsigned height = align(rtex->resource.b.b.height0, macro_tile_height);

	unsigned base_align = num_pipes * pipe_interleave_bytes;
	unsigned slice_bytes =
		((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;

	assert(macro_tile_width % 128 == 0);
	assert(macro_tile_height % 128 == 0);

	out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
	out->alignment = MAX2(256, base_align);
	out->size = util_num_layers(&rtex->resource.b.b, 0) *
		    align(slice_bytes, base_align);
}

*rscreen
,
684 struct r600_texture
*rtex
)
686 r600_texture_get_cmask_info(rscreen
, rtex
, &rtex
->cmask
);
688 rtex
->cmask
.offset
= align64(rtex
->size
, rtex
->cmask
.alignment
);
689 rtex
->size
= rtex
->cmask
.offset
+ rtex
->cmask
.size
;
691 rtex
->cb_color_info
|= EG_S_028C70_FAST_CLEAR(1);
static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
					      struct r600_texture *rtex)
{
	if (rtex->cmask_buffer)
		return;

	assert(rtex->cmask.size == 0);

	r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);

	rtex->cmask_buffer = (struct r600_resource *)
		r600_aligned_buffer_create(&rscreen->b,
					   R600_RESOURCE_FLAG_UNMAPPABLE,
					   PIPE_USAGE_DEFAULT,
					   rtex->cmask.size,
					   rtex->cmask.alignment);
	if (rtex->cmask_buffer == NULL) {
		rtex->cmask.size = 0;
		return;
	}

	/* update colorbuffer state bits */
	rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;

	rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);

	p_atomic_inc(&rscreen->compressed_colortex_counter);
}

void eg_resource_alloc_immed(struct r600_common_screen *rscreen,
			     struct r600_resource *res,
			     unsigned immed_size)
{
	res->immed_buffer = (struct r600_resource *)
		pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_DEFAULT, immed_size);
}

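/* Compute the HTILE (depth metadata) size; htile_size stays 0 when the DRM
 * version is too old or a hardware bug makes HTILE unusable. */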
static void r600_texture_get_htile_size(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	unsigned cl_width, cl_height, width, height;
	unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
	unsigned num_pipes = rscreen->info.num_tile_pipes;

	rtex->surface.htile_size = 0;

	if (rscreen->chip_class <= EVERGREEN &&
	    rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 26)
		return;

	/* HW bug on R6xx. */
	if (rscreen->chip_class == R600 &&
	    (rtex->resource.b.b.width0 > 7680 ||
	     rtex->resource.b.b.height0 > 7680))
		return;

	switch (num_pipes) {
	case 1:
		cl_width = 32;
		cl_height = 16;
		break;
	case 2:
		cl_width = 32;
		cl_height = 32;
		break;
	case 4:
		cl_width = 64;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 64;
		break;
	case 16:
		cl_width = 128;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	width = align(rtex->resource.b.b.width0, cl_width * 8);
	height = align(rtex->resource.b.b.height0, cl_height * 8);

	slice_elements = (width * height) / (8 * 8);
	slice_bytes = slice_elements * 4;

	pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
	base_align = num_pipes * pipe_interleave_bytes;

	rtex->surface.htile_alignment = base_align;
	rtex->surface.htile_size =
		util_num_layers(&rtex->resource.b.b, 0) *
		align(slice_bytes, base_align);
}

static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	r600_texture_get_htile_size(rscreen, rtex);

	if (!rtex->surface.htile_size)
		return;

	rtex->htile_offset = align(rtex->size, rtex->surface.htile_alignment);
	rtex->size = rtex->htile_offset + rtex->surface.htile_size;
}

void r600_print_texture_info(struct r600_common_screen *rscreen,
			     struct r600_texture *rtex, struct u_log_context *log)
{
	int i;

	/* Common parameters. */
	u_log_printf(log, "  Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
		"blk_h=%u, array_size=%u, last_level=%u, "
		"bpe=%u, nsamples=%u, flags=0x%x, %s\n",
		rtex->resource.b.b.width0, rtex->resource.b.b.height0,
		rtex->resource.b.b.depth0, rtex->surface.blk_w,
		rtex->surface.blk_h,
		rtex->resource.b.b.array_size, rtex->resource.b.b.last_level,
		rtex->surface.bpe, rtex->resource.b.b.nr_samples,
		rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));

	u_log_printf(log, "  Layout: size=%"PRIu64", alignment=%u, bankw=%u, "
		"bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
		rtex->surface.surf_size, rtex->surface.surf_alignment, rtex->surface.u.legacy.bankw,
		rtex->surface.u.legacy.bankh, rtex->surface.u.legacy.num_banks, rtex->surface.u.legacy.mtilea,
		rtex->surface.u.legacy.tile_split, rtex->surface.u.legacy.pipe_config,
		(rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);

	if (rtex->fmask.size)
		u_log_printf(log, "  FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
			"bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
			rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
			rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
			rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);

	if (rtex->cmask.size)
		u_log_printf(log, "  CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
			"slice_tile_max=%u\n",
			rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
			rtex->cmask.slice_tile_max);

	if (rtex->htile_offset)
		u_log_printf(log, "  HTile: offset=%"PRIu64", size=%u "
			"alignment=%u\n",
			rtex->htile_offset, rtex->surface.htile_size,
			rtex->surface.htile_alignment);

	for (i = 0; i <= rtex->resource.b.b.last_level; i++)
		u_log_printf(log, "  Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
			"npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
			"mode=%u, tiling_index = %u\n",
			i, rtex->surface.u.legacy.level[i].offset,
			(uint64_t)rtex->surface.u.legacy.level[i].slice_size_dw * 4,
			u_minify(rtex->resource.b.b.width0, i),
			u_minify(rtex->resource.b.b.height0, i),
			u_minify(rtex->resource.b.b.depth0, i),
			rtex->surface.u.legacy.level[i].nblk_x,
			rtex->surface.u.legacy.level[i].nblk_y,
			rtex->surface.u.legacy.level[i].mode,
			rtex->surface.u.legacy.tiling_index[i]);

	if (rtex->surface.has_stencil) {
		u_log_printf(log, "  StencilLayout: tilesplit=%u\n",
			rtex->surface.u.legacy.stencil_tile_split);
		for (i = 0; i <= rtex->resource.b.b.last_level; i++) {
			u_log_printf(log, "  StencilLevel[%i]: offset=%"PRIu64", "
				"slice_size=%"PRIu64", npix_x=%u, "
				"npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
				"mode=%u, tiling_index = %u\n",
				i, rtex->surface.u.legacy.stencil_level[i].offset,
				(uint64_t)rtex->surface.u.legacy.stencil_level[i].slice_size_dw * 4,
				u_minify(rtex->resource.b.b.width0, i),
				u_minify(rtex->resource.b.b.height0, i),
				u_minify(rtex->resource.b.b.depth0, i),
				rtex->surface.u.legacy.stencil_level[i].nblk_x,
				rtex->surface.u.legacy.stencil_level[i].nblk_y,
				rtex->surface.u.legacy.stencil_level[i].mode,
				rtex->surface.u.legacy.stencil_tiling_index[i]);
		}
	}
}

/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
r600_texture_create_object(struct pipe_screen *screen,
			   const struct pipe_resource *base,
			   struct pb_buffer *buf,
			   struct radeon_surf *surface)
{
	struct r600_texture *rtex;
	struct r600_resource *resource;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	rtex = CALLOC_STRUCT(r600_texture);
	if (!rtex)
		return NULL;

	resource = &rtex->resource;
	resource->b.b = *base;
	resource->b.b.next = NULL;
	resource->b.vtbl = &r600_texture_vtbl;
	pipe_reference_init(&resource->b.b.reference, 1);
	resource->b.b.screen = screen;

	/* don't include stencil-only formats which we don't support for rendering */
	rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));

	rtex->surface = *surface;
	rtex->size = rtex->surface.surf_size;
	rtex->db_render_format = base->format;

	/* Tiled depth textures utilize the non-displayable tile order.
	 * This must be done after r600_setup_surface.
	 * Applies to R600-Cayman. */
	rtex->non_disp_tiling = rtex->is_depth && rtex->surface.u.legacy.level[0].mode >= RADEON_SURF_MODE_1D;
	/* Applies to GCN. */
	rtex->last_msaa_resolve_target_micro_mode = rtex->surface.micro_tile_mode;

	if (rtex->is_depth) {
		if (base->flags & (R600_RESOURCE_FLAG_TRANSFER |
				   R600_RESOURCE_FLAG_FLUSHED_DEPTH) ||
		    rscreen->chip_class >= EVERGREEN) {
			rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted;
			rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted;
		} else {
			if (rtex->resource.b.b.nr_samples <= 1 &&
			    (rtex->resource.b.b.format == PIPE_FORMAT_Z16_UNORM ||
			     rtex->resource.b.b.format == PIPE_FORMAT_Z32_FLOAT))
				rtex->can_sample_z = true;
		}

		if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
				     R600_RESOURCE_FLAG_FLUSHED_DEPTH))) {
			rtex->db_compatible = true;

			if (!(rscreen->debug_flags & DBG_NO_HYPERZ))
				r600_texture_allocate_htile(rscreen, rtex);
		}
	} else {
		if (base->nr_samples > 1) {
			if (!buf) {
				r600_texture_allocate_fmask(rscreen, rtex);
				r600_texture_allocate_cmask(rscreen, rtex);
				rtex->cmask_buffer = &rtex->resource;
			}
			if (!rtex->fmask.size || !rtex->cmask.size) {
				FREE(rtex);
				return NULL;
			}
		}
	}

	/* Now create the backing buffer. */
	if (!buf) {
		r600_init_resource_fields(rscreen, resource, rtex->size,
					  rtex->surface.surf_alignment);

		if (!r600_alloc_resource(rscreen, resource)) {
			FREE(rtex);
			return NULL;
		}
	} else {
		resource->buf = buf;
		resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->buf);
		resource->bo_size = buf->size;
		resource->bo_alignment = buf->alignment;
		resource->domains = rscreen->ws->buffer_get_initial_domain(resource->buf);
		if (resource->domains & RADEON_DOMAIN_VRAM)
			resource->vram_usage = buf->size;
		else if (resource->domains & RADEON_DOMAIN_GTT)
			resource->gart_usage = buf->size;
	}

	if (rtex->cmask.size) {
		/* Initialize the cmask to 0xCC (= compressed state). */
		r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
					 rtex->cmask.offset, rtex->cmask.size,
					 0xCCCCCCCC);
	}
	if (rtex->htile_offset) {
		uint32_t clear_value = 0;

		r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
					 rtex->htile_offset,
					 rtex->surface.htile_size,
					 clear_value);
	}

	/* Initialize the CMASK base register value. */
	rtex->cmask.base_address_reg =
		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

	if (rscreen->debug_flags & DBG_VM) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
			rtex->resource.gpu_address,
			rtex->resource.gpu_address + rtex->resource.buf->size,
			base->width0, base->height0, util_num_layers(base, 0), base->last_level+1,
			base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
	}

	if (rscreen->debug_flags & DBG_TEX) {
		puts("Texture:");
		struct u_log_context log;
		u_log_context_init(&log);
		r600_print_texture_info(rscreen, rtex, &log);
		u_log_new_page_print(&log, stdout);
		fflush(stdout);
		u_log_context_destroy(&log);
	}

	return rtex;
}

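/* Pick a tiling mode for a new resource: tiled by default, linear for
 * transfers, 422 formats, 1D targets, and frequently mapped textures. */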
static enum radeon_surf_mode
r600_choose_tiling(struct r600_common_screen *rscreen,
		   const struct pipe_resource *templ)
{
	const struct util_format_description *desc = util_format_description(templ->format);
	bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING;
	bool is_depth_stencil = util_format_is_depth_or_stencil(templ->format) &&
				!(templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);

	/* MSAA resources must be 2D tiled. */
	if (templ->nr_samples > 1)
		return RADEON_SURF_MODE_2D;

	/* Transfer resources should be linear. */
	if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
		return RADEON_SURF_MODE_LINEAR_ALIGNED;

	/* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */
	if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN &&
	    (templ->bind & PIPE_BIND_COMPUTE_RESOURCE) &&
	    (templ->target == PIPE_TEXTURE_2D ||
	     templ->target == PIPE_TEXTURE_3D))
		force_tiling = true;

	/* Handle common candidates for the linear mode.
	 * Compressed textures and DB surfaces must always be tiled.
	 */
	if (!force_tiling &&
	    !is_depth_stencil &&
	    !util_format_is_compressed(templ->format)) {
		if (rscreen->debug_flags & DBG_NO_TILING)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
		if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		if (templ->bind & PIPE_BIND_LINEAR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* 1D textures should be linear - fixes image operations on 1d */
		if (templ->target == PIPE_TEXTURE_1D ||
		    templ->target == PIPE_TEXTURE_1D_ARRAY)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures likely to be mapped often. */
		if (templ->usage == PIPE_USAGE_STAGING ||
		    templ->usage == PIPE_USAGE_STREAM)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Make small textures 1D tiled. */
	if (templ->width0 <= 16 || templ->height0 <= 16 ||
	    (rscreen->debug_flags & DBG_NO_2D_TILING))
		return RADEON_SURF_MODE_1D;

	/* The allocator will switch to 1D if needed. */
	return RADEON_SURF_MODE_2D;
}

struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
					  const struct pipe_resource *templ)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_surf surface = {0};
	bool is_flushed_depth = templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH;
	int r;

	r = r600_init_surface(rscreen, &surface, templ,
			      r600_choose_tiling(rscreen, templ), 0, 0,
			      false, false, is_flushed_depth);
	if (r) {
		return NULL;
	}

	return (struct pipe_resource *)
	       r600_texture_create_object(screen, templ, NULL, &surface);
}

static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
						      const struct pipe_resource *templ,
						      struct winsys_handle *whandle,
						      unsigned usage)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct pb_buffer *buf = NULL;
	unsigned stride = 0, offset = 0;
	enum radeon_surf_mode array_mode;
	struct radeon_surf surface = {};
	int r;
	struct radeon_bo_metadata metadata = {};
	struct r600_texture *rtex;
	bool is_scanout;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	    templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride, &offset);
	if (!buf)
		return NULL;

	rscreen->ws->buffer_get_metadata(buf, &metadata);
	r600_surface_import_metadata(rscreen, &surface, &metadata,
				     &array_mode, &is_scanout);

	r = r600_init_surface(rscreen, &surface, templ, array_mode, stride,
			      offset, true, is_scanout, false);
	if (r) {
		return NULL;
	}

	rtex = r600_texture_create_object(screen, templ, buf, &surface);
	if (!rtex)
		return NULL;

	rtex->resource.b.is_shared = true;
	rtex->resource.external_usage = usage;

	if (rscreen->apply_opaque_metadata)
		rscreen->apply_opaque_metadata(rscreen, rtex, &metadata);

	assert(rtex->surface.tile_swizzle == 0);
	return &rtex->resource.b.b;
}

bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     struct r600_texture **staging)
{
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct pipe_resource resource;
	struct r600_texture **flushed_depth_texture = staging ?
			staging : &rtex->flushed_depth_texture;
	enum pipe_format pipe_format = texture->format;

	if (!staging) {
		if (rtex->flushed_depth_texture)
			return true; /* it's ready */

		if (!rtex->can_sample_z && rtex->can_sample_s) {
			switch (pipe_format) {
			case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
				/* Save memory by not allocating the S plane. */
				pipe_format = PIPE_FORMAT_Z32_FLOAT;
				break;
			case PIPE_FORMAT_Z24_UNORM_S8_UINT:
			case PIPE_FORMAT_S8_UINT_Z24_UNORM:
				/* Save memory bandwidth by not copying the
				 * stencil part during flush.
				 *
				 * This potentially increases memory bandwidth
				 * if an application uses both Z and S texturing
				 * simultaneously (a flushed Z24S8 texture
				 * would be stored compactly), but how often
				 * does that really happen?
				 */
				pipe_format = PIPE_FORMAT_Z24X8_UNORM;
				break;
			default:;
			}
		} else if (!rtex->can_sample_s && rtex->can_sample_z) {
			assert(util_format_has_stencil(util_format_description(pipe_format)));

			/* DB->CB copies to an 8bpp surface don't work. */
			pipe_format = PIPE_FORMAT_X24S8_UINT;
		}
	}

	memset(&resource, 0, sizeof(resource));
	resource.target = texture->target;
	resource.format = pipe_format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
	resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;

	if (staging)
		resource.flags |= R600_RESOURCE_FLAG_TRANSFER;

	*flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (*flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold flushed depth\n");
		return false;
	}

	(*flushed_depth_texture)->non_disp_tiling = false;
	return true;
}

/**
 * Initialize the pipe_resource descriptor to be of the same size as the box,
 * which is supposed to hold a subregion of the texture "orig" at the given
 * mipmap level.
 */
static void r600_init_temp_resource_from_box(struct pipe_resource *res,
					     struct pipe_resource *orig,
					     const struct pipe_box *box,
					     unsigned level, unsigned flags)
{
	memset(res, 0, sizeof(*res));
	res->format = orig->format;
	res->width0 = box->width;
	res->height0 = box->height;
	res->depth0 = 1;
	res->array_size = 1;
	res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	res->flags = flags;

	/* We must set the correct texture target and dimensions for a 3D box. */
	if (box->depth > 1 && util_max_layer(orig, level) > 0) {
		res->target = PIPE_TEXTURE_2D_ARRAY;
		res->array_size = box->depth;
	} else {
		res->target = PIPE_TEXTURE_2D;
	}
}

static bool r600_can_invalidate_texture(struct r600_common_screen *rscreen,
					struct r600_texture *rtex,
					unsigned transfer_usage,
					const struct pipe_box *box)
{
	/* r600g doesn't react to dirty_tex_descriptor_counter */
	return rscreen->chip_class >= SI &&
		!rtex->resource.b.is_shared &&
		!(transfer_usage & PIPE_TRANSFER_READ) &&
		rtex->resource.b.b.last_level == 0 &&
		util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
						 box->x, box->y, box->z,
						 box->width, box->height,
						 box->depth);
}

static void r600_texture_invalidate_storage(struct r600_common_context *rctx,
					    struct r600_texture *rtex)
{
	struct r600_common_screen *rscreen = rctx->screen;

	/* There is no point in discarding depth and tiled buffers. */
	assert(!rtex->is_depth);
	assert(rtex->surface.is_linear);

	/* Reallocate the buffer in the same pipe_resource. */
	r600_alloc_resource(rscreen, &rtex->resource);

	/* Initialize the CMASK base address (needed even without CMASK). */
	rtex->cmask.base_address_reg =
		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

	p_atomic_inc(&rscreen->dirty_tex_counter);

	rctx->num_alloc_tex_transfer_bytes += rtex->size;
}

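/* Map a texture for CPU access, going through a linear staging texture (and,
 * for depth, a decompressing blit) whenever the resource cannot be mapped
 * directly. */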
static void *r600_texture_transfer_map(struct pipe_context *ctx,
				       struct pipe_resource *texture,
				       unsigned level,
				       unsigned usage,
				       const struct pipe_box *box,
				       struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct r600_transfer *trans;
	struct r600_resource *buf;
	unsigned offset = 0;
	char *map;
	bool use_staging_texture = false;

	assert(!(texture->flags & R600_RESOURCE_FLAG_TRANSFER));
	assert(box->width && box->height && box->depth);

	/* Depth textures use staging unconditionally. */
	if (!rtex->is_depth) {
		/* Degrade the tile mode if we get too many transfers on APUs.
		 * On dGPUs, the staging texture is always faster.
		 * Only count uploads that are at least 4x4 pixels large.
		 */
		if (!rctx->screen->info.has_dedicated_vram &&
		    level == 0 &&
		    box->width >= 4 && box->height >= 4 &&
		    p_atomic_inc_return(&rtex->num_level0_transfers) == 10) {
			bool can_invalidate =
				r600_can_invalidate_texture(rctx->screen, rtex,
							    usage, box);

			r600_reallocate_texture_inplace(rctx, rtex,
							PIPE_BIND_LINEAR,
							can_invalidate);
		}

		/* Tiled textures need to be converted into a linear texture for CPU
		 * access. The staging texture is always linear and is placed in GART.
		 *
		 * Reading from VRAM or GTT WC is slow, always use the staging
		 * texture in this case.
		 *
		 * Use the staging texture for uploads if the underlying BO
		 * is busy.
		 */
		if (!rtex->surface.is_linear)
			use_staging_texture = true;
		else if (usage & PIPE_TRANSFER_READ)
			use_staging_texture =
				rtex->resource.domains & RADEON_DOMAIN_VRAM ||
				rtex->resource.flags & RADEON_FLAG_GTT_WC;
		/* Write & linear only: */
		else if (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf,
							 RADEON_USAGE_READWRITE) ||
			 !rctx->ws->buffer_wait(rtex->resource.buf, 0,
						RADEON_USAGE_READWRITE)) {
			/* It's busy. */
			if (r600_can_invalidate_texture(rctx->screen, rtex,
							usage, box))
				r600_texture_invalidate_storage(rctx, rtex);
			else
				use_staging_texture = true;
		}
	}

	trans = CALLOC_STRUCT(r600_transfer);
	if (!trans)
		return NULL;
	pipe_resource_reference(&trans->b.b.resource, texture);
	trans->b.b.level = level;
	trans->b.b.usage = usage;
	trans->b.b.box = *box;

	if (rtex->is_depth) {
		struct r600_texture *staging_depth;

		if (rtex->resource.b.b.nr_samples > 1) {
			/* MSAA depth buffers need to be converted to single sample buffers.
			 *
			 * Mapping MSAA depth buffers can occur if ReadPixels is called
			 * with a multisample GLX visual.
			 *
			 * First downsample the depth buffer to a temporary texture,
			 * then decompress the temporary one to staging.
			 *
			 * Only the region being mapped is transferred.
			 */
			struct pipe_resource resource;

			r600_init_temp_resource_from_box(&resource, texture, box, level, 0);

			if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			if (usage & PIPE_TRANSFER_READ) {
				struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
				if (!temp) {
					R600_ERR("failed to create a temporary depth texture\n");
					FREE(trans);
					return NULL;
				}

				r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
				rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
							    0, 0, 0, box->depth, 0, 0);
				pipe_resource_reference(&temp, NULL);
			}

			/* Just get the strides. */
			r600_texture_get_offset(rctx->screen, staging_depth, level, NULL,
						&trans->b.b.stride,
						&trans->b.b.layer_stride);
		} else {
			/* XXX: only readback the rectangle which is being mapped? */
			/* XXX: when discard is true, no need to read back from depth texture */
			if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			rctx->blit_decompress_depth(ctx, rtex, staging_depth,
						    level, level,
						    box->z, box->z + box->depth - 1,
						    0, 0);

			offset = r600_texture_get_offset(rctx->screen, staging_depth,
							 level, box,
							 &trans->b.b.stride,
							 &trans->b.b.layer_stride);
		}

		trans->staging = (struct r600_resource*)staging_depth;
		buf = trans->staging;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct r600_texture *staging;

		r600_init_temp_resource_from_box(&resource, texture, box, level,
						 R600_RESOURCE_FLAG_TRANSFER);
		resource.usage = (usage & PIPE_TRANSFER_READ) ?
			PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;

		/* Create the temporary texture. */
		staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
		if (!staging) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			FREE(trans);
			return NULL;
		}
		trans->staging = &staging->resource;

		/* Just get the strides. */
		r600_texture_get_offset(rctx->screen, staging, 0, NULL,
					&trans->b.b.stride,
					&trans->b.b.layer_stride);

		if (usage & PIPE_TRANSFER_READ)
			r600_copy_to_staging_texture(ctx, trans);
		else
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

		buf = trans->staging;
	} else {
		/* the resource is mapped directly */
		offset = r600_texture_get_offset(rctx->screen, rtex, level, box,
						 &trans->b.b.stride,
						 &trans->b.b.layer_stride);
		buf = &rtex->resource;
	}

	if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
		r600_resource_reference(&trans->staging, NULL);
		FREE(trans);
		return NULL;
	}

	*ptransfer = &trans->b.b;
	return map + offset;
}

static void r600_texture_transfer_unmap(struct pipe_context *ctx,
					struct pipe_transfer* transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct pipe_resource *texture = transfer->resource;
	struct r600_texture *rtex = (struct r600_texture*)texture;

	if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
		if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &rtransfer->staging->b.b, transfer->level,
						  &transfer->box);
		} else {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
	}

	if (rtransfer->staging) {
		rctx->num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size;
		r600_resource_reference(&rtransfer->staging, NULL);
	}

	/* Heuristic for {upload, draw, upload, draw, ..}:
	 *
	 * Flush the gfx IB if we've allocated too much texture storage.
	 *
	 * The idea is that we don't want to build IBs that use too much
	 * memory and put pressure on the kernel memory manager and we also
	 * want to make temporary and invalidated buffers go idle ASAP to
	 * decrease the total memory usage or make them reusable. The memory
	 * usage will be slightly higher than given here because of the buffer
	 * cache in the winsys.
	 *
	 * The result is that the kernel memory manager is never a bottleneck.
	 */
	if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
		rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
		rctx->num_alloc_tex_transfer_bytes = 0;
	}

	pipe_resource_reference(&transfer->resource, NULL);
	FREE(transfer);
}

static const struct u_resource_vtbl r600_texture_vtbl =
{
	NULL,				/* get_handle */
	r600_texture_destroy,		/* resource_destroy */
	r600_texture_transfer_map,	/* transfer_map */
	u_default_transfer_flush_region, /* transfer_flush_region */
	r600_texture_transfer_unmap,	/* transfer_unmap */
};

struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
						struct pipe_resource *texture,
						const struct pipe_surface *templ,
						unsigned width0, unsigned height0,
						unsigned width, unsigned height)
{
	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);

	if (!surface)
		return NULL;

	assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
	assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));

	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.context = pipe;
	surface->base.format = templ->format;
	surface->base.width = width;
	surface->base.height = height;
	surface->base.u = templ->u;

	surface->width0 = width0;
	surface->height0 = height0;

	return &surface->base;
}

static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
						struct pipe_resource *tex,
						const struct pipe_surface *templ)
{
	unsigned level = templ->u.tex.level;
	unsigned width = u_minify(tex->width0, level);
	unsigned height = u_minify(tex->height0, level);
	unsigned width0 = tex->width0;
	unsigned height0 = tex->height0;

	if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
		const struct util_format_description *tex_desc
			= util_format_description(tex->format);
		const struct util_format_description *templ_desc
			= util_format_description(templ->format);

		assert(tex_desc->block.bits == templ_desc->block.bits);

		/* Adjust size of surface if and only if the block width or
		 * height is changed. */
		if (tex_desc->block.width != templ_desc->block.width ||
		    tex_desc->block.height != templ_desc->block.height) {
			unsigned nblks_x = util_format_get_nblocksx(tex->format, width);
			unsigned nblks_y = util_format_get_nblocksy(tex->format, height);

			width = nblks_x * templ_desc->block.width;
			height = nblks_y * templ_desc->block.height;

			width0 = util_format_get_nblocksx(tex->format, width0);
			height0 = util_format_get_nblocksy(tex->format, height0);
		}
	}

	return r600_create_surface_custom(pipe, tex, templ,
					  width0, height0,
					  width, height);
}

static void r600_surface_destroy(struct pipe_context *pipe,
				 struct pipe_surface *surface)
{
	struct r600_surface *surf = (struct r600_surface*)surface;
	r600_resource_reference(&surf->cb_buffer_fmask, NULL);
	r600_resource_reference(&surf->cb_buffer_cmask, NULL);
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}

static void r600_clear_texture(struct pipe_context *pipe,
			       struct pipe_resource *tex,
			       unsigned level,
			       const struct pipe_box *box,
			       const void *data)
{
	struct pipe_screen *screen = pipe->screen;
	struct r600_texture *rtex = (struct r600_texture*)tex;
	struct pipe_surface tmpl = {{0}};
	struct pipe_surface *sf;
	const struct util_format_description *desc =
		util_format_description(tex->format);

	tmpl.format = tex->format;
	tmpl.u.tex.first_layer = box->z;
	tmpl.u.tex.last_layer = box->z + box->depth - 1;
	tmpl.u.tex.level = level;
	sf = pipe->create_surface(pipe, tex, &tmpl);
	if (!sf)
		return;

	if (rtex->is_depth) {
		unsigned clear;
		float depth;
		uint8_t stencil = 0;

		/* Depth is always present. */
		clear = PIPE_CLEAR_DEPTH;
		desc->unpack_z_float(&depth, 0, data, 0, 1, 1);

		if (rtex->surface.has_stencil) {
			clear |= PIPE_CLEAR_STENCIL;
			desc->unpack_s_8uint(&stencil, 0, data, 0, 1, 1);
		}

		pipe->clear_depth_stencil(pipe, sf, clear, depth, stencil,
					  box->x, box->y,
					  box->width, box->height, false);
	} else {
		union pipe_color_union color;

		/* pipe_color_union requires the full vec4 representation. */
		if (util_format_is_pure_uint(tex->format))
			desc->unpack_rgba_uint(color.ui, 0, data, 0, 1, 1);
		else if (util_format_is_pure_sint(tex->format))
			desc->unpack_rgba_sint(color.i, 0, data, 0, 1, 1);
		else
			desc->unpack_rgba_float(color.f, 0, data, 0, 1, 1);

		if (screen->is_format_supported(screen, tex->format,
						tex->target, 0,
						PIPE_BIND_RENDER_TARGET)) {
			pipe->clear_render_target(pipe, sf, &color,
						  box->x, box->y,
						  box->width, box->height, false);
		} else {
			/* Software fallback - just for R9G9B9E5_FLOAT */
			util_clear_render_target(pipe, sf, &color,
						 box->x, box->y,
						 box->width, box->height);
		}
	}
	pipe_surface_reference(&sf, NULL);
}

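/* Translate the channel ordering of a pipe format into a COMP_SWAP value for
 * the CB hardware; returns ~0U for layouts the hardware can't represent. */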
unsigned r600_translate_colorswap(enum pipe_format format, bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);

#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_SWAP_STD;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return ~0U;

	switch (desc->nr_channels) {
	case 1:
		if (HAS_SWIZZLE(0,X))
			return V_0280A0_SWAP_STD; /* X___ */
		else if (HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* ___X */
		break;
	case 2:
		if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
		    (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
		    (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
			return V_0280A0_SWAP_STD; /* XY__ */
		else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
			 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
			 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
			/* YX__ */
			return (do_endian_swap ? V_0280A0_SWAP_STD : V_0280A0_SWAP_STD_REV);
		else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
			return V_0280A0_SWAP_ALT; /* X__Y */
		else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* Y__X */
		break;
	case 3:
		if (HAS_SWIZZLE(0,X))
			return (do_endian_swap ? V_0280A0_SWAP_STD_REV : V_0280A0_SWAP_STD);
		else if (HAS_SWIZZLE(0,Z))
			return V_0280A0_SWAP_STD_REV; /* ZYX */
		break;
	case 4:
		/* check the middle channels, the 1st and 4th channel can be NONE */
		if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
			return V_0280A0_SWAP_STD; /* XYZW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
			return V_0280A0_SWAP_STD_REV; /* WZYX */
		} else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
			return V_0280A0_SWAP_ALT; /* ZYXW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
			/* YZWX */
			if (desc->is_array)
				return V_0280A0_SWAP_ALT_REV;
			else
				return (do_endian_swap ? V_0280A0_SWAP_ALT : V_0280A0_SWAP_ALT_REV);
		}
		break;
	}
	return ~0U;
}

/* FAST COLOR CLEAR */

static void evergreen_set_clear_color(struct r600_texture *rtex,
				      enum pipe_format surface_format,
				      const union pipe_color_union *color)
{
	union util_color uc;

	memset(&uc, 0, sizeof(uc));

	if (rtex->surface.bpe == 16) {
		/* DCC fast clear only:
		 *   CLEAR_WORD0 = R = G = B
		 *   CLEAR_WORD1 = A
		 */
		assert(color->ui[0] == color->ui[1] &&
		       color->ui[0] == color->ui[2]);
		uc.ui[0] = color->ui[0];
		uc.ui[1] = color->ui[3];
	} else if (util_format_is_pure_uint(surface_format)) {
		util_format_write_4ui(surface_format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
	} else if (util_format_is_pure_sint(surface_format)) {
		util_format_write_4i(surface_format, color->i, 0, &uc, 0, 0, 0, 1, 1);
	} else {
		util_pack_color(color->f, surface_format, &uc);
	}

	memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t));
}

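/* Try to fast-clear each bound colorbuffer by filling its CMASK with the
 * "cleared" code and recording the clear color, instead of touching every
 * pixel; buffers that were fast-cleared are removed from *buffers. */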
void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
				   struct pipe_framebuffer_state *fb,
				   struct r600_atom *fb_state,
				   unsigned *buffers, ubyte *dirty_cbufs,
				   const union pipe_color_union *color)
{
	int i;

	/* This function is broken in BE, so just disable this path for now */
#ifdef PIPE_ARCH_BIG_ENDIAN
	return;
#endif

	if (rctx->render_cond)
		return;

	for (i = 0; i < fb->nr_cbufs; i++) {
		struct r600_texture *tex;
		unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;

		if (!fb->cbufs[i])
			continue;

		/* if this colorbuffer is not being cleared */
		if (!(*buffers & clear_bit))
			continue;

		tex = (struct r600_texture *)fb->cbufs[i]->texture;

		/* the clear is allowed if all layers are bound */
		if (fb->cbufs[i]->u.tex.first_layer != 0 ||
		    fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
			continue;
		}

		/* cannot clear mipmapped textures */
		if (fb->cbufs[i]->texture->last_level != 0) {
			continue;
		}

		/* only supported on tiled surfaces */
		if (tex->surface.is_linear) {
			continue;
		}

		/* shared textures can't use fast clear without an explicit flush,
		 * because there is no way to communicate the clear color among
		 * all clients
		 */
		if (tex->resource.b.is_shared &&
		    !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			continue;

		/* Use a slow clear for small surfaces where the cost of
		 * the eliminate pass can be higher than the benefit of fast
		 * clear.
		 *
		 * This helps on both dGPUs and APUs, even small ones.
		 */
		if (tex->resource.b.b.nr_samples <= 1 &&
		    tex->resource.b.b.width0 * tex->resource.b.b.height0 <= 300 * 300)
			continue;

		/* 128-bit formats are unsupported */
		if (tex->surface.bpe > 8) {
			continue;
		}

		/* ensure CMASK is enabled */
		r600_texture_alloc_cmask_separate(rctx->screen, tex);
		if (tex->cmask.size == 0) {
			continue;
		}

		/* Do the fast clear. */
		rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b,
				   tex->cmask.offset, tex->cmask.size, 0,
				   R600_COHERENCY_CB_META);

		bool need_compressed_update = !tex->dirty_level_mask;

		tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;

		if (need_compressed_update)
			p_atomic_inc(&rctx->screen->compressed_colortex_counter);

		evergreen_set_clear_color(tex, fb->cbufs[i]->format, color);

		if (dirty_cbufs)
			*dirty_cbufs |= 1 << i;
		rctx->set_atom_dirty(rctx, fb_state, true);
		*buffers &= ~clear_bit;
	}
}

static struct pipe_memory_object *
r600_memobj_from_handle(struct pipe_screen *screen,
			struct winsys_handle *whandle,
			bool dedicated)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_memory_object *memobj = CALLOC_STRUCT(r600_memory_object);
	struct pb_buffer *buf = NULL;
	uint32_t stride, offset;

	if (!memobj)
		return NULL;

	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle,
					      &stride, &offset);
	if (!buf) {
		free(memobj);
		return NULL;
	}

	memobj->b.dedicated = dedicated;
	memobj->buf = buf;
	memobj->stride = stride;
	memobj->offset = offset;

	return (struct pipe_memory_object *)memobj;
}

static void
r600_memobj_destroy(struct pipe_screen *screen,
		    struct pipe_memory_object *_memobj)
{
	struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;

	pb_reference(&memobj->buf, NULL);
	free(memobj);
}

static struct pipe_resource *
r600_texture_from_memobj(struct pipe_screen *screen,
			 const struct pipe_resource *templ,
			 struct pipe_memory_object *_memobj,
			 uint64_t offset)
{
	int r;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
	struct r600_texture *rtex;
	struct radeon_surf surface = {};
	struct radeon_bo_metadata metadata = {};
	enum radeon_surf_mode array_mode;
	bool is_scanout;
	struct pb_buffer *buf = NULL;

	if (memobj->b.dedicated) {
		rscreen->ws->buffer_get_metadata(memobj->buf, &metadata);
		r600_surface_import_metadata(rscreen, &surface, &metadata,
					     &array_mode, &is_scanout);
	} else {
		/**
		 * The bo metadata is unset for un-dedicated images. So we fall
		 * back to linear. See answer to question 5 of the
		 * VK_KHX_external_memory spec for some details.
		 *
		 * It is possible that this case isn't going to work if the
		 * surface pitch isn't correctly aligned by default.
		 *
		 * In order to support it correctly we require multi-image
		 * metadata to be synchronized between radv and radeonsi. The
		 * semantics of associating multiple image metadata to a memory
		 * object on the vulkan export side are not concretely defined
		 * either.
		 *
		 * All the use cases we are aware of at the moment for memory
		 * objects use dedicated allocations. So let's keep the initial
		 * implementation simple.
		 *
		 * A possible alternative is to attempt to reconstruct the
		 * tiling information when the TexParameter TEXTURE_TILING_EXT
		 * is set.
		 */
		array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
		is_scanout = false;
	}

	r = r600_init_surface(rscreen, &surface, templ,
			      array_mode, memobj->stride,
			      offset, true, is_scanout,
			      false);
	if (r)
		return NULL;

	rtex = r600_texture_create_object(screen, templ, memobj->buf, &surface);
	if (!rtex)
		return NULL;

	/* r600_texture_create_object doesn't increment refcount of
	 * memobj->buf, so increment it here.
	 */
	pb_reference(&buf, memobj->buf);

	rtex->resource.b.is_shared = true;
	rtex->resource.external_usage = PIPE_HANDLE_USAGE_READ_WRITE;

	if (rscreen->apply_opaque_metadata)
		rscreen->apply_opaque_metadata(rscreen, rtex, &metadata);

	return &rtex->resource.b.b;
}

void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.resource_from_handle = r600_texture_from_handle;
	rscreen->b.resource_get_handle = r600_texture_get_handle;
	rscreen->b.resource_from_memobj = r600_texture_from_memobj;
	rscreen->b.memobj_create_from_handle = r600_memobj_from_handle;
	rscreen->b.memobj_destroy = r600_memobj_destroy;
}

void r600_init_context_texture_functions(struct r600_common_context *rctx)
{
	rctx->b.create_surface = r600_create_surface;
	rctx->b.surface_destroy = r600_surface_destroy;
	rctx->b.clear_texture = r600_clear_texture;
}