/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_format.h"
#include "util/u_format_rgtc.h"
#include "util/u_format_zs.h"
#include "util/u_inlines.h"
#include "util/u_transfer.h"
#include "util/u_string.h"
#include "util/u_surface.h"

#include "freedreno_resource.h"
#include "freedreno_batch_cache.h"
#include "freedreno_fence.h"
#include "freedreno_screen.h"
#include "freedreno_surface.h"
#include "freedreno_context.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

/* XXX this should go away, needed for 'struct winsys_handle' */
#include "state_tracker/drm_driver.h"

/**
 * Go through the entire state and see if the resource is bound
 * anywhere. If it is, mark the relevant state as dirty. This is
 * called on realloc_bo to ensure the necessary state is re-
 * emitted so the GPU looks at the new backing bo.
 */
static void
rebind_resource(struct fd_context *ctx, struct pipe_resource *prsc)
{
	/* VBOs */
	for (unsigned i = 0; i < ctx->vtx.vertexbuf.count &&
			!(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
		if (ctx->vtx.vertexbuf.vb[i].buffer.resource == prsc)
			ctx->dirty |= FD_DIRTY_VTXBUF;
	}

	/* per-shader-stage resources: */
	for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
		/* Constbufs.. note that constbuf[0] is normal uniforms emitted in
		 * cmdstream rather than by pointer..
		 */
		const unsigned num_ubos = util_last_bit(ctx->constbuf[stage].enabled_mask);
		for (unsigned i = 1; i < num_ubos; i++) {
			if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_CONST)
				break;
			if (ctx->constbuf[stage].cb[i].buffer == prsc)
				ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_CONST;
		}

		/* Textures */
		for (unsigned i = 0; i < ctx->tex[stage].num_textures; i++) {
			if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_TEX)
				break;
			if (ctx->tex[stage].textures[i] &&
					(ctx->tex[stage].textures[i]->texture == prsc))
				ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_TEX;
		}

		/* SSBOs */
		const unsigned num_ssbos = util_last_bit(ctx->shaderbuf[stage].enabled_mask);
		for (unsigned i = 0; i < num_ssbos; i++) {
			if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_SSBO)
				break;
			if (ctx->shaderbuf[stage].sb[i].buffer == prsc)
				ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_SSBO;
		}
	}
}

static void
realloc_bo(struct fd_resource *rsc, uint32_t size)
{
	struct fd_screen *screen = fd_screen(rsc->base.screen);
	uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
			DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */

	/* if we start using things other than write-combine,
	 * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
	 */

	if (rsc->bo)
		fd_bo_del(rsc->bo);

	rsc->bo = fd_bo_new(screen->dev, size, flags);
	rsc->seqno = p_atomic_inc_return(&screen->rsc_seqno);
	util_range_set_empty(&rsc->valid_buffer_range);
	fd_bc_invalidate_resource(rsc, true);
}
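
/* Note that realloc_bo() only swaps out the backing storage; any state
 * that points at the old bo must be re-emitted.  Callers therefore pair
 * it with rebind_resource(), as the DISCARD_WHOLE_RESOURCE path in
 * fd_resource_transfer_map() below does:
 *
 *    realloc_bo(rsc, fd_bo_size(rsc->bo));   // fresh, idle backing bo
 *    rebind_resource(ctx, prsc);             // flag bound state dirty
 */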

static void
do_blit(struct fd_context *ctx, const struct pipe_blit_info *blit, bool fallback)
{
	/* TODO size threshold too?? */
	if (!fallback) {
		/* do blit on gpu: */
		fd_blitter_pipe_begin(ctx, false, true, FD_STAGE_BLIT);
		ctx->blit(ctx, blit);
		fd_blitter_pipe_end(ctx);
	} else {
		/* do blit on cpu: */
		util_resource_copy_region(&ctx->base,
				blit->dst.resource, blit->dst.level, blit->dst.box.x,
				blit->dst.box.y, blit->dst.box.z,
				blit->src.resource, blit->src.level, &blit->src.box);
	}
}
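
/* do_blit() is the single funnel for the back-blits used by shadowing and
 * staging below: with fallback=false it routes through the context's 3d
 * blit path (bracketed by fd_blitter_pipe_begin()/end()), while
 * fallback=true drops to util_resource_copy_region(), whose CPU copy
 * transfer_maps both resources and can therefore recurse back into
 * fd_resource_transfer_map().
 */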

static bool
fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
		unsigned level, const struct pipe_box *box)
{
	struct pipe_context *pctx = &ctx->base;
	struct pipe_resource *prsc = &rsc->base;
	bool fallback = false;

	if (prsc->next)
		return false;

	/* TODO: somehow munge dimensions and format to copy unsupported
	 * render target format to something that is supported?
	 */
	if (!pctx->screen->is_format_supported(pctx->screen,
			prsc->format, prsc->target, prsc->nr_samples,
			prsc->nr_storage_samples,
			PIPE_BIND_RENDER_TARGET))
		fallback = true;

	/* do shadowing back-blits on the cpu for buffers: */
	if (prsc->target == PIPE_BUFFER)
		fallback = true;

	bool whole_level = util_texrange_covers_whole_level(prsc, level,
		box->x, box->y, box->z, box->width, box->height, box->depth);

	/* TODO need to be more clever about current level */
	if ((prsc->target >= PIPE_TEXTURE_2D) && !whole_level)
		return false;

	struct pipe_resource *pshadow =
		pctx->screen->resource_create(pctx->screen, prsc);

	if (!pshadow)
		return false;

	assert(!ctx->in_shadow);
	ctx->in_shadow = true;

	/* get rid of any references that batch-cache might have to us (which
	 * should empty/destroy rsc->batches hashset)
	 */
	fd_bc_invalidate_resource(rsc, false);

	mtx_lock(&ctx->screen->lock);

	/* Swap the backing bo's, so shadow becomes the old buffer,
	 * blit from shadow to new buffer.  From here on out, we
	 * cannot fail.
	 *
	 * Note that we need to do it in this order, otherwise if
	 * we go down cpu blit path, the recursive transfer_map()
	 * sees the wrong status..
	 */
	struct fd_resource *shadow = fd_resource(pshadow);

	DBG("shadow: %p (%d) -> %p (%d)\n", rsc, rsc->base.reference.count,
			shadow, shadow->base.reference.count);

	/* TODO valid_buffer_range?? */
	swap(rsc->bo, shadow->bo);
	swap(rsc->write_batch, shadow->write_batch);
	rsc->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);

	/* at this point, the newly created shadow buffer is not referenced
	 * by any batches, but the existing rsc (probably) is.  We need to
	 * transfer those references over:
	 */
	debug_assert(shadow->batch_mask == 0);
	struct fd_batch *batch;
	foreach_batch(batch, &ctx->screen->batch_cache, rsc->batch_mask) {
		struct set_entry *entry = _mesa_set_search(batch->resources, rsc);
		_mesa_set_remove(batch->resources, entry);
		_mesa_set_add(batch->resources, shadow);
	}
	swap(rsc->batch_mask, shadow->batch_mask);

	mtx_unlock(&ctx->screen->lock);

	struct pipe_blit_info blit = {};
	blit.dst.resource = prsc;
	blit.dst.format   = prsc->format;
	blit.src.resource = pshadow;
	blit.src.format   = pshadow->format;
	blit.mask = util_format_get_mask(prsc->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

#define set_box(field, val) do {     \
		blit.dst.field = (val);      \
		blit.src.field = (val);      \
	} while (0)

	/* blit the other levels in their entirety: */
	for (unsigned l = 0; l <= prsc->last_level; l++) {
		if (l == level)
			continue;

		/* just blit whole level: */
		set_box(level, l);
		set_box(box.width,  u_minify(prsc->width0, l));
		set_box(box.height, u_minify(prsc->height0, l));
		set_box(box.depth,  u_minify(prsc->depth0, l));

		do_blit(ctx, &blit, fallback);
	}

	/* deal w/ current level specially, since we might need to split
	 * it up into a couple blits:
	 */
	if (!whole_level) {
		set_box(level, level);

		switch (prsc->target) {
		case PIPE_BUFFER:
		case PIPE_TEXTURE_1D:
			set_box(box.y, 0);
			set_box(box.z, 0);
			set_box(box.height, 1);
			set_box(box.depth, 1);

			if (box->x > 0) {
				set_box(box.x, 0);
				set_box(box.width, box->x);

				do_blit(ctx, &blit, fallback);
			}
			if ((box->x + box->width) < u_minify(prsc->width0, level)) {
				set_box(box.x, box->x + box->width);
				set_box(box.width, u_minify(prsc->width0, level) - (box->x + box->width));

				do_blit(ctx, &blit, fallback);
			}
			break;
		case PIPE_TEXTURE_2D:
			/* TODO */
		default:
			unreachable("TODO");
		}
	}

	ctx->in_shadow = false;

	pipe_resource_reference(&pshadow, NULL);

	return true;
}
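
/* In short, the shadow dance above is:
 *
 *    pshadow = resource_create(prsc)      // same template as rsc
 *    swap(rsc->bo, shadow->bo)            // rsc gets a fresh, idle bo
 *    ...move batch-cache references...    // pending batches now point at
 *                                         // the shadow (old storage)
 *    blit(dst=rsc, src=pshadow)           // copy back everything except
 *                                         // the box being discarded
 *
 * so pending rendering keeps reading the old storage while new uploads
 * land in an idle bo, avoiding a flush/stall in transfer_map.
 */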

static struct fd_resource *
fd_alloc_staging(struct fd_context *ctx, struct fd_resource *rsc,
		unsigned level, const struct pipe_box *box)
{
	struct pipe_context *pctx = &ctx->base;
	struct pipe_resource tmpl = rsc->base;

	tmpl.width0  = box->width;
	tmpl.height0 = box->height;
	tmpl.depth0  = box->depth;
	tmpl.array_size = 1;
	tmpl.last_level = 0;
	tmpl.bind |= PIPE_BIND_LINEAR;

	struct pipe_resource *pstaging =
		pctx->screen->resource_create(pctx->screen, &tmpl);
	if (!pstaging)
		return NULL;

	return fd_resource(pstaging);
}
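
/* A staging resource created here is only used through fd_transfer::
 * staging_prsc: transfer_map hands the caller a mapping of the (linear)
 * staging bo, optionally pre-filled via fd_blit_to_staging() for reads,
 * and transfer_unmap writes it back with fd_blit_from_staging() before
 * dropping the reference.
 */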

static void
fd_blit_from_staging(struct fd_context *ctx, struct fd_transfer *trans)
{
	struct pipe_resource *dst = trans->base.resource;
	struct pipe_blit_info blit = {};

	blit.dst.resource = dst;
	blit.dst.format   = dst->format;
	blit.dst.level    = trans->base.level;
	blit.dst.box      = trans->base.box;
	blit.src.resource = trans->staging_prsc;
	blit.src.format   = trans->staging_prsc->format;
	blit.src.level    = 0;
	blit.src.box      = trans->staging_box;
	blit.mask = util_format_get_mask(trans->staging_prsc->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	do_blit(ctx, &blit, false);
}

static void
fd_blit_to_staging(struct fd_context *ctx, struct fd_transfer *trans)
{
	struct pipe_resource *src = trans->base.resource;
	struct pipe_blit_info blit = {};

	blit.src.resource = src;
	blit.src.format   = src->format;
	blit.src.level    = trans->base.level;
	blit.src.box      = trans->base.box;
	blit.dst.resource = trans->staging_prsc;
	blit.dst.format   = trans->staging_prsc->format;
	blit.dst.level    = 0;
	blit.dst.box      = trans->staging_box;
	blit.mask = util_format_get_mask(trans->staging_prsc->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	do_blit(ctx, &blit, false);
}

static unsigned
fd_resource_layer_offset(struct fd_resource *rsc,
		struct fd_resource_slice *slice,
		unsigned layer)
{
	if (rsc->layer_first)
		return layer * rsc->layer_size;
	else
		return layer * slice->size0;
}

static void fd_resource_transfer_flush_region(struct pipe_context *pctx,
		struct pipe_transfer *ptrans,
		const struct pipe_box *box)
{
	struct fd_resource *rsc = fd_resource(ptrans->resource);

	if (ptrans->resource->target == PIPE_BUFFER)
		util_range_add(&rsc->valid_buffer_range,
					   ptrans->box.x + box->x,
					   ptrans->box.x + box->x + box->width);
}
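
/* valid_buffer_range is what lets transfer_map skip synchronization for
 * writes into never-written parts of a buffer; roughly the check it
 * performs (see fd_resource_transfer_map() below):
 *
 *    if ((usage & PIPE_TRANSFER_WRITE) && prsc->target == PIPE_BUFFER &&
 *        !util_ranges_intersect(&rsc->valid_buffer_range,
 *                               box->x, box->x + box->width))
 *        ... map directly, no flush or stall needed ...
 */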

static void
flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
{
	struct fd_batch *write_batch = NULL;

	fd_batch_reference(&write_batch, rsc->write_batch);

	if (usage & PIPE_TRANSFER_WRITE) {
		struct fd_batch *batch, *batches[32] = {};
		uint32_t batch_mask;

		/* This is a bit awkward, probably a fd_batch_flush_locked()
		 * would make things simpler.. but we need to hold the lock
		 * to iterate the batches which reference this resource.  So
		 * we must first grab references under a lock, then flush.
		 */
		mtx_lock(&ctx->screen->lock);
		batch_mask = rsc->batch_mask;
		foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
			fd_batch_reference(&batches[batch->idx], batch);
		mtx_unlock(&ctx->screen->lock);

		foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
			fd_batch_flush(batch, false, false);

		foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
			fd_batch_sync(batch);
			fd_batch_reference(&batches[batch->idx], NULL);
		}
		assert(rsc->batch_mask == 0);
	} else if (write_batch) {
		fd_batch_flush(write_batch, true, false);
	}

	fd_batch_reference(&write_batch, NULL);

	assert(!rsc->write_batch);
}

static void
fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
	flush_resource(fd_context(pctx), fd_resource(prsc), PIPE_TRANSFER_READ);
}

static void
fd_resource_transfer_unmap(struct pipe_context *pctx,
		struct pipe_transfer *ptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(ptrans->resource);
	struct fd_transfer *trans = fd_transfer(ptrans);

	if (trans->staging_prsc) {
		if (ptrans->usage & PIPE_TRANSFER_WRITE)
			fd_blit_from_staging(ctx, trans);
		pipe_resource_reference(&trans->staging_prsc, NULL);
	}

	if (!(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		fd_bo_cpu_fini(rsc->bo);
	}

	util_range_add(&rsc->valid_buffer_range,
				   ptrans->box.x,
				   ptrans->box.x + ptrans->box.width);

	pipe_resource_reference(&ptrans->resource, NULL);
	slab_free(&ctx->transfer_pool, ptrans);
}

static void *
fd_resource_transfer_map(struct pipe_context *pctx,
		struct pipe_resource *prsc,
		unsigned level, unsigned usage,
		const struct pipe_box *box,
		struct pipe_transfer **pptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(prsc);
	struct fd_resource_slice *slice = fd_resource_slice(rsc, level);
	struct fd_transfer *trans;
	struct pipe_transfer *ptrans;
	enum pipe_format format = prsc->format;
	uint32_t op = 0;
	uint32_t offset;
	char *buf;
	int ret = 0;

	DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
		box->width, box->height, box->x, box->y);

	ptrans = slab_alloc(&ctx->transfer_pool);
	if (!ptrans)
		return NULL;

	/* slab_alloc_st() doesn't zero: */
	trans = fd_transfer(ptrans);
	memset(trans, 0, sizeof(*trans));

	pipe_resource_reference(&ptrans->resource, prsc);
	ptrans->level = level;
	ptrans->usage = usage;
	ptrans->box = *box;
	ptrans->stride = util_format_get_nblocksx(format, slice->pitch) * rsc->cpp;
	ptrans->layer_stride = rsc->layer_first ? rsc->layer_size : slice->size0;

	/* we always need a staging texture for tiled buffers:
	 *
	 * TODO we might sometimes want to *also* shadow the resource to avoid
	 * splitting a batch.. for ex, mid-frame texture uploads to a tiled
	 * texture.
	 */
	if (rsc->tile_mode) {
		struct fd_resource *staging_rsc;

		staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
		if (staging_rsc) {
			// TODO for PIPE_TRANSFER_READ, need to do untiling blit..
			trans->staging_prsc = &staging_rsc->base;
			trans->base.stride = util_format_get_nblocksx(format,
				staging_rsc->slices[0].pitch) * staging_rsc->cpp;
			trans->base.layer_stride = staging_rsc->layer_first ?
				staging_rsc->layer_size : staging_rsc->slices[0].size0;
			trans->staging_box = *box;
			trans->staging_box.x = 0;
			trans->staging_box.y = 0;
			trans->staging_box.z = 0;

			if (usage & PIPE_TRANSFER_READ) {
				fd_blit_to_staging(ctx, trans);
				fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);
			}

			buf = fd_bo_map(staging_rsc->bo);
			offset = 0;

			*pptrans = ptrans;

			ctx->stats.staging_uploads++;

			return buf;
		}
	}

	if (ctx->in_shadow && !(usage & PIPE_TRANSFER_READ))
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

	if (usage & PIPE_TRANSFER_READ)
		op |= DRM_FREEDRENO_PREP_READ;

	if (usage & PIPE_TRANSFER_WRITE)
		op |= DRM_FREEDRENO_PREP_WRITE;

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
		realloc_bo(rsc, fd_bo_size(rsc->bo));
		rebind_resource(ctx, prsc);
	} else if ((usage & PIPE_TRANSFER_WRITE) &&
			   prsc->target == PIPE_BUFFER &&
			   !util_ranges_intersect(&rsc->valid_buffer_range,
					   box->x, box->x + box->width)) {
		/* We are trying to write to a previously uninitialized range. No need
		 * to wait.
		 */
	} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		struct fd_batch *write_batch = NULL;

		/* hold a reference, so it doesn't disappear under us: */
		fd_batch_reference(&write_batch, rsc->write_batch);

		if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
				write_batch->back_blit) {
			/* if only thing pending is a back-blit, we can discard it: */
			fd_batch_reset(write_batch);
		}

		/* If the GPU is writing to the resource, or if it is reading from the
		 * resource and we're trying to write to it, flush the renders.
		 */
		bool needs_flush = pending(rsc, !!(usage & PIPE_TRANSFER_WRITE));
		bool busy = needs_flush || (0 != fd_bo_cpu_prep(rsc->bo,
				ctx->pipe, op | DRM_FREEDRENO_PREP_NOSYNC));

		/* if we need to flush/stall, see if we can make a shadow buffer
		 * to avoid this:
		 *
		 * TODO we could go down this path !reorder && !busy_for_read
		 * ie. we only *don't* want to go down this path if the blit
		 * will trigger a flush!
		 */
		if (ctx->screen->reorder && busy && !(usage & PIPE_TRANSFER_READ) &&
				(usage & PIPE_TRANSFER_DISCARD_RANGE)) {
			/* try shadowing only if it avoids a flush, otherwise staging would
			 * be better:
			 */
			if (needs_flush && fd_try_shadow_resource(ctx, rsc, level, box)) {
				needs_flush = busy = false;
				rebind_resource(ctx, prsc);
				ctx->stats.shadow_uploads++;
			} else {
				struct fd_resource *staging_rsc;

				if (needs_flush) {
					flush_resource(ctx, rsc, usage);
					needs_flush = false;
				}

				/* in this case, we don't need to shadow the whole resource,
				 * since any draw that references the previous contents has
				 * already had rendering flushed for all tiles.  So we can
				 * use a staging buffer to do the upload.
				 */
				staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
				if (staging_rsc) {
					trans->staging_prsc = &staging_rsc->base;
					trans->base.stride = util_format_get_nblocksx(format,
						staging_rsc->slices[0].pitch) * staging_rsc->cpp;
					trans->base.layer_stride = staging_rsc->layer_first ?
						staging_rsc->layer_size : staging_rsc->slices[0].size0;
					trans->staging_box = *box;
					trans->staging_box.x = 0;
					trans->staging_box.y = 0;
					trans->staging_box.z = 0;
					buf = fd_bo_map(staging_rsc->bo);
					offset = 0;

					*pptrans = ptrans;

					fd_batch_reference(&write_batch, NULL);

					ctx->stats.staging_uploads++;

					return buf;
				}
			}
		}

		if (needs_flush) {
			flush_resource(ctx, rsc, usage);
			needs_flush = false;
		}

		fd_batch_reference(&write_batch, NULL);

		/* The GPU keeps track of how the various bo's are being used, and
		 * will wait if necessary for the proper operation to have
		 * completed.
		 */
		ret = fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);
		if (ret)
			goto fail;
	}

	buf = fd_bo_map(rsc->bo);
	offset = slice->offset +
		box->y / util_format_get_blockheight(format) * ptrans->stride +
		box->x / util_format_get_blockwidth(format) * rsc->cpp +
		fd_resource_layer_offset(rsc, slice, box->z);

	if (usage & PIPE_TRANSFER_WRITE)
		rsc->valid = true;

	*pptrans = ptrans;

	return buf + offset;

fail:
	fd_resource_transfer_unmap(pctx, ptrans);
	return NULL;
}
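
/* To summarize the decision tree in fd_resource_transfer_map() above:
 *
 *   1. tiled resource               -> linear staging bo, blit to/from
 *   2. DISCARD_WHOLE_RESOURCE       -> realloc_bo() + rebind_resource()
 *   3. write to an uninitialized
 *      range of a buffer            -> map directly, no sync
 *   4. busy + DISCARD_RANGE         -> shadow (if it avoids a flush),
 *                                      else flush + staging bo
 *   5. otherwise                    -> flush pending batches if needed,
 *                                      then stall in fd_bo_cpu_prep()
 */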

static void
fd_resource_destroy(struct pipe_screen *pscreen,
		struct pipe_resource *prsc)
{
	struct fd_resource *rsc = fd_resource(prsc);
	fd_bc_invalidate_resource(rsc, true);
	if (rsc->bo)
		fd_bo_del(rsc->bo);
	util_range_destroy(&rsc->valid_buffer_range);
	FREE(rsc);
}

static boolean
fd_resource_get_handle(struct pipe_screen *pscreen,
		struct pipe_context *pctx,
		struct pipe_resource *prsc,
		struct winsys_handle *handle,
		unsigned usage)
{
	struct fd_resource *rsc = fd_resource(prsc);

	return fd_screen_bo_get_handle(pscreen, rsc->bo,
			rsc->slices[0].pitch * rsc->cpp, handle);
}

static uint32_t
setup_slices(struct fd_resource *rsc, uint32_t alignment, enum pipe_format format)
{
	struct pipe_resource *prsc = &rsc->base;
	struct fd_screen *screen = fd_screen(prsc->screen);
	enum util_format_layout layout = util_format_description(format)->layout;
	uint32_t pitchalign = screen->gmem_alignw;
	uint32_t level, size = 0;
	uint32_t width = prsc->width0;
	uint32_t height = prsc->height0;
	uint32_t depth = prsc->depth0;
	/* in layer_first layout, the level (slice) contains just one
	 * layer (since in fact the layer contains the slices)
	 */
	uint32_t layers_in_level = rsc->layer_first ? 1 : prsc->array_size;

	for (level = 0; level <= prsc->last_level; level++) {
		struct fd_resource_slice *slice = fd_resource_slice(rsc, level);
		uint32_t blocks;

		if (layout == UTIL_FORMAT_LAYOUT_ASTC)
			slice->pitch = width =
				util_align_npot(width, pitchalign * util_format_get_blockwidth(format));
		else
			slice->pitch = width = align(width, pitchalign);
		slice->offset = size;
		blocks = util_format_get_nblocks(format, width, height);
		/* 1d array and 2d array textures must all have the same layer size
		 * for each miplevel on a3xx. 3d textures can have different layer
		 * sizes for high levels, but the hw auto-sizer is buggy (or at least
		 * different than what this code does), so as soon as the layer size
		 * range gets into range, we stop reducing it.
		 */
		if (prsc->target == PIPE_TEXTURE_3D && (
					level == 1 ||
					(level > 1 && rsc->slices[level - 1].size0 > 0xf000)))
			slice->size0 = align(blocks * rsc->cpp, alignment);
		else if (level == 0 || rsc->layer_first || alignment == 1)
			slice->size0 = align(blocks * rsc->cpp, alignment);
		else
			slice->size0 = rsc->slices[level - 1].size0;

		size += slice->size0 * depth * layers_in_level;

		width = u_minify(width, 1);
		height = u_minify(height, 1);
		depth = u_minify(depth, 1);
	}

	return size;
}
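
/* A worked example of the loop above, assuming a hypothetical screen with
 * gmem_alignw == 32 (the value is per-generation): a 67x35
 * PIPE_FORMAT_B8G8R8A8_UNORM (cpp = 4) 2D texture with two miplevels gets
 * alignment == 1 from slice_alignment(), so:
 *
 *   level 0: pitch = align(67, 32) = 96,          size0 = 96*35*4 = 13440
 *   level 1: pitch = align(u_minify(96, 1), 32)
 *                  = align(48, 32)      = 64,     size0 = 64*17*4 = 4352
 *
 * for a total bo size of 17792 bytes.  Note the level 1 width is minified
 * from the *aligned* level 0 pitch, since 'width' is clobbered above.
 */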

static uint32_t
slice_alignment(enum pipe_texture_target target)
{
	/* on a3xx, 2d array and 3d textures seem to want their
	 * layers aligned to page boundaries:
	 */
	switch (target) {
	case PIPE_TEXTURE_3D:
	case PIPE_TEXTURE_1D_ARRAY:
	case PIPE_TEXTURE_2D_ARRAY:
		return 4096;
	default:
		return 1;
	}
}

/* cross generation texture layout to plug in to screen->setup_slices()..
 * replace with generation specific one as-needed.
 *
 * TODO for a4xx probably can extract out the a4xx specific logic into
 * a small fd4_setup_slices() wrapper that sets up layer_first, and then
 * calls this.
 */
uint32_t
fd_setup_slices(struct fd_resource *rsc)
{
	uint32_t alignment;

	alignment = slice_alignment(rsc->base.target);

	struct fd_screen *screen = fd_screen(rsc->base.screen);
	if (is_a4xx(screen)) {
		switch (rsc->base.target) {
		case PIPE_TEXTURE_3D:
			rsc->layer_first = false;
			break;
		default:
			rsc->layer_first = true;
			alignment = 1;
			break;
		}
	}

	return setup_slices(rsc, alignment, rsc->base.format);
}

/* special case to resize query buf after allocated.. */
void
fd_resource_resize(struct pipe_resource *prsc, uint32_t sz)
{
	struct fd_resource *rsc = fd_resource(prsc);

	debug_assert(prsc->width0 == 0);
	debug_assert(prsc->target == PIPE_BUFFER);
	debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);

	prsc->width0 = sz;
	realloc_bo(rsc, fd_screen(prsc->screen)->setup_slices(rsc));
}

// TODO common helper?
static bool
has_depth(enum pipe_format format)
{
	switch (format) {
	case PIPE_FORMAT_Z16_UNORM:
	case PIPE_FORMAT_Z32_UNORM:
	case PIPE_FORMAT_Z32_FLOAT:
	case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
	case PIPE_FORMAT_Z24_UNORM_S8_UINT:
	case PIPE_FORMAT_S8_UINT_Z24_UNORM:
	case PIPE_FORMAT_Z24X8_UNORM:
	case PIPE_FORMAT_X8Z24_UNORM:
		return true;
	default:
		return false;
	}
}

/**
 * Create a new texture object, using the given template info.
 */
static struct pipe_resource *
fd_resource_create(struct pipe_screen *pscreen,
		const struct pipe_resource *tmpl)
{
	struct fd_screen *screen = fd_screen(pscreen);
	struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
	struct pipe_resource *prsc = &rsc->base;
	enum pipe_format format = tmpl->format;
	uint32_t size;

	DBG("%p: target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
			"nr_samples=%u, usage=%u, bind=%x, flags=%x", prsc,
			tmpl->target, util_format_name(format),
			tmpl->width0, tmpl->height0, tmpl->depth0,
			tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
			tmpl->usage, tmpl->bind, tmpl->flags);

	if (!rsc)
		return NULL;

	*prsc = *tmpl;

#define LINEAR \
	(PIPE_BIND_SCANOUT | \
	 PIPE_BIND_LINEAR  | \
	 PIPE_BIND_DISPLAY_TARGET)

	if (screen->tile_mode &&
			(tmpl->target != PIPE_BUFFER) &&
			(tmpl->bind & PIPE_BIND_SAMPLER_VIEW) &&
			!(tmpl->bind & LINEAR)) {
		rsc->tile_mode = screen->tile_mode(tmpl);
	}

	pipe_reference_init(&prsc->reference, 1);

	prsc->screen = pscreen;

	util_range_init(&rsc->valid_buffer_range);

	rsc->internal_format = format;
	rsc->cpp = util_format_get_blocksize(format);
	prsc->nr_samples = MAX2(1, prsc->nr_samples);
	rsc->cpp *= prsc->nr_samples;

	assert(rsc->cpp);

	// XXX probably need some extra work if we hit rsc shadowing path w/ lrz..
	if ((is_a5xx(screen) || is_a6xx(screen)) &&
		 (fd_mesa_debug & FD_DBG_LRZ) && has_depth(format)) {
		const uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
				DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */
		unsigned lrz_pitch  = align(DIV_ROUND_UP(tmpl->width0, 8), 64);
		unsigned lrz_height = DIV_ROUND_UP(tmpl->height0, 8);
		unsigned size = lrz_pitch * lrz_height * 2;

		size += 0x1000; /* for GRAS_LRZ_FAST_CLEAR_BUFFER */

		rsc->lrz_height = lrz_height;
		rsc->lrz_width = lrz_pitch;
		rsc->lrz_pitch = lrz_pitch;
		rsc->lrz = fd_bo_new(screen->dev, size, flags);
	}

	size = screen->setup_slices(rsc);

	/* special case for hw-query buffer, which we need to allocate before we
	 * know the size:
	 */
	if (size == 0) {
		/* note, semi-intentional == instead of & */
		debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
		return prsc;
	}

	if (rsc->layer_first) {
		rsc->layer_size = align(size, 4096);
		size = rsc->layer_size * prsc->array_size;
	}

	realloc_bo(rsc, size);
	if (!rsc->bo)
		goto fail;

	return prsc;
fail:
	fd_resource_destroy(pscreen, prsc);
	return NULL;
}
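
/* For reference, the LRZ sizing above works at 8x8 pixel granularity; a
 * hypothetical 1920x1080 depth buffer gets
 *
 *   lrz_pitch  = align(DIV_ROUND_UP(1920, 8), 64) = align(240, 64) = 256
 *   lrz_height = DIV_ROUND_UP(1080, 8) = 135
 *   size       = 256 * 135 * 2 + 0x1000 = 69120 + 4096 bytes
 *
 * with the trailing page reserved for GRAS_LRZ_FAST_CLEAR_BUFFER.
 */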

/**
 * Create a texture from a winsys_handle. The handle is often created in
 * another process by first creating a pipe texture and then calling
 * resource_get_handle.
 */
static struct pipe_resource *
fd_resource_from_handle(struct pipe_screen *pscreen,
		const struct pipe_resource *tmpl,
		struct winsys_handle *handle, unsigned usage)
{
	struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
	struct fd_resource_slice *slice = &rsc->slices[0];
	struct pipe_resource *prsc = &rsc->base;
	uint32_t pitchalign = fd_screen(pscreen)->gmem_alignw;

	DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
			"nr_samples=%u, usage=%u, bind=%x, flags=%x",
			tmpl->target, util_format_name(tmpl->format),
			tmpl->width0, tmpl->height0, tmpl->depth0,
			tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
			tmpl->usage, tmpl->bind, tmpl->flags);

	if (!rsc)
		return NULL;

	*prsc = *tmpl;

	pipe_reference_init(&prsc->reference, 1);

	prsc->screen = pscreen;

	util_range_init(&rsc->valid_buffer_range);

	rsc->bo = fd_screen_bo_from_handle(pscreen, handle);
	if (!rsc->bo)
		goto fail;

	prsc->nr_samples = MAX2(1, prsc->nr_samples);
	rsc->internal_format = tmpl->format;
	rsc->cpp = prsc->nr_samples * util_format_get_blocksize(tmpl->format);
	slice->pitch = handle->stride / rsc->cpp;
	slice->offset = handle->offset;
	slice->size0 = handle->stride * prsc->height0;

	if ((slice->pitch < align(prsc->width0, pitchalign)) ||
			(slice->pitch & (pitchalign - 1)))
		goto fail;

	assert(rsc->cpp);

	return prsc;

fail:
	fd_resource_destroy(pscreen, prsc);
	return NULL;
}

/**
 * _copy_region using pipe (3d engine)
 */
static bool
fd_blitter_pipe_copy_region(struct fd_context *ctx,
		struct pipe_resource *dst,
		unsigned dst_level,
		unsigned dstx, unsigned dsty, unsigned dstz,
		struct pipe_resource *src,
		unsigned src_level,
		const struct pipe_box *src_box)
{
	/* not until we allow rendertargets to be buffers */
	if (dst->target == PIPE_BUFFER || src->target == PIPE_BUFFER)
		return false;

	if (!util_blitter_is_copy_supported(ctx->blitter, dst, src))
		return false;

	/* TODO we could discard if dst box covers dst level fully.. */
	fd_blitter_pipe_begin(ctx, false, false, FD_STAGE_BLIT);
	util_blitter_copy_texture(ctx->blitter,
			dst, dst_level, dstx, dsty, dstz,
			src, src_level, src_box);
	fd_blitter_pipe_end(ctx);

	return true;
}
978 * The resource must be of the same format.
979 * Resources with nr_samples > 1 are not allowed.
982 fd_resource_copy_region(struct pipe_context
*pctx
,
983 struct pipe_resource
*dst
,
985 unsigned dstx
, unsigned dsty
, unsigned dstz
,
986 struct pipe_resource
*src
,
988 const struct pipe_box
*src_box
)
990 struct fd_context
*ctx
= fd_context(pctx
);
992 /* TODO if we have 2d core, or other DMA engine that could be used
993 * for simple copies and reasonably easily synchronized with the 3d
994 * core, this is where we'd plug it in..
997 /* try blit on 3d pipe: */
998 if (fd_blitter_pipe_copy_region(ctx
,
999 dst
, dst_level
, dstx
, dsty
, dstz
,
1000 src
, src_level
, src_box
))
1003 /* else fallback to pure sw: */
1004 util_resource_copy_region(pctx
,
1005 dst
, dst_level
, dstx
, dsty
, dstz
,
1006 src
, src_level
, src_box
);

bool
fd_render_condition_check(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);

	if (!ctx->cond_query)
		return true;

	union pipe_query_result res = { 0 };
	bool wait =
		ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
		ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

	if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
		return (bool)res.u64 != ctx->cond_cond;

	return true;
}

/**
 * Optimal hardware path for blitting pixels.
 * Scaling, format conversion, up- and downsampling (resolve) are allowed.
 */
static void
fd_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_blit_info info = *blit_info;
	bool discard = false;

	if (info.render_condition_enable && !fd_render_condition_check(pctx))
		return;

	if (!info.scissor_enable && !info.alpha_blend) {
		discard = util_texrange_covers_whole_level(info.dst.resource,
				info.dst.level, info.dst.box.x, info.dst.box.y,
				info.dst.box.z, info.dst.box.width,
				info.dst.box.height, info.dst.box.depth);
	}

	if (util_try_blit_via_copy_region(pctx, &info)) {
		return; /* done */
	}

	if (info.mask & PIPE_MASK_S) {
		DBG("cannot blit stencil, skipping");
		info.mask &= ~PIPE_MASK_S;
	}

	if (!util_blitter_is_blit_supported(ctx->blitter, &info)) {
		DBG("blit unsupported %s -> %s",
				util_format_short_name(info.src.resource->format),
				util_format_short_name(info.dst.resource->format));
		return;
	}

	fd_blitter_pipe_begin(ctx, info.render_condition_enable, discard, FD_STAGE_BLIT);
	ctx->blit(ctx, &info);
	fd_blitter_pipe_end(ctx);
}

void
fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond, bool discard,
		enum fd_render_stage stage)
{
	fd_fence_ref(ctx->base.screen, &ctx->last_fence, NULL);

	util_blitter_save_fragment_constant_buffer_slot(ctx->blitter,
			ctx->constbuf[PIPE_SHADER_FRAGMENT].cb);
	util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->vtx.vertexbuf.vb);
	util_blitter_save_vertex_elements(ctx->blitter, ctx->vtx.vtx);
	util_blitter_save_vertex_shader(ctx->blitter, ctx->prog.vp);
	util_blitter_save_so_targets(ctx->blitter, ctx->streamout.num_targets,
			ctx->streamout.targets);
	util_blitter_save_rasterizer(ctx->blitter, ctx->rasterizer);
	util_blitter_save_viewport(ctx->blitter, &ctx->viewport);
	util_blitter_save_scissor(ctx->blitter, &ctx->scissor);
	util_blitter_save_fragment_shader(ctx->blitter, ctx->prog.fp);
	util_blitter_save_blend(ctx->blitter, ctx->blend);
	util_blitter_save_depth_stencil_alpha(ctx->blitter, ctx->zsa);
	util_blitter_save_stencil_ref(ctx->blitter, &ctx->stencil_ref);
	util_blitter_save_sample_mask(ctx->blitter, ctx->sample_mask);
	util_blitter_save_framebuffer(ctx->blitter, &ctx->framebuffer);
	util_blitter_save_fragment_sampler_states(ctx->blitter,
			ctx->tex[PIPE_SHADER_FRAGMENT].num_samplers,
			(void **)ctx->tex[PIPE_SHADER_FRAGMENT].samplers);
	util_blitter_save_fragment_sampler_views(ctx->blitter,
			ctx->tex[PIPE_SHADER_FRAGMENT].num_textures,
			ctx->tex[PIPE_SHADER_FRAGMENT].textures);
	if (!render_cond)
		util_blitter_save_render_condition(ctx->blitter,
			ctx->cond_query, ctx->cond_cond, ctx->cond_mode);

	if (ctx->batch)
		fd_batch_set_stage(ctx->batch, stage);

	ctx->in_blit = discard;
}

void
fd_blitter_pipe_end(struct fd_context *ctx)
{
	if (ctx->batch)
		fd_batch_set_stage(ctx->batch, FD_STAGE_NULL);
	ctx->in_blit = false;
}

static void
fd_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
	struct fd_resource *rsc = fd_resource(prsc);

	/*
	 * TODO I guess we could track that the resource is invalidated and
	 * use that as a hint to realloc rather than stall in _transfer_map(),
	 * even in the non-DISCARD_WHOLE_RESOURCE case?
	 */

	if (rsc->write_batch) {
		struct fd_batch *batch = rsc->write_batch;
		struct pipe_framebuffer_state *pfb = &batch->framebuffer;

		if (pfb->zsbuf && pfb->zsbuf->texture == prsc)
			batch->resolve &= ~(FD_BUFFER_DEPTH | FD_BUFFER_STENCIL);

		for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
			if (pfb->cbufs[i] && pfb->cbufs[i]->texture == prsc) {
				batch->resolve &= ~(PIPE_CLEAR_COLOR0 << i);
			}
		}
	}

	rsc->valid = false;
}

static enum pipe_format
fd_resource_get_internal_format(struct pipe_resource *prsc)
{
	return fd_resource(prsc)->internal_format;
}

static void
fd_resource_set_stencil(struct pipe_resource *prsc,
		struct pipe_resource *stencil)
{
	fd_resource(prsc)->stencil = fd_resource(stencil);
}

static struct pipe_resource *
fd_resource_get_stencil(struct pipe_resource *prsc)
{
	struct fd_resource *rsc = fd_resource(prsc);
	if (rsc->stencil)
		return &rsc->stencil->base;
	return NULL;
}

static const struct u_transfer_vtbl transfer_vtbl = {
	.resource_create          = fd_resource_create,
	.resource_destroy         = fd_resource_destroy,
	.transfer_map             = fd_resource_transfer_map,
	.transfer_flush_region    = fd_resource_transfer_flush_region,
	.transfer_unmap           = fd_resource_transfer_unmap,
	.get_internal_format      = fd_resource_get_internal_format,
	.set_stencil              = fd_resource_set_stencil,
	.get_stencil              = fd_resource_get_stencil,
};
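
/* The transfer entrypoints are not installed on the screen/context
 * directly; u_transfer_helper wraps them (handling fake RGTC, Z32F+S8
 * splitting and MSAA maps per the flags passed to u_transfer_helper_create()
 * below) and calls back into the driver through this vtbl, roughly:
 *
 *    pctx->transfer_map()                 // u_transfer_helper_transfer_map
 *        -> transfer_vtbl.transfer_map()  // fd_resource_transfer_map
 */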

void
fd_resource_screen_init(struct pipe_screen *pscreen)
{
	struct fd_screen *screen = fd_screen(pscreen);
	bool fake_rgtc = screen->gpu_id < 400;

	pscreen->resource_create = u_transfer_helper_resource_create;
	pscreen->resource_from_handle = fd_resource_from_handle;
	pscreen->resource_get_handle = fd_resource_get_handle;
	pscreen->resource_destroy = u_transfer_helper_resource_destroy;

	pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
			true, false, fake_rgtc, true);

	if (!screen->setup_slices)
		screen->setup_slices = fd_setup_slices;
}

void
fd_resource_context_init(struct pipe_context *pctx)
{
	pctx->transfer_map = u_transfer_helper_transfer_map;
	pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
	pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
	pctx->buffer_subdata = u_default_buffer_subdata;
	pctx->texture_subdata = u_default_texture_subdata;
	pctx->create_surface = fd_create_surface;
	pctx->surface_destroy = fd_surface_destroy;
	pctx->resource_copy_region = fd_resource_copy_region;
	pctx->blit = fd_blit;
	pctx->flush_resource = fd_flush_resource;
	pctx->invalidate_resource = fd_invalidate_resource;
}