/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_format.h"
#include "util/u_format_rgtc.h"
#include "util/u_format_zs.h"
#include "util/u_inlines.h"
#include "util/u_transfer.h"
#include "util/u_string.h"
#include "util/u_surface.h"

#include "freedreno_resource.h"
#include "freedreno_batch_cache.h"
#include "freedreno_blitter.h"
#include "freedreno_fence.h"
#include "freedreno_screen.h"
#include "freedreno_surface.h"
#include "freedreno_context.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

/* XXX this should go away, needed for 'struct winsys_handle' */
#include "state_tracker/drm_driver.h"

/**
 * Go through the entire state and see if the resource is bound
 * anywhere. If it is, mark the relevant state as dirty. This is
 * called on realloc_bo to ensure the necessary state is re-
 * emitted so the GPU looks at the new backing bo.
 */
static void
rebind_resource(struct fd_context *ctx, struct pipe_resource *prsc)
{
	/* VBOs */
	for (unsigned i = 0; i < ctx->vtx.vertexbuf.count &&
			!(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
		if (ctx->vtx.vertexbuf.vb[i].buffer.resource == prsc)
			ctx->dirty |= FD_DIRTY_VTXBUF;
	}

	/* per-shader-stage resources: */
	for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
		/* Constbufs.. note that constbuf[0] is normal uniforms emitted in
		 * cmdstream rather than by pointer..
		 */
		const unsigned num_ubos = util_last_bit(ctx->constbuf[stage].enabled_mask);
		for (unsigned i = 1; i < num_ubos; i++) {
			if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_CONST)
				break;
			if (ctx->constbuf[stage].cb[i].buffer == prsc)
				ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_CONST;
		}

		/* Textures */
		for (unsigned i = 0; i < ctx->tex[stage].num_textures; i++) {
			if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_TEX)
				break;
			if (ctx->tex[stage].textures[i] &&
					(ctx->tex[stage].textures[i]->texture == prsc))
				ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_TEX;
		}

		/* SSBOs */
		const unsigned num_ssbos = util_last_bit(ctx->shaderbuf[stage].enabled_mask);
		for (unsigned i = 0; i < num_ssbos; i++) {
			if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_SSBO)
				break;
			if (ctx->shaderbuf[stage].sb[i].buffer == prsc)
				ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_SSBO;
		}
	}
}
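
/* Example of the effect (illustrative values): if prsc is currently bound
 * as a vertex buffer and as a fragment-stage texture, the loops above
 * leave
 *   ctx->dirty                              |= FD_DIRTY_VTXBUF
 *   ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= FD_DIRTY_SHADER_TEX
 * set, so the next draw re-emits those bindings against the new bo.
 */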

static void
realloc_bo(struct fd_resource *rsc, uint32_t size)
{
	struct pipe_resource *prsc = &rsc->base;
	struct fd_screen *screen = fd_screen(rsc->base.screen);
	uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
			DRM_FREEDRENO_GEM_TYPE_KMEM |
			COND(prsc->bind & PIPE_BIND_SCANOUT, DRM_FREEDRENO_GEM_SCANOUT);
			/* TODO other flags? */

	/* if we start using things other than write-combine,
	 * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
	 */

	if (rsc->bo)
		fd_bo_del(rsc->bo);

	rsc->bo = fd_bo_new(screen->dev, size, flags, "%ux%ux%u@%u:%x",
			prsc->width0, prsc->height0, prsc->depth0, rsc->cpp, prsc->bind);
	rsc->seqno = p_atomic_inc_return(&screen->rsc_seqno);
	util_range_set_empty(&rsc->valid_buffer_range);
	fd_bc_invalidate_resource(rsc, true);
}
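
/* The printf-style name passed to fd_bo_new() above is only a debug label;
 * e.g. (values assumed) a 1920x1080 RGBA8888 scanout surface would be
 * tagged "1920x1080x1@4:<bind flags in hex>".
 */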

static void
do_blit(struct fd_context *ctx, const struct pipe_blit_info *blit, bool fallback)
{
	struct pipe_context *pctx = &ctx->base;

	/* TODO size threshold too?? */
	if (!fallback) {
		/* do blit on gpu: */
		pctx->blit(pctx, blit);
	} else {
		/* do blit on cpu: */
		util_resource_copy_region(pctx,
				blit->dst.resource, blit->dst.level, blit->dst.box.x,
				blit->dst.box.y, blit->dst.box.z,
				blit->src.resource, blit->src.level, &blit->src.box);
	}
}
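
/* Usage note: fallback=true routes the copy through the CPU path above;
 * fd_try_shadow_resource() below uses it when the format is not renderable
 * (and always for PIPE_BUFFER), at the cost of a recursive transfer_map()
 * inside util_resource_copy_region().
 */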

static bool
fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
		unsigned level, const struct pipe_box *box)
{
	struct pipe_context *pctx = &ctx->base;
	struct pipe_resource *prsc = &rsc->base;
	bool fallback = false;

	if (prsc->next)
		return false;

	/* TODO: somehow munge dimensions and format to copy unsupported
	 * render target format to something that is supported?
	 */
	if (!pctx->screen->is_format_supported(pctx->screen,
			prsc->format, prsc->target, prsc->nr_samples,
			prsc->nr_storage_samples,
			PIPE_BIND_RENDER_TARGET))
		fallback = true;

	/* do shadowing back-blits on the cpu for buffers: */
	if (prsc->target == PIPE_BUFFER)
		fallback = true;

	bool whole_level = util_texrange_covers_whole_level(prsc, level,
		box->x, box->y, box->z, box->width, box->height, box->depth);

	/* TODO need to be more clever about current level */
	if ((prsc->target >= PIPE_TEXTURE_2D) && !whole_level)
		return false;

	struct pipe_resource *pshadow =
		pctx->screen->resource_create(pctx->screen, prsc);

	if (!pshadow)
		return false;

	assert(!ctx->in_shadow);
	ctx->in_shadow = true;

	/* get rid of any references that batch-cache might have to us (which
	 * should empty/destroy rsc->batches hashset)
	 */
	fd_bc_invalidate_resource(rsc, false);

	mtx_lock(&ctx->screen->lock);

	/* Swap the backing bo's, so shadow becomes the old buffer,
	 * blit from shadow to new buffer.  From here on out, we
	 * cannot fail.
	 *
	 * Note that we need to do it in this order, otherwise if
	 * we go down cpu blit path, the recursive transfer_map()
	 * sees the wrong status..
	 */
	struct fd_resource *shadow = fd_resource(pshadow);

	DBG("shadow: %p (%d) -> %p (%d)\n", rsc, rsc->base.reference.count,
			shadow, shadow->base.reference.count);

	/* TODO valid_buffer_range?? */
	swap(rsc->bo,          shadow->bo);
	swap(rsc->write_batch, shadow->write_batch);
	rsc->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);

	/* at this point, the newly created shadow buffer is not referenced
	 * by any batches, but the existing rsc (probably) is.  We need to
	 * transfer those references over:
	 */
	debug_assert(shadow->batch_mask == 0);
	struct fd_batch *batch;
	foreach_batch(batch, &ctx->screen->batch_cache, rsc->batch_mask) {
		struct set_entry *entry = _mesa_set_search(batch->resources, rsc);
		_mesa_set_remove(batch->resources, entry);
		_mesa_set_add(batch->resources, shadow);
	}
	swap(rsc->batch_mask, shadow->batch_mask);

	mtx_unlock(&ctx->screen->lock);

	struct pipe_blit_info blit = {};
	blit.dst.resource = prsc;
	blit.dst.format   = prsc->format;
	blit.src.resource = pshadow;
	blit.src.format   = pshadow->format;
	blit.mask = util_format_get_mask(prsc->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

#define set_box(field, val) do {     \
		blit.dst.field = (val);      \
		blit.src.field = (val);      \
	} while (0)

	/* blit the other levels in their entirety: */
	for (unsigned l = 0; l <= prsc->last_level; l++) {
		if (l == level)
			continue;

		/* just blit whole level: */
		set_box(level, l);
		set_box(box.width,  u_minify(prsc->width0, l));
		set_box(box.height, u_minify(prsc->height0, l));
		set_box(box.depth,  u_minify(prsc->depth0, l));

		do_blit(ctx, &blit, fallback);
	}

	/* deal w/ current level specially, since we might need to split
	 * it up into a couple blits:
	 */
	if (!whole_level) {
		set_box(level, level);

		switch (prsc->target) {
		case PIPE_BUFFER:
		case PIPE_TEXTURE_1D:
			set_box(box.y, 0);
			set_box(box.z, 0);
			set_box(box.height, 1);
			set_box(box.depth, 1);

			if (box->x > 0) {
				set_box(box.x, 0);
				set_box(box.width, box->x);

				do_blit(ctx, &blit, fallback);
			}
			if ((box->x + box->width) < u_minify(prsc->width0, level)) {
				set_box(box.x, box->x + box->width);
				set_box(box.width, u_minify(prsc->width0, level) - (box->x + box->width));

				do_blit(ctx, &blit, fallback);
			}
			break;
		case PIPE_TEXTURE_2D:
			/* TODO */
		default:
			unreachable("TODO");
		}
	}

	ctx->in_shadow = false;

	pipe_resource_reference(&pshadow, NULL);

	return true;
}
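
/* Worked example of the split-blit above (values assumed): for a
 * PIPE_BUFFER with u_minify(width0, level) == 64 and a discarded box of
 * x=16, width=32, the first blit restores bytes [0,16) from the shadow
 * and the second restores [48,64); the middle [16,48) is skipped because
 * the caller is about to rewrite it anyway.
 */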

static struct fd_resource *
fd_alloc_staging(struct fd_context *ctx, struct fd_resource *rsc,
		unsigned level, const struct pipe_box *box)
{
	struct pipe_context *pctx = &ctx->base;
	struct pipe_resource tmpl = rsc->base;

	tmpl.width0  = box->width;
	tmpl.height0 = box->height;
	/* for array textures, box->depth is the array_size, otherwise
	 * for 3d textures, it is the depth:
	 */
	if (tmpl.array_size > 1) {
		tmpl.array_size = box->depth;
		tmpl.depth0 = 1;
	} else {
		tmpl.array_size = 1;
		tmpl.depth0 = box->depth;
	}
	tmpl.bind |= PIPE_BIND_LINEAR;

	struct pipe_resource *pstaging =
		pctx->screen->resource_create(pctx->screen, &tmpl);
	if (!pstaging)
		return NULL;

	return fd_resource(pstaging);
}
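
/* Note that the staging resource is sized to the transfer box rather than
 * the whole miplevel, and PIPE_BIND_LINEAR forces an untiled layout, so
 * the returned resource can be mapped directly by the CPU.
 */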

static void
fd_blit_from_staging(struct fd_context *ctx, struct fd_transfer *trans)
{
	struct pipe_resource *dst = trans->base.resource;
	struct pipe_blit_info blit = {};

	blit.dst.resource = dst;
	blit.dst.format   = dst->format;
	blit.dst.level    = trans->base.level;
	blit.dst.box      = trans->base.box;
	blit.src.resource = trans->staging_prsc;
	blit.src.format   = trans->staging_prsc->format;
	blit.src.level    = 0;
	blit.src.box      = trans->staging_box;
	blit.mask = util_format_get_mask(trans->staging_prsc->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	do_blit(ctx, &blit, false);
}

static void
fd_blit_to_staging(struct fd_context *ctx, struct fd_transfer *trans)
{
	struct pipe_resource *src = trans->base.resource;
	struct pipe_blit_info blit = {};

	blit.src.resource = src;
	blit.src.format   = src->format;
	blit.src.level    = trans->base.level;
	blit.src.box      = trans->base.box;
	blit.dst.resource = trans->staging_prsc;
	blit.dst.format   = trans->staging_prsc->format;
	blit.dst.level    = 0;
	blit.dst.box      = trans->staging_box;
	blit.mask = util_format_get_mask(trans->staging_prsc->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	do_blit(ctx, &blit, false);
}

static void fd_resource_transfer_flush_region(struct pipe_context *pctx,
		struct pipe_transfer *ptrans,
		const struct pipe_box *box)
{
	struct fd_resource *rsc = fd_resource(ptrans->resource);

	if (ptrans->resource->target == PIPE_BUFFER)
		util_range_add(&rsc->valid_buffer_range,
				ptrans->box.x + box->x,
				ptrans->box.x + box->x + box->width);
}
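
/* valid_buffer_range tracks which bytes of a PIPE_BUFFER have ever been
 * written; fd_resource_transfer_map() consults it (via
 * util_ranges_intersect()) to skip synchronization for writes into
 * still-uninitialized ranges.
 */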

static void
flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
{
	struct fd_batch *write_batch = NULL;

	fd_batch_reference(&write_batch, rsc->write_batch);

	if (usage & PIPE_TRANSFER_WRITE) {
		struct fd_batch *batch, *batches[32] = {};
		uint32_t batch_mask;

		/* This is a bit awkward, probably a fd_batch_flush_locked()
		 * would make things simpler.. but we need to hold the lock
		 * to iterate the batches which reference this resource.  So
		 * we must first grab references under a lock, then flush.
		 */
		mtx_lock(&ctx->screen->lock);
		batch_mask = rsc->batch_mask;
		foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
			fd_batch_reference(&batches[batch->idx], batch);
		mtx_unlock(&ctx->screen->lock);

		foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
			fd_batch_flush(batch, false, false);

		foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
			fd_batch_sync(batch);
			fd_batch_reference(&batches[batch->idx], NULL);
		}
		assert(rsc->batch_mask == 0);
	} else if (write_batch) {
		fd_batch_flush(write_batch, true, false);
	}

	fd_batch_reference(&write_batch, NULL);

	assert(!rsc->write_batch);
}
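
/* Note the asymmetry above: a read-only transfer only needs the batch
 * writing the resource to be flushed, while a write must flush (and sync
 * against) every batch that reads it, hence the rsc->batch_mask walk.
 */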

static void
fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
	flush_resource(fd_context(pctx), fd_resource(prsc), PIPE_TRANSFER_READ);
}

static void
fd_resource_transfer_unmap(struct pipe_context *pctx,
		struct pipe_transfer *ptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(ptrans->resource);
	struct fd_transfer *trans = fd_transfer(ptrans);

	if (trans->staging_prsc) {
		if (ptrans->usage & PIPE_TRANSFER_WRITE)
			fd_blit_from_staging(ctx, trans);
		pipe_resource_reference(&trans->staging_prsc, NULL);
	}

	if (!(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		fd_bo_cpu_fini(rsc->bo);
	}

	util_range_add(&rsc->valid_buffer_range,
			ptrans->box.x,
			ptrans->box.x + ptrans->box.width);

	pipe_resource_reference(&ptrans->resource, NULL);
	slab_free(&ctx->transfer_pool, ptrans);
}

static void *
fd_resource_transfer_map(struct pipe_context *pctx,
		struct pipe_resource *prsc,
		unsigned level, unsigned usage,
		const struct pipe_box *box,
		struct pipe_transfer **pptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(prsc);
	struct fd_resource_slice *slice = fd_resource_slice(rsc, level);
	struct fd_transfer *trans;
	struct pipe_transfer *ptrans;
	enum pipe_format format = prsc->format;
	uint32_t op = 0;
	uint32_t offset;
	char *buf;
	int ret = 0;

	DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
		box->width, box->height, box->x, box->y);

	ptrans = slab_alloc(&ctx->transfer_pool);
	if (!ptrans)
		return NULL;

	/* slab_alloc_st() doesn't zero: */
	trans = fd_transfer(ptrans);
	memset(trans, 0, sizeof(*trans));

	pipe_resource_reference(&ptrans->resource, prsc);
	ptrans->level = level;
	ptrans->usage = usage;
	ptrans->box = *box;
	ptrans->stride = util_format_get_nblocksx(format, slice->pitch) * rsc->cpp;
	ptrans->layer_stride = rsc->layer_first ? rsc->layer_size : slice->size0;

	/* we always need a staging texture for tiled buffers:
	 *
	 * TODO we might sometimes want to *also* shadow the resource to avoid
	 * splitting a batch.. for ex, mid-frame texture uploads to a tiled
	 * texture.
	 */
	if (rsc->tile_mode) {
		struct fd_resource *staging_rsc;

		staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
		if (staging_rsc) {
			// TODO for PIPE_TRANSFER_READ, need to do untiling blit..
			trans->staging_prsc = &staging_rsc->base;
			trans->base.stride = util_format_get_nblocksx(format,
				staging_rsc->slices[0].pitch) * staging_rsc->cpp;
			trans->base.layer_stride = staging_rsc->layer_first ?
				staging_rsc->layer_size : staging_rsc->slices[0].size0;
			trans->staging_box = *box;
			trans->staging_box.x = 0;
			trans->staging_box.y = 0;
			trans->staging_box.z = 0;

			if (usage & PIPE_TRANSFER_READ) {
				fd_blit_to_staging(ctx, trans);
				fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);
			}

			buf = fd_bo_map(staging_rsc->bo);
			offset = 0;

			*pptrans = ptrans;

			ctx->stats.staging_uploads++;

			return buf;
		}
	}

	if (ctx->in_shadow && !(usage & PIPE_TRANSFER_READ))
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

	if (usage & PIPE_TRANSFER_READ)
		op |= DRM_FREEDRENO_PREP_READ;

	if (usage & PIPE_TRANSFER_WRITE)
		op |= DRM_FREEDRENO_PREP_WRITE;

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
		realloc_bo(rsc, fd_bo_size(rsc->bo));
		rebind_resource(ctx, prsc);
	} else if ((usage & PIPE_TRANSFER_WRITE) &&
			prsc->target == PIPE_BUFFER &&
			!util_ranges_intersect(&rsc->valid_buffer_range,
					box->x, box->x + box->width)) {
		/* We are trying to write to a previously uninitialized range. No need
		 * to wait.
		 */
	} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		struct fd_batch *write_batch = NULL;

		/* hold a reference, so it doesn't disappear under us: */
		fd_batch_reference(&write_batch, rsc->write_batch);

		if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
				write_batch->back_blit) {
			/* if only thing pending is a back-blit, we can discard it: */
			fd_batch_reset(write_batch);
		}

		/* If the GPU is writing to the resource, or if it is reading from the
		 * resource and we're trying to write to it, flush the renders.
		 */
		bool needs_flush = pending(rsc, !!(usage & PIPE_TRANSFER_WRITE));
		bool busy = needs_flush || (0 != fd_bo_cpu_prep(rsc->bo,
				ctx->pipe, op | DRM_FREEDRENO_PREP_NOSYNC));

		/* if we need to flush/stall, see if we can make a shadow buffer
		 * to avoid this:
		 *
		 * TODO we could go down this path !reorder && !busy_for_read
		 * ie. we only *don't* want to go down this path if the blit
		 * will trigger a flush!
		 */
		if (ctx->screen->reorder && busy && !(usage & PIPE_TRANSFER_READ) &&
				(usage & PIPE_TRANSFER_DISCARD_RANGE)) {
			/* try shadowing only if it avoids a flush, otherwise staging would
			 * be better:
			 */
			if (needs_flush && fd_try_shadow_resource(ctx, rsc, level, box)) {
				needs_flush = busy = false;
				rebind_resource(ctx, prsc);
				ctx->stats.shadow_uploads++;
			} else {
				struct fd_resource *staging_rsc;

				if (needs_flush) {
					flush_resource(ctx, rsc, usage);
					needs_flush = false;
				}

				/* in this case, we don't need to shadow the whole resource,
				 * since any draw that references the previous contents has
				 * already had rendering flushed for all tiles.  So we can
				 * use a staging buffer to do the upload.
				 */
				staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
				if (staging_rsc) {
					trans->staging_prsc = &staging_rsc->base;
					trans->base.stride = util_format_get_nblocksx(format,
						staging_rsc->slices[0].pitch) * staging_rsc->cpp;
					trans->base.layer_stride = staging_rsc->layer_first ?
						staging_rsc->layer_size : staging_rsc->slices[0].size0;
					trans->staging_box = *box;
					trans->staging_box.x = 0;
					trans->staging_box.y = 0;
					trans->staging_box.z = 0;
					buf = fd_bo_map(staging_rsc->bo);
					offset = 0;

					*pptrans = ptrans;

					fd_batch_reference(&write_batch, NULL);

					ctx->stats.staging_uploads++;

					return buf;
				}
			}
		}

		if (needs_flush) {
			flush_resource(ctx, rsc, usage);
			needs_flush = false;
		}

		fd_batch_reference(&write_batch, NULL);

		/* The GPU keeps track of how the various bo's are being used, and
		 * will wait if necessary for the proper operation to have
		 * completed.
		 */
		if (busy) {
			ret = fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);
			if (ret)
				goto fail;
		}
	}

	buf = fd_bo_map(rsc->bo);
	offset =
		box->y / util_format_get_blockheight(format) * ptrans->stride +
		box->x / util_format_get_blockwidth(format) * rsc->cpp +
		fd_resource_offset(rsc, level, box->z);

	if (usage & PIPE_TRANSFER_WRITE)
		rsc->valid = true;

	*pptrans = ptrans;

	return buf + offset;

fail:
	fd_resource_transfer_unmap(pctx, ptrans);
	return NULL;
}
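
/* Rough decision tree for the synchronization logic above, ignoring the
 * tiled/staging fast path at the top:
 *
 *   DISCARD_WHOLE_RESOURCE            -> realloc_bo() + rebind_resource()
 *   write to a never-written range    -> no sync needed
 *     of a PIPE_BUFFER
 *   otherwise, if the bo is busy      -> shadow (reorder + DISCARD_RANGE),
 *                                        else staging copy, else flush
 *                                        and stall in fd_bo_cpu_prep()
 */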

static void
fd_resource_destroy(struct pipe_screen *pscreen,
		struct pipe_resource *prsc)
{
	struct fd_resource *rsc = fd_resource(prsc);
	fd_bc_invalidate_resource(rsc, true);
	if (rsc->bo)
		fd_bo_del(rsc->bo);
	util_range_destroy(&rsc->valid_buffer_range);
	FREE(rsc);
}

static boolean
fd_resource_get_handle(struct pipe_screen *pscreen,
		struct pipe_context *pctx,
		struct pipe_resource *prsc,
		struct winsys_handle *handle,
		unsigned usage)
{
	struct fd_resource *rsc = fd_resource(prsc);

	return fd_screen_bo_get_handle(pscreen, rsc->bo,
			rsc->slices[0].pitch * rsc->cpp, handle);
}

static uint32_t
setup_slices(struct fd_resource *rsc, uint32_t alignment, enum pipe_format format)
{
	struct pipe_resource *prsc = &rsc->base;
	struct fd_screen *screen = fd_screen(prsc->screen);
	enum util_format_layout layout = util_format_description(format)->layout;
	uint32_t pitchalign = screen->gmem_alignw;
	uint32_t level, size = 0;
	uint32_t width = prsc->width0;
	uint32_t height = prsc->height0;
	uint32_t depth = prsc->depth0;
	/* in layer_first layout, the level (slice) contains just one
	 * layer (since in fact the layer contains the slices)
	 */
	uint32_t layers_in_level = rsc->layer_first ? 1 : prsc->array_size;

	for (level = 0; level <= prsc->last_level; level++) {
		struct fd_resource_slice *slice = fd_resource_slice(rsc, level);
		uint32_t blocks;

		if (layout == UTIL_FORMAT_LAYOUT_ASTC)
			slice->pitch = width =
				util_align_npot(width, pitchalign * util_format_get_blockwidth(format));
		else
			slice->pitch = width = align(width, pitchalign);
		slice->offset = size;
		blocks = util_format_get_nblocks(format, width, height);
		/* 1d array and 2d array textures must all have the same layer size
		 * for each miplevel on a3xx. 3d textures can have different layer
		 * sizes for high levels, but the hw auto-sizer is buggy (or at least
		 * different than what this code does), so as soon as the layer size
		 * range gets into range, we stop reducing it.
		 */
		if (prsc->target == PIPE_TEXTURE_3D && (
				level == 1 ||
				(level > 1 && rsc->slices[level - 1].size0 > 0xf000)))
			slice->size0 = align(blocks * rsc->cpp, alignment);
		else if (level == 0 || rsc->layer_first || alignment == 1)
			slice->size0 = align(blocks * rsc->cpp, alignment);
		else
			slice->size0 = rsc->slices[level - 1].size0;

		size += slice->size0 * depth * layers_in_level;

		width = u_minify(width, 1);
		height = u_minify(height, 1);
		depth = u_minify(depth, 1);
	}

	return size;
}
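
/* Worked example (values assumed): a 64x64 PIPE_FORMAT_R8G8B8A8_UNORM
 * texture with last_level=1, pitchalign=32 and alignment=1 lays out as
 *   level 0: pitch=64, offset=0,     size0=64*64*4 = 16384
 *   level 1: pitch=32, offset=16384, size0=32*32*4 = 4096
 * and setup_slices() returns 20480 for the whole mip chain.
 */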

static uint32_t
slice_alignment(enum pipe_texture_target target)
{
	/* on a3xx, 2d array and 3d textures seem to want their
	 * layers aligned to page boundaries:
	 */
	switch (target) {
	case PIPE_TEXTURE_3D:
	case PIPE_TEXTURE_1D_ARRAY:
	case PIPE_TEXTURE_2D_ARRAY:
		return 4096;
	default:
		return 1;
	}
}

/* cross generation texture layout to plug in to screen->setup_slices()..
 * replace with generation specific one as-needed.
 *
 * TODO for a4xx probably can extract out the a4xx specific logic into
 * a small fd4_setup_slices() wrapper that sets up layer_first, and then
 * calls this.
 */
uint32_t
fd_setup_slices(struct fd_resource *rsc)
{
	uint32_t alignment;

	alignment = slice_alignment(rsc->base.target);

	struct fd_screen *screen = fd_screen(rsc->base.screen);
	if (is_a4xx(screen)) {
		switch (rsc->base.target) {
		case PIPE_TEXTURE_3D:
			rsc->layer_first = false;
			break;
		default:
			rsc->layer_first = true;
			alignment = 1;
			break;
		}
	}

	return setup_slices(rsc, alignment, rsc->base.format);
}

/* special case to resize query buf after allocated.. */
void
fd_resource_resize(struct pipe_resource *prsc, uint32_t sz)
{
	struct fd_resource *rsc = fd_resource(prsc);

	debug_assert(prsc->width0 == 0);
	debug_assert(prsc->target == PIPE_BUFFER);
	debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);

	prsc->width0 = sz;
	realloc_bo(rsc, fd_screen(prsc->screen)->setup_slices(rsc));
}

// TODO common helper?
static bool
has_depth(enum pipe_format format)
{
	switch (format) {
	case PIPE_FORMAT_Z16_UNORM:
	case PIPE_FORMAT_Z32_UNORM:
	case PIPE_FORMAT_Z32_FLOAT:
	case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
	case PIPE_FORMAT_Z24_UNORM_S8_UINT:
	case PIPE_FORMAT_S8_UINT_Z24_UNORM:
	case PIPE_FORMAT_Z24X8_UNORM:
	case PIPE_FORMAT_X8Z24_UNORM:
		return true;
	default:
		return false;
	}
}

/**
 * Create a new texture object, using the given template info.
 */
static struct pipe_resource *
fd_resource_create(struct pipe_screen *pscreen,
		const struct pipe_resource *tmpl)
{
	struct fd_screen *screen = fd_screen(pscreen);
	struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
	struct pipe_resource *prsc = &rsc->base;
	enum pipe_format format = tmpl->format;
	uint32_t size;

	DBG("%p: target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
			"nr_samples=%u, usage=%u, bind=%x, flags=%x", prsc,
			tmpl->target, util_format_name(format),
			tmpl->width0, tmpl->height0, tmpl->depth0,
			tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
			tmpl->usage, tmpl->bind, tmpl->flags);

	if (!rsc)
		return NULL;

	*prsc = *tmpl;

#define LINEAR \
	(PIPE_BIND_SCANOUT | \
	 PIPE_BIND_LINEAR  | \
	 PIPE_BIND_DISPLAY_TARGET)

	if (screen->tile_mode &&
			(tmpl->target != PIPE_BUFFER) &&
			(tmpl->bind & PIPE_BIND_SAMPLER_VIEW) &&
			!(tmpl->bind & LINEAR)) {
		rsc->tile_mode = screen->tile_mode(tmpl);
	}

	pipe_reference_init(&prsc->reference, 1);

	prsc->screen = pscreen;

	util_range_init(&rsc->valid_buffer_range);

	rsc->internal_format = format;
	rsc->cpp = util_format_get_blocksize(format);
	prsc->nr_samples = MAX2(1, prsc->nr_samples);
	rsc->cpp *= prsc->nr_samples;

	assert(rsc->cpp);

	// XXX probably need some extra work if we hit rsc shadowing path w/ lrz..
	if ((is_a5xx(screen) || is_a6xx(screen)) &&
			(fd_mesa_debug & FD_DBG_LRZ) && has_depth(format)) {
		const uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
				DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */
		unsigned lrz_pitch  = align(DIV_ROUND_UP(tmpl->width0, 8), 64);
		unsigned lrz_height = DIV_ROUND_UP(tmpl->height0, 8);

		/* LRZ buffer is super-sampled: */
		switch (prsc->nr_samples) {
		case 4:
			lrz_pitch *= 2;
		case 2:
			lrz_height *= 2;
		}

		unsigned size = lrz_pitch * lrz_height * 2;

		size += 0x1000; /* for GRAS_LRZ_FAST_CLEAR_BUFFER */

		rsc->lrz_height = lrz_height;
		rsc->lrz_width = lrz_pitch;
		rsc->lrz_pitch = lrz_pitch;
		rsc->lrz = fd_bo_new(screen->dev, size, flags, "lrz");
	}

	size = screen->setup_slices(rsc);

	/* special case for hw-query buffer, which we need to allocate before we
	 * know the size:
	 */
	if (size == 0) {
		/* note, semi-intentional == instead of & */
		debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
		return prsc;
	}

	if (rsc->layer_first) {
		rsc->layer_size = align(size, 4096);
		size = rsc->layer_size * prsc->array_size;
	}

	realloc_bo(rsc, size);
	if (!rsc->bo)
		goto fail;

	return prsc;
fail:
	fd_resource_destroy(pscreen, prsc);
	return NULL;
}
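
/* LRZ sizing example for the block in fd_resource_create() above (values
 * assumed): a 1920x1080 depth buffer gets
 * lrz_pitch = align(DIV_ROUND_UP(1920, 8), 64) = 256 and
 * lrz_height = DIV_ROUND_UP(1080, 8) = 135, so the bo is
 * 256 * 135 * 2 = 69120 bytes plus the 0x1000 fast-clear area.
 */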

/**
 * Create a texture from a winsys_handle. The handle is often created in
 * another process by first creating a pipe texture and then calling
 * resource_get_handle.
 */
static struct pipe_resource *
fd_resource_from_handle(struct pipe_screen *pscreen,
		const struct pipe_resource *tmpl,
		struct winsys_handle *handle, unsigned usage)
{
	struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
	struct fd_resource_slice *slice = &rsc->slices[0];
	struct pipe_resource *prsc = &rsc->base;
	uint32_t pitchalign = fd_screen(pscreen)->gmem_alignw;

	DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
			"nr_samples=%u, usage=%u, bind=%x, flags=%x",
			tmpl->target, util_format_name(tmpl->format),
			tmpl->width0, tmpl->height0, tmpl->depth0,
			tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
			tmpl->usage, tmpl->bind, tmpl->flags);

	if (!rsc)
		return NULL;

	*prsc = *tmpl;

	pipe_reference_init(&prsc->reference, 1);

	prsc->screen = pscreen;

	util_range_init(&rsc->valid_buffer_range);

	rsc->bo = fd_screen_bo_from_handle(pscreen, handle);
	if (!rsc->bo)
		goto fail;

	prsc->nr_samples = MAX2(1, prsc->nr_samples);
	rsc->internal_format = tmpl->format;
	rsc->cpp = prsc->nr_samples * util_format_get_blocksize(tmpl->format);
	slice->pitch = handle->stride / rsc->cpp;
	slice->offset = handle->offset;
	slice->size0 = handle->stride * prsc->height0;

	if ((slice->pitch < align(prsc->width0, pitchalign)) ||
			(slice->pitch & (pitchalign - 1)))
		goto fail;

	assert(rsc->cpp);

	return prsc;

fail:
	fd_resource_destroy(pscreen, prsc);
	return NULL;
}

bool
fd_render_condition_check(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);

	if (!ctx->cond_query)
		return true;

	union pipe_query_result res = { 0 };
	bool wait =
		ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
		ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

	if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
		return (bool)res.u64 != ctx->cond_cond;

	return true;
}
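
/* Note: returning true means "draw".  If the predicate result is not ready
 * and wait is false (the *_NO_WAIT modes), get_query_result() returns
 * false and we fall through to rendering, which is the permitted
 * approximation for conditional rendering.
 */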

/**
 * Optimal hardware path for blitting pixels.
 * Scaling, format conversion, up- and downsampling (resolve) are allowed.
 */
static void
fd_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_blit_info info = *blit_info;

	if (info.render_condition_enable && !fd_render_condition_check(pctx))
		return;

	if (info.mask & PIPE_MASK_S) {
		DBG("cannot blit stencil, skipping");
		info.mask &= ~PIPE_MASK_S;
	}

	if (!util_blitter_is_blit_supported(ctx->blitter, &info)) {
		DBG("blit unsupported %s -> %s",
				util_format_short_name(info.src.resource->format),
				util_format_short_name(info.dst.resource->format));
		return;
	}

	if (!(ctx->blit && ctx->blit(ctx, &info)))
		fd_blitter_blit(ctx, &info);
}

void
fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond, bool discard,
		enum fd_render_stage stage)
{
	fd_fence_ref(ctx->base.screen, &ctx->last_fence, NULL);

	util_blitter_save_fragment_constant_buffer_slot(ctx->blitter,
			ctx->constbuf[PIPE_SHADER_FRAGMENT].cb);
	util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->vtx.vertexbuf.vb);
	util_blitter_save_vertex_elements(ctx->blitter, ctx->vtx.vtx);
	util_blitter_save_vertex_shader(ctx->blitter, ctx->prog.vp);
	util_blitter_save_so_targets(ctx->blitter, ctx->streamout.num_targets,
			ctx->streamout.targets);
	util_blitter_save_rasterizer(ctx->blitter, ctx->rasterizer);
	util_blitter_save_viewport(ctx->blitter, &ctx->viewport);
	util_blitter_save_scissor(ctx->blitter, &ctx->scissor);
	util_blitter_save_fragment_shader(ctx->blitter, ctx->prog.fp);
	util_blitter_save_blend(ctx->blitter, ctx->blend);
	util_blitter_save_depth_stencil_alpha(ctx->blitter, ctx->zsa);
	util_blitter_save_stencil_ref(ctx->blitter, &ctx->stencil_ref);
	util_blitter_save_sample_mask(ctx->blitter, ctx->sample_mask);
	util_blitter_save_framebuffer(ctx->blitter, &ctx->framebuffer);
	util_blitter_save_fragment_sampler_states(ctx->blitter,
			ctx->tex[PIPE_SHADER_FRAGMENT].num_samplers,
			(void **)ctx->tex[PIPE_SHADER_FRAGMENT].samplers);
	util_blitter_save_fragment_sampler_views(ctx->blitter,
			ctx->tex[PIPE_SHADER_FRAGMENT].num_textures,
			ctx->tex[PIPE_SHADER_FRAGMENT].textures);
	if (!render_cond)
		util_blitter_save_render_condition(ctx->blitter,
			ctx->cond_query, ctx->cond_cond, ctx->cond_mode);

	if (ctx->batch)
		fd_batch_set_stage(ctx->batch, stage);

	ctx->in_blit = discard;
}
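
/* util_blitter draws with its own shaders and state objects; each
 * util_blitter_save_*() call above records the current state so the
 * blitter can restore it once the blit completes.
 */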

void
fd_blitter_pipe_end(struct fd_context *ctx)
{
	if (ctx->batch)
		fd_batch_set_stage(ctx->batch, FD_STAGE_NULL);
	ctx->in_blit = false;
}

static void
fd_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
	struct fd_resource *rsc = fd_resource(prsc);

	/*
	 * TODO I guess we could track that the resource is invalidated and
	 * use that as a hint to realloc rather than stall in _transfer_map(),
	 * even in the non-DISCARD_WHOLE_RESOURCE case?
	 */

	if (rsc->write_batch) {
		struct fd_batch *batch = rsc->write_batch;
		struct pipe_framebuffer_state *pfb = &batch->framebuffer;

		if (pfb->zsbuf && pfb->zsbuf->texture == prsc)
			batch->resolve &= ~(FD_BUFFER_DEPTH | FD_BUFFER_STENCIL);

		for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
			if (pfb->cbufs[i] && pfb->cbufs[i]->texture == prsc) {
				batch->resolve &= ~(PIPE_CLEAR_COLOR0 << i);
			}
		}
	}

	rsc->valid = false;
}

static enum pipe_format
fd_resource_get_internal_format(struct pipe_resource *prsc)
{
	return fd_resource(prsc)->internal_format;
}

static void
fd_resource_set_stencil(struct pipe_resource *prsc,
		struct pipe_resource *stencil)
{
	fd_resource(prsc)->stencil = fd_resource(stencil);
}

static struct pipe_resource *
fd_resource_get_stencil(struct pipe_resource *prsc)
{
	struct fd_resource *rsc = fd_resource(prsc);
	if (rsc->stencil)
		return &rsc->stencil->base;
	return NULL;
}

static const struct u_transfer_vtbl transfer_vtbl = {
		.resource_create          = fd_resource_create,
		.resource_destroy         = fd_resource_destroy,
		.transfer_map             = fd_resource_transfer_map,
		.transfer_flush_region    = fd_resource_transfer_flush_region,
		.transfer_unmap           = fd_resource_transfer_unmap,
		.get_internal_format      = fd_resource_get_internal_format,
		.set_stencil              = fd_resource_set_stencil,
		.get_stencil              = fd_resource_get_stencil,
};
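
/* These vtbl entries are not installed on the screen directly; they are
 * wrapped by u_transfer_helper (see fd_resource_screen_init() below),
 * which layers stencil splitting, fake RGTC, etc. on top of them.
 */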

void
fd_resource_screen_init(struct pipe_screen *pscreen)
{
	struct fd_screen *screen = fd_screen(pscreen);
	bool fake_rgtc = screen->gpu_id < 400;

	pscreen->resource_create = u_transfer_helper_resource_create;
	pscreen->resource_from_handle = fd_resource_from_handle;
	pscreen->resource_get_handle = fd_resource_get_handle;
	pscreen->resource_destroy = u_transfer_helper_resource_destroy;

	pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
			true, false, fake_rgtc, true);

	if (!screen->setup_slices)
		screen->setup_slices = fd_setup_slices;
}

static void
fd_get_sample_position(struct pipe_context *context,
		unsigned sample_count, unsigned sample_index,
		float *pos_out)
{
	/* The following is copied from nouveau/nv50 except for position
	 * values, which are taken from blob driver */
	static const uint8_t pos1[1][2] = { { 0x8, 0x8 } };
	static const uint8_t pos2[2][2] = {
		{ 0xc, 0xc }, { 0x4, 0x4 } };
	static const uint8_t pos4[4][2] = {
		{ 0x6, 0x2 }, { 0xe, 0x6 },
		{ 0x2, 0xa }, { 0xa, 0xe } };
	/* TODO needs to be verified on supported hw */
	static const uint8_t pos8[8][2] = {
		{ 0x9, 0x5 }, { 0x7, 0xb },
		{ 0xd, 0x9 }, { 0x5, 0x3 },
		{ 0x3, 0xd }, { 0x1, 0x7 },
		{ 0xb, 0xf }, { 0xf, 0x1 } };

	const uint8_t (*ptr)[2];

	switch (sample_count) {
	case 1:
		ptr = pos1;
		break;
	case 2:
		ptr = pos2;
		break;
	case 4:
		ptr = pos4;
		break;
	case 8:
		ptr = pos8;
		break;
	default:
		assert(0);
		return;
	}

	pos_out[0] = ptr[sample_index][0] / 16.0f;
	pos_out[1] = ptr[sample_index][1] / 16.0f;
}
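
/* The table entries are sub-pixel positions in 1/16ths of a pixel, hence
 * the /16.0f above: e.g. the single-sample position {0x8, 0x8} maps to
 * (0.5, 0.5), the pixel center.
 */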

void
fd_resource_context_init(struct pipe_context *pctx)
{
	pctx->transfer_map = u_transfer_helper_transfer_map;
	pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
	pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
	pctx->buffer_subdata = u_default_buffer_subdata;
	pctx->texture_subdata = u_default_texture_subdata;
	pctx->create_surface = fd_create_surface;
	pctx->surface_destroy = fd_surface_destroy;
	pctx->resource_copy_region = fd_resource_copy_region;
	pctx->blit = fd_blit;
	pctx->flush_resource = fd_flush_resource;
	pctx->invalidate_resource = fd_invalidate_resource;
	pctx->get_sample_position = fd_get_sample_position;
}