/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "util/format/u_format.h"
#include "util/format/u_format_rgtc.h"
#include "util/format/u_format_zs.h"
#include "util/u_inlines.h"
#include "util/u_transfer.h"
#include "util/u_string.h"
#include "util/u_surface.h"

#include "util/u_drm.h"

#include "decode/util.h"

#include "freedreno_resource.h"
#include "freedreno_batch_cache.h"
#include "freedreno_blitter.h"
#include "freedreno_fence.h"
#include "freedreno_screen.h"
#include "freedreno_surface.h"
#include "freedreno_context.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

#include "drm-uapi/drm_fourcc.h"

/* XXX this should go away, needed for 'struct winsys_handle' */
#include "frontend/drm_driver.h"
/* A private modifier for now, so we have a way to request tiled but not
 * compressed.  It would perhaps be good to get real modifiers for the
 * tiled formats, but would probably need to do some work to figure out
 * the layout(s) of the tiled modes, and whether they are the same
 * across generations.
 */
#define FD_FORMAT_MOD_QCOM_TILED   fourcc_mod_code(QCOM, 0xffffffff)
/**
 * Go through the entire state and see if the resource is bound
 * anywhere.  If it is, mark the relevant state as dirty.  This is
 * called on realloc_bo to ensure the necessary state is re-
 * emitted so the GPU looks at the new backing bo.
 */
static void
rebind_resource_in_ctx(struct fd_context *ctx, struct fd_resource *rsc)
{
   struct pipe_resource *prsc = &rsc->base;

   if (ctx->rebind_resource)
      ctx->rebind_resource(ctx, rsc);

   /* VBOs */
   if (rsc->dirty & FD_DIRTY_VTXBUF) {
      struct fd_vertexbuf_stateobj *vb = &ctx->vtx.vertexbuf;
      for (unsigned i = 0; i < vb->count && !(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
         if (vb->vb[i].buffer.resource == prsc)
            ctx->dirty |= FD_DIRTY_VTXBUF;
      }
   }

   const enum fd_dirty_3d_state per_stage_dirty =
         FD_DIRTY_CONST | FD_DIRTY_TEX | FD_DIRTY_IMAGE | FD_DIRTY_SSBO;

   if (!(rsc->dirty & per_stage_dirty))
      return;

   /* per-shader-stage resources: */
   for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
      /* Constbufs.. note that constbuf[0] is normal uniforms emitted in
       * cmdstream rather than by pointer..
       */
      if ((rsc->dirty & FD_DIRTY_CONST) &&
            !(ctx->dirty_shader[stage] & FD_DIRTY_CONST)) {
         struct fd_constbuf_stateobj *cb = &ctx->constbuf[stage];
         const unsigned num_ubos = util_last_bit(cb->enabled_mask);
         for (unsigned i = 1; i < num_ubos; i++) {
            if (cb->cb[i].buffer == prsc) {
               ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_CONST;
               ctx->dirty |= FD_DIRTY_CONST;
               break;
            }
         }
      }

      /* Textures */
      if ((rsc->dirty & FD_DIRTY_TEX) &&
            !(ctx->dirty_shader[stage] & FD_DIRTY_TEX)) {
         struct fd_texture_stateobj *tex = &ctx->tex[stage];
         for (unsigned i = 0; i < tex->num_textures; i++) {
            if (tex->textures[i] && (tex->textures[i]->texture == prsc)) {
               ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_TEX;
               ctx->dirty |= FD_DIRTY_TEX;
               break;
            }
         }
      }

      /* Images */
      if ((rsc->dirty & FD_DIRTY_IMAGE) &&
            !(ctx->dirty_shader[stage] & FD_DIRTY_IMAGE)) {
         struct fd_shaderimg_stateobj *si = &ctx->shaderimg[stage];
         const unsigned num_images = util_last_bit(si->enabled_mask);
         for (unsigned i = 0; i < num_images; i++) {
            if (si->si[i].resource == prsc) {
               ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_IMAGE;
               ctx->dirty |= FD_DIRTY_IMAGE;
               break;
            }
         }
      }

      /* SSBOs */
      if ((rsc->dirty & FD_DIRTY_SSBO) &&
            !(ctx->dirty_shader[stage] & FD_DIRTY_SSBO)) {
         struct fd_shaderbuf_stateobj *sb = &ctx->shaderbuf[stage];
         const unsigned num_ssbos = util_last_bit(sb->enabled_mask);
         for (unsigned i = 0; i < num_ssbos; i++) {
            if (sb->sb[i].buffer == prsc) {
               ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_SSBO;
               ctx->dirty |= FD_DIRTY_SSBO;
               break;
            }
         }
      }
   }
}

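/* Run rebind_resource_in_ctx() for every context on the screen, holding
 * both the screen and resource locks while iterating the context list.
 */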
static void
rebind_resource(struct fd_resource *rsc)
{
   struct fd_screen *screen = fd_screen(rsc->base.screen);

   fd_screen_lock(screen);
   fd_resource_lock(rsc);

   if (rsc->dirty)
      list_for_each_entry (struct fd_context, ctx, &screen->context_list, node)
         rebind_resource_in_ctx(ctx, rsc);

   fd_resource_unlock(rsc);
   fd_screen_unlock(screen);
}

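/* (Re)allocate the backing bo for a resource, discarding any previous
 * contents.  Callers rely on rebind_resource() so that any state which
 * references the old bo gets re-emitted.
 */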
static void
realloc_bo(struct fd_resource *rsc, uint32_t size)
{
   struct pipe_resource *prsc = &rsc->base;
   struct fd_screen *screen = fd_screen(rsc->base.screen);
   uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
         DRM_FREEDRENO_GEM_TYPE_KMEM |
         COND(prsc->bind & PIPE_BIND_SCANOUT, DRM_FREEDRENO_GEM_SCANOUT);
   /* TODO other flags? */

   /* if we start using things other than write-combine,
    * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
    */

   if (rsc->bo)
      fd_bo_del(rsc->bo);

   rsc->bo = fd_bo_new(screen->dev, size, flags, "%ux%ux%u@%u:%x",
         prsc->width0, prsc->height0, prsc->depth0, rsc->layout.cpp, prsc->bind);

   /* Zero out the UBWC area on allocation.  This fixes intermittent failures
    * with UBWC, which I suspect are due to the HW having a hard time
    * interpreting arbitrary values populating the flags buffer when the BO
    * was recycled through the bo cache (instead of fresh allocations from
    * the kernel, which are zeroed).  sleep(1) in this spot didn't work
    * around the issue, but any memset value seems to.
    */
   if (rsc->layout.ubwc) {
      void *buf = fd_bo_map(rsc->bo);
      memset(buf, 0, rsc->layout.slices[0].offset);
   }

   rsc->seqno = p_atomic_inc_return(&screen->rsc_seqno);
   util_range_set_empty(&rsc->valid_buffer_range);
   fd_bc_invalidate_resource(rsc, true);
}

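/* Blit via the hw blitter if possible, otherwise (or if requested via
 * 'fallback') do the copy on the cpu with util_resource_copy_region().
 */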
static void
do_blit(struct fd_context *ctx, const struct pipe_blit_info *blit, bool fallback)
{
   struct pipe_context *pctx = &ctx->base;

   /* TODO size threshold too?? */
   if (fallback || !fd_blit(pctx, blit)) {
      /* do blit on cpu: */
      util_resource_copy_region(pctx,
            blit->dst.resource, blit->dst.level, blit->dst.box.x,
            blit->dst.box.y, blit->dst.box.z,
            blit->src.resource, blit->src.level, &blit->src.box);
   }
}

static void
flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage);

/**
 * @rsc: the resource to shadow
 * @level: the level to discard (if box != NULL, otherwise ignored)
 * @box: the box to discard (or NULL if none)
 * @modifier: the modifier for the new buffer state
 */
static bool
fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
      unsigned level, const struct pipe_box *box, uint64_t modifier)
{
   struct pipe_context *pctx = &ctx->base;
   struct pipe_resource *prsc = &rsc->base;
   bool fallback = false;

   if (prsc->next)
      return false;

   /* If you have a sequence where there is a single rsc associated
    * with the current render target, and then you end up shadowing
    * that same rsc on the 3d pipe (u_blitter), because of how we
    * swap the new shadow and rsc before the back-blit, you could end
    * up confusing things into thinking that u_blitter's framebuffer
    * state is the same as the current framebuffer state, which has
    * the result of blitting to rsc rather than shadow.
    *
    * Normally we wouldn't want to unconditionally trigger a flush,
    * since that defeats the purpose of shadowing, but this is a
    * case where we'd have to flush anyways.
    */
   if (rsc->write_batch == ctx->batch)
      flush_resource(ctx, rsc, 0);

   /* TODO: somehow munge dimensions and format to copy unsupported
    * render target format to something that is supported?
    */
   if (!pctx->screen->is_format_supported(pctx->screen,
         prsc->format, prsc->target, prsc->nr_samples,
         prsc->nr_storage_samples,
         PIPE_BIND_RENDER_TARGET))
      fallback = true;

   /* do shadowing back-blits on the cpu for buffers: */
   if (prsc->target == PIPE_BUFFER)
      fallback = true;

   bool discard_whole_level = box && util_texrange_covers_whole_level(prsc, level,
         box->x, box->y, box->z, box->width, box->height, box->depth);

   /* TODO need to be more clever about current level */
   if ((prsc->target >= PIPE_TEXTURE_2D) && box && !discard_whole_level)
      return false;

   struct pipe_resource *pshadow =
         pctx->screen->resource_create_with_modifiers(pctx->screen,
               prsc, &modifier, 1);

   if (!pshadow)
      return false;

   assert(!ctx->in_shadow);
   ctx->in_shadow = true;

   /* get rid of any references that batch-cache might have to us (which
    * should empty/destroy rsc->batches hashset)
    */
   fd_bc_invalidate_resource(rsc, false);
   rebind_resource(rsc);

   fd_screen_lock(ctx->screen);

   /* Swap the backing bo's, so shadow becomes the old buffer,
    * blit from shadow to new buffer.  From here on out, we
    * cannot fail.
    *
    * Note that we need to do it in this order, otherwise if
    * we go down cpu blit path, the recursive transfer_map()
    * sees the wrong status..
    */
   struct fd_resource *shadow = fd_resource(pshadow);

   DBG("shadow: %p (%d) -> %p (%d)\n", rsc, rsc->base.reference.count,
         shadow, shadow->base.reference.count);

   /* TODO valid_buffer_range?? */
   swap(rsc->bo,          shadow->bo);
   swap(rsc->write_batch, shadow->write_batch);
   swap(rsc->layout,      shadow->layout);
   rsc->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);

   /* at this point, the newly created shadow buffer is not referenced
    * by any batches, but the existing rsc (probably) is.  We need to
    * transfer those references over:
    */
   debug_assert(shadow->batch_mask == 0);
   struct fd_batch *batch;
   foreach_batch(batch, &ctx->screen->batch_cache, rsc->batch_mask) {
      struct set_entry *entry = _mesa_set_search(batch->resources, rsc);
      _mesa_set_remove(batch->resources, entry);
      _mesa_set_add(batch->resources, shadow);
   }
   swap(rsc->batch_mask, shadow->batch_mask);

   fd_screen_unlock(ctx->screen);

   struct pipe_blit_info blit = {};
   blit.dst.resource = prsc;
   blit.dst.format   = prsc->format;
   blit.src.resource = pshadow;
   blit.src.format   = pshadow->format;
   blit.mask = util_format_get_mask(prsc->format);
   blit.filter = PIPE_TEX_FILTER_NEAREST;

#define set_box(field, val) do {   \
      blit.dst.field = (val);      \
      blit.src.field = (val);      \
   } while (0)

   /* blit the other levels in their entirety: */
   for (unsigned l = 0; l <= prsc->last_level; l++) {
      if (box && l == level)
         continue;

      /* just blit whole level: */
      set_box(level, l);
      set_box(box.width,  u_minify(prsc->width0, l));
      set_box(box.height, u_minify(prsc->height0, l));
      set_box(box.depth,  u_minify(prsc->depth0, l));

      for (int i = 0; i < prsc->array_size; i++) {
         set_box(box.z, i);
         do_blit(ctx, &blit, fallback);
      }
   }

   /* deal w/ current level specially, since we might need to split
    * it up into a couple blits:
    */
   if (box && !discard_whole_level) {
      set_box(level, level);

      switch (prsc->target) {
      case PIPE_BUFFER:
      case PIPE_TEXTURE_1D:
         set_box(box.y, 0);
         set_box(box.z, 0);
         set_box(box.height, 1);
         set_box(box.depth, 1);

         if (box->x > 0) {
            set_box(box.x, 0);
            set_box(box.width, box->x);

            do_blit(ctx, &blit, fallback);
         }
         if ((box->x + box->width) < u_minify(prsc->width0, level)) {
            set_box(box.x, box->x + box->width);
            set_box(box.width, u_minify(prsc->width0, level) - (box->x + box->width));

            do_blit(ctx, &blit, fallback);
         }
         break;
      case PIPE_TEXTURE_2D:
         /* TODO */
      default:
         unreachable("TODO");
      }
   }

   ctx->in_shadow = false;

   pipe_resource_reference(&pshadow, NULL);

   return true;
}

/**
 * Uncompress a UBWC compressed buffer "in place".  This works basically
 * like resource shadowing, creating a new resource, and doing an uncompress
 * blit, and swapping the state between shadow and original resource so it
 * appears to the gallium frontends as if nothing changed.
 */
void
fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc)
{
   bool success =
      fd_try_shadow_resource(ctx, rsc, 0, NULL, FD_FORMAT_MOD_QCOM_TILED);

   /* shadow should not fail in any cases where we need to uncompress: */
   debug_assert(success);
}

/**
 * Debug helper to hexdump a resource.
 */
void
fd_resource_dump(struct fd_resource *rsc, const char *name)
{
   fd_bo_cpu_prep(rsc->bo, NULL, DRM_FREEDRENO_PREP_READ);
   printf("%s: \n", name);
   dump_hex(fd_bo_map(rsc->bo), fd_bo_size(rsc->bo));
}

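/* Allocate a linear staging resource sized to the transfer box, used when
 * the target resource is tiled (or to avoid stalling on a busy bo).
 */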
static struct fd_resource *
fd_alloc_staging(struct fd_context *ctx, struct fd_resource *rsc,
      unsigned level, const struct pipe_box *box)
{
   struct pipe_context *pctx = &ctx->base;
   struct pipe_resource tmpl = rsc->base;

   tmpl.width0  = box->width;
   tmpl.height0 = box->height;
   /* for array textures, box->depth is the array_size, otherwise
    * for 3d textures, it is the depth:
    */
   if (tmpl.array_size > 1) {
      if (tmpl.target == PIPE_TEXTURE_CUBE)
         tmpl.target = PIPE_TEXTURE_2D_ARRAY;
      tmpl.array_size = box->depth;
      tmpl.depth0 = 1;
   } else {
      tmpl.array_size = 1;
      tmpl.depth0 = box->depth;
   }
   tmpl.last_level = 0;
   tmpl.bind |= PIPE_BIND_LINEAR;

   struct pipe_resource *pstaging =
      pctx->screen->resource_create(pctx->screen, &tmpl);
   if (!pstaging)
      return NULL;

   return fd_resource(pstaging);
}

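/* Copy the staging contents back out to the real resource, on unmap of a
 * written transfer:
 */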
static void
fd_blit_from_staging(struct fd_context *ctx, struct fd_transfer *trans)
{
   struct pipe_resource *dst = trans->base.resource;
   struct pipe_blit_info blit = {};

   blit.dst.resource = dst;
   blit.dst.format   = dst->format;
   blit.dst.level    = trans->base.level;
   blit.dst.box      = trans->base.box;
   blit.src.resource = trans->staging_prsc;
   blit.src.format   = trans->staging_prsc->format;
   blit.src.level    = 0;
   blit.src.box      = trans->staging_box;
   blit.mask = util_format_get_mask(trans->staging_prsc->format);
   blit.filter = PIPE_TEX_FILTER_NEAREST;

   do_blit(ctx, &blit, false);
}

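/* Fill the staging resource from the real resource, for readback transfers:
 */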
static void
fd_blit_to_staging(struct fd_context *ctx, struct fd_transfer *trans)
{
   struct pipe_resource *src = trans->base.resource;
   struct pipe_blit_info blit = {};

   blit.src.resource = src;
   blit.src.format   = src->format;
   blit.src.level    = trans->base.level;
   blit.src.box      = trans->base.box;
   blit.dst.resource = trans->staging_prsc;
   blit.dst.format   = trans->staging_prsc->format;
   blit.dst.level    = 0;
   blit.dst.box      = trans->staging_box;
   blit.mask = util_format_get_mask(trans->staging_prsc->format);
   blit.filter = PIPE_TEX_FILTER_NEAREST;

   do_blit(ctx, &blit, false);
}

static void fd_resource_transfer_flush_region(struct pipe_context *pctx,
      struct pipe_transfer *ptrans,
      const struct pipe_box *box)
{
   struct fd_resource *rsc = fd_resource(ptrans->resource);

   if (ptrans->resource->target == PIPE_BUFFER)
      util_range_add(&rsc->base, &rsc->valid_buffer_range,
            ptrans->box.x + box->x,
            ptrans->box.x + box->x + box->width);
}

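/* Flush the batches referencing a resource: all of them for a write (since
 * any reader could then see the new contents), or just the writing batch
 * for a read.
 */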
static void
flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
{
   struct fd_batch *write_batch = NULL;

   fd_screen_lock(ctx->screen);
   fd_batch_reference_locked(&write_batch, rsc->write_batch);
   fd_screen_unlock(ctx->screen);

   if (usage & PIPE_TRANSFER_WRITE) {
      struct fd_batch *batch, *batches[32] = {};
      uint32_t batch_mask;

      /* This is a bit awkward, probably a fd_batch_flush_locked()
       * would make things simpler.. but we need to hold the lock
       * to iterate the batches which reference this resource.  So
       * we must first grab references under a lock, then flush.
       */
      fd_screen_lock(ctx->screen);
      batch_mask = rsc->batch_mask;
      foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
         fd_batch_reference_locked(&batches[batch->idx], batch);
      fd_screen_unlock(ctx->screen);

      foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
         fd_batch_flush(batch);

      foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
         fd_batch_reference(&batches[batch->idx], NULL);
      }
      assert(rsc->batch_mask == 0);
   } else if (write_batch) {
      fd_batch_flush(write_batch);
   }

   fd_batch_reference(&write_batch, NULL);

   assert(!rsc->write_batch);
}

static void
fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
   flush_resource(fd_context(pctx), fd_resource(prsc), PIPE_TRANSFER_READ);
}

static void
fd_resource_transfer_unmap(struct pipe_context *pctx,
      struct pipe_transfer *ptrans)
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_resource *rsc = fd_resource(ptrans->resource);
   struct fd_transfer *trans = fd_transfer(ptrans);

   if (trans->staging_prsc) {
      if (ptrans->usage & PIPE_TRANSFER_WRITE)
         fd_blit_from_staging(ctx, trans);
      pipe_resource_reference(&trans->staging_prsc, NULL);
   }

   if (!(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      fd_bo_cpu_fini(rsc->bo);
   }

   util_range_add(&rsc->base, &rsc->valid_buffer_range,
         ptrans->box.x,
         ptrans->box.x + ptrans->box.width);

   pipe_resource_reference(&ptrans->resource, NULL);
   slab_free(&ctx->transfer_pool, ptrans);
}

static void *
fd_resource_transfer_map(struct pipe_context *pctx,
      struct pipe_resource *prsc,
      unsigned level, unsigned usage,
      const struct pipe_box *box,
      struct pipe_transfer **pptrans)
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_resource *rsc = fd_resource(prsc);
   struct fd_transfer *trans;
   struct pipe_transfer *ptrans;
   enum pipe_format format = prsc->format;
   uint32_t op = 0;
   uint32_t offset;
   char *buf;
   int ret = 0;

   DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
       box->width, box->height, box->x, box->y);

   if ((usage & PIPE_TRANSFER_MAP_DIRECTLY) && rsc->layout.tile_mode) {
      DBG("CANNOT MAP DIRECTLY!\n");
      return NULL;
   }

   ptrans = slab_alloc(&ctx->transfer_pool);
   if (!ptrans)
      return NULL;

   /* slab_alloc_st() doesn't zero: */
   trans = fd_transfer(ptrans);
   memset(trans, 0, sizeof(*trans));

   pipe_resource_reference(&ptrans->resource, prsc);
   ptrans->level = level;
   ptrans->usage = usage;
   ptrans->box = *box;
   ptrans->stride = fd_resource_pitch(rsc, level);
   ptrans->layer_stride = fd_resource_layer_stride(rsc, level);

   /* we always need a staging texture for tiled buffers:
    *
    * TODO we might sometimes want to *also* shadow the resource to avoid
    * splitting a batch.. for ex, mid-frame texture uploads to a tiled
    * texture.
    */
   if (rsc->layout.tile_mode) {
      struct fd_resource *staging_rsc;

      staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
      if (staging_rsc) {
         // TODO for PIPE_TRANSFER_READ, need to do untiling blit..
         trans->staging_prsc = &staging_rsc->base;
         trans->base.stride = fd_resource_pitch(staging_rsc, 0);
         trans->base.layer_stride = fd_resource_layer_stride(staging_rsc, 0);
         trans->staging_box = *box;
         trans->staging_box.x = 0;
         trans->staging_box.y = 0;
         trans->staging_box.z = 0;

         if (usage & PIPE_TRANSFER_READ) {
            fd_blit_to_staging(ctx, trans);

            fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe,
                  DRM_FREEDRENO_PREP_READ);
         }

         buf = fd_bo_map(staging_rsc->bo);
         offset = 0;

         *pptrans = ptrans;

         ctx->stats.staging_uploads++;

         return buf;
      }
   }

   if (ctx->in_shadow && !(usage & PIPE_TRANSFER_READ))
      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

   if (usage & PIPE_TRANSFER_READ)
      op |= DRM_FREEDRENO_PREP_READ;

   if (usage & PIPE_TRANSFER_WRITE)
      op |= DRM_FREEDRENO_PREP_WRITE;

   bool needs_flush = pending(rsc, !!(usage & PIPE_TRANSFER_WRITE));

   if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
      if (needs_flush || fd_resource_busy(rsc, op)) {
         rebind_resource(rsc);
         realloc_bo(rsc, fd_bo_size(rsc->bo));
      }
   } else if ((usage & PIPE_TRANSFER_WRITE) &&
         prsc->target == PIPE_BUFFER &&
         !util_ranges_intersect(&rsc->valid_buffer_range,
               box->x, box->x + box->width)) {
      /* We are trying to write to a previously uninitialized range. No need
       * to wait.
       */
   } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      struct fd_batch *write_batch = NULL;

      /* hold a reference, so it doesn't disappear under us: */
      fd_context_lock(ctx);
      fd_batch_reference_locked(&write_batch, rsc->write_batch);
      fd_context_unlock(ctx);

      if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
            write_batch->back_blit) {
         /* if only thing pending is a back-blit, we can discard it: */
         fd_batch_reset(write_batch);
      }

      /* If the GPU is writing to the resource, or if it is reading from the
       * resource and we're trying to write to it, flush the renders.
       */
      bool busy = needs_flush || fd_resource_busy(rsc, op);

      /* if we need to flush/stall, see if we can make a shadow buffer
       * to avoid this:
       *
       * TODO we could go down this path !reorder && !busy_for_read
       * ie. we only *don't* want to go down this path if the blit
       * will trigger a flush!
       */
      if (ctx->screen->reorder && busy && !(usage & PIPE_TRANSFER_READ) &&
            (usage & PIPE_TRANSFER_DISCARD_RANGE)) {
         /* try shadowing only if it avoids a flush, otherwise staging would
          * be better:
          */
         if (needs_flush && fd_try_shadow_resource(ctx, rsc, level,
               box, DRM_FORMAT_MOD_LINEAR)) {
            needs_flush = busy = false;
            ctx->stats.shadow_uploads++;
         } else {
            struct fd_resource *staging_rsc;

            if (needs_flush) {
               flush_resource(ctx, rsc, usage);
               needs_flush = false;
            }

            /* in this case, we don't need to shadow the whole resource,
             * since any draw that references the previous contents has
             * already had rendering flushed for all tiles.  So we can
             * use a staging buffer to do the upload.
             */
            staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
            if (staging_rsc) {
               trans->staging_prsc = &staging_rsc->base;
               trans->base.stride = fd_resource_pitch(staging_rsc, 0);
               trans->base.layer_stride =
                  fd_resource_layer_stride(staging_rsc, 0);
               trans->staging_box = *box;
               trans->staging_box.x = 0;
               trans->staging_box.y = 0;
               trans->staging_box.z = 0;
               buf = fd_bo_map(staging_rsc->bo);
               offset = 0;

               *pptrans = ptrans;

               fd_batch_reference(&write_batch, NULL);

               ctx->stats.staging_uploads++;

               return buf;
            }
         }
      }

      if (needs_flush) {
         flush_resource(ctx, rsc, usage);
         needs_flush = false;
      }

      fd_batch_reference(&write_batch, NULL);

      /* The GPU keeps track of how the various bo's are being used, and
       * will wait if necessary for the proper operation to have
       * completed.
       */
      if (busy) {
         ret = fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);
         if (ret)
            goto fail;
      }
   }

   buf = fd_bo_map(rsc->bo);
   offset =
      box->y / util_format_get_blockheight(format) * ptrans->stride +
      box->x / util_format_get_blockwidth(format) * rsc->layout.cpp +
      fd_resource_offset(rsc, level, box->z);

   if (usage & PIPE_TRANSFER_WRITE)
      rsc->valid = true;

   *pptrans = ptrans;

   return buf + offset;

fail:
   fd_resource_transfer_unmap(pctx, ptrans);
   return NULL;
}

static void
fd_resource_destroy(struct pipe_screen *pscreen,
      struct pipe_resource *prsc)
{
   struct fd_resource *rsc = fd_resource(prsc);
   fd_bc_invalidate_resource(rsc, true);
   if (rsc->bo)
      fd_bo_del(rsc->bo);
   if (rsc->scanout)
      renderonly_scanout_destroy(rsc->scanout, fd_screen(pscreen)->ro);

   util_range_destroy(&rsc->valid_buffer_range);
   simple_mtx_destroy(&rsc->lock);
   FREE(rsc);
}

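/* Map the resource's layout onto a drm format modifier, for export via
 * resource_get_handle():
 */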
static uint64_t
fd_resource_modifier(struct fd_resource *rsc)
{
   if (!rsc->layout.tile_mode)
      return DRM_FORMAT_MOD_LINEAR;

   if (rsc->layout.ubwc_layer_size)
      return DRM_FORMAT_MOD_QCOM_COMPRESSED;

   /* TODO invent a modifier for tiled but not UBWC buffers: */
   return DRM_FORMAT_MOD_INVALID;
}

static bool
fd_resource_get_handle(struct pipe_screen *pscreen,
      struct pipe_context *pctx,
      struct pipe_resource *prsc,
      struct winsys_handle *handle,
      unsigned usage)
{
   struct fd_resource *rsc = fd_resource(prsc);

   handle->modifier = fd_resource_modifier(rsc);

   return fd_screen_bo_get_handle(pscreen, rsc->bo, rsc->scanout,
         fd_resource_pitch(rsc, 0), handle);
}

/* special case to resize query buf after allocated.. */
void
fd_resource_resize(struct pipe_resource *prsc, uint32_t sz)
{
   struct fd_resource *rsc = fd_resource(prsc);

   debug_assert(prsc->width0 == 0);
   debug_assert(prsc->target == PIPE_BUFFER);
   debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);

   prsc->width0 = sz;
   realloc_bo(rsc, fd_screen(prsc->screen)->setup_slices(rsc));
}

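/* Seed the common fdl_layout fields from the resource template; the
 * per-generation backend's setup_slices() fills in the rest.
 */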
static void
fd_resource_layout_init(struct pipe_resource *prsc)
{
   struct fd_resource *rsc = fd_resource(prsc);
   struct fdl_layout *layout = &rsc->layout;

   layout->format = prsc->format;

   layout->width0 = prsc->width0;
   layout->height0 = prsc->height0;
   layout->depth0 = prsc->depth0;

   layout->cpp = util_format_get_blocksize(prsc->format);
   layout->cpp *= fd_resource_nr_samples(prsc);
   layout->cpp_shift = ffs(layout->cpp) - 1;
}

/**
 * Helper that allocates a resource and resolves its layout (but doesn't
 * allocate its bo).
 *
 * It returns a pipe_resource (as fd_resource_create_with_modifiers()
 * would do), and also the bo's minimum required size as an output argument.
 */
static struct pipe_resource *
fd_resource_allocate_and_resolve(struct pipe_screen *pscreen,
      const struct pipe_resource *tmpl,
      const uint64_t *modifiers, int count, uint32_t *psize)
{
   struct fd_screen *screen = fd_screen(pscreen);
   struct fd_resource *rsc;
   struct pipe_resource *prsc;
   enum pipe_format format = tmpl->format;
   uint32_t size;

   rsc = CALLOC_STRUCT(fd_resource);
   prsc = &rsc->base;

   DBG("%p: target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
       "nr_samples=%u, usage=%u, bind=%x, flags=%x", prsc,
       tmpl->target, util_format_name(format),
       tmpl->width0, tmpl->height0, tmpl->depth0,
       tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
       tmpl->usage, tmpl->bind, tmpl->flags);

   if (!rsc)
      return NULL;

   *prsc = *tmpl;
   fd_resource_layout_init(prsc);

#define LINEAR \
   (PIPE_BIND_SCANOUT | \
    PIPE_BIND_LINEAR  | \
    PIPE_BIND_DISPLAY_TARGET)

   bool linear = drm_find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count);
   if (tmpl->bind & LINEAR)
      linear = true;

   if (fd_mesa_debug & FD_DBG_NOTILE)
      linear = true;

   /* Normally allow buffer compression for non-shared buffers; for shared
    * buffers, only allow it if the QCOM_COMPRESSED modifier is requested:
    *
    * TODO we should probably also limit tiled in a similar way,
    * except we don't have a format modifier for tiled.  (We probably
    * should.)
    */
   bool allow_ubwc = drm_find_modifier(DRM_FORMAT_MOD_INVALID, modifiers, count);
   if (tmpl->bind & PIPE_BIND_SHARED)
      allow_ubwc = drm_find_modifier(DRM_FORMAT_MOD_QCOM_COMPRESSED, modifiers, count);

   allow_ubwc &= !(fd_mesa_debug & FD_DBG_NOUBWC);

   pipe_reference_init(&prsc->reference, 1);

   prsc->screen = pscreen;

   if (screen->tile_mode &&
         (tmpl->target != PIPE_BUFFER) &&
         !linear) {
      rsc->layout.tile_mode = screen->tile_mode(prsc);
   }

   util_range_init(&rsc->valid_buffer_range);

   simple_mtx_init(&rsc->lock, mtx_plain);

   rsc->internal_format = format;

   rsc->layout.ubwc = rsc->layout.tile_mode && is_a6xx(screen) && allow_ubwc;

   if (prsc->target == PIPE_BUFFER) {
      assert(prsc->format == PIPE_FORMAT_R8_UNORM);
      size = prsc->width0;
      fdl_layout_buffer(&rsc->layout, size);
   } else {
      size = screen->setup_slices(rsc);
   }

   /* special case for hw-query buffer, which we need to allocate before we
    * know the size:
    */
   if (size == 0) {
      /* note, semi-intentionally == instead of & */
      debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
      return prsc;
   }

   /* Set the layer size if the (non-a6xx) backend hasn't done so. */
   if (rsc->layout.layer_first && !rsc->layout.layer_size) {
      rsc->layout.layer_size = align(size, 4096);
      size = rsc->layout.layer_size * prsc->array_size;
   }

   if (fd_mesa_debug & FD_DBG_LAYOUT)
      fdl_dump_layout(&rsc->layout);

   /* Hand out the resolved size. */
   if (psize)
      *psize = size;

   return prsc;
}

/**
 * Create a new texture object, using the given template info.
 */
static struct pipe_resource *
fd_resource_create_with_modifiers(struct pipe_screen *pscreen,
      const struct pipe_resource *tmpl,
      const uint64_t *modifiers, int count)
{
   struct fd_screen *screen = fd_screen(pscreen);
   struct fd_resource *rsc;
   struct pipe_resource *prsc;
   uint32_t size;

   /* when using kmsro, scanout buffers are allocated on the display device
    * create_with_modifiers() doesn't give us usage flags, so we have to
    * assume that all calls with modifiers are scanout-possible
    */
   if (screen->ro &&
         ((tmpl->bind & PIPE_BIND_SCANOUT) ||
          !(count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID))) {
      struct pipe_resource scanout_templat = *tmpl;
      struct renderonly_scanout *scanout;
      struct winsys_handle handle;

      /* note: alignment is wrong for a6xx */
      scanout_templat.width0 = align(tmpl->width0, screen->gmem_alignw);

      scanout = renderonly_scanout_for_resource(&scanout_templat,
            screen->ro, &handle);
      if (!scanout)
         return NULL;

      renderonly_scanout_destroy(scanout, screen->ro);

      assert(handle.type == WINSYS_HANDLE_TYPE_FD);
      rsc = fd_resource(pscreen->resource_from_handle(pscreen, tmpl,
            &handle,
            PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE));
      close(handle.handle);
      if (!rsc)
         return NULL;

      return &rsc->base;
   }

   prsc = fd_resource_allocate_and_resolve(pscreen, tmpl, modifiers, count, &size);
   if (!prsc)
      return NULL;
   rsc = fd_resource(prsc);

   realloc_bo(rsc, size);
   if (!rsc->bo)
      goto fail;

   return prsc;
fail:
   fd_resource_destroy(pscreen, prsc);
   return NULL;
}

static struct pipe_resource *
fd_resource_create(struct pipe_screen *pscreen,
      const struct pipe_resource *tmpl)
{
   const uint64_t mod = DRM_FORMAT_MOD_INVALID;
   return fd_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
}

/**
 * Create a texture from a winsys_handle.  The handle is often created in
 * another process by first creating a pipe texture and then calling
 * resource_get_handle.
 */
static struct pipe_resource *
fd_resource_from_handle(struct pipe_screen *pscreen,
      const struct pipe_resource *tmpl,
      struct winsys_handle *handle, unsigned usage)
{
   struct fd_screen *screen = fd_screen(pscreen);
   struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
   struct fdl_slice *slice = fd_resource_slice(rsc, 0);
   struct pipe_resource *prsc = &rsc->base;

   DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
       "nr_samples=%u, usage=%u, bind=%x, flags=%x",
       tmpl->target, util_format_name(tmpl->format),
       tmpl->width0, tmpl->height0, tmpl->depth0,
       tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
       tmpl->usage, tmpl->bind, tmpl->flags);

   if (!rsc)
      return NULL;

   *prsc = *tmpl;
   fd_resource_layout_init(prsc);

   pipe_reference_init(&prsc->reference, 1);

   prsc->screen = pscreen;

   util_range_init(&rsc->valid_buffer_range);

   simple_mtx_init(&rsc->lock, mtx_plain);

   rsc->bo = fd_screen_bo_from_handle(pscreen, handle);
   if (!rsc->bo)
      goto fail;

   rsc->internal_format = tmpl->format;
   rsc->layout.pitch0 = handle->stride;
   slice->offset = handle->offset;
   slice->size0 = handle->stride * prsc->height0;

   /* use a pitchalign of gmem_alignw pixels, because GMEM resolve for
    * lower alignments is not implemented (but possible for a6xx at least)
    *
    * for UBWC-enabled resources, layout_resource_for_modifier will further
    * validate the pitch and set the right pitchalign
    */
   rsc->layout.pitchalign =
      fdl_cpp_shift(&rsc->layout) + util_logbase2(screen->gmem_alignw);

   /* apply the minimum pitchalign (note: actually 4 for a3xx but doesn't matter) */
   if (is_a6xx(screen) || is_a5xx(screen))
      rsc->layout.pitchalign = MAX2(rsc->layout.pitchalign, 6);
   else
      rsc->layout.pitchalign = MAX2(rsc->layout.pitchalign, 5);

   if (rsc->layout.pitch0 < (prsc->width0 * rsc->layout.cpp) ||
         fd_resource_pitch(rsc, 0) != rsc->layout.pitch0)
      goto fail;

   assert(rsc->layout.cpp);

   if (screen->layout_resource_for_modifier(rsc, handle->modifier) < 0)
      goto fail;

   if (screen->ro) {
      rsc->scanout =
         renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
      /* failure is expected in some cases.. */
   }

   rsc->valid = true;

   return prsc;

fail:
   fd_resource_destroy(pscreen, prsc);
   return NULL;
}

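/* Check the active conditional-render query (if any); returns true if
 * rendering should proceed.
 */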
bool
fd_render_condition_check(struct pipe_context *pctx)
{
   struct fd_context *ctx = fd_context(pctx);

   if (!ctx->cond_query)
      return true;

   union pipe_query_result res = { 0 };
   bool wait =
      ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
      ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

   if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
      return (bool)res.u64 != ctx->cond_cond;

   return true;
}

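/* The frontend tells us the resource contents are no longer needed, so
 * drop pending resolve (tile->mem) work for any batch framebuffer
 * attachment pointing at the resource, and mark it invalid.
 */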
static void
fd_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_resource *rsc = fd_resource(prsc);

   /*
    * TODO I guess we could track that the resource is invalidated and
    * use that as a hint to realloc rather than stall in _transfer_map(),
    * even in the non-DISCARD_WHOLE_RESOURCE case?
    *
    * Note: we set dirty bits to trigger invalidate logic fd_draw_vbo
    */

   if (rsc->write_batch) {
      struct fd_batch *batch = rsc->write_batch;
      struct pipe_framebuffer_state *pfb = &batch->framebuffer;

      if (pfb->zsbuf && pfb->zsbuf->texture == prsc) {
         batch->resolve &= ~(FD_BUFFER_DEPTH | FD_BUFFER_STENCIL);
         ctx->dirty |= FD_DIRTY_ZSA;
      }

      for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
         if (pfb->cbufs[i] && pfb->cbufs[i]->texture == prsc) {
            batch->resolve &= ~(PIPE_CLEAR_COLOR0 << i);
            ctx->dirty |= FD_DIRTY_FRAMEBUFFER;
         }
      }
   }

   rsc->valid = false;
}

static enum pipe_format
fd_resource_get_internal_format(struct pipe_resource *prsc)
{
   return fd_resource(prsc)->internal_format;
}

static void
fd_resource_set_stencil(struct pipe_resource *prsc,
      struct pipe_resource *stencil)
{
   fd_resource(prsc)->stencil = fd_resource(stencil);
}

static struct pipe_resource *
fd_resource_get_stencil(struct pipe_resource *prsc)
{
   struct fd_resource *rsc = fd_resource(prsc);
   if (rsc->stencil)
      return &rsc->stencil->base;
   return NULL;
}

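/* Driver hooks handed to u_transfer_helper, which wraps them to implement
 * things like Z32F_S8 splitting, fake RGTC, and MSAA maps on top of them:
 */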
static const struct u_transfer_vtbl transfer_vtbl = {
   .resource_create          = fd_resource_create,
   .resource_destroy         = fd_resource_destroy,
   .transfer_map             = fd_resource_transfer_map,
   .transfer_flush_region    = fd_resource_transfer_flush_region,
   .transfer_unmap           = fd_resource_transfer_unmap,
   .get_internal_format      = fd_resource_get_internal_format,
   .set_stencil              = fd_resource_set_stencil,
   .get_stencil              = fd_resource_get_stencil,
};

static const uint64_t supported_modifiers[] = {
   DRM_FORMAT_MOD_LINEAR,
};

static int
fd_layout_resource_for_modifier(struct fd_resource *rsc, uint64_t modifier)
{
   switch (modifier) {
   case DRM_FORMAT_MOD_LINEAR:
      /* The dri gallium frontend will pass DRM_FORMAT_MOD_INVALID to us
       * when it's called through any of the non-modifier BO create entry
       * points.  Other drivers will determine tiling from the kernel or
       * other legacy backchannels, but for freedreno it just means
       * LINEAR. */
   case DRM_FORMAT_MOD_INVALID:
      return 0;
   default:
      return -1;
   }
}

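/* GL_EXT_memory_object: create a resource on top of an externally
 * allocated memory object, re-using its bo.
 */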
static struct pipe_resource *
fd_resource_from_memobj(struct pipe_screen *pscreen,
      const struct pipe_resource *tmpl,
      struct pipe_memory_object *pmemobj,
      uint64_t offset)
{
   struct fd_screen *screen = fd_screen(pscreen);
   struct fd_memory_object *memobj = fd_memory_object(pmemobj);
   struct pipe_resource *prsc;
   struct fd_resource *rsc;
   uint32_t size;
   assert(memobj->bo);

   /* We shouldn't get a scanout buffer here. */
   assert(!(tmpl->bind & PIPE_BIND_SCANOUT));

   uint64_t modifiers = DRM_FORMAT_MOD_INVALID;
   if (tmpl->bind & PIPE_BIND_LINEAR) {
      modifiers = DRM_FORMAT_MOD_LINEAR;
   } else if (is_a6xx(screen) && tmpl->width0 >= FDL_MIN_UBWC_WIDTH) {
      modifiers = DRM_FORMAT_MOD_QCOM_COMPRESSED;
   }

   /* Allocate new pipe resource. */
   prsc = fd_resource_allocate_and_resolve(pscreen, tmpl, &modifiers, 1, &size);
   if (!prsc)
      return NULL;
   rsc = fd_resource(prsc);

   /* bo's size has to be large enough, otherwise cleanup resource and fail
    * gracefully.
    */
   if (fd_bo_size(memobj->bo) < size) {
      fd_resource_destroy(pscreen, prsc);
      return NULL;
   }

   /* Share the bo with the memory object. */
   rsc->bo = fd_bo_ref(memobj->bo);

   return prsc;
}

static struct pipe_memory_object *
fd_memobj_create_from_handle(struct pipe_screen *pscreen,
      struct winsys_handle *whandle,
      bool dedicated)
{
   struct fd_memory_object *memobj = CALLOC_STRUCT(fd_memory_object);
   if (!memobj)
      return NULL;

   struct fd_bo *bo = fd_screen_bo_from_handle(pscreen, whandle);
   if (!bo) {
      free(memobj);
      return NULL;
   }

   memobj->b.dedicated = dedicated;
   memobj->bo = bo;

   return &memobj->b;
}

static void
fd_memobj_destroy(struct pipe_screen *pscreen,
      struct pipe_memory_object *pmemobj)
{
   struct fd_memory_object *memobj = fd_memory_object(pmemobj);

   assert(memobj->bo);
   fd_bo_del(memobj->bo);

   free(pmemobj);
}

void
fd_resource_screen_init(struct pipe_screen *pscreen)
{
   struct fd_screen *screen = fd_screen(pscreen);
   bool fake_rgtc = screen->gpu_id < 400;

   pscreen->resource_create = u_transfer_helper_resource_create;
   /* NOTE: u_transfer_helper does not yet support the _with_modifiers()
    * variant:
    */
   pscreen->resource_create_with_modifiers = fd_resource_create_with_modifiers;
   pscreen->resource_from_handle = fd_resource_from_handle;
   pscreen->resource_get_handle = fd_resource_get_handle;
   pscreen->resource_destroy = u_transfer_helper_resource_destroy;

   pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
         true, false, fake_rgtc, true);

   if (!screen->layout_resource_for_modifier)
      screen->layout_resource_for_modifier = fd_layout_resource_for_modifier;
   if (!screen->supported_modifiers) {
      screen->supported_modifiers = supported_modifiers;
      screen->num_supported_modifiers = ARRAY_SIZE(supported_modifiers);
   }

   /* GL_EXT_memory_object */
   pscreen->memobj_create_from_handle = fd_memobj_create_from_handle;
   pscreen->memobj_destroy = fd_memobj_destroy;
   pscreen->resource_from_memobj = fd_resource_from_memobj;
}

static void
fd_get_sample_position(struct pipe_context *context,
      unsigned sample_count, unsigned sample_index,
      float *pos_out)
{
   /* The following is copied from nouveau/nv50 except for position
    * values, which are taken from blob driver */
   static const uint8_t pos1[1][2] = { { 0x8, 0x8 } };
   static const uint8_t pos2[2][2] = {
      { 0xc, 0xc }, { 0x4, 0x4 } };
   static const uint8_t pos4[4][2] = {
      { 0x6, 0x2 }, { 0xe, 0x6 },
      { 0x2, 0xa }, { 0xa, 0xe } };
   /* TODO needs to be verified on supported hw */
   static const uint8_t pos8[8][2] = {
      { 0x9, 0x5 }, { 0x7, 0xb },
      { 0xd, 0x9 }, { 0x5, 0x3 },
      { 0x3, 0xd }, { 0x1, 0x7 },
      { 0xb, 0xf }, { 0xf, 0x1 } };

   const uint8_t (*ptr)[2];

   switch (sample_count) {
   case 1:
      ptr = pos1;
      break;
   case 2:
      ptr = pos2;
      break;
   case 4:
      ptr = pos4;
      break;
   case 8:
      ptr = pos8;
      break;
   default:
      assert(0);
      return;
   }

   pos_out[0] = ptr[sample_index][0] / 16.0f;
   pos_out[1] = ptr[sample_index][1] / 16.0f;
}

static void
fd_blit_pipe(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
{
   /* wrap fd_blit to return void */
   fd_blit(pctx, blit_info);
}

void
fd_resource_context_init(struct pipe_context *pctx)
{
   pctx->transfer_map = u_transfer_helper_transfer_map;
   pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
   pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
   pctx->buffer_subdata = u_default_buffer_subdata;
   pctx->texture_subdata = u_default_texture_subdata;
   pctx->create_surface = fd_create_surface;
   pctx->surface_destroy = fd_surface_destroy;
   pctx->resource_copy_region = fd_resource_copy_region;
   pctx->blit = fd_blit_pipe;
   pctx->flush_resource = fd_flush_resource;
   pctx->invalidate_resource = fd_invalidate_resource;
   pctx->get_sample_position = fd_get_sample_position;
}