freedreno: Work around UBWC flakiness.
src/gallium/drivers/freedreno/freedreno_resource.c
/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
26
27 #include "util/format/u_format.h"
28 #include "util/format/u_format_rgtc.h"
29 #include "util/format/u_format_zs.h"
30 #include "util/u_inlines.h"
31 #include "util/u_transfer.h"
32 #include "util/u_string.h"
33 #include "util/u_surface.h"
34 #include "util/set.h"
35 #include "util/u_drm.h"
36
37 #include "freedreno_resource.h"
38 #include "freedreno_batch_cache.h"
39 #include "freedreno_blitter.h"
40 #include "freedreno_fence.h"
41 #include "freedreno_screen.h"
42 #include "freedreno_surface.h"
43 #include "freedreno_context.h"
44 #include "freedreno_query_hw.h"
45 #include "freedreno_util.h"
46
47 #include "drm-uapi/drm_fourcc.h"
48 #include <errno.h>
49
/* XXX this should go away, needed for 'struct winsys_handle' */
#include "state_tracker/drm_driver.h"

/* A private modifier for now, so we have a way to request tiled but not
 * compressed. It would perhaps be good to get real modifiers for the
 * tiled formats, but would probably need to do some work to figure out
 * the layout(s) of the tiled modes, and whether they are the same
 * across generations.
 */
#define FD_FORMAT_MOD_QCOM_TILED fourcc_mod_code(QCOM, 0xffffffff)

/**
 * Go through the entire state and see if the resource is bound
 * anywhere. If it is, mark the relevant state as dirty. This is
 * called on realloc_bo to ensure the necessary state is re-
 * emitted so the GPU looks at the new backing bo.
 */
static void
rebind_resource(struct fd_context *ctx, struct pipe_resource *prsc)
{
	/* VBOs */
	for (unsigned i = 0; i < ctx->vtx.vertexbuf.count && !(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
		if (ctx->vtx.vertexbuf.vb[i].buffer.resource == prsc)
			ctx->dirty |= FD_DIRTY_VTXBUF;
	}

	/* per-shader-stage resources: */
	for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
		/* Constbufs.. note that constbuf[0] is normal uniforms emitted in
		 * cmdstream rather than by pointer..
		 */
		const unsigned num_ubos = util_last_bit(ctx->constbuf[stage].enabled_mask);
		for (unsigned i = 1; i < num_ubos; i++) {
			if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_CONST)
				break;
			if (ctx->constbuf[stage].cb[i].buffer == prsc)
				ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_CONST;
		}

		/* Textures */
		for (unsigned i = 0; i < ctx->tex[stage].num_textures; i++) {
			if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_TEX)
				break;
			if (ctx->tex[stage].textures[i] && (ctx->tex[stage].textures[i]->texture == prsc))
				ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_TEX;
		}

		/* Images */
		const unsigned num_images = util_last_bit(ctx->shaderimg[stage].enabled_mask);
		for (unsigned i = 0; i < num_images; i++) {
			if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_IMAGE)
				break;
			if (ctx->shaderimg[stage].si[i].resource == prsc)
				ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_IMAGE;
		}

		/* SSBOs */
		const unsigned num_ssbos = util_last_bit(ctx->shaderbuf[stage].enabled_mask);
		for (unsigned i = 0; i < num_ssbos; i++) {
			if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_SSBO)
				break;
			if (ctx->shaderbuf[stage].sb[i].buffer == prsc)
				ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_SSBO;
		}
	}
}

static void
realloc_bo(struct fd_resource *rsc, uint32_t size)
{
	struct pipe_resource *prsc = &rsc->base;
	struct fd_screen *screen = fd_screen(rsc->base.screen);
	uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
			DRM_FREEDRENO_GEM_TYPE_KMEM |
			COND(prsc->bind & PIPE_BIND_SCANOUT, DRM_FREEDRENO_GEM_SCANOUT);
			/* TODO other flags? */

	/* if we start using things other than write-combine,
	 * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
	 */

	if (rsc->bo)
		fd_bo_del(rsc->bo);

	rsc->bo = fd_bo_new(screen->dev, size, flags, "%ux%ux%u@%u:%x",
			prsc->width0, prsc->height0, prsc->depth0, rsc->layout.cpp, prsc->bind);

	/* Zero out the UBWC area on allocation. This fixes intermittent failures
	 * with UBWC, which I suspect are due to the HW having a hard time
	 * interpreting arbitrary values populating the flags buffer when the BO
	 * was recycled through the bo cache (instead of fresh allocations from
	 * the kernel, which are zeroed). sleep(1) in this spot didn't work
	 * around the issue, but any memset value seems to.
	 */
	if (rsc->layout.ubwc) {
		void *buf = fd_bo_map(rsc->bo);
		memset(buf, 0, rsc->layout.slices[0].offset);
	}

	rsc->seqno = p_atomic_inc_return(&screen->rsc_seqno);
	util_range_set_empty(&rsc->valid_buffer_range);
	fd_bc_invalidate_resource(rsc, true);
}

static void
do_blit(struct fd_context *ctx, const struct pipe_blit_info *blit, bool fallback)
{
	struct pipe_context *pctx = &ctx->base;

	/* TODO size threshold too?? */
	if (fallback || !fd_blit(pctx, blit)) {
		/* do blit on cpu: */
		util_resource_copy_region(pctx,
				blit->dst.resource, blit->dst.level, blit->dst.box.x,
				blit->dst.box.y, blit->dst.box.z,
				blit->src.resource, blit->src.level, &blit->src.box);
	}
}

/**
 * @rsc: the resource to shadow
 * @level: the level to discard (if box != NULL, otherwise ignored)
 * @box: the box to discard (or NULL if none)
 * @modifier: the modifier for the new buffer state
 */
static bool
fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
		unsigned level, const struct pipe_box *box, uint64_t modifier)
{
	struct pipe_context *pctx = &ctx->base;
	struct pipe_resource *prsc = &rsc->base;
	bool fallback = false;

	if (prsc->next)
		return false;

	/* TODO: somehow munge dimensions and format to copy unsupported
	 * render target format to something that is supported?
	 */
	if (!pctx->screen->is_format_supported(pctx->screen,
			prsc->format, prsc->target, prsc->nr_samples,
			prsc->nr_storage_samples,
			PIPE_BIND_RENDER_TARGET))
		fallback = true;

	/* do shadowing back-blits on the cpu for buffers: */
	if (prsc->target == PIPE_BUFFER)
		fallback = true;

	bool discard_whole_level = box && util_texrange_covers_whole_level(prsc, level,
			box->x, box->y, box->z, box->width, box->height, box->depth);

	/* TODO need to be more clever about current level */
	if ((prsc->target >= PIPE_TEXTURE_2D) && box && !discard_whole_level)
		return false;

	struct pipe_resource *pshadow =
		pctx->screen->resource_create_with_modifiers(pctx->screen,
				prsc, &modifier, 1);

	if (!pshadow)
		return false;

	assert(!ctx->in_shadow);
	ctx->in_shadow = true;

	/* get rid of any references that batch-cache might have to us (which
	 * should empty/destroy rsc->batches hashset)
	 */
	fd_bc_invalidate_resource(rsc, false);

	mtx_lock(&ctx->screen->lock);

	/* Swap the backing bo's, so shadow becomes the old buffer,
	 * blit from shadow to new buffer. From here on out, we
	 * cannot fail.
	 *
	 * Note that we need to do it in this order, otherwise if
	 * we go down cpu blit path, the recursive transfer_map()
	 * sees the wrong status..
	 */
	struct fd_resource *shadow = fd_resource(pshadow);

	DBG("shadow: %p (%d) -> %p (%d)\n", rsc, rsc->base.reference.count,
			shadow, shadow->base.reference.count);

	/* TODO valid_buffer_range?? */
	swap(rsc->bo, shadow->bo);
	swap(rsc->write_batch, shadow->write_batch);
	swap(rsc->layout, shadow->layout);
	rsc->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);

	/* at this point, the newly created shadow buffer is not referenced
	 * by any batches, but the existing rsc (probably) is. We need to
	 * transfer those references over:
	 */
	debug_assert(shadow->batch_mask == 0);
	struct fd_batch *batch;
	foreach_batch(batch, &ctx->screen->batch_cache, rsc->batch_mask) {
		struct set_entry *entry = _mesa_set_search(batch->resources, rsc);
		_mesa_set_remove(batch->resources, entry);
		_mesa_set_add(batch->resources, shadow);
	}
	swap(rsc->batch_mask, shadow->batch_mask);

	mtx_unlock(&ctx->screen->lock);

	struct pipe_blit_info blit = {};
	blit.dst.resource = prsc;
	blit.dst.format = prsc->format;
	blit.src.resource = pshadow;
	blit.src.format = pshadow->format;
	blit.mask = util_format_get_mask(prsc->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

#define set_box(field, val) do { \
		blit.dst.field = (val); \
		blit.src.field = (val); \
	} while (0)

	/* blit the other levels in their entirety: */
	for (unsigned l = 0; l <= prsc->last_level; l++) {
		if (box && l == level)
			continue;

		/* just blit whole level: */
		set_box(level, l);
		set_box(box.width, u_minify(prsc->width0, l));
		set_box(box.height, u_minify(prsc->height0, l));
		set_box(box.depth, u_minify(prsc->depth0, l));

		for (int i = 0; i < prsc->array_size; i++) {
			set_box(box.z, i);
			do_blit(ctx, &blit, fallback);
		}
	}

	/* deal w/ current level specially, since we might need to split
	 * it up into a couple blits:
	 */
	if (box && !discard_whole_level) {
		set_box(level, level);

		switch (prsc->target) {
		case PIPE_BUFFER:
		case PIPE_TEXTURE_1D:
			set_box(box.y, 0);
			set_box(box.z, 0);
			set_box(box.height, 1);
			set_box(box.depth, 1);

			if (box->x > 0) {
				set_box(box.x, 0);
				set_box(box.width, box->x);

				do_blit(ctx, &blit, fallback);
			}
			if ((box->x + box->width) < u_minify(prsc->width0, level)) {
				set_box(box.x, box->x + box->width);
				set_box(box.width, u_minify(prsc->width0, level) - (box->x + box->width));

				do_blit(ctx, &blit, fallback);
			}
			break;
		case PIPE_TEXTURE_2D:
			/* TODO */
		default:
			unreachable("TODO");
		}
	}

	ctx->in_shadow = false;

	pipe_resource_reference(&pshadow, NULL);

	return true;
}

/**
 * Uncompress a UBWC-compressed buffer "in place". This works basically
 * like resource shadowing: create a new resource, do an uncompress
 * blit, and swap the state between the shadow and the original resource,
 * so it appears to the state tracker as if nothing changed.
 */
void
fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc)
{
	bool success =
		fd_try_shadow_resource(ctx, rsc, 0, NULL, FD_FORMAT_MOD_QCOM_TILED);

	/* shadow should not fail in any cases where we need to uncompress: */
	debug_assert(success);

	/*
	 * TODO what if rsc is used in other contexts, we don't currently
	 * have a good way to rebind_resource() in other contexts. And an
	 * app that is reading one resource in multiple contexts isn't
	 * going to expect that the resource is modified.
	 *
	 * Hopefully the edge cases where we need to uncompress are rare
	 * enough that they mostly only show up in deqp.
	 */

	rebind_resource(ctx, &rsc->base);
}

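/* Allocate a CPU-accessible linear staging resource matching the given
 * level/box of rsc, used below to stage uploads/downloads for tiled or
 * GPU-busy resources:
 */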
static struct fd_resource *
fd_alloc_staging(struct fd_context *ctx, struct fd_resource *rsc,
		unsigned level, const struct pipe_box *box)
{
	struct pipe_context *pctx = &ctx->base;
	struct pipe_resource tmpl = rsc->base;

	tmpl.width0 = box->width;
	tmpl.height0 = box->height;
	/* for array textures, box->depth is the array_size, otherwise
	 * for 3d textures, it is the depth:
	 */
	if (tmpl.array_size > 1) {
		if (tmpl.target == PIPE_TEXTURE_CUBE)
			tmpl.target = PIPE_TEXTURE_2D_ARRAY;
		tmpl.array_size = box->depth;
		tmpl.depth0 = 1;
	} else {
		tmpl.array_size = 1;
		tmpl.depth0 = box->depth;
	}
	tmpl.last_level = 0;
	tmpl.bind |= PIPE_BIND_LINEAR;

	struct pipe_resource *pstaging =
		pctx->screen->resource_create(pctx->screen, &tmpl);
	if (!pstaging)
		return NULL;

	return fd_resource(pstaging);
}

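/* Blit the staged data back into the real resource after a CPU write: */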
static void
fd_blit_from_staging(struct fd_context *ctx, struct fd_transfer *trans)
{
	struct pipe_resource *dst = trans->base.resource;
	struct pipe_blit_info blit = {};

	blit.dst.resource = dst;
	blit.dst.format = dst->format;
	blit.dst.level = trans->base.level;
	blit.dst.box = trans->base.box;
	blit.src.resource = trans->staging_prsc;
	blit.src.format = trans->staging_prsc->format;
	blit.src.level = 0;
	blit.src.box = trans->staging_box;
	blit.mask = util_format_get_mask(trans->staging_prsc->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	do_blit(ctx, &blit, false);
}

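/* Blit from the real resource into the staging buffer, so its contents
 * can be read back on the CPU:
 */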
static void
fd_blit_to_staging(struct fd_context *ctx, struct fd_transfer *trans)
{
	struct pipe_resource *src = trans->base.resource;
	struct pipe_blit_info blit = {};

	blit.src.resource = src;
	blit.src.format = src->format;
	blit.src.level = trans->base.level;
	blit.src.box = trans->base.box;
	blit.dst.resource = trans->staging_prsc;
	blit.dst.format = trans->staging_prsc->format;
	blit.dst.level = 0;
	blit.dst.box = trans->staging_box;
	blit.mask = util_format_get_mask(trans->staging_prsc->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	do_blit(ctx, &blit, false);
}

static void fd_resource_transfer_flush_region(struct pipe_context *pctx,
		struct pipe_transfer *ptrans,
		const struct pipe_box *box)
{
	struct fd_resource *rsc = fd_resource(ptrans->resource);

	if (ptrans->resource->target == PIPE_BUFFER)
		util_range_add(&rsc->base, &rsc->valid_buffer_range,
				ptrans->box.x + box->x,
				ptrans->box.x + box->x + box->width);
}

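/* Flush rendering that touches rsc so the CPU can safely access it. For
 * writes, flush every batch that references the resource (readers and the
 * writer); for reads, just the pending write batch:
 */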
static void
flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
{
	struct fd_batch *write_batch = NULL;

	mtx_lock(&ctx->screen->lock);
	fd_batch_reference_locked(&write_batch, rsc->write_batch);
	mtx_unlock(&ctx->screen->lock);

	if (usage & PIPE_TRANSFER_WRITE) {
		struct fd_batch *batch, *batches[32] = {};
		uint32_t batch_mask;

		/* This is a bit awkward, probably a fd_batch_flush_locked()
		 * would make things simpler.. but we need to hold the lock
		 * to iterate the batches which reference this resource. So
		 * we must first grab references under a lock, then flush.
		 */
		mtx_lock(&ctx->screen->lock);
		batch_mask = rsc->batch_mask;
		foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
			fd_batch_reference_locked(&batches[batch->idx], batch);
		mtx_unlock(&ctx->screen->lock);

		foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
			fd_batch_flush(batch);

		foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
			fd_batch_reference(&batches[batch->idx], NULL);
		}
		assert(rsc->batch_mask == 0);
	} else if (write_batch) {
		fd_batch_flush(write_batch);
	}

	fd_batch_reference(&write_batch, NULL);

	assert(!rsc->write_batch);
}

static void
fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
	flush_resource(fd_context(pctx), fd_resource(prsc), PIPE_TRANSFER_READ);
}

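/* Tear down a transfer: write back any staging copy, release the bo's CPU
 * access, and update the valid range:
 */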
static void
fd_resource_transfer_unmap(struct pipe_context *pctx,
		struct pipe_transfer *ptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(ptrans->resource);
	struct fd_transfer *trans = fd_transfer(ptrans);

	if (trans->staging_prsc) {
		if (ptrans->usage & PIPE_TRANSFER_WRITE)
			fd_blit_from_staging(ctx, trans);
		pipe_resource_reference(&trans->staging_prsc, NULL);
	}

	if (!(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		fd_bo_cpu_fini(rsc->bo);
	}

	util_range_add(&rsc->base, &rsc->valid_buffer_range,
			ptrans->box.x,
			ptrans->box.x + ptrans->box.width);

	pipe_resource_reference(&ptrans->resource, NULL);
	slab_free(&ctx->transfer_pool, ptrans);
}

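/* Map a resource for CPU access. Tiled resources always go through a
 * staging blit. Otherwise, if the GPU is (or might be) using the bo, we
 * either shadow the resource, fall back to a staging buffer, or flush and
 * stall, depending on the usage flags:
 */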
static void *
fd_resource_transfer_map(struct pipe_context *pctx,
		struct pipe_resource *prsc,
		unsigned level, unsigned usage,
		const struct pipe_box *box,
		struct pipe_transfer **pptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(prsc);
	struct fdl_slice *slice = fd_resource_slice(rsc, level);
	struct fd_transfer *trans;
	struct pipe_transfer *ptrans;
	enum pipe_format format = prsc->format;
	uint32_t op = 0;
	uint32_t offset;
	char *buf;
	int ret = 0;

	DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
		box->width, box->height, box->x, box->y);

	ptrans = slab_alloc(&ctx->transfer_pool);
	if (!ptrans)
		return NULL;

	/* slab_alloc_st() doesn't zero: */
	trans = fd_transfer(ptrans);
	memset(trans, 0, sizeof(*trans));

	pipe_resource_reference(&ptrans->resource, prsc);
	ptrans->level = level;
	ptrans->usage = usage;
	ptrans->box = *box;
	ptrans->stride = util_format_get_nblocksx(format, slice->pitch) * rsc->layout.cpp;
	ptrans->layer_stride = fd_resource_layer_stride(rsc, level);

	/* we always need a staging texture for tiled buffers:
	 *
	 * TODO we might sometimes want to *also* shadow the resource to avoid
	 * splitting a batch.. for ex, mid-frame texture uploads to a tiled
	 * texture.
	 */
	if (rsc->layout.tile_mode) {
		struct fd_resource *staging_rsc;

		staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
		if (staging_rsc) {
			struct fdl_slice *staging_slice =
				fd_resource_slice(staging_rsc, 0);
			// TODO for PIPE_TRANSFER_READ, need to do untiling blit..
			trans->staging_prsc = &staging_rsc->base;
			trans->base.stride = util_format_get_nblocksx(format,
				staging_slice->pitch) * staging_rsc->layout.cpp;
			trans->base.layer_stride = fd_resource_layer_stride(staging_rsc, 0);
			trans->staging_box = *box;
			trans->staging_box.x = 0;
			trans->staging_box.y = 0;
			trans->staging_box.z = 0;

			if (usage & PIPE_TRANSFER_READ) {
				fd_blit_to_staging(ctx, trans);

				fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe,
						DRM_FREEDRENO_PREP_READ);
			}

			buf = fd_bo_map(staging_rsc->bo);
			offset = 0;

			*pptrans = ptrans;

			ctx->stats.staging_uploads++;

			return buf;
		}
	}

	if (ctx->in_shadow && !(usage & PIPE_TRANSFER_READ))
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

	if (usage & PIPE_TRANSFER_READ)
		op |= DRM_FREEDRENO_PREP_READ;

	if (usage & PIPE_TRANSFER_WRITE)
		op |= DRM_FREEDRENO_PREP_WRITE;

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
		realloc_bo(rsc, fd_bo_size(rsc->bo));
		rebind_resource(ctx, prsc);
	} else if ((usage & PIPE_TRANSFER_WRITE) &&
			prsc->target == PIPE_BUFFER &&
			!util_ranges_intersect(&rsc->valid_buffer_range,
					box->x, box->x + box->width)) {
		/* We are trying to write to a previously uninitialized range. No need
		 * to wait.
		 */
	} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		struct fd_batch *write_batch = NULL;

		/* hold a reference, so it doesn't disappear under us: */
		fd_context_lock(ctx);
		fd_batch_reference_locked(&write_batch, rsc->write_batch);
		fd_context_unlock(ctx);

		if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
				write_batch->back_blit) {
			/* if only thing pending is a back-blit, we can discard it: */
			fd_batch_reset(write_batch);
		}

		/* If the GPU is writing to the resource, or if it is reading from the
		 * resource and we're trying to write to it, flush the renders.
		 */
		bool needs_flush = pending(rsc, !!(usage & PIPE_TRANSFER_WRITE));
		bool busy = needs_flush || (0 != fd_bo_cpu_prep(rsc->bo,
				ctx->pipe, op | DRM_FREEDRENO_PREP_NOSYNC));

		/* if we need to flush/stall, see if we can make a shadow buffer
		 * to avoid this:
		 *
		 * TODO we could also go down this path for !reorder && !busy_for_read,
		 * ie. we only *don't* want to go down this path if the blit
		 * will trigger a flush!
		 */
		if (ctx->screen->reorder && busy && !(usage & PIPE_TRANSFER_READ) &&
				(usage & PIPE_TRANSFER_DISCARD_RANGE)) {
			/* try shadowing only if it avoids a flush, otherwise staging would
			 * be better:
			 */
			if (needs_flush && fd_try_shadow_resource(ctx, rsc, level,
							box, DRM_FORMAT_MOD_LINEAR)) {
				needs_flush = busy = false;
				rebind_resource(ctx, prsc);
				ctx->stats.shadow_uploads++;
			} else {
				struct fd_resource *staging_rsc;

				if (needs_flush) {
					flush_resource(ctx, rsc, usage);
					needs_flush = false;
				}

				/* in this case, we don't need to shadow the whole resource,
				 * since any draw that references the previous contents has
				 * already had rendering flushed for all tiles. So we can
				 * use a staging buffer to do the upload.
				 */
				staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
				if (staging_rsc) {
					struct fdl_slice *staging_slice =
						fd_resource_slice(staging_rsc, 0);
					trans->staging_prsc = &staging_rsc->base;
					trans->base.stride = util_format_get_nblocksx(format,
						staging_slice->pitch) * staging_rsc->layout.cpp;
					trans->base.layer_stride =
						fd_resource_layer_stride(staging_rsc, 0);
					trans->staging_box = *box;
					trans->staging_box.x = 0;
					trans->staging_box.y = 0;
					trans->staging_box.z = 0;
					buf = fd_bo_map(staging_rsc->bo);
					offset = 0;

					*pptrans = ptrans;

					fd_batch_reference(&write_batch, NULL);

					ctx->stats.staging_uploads++;

					return buf;
				}
			}
		}

		if (needs_flush) {
			flush_resource(ctx, rsc, usage);
			needs_flush = false;
		}

		fd_batch_reference(&write_batch, NULL);

		/* The GPU keeps track of how the various bo's are being used, and
		 * will wait if necessary for the proper operation to have
		 * completed.
		 */
		if (busy) {
			ret = fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);
			if (ret)
				goto fail;
		}
	}

	buf = fd_bo_map(rsc->bo);
	offset =
		box->y / util_format_get_blockheight(format) * ptrans->stride +
		box->x / util_format_get_blockwidth(format) * rsc->layout.cpp +
		fd_resource_offset(rsc, level, box->z);

	if (usage & PIPE_TRANSFER_WRITE)
		rsc->valid = true;

	*pptrans = ptrans;

	return buf + offset;

fail:
	fd_resource_transfer_unmap(pctx, ptrans);
	return NULL;
}

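/* Destroy the resource: drop batch-cache references, free the bo and any
 * scanout object, and release tracking state:
 */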
static void
fd_resource_destroy(struct pipe_screen *pscreen,
		struct pipe_resource *prsc)
{
	struct fd_resource *rsc = fd_resource(prsc);
	fd_bc_invalidate_resource(rsc, true);
	if (rsc->bo)
		fd_bo_del(rsc->bo);
	if (rsc->scanout)
		renderonly_scanout_destroy(rsc->scanout, fd_screen(pscreen)->ro);

	util_range_destroy(&rsc->valid_buffer_range);
	FREE(rsc);
}

static uint64_t
fd_resource_modifier(struct fd_resource *rsc)
{
	if (!rsc->layout.tile_mode)
		return DRM_FORMAT_MOD_LINEAR;

	if (rsc->layout.ubwc_layer_size)
		return DRM_FORMAT_MOD_QCOM_COMPRESSED;

	/* TODO invent a modifier for tiled but not UBWC buffers: */
	return DRM_FORMAT_MOD_INVALID;
}

static bool
fd_resource_get_handle(struct pipe_screen *pscreen,
		struct pipe_context *pctx,
		struct pipe_resource *prsc,
		struct winsys_handle *handle,
		unsigned usage)
{
	struct fd_resource *rsc = fd_resource(prsc);

	handle->modifier = fd_resource_modifier(rsc);

	return fd_screen_bo_get_handle(pscreen, rsc->bo, rsc->scanout,
			fd_resource_slice(rsc, 0)->pitch * rsc->layout.cpp, handle);
}

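/* Lay out the miptree: compute each level's pitch, offset and slice size,
 * and return the total bo size in bytes:
 */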
static uint32_t
setup_slices(struct fd_resource *rsc, uint32_t alignment, enum pipe_format format)
{
	struct pipe_resource *prsc = &rsc->base;
	struct fd_screen *screen = fd_screen(prsc->screen);
	enum util_format_layout layout = util_format_description(format)->layout;
	uint32_t pitchalign = screen->gmem_alignw;
	uint32_t level, size = 0;
	uint32_t width = prsc->width0;
	uint32_t height = prsc->height0;
	uint32_t depth = prsc->depth0;
	/* in layer_first layout, the level (slice) contains just one
	 * layer (since in fact the layer contains the slices)
	 */
	uint32_t layers_in_level = rsc->layout.layer_first ? 1 : prsc->array_size;

	for (level = 0; level <= prsc->last_level; level++) {
		struct fdl_slice *slice = fd_resource_slice(rsc, level);
		uint32_t blocks;

		if (layout == UTIL_FORMAT_LAYOUT_ASTC)
			slice->pitch = width =
				util_align_npot(width, pitchalign * util_format_get_blockwidth(format));
		else
			slice->pitch = width = align(width, pitchalign);
		slice->offset = size;
		blocks = util_format_get_nblocks(format, width, height);
		/* 1d array and 2d array textures must all have the same layer size
		 * for each miplevel on a3xx. 3d textures can have different layer
		 * sizes for high levels, but the hw auto-sizer is buggy (or at least
		 * different than what this code does), so once the layer size
		 * drops below the threshold, we stop reducing it.
		 */
		if (prsc->target == PIPE_TEXTURE_3D && (
				level == 1 ||
				(level > 1 && fd_resource_slice(rsc, level - 1)->size0 > 0xf000)))
			slice->size0 = align(blocks * rsc->layout.cpp, alignment);
		else if (level == 0 || rsc->layout.layer_first || alignment == 1)
			slice->size0 = align(blocks * rsc->layout.cpp, alignment);
		else
			slice->size0 = fd_resource_slice(rsc, level - 1)->size0;

		size += slice->size0 * depth * layers_in_level;

		width = u_minify(width, 1);
		height = u_minify(height, 1);
		depth = u_minify(depth, 1);
	}

	return size;
}

static uint32_t
slice_alignment(enum pipe_texture_target target)
{
	/* on a3xx, 2d array and 3d textures seem to want their
	 * layers aligned to page boundaries:
	 */
	switch (target) {
	case PIPE_TEXTURE_3D:
	case PIPE_TEXTURE_1D_ARRAY:
	case PIPE_TEXTURE_2D_ARRAY:
		return 4096;
	default:
		return 1;
	}
}

/* cross generation texture layout to plug in to screen->setup_slices()..
 * replace with generation specific one as-needed.
 *
 * TODO for a4xx probably can extract out the a4xx specific logic into
 * a small fd4_setup_slices() wrapper that sets up layer_first, and then
 * calls this.
 */
uint32_t
fd_setup_slices(struct fd_resource *rsc)
{
	uint32_t alignment;

	alignment = slice_alignment(rsc->base.target);

	struct fd_screen *screen = fd_screen(rsc->base.screen);
	if (is_a4xx(screen)) {
		switch (rsc->base.target) {
		case PIPE_TEXTURE_3D:
			rsc->layout.layer_first = false;
			break;
		default:
			rsc->layout.layer_first = true;
			alignment = 1;
			break;
		}
	}

	return setup_slices(rsc, alignment, rsc->base.format);
}

/* special case to resize query buf after allocated.. */
void
fd_resource_resize(struct pipe_resource *prsc, uint32_t sz)
{
	struct fd_resource *rsc = fd_resource(prsc);

	debug_assert(prsc->width0 == 0);
	debug_assert(prsc->target == PIPE_BUFFER);
	debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);

	prsc->width0 = sz;
	realloc_bo(rsc, fd_screen(prsc->screen)->setup_slices(rsc));
}

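/* Seed the fdl_layout from the pipe_resource template; cpp accounts for
 * the sample count so MSAA resources get a large enough allocation:
 */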
static void
fd_resource_layout_init(struct pipe_resource *prsc)
{
	struct fd_resource *rsc = fd_resource(prsc);
	struct fdl_layout *layout = &rsc->layout;

	layout->width0 = prsc->width0;
	layout->height0 = prsc->height0;
	layout->depth0 = prsc->depth0;

	layout->cpp = util_format_get_blocksize(prsc->format);
	layout->cpp *= fd_resource_nr_samples(prsc);
}

/**
 * Create a new texture object, using the given template info.
 */
static struct pipe_resource *
fd_resource_create_with_modifiers(struct pipe_screen *pscreen,
		const struct pipe_resource *tmpl,
		const uint64_t *modifiers, int count)
{
	struct fd_screen *screen = fd_screen(pscreen);
	struct fd_resource *rsc;
	struct pipe_resource *prsc;
	enum pipe_format format = tmpl->format;
	uint32_t size;

	/* when using kmsro, scanout buffers are allocated on the display device.
	 * create_with_modifiers() doesn't give us usage flags, so we have to
	 * assume that all calls with modifiers are scanout-possible.
	 */
	if (screen->ro &&
			((tmpl->bind & PIPE_BIND_SCANOUT) ||
			 !(count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID))) {
		struct pipe_resource scanout_templat = *tmpl;
		struct renderonly_scanout *scanout;
		struct winsys_handle handle;

		/* apply freedreno alignment requirement */
		scanout_templat.width0 = align(tmpl->width0, screen->gmem_alignw);

		scanout = renderonly_scanout_for_resource(&scanout_templat,
				screen->ro, &handle);
		if (!scanout)
			return NULL;

		renderonly_scanout_destroy(scanout, screen->ro);

		assert(handle.type == WINSYS_HANDLE_TYPE_FD);
		rsc = fd_resource(pscreen->resource_from_handle(pscreen, tmpl,
				&handle,
				PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE));
		close(handle.handle);
		if (!rsc)
			return NULL;

		return &rsc->base;
	}

	rsc = CALLOC_STRUCT(fd_resource);
	prsc = &rsc->base;

	DBG("%p: target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
			"nr_samples=%u, usage=%u, bind=%x, flags=%x", prsc,
			tmpl->target, util_format_name(format),
			tmpl->width0, tmpl->height0, tmpl->depth0,
			tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
			tmpl->usage, tmpl->bind, tmpl->flags);

	if (!rsc)
		return NULL;

	*prsc = *tmpl;
	fd_resource_layout_init(prsc);

#define LINEAR \
	(PIPE_BIND_SCANOUT | \
	 PIPE_BIND_LINEAR | \
	 PIPE_BIND_DISPLAY_TARGET)

	bool linear = drm_find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count);
	if (tmpl->bind & LINEAR)
		linear = true;

	if (fd_mesa_debug & FD_DBG_NOTILE)
		linear = true;

	/* Allow buffer compression for non-shared buffers; for shared
	 * buffers, only allow it if the QCOM_COMPRESSED modifier is
	 * requested:
	 *
	 * TODO we should probably also limit tiled in a similar way,
	 * except we don't have a format modifier for tiled. (We probably
	 * should.)
	 */
	bool allow_ubwc = drm_find_modifier(DRM_FORMAT_MOD_INVALID, modifiers, count);
	if (tmpl->bind & PIPE_BIND_SHARED)
		allow_ubwc = drm_find_modifier(DRM_FORMAT_MOD_QCOM_COMPRESSED, modifiers, count);

	allow_ubwc &= !(fd_mesa_debug & FD_DBG_NOUBWC);

	pipe_reference_init(&prsc->reference, 1);

	prsc->screen = pscreen;

	if (screen->tile_mode &&
			(tmpl->target != PIPE_BUFFER) &&
			!linear) {
		rsc->layout.tile_mode = screen->tile_mode(prsc);
	}

	util_range_init(&rsc->valid_buffer_range);

	rsc->internal_format = format;

	rsc->layout.ubwc = rsc->layout.tile_mode && is_a6xx(screen) && allow_ubwc;

	if (prsc->target == PIPE_BUFFER) {
		assert(prsc->format == PIPE_FORMAT_R8_UNORM);
		size = prsc->width0;
		fdl_layout_buffer(&rsc->layout, size);
	} else {
		size = screen->setup_slices(rsc);
	}

	/* special case for hw-query buffer, which we need to allocate before we
	 * know the size:
	 */
	if (size == 0) {
		/* note, semi-intentional == instead of & */
		debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
		return prsc;
	}

	/* Set the layer size if the (non-a6xx) backend hasn't done so. */
	if (rsc->layout.layer_first && !rsc->layout.layer_size) {
		rsc->layout.layer_size = align(size, 4096);
		size = rsc->layout.layer_size * prsc->array_size;
	}

	if (fd_mesa_debug & FD_DBG_LAYOUT)
		fdl_dump_layout(&rsc->layout);

	realloc_bo(rsc, size);
	if (!rsc->bo)
		goto fail;

	return prsc;
fail:
	fd_resource_destroy(pscreen, prsc);
	return NULL;
}

static struct pipe_resource *
fd_resource_create(struct pipe_screen *pscreen,
		const struct pipe_resource *tmpl)
{
	const uint64_t mod = DRM_FORMAT_MOD_INVALID;
	return fd_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
}

/**
 * Create a texture from a winsys_handle. The handle is often created in
 * another process by first creating a pipe texture and then calling
 * resource_get_handle.
 */
static struct pipe_resource *
fd_resource_from_handle(struct pipe_screen *pscreen,
		const struct pipe_resource *tmpl,
		struct winsys_handle *handle, unsigned usage)
{
	struct fd_screen *screen = fd_screen(pscreen);
	struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
	struct fdl_slice *slice = fd_resource_slice(rsc, 0);
	struct pipe_resource *prsc = &rsc->base;
	uint32_t pitchalign = fd_screen(pscreen)->gmem_alignw;

	DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
			"nr_samples=%u, usage=%u, bind=%x, flags=%x",
			tmpl->target, util_format_name(tmpl->format),
			tmpl->width0, tmpl->height0, tmpl->depth0,
			tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
			tmpl->usage, tmpl->bind, tmpl->flags);

	if (!rsc)
		return NULL;

	*prsc = *tmpl;
	fd_resource_layout_init(prsc);

	pipe_reference_init(&prsc->reference, 1);

	prsc->screen = pscreen;

	util_range_init(&rsc->valid_buffer_range);

	rsc->bo = fd_screen_bo_from_handle(pscreen, handle);
	if (!rsc->bo)
		goto fail;

	rsc->internal_format = tmpl->format;
	slice->pitch = handle->stride / rsc->layout.cpp;
	slice->offset = handle->offset;
	slice->size0 = handle->stride * prsc->height0;

	if ((slice->pitch < align(prsc->width0, pitchalign)) ||
			(slice->pitch & (pitchalign - 1)))
		goto fail;

	assert(rsc->layout.cpp);

	if (screen->layout_resource_for_modifier(rsc, handle->modifier) < 0)
		goto fail;

	if (screen->ro) {
		rsc->scanout =
			renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
		/* failure is expected in some cases.. */
	}

	rsc->valid = true;

	return prsc;

fail:
	fd_resource_destroy(pscreen, prsc);
	return NULL;
}

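/* Evaluate the conditional-rendering predicate; returns true if rendering
 * should proceed:
 */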
bool
fd_render_condition_check(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);

	if (!ctx->cond_query)
		return true;

	union pipe_query_result res = { 0 };
	bool wait =
		ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
		ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

	if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
		return (bool)res.u64 != ctx->cond_cond;

	return true;
}

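/* The state tracker tells us the current contents may be discarded; cancel
 * any pending resolves of the resource and mark it invalid:
 */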
static void
fd_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(prsc);

	/*
	 * TODO I guess we could track that the resource is invalidated and
	 * use that as a hint to realloc rather than stall in _transfer_map(),
	 * even in the non-DISCARD_WHOLE_RESOURCE case?
	 *
	 * Note: we set dirty bits to trigger the invalidate logic in fd_draw_vbo
	 */

	if (rsc->write_batch) {
		struct fd_batch *batch = rsc->write_batch;
		struct pipe_framebuffer_state *pfb = &batch->framebuffer;

		if (pfb->zsbuf && pfb->zsbuf->texture == prsc) {
			batch->resolve &= ~(FD_BUFFER_DEPTH | FD_BUFFER_STENCIL);
			ctx->dirty |= FD_DIRTY_ZSA;
		}

		for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
			if (pfb->cbufs[i] && pfb->cbufs[i]->texture == prsc) {
				batch->resolve &= ~(PIPE_CLEAR_COLOR0 << i);
				ctx->dirty |= FD_DIRTY_FRAMEBUFFER;
			}
		}
	}

	rsc->valid = false;
}

static enum pipe_format
fd_resource_get_internal_format(struct pipe_resource *prsc)
{
	return fd_resource(prsc)->internal_format;
}

static void
fd_resource_set_stencil(struct pipe_resource *prsc,
		struct pipe_resource *stencil)
{
	fd_resource(prsc)->stencil = fd_resource(stencil);
}

static struct pipe_resource *
fd_resource_get_stencil(struct pipe_resource *prsc)
{
	struct fd_resource *rsc = fd_resource(prsc);
	if (rsc->stencil)
		return &rsc->stencil->base;
	return NULL;
}

static const struct u_transfer_vtbl transfer_vtbl = {
	.resource_create = fd_resource_create,
	.resource_destroy = fd_resource_destroy,
	.transfer_map = fd_resource_transfer_map,
	.transfer_flush_region = fd_resource_transfer_flush_region,
	.transfer_unmap = fd_resource_transfer_unmap,
	.get_internal_format = fd_resource_get_internal_format,
	.set_stencil = fd_resource_set_stencil,
	.get_stencil = fd_resource_get_stencil,
};

static const uint64_t supported_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
};

static int
fd_layout_resource_for_modifier(struct fd_resource *rsc, uint64_t modifier)
{
	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return 0;
	default:
		return -1;
	}
}

void
fd_resource_screen_init(struct pipe_screen *pscreen)
{
	struct fd_screen *screen = fd_screen(pscreen);
	bool fake_rgtc = screen->gpu_id < 400;

	pscreen->resource_create = u_transfer_helper_resource_create;
	/* NOTE: u_transfer_helper does not yet support the _with_modifiers()
	 * variant:
	 */
	pscreen->resource_create_with_modifiers = fd_resource_create_with_modifiers;
	pscreen->resource_from_handle = fd_resource_from_handle;
	pscreen->resource_get_handle = fd_resource_get_handle;
	pscreen->resource_destroy = u_transfer_helper_resource_destroy;

	pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
			true, false, fake_rgtc, true);

	if (!screen->setup_slices)
		screen->setup_slices = fd_setup_slices;
	if (!screen->layout_resource_for_modifier)
		screen->layout_resource_for_modifier = fd_layout_resource_for_modifier;
	if (!screen->supported_modifiers) {
		screen->supported_modifiers = supported_modifiers;
		screen->num_supported_modifiers = ARRAY_SIZE(supported_modifiers);
	}
}

static void
fd_get_sample_position(struct pipe_context *context,
		unsigned sample_count, unsigned sample_index,
		float *pos_out)
{
	/* The following is copied from nouveau/nv50 except for position
	 * values, which are taken from the blob driver */
	static const uint8_t pos1[1][2] = { { 0x8, 0x8 } };
	static const uint8_t pos2[2][2] = {
		{ 0xc, 0xc }, { 0x4, 0x4 } };
	static const uint8_t pos4[4][2] = {
		{ 0x6, 0x2 }, { 0xe, 0x6 },
		{ 0x2, 0xa }, { 0xa, 0xe } };
	/* TODO needs to be verified on supported hw */
	static const uint8_t pos8[8][2] = {
		{ 0x9, 0x5 }, { 0x7, 0xb },
		{ 0xd, 0x9 }, { 0x5, 0x3 },
		{ 0x3, 0xd }, { 0x1, 0x7 },
		{ 0xb, 0xf }, { 0xf, 0x1 } };

	const uint8_t (*ptr)[2];

	switch (sample_count) {
	case 1:
		ptr = pos1;
		break;
	case 2:
		ptr = pos2;
		break;
	case 4:
		ptr = pos4;
		break;
	case 8:
		ptr = pos8;
		break;
	default:
		assert(0);
		return;
	}

	pos_out[0] = ptr[sample_index][0] / 16.0f;
	pos_out[1] = ptr[sample_index][1] / 16.0f;
}

static void
fd_blit_pipe(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
{
	/* wrap fd_blit to return void */
	fd_blit(pctx, blit_info);
}

void
fd_resource_context_init(struct pipe_context *pctx)
{
	pctx->transfer_map = u_transfer_helper_transfer_map;
	pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
	pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
	pctx->buffer_subdata = u_default_buffer_subdata;
	pctx->texture_subdata = u_default_texture_subdata;
	pctx->create_surface = fd_create_surface;
	pctx->surface_destroy = fd_surface_destroy;
	pctx->resource_copy_region = fd_resource_copy_region;
	pctx->blit = fd_blit_pipe;
	pctx->flush_resource = fd_flush_resource;
	pctx->invalidate_resource = fd_invalidate_resource;
	pctx->get_sample_position = fd_get_sample_position;
}