freedreno: Allow UBWC on textures with multiple mipmap levels.
[mesa.git] / src / gallium / drivers / freedreno / freedreno_resource.c
1 /*
2 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include "util/format/u_format.h"
28 #include "util/format/u_format_rgtc.h"
29 #include "util/format/u_format_zs.h"
30 #include "util/u_inlines.h"
31 #include "util/u_transfer.h"
32 #include "util/u_string.h"
33 #include "util/u_surface.h"
34 #include "util/set.h"
35 #include "util/u_drm.h"
36
37 #include "freedreno_resource.h"
38 #include "freedreno_batch_cache.h"
39 #include "freedreno_blitter.h"
40 #include "freedreno_fence.h"
41 #include "freedreno_screen.h"
42 #include "freedreno_surface.h"
43 #include "freedreno_context.h"
44 #include "freedreno_query_hw.h"
45 #include "freedreno_util.h"
46
47 #include "drm-uapi/drm_fourcc.h"
48 #include <errno.h>
49
50 /* XXX this should go away, needed for 'struct winsys_handle' */
51 #include "state_tracker/drm_driver.h"
52
53 /* A private modifier for now, so we have a way to request tiled but not
54 * compressed. It would perhaps be good to get real modifiers for the
55 * tiled formats, but would probably need to do some work to figure out
56 * the layout(s) of the tiled modes, and whether they are the same
57 * across generations.
58 */
59 #define FD_FORMAT_MOD_QCOM_TILED fourcc_mod_code(QCOM, 0xffffffff)
60
61 /**
62 * Go through the entire state and see if the resource is bound
63 * anywhere. If it is, mark the relevant state as dirty. This is
64 * called on realloc_bo to ensure the necessary state is re-
65 * emitted so the GPU looks at the new backing bo.
66 */
67 static void
68 rebind_resource(struct fd_context *ctx, struct pipe_resource *prsc)
69 {
70 /* VBOs */
71 for (unsigned i = 0; i < ctx->vtx.vertexbuf.count && !(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
72 if (ctx->vtx.vertexbuf.vb[i].buffer.resource == prsc)
73 ctx->dirty |= FD_DIRTY_VTXBUF;
74 }
75
76 /* per-shader-stage resources: */
77 for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
78 /* Constbufs.. note that constbuf[0] is normal uniforms emitted in
79 * cmdstream rather than by pointer..
80 */
81 const unsigned num_ubos = util_last_bit(ctx->constbuf[stage].enabled_mask);
82 for (unsigned i = 1; i < num_ubos; i++) {
83 if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_CONST)
84 break;
85 if (ctx->constbuf[stage].cb[i].buffer == prsc)
86 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_CONST;
87 }
88
89 /* Textures */
90 for (unsigned i = 0; i < ctx->tex[stage].num_textures; i++) {
91 if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_TEX)
92 break;
93 if (ctx->tex[stage].textures[i] && (ctx->tex[stage].textures[i]->texture == prsc))
94 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_TEX;
95 }
96
97 /* Images */
98 const unsigned num_images = util_last_bit(ctx->shaderimg[stage].enabled_mask);
99 for (unsigned i = 0; i < num_images; i++) {
100 if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_IMAGE)
101 break;
102 if (ctx->shaderimg[stage].si[i].resource == prsc)
103 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_IMAGE;
104 }
105
106 /* SSBOs */
107 const unsigned num_ssbos = util_last_bit(ctx->shaderbuf[stage].enabled_mask);
108 for (unsigned i = 0; i < num_ssbos; i++) {
109 if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_SSBO)
110 break;
111 if (ctx->shaderbuf[stage].sb[i].buffer == prsc)
112 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_SSBO;
113 }
114 }
115 }
116
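/* (Re)allocate the backing bo for a resource: drop any existing bo, bump
 * the resource seqno, reset the valid range, and invalidate batch-cache
 * references so nothing still points at the old backing storage.
 */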
117 static void
118 realloc_bo(struct fd_resource *rsc, uint32_t size)
119 {
120 struct pipe_resource *prsc = &rsc->base;
121 struct fd_screen *screen = fd_screen(rsc->base.screen);
122 uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
123 DRM_FREEDRENO_GEM_TYPE_KMEM |
124 COND(prsc->bind & PIPE_BIND_SCANOUT, DRM_FREEDRENO_GEM_SCANOUT);
125 /* TODO other flags? */
126
127 /* if we start using things other than write-combine,
128 * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
129 */
130
131 if (rsc->bo)
132 fd_bo_del(rsc->bo);
133
134 rsc->bo = fd_bo_new(screen->dev, size, flags, "%ux%ux%u@%u:%x",
135 prsc->width0, prsc->height0, prsc->depth0, rsc->layout.cpp, prsc->bind);
136 rsc->seqno = p_atomic_inc_return(&screen->rsc_seqno);
137 util_range_set_empty(&rsc->valid_buffer_range);
138 fd_bc_invalidate_resource(rsc, true);
139 }
140
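/* Perform a blit, using the hardware blitter when possible and falling
 * back to a CPU copy via util_resource_copy_region() when requested or
 * when the hw path can't handle the operation.
 */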
141 static void
142 do_blit(struct fd_context *ctx, const struct pipe_blit_info *blit, bool fallback)
143 {
144 struct pipe_context *pctx = &ctx->base;
145
146 /* TODO size threshold too?? */
147 if (fallback || !fd_blit(pctx, blit)) {
148 /* do blit on cpu: */
149 util_resource_copy_region(pctx,
150 blit->dst.resource, blit->dst.level, blit->dst.box.x,
151 blit->dst.box.y, blit->dst.box.z,
152 blit->src.resource, blit->src.level, &blit->src.box);
153 }
154 }
155
156 /**
157 * @rsc: the resource to shadow
158 * @level: the level to discard (if box != NULL, otherwise ignored)
159 * @box: the box to discard (or NULL if none)
160 * @modifier: the modifier for the new buffer state
161 */
162 static bool
163 fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
164 unsigned level, const struct pipe_box *box, uint64_t modifier)
165 {
166 struct pipe_context *pctx = &ctx->base;
167 struct pipe_resource *prsc = &rsc->base;
168 bool fallback = false;
169
170 if (prsc->next)
171 return false;
172
173 /* TODO: somehow munge dimensions and format to copy unsupported
174 * render target format to something that is supported?
175 */
176 if (!pctx->screen->is_format_supported(pctx->screen,
177 prsc->format, prsc->target, prsc->nr_samples,
178 prsc->nr_storage_samples,
179 PIPE_BIND_RENDER_TARGET))
180 fallback = true;
181
182 /* do shadowing back-blits on the cpu for buffers: */
183 if (prsc->target == PIPE_BUFFER)
184 fallback = true;
185
186 bool discard_whole_level = box && util_texrange_covers_whole_level(prsc, level,
187 box->x, box->y, box->z, box->width, box->height, box->depth);
188
189 /* TODO need to be more clever about current level */
190 if ((prsc->target >= PIPE_TEXTURE_2D) && box && !discard_whole_level)
191 return false;
192
193 struct pipe_resource *pshadow =
194 pctx->screen->resource_create_with_modifiers(pctx->screen,
195 prsc, &modifier, 1);
196
197 if (!pshadow)
198 return false;
199
200 assert(!ctx->in_shadow);
201 ctx->in_shadow = true;
202
203 /* get rid of any references that batch-cache might have to us (which
204 * should empty/destroy rsc->batches hashset)
205 */
206 fd_bc_invalidate_resource(rsc, false);
207
208 mtx_lock(&ctx->screen->lock);
209
210 /* Swap the backing bo's, so shadow becomes the old buffer,
211 * blit from shadow to new buffer. From here on out, we
212 * cannot fail.
213 *
214 * Note that we need to do it in this order, otherwise if
215 * we go down cpu blit path, the recursive transfer_map()
216 * sees the wrong status..
217 */
218 struct fd_resource *shadow = fd_resource(pshadow);
219
220 DBG("shadow: %p (%d) -> %p (%d)\n", rsc, rsc->base.reference.count,
221 shadow, shadow->base.reference.count);
222
223 /* TODO valid_buffer_range?? */
224 swap(rsc->bo, shadow->bo);
225 swap(rsc->write_batch, shadow->write_batch);
226 swap(rsc->layout, shadow->layout);
227 rsc->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);
228
229 /* at this point, the newly created shadow buffer is not referenced
230 * by any batches, but the existing rsc (probably) is. We need to
231 * transfer those references over:
232 */
233 debug_assert(shadow->batch_mask == 0);
234 struct fd_batch *batch;
235 foreach_batch(batch, &ctx->screen->batch_cache, rsc->batch_mask) {
236 struct set_entry *entry = _mesa_set_search(batch->resources, rsc);
237 _mesa_set_remove(batch->resources, entry);
238 _mesa_set_add(batch->resources, shadow);
239 }
240 swap(rsc->batch_mask, shadow->batch_mask);
241
242 mtx_unlock(&ctx->screen->lock);
243
244 struct pipe_blit_info blit = {};
245 blit.dst.resource = prsc;
246 blit.dst.format = prsc->format;
247 blit.src.resource = pshadow;
248 blit.src.format = pshadow->format;
249 blit.mask = util_format_get_mask(prsc->format);
250 blit.filter = PIPE_TEX_FILTER_NEAREST;
251
252 #define set_box(field, val) do { \
253 blit.dst.field = (val); \
254 blit.src.field = (val); \
255 } while (0)
256
257 /* blit the other levels in their entirety: */
258 for (unsigned l = 0; l <= prsc->last_level; l++) {
259 if (box && l == level)
260 continue;
261
262 /* just blit whole level: */
263 set_box(level, l);
264 set_box(box.width, u_minify(prsc->width0, l));
265 set_box(box.height, u_minify(prsc->height0, l));
266 set_box(box.depth, u_minify(prsc->depth0, l));
267
268 for (int i = 0; i < prsc->array_size; i++) {
269 set_box(box.z, i);
270 do_blit(ctx, &blit, fallback);
271 }
272 }
273
274 /* deal w/ current level specially, since we might need to split
275 * it up into a couple blits:
276 */
277 if (box && !discard_whole_level) {
278 set_box(level, level);
279
280 switch (prsc->target) {
281 case PIPE_BUFFER:
282 case PIPE_TEXTURE_1D:
283 set_box(box.y, 0);
284 set_box(box.z, 0);
285 set_box(box.height, 1);
286 set_box(box.depth, 1);
287
288 if (box->x > 0) {
289 set_box(box.x, 0);
290 set_box(box.width, box->x);
291
292 do_blit(ctx, &blit, fallback);
293 }
294 if ((box->x + box->width) < u_minify(prsc->width0, level)) {
295 set_box(box.x, box->x + box->width);
296 set_box(box.width, u_minify(prsc->width0, level) - (box->x + box->width));
297
298 do_blit(ctx, &blit, fallback);
299 }
300 break;
301 case PIPE_TEXTURE_2D:
302 /* TODO */
303 default:
304 unreachable("TODO");
305 }
306 }
307
308 ctx->in_shadow = false;
309
310 pipe_resource_reference(&pshadow, NULL);
311
312 return true;
313 }
314
315 /**
316 * Uncompress a UBWC-compressed buffer "in place". This works basically
317 * like resource shadowing: create a new resource, do an uncompress
318 * blit, and swap the state between the shadow and original resource so it
319 * appears to the state tracker as if nothing changed.
320 */
321 void
322 fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc)
323 {
324 bool success =
325 fd_try_shadow_resource(ctx, rsc, 0, NULL, FD_FORMAT_MOD_QCOM_TILED);
326
327 /* shadow should not fail in any cases where we need to uncompress: */
328 debug_assert(success);
329
330 /*
331 * TODO what if rsc is used in other contexts? We don't currently
332 * have a good way to rebind_resource() in other contexts. And an
333 * app that is reading one resource in multiple contexts isn't
334 * going to expect that the resource is modified.
335 *
336 * Hopefully the edge cases where we need to uncompress are rare
337 * enough that they mostly only show up in deqp.
338 */
339
340 rebind_resource(ctx, &rsc->base);
341 }
342
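/* Allocate a temporary linear, single-level staging resource sized to the
 * transfer box, so the CPU never has to touch tiled/compressed storage
 * directly.
 */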
343 static struct fd_resource *
344 fd_alloc_staging(struct fd_context *ctx, struct fd_resource *rsc,
345 unsigned level, const struct pipe_box *box)
346 {
347 struct pipe_context *pctx = &ctx->base;
348 struct pipe_resource tmpl = rsc->base;
349
350 tmpl.width0 = box->width;
351 tmpl.height0 = box->height;
352 /* for array textures, box->depth is the array_size, otherwise
353 * for 3d textures, it is the depth:
354 */
355 if (tmpl.array_size > 1) {
356 if (tmpl.target == PIPE_TEXTURE_CUBE)
357 tmpl.target = PIPE_TEXTURE_2D_ARRAY;
358 tmpl.array_size = box->depth;
359 tmpl.depth0 = 1;
360 } else {
361 tmpl.array_size = 1;
362 tmpl.depth0 = box->depth;
363 }
364 tmpl.last_level = 0;
365 tmpl.bind |= PIPE_BIND_LINEAR;
366
367 struct pipe_resource *pstaging =
368 pctx->screen->resource_create(pctx->screen, &tmpl);
369 if (!pstaging)
370 return NULL;
371
372 return fd_resource(pstaging);
373 }
374
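/* Copy the staging contents back into the real resource at the end of a
 * write transfer.
 */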
375 static void
376 fd_blit_from_staging(struct fd_context *ctx, struct fd_transfer *trans)
377 {
378 struct pipe_resource *dst = trans->base.resource;
379 struct pipe_blit_info blit = {};
380
381 blit.dst.resource = dst;
382 blit.dst.format = dst->format;
383 blit.dst.level = trans->base.level;
384 blit.dst.box = trans->base.box;
385 blit.src.resource = trans->staging_prsc;
386 blit.src.format = trans->staging_prsc->format;
387 blit.src.level = 0;
388 blit.src.box = trans->staging_box;
389 blit.mask = util_format_get_mask(trans->staging_prsc->format);
390 blit.filter = PIPE_TEX_FILTER_NEAREST;
391
392 do_blit(ctx, &blit, false);
393 }
394
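/* Fill the staging resource from the real resource so a read transfer
 * sees the current contents.
 */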
395 static void
396 fd_blit_to_staging(struct fd_context *ctx, struct fd_transfer *trans)
397 {
398 struct pipe_resource *src = trans->base.resource;
399 struct pipe_blit_info blit = {};
400
401 blit.src.resource = src;
402 blit.src.format = src->format;
403 blit.src.level = trans->base.level;
404 blit.src.box = trans->base.box;
405 blit.dst.resource = trans->staging_prsc;
406 blit.dst.format = trans->staging_prsc->format;
407 blit.dst.level = 0;
408 blit.dst.box = trans->staging_box;
409 blit.mask = util_format_get_mask(trans->staging_prsc->format);
410 blit.filter = PIPE_TEX_FILTER_NEAREST;
411
412 do_blit(ctx, &blit, false);
413 }
414
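/* For buffer resources, record the explicitly flushed sub-range (translated
 * from transfer-relative to buffer-relative coordinates) as valid.
 */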
415 static void fd_resource_transfer_flush_region(struct pipe_context *pctx,
416 struct pipe_transfer *ptrans,
417 const struct pipe_box *box)
418 {
419 struct fd_resource *rsc = fd_resource(ptrans->resource);
420
421 if (ptrans->resource->target == PIPE_BUFFER)
422 util_range_add(&rsc->base, &rsc->valid_buffer_range,
423 ptrans->box.x + box->x,
424 ptrans->box.x + box->x + box->width);
425 }
426
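/* Flush rendering that touches the resource: for write access, flush every
 * batch referencing it; for read access, only the batch currently writing
 * it.
 */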
427 static void
428 flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
429 {
430 struct fd_batch *write_batch = NULL;
431
432 mtx_lock(&ctx->screen->lock);
433 fd_batch_reference_locked(&write_batch, rsc->write_batch);
434 mtx_unlock(&ctx->screen->lock);
435
436 if (usage & PIPE_TRANSFER_WRITE) {
437 struct fd_batch *batch, *batches[32] = {};
438 uint32_t batch_mask;
439
440 /* This is a bit awkward, probably a fd_batch_flush_locked()
441 * would make things simpler.. but we need to hold the lock
442 * to iterate the batches which reference this resource. So
443 * we must first grab references under a lock, then flush.
444 */
445 mtx_lock(&ctx->screen->lock);
446 batch_mask = rsc->batch_mask;
447 foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
448 fd_batch_reference_locked(&batches[batch->idx], batch);
449 mtx_unlock(&ctx->screen->lock);
450
451 foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
452 fd_batch_flush(batch);
453
454 foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
455 fd_batch_reference(&batches[batch->idx], NULL);
456 }
457 assert(rsc->batch_mask == 0);
458 } else if (write_batch) {
459 fd_batch_flush(write_batch);
460 }
461
462 fd_batch_reference(&write_batch, NULL);
463
464 assert(!rsc->write_batch);
465 }
466
467 static void
468 fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
469 {
470 flush_resource(fd_context(pctx), fd_resource(prsc), PIPE_TRANSFER_READ);
471 }
472
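/* Finish a transfer: blit staging contents back for writes, release CPU
 * access to the bo, and extend the valid range covered by the transfer.
 */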
473 static void
474 fd_resource_transfer_unmap(struct pipe_context *pctx,
475 struct pipe_transfer *ptrans)
476 {
477 struct fd_context *ctx = fd_context(pctx);
478 struct fd_resource *rsc = fd_resource(ptrans->resource);
479 struct fd_transfer *trans = fd_transfer(ptrans);
480
481 if (trans->staging_prsc) {
482 if (ptrans->usage & PIPE_TRANSFER_WRITE)
483 fd_blit_from_staging(ctx, trans);
484 pipe_resource_reference(&trans->staging_prsc, NULL);
485 }
486
487 if (!(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
488 fd_bo_cpu_fini(rsc->bo);
489 }
490
491 util_range_add(&rsc->base, &rsc->valid_buffer_range,
492 ptrans->box.x,
493 ptrans->box.x + ptrans->box.width);
494
495 pipe_resource_reference(&ptrans->resource, NULL);
496 slab_free(&ctx->transfer_pool, ptrans);
497 }
498
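/* Map a region of a resource for CPU access.  Tiled/compressed resources go
 * through a staging buffer; otherwise we may reallocate, shadow, or flush
 * batches as needed to avoid (or at least shorten) a stall before mapping
 * the bo directly.
 */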
499 static void *
500 fd_resource_transfer_map(struct pipe_context *pctx,
501 struct pipe_resource *prsc,
502 unsigned level, unsigned usage,
503 const struct pipe_box *box,
504 struct pipe_transfer **pptrans)
505 {
506 struct fd_context *ctx = fd_context(pctx);
507 struct fd_resource *rsc = fd_resource(prsc);
508 struct fdl_slice *slice = fd_resource_slice(rsc, level);
509 struct fd_transfer *trans;
510 struct pipe_transfer *ptrans;
511 enum pipe_format format = prsc->format;
512 uint32_t op = 0;
513 uint32_t offset;
514 char *buf;
515 int ret = 0;
516
517 DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
518 box->width, box->height, box->x, box->y);
519
520 ptrans = slab_alloc(&ctx->transfer_pool);
521 if (!ptrans)
522 return NULL;
523
524 /* slab_alloc() doesn't zero: */
525 trans = fd_transfer(ptrans);
526 memset(trans, 0, sizeof(*trans));
527
528 pipe_resource_reference(&ptrans->resource, prsc);
529 ptrans->level = level;
530 ptrans->usage = usage;
531 ptrans->box = *box;
532 ptrans->stride = util_format_get_nblocksx(format, slice->pitch) * rsc->layout.cpp;
533 ptrans->layer_stride = fd_resource_layer_stride(rsc, level);
534
535 /* we always need a staging texture for tiled buffers:
536 *
537 * TODO we might sometimes want to *also* shadow the resource to avoid
538 * splitting a batch.. for ex, mid-frame texture uploads to a tiled
539 * texture.
540 */
541 if (rsc->layout.tile_mode) {
542 struct fd_resource *staging_rsc;
543
544 staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
545 if (staging_rsc) {
546 struct fdl_slice *staging_slice =
547 fd_resource_slice(staging_rsc, 0);
548 // TODO for PIPE_TRANSFER_READ, need to do untiling blit..
549 trans->staging_prsc = &staging_rsc->base;
550 trans->base.stride = util_format_get_nblocksx(format,
551 staging_slice->pitch) * staging_rsc->layout.cpp;
552 trans->base.layer_stride = fd_resource_layer_stride(staging_rsc, 0);
553 trans->staging_box = *box;
554 trans->staging_box.x = 0;
555 trans->staging_box.y = 0;
556 trans->staging_box.z = 0;
557
558 if (usage & PIPE_TRANSFER_READ) {
559 fd_blit_to_staging(ctx, trans);
560
561 fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe,
562 DRM_FREEDRENO_PREP_READ);
563 }
564
565 buf = fd_bo_map(staging_rsc->bo);
566 offset = 0;
567
568 *pptrans = ptrans;
569
570 ctx->stats.staging_uploads++;
571
572 return buf;
573 }
574 }
575
576 if (ctx->in_shadow && !(usage & PIPE_TRANSFER_READ))
577 usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
578
579 if (usage & PIPE_TRANSFER_READ)
580 op |= DRM_FREEDRENO_PREP_READ;
581
582 if (usage & PIPE_TRANSFER_WRITE)
583 op |= DRM_FREEDRENO_PREP_WRITE;
584
585 if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
586 realloc_bo(rsc, fd_bo_size(rsc->bo));
587 rebind_resource(ctx, prsc);
588 } else if ((usage & PIPE_TRANSFER_WRITE) &&
589 prsc->target == PIPE_BUFFER &&
590 !util_ranges_intersect(&rsc->valid_buffer_range,
591 box->x, box->x + box->width)) {
592 /* We are trying to write to a previously uninitialized range. No need
593 * to wait.
594 */
595 } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
596 struct fd_batch *write_batch = NULL;
597
598 /* hold a reference, so it doesn't disappear under us: */
599 fd_context_lock(ctx);
600 fd_batch_reference_locked(&write_batch, rsc->write_batch);
601 fd_context_unlock(ctx);
602
603 if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
604 write_batch->back_blit) {
605 /* if only thing pending is a back-blit, we can discard it: */
606 fd_batch_reset(write_batch);
607 }
608
609 /* If the GPU is writing to the resource, or if it is reading from the
610 * resource and we're trying to write to it, flush the renders.
611 */
612 bool needs_flush = pending(rsc, !!(usage & PIPE_TRANSFER_WRITE));
613 bool busy = needs_flush || (0 != fd_bo_cpu_prep(rsc->bo,
614 ctx->pipe, op | DRM_FREEDRENO_PREP_NOSYNC));
615
616 /* if we need to flush/stall, see if we can make a shadow buffer
617 * to avoid this:
618 *
619 * TODO we could also go down this path when !reorder && !busy_for_read,
620 * ie. we only *don't* want to go down this path if the blit
621 * will trigger a flush!
622 */
623 if (ctx->screen->reorder && busy && !(usage & PIPE_TRANSFER_READ) &&
624 (usage & PIPE_TRANSFER_DISCARD_RANGE)) {
625 /* try shadowing only if it avoids a flush, otherwise staging would
626 * be better:
627 */
628 if (needs_flush && fd_try_shadow_resource(ctx, rsc, level,
629 box, DRM_FORMAT_MOD_LINEAR)) {
630 needs_flush = busy = false;
631 rebind_resource(ctx, prsc);
632 ctx->stats.shadow_uploads++;
633 } else {
634 struct fd_resource *staging_rsc;
635
636 if (needs_flush) {
637 flush_resource(ctx, rsc, usage);
638 needs_flush = false;
639 }
640
641 /* in this case, we don't need to shadow the whole resource,
642 * since any draw that references the previous contents has
643 * already had rendering flushed for all tiles. So we can
644 * use a staging buffer to do the upload.
645 */
646 staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
647 if (staging_rsc) {
648 struct fdl_slice *staging_slice =
649 fd_resource_slice(staging_rsc, 0);
650 trans->staging_prsc = &staging_rsc->base;
651 trans->base.stride = util_format_get_nblocksx(format,
652 staging_slice->pitch) * staging_rsc->layout.cpp;
653 trans->base.layer_stride =
654 fd_resource_layer_stride(staging_rsc, 0);
655 trans->staging_box = *box;
656 trans->staging_box.x = 0;
657 trans->staging_box.y = 0;
658 trans->staging_box.z = 0;
659 buf = fd_bo_map(staging_rsc->bo);
660 offset = 0;
661
662 *pptrans = ptrans;
663
664 fd_batch_reference(&write_batch, NULL);
665
666 ctx->stats.staging_uploads++;
667
668 return buf;
669 }
670 }
671 }
672
673 if (needs_flush) {
674 flush_resource(ctx, rsc, usage);
675 needs_flush = false;
676 }
677
678 fd_batch_reference(&write_batch, NULL);
679
680 /* The GPU keeps track of how the various bo's are being used, and
681 * will wait if necessary for the proper operation to have
682 * completed.
683 */
684 if (busy) {
685 ret = fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);
686 if (ret)
687 goto fail;
688 }
689 }
690
691 buf = fd_bo_map(rsc->bo);
692 offset =
693 box->y / util_format_get_blockheight(format) * ptrans->stride +
694 box->x / util_format_get_blockwidth(format) * rsc->layout.cpp +
695 fd_resource_offset(rsc, level, box->z);
696
697 if (usage & PIPE_TRANSFER_WRITE)
698 rsc->valid = true;
699
700 *pptrans = ptrans;
701
702 return buf + offset;
703
704 fail:
705 fd_resource_transfer_unmap(pctx, ptrans);
706 return NULL;
707 }
708
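/* Release everything owned by the resource: batch-cache entries, the backing
 * bo, any renderonly scanout object, and the valid-range tracking.
 */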
709 static void
710 fd_resource_destroy(struct pipe_screen *pscreen,
711 struct pipe_resource *prsc)
712 {
713 struct fd_resource *rsc = fd_resource(prsc);
714 fd_bc_invalidate_resource(rsc, true);
715 if (rsc->bo)
716 fd_bo_del(rsc->bo);
717 if (rsc->scanout)
718 renderonly_scanout_destroy(rsc->scanout, fd_screen(pscreen)->ro);
719
720 util_range_destroy(&rsc->valid_buffer_range);
721 FREE(rsc);
722 }
723
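/* Derive the DRM format modifier describing the resource's layout: linear,
 * UBWC-compressed, or (for tiled-but-not-UBWC) no representable modifier.
 */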
724 static uint64_t
725 fd_resource_modifier(struct fd_resource *rsc)
726 {
727 if (!rsc->layout.tile_mode)
728 return DRM_FORMAT_MOD_LINEAR;
729
730 if (rsc->layout.ubwc_layer_size)
731 return DRM_FORMAT_MOD_QCOM_COMPRESSED;
732
733 /* TODO invent a modifier for tiled but not UBWC buffers: */
734 return DRM_FORMAT_MOD_INVALID;
735 }
736
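/* Export a winsys handle for the resource, reporting its layout via a DRM
 * format modifier and its level-zero pitch in bytes.
 */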
737 static bool
738 fd_resource_get_handle(struct pipe_screen *pscreen,
739 struct pipe_context *pctx,
740 struct pipe_resource *prsc,
741 struct winsys_handle *handle,
742 unsigned usage)
743 {
744 struct fd_resource *rsc = fd_resource(prsc);
745
746 handle->modifier = fd_resource_modifier(rsc);
747
748 return fd_screen_bo_get_handle(pscreen, rsc->bo, rsc->scanout,
749 fd_resource_slice(rsc, 0)->pitch * rsc->layout.cpp, handle);
750 }
751
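/* Lay out the miptree: for each level compute the pitch (honoring the gmem
 * pitch alignment), byte offset and per-layer size, and return the total
 * size needed for the backing bo.
 */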
752 static uint32_t
753 setup_slices(struct fd_resource *rsc, uint32_t alignment, enum pipe_format format)
754 {
755 struct pipe_resource *prsc = &rsc->base;
756 struct fd_screen *screen = fd_screen(prsc->screen);
757 enum util_format_layout layout = util_format_description(format)->layout;
758 uint32_t pitchalign = screen->gmem_alignw;
759 uint32_t level, size = 0;
760 uint32_t width = prsc->width0;
761 uint32_t height = prsc->height0;
762 uint32_t depth = prsc->depth0;
763 /* in layer_first layout, the level (slice) contains just one
764 * layer (since in fact the layer contains the slices)
765 */
766 uint32_t layers_in_level = rsc->layout.layer_first ? 1 : prsc->array_size;
767
768 for (level = 0; level <= prsc->last_level; level++) {
769 struct fdl_slice *slice = fd_resource_slice(rsc, level);
770 uint32_t blocks;
771
772 if (layout == UTIL_FORMAT_LAYOUT_ASTC)
773 slice->pitch = width =
774 util_align_npot(width, pitchalign * util_format_get_blockwidth(format));
775 else
776 slice->pitch = width = align(width, pitchalign);
777 slice->offset = size;
778 blocks = util_format_get_nblocks(format, width, height);
779 /* 1d array and 2d array textures must all have the same layer size
780 * for each miplevel on a3xx. 3d textures can have different layer
781 * sizes for high levels, but the hw auto-sizer is buggy (or at least
782 * different than what this code does), so once the layer size drops
783 * to 0xf000 bytes or below, we stop reducing it.
784 */
785 if (prsc->target == PIPE_TEXTURE_3D && (
786 level == 1 ||
787 (level > 1 && fd_resource_slice(rsc, level - 1)->size0 > 0xf000)))
788 slice->size0 = align(blocks * rsc->layout.cpp, alignment);
789 else if (level == 0 || rsc->layout.layer_first || alignment == 1)
790 slice->size0 = align(blocks * rsc->layout.cpp, alignment);
791 else
792 slice->size0 = fd_resource_slice(rsc, level - 1)->size0;
793
794 size += slice->size0 * depth * layers_in_level;
795
796 width = u_minify(width, 1);
797 height = u_minify(height, 1);
798 depth = u_minify(depth, 1);
799 }
800
801 return size;
802 }
803
804 static uint32_t
805 slice_alignment(enum pipe_texture_target target)
806 {
807 /* on a3xx, 2d array and 3d textures seem to want their
808 * layers aligned to page boundaries:
809 */
810 switch (target) {
811 case PIPE_TEXTURE_3D:
812 case PIPE_TEXTURE_1D_ARRAY:
813 case PIPE_TEXTURE_2D_ARRAY:
814 return 4096;
815 default:
816 return 1;
817 }
818 }
819
820 /* cross generation texture layout to plug in to screen->setup_slices()..
821 * replace with generation specific one as-needed.
822 *
823 * TODO for a4xx probably can extract out the a4xx specific logic into
824 * a small fd4_setup_slices() wrapper that sets up layer_first, and then
825 * calls this.
826 */
827 uint32_t
828 fd_setup_slices(struct fd_resource *rsc)
829 {
830 uint32_t alignment;
831
832 alignment = slice_alignment(rsc->base.target);
833
834 struct fd_screen *screen = fd_screen(rsc->base.screen);
835 if (is_a4xx(screen)) {
836 switch (rsc->base.target) {
837 case PIPE_TEXTURE_3D:
838 rsc->layout.layer_first = false;
839 break;
840 default:
841 rsc->layout.layer_first = true;
842 alignment = 1;
843 break;
844 }
845 }
846
847 return setup_slices(rsc, alignment, rsc->base.format);
848 }
849
850 /* special case to resize the query buffer after it has been allocated.. */
851 void
852 fd_resource_resize(struct pipe_resource *prsc, uint32_t sz)
853 {
854 struct fd_resource *rsc = fd_resource(prsc);
855
856 debug_assert(prsc->width0 == 0);
857 debug_assert(prsc->target == PIPE_BUFFER);
858 debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
859
860 prsc->width0 = sz;
861 realloc_bo(rsc, fd_screen(prsc->screen)->setup_slices(rsc));
862 }
863
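/* Seed the fdl_layout from the resource template: dimensions, plus bytes per
 * block scaled by the sample count for MSAA resources.
 */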
864 static void
865 fd_resource_layout_init(struct pipe_resource *prsc)
866 {
867 struct fd_resource *rsc = fd_resource(prsc);
868 struct fdl_layout *layout = &rsc->layout;
869
870 layout->width0 = prsc->width0;
871 layout->height0 = prsc->height0;
872 layout->depth0 = prsc->depth0;
873
874 layout->cpp = util_format_get_blocksize(prsc->format);
875 layout->cpp *= fd_resource_nr_samples(prsc);
876 }
877
878 /**
879 * Create a new texture object, using the given template info.
880 */
881 static struct pipe_resource *
882 fd_resource_create_with_modifiers(struct pipe_screen *pscreen,
883 const struct pipe_resource *tmpl,
884 const uint64_t *modifiers, int count)
885 {
886 struct fd_screen *screen = fd_screen(pscreen);
887 struct fd_resource *rsc;
888 struct pipe_resource *prsc;
889 enum pipe_format format = tmpl->format;
890 uint32_t size;
891
892 /* when using kmsro, scanout buffers are allocated on the display device.
893 * create_with_modifiers() doesn't give us usage flags, so we have to
894 * assume that all calls with modifiers are scanout-possible.
895 */
896 if (screen->ro &&
897 ((tmpl->bind & PIPE_BIND_SCANOUT) ||
898 !(count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID))) {
899 struct pipe_resource scanout_templat = *tmpl;
900 struct renderonly_scanout *scanout;
901 struct winsys_handle handle;
902
903 /* apply freedreno alignment requirement */
904 scanout_templat.width0 = align(tmpl->width0, screen->gmem_alignw);
905
906 scanout = renderonly_scanout_for_resource(&scanout_templat,
907 screen->ro, &handle);
908 if (!scanout)
909 return NULL;
910
911 renderonly_scanout_destroy(scanout, screen->ro);
912
913 assert(handle.type == WINSYS_HANDLE_TYPE_FD);
914 rsc = fd_resource(pscreen->resource_from_handle(pscreen, tmpl,
915 &handle,
916 PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE));
917 close(handle.handle);
918 if (!rsc)
919 return NULL;
920
921 return &rsc->base;
922 }
923
924 rsc = CALLOC_STRUCT(fd_resource);
925 prsc = &rsc->base;
926
927 DBG("%p: target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
928 "nr_samples=%u, usage=%u, bind=%x, flags=%x", prsc,
929 tmpl->target, util_format_name(format),
930 tmpl->width0, tmpl->height0, tmpl->depth0,
931 tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
932 tmpl->usage, tmpl->bind, tmpl->flags);
933
934 if (!rsc)
935 return NULL;
936
937 *prsc = *tmpl;
938 fd_resource_layout_init(prsc);
939
940 #define LINEAR \
941 (PIPE_BIND_SCANOUT | \
942 PIPE_BIND_LINEAR | \
943 PIPE_BIND_DISPLAY_TARGET)
944
945 bool linear = drm_find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count);
946 if (tmpl->bind & LINEAR)
947 linear = true;
948
949 if (fd_mesa_debug & FD_DBG_NOTILE)
950 linear = true;
951
952 /* Normally, allow buffer compression for non-shared buffers; for
953 * shared buffers, only allow it if the QCOM_COMPRESSED modifier
954 * is requested:
955 *
956 * TODO we should probably also limit tiled in a similar way,
957 * except we don't have a format modifier for tiled. (We probably
958 * should.)
959 */
960 bool allow_ubwc = drm_find_modifier(DRM_FORMAT_MOD_INVALID, modifiers, count);
961 if (tmpl->bind & PIPE_BIND_SHARED)
962 allow_ubwc = drm_find_modifier(DRM_FORMAT_MOD_QCOM_COMPRESSED, modifiers, count);
963
964 allow_ubwc &= !(fd_mesa_debug & FD_DBG_NOUBWC);
965
966 pipe_reference_init(&prsc->reference, 1);
967
968 prsc->screen = pscreen;
969
970 if (screen->tile_mode &&
971 (tmpl->target != PIPE_BUFFER) &&
972 !linear) {
973 rsc->layout.tile_mode = screen->tile_mode(prsc);
974 }
975
976 util_range_init(&rsc->valid_buffer_range);
977
978 rsc->internal_format = format;
979
980 rsc->layout.ubwc = rsc->layout.tile_mode && is_a6xx(screen) && allow_ubwc;
981
982 if (prsc->target == PIPE_BUFFER) {
983 assert(prsc->format == PIPE_FORMAT_R8_UNORM);
984 size = prsc->width0;
985 fdl_layout_buffer(&rsc->layout, size);
986 } else {
987 size = screen->setup_slices(rsc);
988 }
989
990 /* special case for hw-query buffer, which we need to allocate before we
991 * know the size:
992 */
993 if (size == 0) {
994 /* note, semi-intentional == instead of & */
995 debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
996 return prsc;
997 }
998
999 /* Set the layer size if the (non-a6xx) backend hasn't done so. */
1000 if (rsc->layout.layer_first && !rsc->layout.layer_size) {
1001 rsc->layout.layer_size = align(size, 4096);
1002 size = rsc->layout.layer_size * prsc->array_size;
1003 }
1004
1005 if (fd_mesa_debug & FD_DBG_LAYOUT)
1006 fdl_dump_layout(&rsc->layout);
1007
1008 realloc_bo(rsc, size);
1009 if (!rsc->bo)
1010 goto fail;
1011
1012 return prsc;
1013 fail:
1014 fd_resource_destroy(pscreen, prsc);
1015 return NULL;
1016 }
1017
1018 static struct pipe_resource *
1019 fd_resource_create(struct pipe_screen *pscreen,
1020 const struct pipe_resource *tmpl)
1021 {
1022 const uint64_t mod = DRM_FORMAT_MOD_INVALID;
1023 return fd_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
1024 }
1025
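/* Check whether 'mod' is among the dmabuf modifiers the screen advertises
 * for the given format.
 */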
1026 static bool
1027 is_supported_modifier(struct pipe_screen *pscreen, enum pipe_format pfmt,
1028 uint64_t mod)
1029 {
1030 int count;
1031
1032 /* Get the count of supported modifiers: */
1033 pscreen->query_dmabuf_modifiers(pscreen, pfmt, 0, NULL, NULL, &count);
1034
1035 /* Get the supported modifiers: */
1036 uint64_t modifiers[count];
1037 pscreen->query_dmabuf_modifiers(pscreen, pfmt, count, modifiers, NULL, &count);
1038
1039 for (int i = 0; i < count; i++)
1040 if (modifiers[i] == mod)
1041 return true;
1042
1043 return false;
1044 }
1045
1046 /**
1047 * Create a texture from a winsys_handle. The handle is often created in
1048 * another process by first creating a pipe texture and then calling
1049 * resource_get_handle.
1050 */
1051 static struct pipe_resource *
1052 fd_resource_from_handle(struct pipe_screen *pscreen,
1053 const struct pipe_resource *tmpl,
1054 struct winsys_handle *handle, unsigned usage)
1055 {
1056 struct fd_screen *screen = fd_screen(pscreen);
1057 struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
1058 struct fdl_slice *slice = fd_resource_slice(rsc, 0);
1059 struct pipe_resource *prsc = &rsc->base;
1060 uint32_t pitchalign = fd_screen(pscreen)->gmem_alignw;
1061
1062 DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
1063 "nr_samples=%u, usage=%u, bind=%x, flags=%x",
1064 tmpl->target, util_format_name(tmpl->format),
1065 tmpl->width0, tmpl->height0, tmpl->depth0,
1066 tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
1067 tmpl->usage, tmpl->bind, tmpl->flags);
1068
1069 if (!rsc)
1070 return NULL;
1071
1072 *prsc = *tmpl;
1073 fd_resource_layout_init(prsc);
1074
1075 pipe_reference_init(&prsc->reference, 1);
1076
1077 prsc->screen = pscreen;
1078
1079 util_range_init(&rsc->valid_buffer_range);
1080
1081 rsc->bo = fd_screen_bo_from_handle(pscreen, handle);
1082 if (!rsc->bo)
1083 goto fail;
1084
1085 rsc->internal_format = tmpl->format;
1086 slice->pitch = handle->stride / rsc->layout.cpp;
1087 slice->offset = handle->offset;
1088 slice->size0 = handle->stride * prsc->height0;
1089
1090 if ((slice->pitch < align(prsc->width0, pitchalign)) ||
1091 (slice->pitch & (pitchalign - 1)))
1092 goto fail;
1093
1094 if (handle->modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) {
1095 if (!is_supported_modifier(pscreen, tmpl->format,
1096 DRM_FORMAT_MOD_QCOM_COMPRESSED)) {
1097 DBG("bad modifier: %"PRIx64, handle->modifier);
1098 goto fail;
1099 }
1100 /* XXX UBWC setup */
1101 } else if (handle->modifier &&
1102 (handle->modifier != DRM_FORMAT_MOD_INVALID)) {
1103 goto fail;
1104 }
1105
1106 assert(rsc->layout.cpp);
1107
1108 if (screen->ro) {
1109 rsc->scanout =
1110 renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
1111 /* failure is expected in some cases.. */
1112 }
1113
1114 rsc->valid = true;
1115
1116 return prsc;
1117
1118 fail:
1119 fd_resource_destroy(pscreen, prsc);
1120 return NULL;
1121 }
1122
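/* Evaluate the current render condition (if any).  Returns true if rendering
 * should proceed: either there is no condition, the query result allows it,
 * or the result isn't available yet and we're allowed not to wait.
 */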
1123 bool
1124 fd_render_condition_check(struct pipe_context *pctx)
1125 {
1126 struct fd_context *ctx = fd_context(pctx);
1127
1128 if (!ctx->cond_query)
1129 return true;
1130
1131 union pipe_query_result res = { 0 };
1132 bool wait =
1133 ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
1134 ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;
1135
1136 if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
1137 return (bool)res.u64 != ctx->cond_cond;
1138
1139 return true;
1140 }
1141
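/* Handle pipe_context::invalidate_resource(): drop pending gmem resolves for
 * any framebuffer attachment backed by the resource, set the dirty bits that
 * trigger the invalidate logic, and mark the contents as no longer valid.
 */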
1142 static void
1143 fd_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
1144 {
1145 struct fd_context *ctx = fd_context(pctx);
1146 struct fd_resource *rsc = fd_resource(prsc);
1147
1148 /*
1149 * TODO I guess we could track that the resource is invalidated and
1150 * use that as a hint to realloc rather than stall in _transfer_map(),
1151 * even in the non-DISCARD_WHOLE_RESOURCE case?
1152 *
1153 * Note: we set dirty bits to trigger invalidate logic in fd_draw_vbo
1154 */
1155
1156 if (rsc->write_batch) {
1157 struct fd_batch *batch = rsc->write_batch;
1158 struct pipe_framebuffer_state *pfb = &batch->framebuffer;
1159
1160 if (pfb->zsbuf && pfb->zsbuf->texture == prsc) {
1161 batch->resolve &= ~(FD_BUFFER_DEPTH | FD_BUFFER_STENCIL);
1162 ctx->dirty |= FD_DIRTY_ZSA;
1163 }
1164
1165 for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
1166 if (pfb->cbufs[i] && pfb->cbufs[i]->texture == prsc) {
1167 batch->resolve &= ~(PIPE_CLEAR_COLOR0 << i);
1168 ctx->dirty |= FD_DIRTY_FRAMEBUFFER;
1169 }
1170 }
1171 }
1172
1173 rsc->valid = false;
1174 }
1175
1176 static enum pipe_format
1177 fd_resource_get_internal_format(struct pipe_resource *prsc)
1178 {
1179 return fd_resource(prsc)->internal_format;
1180 }
1181
1182 static void
1183 fd_resource_set_stencil(struct pipe_resource *prsc,
1184 struct pipe_resource *stencil)
1185 {
1186 fd_resource(prsc)->stencil = fd_resource(stencil);
1187 }
1188
1189 static struct pipe_resource *
1190 fd_resource_get_stencil(struct pipe_resource *prsc)
1191 {
1192 struct fd_resource *rsc = fd_resource(prsc);
1193 if (rsc->stencil)
1194 return &rsc->stencil->base;
1195 return NULL;
1196 }
1197
1198 static const struct u_transfer_vtbl transfer_vtbl = {
1199 .resource_create = fd_resource_create,
1200 .resource_destroy = fd_resource_destroy,
1201 .transfer_map = fd_resource_transfer_map,
1202 .transfer_flush_region = fd_resource_transfer_flush_region,
1203 .transfer_unmap = fd_resource_transfer_unmap,
1204 .get_internal_format = fd_resource_get_internal_format,
1205 .set_stencil = fd_resource_set_stencil,
1206 .get_stencil = fd_resource_get_stencil,
1207 };
1208
1209 void
1210 fd_resource_screen_init(struct pipe_screen *pscreen)
1211 {
1212 struct fd_screen *screen = fd_screen(pscreen);
1213 bool fake_rgtc = screen->gpu_id < 400;
1214
1215 pscreen->resource_create = u_transfer_helper_resource_create;
1216 /* NOTE: u_transfer_helper does not yet support the _with_modifiers()
1217 * variant:
1218 */
1219 pscreen->resource_create_with_modifiers = fd_resource_create_with_modifiers;
1220 pscreen->resource_from_handle = fd_resource_from_handle;
1221 pscreen->resource_get_handle = fd_resource_get_handle;
1222 pscreen->resource_destroy = u_transfer_helper_resource_destroy;
1223
1224 pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
1225 true, false, fake_rgtc, true);
1226
1227 if (!screen->setup_slices)
1228 screen->setup_slices = fd_setup_slices;
1229 }
1230
1231 static void
1232 fd_get_sample_position(struct pipe_context *context,
1233 unsigned sample_count, unsigned sample_index,
1234 float *pos_out)
1235 {
1236 /* The following is copied from nouveau/nv50 except for position
1237 * values, which are taken from the blob driver */
1238 static const uint8_t pos1[1][2] = { { 0x8, 0x8 } };
1239 static const uint8_t pos2[2][2] = {
1240 { 0xc, 0xc }, { 0x4, 0x4 } };
1241 static const uint8_t pos4[4][2] = {
1242 { 0x6, 0x2 }, { 0xe, 0x6 },
1243 { 0x2, 0xa }, { 0xa, 0xe } };
1244 /* TODO needs to be verified on supported hw */
1245 static const uint8_t pos8[8][2] = {
1246 { 0x9, 0x5 }, { 0x7, 0xb },
1247 { 0xd, 0x9 }, { 0x5, 0x3 },
1248 { 0x3, 0xd }, { 0x1, 0x7 },
1249 { 0xb, 0xf }, { 0xf, 0x1 } };
1250
1251 const uint8_t (*ptr)[2];
1252
1253 switch (sample_count) {
1254 case 1:
1255 ptr = pos1;
1256 break;
1257 case 2:
1258 ptr = pos2;
1259 break;
1260 case 4:
1261 ptr = pos4;
1262 break;
1263 case 8:
1264 ptr = pos8;
1265 break;
1266 default:
1267 assert(0);
1268 return;
1269 }
1270
1271 pos_out[0] = ptr[sample_index][0] / 16.0f;
1272 pos_out[1] = ptr[sample_index][1] / 16.0f;
1273 }
1274
1275 static void
1276 fd_blit_pipe(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
1277 {
1278 /* wrap fd_blit to return void */
1279 fd_blit(pctx, blit_info);
1280 }
1281
1282 void
1283 fd_resource_context_init(struct pipe_context *pctx)
1284 {
1285 pctx->transfer_map = u_transfer_helper_transfer_map;
1286 pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
1287 pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
1288 pctx->buffer_subdata = u_default_buffer_subdata;
1289 pctx->texture_subdata = u_default_texture_subdata;
1290 pctx->create_surface = fd_create_surface;
1291 pctx->surface_destroy = fd_surface_destroy;
1292 pctx->resource_copy_region = fd_resource_copy_region;
1293 pctx->blit = fd_blit_pipe;
1294 pctx->flush_resource = fd_flush_resource;
1295 pctx->invalidate_resource = fd_invalidate_resource;
1296 pctx->get_sample_position = fd_get_sample_position;
1297 }