freedreno: Make the slice pitch be bytes, not pixels.
[mesa.git] src/gallium/drivers/freedreno/freedreno_resource.c
1 /*
2 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include "util/format/u_format.h"
28 #include "util/format/u_format_rgtc.h"
29 #include "util/format/u_format_zs.h"
30 #include "util/u_inlines.h"
31 #include "util/u_transfer.h"
32 #include "util/u_string.h"
33 #include "util/u_surface.h"
34 #include "util/set.h"
35 #include "util/u_drm.h"
36
37 #include "freedreno_resource.h"
38 #include "freedreno_batch_cache.h"
39 #include "freedreno_blitter.h"
40 #include "freedreno_fence.h"
41 #include "freedreno_screen.h"
42 #include "freedreno_surface.h"
43 #include "freedreno_context.h"
44 #include "freedreno_query_hw.h"
45 #include "freedreno_util.h"
46
47 #include "drm-uapi/drm_fourcc.h"
48 #include <errno.h>
49
50 /* XXX this should go away, needed for 'struct winsys_handle' */
51 #include "state_tracker/drm_driver.h"
52
53 /* A private modifier for now, so we have a way to request tiled but not
54 * compressed. It would perhaps be good to get real modifiers for the
55 * tiled formats, but would probably need to do some work to figure out
56 * the layout(s) of the tiled modes, and whether they are the same
57 * across generations.
58 */
59 #define FD_FORMAT_MOD_QCOM_TILED fourcc_mod_code(QCOM, 0xffffffff)
60
61 /**
62 * Go through the entire state and see if the resource is bound
63 * anywhere. If it is, mark the relevant state as dirty. This is
  64  * called on realloc_bo to ensure the necessary state is re-
65 * emitted so the GPU looks at the new backing bo.
66 */
67 static void
68 rebind_resource(struct fd_context *ctx, struct pipe_resource *prsc)
69 {
70 /* VBOs */
71 for (unsigned i = 0; i < ctx->vtx.vertexbuf.count && !(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
72 if (ctx->vtx.vertexbuf.vb[i].buffer.resource == prsc)
73 ctx->dirty |= FD_DIRTY_VTXBUF;
74 }
75
76 /* per-shader-stage resources: */
77 for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
78 /* Constbufs.. note that constbuf[0] is normal uniforms emitted in
79 * cmdstream rather than by pointer..
80 */
81 const unsigned num_ubos = util_last_bit(ctx->constbuf[stage].enabled_mask);
82 for (unsigned i = 1; i < num_ubos; i++) {
83 if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_CONST)
84 break;
85 if (ctx->constbuf[stage].cb[i].buffer == prsc)
86 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_CONST;
87 }
88
89 /* Textures */
90 for (unsigned i = 0; i < ctx->tex[stage].num_textures; i++) {
91 if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_TEX)
92 break;
93 if (ctx->tex[stage].textures[i] && (ctx->tex[stage].textures[i]->texture == prsc))
94 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_TEX;
95 }
96
97 /* Images */
98 const unsigned num_images = util_last_bit(ctx->shaderimg[stage].enabled_mask);
99 for (unsigned i = 0; i < num_images; i++) {
100 if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_IMAGE)
101 break;
102 if (ctx->shaderimg[stage].si[i].resource == prsc)
103 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_IMAGE;
104 }
105
106 /* SSBOs */
107 const unsigned num_ssbos = util_last_bit(ctx->shaderbuf[stage].enabled_mask);
108 for (unsigned i = 0; i < num_ssbos; i++) {
109 if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_SSBO)
110 break;
111 if (ctx->shaderbuf[stage].sb[i].buffer == prsc)
112 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_SSBO;
113 }
114 }
115 }
116
117 static void
118 realloc_bo(struct fd_resource *rsc, uint32_t size)
119 {
120 struct pipe_resource *prsc = &rsc->base;
121 struct fd_screen *screen = fd_screen(rsc->base.screen);
122 uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
123 DRM_FREEDRENO_GEM_TYPE_KMEM |
124 COND(prsc->bind & PIPE_BIND_SCANOUT, DRM_FREEDRENO_GEM_SCANOUT);
125 /* TODO other flags? */
126
127 /* if we start using things other than write-combine,
128 * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
129 */
130
131 if (rsc->bo)
132 fd_bo_del(rsc->bo);
133
134 rsc->bo = fd_bo_new(screen->dev, size, flags, "%ux%ux%u@%u:%x",
135 prsc->width0, prsc->height0, prsc->depth0, rsc->layout.cpp, prsc->bind);
136
137 /* Zero out the UBWC area on allocation. This fixes intermittent failures
138 * with UBWC, which I suspect are due to the HW having a hard time
139 * interpreting arbitrary values populating the flags buffer when the BO
140 * was recycled through the bo cache (instead of fresh allocations from
141 * the kernel, which are zeroed). sleep(1) in this spot didn't work
142 * around the issue, but any memset value seems to.
143 */
144 if (rsc->layout.ubwc) {
145 void *buf = fd_bo_map(rsc->bo);
146 memset(buf, 0, rsc->layout.slices[0].offset);
147 }
148
149 rsc->seqno = p_atomic_inc_return(&screen->rsc_seqno);
150 util_range_set_empty(&rsc->valid_buffer_range);
151 fd_bc_invalidate_resource(rsc, true);
152 }
153
154 static void
155 do_blit(struct fd_context *ctx, const struct pipe_blit_info *blit, bool fallback)
156 {
157 struct pipe_context *pctx = &ctx->base;
158
159 /* TODO size threshold too?? */
160 if (fallback || !fd_blit(pctx, blit)) {
161 /* do blit on cpu: */
162 util_resource_copy_region(pctx,
163 blit->dst.resource, blit->dst.level, blit->dst.box.x,
164 blit->dst.box.y, blit->dst.box.z,
165 blit->src.resource, blit->src.level, &blit->src.box);
166 }
167 }
168
169 /**
170 * @rsc: the resource to shadow
171 * @level: the level to discard (if box != NULL, otherwise ignored)
172 * @box: the box to discard (or NULL if none)
173 * @modifier: the modifier for the new buffer state
174 */
175 static bool
176 fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
177 unsigned level, const struct pipe_box *box, uint64_t modifier)
178 {
179 struct pipe_context *pctx = &ctx->base;
180 struct pipe_resource *prsc = &rsc->base;
181 bool fallback = false;
182
183 if (prsc->next)
184 return false;
185
186 /* TODO: somehow munge dimensions and format to copy unsupported
187 * render target format to something that is supported?
188 */
189 if (!pctx->screen->is_format_supported(pctx->screen,
190 prsc->format, prsc->target, prsc->nr_samples,
191 prsc->nr_storage_samples,
192 PIPE_BIND_RENDER_TARGET))
193 fallback = true;
194
195 /* do shadowing back-blits on the cpu for buffers: */
196 if (prsc->target == PIPE_BUFFER)
197 fallback = true;
198
199 bool discard_whole_level = box && util_texrange_covers_whole_level(prsc, level,
200 box->x, box->y, box->z, box->width, box->height, box->depth);
201
202 /* TODO need to be more clever about current level */
203 if ((prsc->target >= PIPE_TEXTURE_2D) && box && !discard_whole_level)
204 return false;
205
206 struct pipe_resource *pshadow =
207 pctx->screen->resource_create_with_modifiers(pctx->screen,
208 prsc, &modifier, 1);
209
210 if (!pshadow)
211 return false;
212
213 assert(!ctx->in_shadow);
214 ctx->in_shadow = true;
215
216 /* get rid of any references that batch-cache might have to us (which
217 * should empty/destroy rsc->batches hashset)
218 */
219 fd_bc_invalidate_resource(rsc, false);
220
221 mtx_lock(&ctx->screen->lock);
222
223 /* Swap the backing bo's, so shadow becomes the old buffer,
 224  * and blit from the shadow to the new buffer.  From here on out, we
225 * cannot fail.
226 *
227 * Note that we need to do it in this order, otherwise if
 228  * we go down the cpu blit path, the recursive transfer_map()
229 * sees the wrong status..
230 */
231 struct fd_resource *shadow = fd_resource(pshadow);
232
233 DBG("shadow: %p (%d) -> %p (%d)\n", rsc, rsc->base.reference.count,
234 shadow, shadow->base.reference.count);
235
236 /* TODO valid_buffer_range?? */
237 swap(rsc->bo, shadow->bo);
238 swap(rsc->write_batch, shadow->write_batch);
239 swap(rsc->layout, shadow->layout);
240 rsc->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);
241
242 /* at this point, the newly created shadow buffer is not referenced
243 * by any batches, but the existing rsc (probably) is. We need to
244 * transfer those references over:
245 */
246 debug_assert(shadow->batch_mask == 0);
247 struct fd_batch *batch;
248 foreach_batch(batch, &ctx->screen->batch_cache, rsc->batch_mask) {
249 struct set_entry *entry = _mesa_set_search(batch->resources, rsc);
250 _mesa_set_remove(batch->resources, entry);
251 _mesa_set_add(batch->resources, shadow);
252 }
253 swap(rsc->batch_mask, shadow->batch_mask);
254
255 mtx_unlock(&ctx->screen->lock);
256
257 struct pipe_blit_info blit = {};
258 blit.dst.resource = prsc;
259 blit.dst.format = prsc->format;
260 blit.src.resource = pshadow;
261 blit.src.format = pshadow->format;
262 blit.mask = util_format_get_mask(prsc->format);
263 blit.filter = PIPE_TEX_FILTER_NEAREST;
264
265 #define set_box(field, val) do { \
266 blit.dst.field = (val); \
267 blit.src.field = (val); \
268 } while (0)
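/* set_box() assigns the same value to both the dst and src halves of the
 * blit, since the shadow copy is a 1:1 same-size copy.
 */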
269
270 /* blit the other levels in their entirety: */
271 for (unsigned l = 0; l <= prsc->last_level; l++) {
272 if (box && l == level)
273 continue;
274
275 /* just blit whole level: */
276 set_box(level, l);
277 set_box(box.width, u_minify(prsc->width0, l));
278 set_box(box.height, u_minify(prsc->height0, l));
279 set_box(box.depth, u_minify(prsc->depth0, l));
280
281 for (int i = 0; i < prsc->array_size; i++) {
282 set_box(box.z, i);
283 do_blit(ctx, &blit, fallback);
284 }
285 }
286
287 /* deal w/ current level specially, since we might need to split
288 * it up into a couple blits:
289 */
290 if (box && !discard_whole_level) {
291 set_box(level, level);
292
293 switch (prsc->target) {
294 case PIPE_BUFFER:
295 case PIPE_TEXTURE_1D:
296 set_box(box.y, 0);
297 set_box(box.z, 0);
298 set_box(box.height, 1);
299 set_box(box.depth, 1);
300
301 if (box->x > 0) {
302 set_box(box.x, 0);
303 set_box(box.width, box->x);
304
305 do_blit(ctx, &blit, fallback);
306 }
307 if ((box->x + box->width) < u_minify(prsc->width0, level)) {
308 set_box(box.x, box->x + box->width);
309 set_box(box.width, u_minify(prsc->width0, level) - (box->x + box->width));
310
311 do_blit(ctx, &blit, fallback);
312 }
313 break;
314 case PIPE_TEXTURE_2D:
315 /* TODO */
316 default:
317 unreachable("TODO");
318 }
319 }
320
321 ctx->in_shadow = false;
322
323 pipe_resource_reference(&pshadow, NULL);
324
325 return true;
326 }
327
328 /**
 329  * Uncompress a UBWC-compressed buffer "in place".  This works much like
 330  * resource shadowing: create a new resource, do an uncompress blit, and
 331  * swap the state between the shadow and the original resource, so it
 332  * appears to the state tracker as if nothing changed.
333 */
334 void
335 fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc)
336 {
337 bool success =
338 fd_try_shadow_resource(ctx, rsc, 0, NULL, FD_FORMAT_MOD_QCOM_TILED);
339
340 /* shadow should not fail in any cases where we need to uncompress: */
341 debug_assert(success);
342
343 /*
 344  * TODO what if rsc is used in other contexts?  We don't currently
 345  * have a good way to rebind_resource() in other contexts.  And an
 346  * app that is reading one resource in multiple contexts isn't
 347  * going to expect that the resource is modified.
348 *
349 * Hopefully the edge cases where we need to uncompress are rare
350 * enough that they mostly only show up in deqp.
351 */
352
353 rebind_resource(ctx, &rsc->base);
354 }
355
356 static struct fd_resource *
357 fd_alloc_staging(struct fd_context *ctx, struct fd_resource *rsc,
358 unsigned level, const struct pipe_box *box)
359 {
360 struct pipe_context *pctx = &ctx->base;
361 struct pipe_resource tmpl = rsc->base;
362
363 tmpl.width0 = box->width;
364 tmpl.height0 = box->height;
 365  /* for array textures, box->depth is the array_size; for 3d
 366   * textures, it is the depth:
367 */
368 if (tmpl.array_size > 1) {
369 if (tmpl.target == PIPE_TEXTURE_CUBE)
370 tmpl.target = PIPE_TEXTURE_2D_ARRAY;
371 tmpl.array_size = box->depth;
372 tmpl.depth0 = 1;
373 } else {
374 tmpl.array_size = 1;
375 tmpl.depth0 = box->depth;
376 }
377 tmpl.last_level = 0;
378 tmpl.bind |= PIPE_BIND_LINEAR;
379
380 struct pipe_resource *pstaging =
381 pctx->screen->resource_create(pctx->screen, &tmpl);
382 if (!pstaging)
383 return NULL;
384
385 return fd_resource(pstaging);
386 }
387
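/* Blit from the linear staging resource back into the real resource; used at
 * transfer_unmap time when a write transfer went through a staging buffer.
 */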
388 static void
389 fd_blit_from_staging(struct fd_context *ctx, struct fd_transfer *trans)
390 {
391 struct pipe_resource *dst = trans->base.resource;
392 struct pipe_blit_info blit = {};
393
394 blit.dst.resource = dst;
395 blit.dst.format = dst->format;
396 blit.dst.level = trans->base.level;
397 blit.dst.box = trans->base.box;
398 blit.src.resource = trans->staging_prsc;
399 blit.src.format = trans->staging_prsc->format;
400 blit.src.level = 0;
401 blit.src.box = trans->staging_box;
402 blit.mask = util_format_get_mask(trans->staging_prsc->format);
403 blit.filter = PIPE_TEX_FILTER_NEAREST;
404
405 do_blit(ctx, &blit, false);
406 }
407
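/* Blit the current contents of the real resource into the linear staging
 * resource, so that a read transfer sees up-to-date data.
 */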
408 static void
409 fd_blit_to_staging(struct fd_context *ctx, struct fd_transfer *trans)
410 {
411 struct pipe_resource *src = trans->base.resource;
412 struct pipe_blit_info blit = {};
413
414 blit.src.resource = src;
415 blit.src.format = src->format;
416 blit.src.level = trans->base.level;
417 blit.src.box = trans->base.box;
418 blit.dst.resource = trans->staging_prsc;
419 blit.dst.format = trans->staging_prsc->format;
420 blit.dst.level = 0;
421 blit.dst.box = trans->staging_box;
422 blit.mask = util_format_get_mask(trans->staging_prsc->format);
423 blit.filter = PIPE_TEX_FILTER_NEAREST;
424
425 do_blit(ctx, &blit, false);
426 }
427
428 static void fd_resource_transfer_flush_region(struct pipe_context *pctx,
429 struct pipe_transfer *ptrans,
430 const struct pipe_box *box)
431 {
432 struct fd_resource *rsc = fd_resource(ptrans->resource);
433
434 if (ptrans->resource->target == PIPE_BUFFER)
435 util_range_add(&rsc->base, &rsc->valid_buffer_range,
436 ptrans->box.x + box->x,
437 ptrans->box.x + box->x + box->width);
438 }
439
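/* Flush the batch that writes this resource; if the caller intends to write
 * the resource, also flush every batch that references it.
 */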
440 static void
441 flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
442 {
443 struct fd_batch *write_batch = NULL;
444
445 mtx_lock(&ctx->screen->lock);
446 fd_batch_reference_locked(&write_batch, rsc->write_batch);
447 mtx_unlock(&ctx->screen->lock);
448
449 if (usage & PIPE_TRANSFER_WRITE) {
450 struct fd_batch *batch, *batches[32] = {};
451 uint32_t batch_mask;
452
453 /* This is a bit awkward, probably a fd_batch_flush_locked()
454 * would make things simpler.. but we need to hold the lock
455 * to iterate the batches which reference this resource. So
456 * we must first grab references under a lock, then flush.
457 */
458 mtx_lock(&ctx->screen->lock);
459 batch_mask = rsc->batch_mask;
460 foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
461 fd_batch_reference_locked(&batches[batch->idx], batch);
462 mtx_unlock(&ctx->screen->lock);
463
464 foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
465 fd_batch_flush(batch);
466
467 foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
468 fd_batch_reference(&batches[batch->idx], NULL);
469 }
470 assert(rsc->batch_mask == 0);
471 } else if (write_batch) {
472 fd_batch_flush(write_batch);
473 }
474
475 fd_batch_reference(&write_batch, NULL);
476
477 assert(!rsc->write_batch);
478 }
479
480 static void
481 fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
482 {
483 flush_resource(fd_context(pctx), fd_resource(prsc), PIPE_TRANSFER_READ);
484 }
485
486 static void
487 fd_resource_transfer_unmap(struct pipe_context *pctx,
488 struct pipe_transfer *ptrans)
489 {
490 struct fd_context *ctx = fd_context(pctx);
491 struct fd_resource *rsc = fd_resource(ptrans->resource);
492 struct fd_transfer *trans = fd_transfer(ptrans);
493
494 if (trans->staging_prsc) {
495 if (ptrans->usage & PIPE_TRANSFER_WRITE)
496 fd_blit_from_staging(ctx, trans);
497 pipe_resource_reference(&trans->staging_prsc, NULL);
498 }
499
500 if (!(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
501 fd_bo_cpu_fini(rsc->bo);
502 }
503
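	/* track the range touched by this transfer as valid buffer contents
	 * (used to skip stalls on writes to not-yet-initialized buffer ranges):
	 */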
504 util_range_add(&rsc->base, &rsc->valid_buffer_range,
505 ptrans->box.x,
506 ptrans->box.x + ptrans->box.width);
507
508 pipe_resource_reference(&ptrans->resource, NULL);
509 slab_free(&ctx->transfer_pool, ptrans);
510 }
511
512 static void *
513 fd_resource_transfer_map(struct pipe_context *pctx,
514 struct pipe_resource *prsc,
515 unsigned level, unsigned usage,
516 const struct pipe_box *box,
517 struct pipe_transfer **pptrans)
518 {
519 struct fd_context *ctx = fd_context(pctx);
520 struct fd_resource *rsc = fd_resource(prsc);
521 struct fdl_slice *slice = fd_resource_slice(rsc, level);
522 struct fd_transfer *trans;
523 struct pipe_transfer *ptrans;
524 enum pipe_format format = prsc->format;
525 uint32_t op = 0;
526 uint32_t offset;
527 char *buf;
528 int ret = 0;
529
530 DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
531 box->width, box->height, box->x, box->y);
532
533 ptrans = slab_alloc(&ctx->transfer_pool);
534 if (!ptrans)
535 return NULL;
536
 537  /* slab_alloc() doesn't zero: */
538 trans = fd_transfer(ptrans);
539 memset(trans, 0, sizeof(*trans));
540
541 pipe_resource_reference(&ptrans->resource, prsc);
542 ptrans->level = level;
543 ptrans->usage = usage;
544 ptrans->box = *box;
545 ptrans->stride = slice->pitch;
546 ptrans->layer_stride = fd_resource_layer_stride(rsc, level);
547
548 /* we always need a staging texture for tiled buffers:
549 *
550 * TODO we might sometimes want to *also* shadow the resource to avoid
551 * splitting a batch.. for ex, mid-frame texture uploads to a tiled
552 * texture.
553 */
554 if (rsc->layout.tile_mode) {
555 struct fd_resource *staging_rsc;
556
557 staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
558 if (staging_rsc) {
559 struct fdl_slice *staging_slice =
560 fd_resource_slice(staging_rsc, 0);
561 // TODO for PIPE_TRANSFER_READ, need to do untiling blit..
562 trans->staging_prsc = &staging_rsc->base;
563 trans->base.stride = staging_slice->pitch;
564 trans->base.layer_stride = fd_resource_layer_stride(staging_rsc, 0);
565 trans->staging_box = *box;
566 trans->staging_box.x = 0;
567 trans->staging_box.y = 0;
568 trans->staging_box.z = 0;
569
570 if (usage & PIPE_TRANSFER_READ) {
571 fd_blit_to_staging(ctx, trans);
572
573 fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe,
574 DRM_FREEDRENO_PREP_READ);
575 }
576
577 buf = fd_bo_map(staging_rsc->bo);
578 offset = 0;
579
580 *pptrans = ptrans;
581
582 ctx->stats.staging_uploads++;
583
584 return buf;
585 }
586 }
587
588 if (ctx->in_shadow && !(usage & PIPE_TRANSFER_READ))
589 usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
590
591 if (usage & PIPE_TRANSFER_READ)
592 op |= DRM_FREEDRENO_PREP_READ;
593
594 if (usage & PIPE_TRANSFER_WRITE)
595 op |= DRM_FREEDRENO_PREP_WRITE;
596
597 if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
598 realloc_bo(rsc, fd_bo_size(rsc->bo));
599 rebind_resource(ctx, prsc);
600 } else if ((usage & PIPE_TRANSFER_WRITE) &&
601 prsc->target == PIPE_BUFFER &&
602 !util_ranges_intersect(&rsc->valid_buffer_range,
603 box->x, box->x + box->width)) {
604 /* We are trying to write to a previously uninitialized range. No need
605 * to wait.
606 */
607 } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
608 struct fd_batch *write_batch = NULL;
609
610 /* hold a reference, so it doesn't disappear under us: */
611 fd_context_lock(ctx);
612 fd_batch_reference_locked(&write_batch, rsc->write_batch);
613 fd_context_unlock(ctx);
614
615 if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
616 write_batch->back_blit) {
 617  /* if the only thing pending is a back-blit, we can discard it: */
618 fd_batch_reset(write_batch);
619 }
620
621 /* If the GPU is writing to the resource, or if it is reading from the
622 * resource and we're trying to write to it, flush the renders.
623 */
624 bool needs_flush = pending(rsc, !!(usage & PIPE_TRANSFER_WRITE));
625 bool busy = needs_flush || (0 != fd_bo_cpu_prep(rsc->bo,
626 ctx->pipe, op | DRM_FREEDRENO_PREP_NOSYNC));
627
628 /* if we need to flush/stall, see if we can make a shadow buffer
629 * to avoid this:
630 *
 631  * TODO we could go down this path if !reorder && !busy_for_read,
632 * ie. we only *don't* want to go down this path if the blit
633 * will trigger a flush!
634 */
635 if (ctx->screen->reorder && busy && !(usage & PIPE_TRANSFER_READ) &&
636 (usage & PIPE_TRANSFER_DISCARD_RANGE)) {
637 /* try shadowing only if it avoids a flush, otherwise staging would
638 * be better:
639 */
640 if (needs_flush && fd_try_shadow_resource(ctx, rsc, level,
641 box, DRM_FORMAT_MOD_LINEAR)) {
642 needs_flush = busy = false;
643 rebind_resource(ctx, prsc);
644 ctx->stats.shadow_uploads++;
645 } else {
646 struct fd_resource *staging_rsc;
647
648 if (needs_flush) {
649 flush_resource(ctx, rsc, usage);
650 needs_flush = false;
651 }
652
653 /* in this case, we don't need to shadow the whole resource,
654 * since any draw that references the previous contents has
655 * already had rendering flushed for all tiles. So we can
656 * use a staging buffer to do the upload.
657 */
658 staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
659 if (staging_rsc) {
660 struct fdl_slice *staging_slice =
661 fd_resource_slice(staging_rsc, 0);
662 trans->staging_prsc = &staging_rsc->base;
663 trans->base.stride = staging_slice->pitch;
664 trans->base.layer_stride =
665 fd_resource_layer_stride(staging_rsc, 0);
666 trans->staging_box = *box;
667 trans->staging_box.x = 0;
668 trans->staging_box.y = 0;
669 trans->staging_box.z = 0;
670 buf = fd_bo_map(staging_rsc->bo);
671 offset = 0;
672
673 *pptrans = ptrans;
674
675 fd_batch_reference(&write_batch, NULL);
676
677 ctx->stats.staging_uploads++;
678
679 return buf;
680 }
681 }
682 }
683
684 if (needs_flush) {
685 flush_resource(ctx, rsc, usage);
686 needs_flush = false;
687 }
688
689 fd_batch_reference(&write_batch, NULL);
690
691 /* The GPU keeps track of how the various bo's are being used, and
692 * will wait if necessary for the proper operation to have
693 * completed.
694 */
695 if (busy) {
696 ret = fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);
697 if (ret)
698 goto fail;
699 }
700 }
701
702 buf = fd_bo_map(rsc->bo);
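	/* compute the byte offset of the mapped box: y/x are converted to block
	 * units; the pitch, cpp, and level/layer offset are already in bytes:
	 */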
703 offset =
704 box->y / util_format_get_blockheight(format) * ptrans->stride +
705 box->x / util_format_get_blockwidth(format) * rsc->layout.cpp +
706 fd_resource_offset(rsc, level, box->z);
707
708 if (usage & PIPE_TRANSFER_WRITE)
709 rsc->valid = true;
710
711 *pptrans = ptrans;
712
713 return buf + offset;
714
715 fail:
716 fd_resource_transfer_unmap(pctx, ptrans);
717 return NULL;
718 }
719
720 static void
721 fd_resource_destroy(struct pipe_screen *pscreen,
722 struct pipe_resource *prsc)
723 {
724 struct fd_resource *rsc = fd_resource(prsc);
725 fd_bc_invalidate_resource(rsc, true);
726 if (rsc->bo)
727 fd_bo_del(rsc->bo);
728 if (rsc->scanout)
729 renderonly_scanout_destroy(rsc->scanout, fd_screen(pscreen)->ro);
730
731 util_range_destroy(&rsc->valid_buffer_range);
732 FREE(rsc);
733 }
734
735 static uint64_t
736 fd_resource_modifier(struct fd_resource *rsc)
737 {
738 if (!rsc->layout.tile_mode)
739 return DRM_FORMAT_MOD_LINEAR;
740
741 if (rsc->layout.ubwc_layer_size)
742 return DRM_FORMAT_MOD_QCOM_COMPRESSED;
743
744 /* TODO invent a modifier for tiled but not UBWC buffers: */
745 return DRM_FORMAT_MOD_INVALID;
746 }
747
748 static bool
749 fd_resource_get_handle(struct pipe_screen *pscreen,
750 struct pipe_context *pctx,
751 struct pipe_resource *prsc,
752 struct winsys_handle *handle,
753 unsigned usage)
754 {
755 struct fd_resource *rsc = fd_resource(prsc);
756
757 handle->modifier = fd_resource_modifier(rsc);
758
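	/* note: the pitch reported to the winsys is slice zero's pitch, in bytes: */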
759 return fd_screen_bo_get_handle(pscreen, rsc->bo, rsc->scanout,
760 fd_resource_slice(rsc, 0)->pitch, handle);
761 }
762
763 static uint32_t
764 setup_slices(struct fd_resource *rsc, uint32_t alignment, enum pipe_format format)
765 {
766 struct pipe_resource *prsc = &rsc->base;
767 struct fd_screen *screen = fd_screen(prsc->screen);
768 enum util_format_layout layout = util_format_description(format)->layout;
769 uint32_t pitchalign = screen->gmem_alignw;
770 uint32_t level, size = 0;
771 uint32_t width = prsc->width0;
772 uint32_t height = prsc->height0;
773 uint32_t depth = prsc->depth0;
774 /* in layer_first layout, the level (slice) contains just one
775 * layer (since in fact the layer contains the slices)
776 */
777 uint32_t layers_in_level = rsc->layout.layer_first ? 1 : prsc->array_size;
778
779 for (level = 0; level <= prsc->last_level; level++) {
780 struct fdl_slice *slice = fd_resource_slice(rsc, level);
781 uint32_t blocks;
782
783 if (layout == UTIL_FORMAT_LAYOUT_ASTC)
784 width = util_align_npot(width, pitchalign * util_format_get_blockwidth(format));
785 else
786 width = align(width, pitchalign);
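		/* pitch is stored in bytes: blocks across the aligned width times
		 * bytes per block (cpp):
		 */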
787 slice->pitch = util_format_get_nblocksx(format, width) * rsc->layout.cpp;
788 slice->offset = size;
789 blocks = util_format_get_nblocks(format, width, height);
790 /* 1d array and 2d array textures must all have the same layer size
791 * for each miplevel on a3xx. 3d textures can have different layer
792 * sizes for high levels, but the hw auto-sizer is buggy (or at least
 793  * different than what this code does), so once the layer size is no
 794  * larger than 0xf000 bytes, we stop reducing it.
795 */
796 if (prsc->target == PIPE_TEXTURE_3D && (
797 level == 1 ||
798 (level > 1 && fd_resource_slice(rsc, level - 1)->size0 > 0xf000)))
799 slice->size0 = align(blocks * rsc->layout.cpp, alignment);
800 else if (level == 0 || rsc->layout.layer_first || alignment == 1)
801 slice->size0 = align(blocks * rsc->layout.cpp, alignment);
802 else
803 slice->size0 = fd_resource_slice(rsc, level - 1)->size0;
804
805 size += slice->size0 * depth * layers_in_level;
806
807 width = u_minify(width, 1);
808 height = u_minify(height, 1);
809 depth = u_minify(depth, 1);
810 }
811
812 return size;
813 }
814
815 static uint32_t
816 slice_alignment(enum pipe_texture_target target)
817 {
818 /* on a3xx, 2d array and 3d textures seem to want their
819 * layers aligned to page boundaries:
820 */
821 switch (target) {
822 case PIPE_TEXTURE_3D:
823 case PIPE_TEXTURE_1D_ARRAY:
824 case PIPE_TEXTURE_2D_ARRAY:
825 return 4096;
826 default:
827 return 1;
828 }
829 }
830
 831 /* cross-generation texture layout, to plug into screen->setup_slices()..
 832  * replace with a generation-specific one as needed.
 833  *
 834  * TODO for a4xx we can probably extract the a4xx-specific logic into
 835  * a small fd4_setup_slices() wrapper that sets up layer_first, and then
 836  * calls this.
837 */
838 uint32_t
839 fd_setup_slices(struct fd_resource *rsc)
840 {
841 uint32_t alignment;
842
843 alignment = slice_alignment(rsc->base.target);
844
845 struct fd_screen *screen = fd_screen(rsc->base.screen);
846 if (is_a4xx(screen)) {
847 switch (rsc->base.target) {
848 case PIPE_TEXTURE_3D:
849 rsc->layout.layer_first = false;
850 break;
851 default:
852 rsc->layout.layer_first = true;
853 alignment = 1;
854 break;
855 }
856 }
857
858 return setup_slices(rsc, alignment, rsc->base.format);
859 }
860
 861 /* special case to resize the query buffer after it has been allocated.. */
862 void
863 fd_resource_resize(struct pipe_resource *prsc, uint32_t sz)
864 {
865 struct fd_resource *rsc = fd_resource(prsc);
866
867 debug_assert(prsc->width0 == 0);
868 debug_assert(prsc->target == PIPE_BUFFER);
869 debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
870
871 prsc->width0 = sz;
872 realloc_bo(rsc, fd_screen(prsc->screen)->setup_slices(rsc));
873 }
874
875 static void
876 fd_resource_layout_init(struct pipe_resource *prsc)
877 {
878 struct fd_resource *rsc = fd_resource(prsc);
879 struct fdl_layout *layout = &rsc->layout;
880
881 layout->width0 = prsc->width0;
882 layout->height0 = prsc->height0;
883 layout->depth0 = prsc->depth0;
884
885 layout->cpp = util_format_get_blocksize(prsc->format);
886 layout->cpp *= fd_resource_nr_samples(prsc);
887 layout->cpp_shift = ffs(layout->cpp) - 1;
888 }
889
890 /**
891 * Create a new texture object, using the given template info.
892 */
893 static struct pipe_resource *
894 fd_resource_create_with_modifiers(struct pipe_screen *pscreen,
895 const struct pipe_resource *tmpl,
896 const uint64_t *modifiers, int count)
897 {
898 struct fd_screen *screen = fd_screen(pscreen);
899 struct fd_resource *rsc;
900 struct pipe_resource *prsc;
901 enum pipe_format format = tmpl->format;
902 uint32_t size;
903
 904  /* when using kmsro, scanout buffers are allocated on the display device.
 905   * create_with_modifiers() doesn't give us usage flags, so we have to
 906   * assume that all calls with modifiers are scanout-possible.
907 */
908 if (screen->ro &&
909 ((tmpl->bind & PIPE_BIND_SCANOUT) ||
910 !(count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID))) {
911 struct pipe_resource scanout_templat = *tmpl;
912 struct renderonly_scanout *scanout;
913 struct winsys_handle handle;
914
915 /* apply freedreno alignment requirement */
916 scanout_templat.width0 = align(tmpl->width0, screen->gmem_alignw);
917
918 scanout = renderonly_scanout_for_resource(&scanout_templat,
919 screen->ro, &handle);
920 if (!scanout)
921 return NULL;
922
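		/* the scanout object was only needed to allocate the buffer on the
		 * display device and fill in the winsys handle; the dma-buf handle is
		 * imported as a regular resource below, so it can be destroyed now:
		 */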
923 renderonly_scanout_destroy(scanout, screen->ro);
924
925 assert(handle.type == WINSYS_HANDLE_TYPE_FD);
926 rsc = fd_resource(pscreen->resource_from_handle(pscreen, tmpl,
927 &handle,
928 PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE));
929 close(handle.handle);
930 if (!rsc)
931 return NULL;
932
933 return &rsc->base;
934 }
935
936 rsc = CALLOC_STRUCT(fd_resource);
937 prsc = &rsc->base;
938
939 DBG("%p: target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
940 "nr_samples=%u, usage=%u, bind=%x, flags=%x", prsc,
941 tmpl->target, util_format_name(format),
942 tmpl->width0, tmpl->height0, tmpl->depth0,
943 tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
944 tmpl->usage, tmpl->bind, tmpl->flags);
945
946 if (!rsc)
947 return NULL;
948
949 *prsc = *tmpl;
950 fd_resource_layout_init(prsc);
951
952 #define LINEAR \
953 (PIPE_BIND_SCANOUT | \
954 PIPE_BIND_LINEAR | \
955 PIPE_BIND_DISPLAY_TARGET)
956
957 bool linear = drm_find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count);
958 if (tmpl->bind & LINEAR)
959 linear = true;
960
961 if (fd_mesa_debug & FD_DBG_NOTILE)
962 linear = true;
963
 964  /* Allow buffer compression for non-shared buffers; for shared
 965   * buffers, only allow it if the QCOM_COMPRESSED modifier
 966   * is requested:
967 *
968 * TODO we should probably also limit tiled in a similar way,
969 * except we don't have a format modifier for tiled. (We probably
970 * should.)
971 */
972 bool allow_ubwc = drm_find_modifier(DRM_FORMAT_MOD_INVALID, modifiers, count);
973 if (tmpl->bind & PIPE_BIND_SHARED)
974 allow_ubwc = drm_find_modifier(DRM_FORMAT_MOD_QCOM_COMPRESSED, modifiers, count);
975
976 allow_ubwc &= !(fd_mesa_debug & FD_DBG_NOUBWC);
977
978 pipe_reference_init(&prsc->reference, 1);
979
980 prsc->screen = pscreen;
981
982 if (screen->tile_mode &&
983 (tmpl->target != PIPE_BUFFER) &&
984 !linear) {
985 rsc->layout.tile_mode = screen->tile_mode(prsc);
986 }
987
988 util_range_init(&rsc->valid_buffer_range);
989
990 rsc->internal_format = format;
991
992 rsc->layout.ubwc = rsc->layout.tile_mode && is_a6xx(screen) && allow_ubwc;
993
994 if (prsc->target == PIPE_BUFFER) {
995 assert(prsc->format == PIPE_FORMAT_R8_UNORM);
996 size = prsc->width0;
997 fdl_layout_buffer(&rsc->layout, size);
998 } else {
999 size = screen->setup_slices(rsc);
1000 }
1001
1002 /* special case for hw-query buffer, which we need to allocate before we
1003 * know the size:
1004 */
1005 if (size == 0) {
1006  /* note, semi-intentional use of == instead of & */
1007 debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
1008 return prsc;
1009 }
1010
1011 /* Set the layer size if the (non-a6xx) backend hasn't done so. */
1012 if (rsc->layout.layer_first && !rsc->layout.layer_size) {
1013 rsc->layout.layer_size = align(size, 4096);
1014 size = rsc->layout.layer_size * prsc->array_size;
1015 }
1016
1017 if (fd_mesa_debug & FD_DBG_LAYOUT)
1018 fdl_dump_layout(&rsc->layout);
1019
1020 realloc_bo(rsc, size);
1021 if (!rsc->bo)
1022 goto fail;
1023
1024 return prsc;
1025 fail:
1026 fd_resource_destroy(pscreen, prsc);
1027 return NULL;
1028 }
1029
1030 static struct pipe_resource *
1031 fd_resource_create(struct pipe_screen *pscreen,
1032 const struct pipe_resource *tmpl)
1033 {
1034 const uint64_t mod = DRM_FORMAT_MOD_INVALID;
1035 return fd_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
1036 }
1037
1038 /**
1039 * Create a texture from a winsys_handle. The handle is often created in
1040 * another process by first creating a pipe texture and then calling
1041 * resource_get_handle.
1042 */
1043 static struct pipe_resource *
1044 fd_resource_from_handle(struct pipe_screen *pscreen,
1045 const struct pipe_resource *tmpl,
1046 struct winsys_handle *handle, unsigned usage)
1047 {
1048 struct fd_screen *screen = fd_screen(pscreen);
1049 struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
1050 struct fdl_slice *slice = fd_resource_slice(rsc, 0);
1051 struct pipe_resource *prsc = &rsc->base;
1052 uint32_t pitchalign = fd_screen(pscreen)->gmem_alignw * rsc->layout.cpp;
1053
1054 DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
1055 "nr_samples=%u, usage=%u, bind=%x, flags=%x",
1056 tmpl->target, util_format_name(tmpl->format),
1057 tmpl->width0, tmpl->height0, tmpl->depth0,
1058 tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
1059 tmpl->usage, tmpl->bind, tmpl->flags);
1060
1061 if (!rsc)
1062 return NULL;
1063
1064 *prsc = *tmpl;
1065 fd_resource_layout_init(prsc);
1066
1067 pipe_reference_init(&prsc->reference, 1);
1068
1069 prsc->screen = pscreen;
1070
1071 util_range_init(&rsc->valid_buffer_range);
1072
1073 rsc->bo = fd_screen_bo_from_handle(pscreen, handle);
1074 if (!rsc->bo)
1075 goto fail;
1076
1077 rsc->internal_format = tmpl->format;
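	/* the winsys handle's stride is in bytes, so slice->pitch and size0
	 * below are byte units as well:
	 */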
1078 slice->pitch = handle->stride;
1079 slice->offset = handle->offset;
1080 slice->size0 = handle->stride * prsc->height0;
1081
1082 if ((slice->pitch < align(prsc->width0 * rsc->layout.cpp, pitchalign)) ||
1083 (slice->pitch & (pitchalign - 1)))
1084 goto fail;
1085
1086 assert(rsc->layout.cpp);
1087
1088 if (screen->layout_resource_for_modifier(rsc, handle->modifier) < 0)
1089 goto fail;
1090
1091 if (screen->ro) {
1092 rsc->scanout =
1093 renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
1094 /* failure is expected in some cases.. */
1095 }
1096
1097 rsc->valid = true;
1098
1099 return prsc;
1100
1101 fail:
1102 fd_resource_destroy(pscreen, prsc);
1103 return NULL;
1104 }
1105
1106 bool
1107 fd_render_condition_check(struct pipe_context *pctx)
1108 {
1109 struct fd_context *ctx = fd_context(pctx);
1110
1111 if (!ctx->cond_query)
1112 return true;
1113
1114 union pipe_query_result res = { 0 };
1115 bool wait =
1116 ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
1117 ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;
1118
1119 if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
1120 return (bool)res.u64 != ctx->cond_cond;
1121
1122 return true;
1123 }
1124
1125 static void
1126 fd_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
1127 {
1128 struct fd_context *ctx = fd_context(pctx);
1129 struct fd_resource *rsc = fd_resource(prsc);
1130
1131 /*
1132 * TODO I guess we could track that the resource is invalidated and
1133 * use that as a hint to realloc rather than stall in _transfer_map(),
1134 * even in the non-DISCARD_WHOLE_RESOURCE case?
1135 *
1136  * Note: we set dirty bits to trigger the invalidate logic in fd_draw_vbo
1137 */
1138
1139 if (rsc->write_batch) {
1140 struct fd_batch *batch = rsc->write_batch;
1141 struct pipe_framebuffer_state *pfb = &batch->framebuffer;
1142
1143 if (pfb->zsbuf && pfb->zsbuf->texture == prsc) {
1144 batch->resolve &= ~(FD_BUFFER_DEPTH | FD_BUFFER_STENCIL);
1145 ctx->dirty |= FD_DIRTY_ZSA;
1146 }
1147
1148 for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
1149 if (pfb->cbufs[i] && pfb->cbufs[i]->texture == prsc) {
1150 batch->resolve &= ~(PIPE_CLEAR_COLOR0 << i);
1151 ctx->dirty |= FD_DIRTY_FRAMEBUFFER;
1152 }
1153 }
1154 }
1155
1156 rsc->valid = false;
1157 }
1158
1159 static enum pipe_format
1160 fd_resource_get_internal_format(struct pipe_resource *prsc)
1161 {
1162 return fd_resource(prsc)->internal_format;
1163 }
1164
1165 static void
1166 fd_resource_set_stencil(struct pipe_resource *prsc,
1167 struct pipe_resource *stencil)
1168 {
1169 fd_resource(prsc)->stencil = fd_resource(stencil);
1170 }
1171
1172 static struct pipe_resource *
1173 fd_resource_get_stencil(struct pipe_resource *prsc)
1174 {
1175 struct fd_resource *rsc = fd_resource(prsc);
1176 if (rsc->stencil)
1177 return &rsc->stencil->base;
1178 return NULL;
1179 }
1180
1181 static const struct u_transfer_vtbl transfer_vtbl = {
1182 .resource_create = fd_resource_create,
1183 .resource_destroy = fd_resource_destroy,
1184 .transfer_map = fd_resource_transfer_map,
1185 .transfer_flush_region = fd_resource_transfer_flush_region,
1186 .transfer_unmap = fd_resource_transfer_unmap,
1187 .get_internal_format = fd_resource_get_internal_format,
1188 .set_stencil = fd_resource_set_stencil,
1189 .get_stencil = fd_resource_get_stencil,
1190 };
1191
1192 static const uint64_t supported_modifiers[] = {
1193 DRM_FORMAT_MOD_LINEAR,
1194 };
1195
1196 static int
1197 fd_layout_resource_for_modifier(struct fd_resource *rsc, uint64_t modifier)
1198 {
1199 switch (modifier) {
1200 case DRM_FORMAT_MOD_LINEAR:
1201 return 0;
1202 default:
1203 return -1;
1204 }
1205 }
1206
1207 void
1208 fd_resource_screen_init(struct pipe_screen *pscreen)
1209 {
1210 struct fd_screen *screen = fd_screen(pscreen);
1211 bool fake_rgtc = screen->gpu_id < 400;
1212
1213 pscreen->resource_create = u_transfer_helper_resource_create;
1214 /* NOTE: u_transfer_helper does not yet support the _with_modifiers()
1215 * variant:
1216 */
1217 pscreen->resource_create_with_modifiers = fd_resource_create_with_modifiers;
1218 pscreen->resource_from_handle = fd_resource_from_handle;
1219 pscreen->resource_get_handle = fd_resource_get_handle;
1220 pscreen->resource_destroy = u_transfer_helper_resource_destroy;
1221
1222 pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
1223 true, false, fake_rgtc, true);
1224
1225 if (!screen->setup_slices)
1226 screen->setup_slices = fd_setup_slices;
1227 if (!screen->layout_resource_for_modifier)
1228 screen->layout_resource_for_modifier = fd_layout_resource_for_modifier;
1229 if (!screen->supported_modifiers) {
1230 screen->supported_modifiers = supported_modifiers;
1231 screen->num_supported_modifiers = ARRAY_SIZE(supported_modifiers);
1232 }
1233 }
1234
1235 static void
1236 fd_get_sample_position(struct pipe_context *context,
1237 unsigned sample_count, unsigned sample_index,
1238 float *pos_out)
1239 {
1240  /* The following is copied from nouveau/nv50, except for the position
1241   * values, which are taken from the blob driver */
1242 static const uint8_t pos1[1][2] = { { 0x8, 0x8 } };
1243 static const uint8_t pos2[2][2] = {
1244 { 0xc, 0xc }, { 0x4, 0x4 } };
1245 static const uint8_t pos4[4][2] = {
1246 { 0x6, 0x2 }, { 0xe, 0x6 },
1247 { 0x2, 0xa }, { 0xa, 0xe } };
1248 /* TODO needs to be verified on supported hw */
1249 static const uint8_t pos8[8][2] = {
1250 { 0x9, 0x5 }, { 0x7, 0xb },
1251 { 0xd, 0x9 }, { 0x5, 0x3 },
1252 { 0x3, 0xd }, { 0x1, 0x7 },
1253 { 0xb, 0xf }, { 0xf, 0x1 } };
1254
1255 const uint8_t (*ptr)[2];
1256
1257 switch (sample_count) {
1258 case 1:
1259 ptr = pos1;
1260 break;
1261 case 2:
1262 ptr = pos2;
1263 break;
1264 case 4:
1265 ptr = pos4;
1266 break;
1267 case 8:
1268 ptr = pos8;
1269 break;
1270 default:
1271 assert(0);
1272 return;
1273 }
1274
1275 pos_out[0] = ptr[sample_index][0] / 16.0f;
1276 pos_out[1] = ptr[sample_index][1] / 16.0f;
1277 }
1278
1279 static void
1280 fd_blit_pipe(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
1281 {
1282 /* wrap fd_blit to return void */
1283 fd_blit(pctx, blit_info);
1284 }
1285
1286 void
1287 fd_resource_context_init(struct pipe_context *pctx)
1288 {
1289 pctx->transfer_map = u_transfer_helper_transfer_map;
1290 pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
1291 pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
1292 pctx->buffer_subdata = u_default_buffer_subdata;
1293 pctx->texture_subdata = u_default_texture_subdata;
1294 pctx->create_surface = fd_create_surface;
1295 pctx->surface_destroy = fd_surface_destroy;
1296 pctx->resource_copy_region = fd_resource_copy_region;
1297 pctx->blit = fd_blit_pipe;
1298 pctx->flush_resource = fd_flush_resource;
1299 pctx->invalidate_resource = fd_invalidate_resource;
1300 pctx->get_sample_position = fd_get_sample_position;
1301 }