freedreno: mark more state dirty when rebinding resources
[mesa.git] src/gallium/drivers/freedreno/freedreno_resource.c
1 /*
2 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include "util/format/u_format.h"
28 #include "util/format/u_format_rgtc.h"
29 #include "util/format/u_format_zs.h"
30 #include "util/u_inlines.h"
31 #include "util/u_transfer.h"
32 #include "util/u_string.h"
33 #include "util/u_surface.h"
34 #include "util/set.h"
35 #include "util/u_drm.h"
36
37 #include "freedreno_resource.h"
38 #include "freedreno_batch_cache.h"
39 #include "freedreno_blitter.h"
40 #include "freedreno_fence.h"
41 #include "freedreno_screen.h"
42 #include "freedreno_surface.h"
43 #include "freedreno_context.h"
44 #include "freedreno_query_hw.h"
45 #include "freedreno_util.h"
46
47 #include "drm-uapi/drm_fourcc.h"
48 #include <errno.h>
49
50 /* XXX this should go away, needed for 'struct winsys_handle' */
51 #include "state_tracker/drm_driver.h"
52
53 /* A private modifier for now, so we have a way to request tiled but not
54 * compressed. It would perhaps be good to get real modifiers for the
 55  * tiled formats, but that would probably require some work to figure out
56 * the layout(s) of the tiled modes, and whether they are the same
57 * across generations.
58 */
59 #define FD_FORMAT_MOD_QCOM_TILED fourcc_mod_code(QCOM, 0xffffffff)
60
61 /**
62 * Go through the entire state and see if the resource is bound
63 * anywhere. If it is, mark the relevant state as dirty. This is
64 * called on realloc_bo to ensure the necessary state is re-
65 * emitted so the GPU looks at the new backing bo.
66 */
67 static void
68 rebind_resource(struct fd_context *ctx, struct pipe_resource *prsc)
69 {
70 /* VBOs */
71 for (unsigned i = 0; i < ctx->vtx.vertexbuf.count && !(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
72 if (ctx->vtx.vertexbuf.vb[i].buffer.resource == prsc)
73 ctx->dirty |= FD_DIRTY_VTXBUF;
74 }
75
76 /* per-shader-stage resources: */
77 for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
78 /* Constbufs.. note that constbuf[0] is normal uniforms emitted in
79 * cmdstream rather than by pointer..
80 */
81 const unsigned num_ubos = util_last_bit(ctx->constbuf[stage].enabled_mask);
82 for (unsigned i = 1; i < num_ubos; i++) {
83 if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_CONST)
84 break;
85 if (ctx->constbuf[stage].cb[i].buffer == prsc) {
86 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_CONST;
87 ctx->dirty |= FD_DIRTY_CONST;
88 }
89 }
90
91 /* Textures */
92 for (unsigned i = 0; i < ctx->tex[stage].num_textures; i++) {
93 if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_TEX)
94 break;
95 if (ctx->tex[stage].textures[i] && (ctx->tex[stage].textures[i]->texture == prsc)) {
96 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_TEX;
97 ctx->dirty |= FD_DIRTY_TEX;
98 }
99 }
100
101 /* Images */
102 const unsigned num_images = util_last_bit(ctx->shaderimg[stage].enabled_mask);
103 for (unsigned i = 0; i < num_images; i++) {
104 if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_IMAGE)
105 break;
106 if (ctx->shaderimg[stage].si[i].resource == prsc) {
107 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_IMAGE;
108 ctx->dirty |= FD_DIRTY_IMAGE;
109 }
110 }
111
112 /* SSBOs */
113 const unsigned num_ssbos = util_last_bit(ctx->shaderbuf[stage].enabled_mask);
114 for (unsigned i = 0; i < num_ssbos; i++) {
115 if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_SSBO)
116 break;
117 if (ctx->shaderbuf[stage].sb[i].buffer == prsc) {
118 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_SSBO;
119 ctx->dirty |= FD_DIRTY_SSBO;
120 }
121 }
122 }
123 }
124
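/* Allocate (or re-allocate) the backing bo for a resource: drops any
 * previous bo, zeroes the UBWC metadata area (if any), bumps the
 * resource seqno, and invalidates any batch-cache entries referencing
 * the resource.
 */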
125 static void
126 realloc_bo(struct fd_resource *rsc, uint32_t size)
127 {
128 struct pipe_resource *prsc = &rsc->base;
129 struct fd_screen *screen = fd_screen(rsc->base.screen);
130 uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
131 DRM_FREEDRENO_GEM_TYPE_KMEM |
132 COND(prsc->bind & PIPE_BIND_SCANOUT, DRM_FREEDRENO_GEM_SCANOUT);
133 /* TODO other flags? */
134
135 /* if we start using things other than write-combine,
136 * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
137 */
138
139 if (rsc->bo)
140 fd_bo_del(rsc->bo);
141
142 rsc->bo = fd_bo_new(screen->dev, size, flags, "%ux%ux%u@%u:%x",
143 prsc->width0, prsc->height0, prsc->depth0, rsc->layout.cpp, prsc->bind);
144
145 /* Zero out the UBWC area on allocation. This fixes intermittent failures
146 * with UBWC, which I suspect are due to the HW having a hard time
147 * interpreting arbitrary values populating the flags buffer when the BO
148 * was recycled through the bo cache (instead of fresh allocations from
149 * the kernel, which are zeroed). sleep(1) in this spot didn't work
150 * around the issue, but any memset value seems to.
151 */
152 if (rsc->layout.ubwc) {
153 void *buf = fd_bo_map(rsc->bo);
154 memset(buf, 0, rsc->layout.slices[0].offset);
155 }
156
157 rsc->seqno = p_atomic_inc_return(&screen->rsc_seqno);
158 util_range_set_empty(&rsc->valid_buffer_range);
159 fd_bc_invalidate_resource(rsc, true);
160 }
161
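/* Perform a blit, falling back to util_resource_copy_region() (a cpu
 * copy) when the caller requests it or when the hw blit path cannot
 * handle the operation.
 */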
162 static void
163 do_blit(struct fd_context *ctx, const struct pipe_blit_info *blit, bool fallback)
164 {
165 struct pipe_context *pctx = &ctx->base;
166
167 /* TODO size threshold too?? */
168 if (fallback || !fd_blit(pctx, blit)) {
169 /* do blit on cpu: */
170 util_resource_copy_region(pctx,
171 blit->dst.resource, blit->dst.level, blit->dst.box.x,
172 blit->dst.box.y, blit->dst.box.z,
173 blit->src.resource, blit->src.level, &blit->src.box);
174 }
175 }
176
177 /**
178 * @rsc: the resource to shadow
179 * @level: the level to discard (if box != NULL, otherwise ignored)
180 * @box: the box to discard (or NULL if none)
181 * @modifier: the modifier for the new buffer state
182 */
183 static bool
184 fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
185 unsigned level, const struct pipe_box *box, uint64_t modifier)
186 {
187 struct pipe_context *pctx = &ctx->base;
188 struct pipe_resource *prsc = &rsc->base;
189 bool fallback = false;
190
191 if (prsc->next)
192 return false;
193
194 /* TODO: somehow munge dimensions and format to copy unsupported
195 * render target format to something that is supported?
196 */
197 if (!pctx->screen->is_format_supported(pctx->screen,
198 prsc->format, prsc->target, prsc->nr_samples,
199 prsc->nr_storage_samples,
200 PIPE_BIND_RENDER_TARGET))
201 fallback = true;
202
203 /* do shadowing back-blits on the cpu for buffers: */
204 if (prsc->target == PIPE_BUFFER)
205 fallback = true;
206
207 bool discard_whole_level = box && util_texrange_covers_whole_level(prsc, level,
208 box->x, box->y, box->z, box->width, box->height, box->depth);
209
210 /* TODO need to be more clever about current level */
211 if ((prsc->target >= PIPE_TEXTURE_2D) && box && !discard_whole_level)
212 return false;
213
214 struct pipe_resource *pshadow =
215 pctx->screen->resource_create_with_modifiers(pctx->screen,
216 prsc, &modifier, 1);
217
218 if (!pshadow)
219 return false;
220
221 assert(!ctx->in_shadow);
222 ctx->in_shadow = true;
223
224 /* get rid of any references that batch-cache might have to us (which
225 * should empty/destroy rsc->batches hashset)
226 */
227 fd_bc_invalidate_resource(rsc, false);
228
229 mtx_lock(&ctx->screen->lock);
230
 231 	/* Swap the backing bo's, so the shadow becomes the old buffer,
 232 	 * and we blit from the shadow to the new buffer.  From here on out, we
233 * cannot fail.
234 *
235 * Note that we need to do it in this order, otherwise if
236 * we go down cpu blit path, the recursive transfer_map()
237 * sees the wrong status..
238 */
239 struct fd_resource *shadow = fd_resource(pshadow);
240
241 DBG("shadow: %p (%d) -> %p (%d)\n", rsc, rsc->base.reference.count,
242 shadow, shadow->base.reference.count);
243
244 /* TODO valid_buffer_range?? */
245 swap(rsc->bo, shadow->bo);
246 swap(rsc->write_batch, shadow->write_batch);
247 swap(rsc->layout, shadow->layout);
248 rsc->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);
249
250 /* at this point, the newly created shadow buffer is not referenced
251 * by any batches, but the existing rsc (probably) is. We need to
252 * transfer those references over:
253 */
254 debug_assert(shadow->batch_mask == 0);
255 struct fd_batch *batch;
256 foreach_batch(batch, &ctx->screen->batch_cache, rsc->batch_mask) {
257 struct set_entry *entry = _mesa_set_search(batch->resources, rsc);
258 _mesa_set_remove(batch->resources, entry);
259 _mesa_set_add(batch->resources, shadow);
260 }
261 swap(rsc->batch_mask, shadow->batch_mask);
262
263 mtx_unlock(&ctx->screen->lock);
264
265 struct pipe_blit_info blit = {};
266 blit.dst.resource = prsc;
267 blit.dst.format = prsc->format;
268 blit.src.resource = pshadow;
269 blit.src.format = pshadow->format;
270 blit.mask = util_format_get_mask(prsc->format);
271 blit.filter = PIPE_TEX_FILTER_NEAREST;
272
273 #define set_box(field, val) do { \
274 blit.dst.field = (val); \
275 blit.src.field = (val); \
276 } while (0)
277
278 /* blit the other levels in their entirety: */
279 for (unsigned l = 0; l <= prsc->last_level; l++) {
280 if (box && l == level)
281 continue;
282
283 /* just blit whole level: */
284 set_box(level, l);
285 set_box(box.width, u_minify(prsc->width0, l));
286 set_box(box.height, u_minify(prsc->height0, l));
287 set_box(box.depth, u_minify(prsc->depth0, l));
288
289 for (int i = 0; i < prsc->array_size; i++) {
290 set_box(box.z, i);
291 do_blit(ctx, &blit, fallback);
292 }
293 }
294
295 /* deal w/ current level specially, since we might need to split
296 * it up into a couple blits:
297 */
298 if (box && !discard_whole_level) {
299 set_box(level, level);
300
301 switch (prsc->target) {
302 case PIPE_BUFFER:
303 case PIPE_TEXTURE_1D:
304 set_box(box.y, 0);
305 set_box(box.z, 0);
306 set_box(box.height, 1);
307 set_box(box.depth, 1);
308
309 if (box->x > 0) {
310 set_box(box.x, 0);
311 set_box(box.width, box->x);
312
313 do_blit(ctx, &blit, fallback);
314 }
315 if ((box->x + box->width) < u_minify(prsc->width0, level)) {
316 set_box(box.x, box->x + box->width);
317 set_box(box.width, u_minify(prsc->width0, level) - (box->x + box->width));
318
319 do_blit(ctx, &blit, fallback);
320 }
321 break;
322 case PIPE_TEXTURE_2D:
323 /* TODO */
324 default:
325 unreachable("TODO");
326 }
327 }
328
329 ctx->in_shadow = false;
330
331 pipe_resource_reference(&pshadow, NULL);
332
333 return true;
334 }
335
336 /**
 337  * Uncompress a UBWC-compressed buffer "in place".  This works basically
 338  * like resource shadowing: create a new resource, do an uncompress blit,
 339  * and swap the state between the shadow and the original resource so it
340 * appears to the state tracker as if nothing changed.
341 */
342 void
343 fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc)
344 {
345 bool success =
346 fd_try_shadow_resource(ctx, rsc, 0, NULL, FD_FORMAT_MOD_QCOM_TILED);
347
348 /* shadow should not fail in any cases where we need to uncompress: */
349 debug_assert(success);
350
351 /*
 352 	 * TODO what if rsc is used in other contexts?  We don't currently
 353 	 * have a good way to rebind_resource() in other contexts.  And an
 354 	 * app that is reading one resource in multiple contexts isn't
 355 	 * going to expect that the resource is modified.
356 *
357 * Hopefully the edge cases where we need to uncompress are rare
358 * enough that they mostly only show up in deqp.
359 */
360
361 rebind_resource(ctx, &rsc->base);
362 }
363
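/* Allocate a temporary linear resource sized to the transfer box, used
 * to stage transfers of tiled resources and writes that would otherwise
 * stall on a busy bo.
 */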
364 static struct fd_resource *
365 fd_alloc_staging(struct fd_context *ctx, struct fd_resource *rsc,
366 unsigned level, const struct pipe_box *box)
367 {
368 struct pipe_context *pctx = &ctx->base;
369 struct pipe_resource tmpl = rsc->base;
370
371 tmpl.width0 = box->width;
372 tmpl.height0 = box->height;
373 /* for array textures, box->depth is the array_size, otherwise
374 * for 3d textures, it is the depth:
375 */
376 if (tmpl.array_size > 1) {
377 if (tmpl.target == PIPE_TEXTURE_CUBE)
378 tmpl.target = PIPE_TEXTURE_2D_ARRAY;
379 tmpl.array_size = box->depth;
380 tmpl.depth0 = 1;
381 } else {
382 tmpl.array_size = 1;
383 tmpl.depth0 = box->depth;
384 }
385 tmpl.last_level = 0;
386 tmpl.bind |= PIPE_BIND_LINEAR;
387
388 struct pipe_resource *pstaging =
389 pctx->screen->resource_create(pctx->screen, &tmpl);
390 if (!pstaging)
391 return NULL;
392
393 return fd_resource(pstaging);
394 }
395
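/* Copy the staging resource back into the destination level/box; used
 * when unmapping a write transfer that went through a staging buffer.
 */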
396 static void
397 fd_blit_from_staging(struct fd_context *ctx, struct fd_transfer *trans)
398 {
399 struct pipe_resource *dst = trans->base.resource;
400 struct pipe_blit_info blit = {};
401
402 blit.dst.resource = dst;
403 blit.dst.format = dst->format;
404 blit.dst.level = trans->base.level;
405 blit.dst.box = trans->base.box;
406 blit.src.resource = trans->staging_prsc;
407 blit.src.format = trans->staging_prsc->format;
408 blit.src.level = 0;
409 blit.src.box = trans->staging_box;
410 blit.mask = util_format_get_mask(trans->staging_prsc->format);
411 blit.filter = PIPE_TEX_FILTER_NEAREST;
412
413 do_blit(ctx, &blit, false);
414 }
415
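/* Fill the staging resource from the source level/box; used when
 * mapping a read transfer that goes through a staging buffer.
 */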
416 static void
417 fd_blit_to_staging(struct fd_context *ctx, struct fd_transfer *trans)
418 {
419 struct pipe_resource *src = trans->base.resource;
420 struct pipe_blit_info blit = {};
421
422 blit.src.resource = src;
423 blit.src.format = src->format;
424 blit.src.level = trans->base.level;
425 blit.src.box = trans->base.box;
426 blit.dst.resource = trans->staging_prsc;
427 blit.dst.format = trans->staging_prsc->format;
428 blit.dst.level = 0;
429 blit.dst.box = trans->staging_box;
430 blit.mask = util_format_get_mask(trans->staging_prsc->format);
431 blit.filter = PIPE_TEX_FILTER_NEAREST;
432
433 do_blit(ctx, &blit, false);
434 }
435
436 static void fd_resource_transfer_flush_region(struct pipe_context *pctx,
437 struct pipe_transfer *ptrans,
438 const struct pipe_box *box)
439 {
440 struct fd_resource *rsc = fd_resource(ptrans->resource);
441
442 if (ptrans->resource->target == PIPE_BUFFER)
443 util_range_add(&rsc->base, &rsc->valid_buffer_range,
444 ptrans->box.x + box->x,
445 ptrans->box.x + box->x + box->width);
446 }
447
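/* Flush rendering that touches the resource: for write access, flush
 * every batch that references it; otherwise flush just the batch (if
 * any) that writes it.
 */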
448 static void
449 flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
450 {
451 struct fd_batch *write_batch = NULL;
452
453 mtx_lock(&ctx->screen->lock);
454 fd_batch_reference_locked(&write_batch, rsc->write_batch);
455 mtx_unlock(&ctx->screen->lock);
456
457 if (usage & PIPE_TRANSFER_WRITE) {
458 struct fd_batch *batch, *batches[32] = {};
459 uint32_t batch_mask;
460
 461 		/* This is a bit awkward; a fd_batch_flush_locked() would probably
 462 		 * make things simpler.. but we need to hold the lock
463 * to iterate the batches which reference this resource. So
464 * we must first grab references under a lock, then flush.
465 */
466 mtx_lock(&ctx->screen->lock);
467 batch_mask = rsc->batch_mask;
468 foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
469 fd_batch_reference_locked(&batches[batch->idx], batch);
470 mtx_unlock(&ctx->screen->lock);
471
472 foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
473 fd_batch_flush(batch);
474
475 foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
476 fd_batch_reference(&batches[batch->idx], NULL);
477 }
478 assert(rsc->batch_mask == 0);
479 } else if (write_batch) {
480 fd_batch_flush(write_batch);
481 }
482
483 fd_batch_reference(&write_batch, NULL);
484
485 assert(!rsc->write_batch);
486 }
487
488 static void
489 fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
490 {
491 flush_resource(fd_context(pctx), fd_resource(prsc), PIPE_TRANSFER_READ);
492 }
493
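/* Tear down a transfer: blit back from the staging resource for write
 * transfers, finish cpu access to the bo (for synchronized transfers),
 * and update the resource's valid range before freeing the transfer.
 */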
494 static void
495 fd_resource_transfer_unmap(struct pipe_context *pctx,
496 struct pipe_transfer *ptrans)
497 {
498 struct fd_context *ctx = fd_context(pctx);
499 struct fd_resource *rsc = fd_resource(ptrans->resource);
500 struct fd_transfer *trans = fd_transfer(ptrans);
501
502 if (trans->staging_prsc) {
503 if (ptrans->usage & PIPE_TRANSFER_WRITE)
504 fd_blit_from_staging(ctx, trans);
505 pipe_resource_reference(&trans->staging_prsc, NULL);
506 }
507
508 if (!(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
509 fd_bo_cpu_fini(rsc->bo);
510 }
511
512 util_range_add(&rsc->base, &rsc->valid_buffer_range,
513 ptrans->box.x,
514 ptrans->box.x + ptrans->box.width);
515
516 pipe_resource_reference(&ptrans->resource, NULL);
517 slab_free(&ctx->transfer_pool, ptrans);
518 }
519
520 static void *
521 fd_resource_transfer_map(struct pipe_context *pctx,
522 struct pipe_resource *prsc,
523 unsigned level, unsigned usage,
524 const struct pipe_box *box,
525 struct pipe_transfer **pptrans)
526 {
527 struct fd_context *ctx = fd_context(pctx);
528 struct fd_resource *rsc = fd_resource(prsc);
529 struct fdl_slice *slice = fd_resource_slice(rsc, level);
530 struct fd_transfer *trans;
531 struct pipe_transfer *ptrans;
532 enum pipe_format format = prsc->format;
533 uint32_t op = 0;
534 uint32_t offset;
535 char *buf;
536 int ret = 0;
537
538 DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
539 box->width, box->height, box->x, box->y);
540
541 ptrans = slab_alloc(&ctx->transfer_pool);
542 if (!ptrans)
543 return NULL;
544
545 /* slab_alloc_st() doesn't zero: */
546 trans = fd_transfer(ptrans);
547 memset(trans, 0, sizeof(*trans));
548
549 pipe_resource_reference(&ptrans->resource, prsc);
550 ptrans->level = level;
551 ptrans->usage = usage;
552 ptrans->box = *box;
553 ptrans->stride = slice->pitch;
554 ptrans->layer_stride = fd_resource_layer_stride(rsc, level);
555
556 /* we always need a staging texture for tiled buffers:
557 *
558 * TODO we might sometimes want to *also* shadow the resource to avoid
559 * splitting a batch.. for ex, mid-frame texture uploads to a tiled
560 * texture.
561 */
562 if (rsc->layout.tile_mode) {
563 struct fd_resource *staging_rsc;
564
565 staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
566 if (staging_rsc) {
567 struct fdl_slice *staging_slice =
568 fd_resource_slice(staging_rsc, 0);
569 // TODO for PIPE_TRANSFER_READ, need to do untiling blit..
570 trans->staging_prsc = &staging_rsc->base;
571 trans->base.stride = staging_slice->pitch;
572 trans->base.layer_stride = fd_resource_layer_stride(staging_rsc, 0);
573 trans->staging_box = *box;
574 trans->staging_box.x = 0;
575 trans->staging_box.y = 0;
576 trans->staging_box.z = 0;
577
578 if (usage & PIPE_TRANSFER_READ) {
579 fd_blit_to_staging(ctx, trans);
580
581 fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe,
582 DRM_FREEDRENO_PREP_READ);
583 }
584
585 buf = fd_bo_map(staging_rsc->bo);
586 offset = 0;
587
588 *pptrans = ptrans;
589
590 ctx->stats.staging_uploads++;
591
592 return buf;
593 }
594 }
595
596 if (ctx->in_shadow && !(usage & PIPE_TRANSFER_READ))
597 usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
598
599 if (usage & PIPE_TRANSFER_READ)
600 op |= DRM_FREEDRENO_PREP_READ;
601
602 if (usage & PIPE_TRANSFER_WRITE)
603 op |= DRM_FREEDRENO_PREP_WRITE;
604
605 bool needs_flush = pending(rsc, !!(usage & PIPE_TRANSFER_WRITE));
606
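	/* If the whole resource is being discarded and there is pending or
	 * in-flight work on the current bo, swap in a fresh bo (and rebind
	 * it in the context state) rather than stalling:
	 */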
607 if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
608 if (needs_flush || fd_resource_busy(rsc, op)) {
609 realloc_bo(rsc, fd_bo_size(rsc->bo));
610 rebind_resource(ctx, prsc);
611 }
612 } else if ((usage & PIPE_TRANSFER_WRITE) &&
613 prsc->target == PIPE_BUFFER &&
614 !util_ranges_intersect(&rsc->valid_buffer_range,
615 box->x, box->x + box->width)) {
616 /* We are trying to write to a previously uninitialized range. No need
617 * to wait.
618 */
619 } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
620 struct fd_batch *write_batch = NULL;
621
622 /* hold a reference, so it doesn't disappear under us: */
623 fd_context_lock(ctx);
624 fd_batch_reference_locked(&write_batch, rsc->write_batch);
625 fd_context_unlock(ctx);
626
627 if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
628 write_batch->back_blit) {
 629 		/* if the only thing pending is a back-blit, we can discard it: */
630 fd_batch_reset(write_batch);
631 }
632
633 /* If the GPU is writing to the resource, or if it is reading from the
634 * resource and we're trying to write to it, flush the renders.
635 */
636 bool busy = needs_flush || fd_resource_busy(rsc, op);
637
638 /* if we need to flush/stall, see if we can make a shadow buffer
639 * to avoid this:
640 *
 641 		 * TODO we could also go down this path when !reorder && !busy_for_read,
642 * ie. we only *don't* want to go down this path if the blit
643 * will trigger a flush!
644 */
645 if (ctx->screen->reorder && busy && !(usage & PIPE_TRANSFER_READ) &&
646 (usage & PIPE_TRANSFER_DISCARD_RANGE)) {
647 /* try shadowing only if it avoids a flush, otherwise staging would
648 * be better:
649 */
650 if (needs_flush && fd_try_shadow_resource(ctx, rsc, level,
651 box, DRM_FORMAT_MOD_LINEAR)) {
652 needs_flush = busy = false;
653 rebind_resource(ctx, prsc);
654 ctx->stats.shadow_uploads++;
655 } else {
656 struct fd_resource *staging_rsc;
657
658 if (needs_flush) {
659 flush_resource(ctx, rsc, usage);
660 needs_flush = false;
661 }
662
663 /* in this case, we don't need to shadow the whole resource,
664 * since any draw that references the previous contents has
665 * already had rendering flushed for all tiles. So we can
666 * use a staging buffer to do the upload.
667 */
668 staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
669 if (staging_rsc) {
670 struct fdl_slice *staging_slice =
671 fd_resource_slice(staging_rsc, 0);
672 trans->staging_prsc = &staging_rsc->base;
673 trans->base.stride = staging_slice->pitch;
674 trans->base.layer_stride =
675 fd_resource_layer_stride(staging_rsc, 0);
676 trans->staging_box = *box;
677 trans->staging_box.x = 0;
678 trans->staging_box.y = 0;
679 trans->staging_box.z = 0;
680 buf = fd_bo_map(staging_rsc->bo);
681 offset = 0;
682
683 *pptrans = ptrans;
684
685 fd_batch_reference(&write_batch, NULL);
686
687 ctx->stats.staging_uploads++;
688
689 return buf;
690 }
691 }
692 }
693
694 if (needs_flush) {
695 flush_resource(ctx, rsc, usage);
696 needs_flush = false;
697 }
698
699 fd_batch_reference(&write_batch, NULL);
700
701 /* The GPU keeps track of how the various bo's are being used, and
702 * will wait if necessary for the proper operation to have
703 * completed.
704 */
705 if (busy) {
706 ret = fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);
707 if (ret)
708 goto fail;
709 }
710 }
711
712 buf = fd_bo_map(rsc->bo);
713 offset =
714 box->y / util_format_get_blockheight(format) * ptrans->stride +
715 box->x / util_format_get_blockwidth(format) * rsc->layout.cpp +
716 fd_resource_offset(rsc, level, box->z);
717
718 if (usage & PIPE_TRANSFER_WRITE)
719 rsc->valid = true;
720
721 *pptrans = ptrans;
722
723 return buf + offset;
724
725 fail:
726 fd_resource_transfer_unmap(pctx, ptrans);
727 return NULL;
728 }
729
730 static void
731 fd_resource_destroy(struct pipe_screen *pscreen,
732 struct pipe_resource *prsc)
733 {
734 struct fd_resource *rsc = fd_resource(prsc);
735 fd_bc_invalidate_resource(rsc, true);
736 if (rsc->bo)
737 fd_bo_del(rsc->bo);
738 if (rsc->scanout)
739 renderonly_scanout_destroy(rsc->scanout, fd_screen(pscreen)->ro);
740
741 util_range_destroy(&rsc->valid_buffer_range);
742 FREE(rsc);
743 }
744
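/* Map the resource's internal layout onto a DRM format modifier for
 * export: linear, UBWC-compressed, or (for tiled-but-not-UBWC, which
 * has no public modifier yet) invalid.
 */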
745 static uint64_t
746 fd_resource_modifier(struct fd_resource *rsc)
747 {
748 if (!rsc->layout.tile_mode)
749 return DRM_FORMAT_MOD_LINEAR;
750
751 if (rsc->layout.ubwc_layer_size)
752 return DRM_FORMAT_MOD_QCOM_COMPRESSED;
753
754 /* TODO invent a modifier for tiled but not UBWC buffers: */
755 return DRM_FORMAT_MOD_INVALID;
756 }
757
758 static bool
759 fd_resource_get_handle(struct pipe_screen *pscreen,
760 struct pipe_context *pctx,
761 struct pipe_resource *prsc,
762 struct winsys_handle *handle,
763 unsigned usage)
764 {
765 struct fd_resource *rsc = fd_resource(prsc);
766
767 handle->modifier = fd_resource_modifier(rsc);
768
769 return fd_screen_bo_get_handle(pscreen, rsc->bo, rsc->scanout,
770 fd_resource_slice(rsc, 0)->pitch, handle);
771 }
772
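/* Compute pitch, offset and layer size for each miplevel, honouring the
 * gmem pitch alignment; returns the total bo size in bytes.
 */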
773 static uint32_t
774 setup_slices(struct fd_resource *rsc, uint32_t alignment, enum pipe_format format)
775 {
776 struct pipe_resource *prsc = &rsc->base;
777 struct fd_screen *screen = fd_screen(prsc->screen);
778 enum util_format_layout layout = util_format_description(format)->layout;
779 uint32_t pitchalign = screen->gmem_alignw;
780 uint32_t level, size = 0;
781 uint32_t width = prsc->width0;
782 uint32_t height = prsc->height0;
783 uint32_t depth = prsc->depth0;
784 /* in layer_first layout, the level (slice) contains just one
785 * layer (since in fact the layer contains the slices)
786 */
787 uint32_t layers_in_level = rsc->layout.layer_first ? 1 : prsc->array_size;
788
789 for (level = 0; level <= prsc->last_level; level++) {
790 struct fdl_slice *slice = fd_resource_slice(rsc, level);
791 uint32_t blocks;
792
793 if (layout == UTIL_FORMAT_LAYOUT_ASTC)
794 width = util_align_npot(width, pitchalign * util_format_get_blockwidth(format));
795 else
796 width = align(width, pitchalign);
797 slice->pitch = util_format_get_nblocksx(format, width) * rsc->layout.cpp;
798 slice->offset = size;
799 blocks = util_format_get_nblocks(format, width, height);
800 /* 1d array and 2d array textures must all have the same layer size
801 * for each miplevel on a3xx. 3d textures can have different layer
802 * sizes for high levels, but the hw auto-sizer is buggy (or at least
 803 		 * different than what this code does), so as soon as the layer size
 804 		 * gets small enough (at or below the 0xf000 threshold), we stop reducing it.
805 */
806 if (prsc->target == PIPE_TEXTURE_3D && (
807 level == 1 ||
808 (level > 1 && fd_resource_slice(rsc, level - 1)->size0 > 0xf000)))
809 slice->size0 = align(blocks * rsc->layout.cpp, alignment);
810 else if (level == 0 || rsc->layout.layer_first || alignment == 1)
811 slice->size0 = align(blocks * rsc->layout.cpp, alignment);
812 else
813 slice->size0 = fd_resource_slice(rsc, level - 1)->size0;
814
815 size += slice->size0 * depth * layers_in_level;
816
817 width = u_minify(width, 1);
818 height = u_minify(height, 1);
819 depth = u_minify(depth, 1);
820 }
821
822 return size;
823 }
824
825 static uint32_t
826 slice_alignment(enum pipe_texture_target target)
827 {
828 /* on a3xx, 2d array and 3d textures seem to want their
829 * layers aligned to page boundaries:
830 */
831 switch (target) {
832 case PIPE_TEXTURE_3D:
833 case PIPE_TEXTURE_1D_ARRAY:
834 case PIPE_TEXTURE_2D_ARRAY:
835 return 4096;
836 default:
837 return 1;
838 }
839 }
840
 841 /* cross-generation texture layout to plug into screen->setup_slices()..
 842  * replace with a generation-specific one as needed.
 843  *
 844  * TODO for a4xx we can probably extract out the a4xx-specific logic into
 845  * a small fd4_setup_slices() wrapper that sets up layer_first, and then
846 * calls this.
847 */
848 uint32_t
849 fd_setup_slices(struct fd_resource *rsc)
850 {
851 uint32_t alignment;
852
853 alignment = slice_alignment(rsc->base.target);
854
855 struct fd_screen *screen = fd_screen(rsc->base.screen);
856 if (is_a4xx(screen)) {
857 switch (rsc->base.target) {
858 case PIPE_TEXTURE_3D:
859 rsc->layout.layer_first = false;
860 break;
861 default:
862 rsc->layout.layer_first = true;
863 alignment = 1;
864 break;
865 }
866 }
867
868 return setup_slices(rsc, alignment, rsc->base.format);
869 }
870
 871 /* special case to resize query buf after it has been allocated.. */
872 void
873 fd_resource_resize(struct pipe_resource *prsc, uint32_t sz)
874 {
875 struct fd_resource *rsc = fd_resource(prsc);
876
877 debug_assert(prsc->width0 == 0);
878 debug_assert(prsc->target == PIPE_BUFFER);
879 debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
880
881 prsc->width0 = sz;
882 realloc_bo(rsc, fd_screen(prsc->screen)->setup_slices(rsc));
883 }
884
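/* Initialize the generation-independent parts of the layout (dimensions
 * and bytes per block, scaled by sample count) from the resource
 * template.
 */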
885 static void
886 fd_resource_layout_init(struct pipe_resource *prsc)
887 {
888 struct fd_resource *rsc = fd_resource(prsc);
889 struct fdl_layout *layout = &rsc->layout;
890
891 layout->width0 = prsc->width0;
892 layout->height0 = prsc->height0;
893 layout->depth0 = prsc->depth0;
894
895 layout->cpp = util_format_get_blocksize(prsc->format);
896 layout->cpp *= fd_resource_nr_samples(prsc);
897 layout->cpp_shift = ffs(layout->cpp) - 1;
898 }
899
900 /**
901 * Create a new texture object, using the given template info.
902 */
903 static struct pipe_resource *
904 fd_resource_create_with_modifiers(struct pipe_screen *pscreen,
905 const struct pipe_resource *tmpl,
906 const uint64_t *modifiers, int count)
907 {
908 struct fd_screen *screen = fd_screen(pscreen);
909 struct fd_resource *rsc;
910 struct pipe_resource *prsc;
911 enum pipe_format format = tmpl->format;
912 uint32_t size;
913
 914 	/* when using kmsro, scanout buffers are allocated on the display device.
 915 	 * create_with_modifiers() doesn't give us usage flags, so we have to
 916 	 * assume that all calls with modifiers are scanout-possible.
917 */
918 if (screen->ro &&
919 ((tmpl->bind & PIPE_BIND_SCANOUT) ||
920 !(count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID))) {
921 struct pipe_resource scanout_templat = *tmpl;
922 struct renderonly_scanout *scanout;
923 struct winsys_handle handle;
924
925 /* apply freedreno alignment requirement */
926 scanout_templat.width0 = align(tmpl->width0, screen->gmem_alignw);
927
928 scanout = renderonly_scanout_for_resource(&scanout_templat,
929 screen->ro, &handle);
930 if (!scanout)
931 return NULL;
932
933 renderonly_scanout_destroy(scanout, screen->ro);
934
935 assert(handle.type == WINSYS_HANDLE_TYPE_FD);
936 rsc = fd_resource(pscreen->resource_from_handle(pscreen, tmpl,
937 &handle,
938 PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE));
939 close(handle.handle);
940 if (!rsc)
941 return NULL;
942
943 return &rsc->base;
944 }
945
946 rsc = CALLOC_STRUCT(fd_resource);
947 prsc = &rsc->base;
948
949 DBG("%p: target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
950 "nr_samples=%u, usage=%u, bind=%x, flags=%x", prsc,
951 tmpl->target, util_format_name(format),
952 tmpl->width0, tmpl->height0, tmpl->depth0,
953 tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
954 tmpl->usage, tmpl->bind, tmpl->flags);
955
956 if (!rsc)
957 return NULL;
958
959 *prsc = *tmpl;
960 fd_resource_layout_init(prsc);
961
962 #define LINEAR \
963 (PIPE_BIND_SCANOUT | \
964 PIPE_BIND_LINEAR | \
965 PIPE_BIND_DISPLAY_TARGET)
966
967 bool linear = drm_find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count);
968 if (tmpl->bind & LINEAR)
969 linear = true;
970
971 if (fd_mesa_debug & FD_DBG_NOTILE)
972 linear = true;
973
 974 	/* Allow buffer (UBWC) compression for non-shared buffers; for shared
 975 	 * buffers, only allow it if the QCOM_COMPRESSED modifier is explicitly
 976 	 * requested:
977 *
978 * TODO we should probably also limit tiled in a similar way,
979 * except we don't have a format modifier for tiled. (We probably
980 * should.)
981 */
982 bool allow_ubwc = drm_find_modifier(DRM_FORMAT_MOD_INVALID, modifiers, count);
983 if (tmpl->bind & PIPE_BIND_SHARED)
984 allow_ubwc = drm_find_modifier(DRM_FORMAT_MOD_QCOM_COMPRESSED, modifiers, count);
985
986 allow_ubwc &= !(fd_mesa_debug & FD_DBG_NOUBWC);
987
988 pipe_reference_init(&prsc->reference, 1);
989
990 prsc->screen = pscreen;
991
992 if (screen->tile_mode &&
993 (tmpl->target != PIPE_BUFFER) &&
994 !linear) {
995 rsc->layout.tile_mode = screen->tile_mode(prsc);
996 }
997
998 util_range_init(&rsc->valid_buffer_range);
999
1000 rsc->internal_format = format;
1001
1002 rsc->layout.ubwc = rsc->layout.tile_mode && is_a6xx(screen) && allow_ubwc;
1003
1004 if (prsc->target == PIPE_BUFFER) {
1005 assert(prsc->format == PIPE_FORMAT_R8_UNORM);
1006 size = prsc->width0;
1007 fdl_layout_buffer(&rsc->layout, size);
1008 } else {
1009 size = screen->setup_slices(rsc);
1010 }
1011
1012 /* special case for hw-query buffer, which we need to allocate before we
1013 * know the size:
1014 */
1015 if (size == 0) {
 1016 		/* note, semi-intentional == instead of & */
1017 debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
1018 return prsc;
1019 }
1020
1021 /* Set the layer size if the (non-a6xx) backend hasn't done so. */
1022 if (rsc->layout.layer_first && !rsc->layout.layer_size) {
1023 rsc->layout.layer_size = align(size, 4096);
1024 size = rsc->layout.layer_size * prsc->array_size;
1025 }
1026
1027 if (fd_mesa_debug & FD_DBG_LAYOUT)
1028 fdl_dump_layout(&rsc->layout);
1029
1030 realloc_bo(rsc, size);
1031 if (!rsc->bo)
1032 goto fail;
1033
1034 return prsc;
1035 fail:
1036 fd_resource_destroy(pscreen, prsc);
1037 return NULL;
1038 }
1039
1040 static struct pipe_resource *
1041 fd_resource_create(struct pipe_screen *pscreen,
1042 const struct pipe_resource *tmpl)
1043 {
1044 const uint64_t mod = DRM_FORMAT_MOD_INVALID;
1045 return fd_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
1046 }
1047
1048 /**
1049 * Create a texture from a winsys_handle. The handle is often created in
1050 * another process by first creating a pipe texture and then calling
1051 * resource_get_handle.
1052 */
1053 static struct pipe_resource *
1054 fd_resource_from_handle(struct pipe_screen *pscreen,
1055 const struct pipe_resource *tmpl,
1056 struct winsys_handle *handle, unsigned usage)
1057 {
1058 struct fd_screen *screen = fd_screen(pscreen);
1059 struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
1060 struct fdl_slice *slice = fd_resource_slice(rsc, 0);
1061 struct pipe_resource *prsc = &rsc->base;
1062 uint32_t pitchalign = fd_screen(pscreen)->gmem_alignw * rsc->layout.cpp;
1063
1064 DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
1065 "nr_samples=%u, usage=%u, bind=%x, flags=%x",
1066 tmpl->target, util_format_name(tmpl->format),
1067 tmpl->width0, tmpl->height0, tmpl->depth0,
1068 tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
1069 tmpl->usage, tmpl->bind, tmpl->flags);
1070
1071 if (!rsc)
1072 return NULL;
1073
1074 *prsc = *tmpl;
1075 fd_resource_layout_init(prsc);
1076
1077 pipe_reference_init(&prsc->reference, 1);
1078
1079 prsc->screen = pscreen;
1080
1081 util_range_init(&rsc->valid_buffer_range);
1082
1083 rsc->bo = fd_screen_bo_from_handle(pscreen, handle);
1084 if (!rsc->bo)
1085 goto fail;
1086
1087 rsc->internal_format = tmpl->format;
1088 slice->pitch = handle->stride;
1089 slice->offset = handle->offset;
1090 slice->size0 = handle->stride * prsc->height0;
1091
1092 if ((slice->pitch < align(prsc->width0 * rsc->layout.cpp, pitchalign)) ||
1093 (slice->pitch & (pitchalign - 1)))
1094 goto fail;
1095
1096 assert(rsc->layout.cpp);
1097
1098 if (screen->layout_resource_for_modifier(rsc, handle->modifier) < 0)
1099 goto fail;
1100
1101 if (screen->ro) {
1102 rsc->scanout =
1103 renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
1104 /* failure is expected in some cases.. */
1105 }
1106
1107 rsc->valid = true;
1108
1109 return prsc;
1110
1111 fail:
1112 fd_resource_destroy(pscreen, prsc);
1113 return NULL;
1114 }
1115
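/* Evaluate the bound conditional-render query (if any); returns true
 * when rendering should proceed.
 */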
1116 bool
1117 fd_render_condition_check(struct pipe_context *pctx)
1118 {
1119 struct fd_context *ctx = fd_context(pctx);
1120
1121 if (!ctx->cond_query)
1122 return true;
1123
1124 union pipe_query_result res = { 0 };
1125 bool wait =
1126 ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
1127 ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;
1128
1129 if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
1130 return (bool)res.u64 != ctx->cond_cond;
1131
1132 return true;
1133 }
1134
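/* pipe->invalidate_resource() hook: drop pending resolves for any
 * framebuffer attachment backed by this resource and mark its contents
 * as no longer valid.
 */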
1135 static void
1136 fd_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
1137 {
1138 struct fd_context *ctx = fd_context(pctx);
1139 struct fd_resource *rsc = fd_resource(prsc);
1140
1141 /*
1142 * TODO I guess we could track that the resource is invalidated and
1143 * use that as a hint to realloc rather than stall in _transfer_map(),
1144 * even in the non-DISCARD_WHOLE_RESOURCE case?
1145 *
 1146 	 * Note: we set dirty bits to trigger the invalidate logic in fd_draw_vbo
1147 */
1148
1149 if (rsc->write_batch) {
1150 struct fd_batch *batch = rsc->write_batch;
1151 struct pipe_framebuffer_state *pfb = &batch->framebuffer;
1152
1153 if (pfb->zsbuf && pfb->zsbuf->texture == prsc) {
1154 batch->resolve &= ~(FD_BUFFER_DEPTH | FD_BUFFER_STENCIL);
1155 ctx->dirty |= FD_DIRTY_ZSA;
1156 }
1157
1158 for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
1159 if (pfb->cbufs[i] && pfb->cbufs[i]->texture == prsc) {
1160 batch->resolve &= ~(PIPE_CLEAR_COLOR0 << i);
1161 ctx->dirty |= FD_DIRTY_FRAMEBUFFER;
1162 }
1163 }
1164 }
1165
1166 rsc->valid = false;
1167 }
1168
1169 static enum pipe_format
1170 fd_resource_get_internal_format(struct pipe_resource *prsc)
1171 {
1172 return fd_resource(prsc)->internal_format;
1173 }
1174
1175 static void
1176 fd_resource_set_stencil(struct pipe_resource *prsc,
1177 struct pipe_resource *stencil)
1178 {
1179 fd_resource(prsc)->stencil = fd_resource(stencil);
1180 }
1181
1182 static struct pipe_resource *
1183 fd_resource_get_stencil(struct pipe_resource *prsc)
1184 {
1185 struct fd_resource *rsc = fd_resource(prsc);
1186 if (rsc->stencil)
1187 return &rsc->stencil->base;
1188 return NULL;
1189 }
1190
1191 static const struct u_transfer_vtbl transfer_vtbl = {
1192 .resource_create = fd_resource_create,
1193 .resource_destroy = fd_resource_destroy,
1194 .transfer_map = fd_resource_transfer_map,
1195 .transfer_flush_region = fd_resource_transfer_flush_region,
1196 .transfer_unmap = fd_resource_transfer_unmap,
1197 .get_internal_format = fd_resource_get_internal_format,
1198 .set_stencil = fd_resource_set_stencil,
1199 .get_stencil = fd_resource_get_stencil,
1200 };
1201
1202 static const uint64_t supported_modifiers[] = {
1203 DRM_FORMAT_MOD_LINEAR,
1204 };
1205
1206 static int
1207 fd_layout_resource_for_modifier(struct fd_resource *rsc, uint64_t modifier)
1208 {
1209 switch (modifier) {
1210 case DRM_FORMAT_MOD_LINEAR:
1211 return 0;
1212 default:
1213 return -1;
1214 }
1215 }
1216
1217 void
1218 fd_resource_screen_init(struct pipe_screen *pscreen)
1219 {
1220 struct fd_screen *screen = fd_screen(pscreen);
1221 bool fake_rgtc = screen->gpu_id < 400;
1222
1223 pscreen->resource_create = u_transfer_helper_resource_create;
1224 /* NOTE: u_transfer_helper does not yet support the _with_modifiers()
1225 * variant:
1226 */
1227 pscreen->resource_create_with_modifiers = fd_resource_create_with_modifiers;
1228 pscreen->resource_from_handle = fd_resource_from_handle;
1229 pscreen->resource_get_handle = fd_resource_get_handle;
1230 pscreen->resource_destroy = u_transfer_helper_resource_destroy;
1231
1232 pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
1233 true, false, fake_rgtc, true);
1234
1235 if (!screen->setup_slices)
1236 screen->setup_slices = fd_setup_slices;
1237 if (!screen->layout_resource_for_modifier)
1238 screen->layout_resource_for_modifier = fd_layout_resource_for_modifier;
1239 if (!screen->supported_modifiers) {
1240 screen->supported_modifiers = supported_modifiers;
1241 screen->num_supported_modifiers = ARRAY_SIZE(supported_modifiers);
1242 }
1243 }
1244
1245 static void
1246 fd_get_sample_position(struct pipe_context *context,
1247 unsigned sample_count, unsigned sample_index,
1248 float *pos_out)
1249 {
1250 /* The following is copied from nouveau/nv50 except for position
 1251 	 * values, which are taken from the blob driver */
1252 static const uint8_t pos1[1][2] = { { 0x8, 0x8 } };
1253 static const uint8_t pos2[2][2] = {
1254 { 0xc, 0xc }, { 0x4, 0x4 } };
1255 static const uint8_t pos4[4][2] = {
1256 { 0x6, 0x2 }, { 0xe, 0x6 },
1257 { 0x2, 0xa }, { 0xa, 0xe } };
1258 /* TODO needs to be verified on supported hw */
1259 static const uint8_t pos8[8][2] = {
1260 { 0x9, 0x5 }, { 0x7, 0xb },
1261 { 0xd, 0x9 }, { 0x5, 0x3 },
1262 { 0x3, 0xd }, { 0x1, 0x7 },
1263 { 0xb, 0xf }, { 0xf, 0x1 } };
1264
1265 const uint8_t (*ptr)[2];
1266
1267 switch (sample_count) {
1268 case 1:
1269 ptr = pos1;
1270 break;
1271 case 2:
1272 ptr = pos2;
1273 break;
1274 case 4:
1275 ptr = pos4;
1276 break;
1277 case 8:
1278 ptr = pos8;
1279 break;
1280 default:
1281 assert(0);
1282 return;
1283 }
1284
1285 pos_out[0] = ptr[sample_index][0] / 16.0f;
1286 pos_out[1] = ptr[sample_index][1] / 16.0f;
1287 }
1288
1289 static void
1290 fd_blit_pipe(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
1291 {
1292 /* wrap fd_blit to return void */
1293 fd_blit(pctx, blit_info);
1294 }
1295
1296 void
1297 fd_resource_context_init(struct pipe_context *pctx)
1298 {
1299 pctx->transfer_map = u_transfer_helper_transfer_map;
1300 pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
1301 pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
1302 pctx->buffer_subdata = u_default_buffer_subdata;
1303 pctx->texture_subdata = u_default_texture_subdata;
1304 pctx->create_surface = fd_create_surface;
1305 pctx->surface_destroy = fd_surface_destroy;
1306 pctx->resource_copy_region = fd_resource_copy_region;
1307 pctx->blit = fd_blit_pipe;
1308 pctx->flush_resource = fd_flush_resource;
1309 pctx->invalidate_resource = fd_invalidate_resource;
1310 pctx->get_sample_position = fd_get_sample_position;
1311 }