freedreno: add debug helper to dump buffers
mesa.git: src/gallium/drivers/freedreno/freedreno_resource.c
1 /*
2 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include "util/format/u_format.h"
28 #include "util/format/u_format_rgtc.h"
29 #include "util/format/u_format_zs.h"
30 #include "util/u_inlines.h"
31 #include "util/u_transfer.h"
32 #include "util/u_string.h"
33 #include "util/u_surface.h"
34 #include "util/set.h"
35 #include "util/u_drm.h"
36
37 #include "decode/util.h"
38
39 #include "freedreno_resource.h"
40 #include "freedreno_batch_cache.h"
41 #include "freedreno_blitter.h"
42 #include "freedreno_fence.h"
43 #include "freedreno_screen.h"
44 #include "freedreno_surface.h"
45 #include "freedreno_context.h"
46 #include "freedreno_query_hw.h"
47 #include "freedreno_util.h"
48
49 #include "drm-uapi/drm_fourcc.h"
50 #include <errno.h>
51
52 /* XXX this should go away, needed for 'struct winsys_handle' */
53 #include "frontend/drm_driver.h"
54
55 /* A private modifier for now, so we have a way to request tiled but not
56 * compressed. It would perhaps be good to get real modifiers for the
57 * tiled formats, but would probably need to do some work to figure out
58 * the layout(s) of the tiled modes, and whether they are the same
59 * across generations.
60 */
61 #define FD_FORMAT_MOD_QCOM_TILED fourcc_mod_code(QCOM, 0xffffffff)
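/* (used below by fd_resource_uncompress() to request a tiled-but-not-UBWC
 * shadow of a compressed resource)
 */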
62
63 /**
64 * Go through the entire state and see if the resource is bound
65 * anywhere. If it is, mark the relevant state as dirty. This is
66 * called on realloc_bo to ensure the necessary state is re-
67 * emitted so the GPU looks at the new backing bo.
68 */
69 static void
70 rebind_resource_in_ctx(struct fd_context *ctx, struct fd_resource *rsc)
71 {
72 struct pipe_resource *prsc = &rsc->base;
73
74 if (ctx->rebind_resource)
75 ctx->rebind_resource(ctx, rsc);
76
77 /* VBOs */
78 if (rsc->dirty & FD_DIRTY_VTXBUF) {
79 struct fd_vertexbuf_stateobj *vb = &ctx->vtx.vertexbuf;
80 for (unsigned i = 0; i < vb->count && !(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
81 if (vb->vb[i].buffer.resource == prsc)
82 ctx->dirty |= FD_DIRTY_VTXBUF;
83 }
84 }
85
86 const enum fd_dirty_3d_state per_stage_dirty =
87 FD_DIRTY_CONST | FD_DIRTY_TEX | FD_DIRTY_IMAGE | FD_DIRTY_SSBO;
88
89 if (!(rsc->dirty & per_stage_dirty))
90 return;
91
92 /* per-shader-stage resources: */
93 for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
94 /* Constbufs.. note that constbuf[0] is normal uniforms emitted in
95 * cmdstream rather than by pointer..
96 */
97 if ((rsc->dirty & FD_DIRTY_CONST) &&
98 !(ctx->dirty_shader[stage] & FD_DIRTY_CONST)) {
99 struct fd_constbuf_stateobj *cb = &ctx->constbuf[stage];
100 const unsigned num_ubos = util_last_bit(cb->enabled_mask);
101 for (unsigned i = 1; i < num_ubos; i++) {
102 if (cb->cb[i].buffer == prsc) {
103 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_CONST;
104 ctx->dirty |= FD_DIRTY_CONST;
105 break;
106 }
107 }
108 }
109
110 /* Textures */
111 if ((rsc->dirty & FD_DIRTY_TEX) &&
112 !(ctx->dirty_shader[stage] & FD_DIRTY_TEX)) {
113 struct fd_texture_stateobj *tex = &ctx->tex[stage];
114 for (unsigned i = 0; i < tex->num_textures; i++) {
115 if (tex->textures[i] && (tex->textures[i]->texture == prsc)) {
116 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_TEX;
117 ctx->dirty |= FD_DIRTY_TEX;
118 break;
119 }
120 }
121 }
122
123 /* Images */
124 if ((rsc->dirty & FD_DIRTY_IMAGE) &&
125 !(ctx->dirty_shader[stage] & FD_DIRTY_IMAGE)) {
126 struct fd_shaderimg_stateobj *si = &ctx->shaderimg[stage];
127 const unsigned num_images = util_last_bit(si->enabled_mask);
128 for (unsigned i = 0; i < num_images; i++) {
129 if (si->si[i].resource == prsc) {
130 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_IMAGE;
131 ctx->dirty |= FD_DIRTY_IMAGE;
132 break;
133 }
134 }
135 }
136
137 /* SSBOs */
138 if ((rsc->dirty & FD_DIRTY_SSBO) &&
139 !(ctx->dirty_shader[stage] & FD_DIRTY_SSBO)) {
140 struct fd_shaderbuf_stateobj *sb = &ctx->shaderbuf[stage];
141 const unsigned num_ssbos = util_last_bit(sb->enabled_mask);
142 for (unsigned i = 0; i < num_ssbos; i++) {
143 if (sb->sb[i].buffer == prsc) {
144 ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_SSBO;
145 ctx->dirty |= FD_DIRTY_SSBO;
146 break;
147 }
148 }
149 }
150 }
151 }
152
153 static void
154 rebind_resource(struct fd_resource *rsc)
155 {
156 struct fd_screen *screen = fd_screen(rsc->base.screen);
157
158 fd_screen_lock(screen);
159 fd_resource_lock(rsc);
160
161 if (rsc->dirty)
162 list_for_each_entry (struct fd_context, ctx, &screen->context_list, node)
163 rebind_resource_in_ctx(ctx, rsc);
164
165 fd_resource_unlock(rsc);
166 fd_screen_unlock(screen);
167 }
168
169 static void
170 realloc_bo(struct fd_resource *rsc, uint32_t size)
171 {
172 struct pipe_resource *prsc = &rsc->base;
173 struct fd_screen *screen = fd_screen(rsc->base.screen);
174 uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
175 DRM_FREEDRENO_GEM_TYPE_KMEM |
176 COND(prsc->bind & PIPE_BIND_SCANOUT, DRM_FREEDRENO_GEM_SCANOUT);
177 /* TODO other flags? */
178
179 /* if we start using things other than write-combine,
180 * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
181 */
182
183 if (rsc->bo)
184 fd_bo_del(rsc->bo);
185
186 rsc->bo = fd_bo_new(screen->dev, size, flags, "%ux%ux%u@%u:%x",
187 prsc->width0, prsc->height0, prsc->depth0, rsc->layout.cpp, prsc->bind);
188
189 /* Zero out the UBWC area on allocation. This fixes intermittent failures
190 * with UBWC, which I suspect are due to the HW having a hard time
191 * interpreting arbitrary values populating the flags buffer when the BO
192 * was recycled through the bo cache (instead of fresh allocations from
193 * the kernel, which are zeroed). sleep(1) in this spot didn't work
194 * around the issue, but any memset value seems to.
195 */
196 if (rsc->layout.ubwc) {
197 void *buf = fd_bo_map(rsc->bo);
198 memset(buf, 0, rsc->layout.slices[0].offset);
199 }
200
201 rsc->seqno = p_atomic_inc_return(&screen->rsc_seqno);
202 util_range_set_empty(&rsc->valid_buffer_range);
203 fd_bc_invalidate_resource(rsc, true);
204 }
205
206 static void
207 do_blit(struct fd_context *ctx, const struct pipe_blit_info *blit, bool fallback)
208 {
209 struct pipe_context *pctx = &ctx->base;
210
211 /* TODO size threshold too?? */
212 if (fallback || !fd_blit(pctx, blit)) {
213 /* do blit on cpu: */
214 util_resource_copy_region(pctx,
215 blit->dst.resource, blit->dst.level, blit->dst.box.x,
216 blit->dst.box.y, blit->dst.box.z,
217 blit->src.resource, blit->src.level, &blit->src.box);
218 }
219 }
220
221 /**
222 * @rsc: the resource to shadow
223 * @level: the level to discard (if box != NULL, otherwise ignored)
224 * @box: the box to discard (or NULL if none)
225 * @modifier: the modifier for the new buffer state
226 */
227 static bool
228 fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
229 unsigned level, const struct pipe_box *box, uint64_t modifier)
230 {
231 struct pipe_context *pctx = &ctx->base;
232 struct pipe_resource *prsc = &rsc->base;
233 bool fallback = false;
234
235 if (prsc->next)
236 return false;
237
238 /* TODO: somehow munge dimensions and format to copy unsupported
239 * render target format to something that is supported?
240 */
241 if (!pctx->screen->is_format_supported(pctx->screen,
242 prsc->format, prsc->target, prsc->nr_samples,
243 prsc->nr_storage_samples,
244 PIPE_BIND_RENDER_TARGET))
245 fallback = true;
246
247 /* do shadowing back-blits on the cpu for buffers: */
248 if (prsc->target == PIPE_BUFFER)
249 fallback = true;
250
251 bool discard_whole_level = box && util_texrange_covers_whole_level(prsc, level,
252 box->x, box->y, box->z, box->width, box->height, box->depth);
253
254 /* TODO need to be more clever about current level */
255 if ((prsc->target >= PIPE_TEXTURE_2D) && box && !discard_whole_level)
256 return false;
257
258 struct pipe_resource *pshadow =
259 pctx->screen->resource_create_with_modifiers(pctx->screen,
260 prsc, &modifier, 1);
261
262 if (!pshadow)
263 return false;
264
265 assert(!ctx->in_shadow);
266 ctx->in_shadow = true;
267
268 /* get rid of any references that batch-cache might have to us (which
269 * should empty/destroy rsc->batches hashset)
270 */
271 fd_bc_invalidate_resource(rsc, false);
272 rebind_resource(rsc);
273
274 fd_screen_lock(ctx->screen);
275
276 /* Swap the backing bo's, so shadow becomes the old buffer, and
277 * blit from shadow to the new buffer.  From here on out, we
278 * cannot fail.
279 *
280 * Note that we need to do it in this order, otherwise if
281 * we go down the cpu blit path, the recursive transfer_map()
282 * sees the wrong status..
283 */
284 struct fd_resource *shadow = fd_resource(pshadow);
285
286 DBG("shadow: %p (%d) -> %p (%d)\n", rsc, rsc->base.reference.count,
287 shadow, shadow->base.reference.count);
288
289 /* TODO valid_buffer_range?? */
290 swap(rsc->bo, shadow->bo);
291 swap(rsc->write_batch, shadow->write_batch);
292 swap(rsc->layout, shadow->layout);
293 rsc->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);
294
295 /* at this point, the newly created shadow buffer is not referenced
296 * by any batches, but the existing rsc (probably) is. We need to
297 * transfer those references over:
298 */
299 debug_assert(shadow->batch_mask == 0);
300 struct fd_batch *batch;
301 foreach_batch(batch, &ctx->screen->batch_cache, rsc->batch_mask) {
302 struct set_entry *entry = _mesa_set_search(batch->resources, rsc);
303 _mesa_set_remove(batch->resources, entry);
304 _mesa_set_add(batch->resources, shadow);
305 }
306 swap(rsc->batch_mask, shadow->batch_mask);
307
308 fd_screen_unlock(ctx->screen);
309
310 struct pipe_blit_info blit = {};
311 blit.dst.resource = prsc;
312 blit.dst.format = prsc->format;
313 blit.src.resource = pshadow;
314 blit.src.format = pshadow->format;
315 blit.mask = util_format_get_mask(prsc->format);
316 blit.filter = PIPE_TEX_FILTER_NEAREST;
317
318 #define set_box(field, val) do { \
319 blit.dst.field = (val); \
320 blit.src.field = (val); \
321 } while (0)
322
323 /* blit the other levels in their entirety: */
324 for (unsigned l = 0; l <= prsc->last_level; l++) {
325 if (box && l == level)
326 continue;
327
328 /* just blit whole level: */
329 set_box(level, l);
330 set_box(box.width, u_minify(prsc->width0, l));
331 set_box(box.height, u_minify(prsc->height0, l));
332 set_box(box.depth, u_minify(prsc->depth0, l));
333
334 for (int i = 0; i < prsc->array_size; i++) {
335 set_box(box.z, i);
336 do_blit(ctx, &blit, fallback);
337 }
338 }
339
340 /* deal w/ current level specially, since we might need to split
341 * it up into a couple blits:
342 */
343 if (box && !discard_whole_level) {
344 set_box(level, level);
345
346 switch (prsc->target) {
347 case PIPE_BUFFER:
348 case PIPE_TEXTURE_1D:
349 set_box(box.y, 0);
350 set_box(box.z, 0);
351 set_box(box.height, 1);
352 set_box(box.depth, 1);
353
354 if (box->x > 0) {
355 set_box(box.x, 0);
356 set_box(box.width, box->x);
357
358 do_blit(ctx, &blit, fallback);
359 }
360 if ((box->x + box->width) < u_minify(prsc->width0, level)) {
361 set_box(box.x, box->x + box->width);
362 set_box(box.width, u_minify(prsc->width0, level) - (box->x + box->width));
363
364 do_blit(ctx, &blit, fallback);
365 }
366 break;
367 case PIPE_TEXTURE_2D:
368 /* TODO */
369 default:
370 unreachable("TODO");
371 }
372 }
373
374 ctx->in_shadow = false;
375
376 pipe_resource_reference(&pshadow, NULL);
377
378 return true;
379 }
380
381 /**
382 * Uncompress a UBWC-compressed buffer "in place".  This works basically
383 * like resource shadowing: create a new resource, do an uncompress blit,
384 * and swap the state between the shadow and the original resource, so it
385 * appears to the gallium frontends as if nothing changed.
386 */
387 void
388 fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc)
389 {
390 bool success =
391 fd_try_shadow_resource(ctx, rsc, 0, NULL, FD_FORMAT_MOD_QCOM_TILED);
392
393 /* shadow should not fail in any cases where we need to uncompress: */
394 debug_assert(success);
395 }
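/* Illustrative sketch only -- the real call sites live elsewhere in the
 * driver; the usage pattern is simply (with "needs_uncompressed_view"
 * being a hypothetical condition):
 *
 *    if (rsc->layout.ubwc && needs_uncompressed_view)
 *       fd_resource_uncompress(ctx, rsc);
 */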
396
397 /**
398 * Debug helper to hexdump a resource.
399 */
400 void
401 fd_resource_dump(struct fd_resource *rsc, const char *name)
402 {
403 fd_bo_cpu_prep(rsc->bo, NULL, DRM_FREEDRENO_PREP_READ);
404 printf("%s: \n", name);
405 dump_hex(fd_bo_map(rsc->bo), fd_bo_size(rsc->bo));
406 }
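/* Example (hypothetical debugging hack, not an actual call site in this
 * file):
 *
 *    fd_resource_dump(fd_resource(prsc), "my-buffer");
 */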
407
408 static struct fd_resource *
409 fd_alloc_staging(struct fd_context *ctx, struct fd_resource *rsc,
410 unsigned level, const struct pipe_box *box)
411 {
412 struct pipe_context *pctx = &ctx->base;
413 struct pipe_resource tmpl = rsc->base;
414
415 tmpl.width0 = box->width;
416 tmpl.height0 = box->height;
417 /* for array textures, box->depth is the array_size, whereas for
418 * 3d textures it is the depth:
419 */
420 if (tmpl.array_size > 1) {
421 if (tmpl.target == PIPE_TEXTURE_CUBE)
422 tmpl.target = PIPE_TEXTURE_2D_ARRAY;
423 tmpl.array_size = box->depth;
424 tmpl.depth0 = 1;
425 } else {
426 tmpl.array_size = 1;
427 tmpl.depth0 = box->depth;
428 }
429 tmpl.last_level = 0;
430 tmpl.bind |= PIPE_BIND_LINEAR;
431
432 struct pipe_resource *pstaging =
433 pctx->screen->resource_create(pctx->screen, &tmpl);
434 if (!pstaging)
435 return NULL;
436
437 return fd_resource(pstaging);
438 }
439
440 static void
441 fd_blit_from_staging(struct fd_context *ctx, struct fd_transfer *trans)
442 {
443 struct pipe_resource *dst = trans->base.resource;
444 struct pipe_blit_info blit = {};
445
446 blit.dst.resource = dst;
447 blit.dst.format = dst->format;
448 blit.dst.level = trans->base.level;
449 blit.dst.box = trans->base.box;
450 blit.src.resource = trans->staging_prsc;
451 blit.src.format = trans->staging_prsc->format;
452 blit.src.level = 0;
453 blit.src.box = trans->staging_box;
454 blit.mask = util_format_get_mask(trans->staging_prsc->format);
455 blit.filter = PIPE_TEX_FILTER_NEAREST;
456
457 do_blit(ctx, &blit, false);
458 }
459
460 static void
461 fd_blit_to_staging(struct fd_context *ctx, struct fd_transfer *trans)
462 {
463 struct pipe_resource *src = trans->base.resource;
464 struct pipe_blit_info blit = {};
465
466 blit.src.resource = src;
467 blit.src.format = src->format;
468 blit.src.level = trans->base.level;
469 blit.src.box = trans->base.box;
470 blit.dst.resource = trans->staging_prsc;
471 blit.dst.format = trans->staging_prsc->format;
472 blit.dst.level = 0;
473 blit.dst.box = trans->staging_box;
474 blit.mask = util_format_get_mask(trans->staging_prsc->format);
475 blit.filter = PIPE_TEX_FILTER_NEAREST;
476
477 do_blit(ctx, &blit, false);
478 }
479
480 static void fd_resource_transfer_flush_region(struct pipe_context *pctx,
481 struct pipe_transfer *ptrans,
482 const struct pipe_box *box)
483 {
484 struct fd_resource *rsc = fd_resource(ptrans->resource);
485
486 if (ptrans->resource->target == PIPE_BUFFER)
487 util_range_add(&rsc->base, &rsc->valid_buffer_range,
488 ptrans->box.x + box->x,
489 ptrans->box.x + box->x + box->width);
490 }
491
492 static void
493 flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
494 {
495 struct fd_batch *write_batch = NULL;
496
497 fd_screen_lock(ctx->screen);
498 fd_batch_reference_locked(&write_batch, rsc->write_batch);
499 fd_screen_unlock(ctx->screen);
500
501 if (usage & PIPE_TRANSFER_WRITE) {
502 struct fd_batch *batch, *batches[32] = {};
503 uint32_t batch_mask;
504
505 /* This is a bit awkward, probably a fd_batch_flush_locked()
506 * would make things simpler.. but we need to hold the lock
507 * to iterate the batches which reference this resource. So
508 * we must first grab references under a lock, then flush.
509 */
510 fd_screen_lock(ctx->screen);
511 batch_mask = rsc->batch_mask;
512 foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
513 fd_batch_reference_locked(&batches[batch->idx], batch);
514 fd_screen_unlock(ctx->screen);
515
516 foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
517 fd_batch_flush(batch);
518
519 foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
520 fd_batch_reference(&batches[batch->idx], NULL);
521 }
522 assert(rsc->batch_mask == 0);
523 } else if (write_batch) {
524 fd_batch_flush(write_batch);
525 }
526
527 fd_batch_reference(&write_batch, NULL);
528
529 assert(!rsc->write_batch);
530 }
531
532 static void
533 fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
534 {
535 flush_resource(fd_context(pctx), fd_resource(prsc), PIPE_TRANSFER_READ);
536 }
537
538 static void
539 fd_resource_transfer_unmap(struct pipe_context *pctx,
540 struct pipe_transfer *ptrans)
541 {
542 struct fd_context *ctx = fd_context(pctx);
543 struct fd_resource *rsc = fd_resource(ptrans->resource);
544 struct fd_transfer *trans = fd_transfer(ptrans);
545
546 if (trans->staging_prsc) {
547 if (ptrans->usage & PIPE_TRANSFER_WRITE)
548 fd_blit_from_staging(ctx, trans);
549 pipe_resource_reference(&trans->staging_prsc, NULL);
550 }
551
552 if (!(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
553 fd_bo_cpu_fini(rsc->bo);
554 }
555
556 util_range_add(&rsc->base, &rsc->valid_buffer_range,
557 ptrans->box.x,
558 ptrans->box.x + ptrans->box.width);
559
560 pipe_resource_reference(&ptrans->resource, NULL);
561 slab_free(&ctx->transfer_pool, ptrans);
562 }
563
564 static void *
565 fd_resource_transfer_map(struct pipe_context *pctx,
566 struct pipe_resource *prsc,
567 unsigned level, unsigned usage,
568 const struct pipe_box *box,
569 struct pipe_transfer **pptrans)
570 {
571 struct fd_context *ctx = fd_context(pctx);
572 struct fd_resource *rsc = fd_resource(prsc);
573 struct fd_transfer *trans;
574 struct pipe_transfer *ptrans;
575 enum pipe_format format = prsc->format;
576 uint32_t op = 0;
577 uint32_t offset;
578 char *buf;
579 int ret = 0;
580
581 DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
582 box->width, box->height, box->x, box->y);
583
584 if ((usage & PIPE_TRANSFER_MAP_DIRECTLY) && rsc->layout.tile_mode) {
585 DBG("CANNOT MAP DIRECTLY!\n");
586 return NULL;
587 }
588
589 ptrans = slab_alloc(&ctx->transfer_pool);
590 if (!ptrans)
591 return NULL;
592
593 /* slab_alloc_st() doesn't zero: */
594 trans = fd_transfer(ptrans);
595 memset(trans, 0, sizeof(*trans));
596
597 pipe_resource_reference(&ptrans->resource, prsc);
598 ptrans->level = level;
599 ptrans->usage = usage;
600 ptrans->box = *box;
601 ptrans->stride = fd_resource_pitch(rsc, level);
602 ptrans->layer_stride = fd_resource_layer_stride(rsc, level);
603
604 /* we always need a staging texture for tiled buffers:
605 *
606 * TODO we might sometimes want to *also* shadow the resource to avoid
607 * splitting a batch.. for ex, mid-frame texture uploads to a tiled
608 * texture.
609 */
610 if (rsc->layout.tile_mode) {
611 struct fd_resource *staging_rsc;
612
613 staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
614 if (staging_rsc) {
615 // TODO for PIPE_TRANSFER_READ, need to do untiling blit..
616 trans->staging_prsc = &staging_rsc->base;
617 trans->base.stride = fd_resource_pitch(staging_rsc, 0);
618 trans->base.layer_stride = fd_resource_layer_stride(staging_rsc, 0);
619 trans->staging_box = *box;
620 trans->staging_box.x = 0;
621 trans->staging_box.y = 0;
622 trans->staging_box.z = 0;
623
624 if (usage & PIPE_TRANSFER_READ) {
625 fd_blit_to_staging(ctx, trans);
626
627 fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe,
628 DRM_FREEDRENO_PREP_READ);
629 }
630
631 buf = fd_bo_map(staging_rsc->bo);
632 offset = 0;
633
634 *pptrans = ptrans;
635
636 ctx->stats.staging_uploads++;
637
638 return buf;
639 }
640 }
641
642 if (ctx->in_shadow && !(usage & PIPE_TRANSFER_READ))
643 usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
644
645 if (usage & PIPE_TRANSFER_READ)
646 op |= DRM_FREEDRENO_PREP_READ;
647
648 if (usage & PIPE_TRANSFER_WRITE)
649 op |= DRM_FREEDRENO_PREP_WRITE;
650
651 bool needs_flush = pending(rsc, !!(usage & PIPE_TRANSFER_WRITE));
652
653 if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
654 if (needs_flush || fd_resource_busy(rsc, op)) {
655 rebind_resource(rsc);
656 realloc_bo(rsc, fd_bo_size(rsc->bo));
657 }
658 } else if ((usage & PIPE_TRANSFER_WRITE) &&
659 prsc->target == PIPE_BUFFER &&
660 !util_ranges_intersect(&rsc->valid_buffer_range,
661 box->x, box->x + box->width)) {
662 /* We are trying to write to a previously uninitialized range. No need
663 * to wait.
664 */
665 } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
666 struct fd_batch *write_batch = NULL;
667
668 /* hold a reference, so it doesn't disappear under us: */
669 fd_context_lock(ctx);
670 fd_batch_reference_locked(&write_batch, rsc->write_batch);
671 fd_context_unlock(ctx);
672
673 if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
674 write_batch->back_blit) {
675 /* if only thing pending is a back-blit, we can discard it: */
676 fd_batch_reset(write_batch);
677 }
678
679 /* If the GPU is writing to the resource, or if it is reading from the
680 * resource and we're trying to write to it, flush the renders.
681 */
682 bool busy = needs_flush || fd_resource_busy(rsc, op);
683
684 /* if we need to flush/stall, see if we can make a shadow buffer
685 * to avoid this:
686 *
687 * TODO we could also go down this path when !reorder && !busy_for_read,
688 * ie. we only *don't* want to go down this path if the blit
689 * will trigger a flush!
690 */
691 if (ctx->screen->reorder && busy && !(usage & PIPE_TRANSFER_READ) &&
692 (usage & PIPE_TRANSFER_DISCARD_RANGE)) {
693 /* try shadowing only if it avoids a flush, otherwise staging would
694 * be better:
695 */
696 if (needs_flush && fd_try_shadow_resource(ctx, rsc, level,
697 box, DRM_FORMAT_MOD_LINEAR)) {
698 needs_flush = busy = false;
699 ctx->stats.shadow_uploads++;
700 } else {
701 struct fd_resource *staging_rsc;
702
703 if (needs_flush) {
704 flush_resource(ctx, rsc, usage);
705 needs_flush = false;
706 }
707
708 /* in this case, we don't need to shadow the whole resource,
709 * since any draw that references the previous contents has
710 * already had rendering flushed for all tiles. So we can
711 * use a staging buffer to do the upload.
712 */
713 staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
714 if (staging_rsc) {
715 trans->staging_prsc = &staging_rsc->base;
716 trans->base.stride = fd_resource_pitch(staging_rsc, 0);
717 trans->base.layer_stride =
718 fd_resource_layer_stride(staging_rsc, 0);
719 trans->staging_box = *box;
720 trans->staging_box.x = 0;
721 trans->staging_box.y = 0;
722 trans->staging_box.z = 0;
723 buf = fd_bo_map(staging_rsc->bo);
724 offset = 0;
725
726 *pptrans = ptrans;
727
728 fd_batch_reference(&write_batch, NULL);
729
730 ctx->stats.staging_uploads++;
731
732 return buf;
733 }
734 }
735 }
736
737 if (needs_flush) {
738 flush_resource(ctx, rsc, usage);
739 needs_flush = false;
740 }
741
742 fd_batch_reference(&write_batch, NULL);
743
744 /* The GPU keeps track of how the various bo's are being used, and
745 * will wait if necessary for the proper operation to have
746 * completed.
747 */
748 if (busy) {
749 ret = fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);
750 if (ret)
751 goto fail;
752 }
753 }
754
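/* Direct mapping path: map the bo itself and compute the byte offset of
 * the requested box within the layout (block-aligned x/y plus the
 * mip-level/layer offset):
 */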
755 buf = fd_bo_map(rsc->bo);
756 offset =
757 box->y / util_format_get_blockheight(format) * ptrans->stride +
758 box->x / util_format_get_blockwidth(format) * rsc->layout.cpp +
759 fd_resource_offset(rsc, level, box->z);
760
761 if (usage & PIPE_TRANSFER_WRITE)
762 rsc->valid = true;
763
764 *pptrans = ptrans;
765
766 return buf + offset;
767
768 fail:
769 fd_resource_transfer_unmap(pctx, ptrans);
770 return NULL;
771 }
772
773 static void
774 fd_resource_destroy(struct pipe_screen *pscreen,
775 struct pipe_resource *prsc)
776 {
777 struct fd_resource *rsc = fd_resource(prsc);
778 fd_bc_invalidate_resource(rsc, true);
779 if (rsc->bo)
780 fd_bo_del(rsc->bo);
781 if (rsc->scanout)
782 renderonly_scanout_destroy(rsc->scanout, fd_screen(pscreen)->ro);
783
784 util_range_destroy(&rsc->valid_buffer_range);
785 simple_mtx_destroy(&rsc->lock);
786 FREE(rsc);
787 }
788
789 static uint64_t
790 fd_resource_modifier(struct fd_resource *rsc)
791 {
792 if (!rsc->layout.tile_mode)
793 return DRM_FORMAT_MOD_LINEAR;
794
795 if (rsc->layout.ubwc_layer_size)
796 return DRM_FORMAT_MOD_QCOM_COMPRESSED;
797
798 /* TODO invent a modifier for tiled but not UBWC buffers: */
799 return DRM_FORMAT_MOD_INVALID;
800 }
801
802 static bool
803 fd_resource_get_handle(struct pipe_screen *pscreen,
804 struct pipe_context *pctx,
805 struct pipe_resource *prsc,
806 struct winsys_handle *handle,
807 unsigned usage)
808 {
809 struct fd_resource *rsc = fd_resource(prsc);
810
811 handle->modifier = fd_resource_modifier(rsc);
812
813 return fd_screen_bo_get_handle(pscreen, rsc->bo, rsc->scanout,
814 fd_resource_pitch(rsc, 0), handle);
815 }
816
817 /* special case to resize query buf after it has been allocated.. */
818 void
819 fd_resource_resize(struct pipe_resource *prsc, uint32_t sz)
820 {
821 struct fd_resource *rsc = fd_resource(prsc);
822
823 debug_assert(prsc->width0 == 0);
824 debug_assert(prsc->target == PIPE_BUFFER);
825 debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
826
827 prsc->width0 = sz;
828 realloc_bo(rsc, fd_screen(prsc->screen)->setup_slices(rsc));
829 }
830
831 static void
832 fd_resource_layout_init(struct pipe_resource *prsc)
833 {
834 struct fd_resource *rsc = fd_resource(prsc);
835 struct fdl_layout *layout = &rsc->layout;
836
837 layout->format = prsc->format;
838
839 layout->width0 = prsc->width0;
840 layout->height0 = prsc->height0;
841 layout->depth0 = prsc->depth0;
842
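/* bytes per block, folded with the sample count; e.g. a 4-byte-per-pixel
 * format with 4 samples gives cpp = 16 and cpp_shift = 4:
 */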
843 layout->cpp = util_format_get_blocksize(prsc->format);
844 layout->cpp *= fd_resource_nr_samples(prsc);
845 layout->cpp_shift = ffs(layout->cpp) - 1;
846 }
847
848 /**
849 * Helper that allocates a resource and resolves its layout (but doesn't
850 * allocate its bo).
851 *
852 * It returns a pipe_resource (as fd_resource_create_with_modifiers()
853 * would do), and also bo's minimum required size as an output argument.
854 */
855 static struct pipe_resource *
856 fd_resource_allocate_and_resolve(struct pipe_screen *pscreen,
857 const struct pipe_resource *tmpl,
858 const uint64_t *modifiers, int count, uint32_t *psize)
859 {
860 struct fd_screen *screen = fd_screen(pscreen);
861 struct fd_resource *rsc;
862 struct pipe_resource *prsc;
863 enum pipe_format format = tmpl->format;
864 uint32_t size;
865
866 rsc = CALLOC_STRUCT(fd_resource);
867 prsc = &rsc->base;
868
869 DBG("%p: target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
870 "nr_samples=%u, usage=%u, bind=%x, flags=%x", prsc,
871 tmpl->target, util_format_name(format),
872 tmpl->width0, tmpl->height0, tmpl->depth0,
873 tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
874 tmpl->usage, tmpl->bind, tmpl->flags);
875
876 if (!rsc)
877 return NULL;
878
879 *prsc = *tmpl;
880 fd_resource_layout_init(prsc);
881
882 #define LINEAR \
883 (PIPE_BIND_SCANOUT | \
884 PIPE_BIND_LINEAR | \
885 PIPE_BIND_DISPLAY_TARGET)
886
887 bool linear = drm_find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count);
888 if (tmpl->bind & LINEAR)
889 linear = true;
890
891 if (fd_mesa_debug & FD_DBG_NOTILE)
892 linear = true;
893
894 /* Normally allow buffer compression for non-shared buffers; for shared
895 * buffers, only allow it if the QCOM_COMPRESSED modifier is
896 * requested:
897 *
898 * TODO we should probably also limit tiled in a similar way,
899 * except we don't have a format modifier for tiled. (We probably
900 * should.)
901 */
902 bool allow_ubwc = drm_find_modifier(DRM_FORMAT_MOD_INVALID, modifiers, count);
903 if (tmpl->bind & PIPE_BIND_SHARED)
904 allow_ubwc = drm_find_modifier(DRM_FORMAT_MOD_QCOM_COMPRESSED, modifiers, count);
905
906 allow_ubwc &= !(fd_mesa_debug & FD_DBG_NOUBWC);
907
908 pipe_reference_init(&prsc->reference, 1);
909
910 prsc->screen = pscreen;
911
912 if (screen->tile_mode &&
913 (tmpl->target != PIPE_BUFFER) &&
914 !linear) {
915 rsc->layout.tile_mode = screen->tile_mode(prsc);
916 }
917
918 util_range_init(&rsc->valid_buffer_range);
919
920 simple_mtx_init(&rsc->lock, mtx_plain);
921
922 rsc->internal_format = format;
923
924 rsc->layout.ubwc = rsc->layout.tile_mode && is_a6xx(screen) && allow_ubwc;
925
926 if (prsc->target == PIPE_BUFFER) {
927 assert(prsc->format == PIPE_FORMAT_R8_UNORM);
928 size = prsc->width0;
929 fdl_layout_buffer(&rsc->layout, size);
930 } else {
931 size = screen->setup_slices(rsc);
932 }
933
934 /* special case for hw-query buffer, which we need to allocate before we
935 * know the size:
936 */
937 if (size == 0) {
938 /* note: the == (instead of &) here is semi-intentional */
939 debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
940 return prsc;
941 }
942
943 /* Set the layer size if the (non-a6xx) backend hasn't done so. */
944 if (rsc->layout.layer_first && !rsc->layout.layer_size) {
945 rsc->layout.layer_size = align(size, 4096);
946 size = rsc->layout.layer_size * prsc->array_size;
947 }
948
949 if (fd_mesa_debug & FD_DBG_LAYOUT)
950 fdl_dump_layout(&rsc->layout);
951
952 /* Hand out the resolved size. */
953 if (psize)
954 *psize = size;
955
956 return prsc;
957 }
958
959 /**
960 * Create a new texture object, using the given template info.
961 */
962 static struct pipe_resource *
963 fd_resource_create_with_modifiers(struct pipe_screen *pscreen,
964 const struct pipe_resource *tmpl,
965 const uint64_t *modifiers, int count)
966 {
967 struct fd_screen *screen = fd_screen(pscreen);
968 struct fd_resource *rsc;
969 struct pipe_resource *prsc;
970 uint32_t size;
971
972 /* when using kmsro, scanout buffers are allocated on the display device.
973 * create_with_modifiers() doesn't give us usage flags, so we have to
974 * assume that all calls with modifiers are scanout-possible.
975 */
976 if (screen->ro &&
977 ((tmpl->bind & PIPE_BIND_SCANOUT) ||
978 !(count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID))) {
979 struct pipe_resource scanout_templat = *tmpl;
980 struct renderonly_scanout *scanout;
981 struct winsys_handle handle;
982
983 /* note: alignment is wrong for a6xx */
984 scanout_templat.width0 = align(tmpl->width0, screen->gmem_alignw);
985
986 scanout = renderonly_scanout_for_resource(&scanout_templat,
987 screen->ro, &handle);
988 if (!scanout)
989 return NULL;
990
991 renderonly_scanout_destroy(scanout, screen->ro);
992
993 assert(handle.type == WINSYS_HANDLE_TYPE_FD);
994 rsc = fd_resource(pscreen->resource_from_handle(pscreen, tmpl,
995 &handle,
996 PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE));
997 close(handle.handle);
998 if (!rsc)
999 return NULL;
1000
1001 return &rsc->base;
1002 }
1003
1004 prsc = fd_resource_allocate_and_resolve(pscreen, tmpl, modifiers, count, &size);
1005 if (!prsc)
1006 return NULL;
1007 rsc = fd_resource(prsc);
1008
1009 realloc_bo(rsc, size);
1010 if (!rsc->bo)
1011 goto fail;
1012
1013 return prsc;
1014 fail:
1015 fd_resource_destroy(pscreen, prsc);
1016 return NULL;
1017 }
1018
1019 static struct pipe_resource *
1020 fd_resource_create(struct pipe_screen *pscreen,
1021 const struct pipe_resource *tmpl)
1022 {
1023 const uint64_t mod = DRM_FORMAT_MOD_INVALID;
1024 return fd_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
1025 }
1026
1027 /**
1028 * Create a texture from a winsys_handle. The handle is often created in
1029 * another process by first creating a pipe texture and then calling
1030 * resource_get_handle.
1031 */
1032 static struct pipe_resource *
1033 fd_resource_from_handle(struct pipe_screen *pscreen,
1034 const struct pipe_resource *tmpl,
1035 struct winsys_handle *handle, unsigned usage)
1036 {
1037 struct fd_screen *screen = fd_screen(pscreen);
1038 struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
1039 struct fdl_slice *slice = fd_resource_slice(rsc, 0);
1040 struct pipe_resource *prsc = &rsc->base;
1041
1042 DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
1043 "nr_samples=%u, usage=%u, bind=%x, flags=%x",
1044 tmpl->target, util_format_name(tmpl->format),
1045 tmpl->width0, tmpl->height0, tmpl->depth0,
1046 tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
1047 tmpl->usage, tmpl->bind, tmpl->flags);
1048
1049 if (!rsc)
1050 return NULL;
1051
1052 *prsc = *tmpl;
1053 fd_resource_layout_init(prsc);
1054
1055 pipe_reference_init(&prsc->reference, 1);
1056
1057 prsc->screen = pscreen;
1058
1059 util_range_init(&rsc->valid_buffer_range);
1060
1061 simple_mtx_init(&rsc->lock, mtx_plain);
1062
1063 rsc->bo = fd_screen_bo_from_handle(pscreen, handle);
1064 if (!rsc->bo)
1065 goto fail;
1066
1067 rsc->internal_format = tmpl->format;
1068 rsc->layout.pitch0 = handle->stride;
1069 slice->offset = handle->offset;
1070 slice->size0 = handle->stride * prsc->height0;
1071
1072 /* use a pitchalign of gmem_alignw pixels, because GMEM resolve for
1073 * lower alignments is not implemented (but possible for a6xx at least)
1074 *
1075 * for UBWC-enabled resources, layout_resource_for_modifier will further
1076 * validate the pitch and set the right pitchalign
1077 */
1078 rsc->layout.pitchalign =
1079 fdl_cpp_shift(&rsc->layout) + util_logbase2(screen->gmem_alignw);
1080
1081 /* apply the minimum pitchalign (note: actually 4 for a3xx but doesn't matter) */
1082 if (is_a6xx(screen) || is_a5xx(screen))
1083 rsc->layout.pitchalign = MAX2(rsc->layout.pitchalign, 6);
1084 else
1085 rsc->layout.pitchalign = MAX2(rsc->layout.pitchalign, 5);
1086
1087 if (rsc->layout.pitch0 < (prsc->width0 * rsc->layout.cpp) ||
1088 fd_resource_pitch(rsc, 0) != rsc->layout.pitch0)
1089 goto fail;
1090
1091 assert(rsc->layout.cpp);
1092
1093 if (screen->layout_resource_for_modifier(rsc, handle->modifier) < 0)
1094 goto fail;
1095
1096 if (screen->ro) {
1097 rsc->scanout =
1098 renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
1099 /* failure is expected in some cases.. */
1100 }
1101
1102 rsc->valid = true;
1103
1104 return prsc;
1105
1106 fail:
1107 fd_resource_destroy(pscreen, prsc);
1108 return NULL;
1109 }
1110
1111 bool
1112 fd_render_condition_check(struct pipe_context *pctx)
1113 {
1114 struct fd_context *ctx = fd_context(pctx);
1115
1116 if (!ctx->cond_query)
1117 return true;
1118
1119 union pipe_query_result res = { 0 };
1120 bool wait =
1121 ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
1122 ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;
1123
1124 if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
1125 return (bool)res.u64 != ctx->cond_cond;
1126
1127 return true;
1128 }
1129
1130 static void
1131 fd_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
1132 {
1133 struct fd_context *ctx = fd_context(pctx);
1134 struct fd_resource *rsc = fd_resource(prsc);
1135
1136 /*
1137 * TODO I guess we could track that the resource is invalidated and
1138 * use that as a hint to realloc rather than stall in _transfer_map(),
1139 * even in the non-DISCARD_WHOLE_RESOURCE case?
1140 *
1141 * Note: we set dirty bits to trigger the invalidate logic in fd_draw_vbo
1142 */
1143
1144 if (rsc->write_batch) {
1145 struct fd_batch *batch = rsc->write_batch;
1146 struct pipe_framebuffer_state *pfb = &batch->framebuffer;
1147
1148 if (pfb->zsbuf && pfb->zsbuf->texture == prsc) {
1149 batch->resolve &= ~(FD_BUFFER_DEPTH | FD_BUFFER_STENCIL);
1150 ctx->dirty |= FD_DIRTY_ZSA;
1151 }
1152
1153 for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
1154 if (pfb->cbufs[i] && pfb->cbufs[i]->texture == prsc) {
1155 batch->resolve &= ~(PIPE_CLEAR_COLOR0 << i);
1156 ctx->dirty |= FD_DIRTY_FRAMEBUFFER;
1157 }
1158 }
1159 }
1160
1161 rsc->valid = false;
1162 }
1163
1164 static enum pipe_format
1165 fd_resource_get_internal_format(struct pipe_resource *prsc)
1166 {
1167 return fd_resource(prsc)->internal_format;
1168 }
1169
1170 static void
1171 fd_resource_set_stencil(struct pipe_resource *prsc,
1172 struct pipe_resource *stencil)
1173 {
1174 fd_resource(prsc)->stencil = fd_resource(stencil);
1175 }
1176
1177 static struct pipe_resource *
1178 fd_resource_get_stencil(struct pipe_resource *prsc)
1179 {
1180 struct fd_resource *rsc = fd_resource(prsc);
1181 if (rsc->stencil)
1182 return &rsc->stencil->base;
1183 return NULL;
1184 }
1185
1186 static const struct u_transfer_vtbl transfer_vtbl = {
1187 .resource_create = fd_resource_create,
1188 .resource_destroy = fd_resource_destroy,
1189 .transfer_map = fd_resource_transfer_map,
1190 .transfer_flush_region = fd_resource_transfer_flush_region,
1191 .transfer_unmap = fd_resource_transfer_unmap,
1192 .get_internal_format = fd_resource_get_internal_format,
1193 .set_stencil = fd_resource_set_stencil,
1194 .get_stencil = fd_resource_get_stencil,
1195 };
1196
1197 static const uint64_t supported_modifiers[] = {
1198 DRM_FORMAT_MOD_LINEAR,
1199 };
1200
1201 static int
1202 fd_layout_resource_for_modifier(struct fd_resource *rsc, uint64_t modifier)
1203 {
1204 switch (modifier) {
1205 case DRM_FORMAT_MOD_LINEAR:
1206 /* The dri gallium frontend will pass DRM_FORMAT_MOD_INVALID to us
1207 * when it's called through any of the non-modifier BO create entry
1208 * points. Other drivers will determine tiling from the kernel or
1209 * other legacy backchannels, but for freedreno it just means
1210 * LINEAR. */
1211 case DRM_FORMAT_MOD_INVALID:
1212 return 0;
1213 default:
1214 return -1;
1215 }
1216 }
1217
1218 static struct pipe_resource *
1219 fd_resource_from_memobj(struct pipe_screen *pscreen,
1220 const struct pipe_resource *tmpl,
1221 struct pipe_memory_object *pmemobj,
1222 uint64_t offset)
1223 {
1224 struct fd_screen *screen = fd_screen(pscreen);
1225 struct fd_memory_object *memobj = fd_memory_object(pmemobj);
1226 struct pipe_resource *prsc;
1227 struct fd_resource *rsc;
1228 uint32_t size;
1229 assert(memobj->bo);
1230
1231 /* We shouldn't get a scanout buffer here. */
1232 assert(!(tmpl->bind & PIPE_BIND_SCANOUT));
1233
1234 uint64_t modifiers = DRM_FORMAT_MOD_INVALID;
1235 if (tmpl->bind & PIPE_BIND_LINEAR) {
1236 modifiers = DRM_FORMAT_MOD_LINEAR;
1237 } else if (is_a6xx(screen) && tmpl->width0 >= FDL_MIN_UBWC_WIDTH) {
1238 modifiers = DRM_FORMAT_MOD_QCOM_COMPRESSED;
1239 }
1240
1241 /* Allocate new pipe resource. */
1242 prsc = fd_resource_allocate_and_resolve(pscreen, tmpl, &modifiers, 1, &size);
1243 if (!prsc)
1244 return NULL;
1245 rsc = fd_resource(prsc);
1246
1247 /* bo's size has to be large enough, otherwise cleanup resource and fail
1248 * gracefully.
1249 */
1250 if (fd_bo_size(memobj->bo) < size) {
1251 fd_resource_destroy(pscreen, prsc);
1252 return NULL;
1253 }
1254
1255 /* Share the bo with the memory object. */
1256 rsc->bo = fd_bo_ref(memobj->bo);
1257
1258 return prsc;
1259 }
1260
1261 static struct pipe_memory_object *
1262 fd_memobj_create_from_handle(struct pipe_screen *pscreen,
1263 struct winsys_handle *whandle,
1264 bool dedicated)
1265 {
1266 struct fd_memory_object *memobj = CALLOC_STRUCT(fd_memory_object);
1267 if (!memobj)
1268 return NULL;
1269
1270 struct fd_bo *bo = fd_screen_bo_from_handle(pscreen, whandle);
1271 if (!bo) {
1272 free(memobj);
1273 return NULL;
1274 }
1275
1276 memobj->b.dedicated = dedicated;
1277 memobj->bo = bo;
1278
1279 return &memobj->b;
1280 }
1281
1282 static void
1283 fd_memobj_destroy(struct pipe_screen *pscreen,
1284 struct pipe_memory_object *pmemobj)
1285 {
1286 struct fd_memory_object *memobj = fd_memory_object(pmemobj);
1287
1288 assert(memobj->bo);
1289 fd_bo_del(memobj->bo);
1290
1291 free(pmemobj);
1292 }
1293
1294 void
1295 fd_resource_screen_init(struct pipe_screen *pscreen)
1296 {
1297 struct fd_screen *screen = fd_screen(pscreen);
1298 bool fake_rgtc = screen->gpu_id < 400;
1299
1300 pscreen->resource_create = u_transfer_helper_resource_create;
1301 /* NOTE: u_transfer_helper does not yet support the _with_modifiers()
1302 * variant:
1303 */
1304 pscreen->resource_create_with_modifiers = fd_resource_create_with_modifiers;
1305 pscreen->resource_from_handle = fd_resource_from_handle;
1306 pscreen->resource_get_handle = fd_resource_get_handle;
1307 pscreen->resource_destroy = u_transfer_helper_resource_destroy;
1308
1309 pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
1310 true, false, fake_rgtc, true);
1311
1312 if (!screen->layout_resource_for_modifier)
1313 screen->layout_resource_for_modifier = fd_layout_resource_for_modifier;
1314 if (!screen->supported_modifiers) {
1315 screen->supported_modifiers = supported_modifiers;
1316 screen->num_supported_modifiers = ARRAY_SIZE(supported_modifiers);
1317 }
1318
1319 /* GL_EXT_memory_object */
1320 pscreen->memobj_create_from_handle = fd_memobj_create_from_handle;
1321 pscreen->memobj_destroy = fd_memobj_destroy;
1322 pscreen->resource_from_memobj = fd_resource_from_memobj;
1323 }
1324
1325 static void
1326 fd_get_sample_position(struct pipe_context *context,
1327 unsigned sample_count, unsigned sample_index,
1328 float *pos_out)
1329 {
1330 /* The following is copied from nouveau/nv50 except for position
1331 * values, which are taken from the blob driver */
1332 static const uint8_t pos1[1][2] = { { 0x8, 0x8 } };
1333 static const uint8_t pos2[2][2] = {
1334 { 0xc, 0xc }, { 0x4, 0x4 } };
1335 static const uint8_t pos4[4][2] = {
1336 { 0x6, 0x2 }, { 0xe, 0x6 },
1337 { 0x2, 0xa }, { 0xa, 0xe } };
1338 /* TODO needs to be verified on supported hw */
1339 static const uint8_t pos8[8][2] = {
1340 { 0x9, 0x5 }, { 0x7, 0xb },
1341 { 0xd, 0x9 }, { 0x5, 0x3 },
1342 { 0x3, 0xd }, { 0x1, 0x7 },
1343 { 0xb, 0xf }, { 0xf, 0x1 } };
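/* table entries are in 1/16ths of a pixel; e.g. 0x8 corresponds to 0.5,
 * the pixel center (see the /16.0f conversion below)
 */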
1344
1345 const uint8_t (*ptr)[2];
1346
1347 switch (sample_count) {
1348 case 1:
1349 ptr = pos1;
1350 break;
1351 case 2:
1352 ptr = pos2;
1353 break;
1354 case 4:
1355 ptr = pos4;
1356 break;
1357 case 8:
1358 ptr = pos8;
1359 break;
1360 default:
1361 assert(0);
1362 return;
1363 }
1364
1365 pos_out[0] = ptr[sample_index][0] / 16.0f;
1366 pos_out[1] = ptr[sample_index][1] / 16.0f;
1367 }
1368
1369 static void
1370 fd_blit_pipe(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
1371 {
1372 /* wrap fd_blit to return void */
1373 fd_blit(pctx, blit_info);
1374 }
1375
1376 void
1377 fd_resource_context_init(struct pipe_context *pctx)
1378 {
1379 pctx->transfer_map = u_transfer_helper_transfer_map;
1380 pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
1381 pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
1382 pctx->buffer_subdata = u_default_buffer_subdata;
1383 pctx->texture_subdata = u_default_texture_subdata;
1384 pctx->create_surface = fd_create_surface;
1385 pctx->surface_destroy = fd_surface_destroy;
1386 pctx->resource_copy_region = fd_resource_copy_region;
1387 pctx->blit = fd_blit_pipe;
1388 pctx->flush_resource = fd_flush_resource;
1389 pctx->invalidate_resource = fd_invalidate_resource;
1390 pctx->get_sample_position = fd_get_sample_position;
1391 }