panfrost: Remove the panfrost_driver abstraction
[mesa.git] / src/gallium/drivers/panfrost/pan_resource.c
/*
 * Copyright (C) 2008 VMware, Inc.
 * Copyright (C) 2014 Broadcom
 * Copyright (C) 2018-2019 Alyssa Rosenzweig
 * Copyright (C) 2019 Collabora
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *   Tomeu Vizoso <tomeu.vizoso@collabora.com>
 *   Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 *
 */

#include <xf86drm.h>
#include <fcntl.h>
#include "drm-uapi/drm_fourcc.h"

#include "state_tracker/winsys_handle.h"
#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"
#include "util/u_transfer_helper.h"

#include "pan_context.h"
#include "pan_screen.h"
#include "pan_resource.h"
#include "pan_util.h"
#include "pan_tiling.h"

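/* Import a buffer shared by another process or device as a dma-buf (prime
 * FD). Only slice 0's stride is filled in from the handle, and that slice
 * is marked initialized so reads see the imported contents. On split
 * display/render setups (screen->ro), a renderonly scanout object is also
 * created so the buffer can be handed back to the KMS device. */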
static struct pipe_resource *
panfrost_resource_from_handle(struct pipe_screen *pscreen,
                              const struct pipe_resource *templat,
                              struct winsys_handle *whandle,
                              unsigned usage)
{
        struct panfrost_screen *screen = pan_screen(pscreen);
        struct panfrost_resource *rsc;
        struct pipe_resource *prsc;

        assert(whandle->type == WINSYS_HANDLE_TYPE_FD);

        rsc = rzalloc(pscreen, struct panfrost_resource);
        if (!rsc)
                return NULL;

        prsc = &rsc->base;

        *prsc = *templat;

        pipe_reference_init(&prsc->reference, 1);
        prsc->screen = pscreen;

        rsc->bo = panfrost_drm_import_bo(screen, whandle);
        rsc->bo->slices[0].stride = whandle->stride;
        rsc->bo->slices[0].initialized = true;

        if (screen->ro) {
                rsc->scanout =
                        renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
                /* failure is expected in some cases.. */
        }

        return prsc;
}

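/* Export a resource to the winsys: either as a KMS handle (preferring the
 * renderonly scanout BO when one exists) or as a dma-buf FD. Shared
 * (flink-style) handles are not supported. */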
static boolean
panfrost_resource_get_handle(struct pipe_screen *pscreen,
                             struct pipe_context *ctx,
                             struct pipe_resource *pt,
                             struct winsys_handle *handle,
                             unsigned usage)
{
        struct panfrost_screen *screen = pan_screen(pscreen);
        struct panfrost_resource *rsrc = (struct panfrost_resource *) pt;
        struct renderonly_scanout *scanout = rsrc->scanout;

        handle->modifier = DRM_FORMAT_MOD_INVALID;

        if (handle->type == WINSYS_HANDLE_TYPE_SHARED) {
                return FALSE;
        } else if (handle->type == WINSYS_HANDLE_TYPE_KMS) {
                if (renderonly_get_handle(scanout, handle))
                        return TRUE;

                handle->handle = rsrc->bo->gem_handle;
                handle->stride = rsrc->bo->slices[0].stride;
                return TRUE;
        } else if (handle->type == WINSYS_HANDLE_TYPE_FD) {
                if (scanout) {
                        struct drm_prime_handle args = {
                                .handle = scanout->handle,
                                .flags = DRM_CLOEXEC,
                        };

                        int ret = drmIoctl(screen->ro->kms_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
                        if (ret == -1)
                                return FALSE;

                        handle->stride = scanout->stride;
                        handle->handle = args.fd;

                        return TRUE;
                } else
                        return panfrost_drm_export_bo(screen, rsrc->bo->gem_handle,
                                                      rsrc->bo->slices[0].stride,
                                                      handle);
        }

        return FALSE;
}

static void
panfrost_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
        //DBG("TODO %s\n", __func__);
}

static struct pipe_surface *
panfrost_create_surface(struct pipe_context *pipe,
                        struct pipe_resource *pt,
                        const struct pipe_surface *surf_tmpl)
{
        struct pipe_surface *ps = NULL;

        ps = rzalloc(pipe, struct pipe_surface);

        if (ps) {
                pipe_reference_init(&ps->reference, 1);
                pipe_resource_reference(&ps->texture, pt);
                ps->context = pipe;
                ps->format = surf_tmpl->format;

                if (pt->target != PIPE_BUFFER) {
                        assert(surf_tmpl->u.tex.level <= pt->last_level);
                        ps->width = u_minify(pt->width0, surf_tmpl->u.tex.level);
                        ps->height = u_minify(pt->height0, surf_tmpl->u.tex.level);
                        ps->u.tex.level = surf_tmpl->u.tex.level;
                        ps->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
                        ps->u.tex.last_layer = surf_tmpl->u.tex.last_layer;
                } else {
                        /* setting width as number of elements should get us correct renderbuffer width */
                        ps->width = surf_tmpl->u.buf.last_element - surf_tmpl->u.buf.first_element + 1;
                        ps->height = pt->height0;
                        ps->u.buf.first_element = surf_tmpl->u.buf.first_element;
                        ps->u.buf.last_element = surf_tmpl->u.buf.last_element;
                        assert(ps->u.buf.first_element <= ps->u.buf.last_element);
                        assert(ps->u.buf.last_element < ps->width);
                }
        }

        return ps;
}

static void
panfrost_surface_destroy(struct pipe_context *pipe,
                         struct pipe_surface *surf)
{
        assert(surf->texture);
        pipe_resource_reference(&surf->texture, NULL);
        ralloc_free(surf);
}

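/* Compute the offset, stride, and size of each mip level, plus the total
 * BO size. Tiled and renderable surfaces are padded to 16x16 tiles; linear
 * strides are rounded up to 64 bytes (a cache line). For arrays and
 * cubemaps the whole miptree is replicated per layer (cubemap_stride);
 * for 3D textures cubemap_stride instead holds the 2D layer size of the
 * first level. */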
static void
panfrost_setup_slices(const struct pipe_resource *tmpl, struct panfrost_bo *bo)
{
        unsigned width = tmpl->width0;
        unsigned height = tmpl->height0;
        unsigned depth = tmpl->depth0;
        unsigned bytes_per_pixel = util_format_get_blocksize(tmpl->format);

        assert(depth > 0);

        /* Tiled operates blockwise; linear is packed. Also, anything
         * we render to has to be tile-aligned. Maybe not strictly
         * necessary, but we're not *that* pressed for memory and it
         * makes code a lot simpler */

        bool renderable = tmpl->bind &
                          (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL);
        bool tiled = bo->layout == PAN_TILED;
        bool should_align = renderable || tiled;

        /* We don't know how to specify a 2D stride for 3D textures */

        bool can_align_stride =
                tmpl->target != PIPE_TEXTURE_3D;

        should_align &= can_align_stride;

        unsigned offset = 0;
        unsigned size_2d = 0;

        for (unsigned l = 0; l <= tmpl->last_level; ++l) {
                struct panfrost_slice *slice = &bo->slices[l];

                unsigned effective_width = width;
                unsigned effective_height = height;
                unsigned effective_depth = depth;

                if (should_align) {
                        effective_width = ALIGN(effective_width, 16);
                        effective_height = ALIGN(effective_height, 16);

                        /* We don't need to align depth */
                }

                slice->offset = offset;

                /* Compute the would-be stride */
                unsigned stride = bytes_per_pixel * effective_width;

                /* ..but cache-line align it for performance */
                if (can_align_stride && bo->layout == PAN_LINEAR)
                        stride = ALIGN(stride, 64);

                slice->stride = stride;

                unsigned slice_one_size = slice->stride * effective_height;
                unsigned slice_full_size = slice_one_size * effective_depth;

                /* Report 2D size for 3D texturing */

                if (l == 0)
                        size_2d = slice_one_size;

                offset += slice_full_size;

                width = u_minify(width, 1);
                height = u_minify(height, 1);
                depth = u_minify(depth, 1);
        }

        assert(tmpl->array_size);

        if (tmpl->target != PIPE_TEXTURE_3D) {
                /* Arrays and cubemaps have the entire miptree duplicated */

                bo->cubemap_stride = ALIGN(offset, 64);
                bo->size = ALIGN(bo->cubemap_stride * tmpl->array_size, 4096);
        } else {
                /* 3D strides across the 2D layers */
                assert(tmpl->array_size == 1);

                bo->cubemap_stride = size_2d;
                bo->size = ALIGN(offset, 4096);
        }
}

static struct panfrost_bo *
panfrost_create_bo(struct panfrost_screen *screen, const struct pipe_resource *template)
{
        struct panfrost_bo *bo = rzalloc(screen, struct panfrost_bo);
        pipe_reference_init(&bo->reference, 1);

        /* Based on the usage, figure out what storage will be used. There are
         * various tradeoffs:
         *
         * Linear: the basic format, bad for memory bandwidth, bad for cache
         * use. Zero-copy, though. Renderable.
         *
         * Tiled: Not compressed, but cache-optimized. Expensive to write into
         * (due to software tiling), but cheap to sample from. Ideal for most
         * textures.
         *
         * AFBC: Compressed and renderable (so always desirable for non-scanout
         * rendertargets). Cheap to sample from. The format is a black box, so
         * we can't read/write from software.
         */

        /* Tiling textures is almost always faster, unless we only use it once */

        bool is_texture = (template->bind & PIPE_BIND_SAMPLER_VIEW);
        bool is_2d = template->depth0 == 1 && template->array_size == 1;
        bool is_streaming = (template->usage == PIPE_USAGE_STREAM);

        bool should_tile = !is_streaming && is_texture && is_2d;

        /* Set the layout appropriately */
        bo->layout = should_tile ? PAN_TILED : PAN_LINEAR;

        panfrost_setup_slices(template, bo);

        if (bo->layout == PAN_TILED || bo->layout == PAN_LINEAR) {
                struct panfrost_memory mem;

                panfrost_drm_allocate_slab(screen, &mem, bo->size / 4096, true, 0, 0, 0);

                bo->cpu = mem.cpu;
                bo->gpu = mem.gpu;
                bo->gem_handle = mem.gem_handle;
        }

        return bo;
}

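/* Create a resource. Scanout-capable and shared resources are allocated
 * through the renderonly layer and then re-imported via
 * resource_from_handle; everything else gets a GPU BO allocated directly. */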
static struct pipe_resource *
panfrost_resource_create(struct pipe_screen *screen,
                         const struct pipe_resource *template)
{
        struct panfrost_resource *so = rzalloc(screen, struct panfrost_resource);
        struct panfrost_screen *pscreen = (struct panfrost_screen *) screen;

        so->base = *template;
        so->base.screen = screen;

        pipe_reference_init(&so->base.reference, 1);

        /* Make sure we're familiar */
        switch (template->target) {
        case PIPE_BUFFER:
        case PIPE_TEXTURE_1D:
        case PIPE_TEXTURE_2D:
        case PIPE_TEXTURE_3D:
        case PIPE_TEXTURE_CUBE:
        case PIPE_TEXTURE_RECT:
        case PIPE_TEXTURE_2D_ARRAY:
                break;
        default:
                DBG("Unknown texture target %d\n", template->target);
                assert(0);
        }

        util_range_init(&so->valid_buffer_range);

        if (template->bind & PIPE_BIND_DISPLAY_TARGET ||
            template->bind & PIPE_BIND_SCANOUT ||
            template->bind & PIPE_BIND_SHARED) {
                struct pipe_resource scanout_templat = *template;
                struct renderonly_scanout *scanout;
                struct winsys_handle handle;

                scanout = renderonly_scanout_for_resource(&scanout_templat,
                                                          pscreen->ro, &handle);
                if (!scanout)
                        return NULL;

                assert(handle.type == WINSYS_HANDLE_TYPE_FD);
                /* TODO: handle modifiers? */
                so = pan_resource(screen->resource_from_handle(screen, template,
                                                               &handle,
                                                               PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE));
                close(handle.handle);
                if (!so)
                        return NULL;

                so->scanout = scanout;
                pscreen->display_target = so;
        } else {
                so->bo = panfrost_create_bo(pscreen, template);
        }

        return (struct pipe_resource *)so;
}

static void
panfrost_destroy_bo(struct panfrost_screen *screen, struct panfrost_bo *bo)
{
        if ((bo->layout == PAN_LINEAR || bo->layout == PAN_TILED) &&
            !bo->imported) {
                struct panfrost_memory mem = {
                        .cpu = bo->cpu,
                        .gpu = bo->gpu,
                        .size = bo->size,
                        .gem_handle = bo->gem_handle,
                };

                panfrost_drm_free_slab(screen, &mem);
        }

        if (bo->layout == PAN_AFBC) {
                /* TODO */
                DBG("--leaking afbc (%d bytes)--\n", bo->afbc_metadata_size);
        }

        if (bo->has_checksum) {
                struct panfrost_memory mem = {
                        .cpu = bo->checksum_slab.cpu,
                        .gpu = bo->checksum_slab.gpu,
                        .size = bo->checksum_slab.size,
                        .gem_handle = bo->checksum_slab.gem_handle,
                };

                panfrost_drm_free_slab(screen, &mem);
        }

        if (bo->imported) {
                panfrost_drm_free_imported_bo(screen, bo);
        }

        ralloc_free(bo);
}

void
panfrost_bo_reference(struct panfrost_bo *bo)
{
        pipe_reference(NULL, &bo->reference);
}

void
panfrost_bo_unreference(struct pipe_screen *screen, struct panfrost_bo *bo)
{
        /* When the reference count goes to zero, we need to clean up */

        if (pipe_reference(&bo->reference, NULL)) {
                panfrost_destroy_bo(pan_screen(screen), bo);
        }
}

static void
panfrost_resource_destroy(struct pipe_screen *screen,
                          struct pipe_resource *pt)
{
        struct panfrost_screen *pscreen = pan_screen(screen);
        struct panfrost_resource *rsrc = (struct panfrost_resource *) pt;

        if (rsrc->scanout)
                renderonly_scanout_destroy(rsrc->scanout, pscreen->ro);

        if (rsrc->bo)
                panfrost_bo_unreference(screen, rsrc->bo);

        util_range_destroy(&rsrc->valid_buffer_range);
        ralloc_free(rsrc);
}

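/* Map a resource for CPU access. Linear resources are mapped directly at
 * the right offset within the BO. Tiled and AFBC resources go through a
 * CPU-side staging allocation: for reads, tiled contents are detiled into
 * it here (AFBC reads are not yet implemented), and writes are tiled back
 * into the BO in panfrost_transfer_unmap. Reading a resource that is
 * currently bound as a render target forces a flush first. */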
static void *
panfrost_transfer_map(struct pipe_context *pctx,
                      struct pipe_resource *resource,
                      unsigned level,
                      unsigned usage, /* a combination of PIPE_TRANSFER_x */
                      const struct pipe_box *box,
                      struct pipe_transfer **out_transfer)
{
        int bytes_per_pixel = util_format_get_blocksize(resource->format);
        struct panfrost_resource *rsrc = pan_resource(resource);
        struct panfrost_bo *bo = rsrc->bo;

        struct panfrost_gtransfer *transfer = rzalloc(pctx, struct panfrost_gtransfer);
        transfer->base.level = level;
        transfer->base.usage = usage;
        transfer->base.box = *box;

        pipe_resource_reference(&transfer->base.resource, resource);

        *out_transfer = &transfer->base;

        /* Check if we're bound for rendering and this is a read pixels. If so,
         * we need to flush */

        struct panfrost_context *ctx = pan_context(pctx);
        struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;

        bool is_bound = false;

        for (unsigned c = 0; c < fb->nr_cbufs; ++c) {
                is_bound |= fb->cbufs[c]->texture == resource;
        }

        if (is_bound && (usage & PIPE_TRANSFER_READ)) {
                assert(level == 0);
                panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
        }

        /* TODO: Respect usage flags */

        if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
                /* TODO: reallocate */
                //printf("debug: Missed reallocate\n");
        } else if ((usage & PIPE_TRANSFER_WRITE)
                   && resource->target == PIPE_BUFFER
                   && !util_ranges_intersect(&rsrc->valid_buffer_range, box->x, box->x + box->width)) {
                /* No flush for writes to uninitialized */
        } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
                if (usage & PIPE_TRANSFER_WRITE) {
                        /* STUB: flush reading */
                        //printf("debug: missed reading flush %d\n", resource->target);
                } else if (usage & PIPE_TRANSFER_READ) {
                        /* STUB: flush writing */
                        //printf("debug: missed writing flush %d (%d-%d)\n", resource->target, box->x, box->x + box->width);
                } else {
                        /* Why are you even mapping?! */
                }
        }

        if (bo->layout != PAN_LINEAR) {
                /* Non-linear resources need to be indirectly mapped */

                if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
                        return NULL;

                transfer->base.stride = box->width * bytes_per_pixel;
                transfer->base.layer_stride = transfer->base.stride * box->height;
                transfer->map = rzalloc_size(transfer, transfer->base.layer_stride * box->depth);
                assert(box->depth == 1);

                if ((usage & PIPE_TRANSFER_READ) && bo->slices[level].initialized) {
                        if (bo->layout == PAN_AFBC) {
                                DBG("Unimplemented: reads from AFBC");
                        } else if (bo->layout == PAN_TILED) {
                                panfrost_load_tiled_image(
                                        transfer->map,
                                        bo->cpu + bo->slices[level].offset,
                                        box,
                                        transfer->base.stride,
                                        bo->slices[level].stride,
                                        util_format_get_blocksize(resource->format));
                        }
                }

                return transfer->map;
        } else {
                transfer->base.stride = bo->slices[level].stride;
                transfer->base.layer_stride = bo->cubemap_stride;

                /* By mapping direct-write, we're implicitly already
                 * initialized (maybe), so be conservative */

                if ((usage & PIPE_TRANSFER_WRITE) && (usage & PIPE_TRANSFER_MAP_DIRECTLY))
                        bo->slices[level].initialized = true;

                return bo->cpu
                       + bo->slices[level].offset
                       + transfer->base.box.z * bo->cubemap_stride
                       + transfer->base.box.y * bo->slices[level].stride
                       + transfer->base.box.x * bytes_per_pixel;
        }
}

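/* Unmap: for write transfers that went through the staging buffer, tile
 * the CPU-side data back into the BO (AFBC writes are not yet implemented),
 * then record the written range and drop the transfer. */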
static void
panfrost_transfer_unmap(struct pipe_context *pctx,
                        struct pipe_transfer *transfer)
{
        /* Gallium expects writeback here, so we tile */

        struct panfrost_gtransfer *trans = pan_transfer(transfer);
        struct panfrost_resource *prsrc = (struct panfrost_resource *) transfer->resource;

        if (trans->map) {
                struct panfrost_bo *bo = prsrc->bo;

                if (transfer->usage & PIPE_TRANSFER_WRITE) {
                        unsigned level = transfer->level;
                        bo->slices[level].initialized = true;

                        if (bo->layout == PAN_AFBC) {
                                DBG("Unimplemented: writes to AFBC\n");
                        } else if (bo->layout == PAN_TILED) {
                                assert(transfer->box.depth == 1);

                                panfrost_store_tiled_image(
                                        bo->cpu + bo->slices[level].offset,
                                        trans->map,
                                        &transfer->box,
                                        bo->slices[level].stride,
                                        transfer->stride,
                                        util_format_get_blocksize(prsrc->base.format));
                        }
                }
        }

        util_range_add(&prsrc->valid_buffer_range,
                       transfer->box.x,
                       transfer->box.x + transfer->box.width);

        /* Dereference the resource */
        pipe_resource_reference(&transfer->resource, NULL);

        /* Transfer itself is RALLOCed at the moment */
        ralloc_free(transfer);
}

static void
panfrost_transfer_flush_region(struct pipe_context *pctx,
                               struct pipe_transfer *transfer,
                               const struct pipe_box *box)
{
        struct panfrost_resource *rsc = pan_resource(transfer->resource);

        if (transfer->resource->target == PIPE_BUFFER) {
                util_range_add(&rsc->valid_buffer_range,
                               transfer->box.x + box->x,
                               transfer->box.x + box->x + box->width);
        }
}

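/* pb_slabs callbacks. Each slab is a single kernel allocation carved into
 * fixed-size entries; an entry may be reclaimed once its owner has marked
 * it freed. */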
static struct pb_slab *
panfrost_slab_alloc(void *priv, unsigned heap, unsigned entry_size, unsigned group_index)
{
        struct panfrost_screen *screen = (struct panfrost_screen *) priv;
        struct panfrost_memory *mem = rzalloc(screen, struct panfrost_memory);

        size_t slab_size = (1 << (MAX_SLAB_ENTRY_SIZE + 1));

        mem->slab.num_entries = slab_size / entry_size;
        mem->slab.num_free = mem->slab.num_entries;

        LIST_INITHEAD(&mem->slab.free);
        for (unsigned i = 0; i < mem->slab.num_entries; ++i) {
                /* Create a slab entry */
                struct panfrost_memory_entry *entry = rzalloc(mem, struct panfrost_memory_entry);
                entry->offset = entry_size * i;

                entry->base.slab = &mem->slab;
                entry->base.group_index = group_index;

                LIST_ADDTAIL(&entry->base.head, &mem->slab.free);
        }

        /* Actually allocate the memory from kernel-space. Mapped, same_va, no
         * special flags */

        panfrost_drm_allocate_slab(screen, mem, slab_size / 4096, true, 0, 0, 0);

        return &mem->slab;
}

static bool
panfrost_slab_can_reclaim(void *priv, struct pb_slab_entry *entry)
{
        struct panfrost_memory_entry *p_entry = (struct panfrost_memory_entry *) entry;
        return p_entry->freed;
}

static void
panfrost_slab_free(void *priv, struct pb_slab *slab)
{
        struct panfrost_memory *mem = (struct panfrost_memory *) slab;
        struct panfrost_screen *screen = (struct panfrost_screen *) priv;

        panfrost_drm_free_slab(screen, mem);
        ralloc_free(mem);
}

static void
panfrost_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
        //DBG("TODO %s\n", __func__);
}

static enum pipe_format
panfrost_resource_get_internal_format(struct pipe_resource *prsrc)
{
        return prsrc->format;
}

static void
panfrost_resource_set_stencil(struct pipe_resource *prsrc,
                              struct pipe_resource *stencil)
{
        pan_resource(prsrc)->separate_stencil = pan_resource(stencil);
}

static struct pipe_resource *
panfrost_resource_get_stencil(struct pipe_resource *prsrc)
{
        return &pan_resource(prsrc)->separate_stencil->base;
}

static const struct u_transfer_vtbl transfer_vtbl = {
        .resource_create = panfrost_resource_create,
        .resource_destroy = panfrost_resource_destroy,
        .transfer_map = panfrost_transfer_map,
        .transfer_unmap = panfrost_transfer_unmap,
        .transfer_flush_region = panfrost_transfer_flush_region,
        .get_internal_format = panfrost_resource_get_internal_format,
        .set_stencil = panfrost_resource_set_stencil,
        .get_stencil = panfrost_resource_get_stencil,
};

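/* Screen-level setup: resource creation/destruction and the transfer hooks
 * are routed through u_transfer_helper, which layers handling for awkward
 * cases (such as separate depth/stencil resources, see the vtbl above) on
 * top of the driver callbacks. A pb_slabs allocator is also initialized
 * here. */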
void
panfrost_resource_screen_init(struct panfrost_screen *pscreen)
{
        //pscreen->base.resource_create_with_modifiers =
        //        panfrost_resource_create_with_modifiers;
        pscreen->base.resource_create = u_transfer_helper_resource_create;
        pscreen->base.resource_destroy = u_transfer_helper_resource_destroy;
        pscreen->base.resource_from_handle = panfrost_resource_from_handle;
        pscreen->base.resource_get_handle = panfrost_resource_get_handle;
        pscreen->base.transfer_helper = u_transfer_helper_create(&transfer_vtbl,
                                                                 true, false,
                                                                 true, true);

        pb_slabs_init(&pscreen->slabs,
                      MIN_SLAB_ENTRY_SIZE,
                      MAX_SLAB_ENTRY_SIZE,

                      3, /* Number of heaps */

                      pscreen,

                      panfrost_slab_can_reclaim,
                      panfrost_slab_alloc,
                      panfrost_slab_free);
}

void
panfrost_resource_screen_deinit(struct panfrost_screen *pscreen)
{
        pb_slabs_deinit(&pscreen->slabs);
}

void
panfrost_resource_context_init(struct pipe_context *pctx)
{
        pctx->transfer_map = u_transfer_helper_transfer_map;
        pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
        pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
        pctx->buffer_subdata = u_default_buffer_subdata;
        pctx->create_surface = panfrost_create_surface;
        pctx->surface_destroy = panfrost_surface_destroy;
        pctx->resource_copy_region = util_resource_copy_region;
        pctx->blit = panfrost_blit;
        pctx->flush_resource = panfrost_flush_resource;
        pctx->invalidate_resource = panfrost_invalidate_resource;
        pctx->texture_subdata = u_default_texture_subdata;
}