panfrost: Cleanup needless if in create_bo
[mesa.git] / src / gallium / drivers / panfrost / pan_resource.c
/**************************************************************************
 *
 * Copyright 2008 VMware, Inc.
 * Copyright 2014 Broadcom
 * Copyright 2018 Alyssa Rosenzweig
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <xf86drm.h>
#include <fcntl.h>
#include "drm-uapi/drm_fourcc.h"

#include "state_tracker/winsys_handle.h"
#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"
#include "util/u_transfer_helper.h"

#include "pan_context.h"
#include "pan_screen.h"
#include "pan_resource.h"
#include "pan_swizzle.h"
#include "pan_util.h"

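/* Import a resource from an external winsys handle (a dmabuf fd). The
 * imported BO is wrapped in a panfrost_resource; if a render-only (KMS)
 * device is in use, we also try to create a scanout import for it. */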
static struct pipe_resource *
panfrost_resource_from_handle(struct pipe_screen *pscreen,
                              const struct pipe_resource *templat,
                              struct winsys_handle *whandle,
                              unsigned usage)
{
        struct panfrost_screen *screen = pan_screen(pscreen);
        struct panfrost_resource *rsc;
        struct pipe_resource *prsc;

        assert(whandle->type == WINSYS_HANDLE_TYPE_FD);

        rsc = CALLOC_STRUCT(panfrost_resource);
        if (!rsc)
                return NULL;

        prsc = &rsc->base;

        *prsc = *templat;

        pipe_reference_init(&prsc->reference, 1);
        prsc->screen = pscreen;

        rsc->bo = screen->driver->import_bo(screen, whandle);

        if (screen->ro) {
                rsc->scanout =
                        renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
                /* failure is expected in some cases.. */
        }

        return prsc;
}

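/* Export a resource as a winsys handle. For KMS and dmabuf exports we
 * prefer the scanout (render-only) handle when one exists and fall back to
 * the GPU BO otherwise. The reported stride is unaligned for now. */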
static boolean
panfrost_resource_get_handle(struct pipe_screen *pscreen,
                             struct pipe_context *ctx,
                             struct pipe_resource *pt,
                             struct winsys_handle *handle,
                             unsigned usage)
{
        struct panfrost_screen *screen = pan_screen(pscreen);
        struct panfrost_resource *rsrc = (struct panfrost_resource *) pt;
        struct renderonly_scanout *scanout = rsrc->scanout;
        int bytes_per_pixel = util_format_get_blocksize(rsrc->base.format);
        int stride = bytes_per_pixel * rsrc->base.width0; /* TODO: Alignment? */

        handle->stride = stride;
        handle->modifier = DRM_FORMAT_MOD_INVALID;

        if (handle->type == WINSYS_HANDLE_TYPE_SHARED) {
                return FALSE;
        } else if (handle->type == WINSYS_HANDLE_TYPE_KMS) {
                if (renderonly_get_handle(scanout, handle))
                        return TRUE;

                handle->handle = rsrc->bo->gem_handle;
                return TRUE;
        } else if (handle->type == WINSYS_HANDLE_TYPE_FD) {
                if (scanout) {
                        struct drm_prime_handle args = {
                                .handle = scanout->handle,
                                .flags = DRM_CLOEXEC,
                        };

                        int ret = drmIoctl(screen->ro->kms_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
                        if (ret == -1)
                                return FALSE;

                        handle->handle = args.fd;

                        return TRUE;
                } else
                        return screen->driver->export_bo(screen, rsrc->bo->gem_handle, handle);
        }

        return FALSE;
}

static void
panfrost_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
        //DBG("TODO %s\n", __func__);
}

static void
panfrost_blit(struct pipe_context *pipe,
              const struct pipe_blit_info *info)
{
        /* STUB */
        DBG("Skipping blit XXX\n");
        return;
}

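/* Create a pipe_surface, a single-level view of a resource used as a render
 * target. Textures take the mip level and layer range from the template;
 * for buffers the width is the number of elements covered. */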
static struct pipe_surface *
panfrost_create_surface(struct pipe_context *pipe,
                        struct pipe_resource *pt,
                        const struct pipe_surface *surf_tmpl)
{
        struct pipe_surface *ps = NULL;

        ps = CALLOC_STRUCT(pipe_surface);

        if (ps) {
                pipe_reference_init(&ps->reference, 1);
                pipe_resource_reference(&ps->texture, pt);
                ps->context = pipe;
                ps->format = surf_tmpl->format;

                if (pt->target != PIPE_BUFFER) {
                        assert(surf_tmpl->u.tex.level <= pt->last_level);
                        ps->width = u_minify(pt->width0, surf_tmpl->u.tex.level);
                        ps->height = u_minify(pt->height0, surf_tmpl->u.tex.level);
                        ps->u.tex.level = surf_tmpl->u.tex.level;
                        ps->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
                        ps->u.tex.last_layer = surf_tmpl->u.tex.last_layer;
                } else {
                        /* setting width as number of elements should get us correct renderbuffer width */
                        ps->width = surf_tmpl->u.buf.last_element - surf_tmpl->u.buf.first_element + 1;
                        ps->height = pt->height0;
                        ps->u.buf.first_element = surf_tmpl->u.buf.first_element;
                        ps->u.buf.last_element = surf_tmpl->u.buf.last_element;
                        assert(ps->u.buf.first_element <= ps->u.buf.last_element);
                        assert(ps->u.buf.last_element < ps->width);
                }
        }

        return ps;
}

static void
panfrost_surface_destroy(struct pipe_context *pipe,
                         struct pipe_surface *surf)
{
        assert(surf->texture);
        pipe_resource_reference(&surf->texture, NULL);
        free(surf);
}

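/* Allocate backing storage for a resource. Tiled textures get a malloc'd
 * CPU buffer per mip level and are swizzled into GPU memory at unmap time;
 * linear resources get a single slab allocation mapped for both CPU and GPU
 * access. The size estimate ignores alignment for now (see the TODOs). */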
static struct panfrost_bo *
panfrost_create_bo(struct panfrost_screen *screen, const struct pipe_resource *template)
{
        struct panfrost_bo *bo = CALLOC_STRUCT(panfrost_bo);

        /* Calculate the size of the bo */

        int bytes_per_pixel = util_format_get_blocksize(template->format);
        int stride = bytes_per_pixel * template->width0; /* TODO: Alignment? */
        size_t sz = stride;

        if (template->height0) sz *= template->height0;
        if (template->depth0) sz *= template->depth0;

        /* Tiling textures is almost always faster, unless we only use it once */
        bo->tiled = (template->usage != PIPE_USAGE_STREAM) && (template->bind & PIPE_BIND_SAMPLER_VIEW);

        if (bo->tiled) {
                /* For tiled, we don't map directly, so just malloc any old buffer */

                for (int l = 0; l < (template->last_level + 1); ++l) {
                        bo->cpu[l] = malloc(sz);
                        sz >>= 2;
                }
        } else {
                /* But for linear, we can! */

                struct pb_slab_entry *entry = pb_slab_alloc(&screen->slabs, sz, HEAP_TEXTURE);
                struct panfrost_memory_entry *p_entry = (struct panfrost_memory_entry *) entry;
                struct panfrost_memory *backing = (struct panfrost_memory *) entry->slab;
                bo->entry[0] = p_entry;
                bo->cpu[0] = backing->cpu + p_entry->offset;
                bo->gpu[0] = backing->gpu + p_entry->offset;

                /* TODO: Mipmap */
        }

        return bo;
}

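/* Create a new resource. Scanout-capable resources (display targets, shared
 * or scanout buffers) are allocated by the render-only layer and re-imported
 * through resource_from_handle; everything else gets a BO from
 * panfrost_create_bo above. */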
static struct pipe_resource *
panfrost_resource_create(struct pipe_screen *screen,
                         const struct pipe_resource *template)
{
        struct panfrost_resource *so = CALLOC_STRUCT(panfrost_resource);
        struct panfrost_screen *pscreen = (struct panfrost_screen *) screen;

        so->base = *template;
        so->base.screen = screen;

        pipe_reference_init(&so->base.reference, 1);

        /* Make sure we're familiar */
        switch (template->target) {
        case PIPE_BUFFER:
        case PIPE_TEXTURE_1D:
        case PIPE_TEXTURE_2D:
        case PIPE_TEXTURE_3D:
        case PIPE_TEXTURE_RECT:
                break;
        default:
                DBG("Unknown texture target %d\n", template->target);
                assert(0);
        }

        if (template->bind & PIPE_BIND_DISPLAY_TARGET ||
            template->bind & PIPE_BIND_SCANOUT ||
            template->bind & PIPE_BIND_SHARED) {
                struct pipe_resource scanout_templat = *template;
                struct renderonly_scanout *scanout;
                struct winsys_handle handle;

                /* TODO: align width0 and height0? */

                scanout = renderonly_scanout_for_resource(&scanout_templat,
                                                          pscreen->ro, &handle);
                if (!scanout)
                        return NULL;

                assert(handle.type == WINSYS_HANDLE_TYPE_FD);
                /* TODO: handle modifiers? */
                so = pan_resource(screen->resource_from_handle(screen, template,
                                                               &handle,
                                                               PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE));
                close(handle.handle);
                if (!so)
                        return NULL;

                so->scanout = scanout;
                pscreen->display_target = so;
        } else {
                so->bo = panfrost_create_bo(pscreen, template);
        }

        return (struct pipe_resource *)so;
}

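/* Release the storage behind a BO: slab entries are marked freed and
 * returned to the allocator, tiled CPU shadow buffers are free()d, and
 * imported BOs are handed back to the driver. AFBC and checksum memory is
 * currently leaked (see the TODOs below). */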
static void
panfrost_destroy_bo(struct panfrost_screen *screen, struct panfrost_bo *pbo)
{
        struct panfrost_bo *bo = (struct panfrost_bo *)pbo;

        for (int l = 0; l < MAX_MIP_LEVELS; ++l) {
                if (bo->entry[l] != NULL) {
                        /* Most allocations have an entry to free */
                        bo->entry[l]->freed = true;
                        pb_slab_free(&screen->slabs, &bo->entry[l]->base);
                }
        }

        if (bo->tiled) {
                /* Tiled has a malloc'd CPU, so just plain ol' free needed */

                for (int l = 0; l < MAX_MIP_LEVELS; ++l) {
                        free(bo->cpu[l]);
                }
        }

        if (bo->has_afbc) {
                /* TODO */
                DBG("--leaking afbc (%d bytes)--\n", bo->afbc_metadata_size);
        }

        if (bo->has_checksum) {
                /* TODO */
                DBG("--leaking checksum (%zd bytes)--\n", bo->checksum_slab.size);
        }

        if (bo->imported) {
                screen->driver->free_imported_bo(screen, bo);
        }
}

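/* Destroy a resource: drop the scanout object (if any), destroy the backing
 * BO, and free the wrapper. */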
static void
panfrost_resource_destroy(struct pipe_screen *screen,
                          struct pipe_resource *pt)
{
        struct panfrost_screen *pscreen = pan_screen(screen);
        struct panfrost_resource *rsrc = (struct panfrost_resource *) pt;

        if (rsrc->scanout)
                renderonly_scanout_destroy(rsrc->scanout, pscreen->ro);

        if (rsrc->bo)
                panfrost_destroy_bo(pscreen, rsrc->bo);

        FREE(rsrc);
}

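/* Return the CPU pointer for the mip level covered by a transfer. Tiled
 * textures cannot be mapped directly; depth/stencil resources are redirected
 * to the context's untiled depth/stencil buffer. */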
static uint8_t *
panfrost_map_bo(struct panfrost_context *ctx, struct pipe_transfer *transfer)
{
        struct panfrost_bo *bo = (struct panfrost_bo *)pan_resource(transfer->resource)->bo;

        /* If non-zero level, it's a mipmapped resource and needs to be treated as such */
        bo->is_mipmap |= transfer->level;

        if (transfer->usage & PIPE_TRANSFER_MAP_DIRECTLY && bo->tiled) {
                /* We cannot directly map tiled textures */
                return NULL;
        }

        if (transfer->resource->bind & PIPE_BIND_DEPTH_STENCIL) {
                /* Mipmapped readpixels?! */
                assert(transfer->level == 0);

                /* Set the CPU mapping to that of the depth/stencil buffer in memory, untiled */
                bo->cpu[transfer->level] = ctx->depth_stencil_buffer.cpu;
        }

        return bo->cpu[transfer->level];
}

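/* Map a resource for CPU access. We build the pipe_transfer describing the
 * request, flush the pipeline for scanout/shared resources so their contents
 * are current, and return a pointer into the BO's CPU mapping offset by the
 * requested box. */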
static void *
panfrost_transfer_map(struct pipe_context *pctx,
                      struct pipe_resource *resource,
                      unsigned level,
                      unsigned usage, /* a combination of PIPE_TRANSFER_x */
                      const struct pipe_box *box,
                      struct pipe_transfer **out_transfer)
{
        struct panfrost_context *ctx = pan_context(pctx);
        int bytes_per_pixel = util_format_get_blocksize(resource->format);
        int stride = bytes_per_pixel * resource->width0; /* TODO: Alignment? */
        uint8_t *cpu;

        struct pipe_transfer *transfer = CALLOC_STRUCT(pipe_transfer);
        transfer->level = level;
        transfer->usage = usage;
        transfer->box = *box;
        transfer->stride = stride;
        assert(!transfer->box.z);

        pipe_resource_reference(&transfer->resource, resource);

        *out_transfer = transfer;

        if (resource->bind & PIPE_BIND_DISPLAY_TARGET ||
            resource->bind & PIPE_BIND_SCANOUT ||
            resource->bind & PIPE_BIND_SHARED) {
                /* Mipmapped readpixels?! */
                assert(level == 0);

                /* Force a flush -- kill the pipeline */
                panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
        }

        cpu = panfrost_map_bo(ctx, transfer);
        if (cpu == NULL)
                return NULL;

        return cpu + transfer->box.x * bytes_per_pixel + transfer->box.y * stride;
}

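/* Swizzle (tile) one mip level of a texture into GPU memory. A slab entry of
 * the estimated swizzled size is allocated each time, releasing any entry
 * from a previous upload first, and the linear CPU copy is swizzled directly
 * into the new GPU-visible allocation. */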
static void
panfrost_tile_texture(struct panfrost_screen *screen, struct panfrost_resource *rsrc, int level)
{
        struct panfrost_bo *bo = (struct panfrost_bo *)rsrc->bo;
        int bytes_per_pixel = util_format_get_blocksize(rsrc->base.format);
        int stride = bytes_per_pixel * rsrc->base.width0; /* TODO: Alignment? */

        int width = rsrc->base.width0 >> level;
        int height = rsrc->base.height0 >> level;

        /* Estimate swizzled bitmap size. Slight overestimates are fine.
         * Underestimates will result in memory corruption or worse. */

        int swizzled_sz = panfrost_swizzled_size(width, height, bytes_per_pixel);

        /* Save the entry. But if there was already an entry here (from a
         * previous upload of the resource), free that one so we don't leak */

        if (bo->entry[level] != NULL) {
                bo->entry[level]->freed = true;
                pb_slab_free(&screen->slabs, &bo->entry[level]->base);
        }

        /* Allocate the transfer given that known size but do not copy */
        struct pb_slab_entry *entry = pb_slab_alloc(&screen->slabs, swizzled_sz, HEAP_TEXTURE);
        struct panfrost_memory_entry *p_entry = (struct panfrost_memory_entry *) entry;
        struct panfrost_memory *backing = (struct panfrost_memory *) entry->slab;
        uint8_t *swizzled = backing->cpu + p_entry->offset;

        bo->entry[level] = p_entry;
        bo->gpu[level] = backing->gpu + p_entry->offset;

        /* Run actual texture swizzle, writing directly to the mapped
         * GPU chunk we allocated */

        panfrost_texture_swizzle(width, height, bytes_per_pixel, stride, bo->cpu[level], swizzled);
}

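/* Writeback hook for the end of a transfer: for write-mapped, tiled 2D
 * textures this is where the CPU copy gets swizzled into GPU memory via
 * panfrost_tile_texture. */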
static void
panfrost_unmap_bo(struct panfrost_context *ctx,
                  struct pipe_transfer *transfer)
{
        struct panfrost_bo *bo = (struct panfrost_bo *)pan_resource(transfer->resource)->bo;

        if (transfer->usage & PIPE_TRANSFER_WRITE) {
                if (transfer->resource->target == PIPE_TEXTURE_2D) {
                        struct panfrost_resource *prsrc = (struct panfrost_resource *) transfer->resource;

                        /* Gallium thinks writeback happens here; instead, this is our cue to tile */
                        if (bo->has_afbc) {
                                DBG("Warning: writes to afbc surface can't possibly work out well for you...\n");
                        } else if (bo->tiled) {
                                struct pipe_context *gallium = (struct pipe_context *) ctx;
                                struct panfrost_screen *screen = pan_screen(gallium->screen);
                                panfrost_tile_texture(screen, prsrc, transfer->level);
                        }
                }
        }
}

static void
panfrost_transfer_unmap(struct pipe_context *pctx,
                        struct pipe_transfer *transfer)
{
        struct panfrost_context *ctx = pan_context(pctx);

        panfrost_unmap_bo(ctx, transfer);

        /* Unreference the resource */
        pipe_resource_reference(&transfer->resource, NULL);

        /* Transfer itself is CALLOCed at the moment */
        free(transfer);
}

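/* pb_slabs callbacks. A slab is one panfrost_memory allocation carved into
 * fixed-size entries; panfrost_slab_alloc builds the entry list and then
 * asks the driver for the kernel allocation backing it. */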
static struct pb_slab *
panfrost_slab_alloc(void *priv, unsigned heap, unsigned entry_size, unsigned group_index)
{
        struct panfrost_screen *screen = (struct panfrost_screen *) priv;
        struct panfrost_memory *mem = CALLOC_STRUCT(panfrost_memory);

        size_t slab_size = (1 << (MAX_SLAB_ENTRY_SIZE + 1));

        mem->slab.num_entries = slab_size / entry_size;
        mem->slab.num_free = mem->slab.num_entries;

        LIST_INITHEAD(&mem->slab.free);
        for (unsigned i = 0; i < mem->slab.num_entries; ++i) {
                /* Create a slab entry */
                struct panfrost_memory_entry *entry = CALLOC_STRUCT(panfrost_memory_entry);
                entry->offset = entry_size * i;

                entry->base.slab = &mem->slab;
                entry->base.group_index = group_index;

                LIST_ADDTAIL(&entry->base.head, &mem->slab.free);
        }

        /* Actually allocate the memory from kernel-space. Mapped, same_va, no
         * special flags */

        screen->driver->allocate_slab(screen, mem, slab_size / 4096, true, 0, 0, 0);

        return &mem->slab;
}

static bool
panfrost_slab_can_reclaim(void *priv, struct pb_slab_entry *entry)
{
        struct panfrost_memory_entry *p_entry = (struct panfrost_memory_entry *) entry;
        return p_entry->freed;
}

static void
panfrost_slab_free(void *priv, struct pb_slab *slab)
{
        struct panfrost_memory *mem = (struct panfrost_memory *) slab;
        struct panfrost_screen *screen = (struct panfrost_screen *) priv;

        screen->driver->free_slab(screen, mem);
}

static void
panfrost_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
        //DBG("TODO %s\n", __func__);
}

static enum pipe_format
panfrost_resource_get_internal_format(struct pipe_resource *prsrc)
{
        return prsrc->format;
}

static void
panfrost_resource_set_stencil(struct pipe_resource *prsrc,
                              struct pipe_resource *stencil)
{
        pan_resource(prsrc)->separate_stencil = pan_resource(stencil);
}

static struct pipe_resource *
panfrost_resource_get_stencil(struct pipe_resource *prsrc)
{
        return &pan_resource(prsrc)->separate_stencil->base;
}

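/* Vtable for u_transfer_helper, which wraps our resource and transfer hooks
 * so common format workarounds (such as separate Z32/S8) can be handled
 * generically on our behalf. */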
static const struct u_transfer_vtbl transfer_vtbl = {
        .resource_create = panfrost_resource_create,
        .resource_destroy = panfrost_resource_destroy,
        .transfer_map = panfrost_transfer_map,
        .transfer_unmap = panfrost_transfer_unmap,
        .transfer_flush_region = u_default_transfer_flush_region,
        .get_internal_format = panfrost_resource_get_internal_format,
        .set_stencil = panfrost_resource_set_stencil,
        .get_stencil = panfrost_resource_get_stencil,
};

void
panfrost_resource_screen_init(struct panfrost_screen *pscreen)
{
        //pscreen->base.resource_create_with_modifiers =
        //        panfrost_resource_create_with_modifiers;
        pscreen->base.resource_create = u_transfer_helper_resource_create;
        pscreen->base.resource_destroy = u_transfer_helper_resource_destroy;
        pscreen->base.resource_from_handle = panfrost_resource_from_handle;
        pscreen->base.resource_get_handle = panfrost_resource_get_handle;
        pscreen->base.transfer_helper = u_transfer_helper_create(&transfer_vtbl,
                                                                 true, false,
                                                                 true, true);

        pb_slabs_init(&pscreen->slabs,
                      MIN_SLAB_ENTRY_SIZE,
                      MAX_SLAB_ENTRY_SIZE,

                      3, /* Number of heaps */

                      pscreen,

                      panfrost_slab_can_reclaim,
                      panfrost_slab_alloc,
                      panfrost_slab_free);
}

void
panfrost_resource_context_init(struct pipe_context *pctx)
{
        pctx->transfer_map = u_transfer_helper_transfer_map;
        pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
        pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
        pctx->buffer_subdata = u_default_buffer_subdata;
        pctx->create_surface = panfrost_create_surface;
        pctx->surface_destroy = panfrost_surface_destroy;
        pctx->resource_copy_region = util_resource_copy_region;
        pctx->blit = panfrost_blit;
        //pctx->generate_mipmap = panfrost_generate_mipmap;
        pctx->flush_resource = panfrost_flush_resource;
        pctx->invalidate_resource = panfrost_invalidate_resource;
        pctx->texture_subdata = u_default_texture_subdata;
}