'pan_bo.c',
'pan_blit.c',
'pan_job.c',
- 'pan_drm.c',
'pan_allocate.c',
'pan_assemble.c',
'pan_format.c',
TRANSIENT_SLAB_SIZE : ALIGN_POT(sz, 4096);
/* We can't reuse the current BO, but we can create a new one. */
- bo = panfrost_drm_create_bo(screen, bo_sz, 0);
+ bo = panfrost_bo_create(screen, bo_sz, 0);
panfrost_batch_add_bo(batch, bo);
/* Creating a BO adds a reference, and then the job adds a
* I bet someone just thought that would be a cute pun. At least,
* that's how I'd do it. */
- state->bo = panfrost_drm_create_bo(screen, size, PAN_ALLOCATE_EXECUTE);
+ state->bo = panfrost_bo_create(screen, size, PAN_ALLOCATE_EXECUTE);
memcpy(state->bo->cpu, dst, size);
meta->shader = state->bo->gpu | program.first_tag;
final.shader.first_tag = shader->first_tag;
/* Upload the shader */
- final.shader.bo = panfrost_drm_create_bo(screen, shader->size, PAN_ALLOCATE_EXECUTE);
+ final.shader.bo = panfrost_bo_create(screen, shader->size, PAN_ALLOCATE_EXECUTE);
memcpy(final.shader.bo->cpu, shader->buffer, shader->size);
/* Pass BO ownership to job */
* Authors (Collabora):
* Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
*/
+#include <stdio.h>
+#include <fcntl.h>
#include <xf86drm.h>
#include <pthread.h>
#include "drm-uapi/panfrost_drm.h"
#include "pan_screen.h"
+#include "pan_util.h"
+#include "pandecode/decode.h"
+
+#include "os/os_mman.h"
+
+#include "util/u_inlines.h"
#include "util/u_math.h"
/* This file implements a userspace BO cache. Allocating and freeing
ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_MADVISE, &madv);
if (!ret && !madv.retained) {
- panfrost_drm_release_bo(screen, entry, false);
+ panfrost_bo_release(screen, entry, false);
continue;
}
/* Let's go! */
list_for_each_entry_safe(struct panfrost_bo, entry, bucket, link) {
list_del(&entry->link);
- panfrost_drm_release_bo(screen, entry, false);
+ panfrost_bo_release(screen, entry, false);
}
}
pthread_mutex_unlock(&screen->bo_cache_lock);
}
+void
+panfrost_bo_mmap(struct panfrost_screen *screen, struct panfrost_bo *bo)
+{
+ struct drm_panfrost_mmap_bo mmap_bo = { .handle = bo->gem_handle };
+ int ret;
+
+ if (bo->cpu)
+ return;
+
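+ /* The MMAP_BO ioctl doesn't map anything itself; it hands back a fake
+ * offset that we pass to mmap() on the DRM fd below */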
+ ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_MMAP_BO, &mmap_bo);
+ if (ret) {
+ fprintf(stderr, "DRM_IOCTL_PANFROST_MMAP_BO failed: %m\n");
+ assert(0);
+ }
+
+ bo->cpu = os_mmap(NULL, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ screen->fd, mmap_bo.offset);
+ if (bo->cpu == MAP_FAILED) {
+ fprintf(stderr, "mmap failed: %p %m\n", bo->cpu);
+ assert(0);
+ }
+
+ /* Record the mmap if we're tracing */
+ if (pan_debug & PAN_DBG_TRACE)
+ pandecode_inject_mmap(bo->gpu, bo->cpu, bo->size, NULL);
+}
+
+static void
+panfrost_bo_munmap(struct panfrost_screen *screen, struct panfrost_bo *bo)
+{
+ if (!bo->cpu)
+ return;
+
+ if (os_munmap((void *) (uintptr_t)bo->cpu, bo->size)) {
+ perror("munmap");
+ abort();
+ }
+
+ bo->cpu = NULL;
+}
+
+struct panfrost_bo *
+panfrost_bo_create(struct panfrost_screen *screen, size_t size,
+ uint32_t flags)
+{
+ struct panfrost_bo *bo;
+
+ /* Kernel will fail (confusingly) with EPERM otherwise */
+ assert(size > 0);
+
+ /* To maximize BO cache usage, don't allocate tiny BOs */
+ size = MAX2(size, 4096);
+
+ /* GROWABLE BOs cannot be mmapped */
+ if (flags & PAN_ALLOCATE_GROWABLE)
+ assert(flags & PAN_ALLOCATE_INVISIBLE);
+
+ unsigned translated_flags = 0;
+
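+ /* The HEAP and NOEXEC flags only exist on kernel driver version 1.1+,
+ * and older kernels reject them, so leave flags at zero there */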
+ if (screen->kernel_version->version_major > 1 ||
+ screen->kernel_version->version_minor >= 1) {
+ if (flags & PAN_ALLOCATE_GROWABLE)
+ translated_flags |= PANFROST_BO_HEAP;
+ if (!(flags & PAN_ALLOCATE_EXECUTE))
+ translated_flags |= PANFROST_BO_NOEXEC;
+ }
+
+ struct drm_panfrost_create_bo create_bo = {
+ .size = size,
+ .flags = translated_flags,
+ };
+
+ /* Before creating a BO, we first want to check the cache */
+
+ bo = panfrost_bo_cache_fetch(screen, size, flags);
+
+ if (bo == NULL) {
+ /* The cache missed, so allocate a fresh BO from the kernel */
+
+ int ret;
+
+ ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_CREATE_BO, &create_bo);
+ if (ret) {
+ fprintf(stderr, "DRM_IOCTL_PANFROST_CREATE_BO failed: %m\n");
+ assert(0);
+ }
+
+ /* We have a BO allocated from the kernel; fill in the userspace
+ * version */
+
+ bo = rzalloc(screen, struct panfrost_bo);
+ bo->size = create_bo.size;
+ bo->gpu = create_bo.offset;
+ bo->gem_handle = create_bo.handle;
+ bo->flags = flags;
+ }
+
+ /* Only mmap now if we know we need to. For CPU-invisible buffers, we
+ * never map since we don't care about their contents; they're purely
+ * for GPU-internal use. But we do trace them anyway. */
+
+ if (!(flags & (PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_DELAY_MMAP)))
+ panfrost_bo_mmap(screen, bo);
+ else if (flags & PAN_ALLOCATE_INVISIBLE) {
+ if (pan_debug & PAN_DBG_TRACE)
+ pandecode_inject_mmap(bo->gpu, NULL, bo->size, NULL);
+ }
+
+ pipe_reference_init(&bo->reference, 1);
+ return bo;
+}
+
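+ /* A typical call site, mirroring the shader upload paths updated in
+ * this patch:
+ *
+ *    bo = panfrost_bo_create(screen, size, PAN_ALLOCATE_EXECUTE);
+ *    memcpy(bo->cpu, code, size);
+ *
+ * The BO comes back with one reference held and, unless INVISIBLE or
+ * DELAY_MMAP was requested, a valid CPU mapping at bo->cpu */
+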
+void
+panfrost_bo_release(struct panfrost_screen *screen, struct panfrost_bo *bo,
+ bool cacheable)
+{
+ if (!bo)
+ return;
+
+ struct drm_gem_close gem_close = { .handle = bo->gem_handle };
+ int ret;
+
+ /* Rather than freeing the BO now, we'll cache the BO for later
+ * allocations if we're allowed to */
+
+ panfrost_bo_munmap(screen, bo);
+
+ if (cacheable) {
+ bool cached = panfrost_bo_cache_put(screen, bo);
+
+ if (cached)
+ return;
+ }
+
+ /* Otherwise, if the BO wasn't cached, we'll legitimately free the BO */
+
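+ /* GEM_CLOSE drops our handle; the kernel frees the backing memory
+ * once no other references (e.g. an exported dma-buf) remain */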
+ ret = drmIoctl(screen->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+ if (ret) {
+ fprintf(stderr, "DRM_IOCTL_GEM_CLOSE failed: %m\n");
+ assert(0);
+ }
+
+ ralloc_free(bo);
+}
+
+struct panfrost_bo *
+panfrost_bo_import(struct panfrost_screen *screen, int fd)
+{
+ struct panfrost_bo *bo = rzalloc(screen, struct panfrost_bo);
+ struct drm_panfrost_get_bo_offset get_bo_offset = {0,};
+ ASSERTED int ret;
+ unsigned gem_handle;
+
+ ret = drmPrimeFDToHandle(screen->fd, fd, &gem_handle);
+ assert(!ret);
+
+ get_bo_offset.handle = gem_handle;
+ ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_BO_OFFSET, &get_bo_offset);
+ assert(!ret);
+
+ bo->gem_handle = gem_handle;
+ bo->gpu = (mali_ptr) get_bo_offset.offset;
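+ /* dma-buf fds support lseek(), so SEEK_END reports the buffer size */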
+ bo->size = lseek(fd, 0, SEEK_END);
+ assert(bo->size > 0);
+ pipe_reference_init(&bo->reference, 1);
+
+ // TODO map and unmap on demand?
+ panfrost_bo_mmap(screen, bo);
+ return bo;
+}
+
+int
+panfrost_bo_export(struct panfrost_screen *screen, const struct panfrost_bo *bo)
+{
+ struct drm_prime_handle args = {
+ .handle = bo->gem_handle,
+ .flags = DRM_CLOEXEC,
+ };
+
+ int ret = drmIoctl(screen->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
+ if (ret == -1)
+ return -1;
+
+ return args.fd;
+}
+
if (panfrost->blitter_wallpaper)
util_blitter_destroy(panfrost->blitter_wallpaper);
- panfrost_drm_release_bo(screen, panfrost->scratchpad, false);
- panfrost_drm_release_bo(screen, panfrost->tiler_heap, false);
- panfrost_drm_release_bo(screen, panfrost->tiler_dummy, false);
+ panfrost_bo_release(screen, panfrost->scratchpad, false);
+ panfrost_bo_release(screen, panfrost->tiler_heap, false);
+ panfrost_bo_release(screen, panfrost->tiler_dummy, false);
ralloc_free(pipe);
}
struct pipe_context *gallium = (struct pipe_context *) ctx;
struct panfrost_screen *screen = pan_screen(gallium->screen);
- ctx->scratchpad = panfrost_drm_create_bo(screen, 64 * 4 * 4096, 0);
- ctx->tiler_heap = panfrost_drm_create_bo(screen, 4096 * 4096,
+ ctx->scratchpad = panfrost_bo_create(screen, 64 * 4 * 4096, 0);
+ ctx->tiler_heap = panfrost_bo_create(screen, 4096 * 4096,
PAN_ALLOCATE_INVISIBLE |
PAN_ALLOCATE_GROWABLE);
- ctx->tiler_dummy = panfrost_drm_create_bo(screen, 4096,
+ ctx->tiler_dummy = panfrost_bo_create(screen, 4096,
PAN_ALLOCATE_INVISIBLE);
assert(ctx->scratchpad && ctx->tiler_heap && ctx->tiler_dummy);
}
panfrost_blend_context_init(gallium);
panfrost_compute_context_init(gallium);
- panfrost_drm_init_context(ctx);
+ ASSERTED int ret;
+
+ ret = drmSyncobjCreate(pscreen->fd, DRM_SYNCOBJ_CREATE_SIGNALED,
+ &ctx->out_sync);
+ assert(!ret);
panfrost_setup_hardware(ctx);
return (struct panfrost_context *) pcontext;
}
-struct panfrost_fence *
-panfrost_fence_create(struct panfrost_context *ctx);
-
struct pipe_context *
panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags);
+++ /dev/null
-/*
- * © Copyright 2019 Collabora, Ltd.
- * Copyright 2019 Alyssa Rosenzweig
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <fcntl.h>
-#include <xf86drm.h>
-
-#include "drm-uapi/panfrost_drm.h"
-
-#include "util/u_memory.h"
-#include "util/os_time.h"
-#include "os/os_mman.h"
-
-#include "pan_screen.h"
-#include "pan_resource.h"
-#include "pan_context.h"
-#include "pan_util.h"
-#include "pandecode/decode.h"
-
-void
-panfrost_drm_mmap_bo(struct panfrost_screen *screen, struct panfrost_bo *bo)
-{
- struct drm_panfrost_mmap_bo mmap_bo = { .handle = bo->gem_handle };
- int ret;
-
- if (bo->cpu)
- return;
-
- ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_MMAP_BO, &mmap_bo);
- if (ret) {
- fprintf(stderr, "DRM_IOCTL_PANFROST_MMAP_BO failed: %m\n");
- assert(0);
- }
-
- bo->cpu = os_mmap(NULL, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
- screen->fd, mmap_bo.offset);
- if (bo->cpu == MAP_FAILED) {
- fprintf(stderr, "mmap failed: %p %m\n", bo->cpu);
- assert(0);
- }
-
- /* Record the mmap if we're tracing */
- if (pan_debug & PAN_DBG_TRACE)
- pandecode_inject_mmap(bo->gpu, bo->cpu, bo->size, NULL);
-}
-
-static void
-panfrost_drm_munmap_bo(struct panfrost_screen *screen, struct panfrost_bo *bo)
-{
- if (!bo->cpu)
- return;
-
- if (os_munmap((void *) (uintptr_t)bo->cpu, bo->size)) {
- perror("munmap");
- abort();
- }
-
- bo->cpu = NULL;
-}
-
-struct panfrost_bo *
-panfrost_drm_create_bo(struct panfrost_screen *screen, size_t size,
- uint32_t flags)
-{
- struct panfrost_bo *bo;
-
- /* Kernel will fail (confusingly) with EPERM otherwise */
- assert(size > 0);
-
- /* To maximize BO cache usage, don't allocate tiny BOs */
- size = MAX2(size, 4096);
-
- /* GROWABLE BOs cannot be mmapped */
- if (flags & PAN_ALLOCATE_GROWABLE)
- assert(flags & PAN_ALLOCATE_INVISIBLE);
-
- unsigned translated_flags = 0;
-
- if (screen->kernel_version->version_major > 1 ||
- screen->kernel_version->version_minor >= 1) {
- if (flags & PAN_ALLOCATE_GROWABLE)
- translated_flags |= PANFROST_BO_HEAP;
- if (!(flags & PAN_ALLOCATE_EXECUTE))
- translated_flags |= PANFROST_BO_NOEXEC;
- }
-
- struct drm_panfrost_create_bo create_bo = {
- .size = size,
- .flags = translated_flags,
- };
-
- /* Before creating a BO, we first want to check the cache */
-
- bo = panfrost_bo_cache_fetch(screen, size, flags);
-
- if (bo == NULL) {
- /* Otherwise, the cache misses and we need to allocate a BO fresh from
- * the kernel */
-
- int ret;
-
- ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_CREATE_BO, &create_bo);
- if (ret) {
- fprintf(stderr, "DRM_IOCTL_PANFROST_CREATE_BO failed: %m\n");
- assert(0);
- }
-
- /* We have a BO allocated from the kernel; fill in the userspace
- * version */
-
- bo = rzalloc(screen, struct panfrost_bo);
- bo->size = create_bo.size;
- bo->gpu = create_bo.offset;
- bo->gem_handle = create_bo.handle;
- bo->flags = flags;
- }
-
- /* Only mmap now if we know we need to. For CPU-invisible buffers, we
- * never map since we don't care about their contents; they're purely
- * for GPU-internal use. But we do trace them anyway. */
-
- if (!(flags & (PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_DELAY_MMAP)))
- panfrost_drm_mmap_bo(screen, bo);
- else if (flags & PAN_ALLOCATE_INVISIBLE) {
- if (pan_debug & PAN_DBG_TRACE)
- pandecode_inject_mmap(bo->gpu, NULL, bo->size, NULL);
- }
-
- pipe_reference_init(&bo->reference, 1);
- return bo;
-}
-
-void
-panfrost_drm_release_bo(struct panfrost_screen *screen, struct panfrost_bo *bo, bool cacheable)
-{
- if (!bo)
- return;
-
- struct drm_gem_close gem_close = { .handle = bo->gem_handle };
- int ret;
-
- /* Rather than freeing the BO now, we'll cache the BO for later
- * allocations if we're allowed to */
-
- panfrost_drm_munmap_bo(screen, bo);
-
- if (cacheable) {
- bool cached = panfrost_bo_cache_put(screen, bo);
-
- if (cached)
- return;
- }
-
- /* Otherwise, if the BO wasn't cached, we'll legitimately free the BO */
-
- ret = drmIoctl(screen->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
- if (ret) {
- fprintf(stderr, "DRM_IOCTL_GEM_CLOSE failed: %m\n");
- assert(0);
- }
-
- ralloc_free(bo);
-}
-
-struct panfrost_bo *
-panfrost_drm_import_bo(struct panfrost_screen *screen, int fd)
-{
- struct panfrost_bo *bo = rzalloc(screen, struct panfrost_bo);
- struct drm_panfrost_get_bo_offset get_bo_offset = {0,};
- ASSERTED int ret;
- unsigned gem_handle;
-
- ret = drmPrimeFDToHandle(screen->fd, fd, &gem_handle);
- assert(!ret);
-
- get_bo_offset.handle = gem_handle;
- ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_BO_OFFSET, &get_bo_offset);
- assert(!ret);
-
- bo->gem_handle = gem_handle;
- bo->gpu = (mali_ptr) get_bo_offset.offset;
- bo->size = lseek(fd, 0, SEEK_END);
- assert(bo->size > 0);
- pipe_reference_init(&bo->reference, 1);
-
- // TODO map and unmap on demand?
- panfrost_drm_mmap_bo(screen, bo);
- return bo;
-}
-
-int
-panfrost_drm_export_bo(struct panfrost_screen *screen, const struct panfrost_bo *bo)
-{
- struct drm_prime_handle args = {
- .handle = bo->gem_handle,
- .flags = DRM_CLOEXEC,
- };
-
- int ret = drmIoctl(screen->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
- if (ret == -1)
- return -1;
-
- return args.fd;
-}
-
-static int
-panfrost_drm_submit_batch(struct panfrost_batch *batch, u64 first_job_desc,
- int reqs)
-{
- struct panfrost_context *ctx = batch->ctx;
- struct pipe_context *gallium = (struct pipe_context *) ctx;
- struct panfrost_screen *screen = pan_screen(gallium->screen);
- struct drm_panfrost_submit submit = {0,};
- uint32_t *bo_handles;
- int ret;
-
- submit.in_syncs = (u64) (uintptr_t) &ctx->out_sync;
- submit.in_sync_count = 1;
-
- submit.out_sync = ctx->out_sync;
-
- submit.jc = first_job_desc;
- submit.requirements = reqs;
-
- bo_handles = calloc(batch->bos->entries, sizeof(*bo_handles));
- assert(bo_handles);
-
- set_foreach(batch->bos, entry) {
- struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
- assert(bo->gem_handle > 0);
- bo_handles[submit.bo_handle_count++] = bo->gem_handle;
- }
-
- submit.bo_handles = (u64) (uintptr_t) bo_handles;
- ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
- free(bo_handles);
- if (ret) {
- fprintf(stderr, "Error submitting: %m\n");
- return errno;
- }
-
- /* Trace the job if we're doing that */
- if (pan_debug & PAN_DBG_TRACE) {
- /* Wait so we can get errors reported back */
- drmSyncobjWait(screen->fd, &ctx->out_sync, 1, INT64_MAX, 0, NULL);
- pandecode_jc(submit.jc, FALSE);
- }
-
- return 0;
-}
-
-int
-panfrost_drm_submit_vs_fs_batch(struct panfrost_batch *batch)
-{
- struct panfrost_context *ctx = batch->ctx;
- bool has_draws = batch->last_job.gpu;
- int ret = 0;
-
- panfrost_batch_add_bo(batch, ctx->scratchpad);
- panfrost_batch_add_bo(batch, ctx->tiler_heap);
-
- if (batch->first_job.gpu) {
- ret = panfrost_drm_submit_batch(batch, batch->first_job.gpu, 0);
- assert(!ret);
- }
-
- if (batch->first_tiler.gpu || batch->clear) {
- ret = panfrost_drm_submit_batch(batch,
- panfrost_fragment_job(batch, has_draws),
- PANFROST_JD_REQ_FS);
- assert(!ret);
- }
-
- return ret;
-}
-
-struct panfrost_fence *
-panfrost_fence_create(struct panfrost_context *ctx)
-{
- struct pipe_context *gallium = (struct pipe_context *) ctx;
- struct panfrost_screen *screen = pan_screen(gallium->screen);
- struct panfrost_fence *f = calloc(1, sizeof(*f));
- if (!f)
- return NULL;
-
- /* Snapshot the last Panfrost's rendering's out fence. We'd rather have
- * another syncobj instead of a sync file, but this is all we get.
- * (HandleToFD/FDToHandle just gives you another syncobj ID for the
- * same syncobj).
- */
- drmSyncobjExportSyncFile(screen->fd, ctx->out_sync, &f->fd);
- if (f->fd == -1) {
- fprintf(stderr, "export failed: %m\n");
- free(f);
- return NULL;
- }
-
- pipe_reference_init(&f->reference, 1);
-
- return f;
-}
-
-unsigned
-panfrost_drm_query_gpu_version(struct panfrost_screen *screen)
-{
- struct drm_panfrost_get_param get_param = {0,};
- ASSERTED int ret;
-
- get_param.param = DRM_PANFROST_PARAM_GPU_PROD_ID;
- ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_PARAM, &get_param);
- assert(!ret);
-
- return get_param.value;
-}
-
-int
-panfrost_drm_init_context(struct panfrost_context *ctx)
-{
- struct pipe_context *gallium = (struct pipe_context *) ctx;
- struct panfrost_screen *screen = pan_screen(gallium->screen);
-
- return drmSyncobjCreate(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED,
- &ctx->out_sync);
-}
-
-void
-panfrost_drm_fence_reference(struct pipe_screen *screen,
- struct pipe_fence_handle **ptr,
- struct pipe_fence_handle *fence)
-{
- struct panfrost_fence **p = (struct panfrost_fence **)ptr;
- struct panfrost_fence *f = (struct panfrost_fence *)fence;
- struct panfrost_fence *old = *p;
-
- if (pipe_reference(&(*p)->reference, &f->reference)) {
- close(old->fd);
- free(old);
- }
- *p = f;
-}
-
-boolean
-panfrost_drm_fence_finish(struct pipe_screen *pscreen,
- struct pipe_context *ctx,
- struct pipe_fence_handle *fence,
- uint64_t timeout)
-{
- struct panfrost_screen *screen = pan_screen(pscreen);
- struct panfrost_fence *f = (struct panfrost_fence *)fence;
- int ret;
-
- unsigned syncobj;
- ret = drmSyncobjCreate(screen->fd, 0, &syncobj);
- if (ret) {
- fprintf(stderr, "Failed to create syncobj to wait on: %m\n");
- return false;
- }
-
- ret = drmSyncobjImportSyncFile(screen->fd, syncobj, f->fd);
- if (ret) {
- fprintf(stderr, "Failed to import fence to syncobj: %m\n");
- return false;
- }
-
- uint64_t abs_timeout = os_time_get_absolute_timeout(timeout);
- if (abs_timeout == OS_TIMEOUT_INFINITE)
- abs_timeout = INT64_MAX;
-
- ret = drmSyncobjWait(screen->fd, &syncobj, 1, abs_timeout, 0, NULL);
-
- drmSyncobjDestroy(screen->fd, syncobj);
-
- return ret >= 0;
-}
#include <assert.h>
+#include "drm-uapi/panfrost_drm.h"
+
#include "pan_context.h"
#include "util/hash_table.h"
#include "util/ralloc.h"
#include "util/u_format.h"
#include "util/u_pack_color.h"
+#include "pan_util.h"
+#include "pandecode/decode.h"
static struct panfrost_batch *
panfrost_create_batch(struct panfrost_context *ctx,
/* Create the BO as invisible, as there's no reason to map */
- batch->polygon_list = panfrost_drm_create_bo(screen,
- size, PAN_ALLOCATE_INVISIBLE);
+ batch->polygon_list = panfrost_bo_create(screen, size,
+ PAN_ALLOCATE_INVISIBLE);
panfrost_batch_add_bo(batch, batch->polygon_list);
/* A BO reference has been retained by panfrost_batch_add_bo(),
batch->ctx->wallpaper_batch = NULL;
}
+static int
+panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
+ mali_ptr first_job_desc,
+ uint32_t reqs)
+{
+ struct panfrost_context *ctx = batch->ctx;
+ struct pipe_context *gallium = (struct pipe_context *) ctx;
+ struct panfrost_screen *screen = pan_screen(gallium->screen);
+ struct drm_panfrost_submit submit = {0,};
+ uint32_t *bo_handles;
+ int ret;
+
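+ /* The context's syncobj is used as both the input and the output
+ * fence of every submit, serializing jobs on this context */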
+ submit.in_syncs = (u64) (uintptr_t) &ctx->out_sync;
+ submit.in_sync_count = 1;
+
+ submit.out_sync = ctx->out_sync;
+
+ submit.jc = first_job_desc;
+ submit.requirements = reqs;
+
+ bo_handles = calloc(batch->bos->entries, sizeof(*bo_handles));
+ assert(bo_handles);
+
+ set_foreach(batch->bos, entry) {
+ struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
+ assert(bo->gem_handle > 0);
+ bo_handles[submit.bo_handle_count++] = bo->gem_handle;
+ }
+
+ submit.bo_handles = (u64) (uintptr_t) bo_handles;
+ ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
+ free(bo_handles);
+ if (ret) {
+ fprintf(stderr, "Error submitting: %m\n");
+ return errno;
+ }
+
+ /* Trace the job if we're doing that */
+ if (pan_debug & PAN_DBG_TRACE) {
+ /* Wait so we can get errors reported back */
+ drmSyncobjWait(screen->fd, &ctx->out_sync, 1, INT64_MAX, 0, NULL);
+ pandecode_jc(submit.jc, FALSE);
+ }
+
+ return 0;
+}
+
+static int
+panfrost_batch_submit_jobs(struct panfrost_batch *batch)
+{
+ struct panfrost_context *ctx = batch->ctx;
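+ /* A non-zero first job pointer means the batch recorded actual work */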
+ bool has_draws = batch->first_job.gpu;
+ int ret = 0;
+
+ panfrost_batch_add_bo(batch, ctx->scratchpad);
+ panfrost_batch_add_bo(batch, ctx->tiler_heap);
+
+ if (has_draws) {
+ ret = panfrost_batch_submit_ioctl(batch, batch->first_job.gpu, 0);
+ assert(!ret);
+ }
+
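+ /* A fragment job runs even for a pure clear; PANFROST_JD_REQ_FS has
+ * the kernel schedule it on the fragment slot */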
+ if (batch->first_tiler.gpu || batch->clear) {
+ mali_ptr fragjob = panfrost_fragment_job(batch, has_draws);
+
+ ret = panfrost_batch_submit_ioctl(batch, fragjob, PANFROST_JD_REQ_FS);
+ assert(!ret);
+ }
+
+ return ret;
+}
+
void
panfrost_batch_submit(struct panfrost_batch *batch)
{
panfrost_scoreboard_link_batch(batch);
- ret = panfrost_drm_submit_vs_fs_batch(batch);
+ ret = panfrost_batch_submit_jobs(batch);
if (ret)
fprintf(stderr, "panfrost_batch_submit failed: %d\n", ret);
pipe_reference_init(&prsc->reference, 1);
prsc->screen = pscreen;
- rsc->bo = panfrost_drm_import_bo(screen, whandle->handle);
+ rsc->bo = panfrost_bo_import(screen, whandle->handle);
rsc->slices[0].stride = whandle->stride;
rsc->slices[0].initialized = true;
panfrost_resource_reset_damage(rsc);
return true;
} else {
- int fd = panfrost_drm_export_bo(screen, rsrc->bo);
+ int fd = panfrost_bo_export(screen, rsrc->bo);
if (fd < 0)
return false;
/* We create a BO immediately but don't bother mapping, since we don't
* care to map e.g. FBOs which the CPU probably won't touch */
- pres->bo = panfrost_drm_create_bo(screen, bo_size, PAN_ALLOCATE_DELAY_MMAP);
+ pres->bo = panfrost_bo_create(screen, bo_size, PAN_ALLOCATE_DELAY_MMAP);
}
void
/* When the reference count goes to zero, we need to cleanup */
if (pipe_reference(&bo->reference, NULL))
- panfrost_drm_release_bo(pan_screen(screen), bo, true);
+ panfrost_bo_release(pan_screen(screen), bo, true);
}
static void
if (!bo->cpu) {
struct panfrost_screen *screen = pan_screen(pctx->screen);
- panfrost_drm_mmap_bo(screen, bo);
+ panfrost_bo_mmap(screen, bo);
}
/* Check if we're bound for rendering and this is a read pixels. If so,
/* If we grew in size, reallocate the BO */
if (new_size > rsrc->bo->size) {
- panfrost_drm_release_bo(screen, rsrc->bo, true);
- rsrc->bo = panfrost_drm_create_bo(screen, new_size, PAN_ALLOCATE_DELAY_MMAP);
+ panfrost_bo_release(screen, rsrc->bo, true);
+ rsrc->bo = panfrost_bo_create(screen, new_size, PAN_ALLOCATE_DELAY_MMAP);
}
}
#include <fcntl.h>
#include "drm-uapi/drm_fourcc.h"
+#include "drm-uapi/panfrost_drm.h"
#include "pan_screen.h"
#include "pan_resource.h"
struct pipe_fence_handle **ptr,
struct pipe_fence_handle *fence)
{
- panfrost_drm_fence_reference(pscreen, ptr, fence);
+ struct panfrost_fence **p = (struct panfrost_fence **)ptr;
+ struct panfrost_fence *f = (struct panfrost_fence *)fence;
+ struct panfrost_fence *old = *p;
+
+ if (pipe_reference(&(*p)->reference, &f->reference)) {
+ close(old->fd);
+ free(old);
+ }
+ *p = f;
}
static bool
struct pipe_fence_handle *fence,
uint64_t timeout)
{
- return panfrost_drm_fence_finish(pscreen, ctx, fence, timeout);
+ struct panfrost_screen *screen = pan_screen(pscreen);
+ struct panfrost_fence *f = (struct panfrost_fence *)fence;
+ int ret;
+
+ unsigned syncobj;
+ ret = drmSyncobjCreate(screen->fd, 0, &syncobj);
+ if (ret) {
+ fprintf(stderr, "Failed to create syncobj to wait on: %m\n");
+ return false;
+ }
+
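+ /* The fence wraps a sync file (see panfrost_fence_create), so import
+ * it into the temporary syncobj before we can wait on it */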
+ ret = drmSyncobjImportSyncFile(screen->fd, syncobj, f->fd);
+ if (ret) {
+ fprintf(stderr, "Failed to import fence to syncobj: %m\n");
+ drmSyncobjDestroy(screen->fd, syncobj);
+ return false;
+ }
+
+ uint64_t abs_timeout = os_time_get_absolute_timeout(timeout);
+ if (abs_timeout == OS_TIMEOUT_INFINITE)
+ abs_timeout = INT64_MAX;
+
+ ret = drmSyncobjWait(screen->fd, &syncobj, 1, abs_timeout, 0, NULL);
+
+ drmSyncobjDestroy(screen->fd, syncobj);
+
+ return ret >= 0;
+}
+
+struct panfrost_fence *
+panfrost_fence_create(struct panfrost_context *ctx)
+{
+ struct panfrost_screen *screen = pan_screen(ctx->base.screen);
+ struct panfrost_fence *f = calloc(1, sizeof(*f));
+ if (!f)
+ return NULL;
+
+ /* Snapshot the last Panfrost rendering's out fence. We'd rather have
+ * another syncobj instead of a sync file, but this is all we get.
+ * (HandleToFD/FDToHandle just gives you another syncobj ID for the
+ * same syncobj).
+ */
+ drmSyncobjExportSyncFile(screen->fd, ctx->out_sync, &f->fd);
+ if (f->fd == -1) {
+ fprintf(stderr, "export failed: %m\n");
+ free(f);
+ return NULL;
+ }
+
+ pipe_reference_init(&f->reference, 1);
+
+ return f;
}
static const void *
return &midgard_nir_options;
}
+static unsigned
+panfrost_query_gpu_version(struct panfrost_screen *screen)
+{
+ struct drm_panfrost_get_param get_param = {0,};
+ ASSERTED int ret;
+
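+ /* PROD_ID is the raw GPU product ID, e.g. 0x750 for a T760; compare
+ * the require_sfbd check at the call site */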
+ get_param.param = DRM_PANFROST_PARAM_GPU_PROD_ID;
+ ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_PARAM, &get_param);
+ assert(!ret);
+
+ return get_param.value;
+}
+
struct pipe_screen *
panfrost_create_screen(int fd, struct renderonly *ro)
{
screen->fd = fd;
- screen->gpu_id = panfrost_drm_query_gpu_version(screen);
+ screen->gpu_id = panfrost_query_gpu_version(screen);
screen->require_sfbd = screen->gpu_id < 0x0750; /* T760 is the first to support MFBD */
screen->kernel_version = drmGetVersion(fd);
return (struct panfrost_screen *)p;
}
+struct panfrost_fence *
+panfrost_fence_create(struct panfrost_context *ctx);
+
struct panfrost_bo *
-panfrost_drm_create_bo(struct panfrost_screen *screen, size_t size,
+panfrost_bo_create(struct panfrost_screen *screen, size_t size,
uint32_t flags);
void
-panfrost_drm_mmap_bo(struct panfrost_screen *screen, struct panfrost_bo *bo);
+panfrost_bo_mmap(struct panfrost_screen *screen, struct panfrost_bo *bo);
void
-panfrost_drm_release_bo(struct panfrost_screen *screen, struct panfrost_bo *bo, bool cacheable);
+panfrost_bo_release(struct panfrost_screen *screen, struct panfrost_bo *bo,
+ bool cacheable);
struct panfrost_bo *
-panfrost_drm_import_bo(struct panfrost_screen *screen, int fd);
-int
-panfrost_drm_export_bo(struct panfrost_screen *screen, const struct panfrost_bo *bo);
-int
-panfrost_drm_submit_vs_fs_batch(struct panfrost_batch *batch);
-unsigned
-panfrost_drm_query_gpu_version(struct panfrost_screen *screen);
+panfrost_bo_import(struct panfrost_screen *screen, int fd);
int
-panfrost_drm_init_context(struct panfrost_context *ctx);
-void
-panfrost_drm_fence_reference(struct pipe_screen *screen,
- struct pipe_fence_handle **ptr,
- struct pipe_fence_handle *fence);
-boolean
-panfrost_drm_fence_finish(struct pipe_screen *pscreen,
- struct pipe_context *ctx,
- struct pipe_fence_handle *fence,
- uint64_t timeout);
+panfrost_bo_export(struct panfrost_screen *screen, const struct panfrost_bo *bo);
+
struct panfrost_bo *
panfrost_bo_cache_fetch(
struct panfrost_screen *screen,