diff --git a/src/gallium/drivers/panfrost/pan_drm.c b/src/gallium/drivers/panfrost/pan_drm.c
index 70051450cae4271adba20a16b7de4df5864eddd7..4e69282480975dd5dc0bc6099de1ab358e82bc49 100644
@@ -49,14 +49,14 @@ panfrost_drm_mmap_bo(struct panfrost_screen *screen, struct panfrost_bo *bo)
 
         ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_MMAP_BO, &mmap_bo);
         if (ret) {
-                fprintf(stderr, "DRM_IOCTL_PANFROST_MMAP_BO failed: %d\n", ret);
+                fprintf(stderr, "DRM_IOCTL_PANFROST_MMAP_BO failed: %m\n");
                 assert(0);
         }
 
         bo->cpu = os_mmap(NULL, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                           screen->fd, mmap_bo.offset);
         if (bo->cpu == MAP_FAILED) {
-                fprintf(stderr, "mmap failed: %p\n", bo->cpu);
+                fprintf(stderr, "mmap failed: %p %m\n", bo->cpu);
                 assert(0);
         }
 
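Note: the two error paths in this hunk stop printing the raw return value of
the failing call (a constant -1, with the real error left in errno) and switch
to the glibc printf extension %m, which expands to strerror(errno). A minimal
standalone sketch of the equivalence; the portable strerror() form is an
illustration, not part of this change:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            errno = EPERM;
            fprintf(stderr, "ioctl failed: %m\n");                  /* glibc shorthand */
            fprintf(stderr, "ioctl failed: %s\n", strerror(errno)); /* portable form */
            return 0;
    }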
@@ -88,9 +88,22 @@ panfrost_drm_create_bo(struct panfrost_screen *screen, size_t size,
         /* Kernel will fail (confusingly) with EPERM otherwise */
         assert(size > 0);
 
+        /* To maximize BO cache usage, don't allocate tiny BOs */
+        size = MAX2(size, 4096);
+
+        /* GROWABLE BOs cannot be mmapped */
+        if (flags & PAN_ALLOCATE_GROWABLE)
+                assert(flags & PAN_ALLOCATE_INVISIBLE);
+
         unsigned translated_flags = 0;
 
-        /* TODO: translate flags to kernel flags, if the kernel supports */
+        if (screen->kernel_version->version_major > 1 ||
+            screen->kernel_version->version_minor >= 1) {
+                if (flags & PAN_ALLOCATE_GROWABLE)
+                        translated_flags |= PANFROST_BO_HEAP;
+                if (!(flags & PAN_ALLOCATE_EXECUTE))
+                        translated_flags |= PANFROST_BO_NOEXEC;
+        }
 
         struct drm_panfrost_create_bo create_bo = {
                 .size = size,
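Note: PANFROST_BO_HEAP and PANFROST_BO_NOEXEC are only understood by panfrost
kernel ABI 1.1 and newer, so the flag translation above is gated on the
reported driver version. A hedged sketch of that gate as a standalone
predicate (kernel_supports_bo_flags is a hypothetical helper; drmVersionPtr
and its version_major/version_minor fields come from xf86drm.h):

    /* The panfrost ABI started at 1.0, so when the major version is 1 it
     * is enough to check the minor version alone. */
    static bool
    kernel_supports_bo_flags(const drmVersionPtr v)
    {
            return v->version_major > 1 || v->version_minor >= 1;
    }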
@@ -109,7 +122,7 @@ panfrost_drm_create_bo(struct panfrost_screen *screen, size_t size,
 
                 ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_CREATE_BO, &create_bo);
                 if (ret) {
-                        fprintf(stderr, "DRM_IOCTL_PANFROST_CREATE_BO failed: %d\n", ret);
+                        fprintf(stderr, "DRM_IOCTL_PANFROST_CREATE_BO failed: %m\n");
                         assert(0);
                 }
 
@@ -120,14 +133,19 @@ panfrost_drm_create_bo(struct panfrost_screen *screen, size_t size,
                 bo->size = create_bo.size;
                 bo->gpu = create_bo.offset;
                 bo->gem_handle = create_bo.handle;
+                bo->flags = flags;
         }
 
         /* Only mmap now if we know we need to. For CPU-invisible buffers, we
          * never map since we don't care about their contents; they're purely
-         * for GPU-internal use. */
+         * for GPU-internal use. But we do trace them anyway. */
 
         if (!(flags & (PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_DELAY_MMAP)))
                 panfrost_drm_mmap_bo(screen, bo);
+        else if (flags & PAN_ALLOCATE_INVISIBLE) {
+                if (pan_debug & PAN_DBG_TRACE)
+                        pandecode_inject_mmap(bo->gpu, NULL, bo->size, NULL);
+        }
 
         pipe_reference_init(&bo->reference, 1);
         return bo;
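Note: the updated comment records that CPU-invisible BOs, though never mapped,
still have their GPU range injected into the pandecode tracer so decoded jobs
can account for them. For the visible cases, a hedged usage sketch of the two
mapping modes (the flags argument and the bo->cpu "not yet mapped" test are
assumptions drawn from the surrounding code, not verified signatures):

    /* Eager: no special flags, bo->cpu is valid once creation returns. */
    struct panfrost_bo *now = panfrost_drm_create_bo(screen, size, 0);

    /* Lazy: DELAY_MMAP defers the mmap until first CPU access. */
    struct panfrost_bo *later =
            panfrost_drm_create_bo(screen, size, PAN_ALLOCATE_DELAY_MMAP);
    if (!later->cpu)
            panfrost_drm_mmap_bo(screen, later);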
@@ -136,15 +154,17 @@ panfrost_drm_create_bo(struct panfrost_screen *screen, size_t size,
 void
 panfrost_drm_release_bo(struct panfrost_screen *screen, struct panfrost_bo *bo, bool cacheable)
 {
-        struct drm_gem_close gem_close = { .handle = bo->gem_handle };
-        int ret;
-
         if (!bo)
                 return;
 
+        struct drm_gem_close gem_close = { .handle = bo->gem_handle };
+        int ret;
+
         /* Rather than freeing the BO now, we'll cache the BO for later
          * allocations if we're allowed to */
 
+        panfrost_drm_munmap_bo(screen, bo);
+
         if (cacheable) {
                 bool cached = panfrost_bo_cache_put(screen, bo);
 
@@ -154,11 +174,9 @@ panfrost_drm_release_bo(struct panfrost_screen *screen, struct panfrost_bo *bo,
 
         /* Otherwise, if the BO wasn't cached, we'll legitimately free the BO */
 
-        panfrost_drm_munmap_bo(screen, bo);
-
         ret = drmIoctl(screen->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
         if (ret) {
-                fprintf(stderr, "DRM_IOCTL_GEM_CLOSE failed: %d\n", ret);
+                fprintf(stderr, "DRM_IOCTL_GEM_CLOSE failed: %m\n");
                 assert(0);
         }
 
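Note: this pair of hunks moves the munmap ahead of the cache probe, so a
cached BO never keeps a stale CPU mapping alive (a later cache hit can re-mmap
on demand). A condensed sketch of the resulting release path, using only names
that appear in the diff:

    panfrost_drm_munmap_bo(screen, bo);
    if (cacheable && panfrost_bo_cache_put(screen, bo))
            return;         /* BO parked in the cache for reuse */
    drmIoctl(screen->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);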
@@ -193,7 +211,7 @@ panfrost_drm_import_bo(struct panfrost_screen *screen, int fd)
 {
         struct panfrost_bo *bo = rzalloc(screen, struct panfrost_bo);
         struct drm_panfrost_get_bo_offset get_bo_offset = {0,};
-        MAYBE_UNUSED int ret;
+        ASSERTED int ret;
         unsigned gem_handle;
 
         ret = drmPrimeFDToHandle(screen->fd, fd, &gem_handle);
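Note: ASSERTED replaces the retired MAYBE_UNUSED annotation for variables that
are only read inside assert(), so release builds (where NDEBUG compiles the
assert away) do not warn about an unused variable. Mesa's real definition
lives in src/util/macros.h; a minimal sketch of such a macro, assuming a
GCC-compatible compiler:

    #ifdef NDEBUG
    #define ASSERTED __attribute__((unused))
    #else
    #define ASSERTED
    #endif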
@@ -274,17 +292,15 @@ panfrost_drm_submit_job(struct panfrost_context *ctx, u64 job_desc, int reqs)
 }
 
 int
-panfrost_drm_submit_vs_fs_job(struct panfrost_context *ctx, bool has_draws, bool is_scanout)
+panfrost_drm_submit_vs_fs_job(struct panfrost_context *ctx, bool has_draws)
 {
         int ret = 0;
 
         struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
 
-        /* TODO: Add here the transient pools */
-        panfrost_job_add_bo(job, ctx->shaders.bo);
         panfrost_job_add_bo(job, ctx->scratchpad.bo);
         panfrost_job_add_bo(job, ctx->tiler_heap.bo);
-        panfrost_job_add_bo(job, ctx->tiler_polygon_list.bo);
+        panfrost_job_add_bo(job, job->polygon_list);
 
         if (job->first_job.gpu) {
                 ret = panfrost_drm_submit_job(ctx, job->first_job.gpu, 0);
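Note: besides dropping the now-unused is_scanout parameter, this hunk removes
the manual shader-BO add along with its TODO (those BOs are presumably tracked
elsewhere now) and reads the polygon list from the job rather than the
context. A sketch of the implied ownership change; the field placement is
assumed from the diff and the real struct has many more members:

    struct panfrost_job {
            /* ... */
            /* Per-job tiler polygon list; was ctx->tiler_polygon_list.bo,
             * which every job on the context had to share. */
            struct panfrost_bo *polygon_list;
    };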
@@ -315,7 +331,7 @@ panfrost_fence_create(struct panfrost_context *ctx)
          */
         drmSyncobjExportSyncFile(screen->fd, ctx->out_sync, &f->fd);
         if (f->fd == -1) {
-                fprintf(stderr, "export failed\n");
+                fprintf(stderr, "export failed: %m\n");
                 free(f);
                 return NULL;
         }
@@ -332,12 +348,12 @@ panfrost_drm_force_flush_fragment(struct panfrost_context *ctx,
         struct pipe_context *gallium = (struct pipe_context *) ctx;
         struct panfrost_screen *screen = pan_screen(gallium->screen);
 
-        if (!screen->last_fragment_flushed) {
+        if (!ctx->last_fragment_flushed) {
                 drmSyncobjWait(screen->fd, &ctx->out_sync, 1, INT64_MAX, 0, NULL);
-                screen->last_fragment_flushed = true;
+                ctx->last_fragment_flushed = true;
 
                 /* The job finished up, so we're safe to clean it up now */
-                panfrost_free_job(ctx, screen->last_job);
+                panfrost_free_job(ctx, ctx->last_job);
         }
 
         if (fence) {
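Note: last_fragment_flushed and last_job move from the screen into the
context, matching the per-context out_sync they synchronize against; a
screen-wide flag would be shared by every context on the screen. A sketch of
the implied declarations (placement assumed from the diff; the real struct is
much larger):

    struct panfrost_context {
            /* ... */
            bool last_fragment_flushed;
            struct panfrost_job *last_job;
    };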
@@ -351,7 +367,7 @@ unsigned
 panfrost_drm_query_gpu_version(struct panfrost_screen *screen)
 {
         struct drm_panfrost_get_param get_param = {0,};
-        MAYBE_UNUSED int ret;
+        ASSERTED int ret;
 
         get_param.param = DRM_PANFROST_PARAM_GPU_PROD_ID;
         ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_PARAM, &get_param);
@@ -403,7 +419,7 @@ panfrost_drm_fence_finish(struct pipe_screen *pscreen,
                 return false;
         }
 
-        drmSyncobjImportSyncFile(screen->fd, syncobj, f->fd);
+        ret = drmSyncobjImportSyncFile(screen->fd, syncobj, f->fd);
         if (ret) {
                 fprintf(stderr, "Failed to import fence to syncobj: %m\n");
                 return false;
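Note: this last hunk is a genuine bug fix, not a cosmetic change: ret was
never assigned here, so the error check below it tested a stale value from an
earlier call. The corrected sequence in full, using only names from the
surrounding function:

    ret = drmSyncobjImportSyncFile(screen->fd, syncobj, f->fd);
    if (ret) {
            fprintf(stderr, "Failed to import fence to syncobj: %m\n");
            return false;
    }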