/**************************************************************************
*
* Copyright 2018-2019 Alyssa Rosenzweig
* Copyright 2018-2019 Collabora, Ltd.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
struct panfrost_resource;
struct panfrost_screen;

/* Flags for allocated memory, ORed together into the allocation's flag
 * word. NOTE(review): exact kernel-side semantics are defined by the DRM
 * backend, not visible here -- confirm against the allocator
 * implementation. */
#define PAN_ALLOCATE_EXECUTE (1 << 0)
#define PAN_ALLOCATE_GROWABLE (1 << 1)
#define PAN_ALLOCATE_INVISIBLE (1 << 2)
#define PAN_ALLOCATE_COHERENT_LOCAL (1 << 3)
struct panfrost_screen {
struct pipe_screen base;
+ int fd;
struct renderonly *ro;
- struct panfrost_driver *driver;
-
- struct panfrost_memory perf_counters;
/* Memory management is based on subdividing slabs with AMD's allocator */
struct pb_slabs slabs;
/* TODO: Where? */
struct panfrost_resource *display_target;
- int last_fragment_id;
+ /* While we're busy building up the job for frame N, the GPU is
+ * still busy executing frame N-1. So hold a reference to
+ * yesterjob */
int last_fragment_flushed;
+ struct panfrost_job *last_job;
};
/* Downcast a gallium pipe_screen to the panfrost screen that embeds it.
 * Valid because `base` is the first member of struct panfrost_screen, so
 * the two pointers share an address. */
static inline struct panfrost_screen *
pan_screen(struct pipe_screen *p)
{
        return (struct panfrost_screen *)p;
}
+void
+panfrost_drm_allocate_slab(struct panfrost_screen *screen,
+ struct panfrost_memory *mem,
+ size_t pages,
+ bool same_va,
+ int extra_flags,
+ int commit_count,
+ int extent);
+void
+panfrost_drm_free_slab(struct panfrost_screen *screen,
+ struct panfrost_memory *mem);
+struct panfrost_bo *
+panfrost_drm_import_bo(struct panfrost_screen *screen, int fd);
+int
+panfrost_drm_export_bo(struct panfrost_screen *screen, const struct panfrost_bo *bo);
+int
+panfrost_drm_submit_vs_fs_job(struct panfrost_context *ctx, bool has_draws,
+ bool is_scanout);
+void
+panfrost_drm_force_flush_fragment(struct panfrost_context *ctx,
+ struct pipe_fence_handle **fence);
+unsigned
+panfrost_drm_query_gpu_version(struct panfrost_screen *screen);
+int
+panfrost_drm_init_context(struct panfrost_context *ctx);
+void
+panfrost_drm_fence_reference(struct pipe_screen *screen,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence);
+boolean
+panfrost_drm_fence_finish(struct pipe_screen *pscreen,
+ struct pipe_context *ctx,
+ struct pipe_fence_handle *fence,
+ uint64_t timeout);
+
#endif /* PAN_SCREEN_H */