* let the BO logic know about this constraint. */
#define PAN_BO_DONT_REUSE (1 << 5)
+/* BO has been imported */
+#define PAN_BO_IMPORTED (1 << 6)
+
+/* BO has been exported */
+#define PAN_BO_EXPORTED (1 << 7)
+
/* GPU access flags */
/* BO is either shared (can be accessed by more than one GPU batch) or private
int gem_handle;
uint32_t flags;
+
+ /* Combination of PAN_BO_ACCESS_{READ,WRITE} flags encoding pending
+ * GPU accesses to this BO. Useful to avoid calling the WAIT_BO ioctl
+ * when the BO is idle.
+ */
+ uint32_t gpu_access;
};
+/* If a BO is accessed for a particular shader stage, will it be in the primary
+ * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
+ * fragment will be primary, e.g. compute jobs will be considered
+ * "vertex/tiler" by analogy */
+
/* Map a shader stage to the batch-level BO access flag it implies.
 * Fragment work runs in the fragment (secondary) batch; every other
 * supported stage — vertex and compute — is treated as part of the
 * vertex/tiler (primary) batch. Only FRAGMENT, VERTEX and COMPUTE are
 * valid inputs; anything else trips the assert in debug builds. */
static inline uint32_t
panfrost_bo_access_for_stage(enum pipe_shader_type stage)
{
        assert(stage == PIPE_SHADER_FRAGMENT ||
               stage == PIPE_SHADER_VERTEX ||
               stage == PIPE_SHADER_COMPUTE);

        if (stage == PIPE_SHADER_FRAGMENT)
                return PAN_BO_ACCESS_FRAGMENT;

        /* Vertex and compute both land in the vertex/tiler batch. */
        return PAN_BO_ACCESS_VERTEX_TILER;
}
+bool
+panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns,
+ uint32_t access_type);
void
panfrost_bo_reference(struct panfrost_bo *bo);
void