+/* Const-preserving downcast from the gallium pipe_resource to the
+ * driver-private fd_resource wrapper.  Plain pointer cast; presumably
+ * fd_resource embeds pipe_resource as its base (verify in struct decl).
+ */
+static inline const struct fd_resource *
+fd_resource_const(const struct pipe_resource *ptex)
+{
+ const struct fd_resource *rsc = (const struct fd_resource *)ptex;
+ return rsc;
+}
+
+/* Returns true if the resource has GPU work outstanding that conflicts
+ * with the requested access: any pending GPU write, or (when the CPU
+ * wants to write) any pending GPU read.  Recurses into the interleaved
+ * stencil resource, if present.
+ */
+static inline bool
+pending(struct fd_resource *rsc, bool write)
+{
+ /* A pending GPU write conflicts with both reads and writes: */
+ if (rsc->write_batch)
+ return true;
+
+ /* A CPU write conflicts with any batch still reading the resource: */
+ if (write && rsc->batch_mask)
+ return true;
+
+ /* Fold in the separate stencil plane, when one exists: */
+ return rsc->stencil && pending(rsc->stencil, write);
+}
+
+/* Non-blocking busy probe: asks the kernel whether the BO is still in
+ * use for the given op.  NOSYNC makes the prep call return immediately
+ * with non-zero status instead of waiting, so non-zero means "busy".
+ */
+static inline bool
+fd_resource_busy(struct fd_resource *rsc, unsigned op)
+{
+ int ret = fd_bo_cpu_prep(rsc->bo, NULL, op | DRM_FREEDRENO_PREP_NOSYNC);
+ return ret != 0;
+}
+
+/* Acquire the resource's mutex (serializes updates to per-resource
+ * state such as the dirty bits, see fd_resource_set_usage()).
+ */
+static inline void
+fd_resource_lock(struct fd_resource *rsc)
+{
+ simple_mtx_lock(&rsc->lock);
+}
+
+/* Release the resource's mutex, pairing with fd_resource_lock(). */
+static inline void
+fd_resource_unlock(struct fd_resource *rsc)
+{
+ simple_mtx_unlock(&rsc->lock);
+}
+
+/* OR the given dirty/usage bits into the resource, tolerating a NULL
+ * resource pointer.  Bits are only ever ORed in and set_usage() is
+ * called frequently per resource, so an unlocked read filters out the
+ * common already-set case before taking the lock.
+ */
+static inline void
+fd_resource_set_usage(struct pipe_resource *prsc, enum fd_dirty_3d_state usage)
+{
+ if (!prsc)
+ return;
+ struct fd_resource *rsc = fd_resource(prsc);
+ if (unlikely(!(rsc->dirty & usage))) {
+ fd_resource_lock(rsc);
+ rsc->dirty |= usage;
+ fd_resource_unlock(rsc);
+ }
+}
+
+/* Whether the pixel format contains a depth component, per the
+ * util_format description table.
+ */
+static inline bool
+has_depth(enum pipe_format format)
+{
+ return util_format_has_depth(util_format_description(format));
+}
+
+/* Driver-private transfer object wrapping the gallium pipe_transfer. */
+struct fd_transfer {
+ struct pipe_transfer base;
+ /* resource used as a bounce/staging copy for the map, when needed: */
+ struct pipe_resource *staging_prsc;
+ /* region of the staging resource covered by this transfer: */
+ struct pipe_box staging_box;
+};
+
+/* Downcast a gallium pipe_transfer to the driver-private fd_transfer
+ * (pipe_transfer is the first member, so a pointer cast suffices).
+ */
+static inline struct fd_transfer *
+fd_transfer(struct pipe_transfer *ptrans)
+{
+ struct fd_transfer *trans = (struct fd_transfer *)ptrans;
+ return trans;
+}
+
+static inline struct fdl_slice *