#include "util/list.h"
#include "util/simple_mtx.h"
#include "util/u_range.h"
#include "util/u_transfer_helper.h"
#include "util/format/u_format.h"

#include "freedreno_batch.h"
#include "freedreno_util.h"
#include "freedreno/fdl/freedreno_layout.h"
/* Tracks which depth-comparison direction the LRZ (low-resolution Z)
 * buffer was built for; a draw with the opposite direction invalidates it.
 */
enum fd_lrz_direction {
   FD_LRZ_UNKNOWN,
   /* Depth func less/less-than: */
   FD_LRZ_LESS,
   /* Depth func greater/greater-than: */
   FD_LRZ_GREATER,
};
+
/* Driver-private wrapper around a gallium pipe_resource, adding the
 * backing buffer object and freedreno-specific tracking state.
 */
struct fd_resource {
   struct pipe_resource base;
   /* Backing buffer object: */
   struct fd_bo *bo;
   /* TODO rename to secondary or auxiliary? */
   struct fd_resource *stencil;
   /* Protects concurrent updates of mutable tracking state (e.g. `dirty`): */
   simple_mtx_t lock;

   /* bitmask of in-flight batches which reference this resource. Note
    * that the batch doesn't hold reference to resources (but instead
    * the fd_ringbuffer holds refs to the underlying fd_bo), but in case
    * (NOTE(review): comment truncated in this view — confirm remainder
    * against upstream.)
    */
   /* Sequence # incremented each time bo changes: */
   uint16_t seqno;
   /* bitmask of state this resource could potentially dirty when rebound,
    * see rebind_resource()
    */
   enum fd_dirty_3d_state dirty;

   /*
    * LRZ (low-resolution Z) state:
    *
    * (NOTE(review): the stray "fdl_layout" line in the original comment
    * looks like extraction residue — confirm against upstream.)
    */
   bool lrz_valid : 1;
   /* Depth-func direction the current LRZ contents are valid for: */
   enum fd_lrz_direction lrz_direction : 2;
   uint16_t lrz_width; // for lrz clear, does this differ from lrz_pitch?
   uint16_t lrz_height;
   uint16_t lrz_pitch;
   /* Separate bo holding the LRZ buffer: */
   struct fd_bo *lrz;
};
/* Driver-private wrapper around pipe_memory_object (imported/exported
 * memory), pairing it with its backing buffer object.
 */
struct fd_memory_object {
   struct pipe_memory_object b;
   struct fd_bo *bo;
};
+
/* Downcast a pipe_resource to the driver's fd_resource wrapper.
 * Fix: the cast previously added a bogus `const` qualifier, which is
 * discarded by the non-const return type (constraint violation).
 */
static inline struct fd_resource *
fd_resource(struct pipe_resource *ptex)
{
   return (struct fd_resource *)ptex;
}
/* Downcast a pipe_memory_object to the driver's fd_memory_object wrapper. */
static inline struct fd_memory_object *
fd_memory_object(struct pipe_memory_object *pmemobj)
{
   return (struct fd_memory_object *)pmemobj;
}
+
/* Stub: always reports that no batch is pending on the resource.
 * Both parameters are currently unused.
 */
static inline bool
pending(struct fd_resource *rsc, bool write)
{
   (void)rsc;
   (void)write;
   return false;
}
+static inline bool
+fd_resource_busy(struct fd_resource *rsc, unsigned op)
+{
+ return fd_bo_cpu_prep(rsc->bo, NULL, op | DRM_FREEDRENO_PREP_NOSYNC) != 0;
+}
+
/* Acquire the resource's tracking-state lock (pairs with fd_resource_unlock). */
static inline void
fd_resource_lock(struct fd_resource *rsc)
{
   simple_mtx_lock(&rsc->lock);
}
+
/* Release the resource's tracking-state lock (pairs with fd_resource_lock). */
static inline void
fd_resource_unlock(struct fd_resource *rsc)
{
   simple_mtx_unlock(&rsc->lock);
}
+
+static inline void
+fd_resource_set_usage(struct pipe_resource *prsc, enum fd_dirty_3d_state usage)
+{
+ if (!prsc)
+ return;
+ struct fd_resource *rsc = fd_resource(prsc);
+ /* Bits are only ever ORed in, and we expect many set_usage() per
+ * resource, so do the quick check outside of the lock.
+ */
+ if (likely(rsc->dirty & usage))
+ return;
+ fd_resource_lock(rsc);
+ rsc->dirty |= usage;
+ fd_resource_unlock(rsc);
+}
+
/* True if `format` contains a depth component.
 * Fix: the previous body referenced undeclared `rsc`/`level` and
 * returned a layer stride — clearly a mis-merged fragment that could
 * not compile.  Restored the format-description query matching the
 * signature.
 */
static inline bool
has_depth(enum pipe_format format)
{
   const struct util_format_description *desc = util_format_description(format);
   return util_format_has_depth(desc);
}
+/* get pitch (in bytes) for specified mipmap level */
+static inline uint32_t
+fd_resource_pitch(struct fd_resource *rsc, unsigned level)
+{
+ if (is_a2xx(fd_screen(rsc->base.screen)))
+ return fdl2_pitch(&rsc->layout, level);
+
+ return fdl_pitch(&rsc->layout, level);
+}
+
/* get offset for specified mipmap level and texture/array layer */
static inline uint32_t
fd_resource_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
uint32_t fd_setup_slices(struct fd_resource *rsc);
void fd_resource_resize(struct pipe_resource *prsc, uint32_t sz);
void fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc);
+void fd_resource_dump(struct fd_resource *rsc, const char *name);
bool fd_render_condition_check(struct pipe_context *pctx);
+static inline bool
+fd_batch_references_resource(struct fd_batch *batch, struct fd_resource *rsc)
+{
+ return rsc->batch_mask & (1 << batch->idx);
+}
+
/* Mark `rsc` as read by `batch`, deferring to the slowpath on first use. */
static inline void
fd_batch_resource_read(struct fd_batch *batch,
                       struct fd_resource *rsc)
{
   /* Fast path: if we hit this then we know we don't have anyone else
    * writing to it (since both _write and _read flush other writers), and
    * that we've already recursed for stencil.
    */
   if (likely(fd_batch_references_resource(batch, rsc)))
      return;

   fd_batch_resource_read_slowpath(batch, rsc);
}
+
#endif /* FREEDRENO_RESOURCE_H_ */