#include "kernel/vc4_packet.h"
struct vc4_bo;
+struct vc4_job;
+
+/**
+ * Undefined structure, used for typechecking that you're passing the pointers
+ * to these functions correctly.
+ */
+struct vc4_cl_out;
struct vc4_cl {
void *base;
- void *next;
- void *reloc_next;
+ struct vc4_cl_out *next;
+ struct vc4_cl_out *reloc_next;
uint32_t size;
+#ifdef DEBUG
uint32_t reloc_count;
+#endif
};
-void vc4_init_cl(struct vc4_context *vc4, struct vc4_cl *cl);
+void vc4_init_cl(void *mem_ctx, struct vc4_cl *cl);
void vc4_reset_cl(struct vc4_cl *cl);
void vc4_dump_cl(void *cl, uint32_t size, bool is_render);
-uint32_t vc4_gem_hindex(struct vc4_context *vc4, struct vc4_bo *bo);
+uint32_t vc4_gem_hindex(struct vc4_job *job, struct vc4_bo *bo);
struct PACKED unaligned_16 { uint16_t x; };
struct PACKED unaligned_32 { uint32_t x; };
-static inline void
-put_unaligned_32(void *ptr, uint32_t val)
+static inline uint32_t cl_offset(struct vc4_cl *cl)
{
- struct unaligned_32 *p = ptr;
- p->x = val;
+ return (char *)cl->next - (char *)cl->base;
}
static inline void
-put_unaligned_16(void *ptr, uint16_t val)
+cl_advance(struct vc4_cl_out **cl, uint32_t n)
{
- struct unaligned_16 *p = ptr;
- p->x = val;
+ (*cl) = (struct vc4_cl_out *)((char *)(*cl) + n);
}
-static inline void
-cl_u8(struct vc4_cl *cl, uint8_t n)
+static inline struct vc4_cl_out *
+cl_start(struct vc4_cl *cl)
{
- assert((cl->next - cl->base) + 1 <= cl->size);
-
- *(uint8_t *)cl->next = n;
- cl->next++;
+ return cl->next;
}
static inline void
-cl_u16(struct vc4_cl *cl, uint16_t n)
+cl_end(struct vc4_cl *cl, struct vc4_cl_out *next)
{
- assert((cl->next - cl->base) + 2 <= cl->size);
+ cl->next = next;
+ assert(cl_offset(cl) <= cl->size);
+}
+
- put_unaligned_16(cl->next, n);
- cl->next += 2;
+static inline void
+put_unaligned_32(struct vc4_cl_out *ptr, uint32_t val)
+{
+ struct unaligned_32 *p = (void *)ptr;
+ p->x = val;
}
static inline void
-cl_u32(struct vc4_cl *cl, uint32_t n)
+put_unaligned_16(struct vc4_cl_out *ptr, uint16_t val)
{
- assert((cl->next - cl->base) + 4 <= cl->size);
+ struct unaligned_16 *p = (void *)ptr;
+ p->x = val;
+}
- put_unaligned_32(cl->next, n);
- cl->next += 4;
+static inline void
+cl_u8(struct vc4_cl_out **cl, uint8_t n)
+{
+ *(uint8_t *)(*cl) = n;
+ cl_advance(cl, 1);
}
static inline void
-cl_aligned_u32(struct vc4_cl *cl, uint32_t n)
+cl_u16(struct vc4_cl_out **cl, uint16_t n)
{
- assert((cl->next - cl->base) + 4 <= cl->size);
+ put_unaligned_16(*cl, n);
+ cl_advance(cl, 2);
+}
- *(uint32_t *)cl->next = n;
- cl->next += 4;
+static inline void
+cl_u32(struct vc4_cl_out **cl, uint32_t n)
+{
+ put_unaligned_32(*cl, n);
+ cl_advance(cl, 4);
}
static inline void
-cl_ptr(struct vc4_cl *cl, void *ptr)
+cl_aligned_u32(struct vc4_cl_out **cl, uint32_t n)
{
- assert((cl->next - cl->base) + sizeof(void *) <= cl->size);
+ *(uint32_t *)(*cl) = n;
+ cl_advance(cl, 4);
+}
- *(void **)cl->next = ptr;
- cl->next += sizeof(void *);
+static inline void
+cl_ptr(struct vc4_cl_out **cl, void *ptr)
+{
+ *(struct vc4_cl_out **)(*cl) = ptr;
+ cl_advance(cl, sizeof(void *));
}
static inline void
-cl_f(struct vc4_cl *cl, float f)
+cl_f(struct vc4_cl_out **cl, float f)
{
cl_u32(cl, fui(f));
}
static inline void
-cl_aligned_f(struct vc4_cl *cl, float f)
+cl_aligned_f(struct vc4_cl_out **cl, float f)
{
cl_aligned_u32(cl, fui(f));
}
static inline void
-cl_start_reloc(struct vc4_cl *cl, uint32_t n)
+cl_start_reloc(struct vc4_cl *cl, struct vc4_cl_out **out, uint32_t n)
{
assert(n == 1 || n == 2);
+#ifdef DEBUG
assert(cl->reloc_count == 0);
cl->reloc_count = n;
+#endif
- cl_u8(cl, VC4_PACKET_GEM_HANDLES);
- cl->reloc_next = cl->next;
- cl_u32(cl, 0); /* Space where hindex will be written. */
- cl_u32(cl, 0); /* Space where hindex will be written. */
+ cl_u8(out, VC4_PACKET_GEM_HANDLES);
+ cl->reloc_next = *out;
+ cl_u32(out, 0); /* Space where hindex will be written. */
+ cl_u32(out, 0); /* Space where hindex will be written. */
}
-static inline void
+static inline struct vc4_cl_out *
cl_start_shader_reloc(struct vc4_cl *cl, uint32_t n)
{
+#ifdef DEBUG
assert(cl->reloc_count == 0);
cl->reloc_count = n;
+#endif
cl->reloc_next = cl->next;
- /* Space where hindex will be written. */
- cl->next += n * 4;
+	/* Reserve the space where the n hindexes will be written. */
+ cl_advance(&cl->next, n * 4);
+
+ return cl->next;
}
static inline void
-cl_reloc_hindex(struct vc4_cl *cl, uint32_t hindex, uint32_t offset)
+cl_reloc(struct vc4_job *job, struct vc4_cl *cl, struct vc4_cl_out **cl_out,
+ struct vc4_bo *bo, uint32_t offset)
{
- *(uint32_t *)cl->reloc_next = hindex;
- cl->reloc_next += 4;
+ *(uint32_t *)cl->reloc_next = vc4_gem_hindex(job, bo);
+ cl_advance(&cl->reloc_next, 4);
+#ifdef DEBUG
cl->reloc_count--;
+#endif
- cl_u32(cl, offset);
+ cl_u32(cl_out, offset);
}
static inline void
-cl_aligned_reloc_hindex(struct vc4_cl *cl, uint32_t hindex, uint32_t offset)
+cl_aligned_reloc(struct vc4_job *job, struct vc4_cl *cl,
+ struct vc4_cl_out **cl_out,
+ struct vc4_bo *bo, uint32_t offset)
{
- *(uint32_t *)cl->reloc_next = hindex;
- cl->reloc_next += 4;
+ *(uint32_t *)cl->reloc_next = vc4_gem_hindex(job, bo);
+ cl_advance(&cl->reloc_next, 4);
+#ifdef DEBUG
cl->reloc_count--;
+#endif
- cl_aligned_u32(cl, offset);
-}
-
-static inline void
-cl_reloc(struct vc4_context *vc4, struct vc4_cl *cl,
- struct vc4_bo *bo, uint32_t offset)
-{
- cl_reloc_hindex(cl, vc4_gem_hindex(vc4, bo), offset);
-}
-
-static inline void
-cl_aligned_reloc(struct vc4_context *vc4, struct vc4_cl *cl,
- struct vc4_bo *bo, uint32_t offset)
-{
- cl_aligned_reloc_hindex(cl, vc4_gem_hindex(vc4, bo), offset);
+ cl_aligned_u32(cl_out, offset);
}
void cl_ensure_space(struct vc4_cl *cl, uint32_t size);