struct drm_vc5_get_param *args)
{
static const uint32_t reg_map[] = {
- [DRM_VC5_PARAM_V3D_HUB_UIFCFG] = V3D_HUB_CTL_UIFCFG,
+ [DRM_VC5_PARAM_V3D_UIFCFG] = V3D_HUB_CTL_UIFCFG,
[DRM_VC5_PARAM_V3D_HUB_IDENT1] = V3D_HUB_CTL_IDENT1,
[DRM_VC5_PARAM_V3D_HUB_IDENT2] = V3D_HUB_CTL_IDENT2,
[DRM_VC5_PARAM_V3D_HUB_IDENT3] = V3D_HUB_CTL_IDENT3,
return true;
}
-static int vc5_wait_seqno_ioctl(int fd, uint64_t seqno, uint64_t timeout_ns)
-{
- struct drm_vc5_wait_seqno wait = {
- .seqno = seqno,
- .timeout_ns = timeout_ns,
- };
- int ret = vc5_ioctl(fd, DRM_IOCTL_VC5_WAIT_SEQNO, &wait);
- if (ret == -1)
- return -errno;
- else
- return 0;
-
-}
-
-bool
-vc5_wait_seqno(struct vc5_screen *screen, uint64_t seqno, uint64_t timeout_ns,
- const char *reason)
-{
- if (unlikely(V3D_DEBUG & V3D_DEBUG_PERF) && timeout_ns && reason) {
- if (vc5_wait_seqno_ioctl(screen->fd, seqno, 0) == -ETIME) {
- fprintf(stderr, "Blocking on seqno %lld for %s\n",
- (long long)seqno, reason);
- }
- }
-
- int ret = vc5_wait_seqno_ioctl(screen->fd, seqno, timeout_ns);
- if (ret) {
- if (ret != -ETIME) {
- fprintf(stderr, "wait failed: %d\n", ret);
- abort();
- }
-
- return false;
- }
-
- return true;
-}
-
static int vc5_wait_bo_ioctl(int fd, uint32_t handle, uint64_t timeout_ns)
{
struct drm_vc5_wait_bo wait = {
if (fence) {
struct pipe_screen *screen = pctx->screen;
- struct vc5_fence *f = vc5_fence_create(vc5->screen,
- vc5->last_emit_seqno);
+ struct vc5_fence *f = vc5_fence_create(vc5);
screen->fence_reference(screen, fence, NULL);
*fence = (struct pipe_fence_handle *)f;
}
vc5->screen = screen;
+ int ret = drmSyncobjCreate(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED,
+ &vc5->out_sync);
+ if (ret) {
+ ralloc_free(vc5);
+ return NULL;
+ }
+
pctx->screen = pscreen;
pctx->priv = priv;
pctx->destroy = vc5_context_destroy;
/** Maximum index buffer valid for the current shader_rec. */
uint32_t max_index;
- /** Seqno of the last CL flush's job. */
- uint64_t last_emit_seqno;
+ /** Sync object that our RCL will update as its out_sync. */
+ uint32_t out_sync;
struct u_upload_mgr *uploader;
void vc5_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info);
void vc5_blitter_save(struct vc5_context *vc5);
+struct vc5_fence *vc5_fence_create(struct vc5_context *vc5);
+
#ifdef v3dX
# include "v3dx_context.h"
#else
#endif
#define DRM_VC5_SUBMIT_CL 0x00
-#define DRM_VC5_WAIT_SEQNO 0x01
-#define DRM_VC5_WAIT_BO 0x02
-#define DRM_VC5_CREATE_BO 0x03
-#define DRM_VC5_MMAP_BO 0x04
-#define DRM_VC5_GET_PARAM 0x05
-#define DRM_VC5_GET_BO_OFFSET 0x06
+#define DRM_VC5_WAIT_BO 0x01
+#define DRM_VC5_CREATE_BO 0x02
+#define DRM_VC5_MMAP_BO 0x03
+#define DRM_VC5_GET_PARAM 0x04
+#define DRM_VC5_GET_BO_OFFSET 0x05
#define DRM_IOCTL_VC5_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC5_SUBMIT_CL, struct drm_vc5_submit_cl)
-#define DRM_IOCTL_VC5_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC5_WAIT_SEQNO, struct drm_vc5_wait_seqno)
#define DRM_IOCTL_VC5_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC5_WAIT_BO, struct drm_vc5_wait_bo)
#define DRM_IOCTL_VC5_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC5_CREATE_BO, struct drm_vc5_create_bo)
#define DRM_IOCTL_VC5_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC5_MMAP_BO, struct drm_vc5_mmap_bo)
/** End address of the RCL (first byte after the RCL) */
__u32 rcl_end;
+ /** An optional sync object to wait on before starting the BCL. */
+ __u32 in_sync_bcl;
+ /** An optional sync object to wait on before starting the RCL. */
+ __u32 in_sync_rcl;
+ /** An optional sync object to place the completion fence in. */
+ __u32 out_sync;
+
/* Offset of the tile alloc memory
*
* This is optional on V3D 3.3 (where the CL can set the value) but
*/
__u32 qma;
- /** Size of the tile alloc memory. */
+ /** Size of the tile alloc memory. */
__u32 qms;
- /** Offset of the tile state data array. */
+ /** Offset of the tile state data array. */
__u32 qts;
/* Pointer to a u32 array of the BOs that are referenced by the job.
*/
__u64 bo_handles;
- /* Pointer to an array of chunks of extra submit CL information. (the
- * chunk struct is not yet defined)
- */
- __u64 chunks;
-
/* Number of BO handles passed in (size is that times 4). */
__u32 bo_handle_count;
-
- __u32 chunk_count;
-
- __u64 flags;
-};
-
-/**
- * struct drm_vc5_wait_seqno - ioctl argument for waiting for
- * DRM_VC5_SUBMIT_CL completion using its returned seqno.
- *
- * timeout_ns is the timeout in nanoseconds, where "0" means "don't
- * block, just return the status."
- */
-struct drm_vc5_wait_seqno {
- __u64 seqno;
- __u64 timeout_ns;
};
/**
* Returned offset for the BO in the V3D address space. This offset
* is private to the DRM fd and is valid for the lifetime of the GEM
* handle.
+ *
+ * This offset value will always be nonzero, since various HW
+ * units treat 0 specially.
*/
__u32 offset;
};
};
enum drm_vc5_param {
- DRM_VC5_PARAM_V3D_HUB_UIFCFG,
- DRM_VC5_PARAM_V3D_HUB_IDENT1,
- DRM_VC5_PARAM_V3D_HUB_IDENT2,
- DRM_VC5_PARAM_V3D_HUB_IDENT3,
- DRM_VC5_PARAM_V3D_CORE0_IDENT0,
- DRM_VC5_PARAM_V3D_CORE0_IDENT1,
- DRM_VC5_PARAM_V3D_CORE0_IDENT2,
+ DRM_VC5_PARAM_V3D_UIFCFG,
+ DRM_VC5_PARAM_V3D_HUB_IDENT1,
+ DRM_VC5_PARAM_V3D_HUB_IDENT2,
+ DRM_VC5_PARAM_V3D_HUB_IDENT3,
+ DRM_VC5_PARAM_V3D_CORE0_IDENT0,
+ DRM_VC5_PARAM_V3D_CORE0_IDENT1,
+ DRM_VC5_PARAM_V3D_CORE0_IDENT2,
};
struct drm_vc5_get_param {
#include "util/u_inlines.h"
-#include "vc5_screen.h"
+#include "vc5_context.h"
#include "vc5_bufmgr.h"
struct vc5_fence {
struct pipe_reference reference;
- uint64_t seqno;
+ uint32_t sync;
};
static void
struct pipe_fence_handle **pp,
struct pipe_fence_handle *pf)
{
+ struct vc5_screen *screen = vc5_screen(pscreen);
struct vc5_fence **p = (struct vc5_fence **)pp;
struct vc5_fence *f = (struct vc5_fence *)pf;
struct vc5_fence *old = *p;
if (pipe_reference(&(*p)->reference, &f->reference)) {
+ drmSyncobjDestroy(screen->fd, old->sync);
free(old);
}
*p = f;
struct vc5_screen *screen = vc5_screen(pscreen);
struct vc5_fence *f = (struct vc5_fence *)pf;
- return vc5_wait_seqno(screen, f->seqno, timeout_ns, "fence wait");
+ /* drmSyncobjWait() returns 0 on success and a negative errno on
+ * timeout or error, so convert to the bool this hook must return.
+ *
+ * NOTE(review): DRM_IOCTL_SYNCOBJ_WAIT takes an *absolute*
+ * CLOCK_MONOTONIC timeout; passing the relative timeout_ns here
+ * may need conversion — verify against the kernel UAPI.
+ */
+ return drmSyncobjWait(screen->fd, &f->sync, 1, timeout_ns, 0, NULL) >= 0;
}
struct vc5_fence *
-vc5_fence_create(struct vc5_screen *screen, uint64_t seqno)
+vc5_fence_create(struct vc5_context *vc5)
{
struct vc5_fence *f = calloc(1, sizeof(*f));
-
if (!f)
return NULL;
+ uint32_t new_sync;
+ /* Make a new sync object for the context. */
+ int ret = drmSyncobjCreate(vc5->fd, DRM_SYNCOBJ_CREATE_SIGNALED,
+ &new_sync);
+ if (ret) {
+ free(f);
+ return NULL;
+ }
+
pipe_reference_init(&f->reference, 1);
- f->seqno = seqno;
+ f->sync = vc5->out_sync;
+ vc5->out_sync = new_sync;
return f;
}
v3d33_bcl_epilogue(vc5, job);
}
+ job->submit.out_sync = vc5->out_sync;
job->submit.bcl_end = job->bcl.bo->offset + cl_offset(&job->bcl);
job->submit.rcl_end = job->rcl.bo->offset + cl_offset(&job->rcl);
void
vc5_fence_init(struct vc5_screen *screen);
-struct vc5_fence *
-vc5_fence_create(struct vc5_screen *screen, uint64_t seqno);
-
#endif /* VC5_SCREEN_H */
return vc5_simulator_mmap_bo_ioctl(fd, args);
case DRM_IOCTL_VC5_WAIT_BO:
- case DRM_IOCTL_VC5_WAIT_SEQNO:
/* We do all of the vc5 rendering synchronously, so we just
* return immediately on the wait ioctls. This ignores any
* native rendering to the host BO, so it does mean we race on