* Fences for driver and IPC serialisation, scheduling and synchronisation.
*/
+#include <linux/sync_file.h>
+
#include "util/u_inlines.h"
+#include "intel/common/gen_gem.h"
#include "iris_batch.h"
#include "iris_bufmgr.h"
.flags = flags,
};
- drm_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
+ gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
return args.handle;
}
.handle = handle,
};
- drm_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
+ gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}
/**
* Make a new sync-point.
*/
-struct iris_syncpt *
-iris_create_syncpt(struct iris_screen *screen)
+struct iris_syncobj *
+iris_create_syncobj(struct iris_screen *screen)
{
- struct iris_syncpt *syncpt = malloc(sizeof(*syncpt));
+ struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));
- if (!syncpt)
+ if (!syncobj)
return NULL;
- syncpt->handle = gem_syncobj_create(screen->fd, 0);
- assert(syncpt->handle);
+ syncobj->handle = gem_syncobj_create(screen->fd, 0);
+ assert(syncobj->handle);
- pipe_reference_init(&syncpt->ref, 1);
+ pipe_reference_init(&syncobj->ref, 1);
- return syncpt;
+ return syncobj;
}
void
-iris_syncpt_destroy(struct iris_screen *screen, struct iris_syncpt *syncpt)
+iris_syncobj_destroy(struct iris_screen *screen, struct iris_syncobj *syncobj)
{
- gem_syncobj_destroy(screen->fd, syncpt->handle);
- free(syncpt);
+ gem_syncobj_destroy(screen->fd, syncobj->handle);
+ free(syncobj);
}
/**
* \p flags One of I915_EXEC_FENCE_WAIT or I915_EXEC_FENCE_SIGNAL.
*/
void
-iris_batch_add_syncpt(struct iris_batch *batch,
- struct iris_syncpt *syncpt,
- unsigned flags)
+iris_batch_add_syncobj(struct iris_batch *batch,
+ struct iris_syncobj *syncobj,
+ unsigned flags)
{
struct drm_i915_gem_exec_fence *fence =
- util_dynarray_grow(&batch->exec_fences, sizeof(*fence));
+ util_dynarray_grow(&batch->exec_fences, struct drm_i915_gem_exec_fence, 1);
*fence = (struct drm_i915_gem_exec_fence) {
- .handle = syncpt->handle,
+ .handle = syncobj->handle,
.flags = flags,
};
- struct iris_syncpt **store =
- util_dynarray_grow(&batch->syncpts, sizeof(*store));
+ struct iris_syncobj **store =
+ util_dynarray_grow(&batch->syncobjs, struct iris_syncobj *, 1);
*store = NULL;
- iris_syncpt_reference(batch->screen, store, syncpt);
+ iris_syncobj_reference(batch->screen, store, syncobj);
}
/* ------------------------------------------------------------------- */
struct pipe_fence_handle {
struct pipe_reference ref;
- struct iris_syncpt *syncpt[IRIS_BATCH_COUNT];
+ struct iris_seqno *seqno[IRIS_BATCH_COUNT];
unsigned count;
};
struct iris_screen *screen = (struct iris_screen *)p_screen;
for (unsigned i = 0; i < fence->count; i++)
- iris_syncpt_reference(screen, &fence->syncpt[i], NULL);
+ iris_seqno_reference(screen, &fence->seqno[i], NULL);
free(fence);
}
struct pipe_fence_handle **dst,
struct pipe_fence_handle *src)
{
- if (pipe_reference(&(*dst)->ref, &src->ref))
+ if (pipe_reference(*dst ? &(*dst)->ref : NULL,
+ src ? &src->ref : NULL))
iris_fence_destroy(p_screen, *dst);
*dst = src;
}
-static bool
-check_syncpt(struct pipe_screen *p_screen,
- struct iris_syncpt *syncpt)
+bool
+iris_wait_syncobj(struct pipe_screen *p_screen,
+ struct iris_syncobj *syncobj,
+ int64_t timeout_nsec)
{
- if (!syncpt)
+ if (!syncobj)
return false;
struct iris_screen *screen = (struct iris_screen *)p_screen;
struct drm_syncobj_wait args = {
- .handles = (uintptr_t)&syncpt->handle,
+ .handles = (uintptr_t)&syncobj->handle,
.count_handles = 1,
+ .timeout_nsec = timeout_nsec,
};
- return drm_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
+ return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}
+#define CSI "\e["
+#define BLUE_HEADER CSI "0;97;44m"
+#define NORMAL CSI "0m"
+
static void
iris_fence_flush(struct pipe_context *ctx,
struct pipe_fence_handle **out_fence,
{
struct iris_screen *screen = (void *) ctx->screen;
struct iris_context *ice = (struct iris_context *)ctx;
- struct iris_batch *batch[IRIS_BATCH_COUNT] = {
- &ice->render_batch,
- &ice->compute_batch,
- };
+
+ if (flags & PIPE_FLUSH_END_OF_FRAME) {
+ ice->frame++;
+
+ if (INTEL_DEBUG & DEBUG_SUBMIT) {
+ fprintf(stderr, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
+ (INTEL_DEBUG & DEBUG_COLOR) ? BLUE_HEADER : "",
+ ice->frame, ctx, ' ',
+ (INTEL_DEBUG & DEBUG_COLOR) ? NORMAL : "");
+ }
+ }
/* XXX PIPE_FLUSH_DEFERRED */
- for (unsigned i = 0; i < ARRAY_SIZE(batch); i++)
- iris_batch_flush(batch[i]);
+ for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++)
+ iris_batch_flush(&ice->batches[i]);
if (!out_fence)
return;
pipe_reference_init(&fence->ref, 1);
- for (unsigned b = 0; b < ARRAY_SIZE(batch); b++) {
- if (!check_syncpt(ctx->screen, batch[b]->last_syncpt))
+ for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
+ struct iris_batch *batch = &ice->batches[b];
+
+ if (iris_seqno_signaled(batch->last_seqno))
continue;
- iris_syncpt_reference(screen, &fence->syncpt[fence->count++],
- batch[b]->last_syncpt);
+ iris_seqno_reference(screen,
+ &fence->seqno[fence->count++],
+ batch->last_seqno);
}
+
+ iris_fence_reference(ctx->screen, out_fence, NULL);
*out_fence = fence;
}
struct pipe_fence_handle *fence)
{
struct iris_context *ice = (struct iris_context *)ctx;
- struct iris_batch *batch[IRIS_BATCH_COUNT] = {
- &ice->render_batch,
- &ice->compute_batch,
- };
- for (unsigned b = 0; b < ARRAY_SIZE(batch); b++) {
+
+ for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
+ struct iris_batch *batch = &ice->batches[b];
+
for (unsigned i = 0; i < fence->count; i++) {
- iris_batch_add_syncpt(batch[b], fence->syncpt[i],
- I915_EXEC_FENCE_WAIT);
+ struct iris_seqno *seqno = fence->seqno[i];
+
+ if (iris_seqno_signaled(seqno))
+ continue;
+
+ iris_batch_add_syncobj(batch, seqno->syncobj, I915_EXEC_FENCE_WAIT);
}
}
}
#define MSEC_PER_SEC (1000)
static uint64_t
-rel2abs(uint64_t timeout)
+gettime_ns(void)
{
- struct timespec ts;
- uint64_t now;
+ struct timespec current;
+ clock_gettime(CLOCK_MONOTONIC, &current);
+ return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
+}
- if (!timeout)
+static uint64_t
+rel2abs(uint64_t timeout)
+{
+ if (timeout == 0)
return 0;
- if (timeout == PIPE_TIMEOUT_INFINITE)
- return INT64_MAX;
-
- clock_gettime(CLOCK_MONOTONIC, &ts);
- now = ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
+ uint64_t current_time = gettime_ns();
+ uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;
- if (now > INT64_MAX - timeout)
- return INT64_MAX;
+ timeout = MIN2(max_timeout, timeout);
- return now + timeout;
+ return current_time + timeout;
}
-static boolean
+static bool
iris_fence_finish(struct pipe_screen *p_screen,
struct pipe_context *ctx,
struct pipe_fence_handle *fence,
if (!fence->count)
return true;
- uint32_t handles[ARRAY_SIZE(fence->syncpt)];
- for (unsigned i = 0; i < fence->count; i++)
- handles[i] = fence->syncpt[i]->handle;
+ unsigned int handle_count = 0;
+ uint32_t handles[ARRAY_SIZE(fence->seqno)];
+ for (unsigned i = 0; i < fence->count; i++) {
+ struct iris_seqno *seqno = fence->seqno[i];
+
+ if (iris_seqno_signaled(seqno))
+ continue;
+
+ handles[handle_count++] = seqno->syncobj->handle;
+ }
+
+ if (handle_count == 0)
+ return true;
struct drm_syncobj_wait args = {
.handles = (uintptr_t)handles,
- .count_handles = fence->count,
- .timeout_nsec = rel2abs(timeout), /* XXX */
+ .count_handles = handle_count,
+ .timeout_nsec = rel2abs(timeout),
.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
};
- return drm_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
+ return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
}
-#ifndef SYNC_IOC_MAGIC
-/* duplicated from linux/sync_file.h to avoid build-time dependency
- * on new (v4.7) kernel headers. Once distro's are mostly using
- * something newer than v4.7 drop this and #include <linux/sync_file.h>
- * instead.
- */
-struct sync_merge_data {
- char name[32];
- __s32 fd2;
- __s32 fence;
- __u32 flags;
- __u32 pad;
-};
-
-#define SYNC_IOC_MAGIC '>'
-#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
-#endif
-
static int
sync_merge_fd(int sync_fd, int new_fd)
{
.fence = -1,
};
- drm_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
+ gen_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
close(new_fd);
close(sync_fd);
struct iris_screen *screen = (struct iris_screen *)p_screen;
int fd = -1;
+ if (fence->count == 0) {
+ /* Our fence has no syncobj's recorded. This means that all of the
+ * batches had already completed, their syncobj's had been signalled,
+ * and so we didn't bother to record them. But we're being asked to
+ * export such a fence. So export a dummy already-signalled syncobj.
+ */
+ struct drm_syncobj_handle args = {
+ .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, .fd = -1,
+ };
+
+ args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
+ gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
+ gem_syncobj_destroy(screen->fd, args.handle);
+ return args.fd;
+ }
+
for (unsigned i = 0; i < fence->count; i++) {
+ struct iris_seqno *seqno = fence->seqno[i];
+
+ if (iris_seqno_signaled(seqno))
+ continue;
+
struct drm_syncobj_handle args = {
- .handle = fence->syncpt[i]->handle,
+ .handle = seqno->syncobj->handle,
.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
.fd = -1,
};
- drm_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
+ gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
fd = sync_merge_fd(fd, args.fd);
}
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
struct drm_syncobj_handle args = {
+ .handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED),
.flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
.fd = fd,
};
- drm_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
+ if (gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args) == -1) {
+ fprintf(stderr, "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %s\n",
+ strerror(errno));
+ gem_syncobj_destroy(screen->fd, args.handle);
+ *out = NULL;
+ return;
+ }
- struct iris_syncpt *syncpt = malloc(sizeof(*syncpt));
- syncpt->handle = args.handle;
- pipe_reference_init(&syncpt->ref, 1);
+ struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));
+ if (!syncobj) {
+ *out = NULL;
+ return;
+ }
+ syncobj->handle = args.handle;
+ pipe_reference_init(&syncobj->ref, 1);
+
+ struct iris_seqno *seqno = malloc(sizeof(*seqno));
+ if (!seqno) {
+ free(syncobj);
+ *out = NULL;
+ return;
+ }
+
+ static const uint32_t zero = 0;
+
+ /* Fences work in terms of iris_seqno, but we don't actually have a
+ * seqno for an imported fence. So, create a fake one which always
+ * returns as 'not signaled' so we fall back to using the sync object.
+ */
+ seqno->seqno = UINT32_MAX;
+ seqno->map = &zero;
+ seqno->syncobj = syncobj;
+ seqno->flags = IRIS_SEQNO_END;
+ pipe_reference_init(&seqno->reference, 1);
struct pipe_fence_handle *fence = malloc(sizeof(*fence));
+ if (!fence) {
+ free(seqno);
+ free(syncobj);
+ *out = NULL;
+ return;
+ }
pipe_reference_init(&fence->ref, 1);
- fence->syncpt[0] = syncpt;
+ fence->seqno[0] = seqno;
fence->count = 1;
*out = fence;