iris_draw.c \
iris_fence.c \
iris_fence.h \
+ iris_fine_fence.c \
+ iris_fine_fence.h \
iris_formats.c \
iris_genx_macros.h \
iris_genx_protos.h \
iris_resource.h \
iris_screen.c \
iris_screen.h \
- iris_seqno.c \
- iris_seqno.h \
batch->state_sizes = ice->state.sizes;
batch->name = name;
- batch->seqno.uploader =
+ batch->fine_fences.uploader =
u_upload_create(&ice->ctx, 4096, PIPE_BIND_CUSTOM,
PIPE_USAGE_STAGING, 0);
- iris_seqno_init(batch);
+ iris_fine_fence_init(batch);
batch->hw_ctx_id = iris_create_hw_context(screen->bufmgr);
assert(batch->hw_ctx_id);
((other_entry->flags & EXEC_OBJECT_WRITE) || writable)) {
iris_batch_flush(batch->other_batches[b]);
iris_batch_add_syncobj(batch,
- batch->other_batches[b]->last_seqno->syncobj,
+ batch->other_batches[b]->last_fence->syncobj,
I915_EXEC_FENCE_WAIT);
}
}
ralloc_free(batch->exec_fences.mem_ctx);
- pipe_resource_reference(&batch->seqno.ref.res, NULL);
+ pipe_resource_reference(&batch->fine_fences.ref.res, NULL);
util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
iris_syncobj_reference(screen, s, NULL);
ralloc_free(batch->syncobjs.mem_ctx);
- iris_seqno_reference(batch->screen, &batch->last_seqno, NULL);
- u_upload_destroy(batch->seqno.uploader);
+ iris_fine_fence_reference(batch->screen, &batch->last_fence, NULL);
+ u_upload_destroy(batch->fine_fences.uploader);
iris_bo_unreference(batch->bo);
batch->bo = NULL;
static void
finish_seqno(struct iris_batch *batch)
{
- struct iris_seqno *sq = iris_seqno_new(batch, IRIS_SEQNO_END);
+ struct iris_fine_fence *sq = iris_fine_fence_new(batch, IRIS_FENCE_END);
if (!sq)
return;
- iris_seqno_reference(batch->screen, &batch->last_seqno, sq);
- iris_seqno_reference(batch->screen, &sq, NULL);
+ iris_fine_fence_reference(batch->screen, &batch->last_fence, sq);
+ iris_fine_fence_reference(batch->screen, &sq, NULL);
}
/**
#include "common/gen_decoder.h"
#include "iris_fence.h"
-#include "iris_seqno.h"
+#include "iris_fine_fence.h"
struct iris_context;
/** The sequence number to write the next time we add a fence. */
uint32_t next;
- } seqno;
+ } fine_fences;
/** A seqno (and syncobj) for the last batch that was submitted. */
- struct iris_seqno *last_seqno;
+ struct iris_fine_fence *last_fence;
/** List of other batches which we might need to flush to use a BO */
struct iris_batch *other_batches[IRIS_BATCH_COUNT - 1];
struct pipe_context *unflushed_ctx;
- struct iris_seqno *seqno[IRIS_BATCH_COUNT];
+ struct iris_fine_fence *fine[IRIS_BATCH_COUNT];
};
static void
{
struct iris_screen *screen = (struct iris_screen *)p_screen;
- for (unsigned i = 0; i < ARRAY_SIZE(fence->seqno); i++)
- iris_seqno_reference(screen, &fence->seqno[i], NULL);
+ for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++)
+ iris_fine_fence_reference(screen, &fence->fine[i], NULL);
free(fence);
}
struct iris_batch *batch = &ice->batches[b];
if (deferred && iris_batch_bytes_used(batch) > 0) {
- struct iris_seqno *seqno =
- iris_seqno_new(batch, IRIS_SEQNO_BOTTOM_OF_PIPE);
- iris_seqno_reference(screen, &fence->seqno[b], seqno);
- iris_seqno_reference(screen, &seqno, NULL);
+ struct iris_fine_fence *fine =
+ iris_fine_fence_new(batch, IRIS_FENCE_BOTTOM_OF_PIPE);
+ iris_fine_fence_reference(screen, &fence->fine[b], fine);
+ iris_fine_fence_reference(screen, &fine, NULL);
} else {
/* This batch has no commands queued up (perhaps we just flushed,
* or all the commands are on the other batch). Wait for the last
* syncobj on this engine - unless it's already finished by now.
*/
- if (iris_seqno_signaled(batch->last_seqno))
+ if (iris_fine_fence_signaled(batch->last_fence))
continue;
- iris_seqno_reference(screen, &fence->seqno[b], batch->last_seqno);
+ iris_fine_fence_reference(screen, &fence->fine[b], batch->last_fence);
}
}
for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
struct iris_batch *batch = &ice->batches[b];
- for (unsigned i = 0; i < ARRAY_SIZE(fence->seqno); i++) {
- struct iris_seqno *seqno = fence->seqno[i];
+ for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
+ struct iris_fine_fence *fine = fence->fine[i];
- if (iris_seqno_signaled(seqno))
+ if (iris_fine_fence_signaled(fine))
continue;
iris_batch_flush(batch);
- iris_batch_add_syncobj(batch, seqno->syncobj, I915_EXEC_FENCE_WAIT);
+ iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_WAIT);
}
}
}
*/
if (ctx && ctx == fence->unflushed_ctx) {
for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++) {
- struct iris_seqno *seqno = fence->seqno[i];
+ struct iris_fine_fence *fine = fence->fine[i];
- if (iris_seqno_signaled(seqno))
+ if (iris_fine_fence_signaled(fine))
continue;
- if (seqno->syncobj == iris_batch_get_signal_syncobj(&ice->batches[i]))
+ if (fine->syncobj == iris_batch_get_signal_syncobj(&ice->batches[i]))
iris_batch_flush(&ice->batches[i]);
}
}
unsigned int handle_count = 0;
- uint32_t handles[ARRAY_SIZE(fence->seqno)];
- for (unsigned i = 0; i < ARRAY_SIZE(fence->seqno); i++) {
- struct iris_seqno *seqno = fence->seqno[i];
+ uint32_t handles[ARRAY_SIZE(fence->fine)];
+ for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
+ struct iris_fine_fence *fine = fence->fine[i];
- if (iris_seqno_signaled(seqno))
+ if (iris_fine_fence_signaled(fine))
continue;
- handles[handle_count++] = seqno->syncobj->handle;
+ handles[handle_count++] = fine->syncobj->handle;
}
if (handle_count == 0)
if (fence->unflushed_ctx)
return -1;
- for (unsigned i = 0; i < ARRAY_SIZE(fence->seqno); i++) {
- struct iris_seqno *seqno = fence->seqno[i];
+ for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
+ struct iris_fine_fence *fine = fence->fine[i];
- if (iris_seqno_signaled(seqno))
+ if (iris_fine_fence_signaled(fine))
continue;
struct drm_syncobj_handle args = {
- .handle = seqno->syncobj->handle,
+ .handle = fine->syncobj->handle,
.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
.fd = -1,
};
syncobj->handle = args.handle;
pipe_reference_init(&syncobj->ref, 1);
- struct iris_seqno *seqno = calloc(1, sizeof(*seqno));
- if (!seqno) {
+ struct iris_fine_fence *fine = calloc(1, sizeof(*fine));
+ if (!fine) {
free(syncobj);
*out = NULL;
return;
static const uint32_t zero = 0;
- /* Fences work in terms of iris_seqno, but we don't actually have a
+ /* Fences work in terms of iris_fine_fence, but we don't actually have a
* seqno for an imported fence. So, create a fake one which always
* returns as 'not signaled' so we fall back to using the sync object.
*/
- seqno->seqno = UINT32_MAX;
- seqno->map = &zero;
- seqno->syncobj = syncobj;
- seqno->flags = IRIS_SEQNO_END;
- pipe_reference_init(&seqno->reference, 1);
+ fine->seqno = UINT32_MAX;
+ fine->map = &zero;
+ fine->syncobj = syncobj;
+ fine->flags = IRIS_FENCE_END;
+ pipe_reference_init(&fine->reference, 1);
struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
if (!fence) {
- free(seqno);
+ free(fine);
free(syncobj);
*out = NULL;
return;
}
pipe_reference_init(&fence->ref, 1);
- fence->seqno[0] = seqno;
+ fence->fine[0] = fine;
*out = fence;
}
--- /dev/null
+#include "iris_context.h"
+#include "iris_fine_fence.h"
+#include "util/u_upload_mgr.h"
+
+static void
+iris_fine_fence_reset(struct iris_batch *batch)
+{
+ u_upload_alloc(batch->fine_fences.uploader,
+ 0, sizeof(uint64_t), sizeof(uint64_t),
+ &batch->fine_fences.ref.offset, &batch->fine_fences.ref.res,
+ (void **)&batch->fine_fences.map);
+ WRITE_ONCE(*batch->fine_fences.map, 0);
+ batch->fine_fences.next++;
+}
+
+void
+iris_fine_fence_init(struct iris_batch *batch)
+{
+ batch->fine_fences.ref.res = NULL;
+ batch->fine_fences.next = 0;
+ iris_fine_fence_reset(batch);
+}
+
+static uint32_t
+iris_fine_fence_next(struct iris_batch *batch)
+{
+ uint32_t seqno = batch->fine_fences.next++;
+
+ if (batch->fine_fences.next == 0)
+ iris_fine_fence_reset(batch);
+
+ return seqno;
+}
+
+void
+iris_fine_fence_destroy(struct iris_screen *screen,
+ struct iris_fine_fence *fine)
+{
+ iris_syncobj_reference(screen, &fine->syncobj, NULL);
+ pipe_resource_reference(&fine->ref.res, NULL);
+ free(fine);
+}
+
+struct iris_fine_fence *
+iris_fine_fence_new(struct iris_batch *batch, unsigned flags)
+{
+ struct iris_fine_fence *fine = calloc(1, sizeof(*fine));
+ if (!fine)
+ return NULL;
+
+ pipe_reference_init(&fine->reference, 1);
+
+ fine->seqno = iris_fine_fence_next(batch);
+
+ iris_syncobj_reference(batch->screen, &fine->syncobj,
+ iris_batch_get_signal_syncobj(batch));
+
+ pipe_resource_reference(&fine->ref.res, batch->fine_fences.ref.res);
+ fine->ref.offset = batch->fine_fences.ref.offset;
+ fine->map = batch->fine_fences.map;
+ fine->flags = flags;
+
+ unsigned pc;
+ if (flags & IRIS_FENCE_TOP_OF_PIPE) {
+ pc = PIPE_CONTROL_WRITE_IMMEDIATE | PIPE_CONTROL_CS_STALL;
+ } else {
+ pc = PIPE_CONTROL_WRITE_IMMEDIATE |
+ PIPE_CONTROL_RENDER_TARGET_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DATA_CACHE_FLUSH;
+ }
+ iris_emit_pipe_control_write(batch, "fence: fine", pc,
+ iris_resource_bo(fine->ref.res),
+ fine->ref.offset,
+ fine->seqno);
+
+ return fine;
+}
--- /dev/null
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef IRIS_FINE_FENCE_DOT_H
+#define IRIS_FINE_FENCE_DOT_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "iris_screen.h"
+#include "iris_resource.h"
+
+/**
+ * A lightweight sequence number fence.
+ *
+ * We emit PIPE_CONTROLs inside a batch (possibly in the middle)
+ * which update a monotonically increasing, 32-bit counter. We
+ * can then check if that moment has passed by either:
+ *
+ * 1. Checking on the CPU by snooping on the DWord via a coherent map
+ *
+ * 2. Blocking on the GPU with MI_SEMAPHORE_WAIT from a second batch
+ * (relying on mid-batch preemption to switch GPU execution to the
+ * batch that writes it).
+ */
+struct iris_fine_fence {
+ struct pipe_reference reference;
+
+ /** Buffer where the seqno lives */
+ struct iris_state_ref ref;
+
+ /** Coherent CPU map of the buffer containing the seqno DWord. */
+ const uint32_t *map;
+
+ /**
+ * A drm_syncobj which will be signaled at the end of the
+ * batch which writes this seqno. This can be used to block until
+ * the seqno has definitely passed (but may wait longer than necessary).
+ */
+ struct iris_syncobj *syncobj;
+
+#define IRIS_FENCE_BOTTOM_OF_PIPE 0x0 /**< Written by bottom-of-pipe flush */
+#define IRIS_FENCE_TOP_OF_PIPE 0x1 /**< Written by top-of-pipe flush */
+#define IRIS_FENCE_END 0x2 /**< Written at the end of a batch */
+
+ /** Information about the type of flush involved (see IRIS_FENCE_*) */
+ uint32_t flags;
+
+ /**
+ * Sequence number expected to be written by the flush we inserted
+ * when creating this fence. The iris_fine_fence is 'signaled' when *@map
+ * (written by the flush on the GPU) is greater-than-or-equal to @seqno.
+ */
+ uint32_t seqno;
+};
+
+void iris_fine_fence_init(struct iris_batch *batch);
+
+struct iris_fine_fence *iris_fine_fence_new(struct iris_batch *batch, unsigned flags);
+
+void iris_fine_fence_destroy(struct iris_screen *screen, struct iris_fine_fence *sq);
+
+static inline void
+iris_fine_fence_reference(struct iris_screen *screen,
+ struct iris_fine_fence **dst,
+ struct iris_fine_fence *src)
+{
+ if (pipe_reference(&(*dst)->reference, &src->reference))
+ iris_fine_fence_destroy(screen, *dst);
+
+ *dst = src;
+}
+
+/**
+ * Return true if this seqno has passed.
+ *
+ * NULL is considered signaled.
+ */
+static inline bool
+iris_fine_fence_signaled(const struct iris_fine_fence *sq)
+{
+ return !sq || (READ_ONCE(*sq->map) >= sq->seqno);
+}
+
+#endif
+++ /dev/null
-#include "iris_context.h"
-#include "iris_seqno.h"
-#include "util/u_upload_mgr.h"
-
-static void
-iris_seqno_reset(struct iris_batch *batch)
-{
- u_upload_alloc(batch->seqno.uploader, 0, sizeof(uint64_t), sizeof(uint64_t),
- &batch->seqno.ref.offset, &batch->seqno.ref.res,
- (void **)&batch->seqno.map);
- WRITE_ONCE(*batch->seqno.map, 0);
- batch->seqno.next++;
-}
-
-void
-iris_seqno_init(struct iris_batch *batch)
-{
- batch->seqno.ref.res = NULL;
- batch->seqno.next = 0;
- iris_seqno_reset(batch);
-}
-
-static uint32_t
-iris_seqno_next(struct iris_batch *batch)
-{
- uint32_t seqno = batch->seqno.next++;
-
- if (batch->seqno.next == 0)
- iris_seqno_reset(batch);
-
- return seqno;
-}
-
-void
-iris_seqno_destroy(struct iris_screen *screen, struct iris_seqno *sq)
-{
- iris_syncobj_reference(screen, &sq->syncobj, NULL);
- pipe_resource_reference(&sq->ref.res, NULL);
- free(sq);
-}
-
-struct iris_seqno *
-iris_seqno_new(struct iris_batch *batch, unsigned flags)
-{
- struct iris_seqno *sq = calloc(1, sizeof(*sq));
- if (!sq)
- return NULL;
-
- pipe_reference_init(&sq->reference, 1);
-
- sq->seqno = iris_seqno_next(batch);
-
- iris_syncobj_reference(batch->screen, &sq->syncobj,
- iris_batch_get_signal_syncobj(batch));
-
- pipe_resource_reference(&sq->ref.res, batch->seqno.ref.res);
- sq->ref.offset = batch->seqno.ref.offset;
- sq->map = batch->seqno.map;
- sq->flags = flags;
-
- unsigned pc;
- if (flags & IRIS_SEQNO_TOP_OF_PIPE) {
- pc = PIPE_CONTROL_WRITE_IMMEDIATE | PIPE_CONTROL_CS_STALL;
- } else {
- pc = PIPE_CONTROL_WRITE_IMMEDIATE |
- PIPE_CONTROL_RENDER_TARGET_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DATA_CACHE_FLUSH;
- }
- iris_emit_pipe_control_write(batch, "fence: seqno", pc,
- iris_resource_bo(sq->ref.res),
- sq->ref.offset,
- sq->seqno);
-
- return sq;
-}
+++ /dev/null
-/*
- * Copyright © 2020 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef IRIS_SEQNO_DOT_H
-#define IRIS_SEQNO_DOT_H
-
-#include <stdbool.h>
-#include <stdint.h>
-
-#include "iris_screen.h"
-#include "iris_resource.h"
-
-/**
- * A lightweight sequence number fence.
- *
- * We emit PIPE_CONTROLs inside a batch (possibly in the middle)
- * which update a monotonically increasing, 32-bit counter. We
- * can then check if that moment has passed by either:
- *
- * 1. Checking on the CPU by snooping on the DWord via a coherent map
- *
- * 2. Blocking on the GPU with MI_SEMAPHORE_WAIT from a second batch
- * (relying on mid-batch preemption to switch GPU execution to the
- * batch that writes it).
- */
-struct iris_seqno {
- struct pipe_reference reference;
-
- /** Buffer where the seqno lives */
- struct iris_state_ref ref;
-
- /** Coherent CPU map of the buffer containing the seqno DWord. */
- const uint32_t *map;
-
- /**
- * A drm_syncobj pointing which will be signaled at the end of the
- * batch which writes this seqno. This can be used to block until
- * the seqno has definitely passed (but may wait longer than necessary).
- */
- struct iris_syncobj *syncobj;
-
-#define IRIS_SEQNO_BOTTOM_OF_PIPE 0x0 /**< Written by bottom-of-pipe flush */
-#define IRIS_SEQNO_TOP_OF_PIPE 0x1 /**< Written by top-of-pipe flush */
-#define IRIS_SEQNO_END 0x2 /**< Written at the end of a batch */
-
- /** Information about the type of flush involved (see IRIS_SEQNO_*) */
- uint32_t flags;
-
- /**
- * Sequence number expected to be written by the flush we inserted
- * when creating this fence. The iris_seqno is 'signaled' when *@map
- * (written by the flush on the GPU) is greater-than-or-equal to @seqno.
- */
- uint32_t seqno;
-};
-
-void iris_seqno_init(struct iris_batch *batch);
-
-struct iris_seqno *iris_seqno_new(struct iris_batch *batch, unsigned flags);
-
-void iris_seqno_destroy(struct iris_screen *screen, struct iris_seqno *sq);
-
-static inline void
-iris_seqno_reference(struct iris_screen *screen,
- struct iris_seqno **dst,
- struct iris_seqno *src)
-{
- if (pipe_reference(&(*dst)->reference, &src->reference))
- iris_seqno_destroy(screen, *dst);
-
- *dst = src;
-}
-
-/**
- * Return true if this seqno has passed.
- *
- * NULL is considered signaled.
- */
-static inline bool
-iris_seqno_signaled(const struct iris_seqno *sq)
-{
- return !sq || (READ_ONCE(*sq->map) >= sq->seqno);
-}
-
-#endif
'iris_draw.c',
'iris_fence.c',
'iris_fence.h',
+ 'iris_fine_fence.c',
+ 'iris_fine_fence.h',
'iris_formats.c',
'iris_genx_macros.h',
'iris_genx_protos.h',
'iris_resource.h',
'iris_screen.c',
'iris_screen.h',
- 'iris_seqno.c',
- 'iris_seqno.h',
'iris_disk_cache.c',
)