#include "intel/common/gen_gem.h"
#include "util/hash_table.h"
#include "util/set.h"
+#include "util/u_upload_mgr.h"
#include "main/macros.h"
#include <errno.h>
#define FILE_DEBUG_FLAG DEBUG_BUFMGR
-/* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END
- * or 12 bytes for MI_BATCH_BUFFER_START (when chaining). Plus, we may
- * need an extra 4 bytes to pad out to the nearest QWord. So reserve 16.
- */
-#define BATCH_RESERVED 16
-
static void
iris_batch_reset(struct iris_batch *batch);
uint64_t flags = batch->validation_list[i].flags;
assert(batch->validation_list[i].handle ==
batch->exec_bos[i]->gem_handle);
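+ /* The exec object's offset is a kernel __u64, which may not be the same
+ * type as uint64_t on every ABI; cast so PRIx64 is portable.
+ */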
- fprintf(stderr, "[%2d]: %2d %-14s @ 0x%016llx (%"PRIu64"B)\t %2d refs %s\n",
+ fprintf(stderr, "[%2d]: %2d %-14s @ 0x%016"PRIx64" (%"PRIu64"B)\t %2d refs %s\n",
i,
batch->validation_list[i].handle,
batch->exec_bos[i]->name,
- batch->validation_list[i].offset,
+ (uint64_t)batch->validation_list[i].offset,
batch->exec_bos[i]->size,
batch->exec_bos[i]->refcount,
(flags & EXEC_OBJECT_WRITE) ? " (write)" : "");
}
void
-iris_init_batch(struct iris_batch *batch,
- struct iris_screen *screen,
- struct iris_vtable *vtbl,
- struct pipe_debug_callback *dbg,
- struct pipe_device_reset_callback *reset,
- struct hash_table_u64 *state_sizes,
- struct iris_batch *all_batches,
+iris_init_batch(struct iris_context *ice,
enum iris_batch_name name,
int priority)
{
+ struct iris_batch *batch = &ice->batches[name];
+ struct iris_screen *screen = (void *) ice->ctx.screen;
+
batch->screen = screen;
- batch->vtbl = vtbl;
- batch->dbg = dbg;
- batch->reset = reset;
- batch->state_sizes = state_sizes;
+ batch->dbg = &ice->dbg;
+ batch->reset = &ice->reset;
+ batch->state_sizes = ice->state.sizes;
batch->name = name;
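+ /* Fine fences are lightweight seqno writes; create the streaming uploader
+ * that sub-allocates the buffer space they are written into.
+ */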
+ batch->fine_fences.uploader =
+ u_upload_create(&ice->ctx, 4096, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_STAGING, 0);
+ iris_fine_fence_init(batch);
+
batch->hw_ctx_id = iris_create_hw_context(screen->bufmgr);
assert(batch->hw_ctx_id);
iris_hw_context_set_priority(screen->bufmgr, batch->hw_ctx_id, priority);
util_dynarray_init(&batch->exec_fences, ralloc_context(NULL));
- util_dynarray_init(&batch->syncpts, ralloc_context(NULL));
+ util_dynarray_init(&batch->syncobjs, ralloc_context(NULL));
batch->exec_count = 0;
batch->exec_array_size = 100;
batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
- batch->cache.depth = _mesa_set_create(NULL, _mesa_hash_pointer,
- _mesa_key_pointer_equal);
memset(batch->other_batches, 0, sizeof(batch->other_batches));
for (int i = 0, j = 0; i < IRIS_BATCH_COUNT; i++) {
- if (&all_batches[i] != batch)
- batch->other_batches[j++] = &all_batches[i];
+ if (i != name)
+ batch->other_batches[j++] = &ice->batches[i];
}
if (unlikely(INTEL_DEBUG)) {
void
iris_use_pinned_bo(struct iris_batch *batch,
struct iris_bo *bo,
- bool writable)
+ bool writable, enum iris_domain access)
{
assert(bo->kflags & EXEC_OBJECT_PINNED);
if (bo == batch->screen->workaround_bo)
writable = false;
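+ /* When the caller names a cache domain, record this batch's upcoming seqno
+ * as the BO's most recent access in that domain, so later barriers know
+ * which caches to flush or invalidate.
+ */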
+ if (access < NUM_IRIS_DOMAINS) {
+ assert(batch->sync_region_depth);
+ iris_bo_bump_seqno(bo, batch->next_seqno, access);
+ }
+
struct drm_i915_gem_exec_object2 *existing_entry =
find_validation_entry(batch, bo);
if (other_entry &&
((other_entry->flags & EXEC_OBJECT_WRITE) || writable)) {
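+ /* Cross-batch hazard: submit the other batch now and make this one wait
+ * on its completion fence before executing.
+ */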
iris_batch_flush(batch->other_batches[b]);
- iris_batch_add_syncpt(batch, batch->other_batches[b]->last_syncpt,
- I915_EXEC_FENCE_WAIT);
+ iris_batch_add_syncobj(batch,
+ batch->other_batches[b]->last_fence->syncobj,
+ I915_EXEC_FENCE_WAIT);
}
}
}
batch->map = iris_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
batch->map_next = batch->map;
- iris_use_pinned_bo(batch, batch->bo, false);
+ iris_use_pinned_bo(batch, batch->bo, false, IRIS_DOMAIN_NONE);
}
static void
create_batch(batch);
assert(batch->bo->index == 0);
- struct iris_syncpt *syncpt = iris_create_syncpt(screen);
- iris_batch_add_syncpt(batch, syncpt, I915_EXEC_FENCE_SIGNAL);
- iris_syncpt_reference(screen, &syncpt, NULL);
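+ /* Every batch carries a syncobj that the kernel signals on completion,
+ * so other batches and fences can wait on this one.
+ */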
+ struct iris_syncobj *syncobj = iris_create_syncobj(screen);
+ iris_batch_add_syncobj(batch, syncobj, I915_EXEC_FENCE_SIGNAL);
+ iris_syncobj_reference(screen, &syncobj, NULL);
- iris_cache_sets_clear(batch);
+ assert(!batch->sync_region_depth);
+ iris_batch_sync_boundary(batch);
+ iris_batch_mark_reset_sync(batch);
+
+ /* Always add the workaround BO, as it contains a driver identifier at the
+ * beginning that is quite helpful when debugging error states.
+ */
+ iris_use_pinned_bo(batch, screen->workaround_bo, false, IRIS_DOMAIN_NONE);
iris_batch_maybe_noop(batch);
}
ralloc_free(batch->exec_fences.mem_ctx);
- util_dynarray_foreach(&batch->syncpts, struct iris_syncpt *, s)
- iris_syncpt_reference(screen, s, NULL);
- ralloc_free(batch->syncpts.mem_ctx);
+ pipe_resource_reference(&batch->fine_fences.ref.res, NULL);
+
+ util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
+ iris_syncobj_reference(screen, s, NULL);
+ ralloc_free(batch->syncobjs.mem_ctx);
- iris_syncpt_reference(screen, &batch->last_syncpt, NULL);
+ iris_fine_fence_reference(batch->screen, &batch->last_fence, NULL);
+ u_upload_destroy(batch->fine_fences.uploader);
iris_bo_unreference(batch->bo);
batch->bo = NULL;
iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);
_mesa_hash_table_destroy(batch->cache.render, NULL);
- _mesa_set_destroy(batch->cache.depth, NULL);
if (unlikely(INTEL_DEBUG))
gen_batch_decode_ctx_finish(&batch->decoder);
}
}
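+
+/* Allocate a final "fine fence" marking the end of this batch, and publish
+ * it as batch->last_fence so others can wait on this batch's completion.
+ */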
+static void
+finish_seqno(struct iris_batch *batch)
+{
+ struct iris_fine_fence *sq = iris_fine_fence_new(batch, IRIS_FENCE_END);
+ if (!sq)
+ return;
+
+ iris_fine_fence_reference(batch->screen, &batch->last_fence, sq);
+ iris_fine_fence_reference(batch->screen, &sq, NULL);
+}
+
/**
* Terminate a batch with MI_BATCH_BUFFER_END.
*/
{
add_aux_map_bos_to_batch(batch);
+ finish_seqno(batch);
+
/* Emit MI_BATCH_BUFFER_END to finish our batch. */
uint32_t *map = batch->map_next;
/**
* Flush the batch buffer, submitting it to the GPU and resetting it so
* we're ready to emit the next batch.
- *
- * \param in_fence_fd is ignored if -1. Otherwise, this function takes
- * ownership of the fd.
- *
- * \param out_fence_fd is ignored if NULL. Otherwise, the caller must
- * take ownership of the returned fd.
*/
void
_iris_batch_flush(struct iris_batch *batch, const char *file, int line)
if (unlikely(INTEL_DEBUG &
(DEBUG_BATCH | DEBUG_SUBMIT | DEBUG_PIPE_CONTROL))) {
+ const char *basefile = strstr(file, "iris/");
+ if (basefile)
+ file = basefile + 5;
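+ /* e.g. "src/gallium/drivers/iris/iris_batch.c" prints as "iris_batch.c" */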
+
fprintf(stderr, "%19s:%-3d: %s batch [%u] flush with %5db (%0.1f%%) "
"(cmds), %4d BOs (%0.1fMb aperture)\n",
file, line, batch_name_to_string(batch->name), batch->hw_ctx_id,
batch->exec_count = 0;
batch->aperture_space = 0;
- struct iris_syncpt *syncpt =
- ((struct iris_syncpt **) util_dynarray_begin(&batch->syncpts))[0];
- iris_syncpt_reference(screen, &batch->last_syncpt, syncpt);
-
- util_dynarray_foreach(&batch->syncpts, struct iris_syncpt *, s)
- iris_syncpt_reference(screen, s, NULL);
- util_dynarray_clear(&batch->syncpts);
+ util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
+ iris_syncobj_reference(screen, s, NULL);
+ util_dynarray_clear(&batch->syncobjs);
util_dynarray_clear(&batch->exec_fences);
*/
if (ret == -EIO && replace_hw_ctx(batch)) {
if (batch->reset->reset) {
- /* Tell the state tracker the device is lost and it was our fault. */
+ /* Tell gallium frontends the device is lost and it was our fault. */
batch->reset->reset(batch->reset->data, PIPE_GUILTY_CONTEXT_RESET);
}
}
/**
- * Updates the state of the noop feature.
+ * Updates the state of the noop feature. Returns true on a noop -> not-noop
+ * transition, in which case the caller must re-emit all of its state.
*/
-uint64_t
-iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable, uint64_t dirty_flags)
+bool
+iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable)
{
if (batch->noop_enabled == noop_enable)
- return 0;
+ return false;
/* We only need to update the entire state if we transition from noop ->
* not-noop.
*/
- return !batch->noop_enabled ? dirty_flags : 0;
+ return !batch->noop_enabled;
}
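
For context on the new boolean: the caller is expected to re-emit its dirty
state when this returns true. A minimal sketch of such a caller, assuming the
gallium set_frontend_noop hook and the IRIS_ALL_DIRTY_FOR_RENDER/COMPUTE
masks from iris_context.h (illustrative only, not part of this patch):

static void
iris_set_frontend_noop(struct pipe_context *ctx, bool enable)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   /* Re-emit all state only on the noop -> not-noop transition. */
   if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER], enable))
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER;

   if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE], enable))
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;
}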