#include "intel/common/gen_gem.h"
#include "util/hash_table.h"
#include "util/set.h"
+#include "util/u_upload_mgr.h"
#include "main/macros.h"
#include <errno.h>
uint64_t flags = batch->validation_list[i].flags;
assert(batch->validation_list[i].handle ==
batch->exec_bos[i]->gem_handle);
- fprintf(stderr, "[%2d]: %2d %-14s @ 0x%016llx (%"PRIu64"B)\t %2d refs %s\n",
+ fprintf(stderr, "[%2d]: %2d %-14s @ 0x%016"PRIx64" (%"PRIu64"B)\t %2d refs %s\n",
i,
batch->validation_list[i].handle,
batch->exec_bos[i]->name,
- batch->validation_list[i].offset,
+ (uint64_t)batch->validation_list[i].offset,
batch->exec_bos[i]->size,
batch->exec_bos[i]->refcount,
(flags & EXEC_OBJECT_WRITE) ? " (write)" : "");
batch->state_sizes = ice->state.sizes;
batch->name = name;
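+
+   /* Fine-grained fences store their sequence numbers in small GPU-visible
+    * buffers; suballocate them from a dedicated staging upload manager.
+    */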
+ batch->fine_fences.uploader =
+ u_upload_create(&ice->ctx, 4096, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_STAGING, 0);
+ iris_fine_fence_init(batch);
+
batch->hw_ctx_id = iris_create_hw_context(screen->bufmgr);
assert(batch->hw_ctx_id);
batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
- batch->cache.depth = _mesa_set_create(NULL, _mesa_hash_pointer,
- _mesa_key_pointer_equal);
memset(batch->other_batches, 0, sizeof(batch->other_batches));
void
iris_use_pinned_bo(struct iris_batch *batch,
struct iris_bo *bo,
- bool writable)
+ bool writable, enum iris_domain access)
{
assert(bo->kflags & EXEC_OBJECT_PINNED);
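+   /* Writes to the workaround BO are scratch data that nobody reads back,
+    * so never mark it writable; that would only create needless ordering
+    * dependencies between batches.
+    */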
if (bo == batch->screen->workaround_bo)
writable = false;
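+
+   /* Any real domain (below NUM_IRIS_DOMAINS) means this access happens
+    * inside a sync region, so record its sequence number for per-domain
+    * tracking; IRIS_DOMAIN_NONE skips the bookkeeping.
+    */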
+ if (access < NUM_IRIS_DOMAINS) {
+ assert(batch->sync_region_depth);
+ iris_bo_bump_seqno(bo, batch->next_seqno, access);
+ }
+
struct drm_i915_gem_exec_object2 *existing_entry =
find_validation_entry(batch, bo);
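+   /* If another in-flight batch also references this BO and either side
+    * writes it, flush that batch and make this one wait on its fence so
+    * the two batches' accesses are ordered.
+    */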
if (other_entry &&
((other_entry->flags & EXEC_OBJECT_WRITE) || writable)) {
iris_batch_flush(batch->other_batches[b]);
- iris_batch_add_syncobj(batch, batch->other_batches[b]->last_syncobj,
+ iris_batch_add_syncobj(batch,
+ batch->other_batches[b]->last_fence->syncobj,
I915_EXEC_FENCE_WAIT);
}
}
batch->map = iris_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
batch->map_next = batch->map;
- iris_use_pinned_bo(batch, batch->bo, false);
+ iris_use_pinned_bo(batch, batch->bo, false, IRIS_DOMAIN_NONE);
}
static void
iris_batch_add_syncobj(batch, syncobj, I915_EXEC_FENCE_SIGNAL);
iris_syncobj_reference(screen, &syncobj, NULL);
- iris_cache_sets_clear(batch);
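+   /* A freshly reset batch has no open sync region; restart the sync
+    * boundary and seqno tracking state.
+    */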
+ assert(!batch->sync_region_depth);
+ iris_batch_sync_boundary(batch);
+ iris_batch_mark_reset_sync(batch);
+
+   /* Always add the workaround BO; it contains a driver identifier at the
+    * start of the buffer, which is quite helpful for debugging error states.
+    */
+ iris_use_pinned_bo(batch, screen->workaround_bo, false, IRIS_DOMAIN_NONE);
iris_batch_maybe_noop(batch);
}
ralloc_free(batch->exec_fences.mem_ctx);
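+   /* Release the storage backing the batch's fine-fence seqnos. */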
+ pipe_resource_reference(&batch->fine_fences.ref.res, NULL);
+
util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
iris_syncobj_reference(screen, s, NULL);
ralloc_free(batch->syncobjs.mem_ctx);
- iris_syncobj_reference(screen, &batch->last_syncobj, NULL);
+ iris_fine_fence_reference(batch->screen, &batch->last_fence, NULL);
+ u_upload_destroy(batch->fine_fences.uploader);
iris_bo_unreference(batch->bo);
batch->bo = NULL;
iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);
_mesa_hash_table_destroy(batch->cache.render, NULL);
- _mesa_set_destroy(batch->cache.depth, NULL);
if (unlikely(INTEL_DEBUG))
gen_batch_decode_ctx_finish(&batch->decoder);
}
}
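+/* Emit an end-of-batch fine fence and stash it as the batch's last fence,
+ * giving later waiters a single point to synchronize on batch completion.
+ */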
+static void
+finish_seqno(struct iris_batch *batch)
+{
+ struct iris_fine_fence *sq = iris_fine_fence_new(batch, IRIS_FENCE_END);
+ if (!sq)
+ return;
+
+ iris_fine_fence_reference(batch->screen, &batch->last_fence, sq);
+ iris_fine_fence_reference(batch->screen, &sq, NULL);
+}
+
/**
* Terminate a batch with MI_BATCH_BUFFER_END.
*/
{
add_aux_map_bos_to_batch(batch);
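+   /* Write the final (end-of-batch) seqno before terminating the batch. */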
+ finish_seqno(batch);
+
/* Emit MI_BATCH_BUFFER_END to finish our batch. */
uint32_t *map = batch->map_next;
/**
* Flush the batch buffer, submitting it to the GPU and resetting it so
* we're ready to emit the next batch.
- *
- * \param in_fence_fd is ignored if -1. Otherwise, this function takes
- * ownership of the fd.
- *
- * \param out_fence_fd is ignored if NULL. Otherwise, the caller must
- * take ownership of the returned fd.
*/
void
_iris_batch_flush(struct iris_batch *batch, const char *file, int line)
batch->exec_count = 0;
batch->aperture_space = 0;
- struct iris_syncobj *syncobj =
- ((struct iris_syncobj **) util_dynarray_begin(&batch->syncobjs))[0];
- iris_syncobj_reference(screen, &batch->last_syncobj, syncobj);
-
util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
iris_syncobj_reference(screen, s, NULL);
util_dynarray_clear(&batch->syncobjs);
*/
if (ret == -EIO && replace_hw_ctx(batch)) {
if (batch->reset->reset) {
- /* Tell the state tracker the device is lost and it was our fault. */
+ /* Tell gallium frontends the device is lost and it was our fault. */
batch->reset->reset(batch->reset->data, PIPE_GUILTY_CONTEXT_RESET);
}
}
/**
- * Updates the state of the noop feature.
+ * Updates the state of the noop feature. Returns true if the batch
+ * transitioned out of noop mode, in which case the caller must invalidate
+ * and re-emit all of its state.
*/
-uint64_t
-iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable, uint64_t dirty_flags)
+bool
+iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable)
{
if (batch->noop_enabled == noop_enable)
-      return 0;
+      return false;
/* We only need to update the entire state if we transition from noop ->
* not-noop.
*/
- return !batch->noop_enabled ? dirty_flags : 0;
+ return !batch->noop_enabled;
}
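+
+/* A hypothetical usage sketch (caller names are illustrative, not part of
+ * this patch): when the function returns true, the frontend should flag
+ * everything dirty so the full state is re-emitted, e.g.
+ *
+ *    if (iris_batch_prepare_noop(batch, enable))
+ *       ice->state.dirty = ~0ull;
+ */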