iris: Explicitly cast value to uint64_t
[mesa.git] src/gallium/drivers/iris/iris_batch.c
index 324ae016ae3ef80945ea72981ef7b15d30595178..6d1f6b86cbde942b51cd7589005b149759974566 100644
@@ -48,6 +48,7 @@
 #include "intel/common/gen_gem.h"
 #include "util/hash_table.h"
 #include "util/set.h"
+#include "util/u_upload_mgr.h"
 #include "main/macros.h"
 
 #include <errno.h>
@@ -104,11 +105,11 @@ dump_validation_list(struct iris_batch *batch)
       uint64_t flags = batch->validation_list[i].flags;
       assert(batch->validation_list[i].handle ==
              batch->exec_bos[i]->gem_handle);
-      fprintf(stderr, "[%2d]: %2d %-14s @ 0x%016llx (%"PRIu64"B)\t %2d refs %s\n",
+      fprintf(stderr, "[%2d]: %2d %-14s @ 0x%"PRIx64" (%"PRIu64"B)\t %2d refs %s\n",
               i,
               batch->validation_list[i].handle,
               batch->exec_bos[i]->name,
-              batch->validation_list[i].offset,
+              (uint64_t)batch->validation_list[i].offset,
               batch->exec_bos[i]->size,
               batch->exec_bos[i]->refcount,
               (flags & EXEC_OBJECT_WRITE) ? " (write)" : "");
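
The explicit cast above is the fix named in the commit title: drm_i915_gem_exec_object2::offset is a __u64, which the Linux uAPI headers typically define as unsigned long long, while PRIx64 expects a uint64_t, which glibc defines as unsigned long on LP64 targets. The types have the same size, but -Wformat still warns on the mismatch; casting to uint64_t first makes the PRIx64 conversion exact on every ABI. A minimal sketch of the portable pattern (print_offset is illustrative, not driver code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* __u64 may not be the same underlying type as uint64_t, so cast
     * explicitly before formatting with PRIx64. */
    static void print_offset(unsigned long long offset)
    {
       printf("@ 0x%016" PRIx64 "\n", (uint64_t)offset);
    }
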
@@ -180,6 +181,11 @@ iris_init_batch(struct iris_context *ice,
    batch->state_sizes = ice->state.sizes;
    batch->name = name;
 
+   batch->fine_fences.uploader =
+      u_upload_create(&ice->ctx, 4096, PIPE_BIND_CUSTOM,
+                      PIPE_USAGE_STAGING, 0);
+   iris_fine_fence_init(batch);
+
    batch->hw_ctx_id = iris_create_hw_context(screen->bufmgr);
    assert(batch->hw_ctx_id);
 
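The new fine_fences.uploader is a u_upload_mgr that suballocates tiny seqno slots out of 4 KiB staging buffers rather than allocating a buffer per fence. A hedged sketch of how a slot could be carved out with the stock gallium helper u_upload_alloc (the helper and its signature are real; the surrounding names are assumptions, not the actual iris_fine_fence_new implementation):

    /* Suballocate a CPU-visible, 4-byte seqno slot from the staging
     * uploader; the GPU later writes the fence value into it. */
    static uint32_t *
    alloc_seqno_slot(struct iris_batch *batch,
                     struct pipe_resource **out_res, unsigned *out_offset)
    {
       void *map = NULL;
       u_upload_alloc(batch->fine_fences.uploader, 0, sizeof(uint32_t),
                      sizeof(uint32_t), out_offset, out_res, &map);
       return map;
    }
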
@@ -197,8 +203,6 @@ iris_init_batch(struct iris_context *ice,
 
    batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                  _mesa_key_pointer_equal);
-   batch->cache.depth = _mesa_set_create(NULL, _mesa_hash_pointer,
-                                         _mesa_key_pointer_equal);
 
    memset(batch->other_batches, 0, sizeof(batch->other_batches));
 
@@ -265,7 +269,7 @@ ensure_exec_obj_space(struct iris_batch *batch, uint32_t count)
 void
 iris_use_pinned_bo(struct iris_batch *batch,
                    struct iris_bo *bo,
-                   bool writable)
+                   bool writable, enum iris_domain access)
 {
    assert(bo->kflags & EXEC_OBJECT_PINNED);
 
@@ -277,6 +281,11 @@ iris_use_pinned_bo(struct iris_batch *batch,
    if (bo == batch->screen->workaround_bo)
       writable = false;
 
+   if (access < NUM_IRIS_DOMAINS) {
+      assert(batch->sync_region_depth);
+      iris_bo_bump_seqno(bo, batch->next_seqno, access);
+   }
+
    struct drm_i915_gem_exec_object2 *existing_entry =
       find_validation_entry(batch, bo);
 
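The access < NUM_IRIS_DOMAINS guard works because IRIS_DOMAIN_NONE is defined as NUM_IRIS_DOMAINS, so callers passing IRIS_DOMAIN_NONE skip the seqno bump entirely. A hedged sketch of the per-BO bookkeeping this implies (the enum and field names follow the iris headers of this era, but treat the exact layout as an assumption):

    /* Each BO records the last seqno that touched it, per access domain;
     * IRIS_DOMAIN_NONE deliberately compares >= NUM_IRIS_DOMAINS. */
    enum iris_domain {
       IRIS_DOMAIN_RENDER_WRITE = 0,
       IRIS_DOMAIN_DEPTH_WRITE,
       IRIS_DOMAIN_OTHER_WRITE,
       IRIS_DOMAIN_OTHER_READ,
       NUM_IRIS_DOMAINS,
       IRIS_DOMAIN_NONE = NUM_IRIS_DOMAINS,
    };

    static inline void
    iris_bo_bump_seqno(struct iris_bo *bo, uint64_t seqno,
                       enum iris_domain type)
    {
       uint64_t *const last = &bo->last_seqnos[type];
       *last = MAX2(seqno, *last);   /* seqnos only move forward; MAX2 is
                                      * the mesa macro from main/macros.h */
    }
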
@@ -313,7 +322,8 @@ iris_use_pinned_bo(struct iris_batch *batch,
          if (other_entry &&
              ((other_entry->flags & EXEC_OBJECT_WRITE) || writable)) {
             iris_batch_flush(batch->other_batches[b]);
-            iris_batch_add_syncobj(batch, batch->other_batches[b]->last_syncobj,
+            iris_batch_add_syncobj(batch,
+                                   batch->other_batches[b]->last_fence->syncobj,
                                    I915_EXEC_FENCE_WAIT);
          }
       }
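
last_syncobj becomes last_fence->syncobj because a fine fence bundles the whole-batch syncobj together with the fine-grained seqno the GPU writes. An assumed shape of the structure, for orientation only (the real definition lives elsewhere in the driver and may differ in detail):

    /* Sketch of a fine fence: a kernel syncobj for whole-batch waits plus
     * a seqno slot in the uploader buffer for finer-grained checks. */
    struct iris_fine_fence {
       struct pipe_reference reference;  /* refcount */
       struct iris_syncobj *syncobj;     /* signals when the batch retires */
       struct iris_state_ref ref;        /* uploader buffer holding the seqno */
       uint32_t *map;                    /* CPU view of the seqno slot */
       uint32_t seqno;                   /* value that marks this fence */
    };
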
@@ -350,7 +360,7 @@ create_batch(struct iris_batch *batch)
    batch->map = iris_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
    batch->map_next = batch->map;
 
-   iris_use_pinned_bo(batch, batch->bo, false);
+   iris_use_pinned_bo(batch, batch->bo, false, IRIS_DOMAIN_NONE);
 }
 
 static void
@@ -389,7 +399,14 @@ iris_batch_reset(struct iris_batch *batch)
    iris_batch_add_syncobj(batch, syncobj, I915_EXEC_FENCE_SIGNAL);
    iris_syncobj_reference(screen, &syncobj, NULL);
 
-   iris_cache_sets_clear(batch);
+   assert(!batch->sync_region_depth);
+   iris_batch_sync_boundary(batch);
+   iris_batch_mark_reset_sync(batch);
+
+   /* Always add the workaround BO; it contains a driver identifier at the
+    * beginning, which is quite helpful for debugging error states.
+    */
+   iris_use_pinned_bo(batch, screen->workaround_bo, false, IRIS_DOMAIN_NONE);
 
    iris_batch_maybe_noop(batch);
 }
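
The assert(batch->sync_region_depth) added to iris_use_pinned_bo pairs with the assert here: seqno bumps are only legal inside a bracketed sync region, and a batch reset must land exactly on a region boundary. A hedged sketch of the bracketing helpers this implies (only the sync_region_depth counter is visible in this patch; the bodies are assumptions):

    /* Assumed shape of the sync-region bracketing around command emission. */
    static inline void
    iris_batch_sync_region_start(struct iris_batch *batch)
    {
       batch->sync_region_depth++;
    }

    static inline void
    iris_batch_sync_region_end(struct iris_batch *batch)
    {
       assert(batch->sync_region_depth);
       batch->sync_region_depth--;
    }
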
@@ -408,11 +425,14 @@ iris_batch_free(struct iris_batch *batch)
 
    ralloc_free(batch->exec_fences.mem_ctx);
 
+   pipe_resource_reference(&batch->fine_fences.ref.res, NULL);
+
    util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
       iris_syncobj_reference(screen, s, NULL);
    ralloc_free(batch->syncobjs.mem_ctx);
 
-   iris_syncobj_reference(screen, &batch->last_syncobj, NULL);
+   iris_fine_fence_reference(batch->screen, &batch->last_fence, NULL);
+   u_upload_destroy(batch->fine_fences.uploader);
 
    iris_bo_unreference(batch->bo);
    batch->bo = NULL;
@@ -422,7 +442,6 @@ iris_batch_free(struct iris_batch *batch)
    iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);
 
    _mesa_hash_table_destroy(batch->cache.render, NULL);
-   _mesa_set_destroy(batch->cache.depth, NULL);
 
    if (unlikely(INTEL_DEBUG))
       gen_batch_decode_ctx_finish(&batch->decoder);
@@ -497,6 +516,17 @@ add_aux_map_bos_to_batch(struct iris_batch *batch)
    }
 }
 
+static void
+finish_seqno(struct iris_batch *batch)
+{
+   struct iris_fine_fence *sq = iris_fine_fence_new(batch, IRIS_FENCE_END);
+   if (!sq)
+      return;
+
+   iris_fine_fence_reference(batch->screen, &batch->last_fence, sq);
+   iris_fine_fence_reference(batch->screen, &sq, NULL);
+}
+
 /**
  * Terminate a batch with MI_BATCH_BUFFER_END.
  */
@@ -505,6 +535,8 @@ iris_finish_batch(struct iris_batch *batch)
 {
    add_aux_map_bos_to_batch(batch);
 
+   finish_seqno(batch);
+
    /* Emit MI_BATCH_BUFFER_END to finish our batch. */
    uint32_t *map = batch->map_next;
 
@@ -641,12 +673,6 @@ batch_name_to_string(enum iris_batch_name name)
 /**
  * Flush the batch buffer, submitting it to the GPU and resetting it so
  * we're ready to emit the next batch.
- *
- * \param in_fence_fd is ignored if -1.  Otherwise, this function takes
- * ownership of the fd.
- *
- * \param out_fence_fd is ignored if NULL.  Otherwise, the caller must
- * take ownership of the returned fd.
  */
 void
 _iris_batch_flush(struct iris_batch *batch, const char *file, int line)
@@ -687,10 +713,6 @@ _iris_batch_flush(struct iris_batch *batch, const char *file, int line)
    batch->exec_count = 0;
    batch->aperture_space = 0;
 
-   struct iris_syncobj *syncobj =
-      ((struct iris_syncobj **) util_dynarray_begin(&batch->syncobjs))[0];
-   iris_syncobj_reference(screen, &batch->last_syncobj, syncobj);
-
    util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
       iris_syncobj_reference(screen, s, NULL);
    util_dynarray_clear(&batch->syncobjs);
@@ -712,7 +734,7 @@ _iris_batch_flush(struct iris_batch *batch, const char *file, int line)
     */
    if (ret == -EIO && replace_hw_ctx(batch)) {
       if (batch->reset->reset) {
-         /* Tell the state tracker the device is lost and it was our fault. */
+         /* Tell gallium frontends the device is lost and it was our fault. */
          batch->reset->reset(batch->reset->data, PIPE_GUILTY_CONTEXT_RESET);
       }
 
@@ -741,10 +763,11 @@ iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
 }
 
 /**
- * Updates the state of the noop feature.
+ * Updates the state of the noop feature.  Returns true if there was a noop
+ * transition that led to state invalidation.
  */
-uint64_t
-iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable, uint64_t dirty_flags)
+bool
+iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable)
 {
    if (batch->noop_enabled == noop_enable)
       return false;
@@ -760,5 +783,5 @@ iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable, uint64_t dir
    /* We only need to update the entire state if we transition from noop ->
     * not-noop.
     */
-   return !batch->noop_enabled ? dirty_flags : 0;
+   return !batch->noop_enabled;
 }
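
With the uint64_t return gone, the dirty-flag bookkeeping moves to the caller; the function now only reports whether a noop -> not-noop transition invalidated state. A hypothetical call site (the batch index and the all-ones dirty mask are assumptions, not taken from this patch):

    /* Hypothetical caller: re-flag all state dirty on a transition out of
     * noop mode, since the function no longer returns the mask itself. */
    if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER], enable))
       ice->state.dirty = ~0ull;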