intel/tools: Disassemble WAIT's argument as a destination
diff --git a/src/intel/vulkan/genX_blorp_exec.c b/src/intel/vulkan/genX_blorp_exec.c
index ecca3928de546d74229dfeb32fbb29d14d70fe83..15ecf7805af5b8782658c89e3296595e2ecfaf81 100644
@@ -57,17 +57,25 @@ blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
                     struct blorp_address address, uint32_t delta)
 {
    struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
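+   /* anv_reloc_list_add now also returns the 64-bit address it resolved for
+    * the relocation target, which we write into the surface state below.
+    */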
+   uint64_t address_u64 = 0;
    VkResult result =
       anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
-                         ss_offset, address.buffer, address.offset + delta);
+                         ss_offset, address.buffer, address.offset + delta,
+                         &address_u64);
    if (result != VK_SUCCESS)
       anv_batch_set_error(&cmd_buffer->batch, result);
 
-   void *dest = cmd_buffer->device->surface_state_pool.block_pool.map +
-      ss_offset;
-   uint64_t val = ((struct anv_bo*)address.buffer)->offset + address.offset +
-      delta;
-   write_reloc(cmd_buffer->device, dest, val, false);
+   void *dest = anv_block_pool_map(
+      &cmd_buffer->device->surface_state_pool.block_pool, ss_offset, 8);
+   write_reloc(cmd_buffer->device, dest, address_u64, false);
+}
+
+static uint64_t
+blorp_get_surface_address(struct blorp_batch *blorp_batch,
+                          struct blorp_address address)
+{
+   /* We'll let blorp_surface_reloc write the address. */
+   return 0ull;
 }
 
 #if GEN_GEN >= 7 && GEN_GEN < 10
@@ -76,7 +84,7 @@ blorp_get_surface_base_address(struct blorp_batch *batch)
 {
    struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
    return (struct blorp_address) {
-      .buffer = &cmd_buffer->device->surface_state_pool.block_pool.bo,
+      .buffer = cmd_buffer->device->surface_state_pool.block_pool.bo,
       .offset = 0,
    };
 }
@@ -124,8 +132,6 @@ blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
       surface_offsets[i] = surface_state.offset;
       surface_maps[i] = surface_state.map;
    }
-
-   anv_state_flush(cmd_buffer->device, bt_state);
 }
 
 static void *
@@ -133,67 +139,68 @@ blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
                           struct blorp_address *addr)
 {
    struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
-
-   /* From the Skylake PRM, 3DSTATE_VERTEX_BUFFERS:
-    *
-    *    "The VF cache needs to be invalidated before binding and then using
-    *    Vertex Buffers that overlap with any previously bound Vertex Buffer
-    *    (at a 64B granularity) since the last invalidation.  A VF cache
-    *    invalidate is performed by setting the "VF Cache Invalidation Enable"
-    *    bit in PIPE_CONTROL."
-    *
-    * This restriction first appears in the Skylake PRM but the internal docs
-    * also list it as being an issue on Broadwell.  In order to avoid this
-    * problem, we align all vertex buffer allocations to 64 bytes.
-    */
    struct anv_state vb_state =
       anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 64);
 
    *addr = (struct blorp_address) {
-      .buffer = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
+      .buffer = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
       .offset = vb_state.offset,
-      .mocs = cmd_buffer->device->default_mocs,
+      .mocs = cmd_buffer->device->isl_dev.mocs.internal,
    };
 
    return vb_state.map;
 }
 
-#if GEN_GEN >= 8
-static struct blorp_address
-blorp_get_workaround_page(struct blorp_batch *batch)
+static void
+blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *batch,
+                                           const struct blorp_address *addrs,
+                                           uint32_t *sizes,
+                                           unsigned num_vbs)
+{
+   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
+
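+   /* On gen8-9 the VF cache only considers the low 32 bits of a vertex
+    * buffer address, so record each binding and let the flush helper queue a
+    * VF cache invalidate when the high bits of an address change.
+    */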
+   for (unsigned i = 0; i < num_vbs; i++) {
+      struct anv_address anv_addr = {
+         .bo = addrs[i].buffer,
+         .offset = addrs[i].offset,
+      };
+      genX(cmd_buffer_set_binding_for_gen8_vb_flush)(cmd_buffer,
+                                                     i, anv_addr, sizes[i]);
+   }
+
+   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
+
+   /* Technically, we should call this *after* 3DPRIMITIVE but it doesn't
+    * really matter for blorp because we never call apply_pipe_flushes after
+    * this point.
+    */
+   genX(cmd_buffer_update_dirty_vbs_for_gen8_vb_flush)(cmd_buffer, SEQUENTIAL,
+                                                       (1 << num_vbs) - 1);
+}
+
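+/* The workaround allocation is now described by a full anv_address, so hand
+ * blorp both the BO and the offset within it.
+ */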
+UNUSED static struct blorp_address
+blorp_get_workaround_address(struct blorp_batch *batch)
 {
    struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
 
    return (struct blorp_address) {
-      .buffer = &cmd_buffer->device->workaround_bo,
+      .buffer = cmd_buffer->device->workaround_address.bo,
+      .offset = cmd_buffer->device->workaround_address.offset,
    };
 }
-#endif
 
 static void
 blorp_flush_range(struct blorp_batch *batch, void *start, size_t size)
 {
-   struct anv_device *device = batch->blorp->driver_ctx;
-   if (!device->info.has_llc)
-      gen_flush_range(start, size);
+   /* No flushing is needed here anymore: state memory is snooped, so CPU
+    * writes stay coherent with the GPU. */
 }
 
-static void
-blorp_emit_urb_config(struct blorp_batch *batch,
-                      unsigned vs_entry_size, unsigned sf_entry_size)
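+/* blorp emits the URB configuration itself these days; all it needs from the
+ * driver is the currently programmed L3 configuration.
+ */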
+static const struct gen_l3_config *
+blorp_get_l3_config(struct blorp_batch *batch)
 {
-   struct anv_device *device = batch->blorp->driver_ctx;
    struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
-
-   assert(sf_entry_size == 0);
-
-   const unsigned entry_size[4] = { vs_entry_size, 1, 1, 1 };
-
-   genX(emit_urb_setup)(device, &cmd_buffer->batch,
-                        cmd_buffer->state.current_l3_config,
-                        VK_SHADER_STAGE_VERTEX_BIT |
-                        VK_SHADER_STAGE_FRAGMENT_BIT,
-                        entry_size);
+   return cmd_buffer->state.current_l3_config;
 }
 
 void
@@ -208,6 +215,10 @@ genX(blorp_exec)(struct blorp_batch *batch,
       genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
    }
 
+   const unsigned scale = params->fast_clear_op ? UINT_MAX : 1;
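+   /* The scale expresses how much of the framebuffer a single PS invocation
+    * may touch: fast-clear/resolve operations can cover an arbitrarily large
+    * area, so pass UINT_MAX for them and keep 1 for regular blits.
+    */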
+   genX(cmd_buffer_emit_hashing_mode)(cmd_buffer, params->x1 - params->x0,
+                                      params->y1 - params->y0, scale);
+
 #if GEN_GEN >= 11
    /* The PIPE_CONTROL command description says:
     *
@@ -243,13 +254,22 @@ genX(blorp_exec)(struct blorp_batch *batch,
     */
    genX(cmd_buffer_enable_pma_fix)(cmd_buffer, false);
 
-   /* Disable VF statistics */
-   blorp_emit(batch, GENX(3DSTATE_VF_STATISTICS), vf) {
-      vf.StatisticsEnable = false;
-   }
-
    blorp_exec(batch, params);
 
+#if GEN_GEN >= 11
+   /* The PIPE_CONTROL command description says:
+    *
+    *    "Whenever a Binding Table Index (BTI) used by a Render Taget Message
+    *     points to a different RENDER_SURFACE_STATE, SW must issue a Render
+    *     Target Cache Flush by enabling this bit. When render target flush
+    *     is set due to new association of BTI, PS Scoreboard Stall bit must
+    *     be set in this packet."
+    */
+   cmd_buffer->state.pending_pipe_bits |=
+      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
+      ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
+#endif
+
    cmd_buffer->state.gfx.vb_dirty = ~0;
    cmd_buffer->state.gfx.dirty = ~0;
    cmd_buffer->state.push_constants_dirty = ~0;