i965: Drop assert about number of uniforms in ARB handling.
diff --git a/src/mesa/drivers/dri/i965/genX_blorp_exec.c b/src/mesa/drivers/dri/i965/genX_blorp_exec.c
index 3c7a7b47dbd30a2523e57e057321116b320edd62..34bfcad03ebb2c8a1c269db0cf39f9b90a80e7c7 100644
--- a/src/mesa/drivers/dri/i965/genX_blorp_exec.c
+++ b/src/mesa/drivers/dri/i965/genX_blorp_exec.c
@@ -44,7 +44,7 @@ blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
    assert(batch->blorp->driver_ctx == batch->driver_batch);
    struct brw_context *brw = batch->driver_batch;
 
-   intel_batchbuffer_begin(brw, n, RENDER_RING);
+   intel_batchbuffer_begin(brw, n);
    uint32_t *map = brw->batch.map_next;
    brw->batch.map_next += n;
    intel_batchbuffer_advance(brw);
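
The RENDER_RING argument drops out here (and in the
intel_batchbuffer_require_space() call further down): with BLT-ring
submission gone, every batch targets the render ring, so ring selection
is dead weight in the batchbuffer API. Assuming the matching
intel_batchbuffer.h change, the prototypes become roughly:

   /* Sketch of the assumed post-change prototypes; exact parameter
    * types may differ from intel_batchbuffer.h.
    */
   void intel_batchbuffer_begin(struct brw_context *brw, int n);
   void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz);
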
@@ -60,7 +60,7 @@ blorp_emit_reloc(struct blorp_batch *batch,
    uint32_t offset;
 
    if (GEN_GEN < 6 && brw_ptr_in_state_buffer(&brw->batch, location)) {
-      offset = (char *)location - (char *)brw->batch.state_map;
+      offset = (char *)location - (char *)brw->batch.state.map;
       return brw_state_reloc(&brw->batch, offset,
                              address.buffer, address.offset + delta,
                              address.reloc_flags);
@@ -68,7 +68,7 @@ blorp_emit_reloc(struct blorp_batch *batch,
 
    assert(!brw_ptr_in_state_buffer(&brw->batch, location));
 
-   offset = (char *)location - (char *)brw->batch.map;
+   offset = (char *)location - (char *)brw->batch.batch.map;
    return brw_batch_reloc(&brw->batch, offset,
                           address.buffer, address.offset + delta,
                           address.reloc_flags);
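
Both relocation paths above pick up the new split-buffer layout: the old
flat brw->batch.map / brw->batch.state_map pair becomes two
self-contained sub-buffers, one for the command stream and one for
surface/dynamic state. A sketch of the layout the accesses imply (field
names guessed from brw->batch.batch.map and brw->batch.state.map; the
real intel_batchbuffer.h may differ):

   /* Hypothetical layout implied by the accesses above; a sketch, not
    * the verbatim struct.
    */
   struct brw_growing_bo {
      struct brw_bo *bo;   /* backing buffer object */
      void *map;           /* current CPU mapping */
   };

   struct intel_batchbuffer {
      struct brw_growing_bo batch;   /* command stream */
      struct brw_growing_bo state;   /* surface/dynamic state */
      uint32_t *map_next;            /* next free dword in batch.map */
      /* ... */
   };
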
@@ -86,7 +86,7 @@ blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
       brw_state_reloc(&brw->batch, ss_offset, bo, address.offset + delta,
                       address.reloc_flags);
 
-   void *reloc_ptr = (void *)brw->batch.state_map + ss_offset;
+   void *reloc_ptr = (void *)brw->batch.state.map + ss_offset;
 #if GEN_GEN >= 8
    *(uint64_t *)reloc_ptr = reloc_val;
 #else
@@ -94,6 +94,19 @@ blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
 #endif
 }
 
+#if GEN_GEN >= 7 && GEN_GEN < 10
+static struct blorp_address
+blorp_get_surface_base_address(struct blorp_batch *batch)
+{
+   assert(batch->blorp->driver_ctx == batch->driver_batch);
+   struct brw_context *brw = batch->driver_batch;
+   return (struct blorp_address) {
+      .buffer = brw->batch.state.bo,
+      .offset = 0,
+   };
+}
+#endif
+
 static void *
 blorp_alloc_dynamic_state(struct blorp_batch *batch,
                           uint32_t size,
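
The new gen7-9 blorp_get_surface_base_address() hook reports where
Surface State Base Address points (here, the driver's state buffer), so
the blorp core can turn a surface-state offset back into an absolute
graphics address, presumably for things like writing indirect clear
colors into surface states with MI commands. A hedged sketch of the
caller side:

   /* Hypothetical caller-side use: rebase a surface-state offset into
    * an absolute blorp_address.
    */
   struct blorp_address ss_addr = blorp_get_surface_base_address(batch);
   ss_addr.offset += ss_offset;
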
@@ -150,13 +163,61 @@ blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
    void *data = brw_state_batch(brw, size, 64, &offset);
 
    *addr = (struct blorp_address) {
-      .buffer = brw->batch.state_bo,
+      .buffer = brw->batch.state.bo,
       .offset = offset,
+
+      /* The VF cache designers apparently cut corners, and made the cache
+       * only consider the bottom 32 bits of memory addresses.  If you happen
+       * to have two vertex buffers which get placed exactly 4 GiB apart and
+       * use them in back-to-back draw calls, you can get collisions.  To work
+       * around this problem, we restrict vertex buffers to the low 32 bits of
+       * the address space.
+       */
+      .reloc_flags = RELOC_32BIT,
+
+#if GEN_GEN == 10
+      .mocs = CNL_MOCS_WB,
+#elif GEN_GEN == 9
+      .mocs = SKL_MOCS_WB,
+#elif GEN_GEN == 8
+      .mocs = BDW_MOCS_WB,
+#elif GEN_GEN == 7
+      .mocs = GEN7_MOCS_L3,
+#endif
    };
 
    return data;
 }
 
+/**
+ * See vf_invalidate_for_vb_48b_transitions in genX_state_upload.c.
+ */
+static void
+blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *batch,
+                                           const struct blorp_address *addrs,
+                                           unsigned num_vbs)
+{
+#if GEN_GEN >= 8
+   struct brw_context *brw = batch->driver_batch;
+   bool need_invalidate = false;
+
+   for (unsigned i = 0; i < num_vbs; i++) {
+      struct brw_bo *bo = addrs[i].buffer;
+      uint16_t high_bits =
+         bo && (bo->kflags & EXEC_OBJECT_PINNED) ? bo->gtt_offset >> 32u : 0;
+
+      if (high_bits != brw->vb.last_bo_high_bits[i]) {
+         need_invalidate = true;
+         brw->vb.last_bo_high_bits[i] = high_bits;
+      }
+   }
+
+   if (need_invalidate) {
+      brw_emit_pipe_control_flush(brw, PIPE_CONTROL_VF_CACHE_INVALIDATE);
+   }
+#endif
+}
+
 #if GEN_GEN >= 8
 static struct blorp_address
 blorp_get_workaround_page(struct blorp_batch *batch)
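
Two halves of the same VF-cache workaround land in this hunk. blorp's
own vertex buffers are pinned below 4 GiB via RELOC_32BIT, so their high
address bits are always zero. Vertex buffers blorp does not allocate are
handled by blorp_vf_invalidate_for_vb_48b_transitions(), which flushes
the VF cache whenever the upper 32 address bits of any vertex-buffer
slot change. The EXEC_OBJECT_PINNED gate suggests that only softpinned
BOs can live above 4 GiB (relocated BOs are presumably kept low by not
requesting 48-bit addresses), so the high bits are treated as zero
otherwise. A sketch of the per-slot tracking state this assumes in
struct brw_context (array size illustrative):

   /* Assumed tracking state; the GPU address space is 48 bits, so
    * bits 32..47 of an address fit in a uint16_t.
    */
   struct {
      /* High 32 address bits of the BO last bound to each VB slot. */
      uint16_t last_bo_high_bits[33];
      /* ... */
   } vb;
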
@@ -171,7 +232,8 @@ blorp_get_workaround_page(struct blorp_batch *batch)
 #endif
 
 static void
-blorp_flush_range(struct blorp_batch *batch, void *start, size_t size)
+blorp_flush_range(UNUSED struct blorp_batch *batch, UNUSED void *start,
+                  UNUSED size_t size)
 {
    /* All allocated states come from the batch which we will flush before we
     * submit it.  There's nothing for us to do here.
@@ -180,7 +242,8 @@ blorp_flush_range(struct blorp_batch *batch, void *start, size_t size)
 
 static void
 blorp_emit_urb_config(struct blorp_batch *batch,
-                      unsigned vs_entry_size, unsigned sf_entry_size)
+                      unsigned vs_entry_size,
+                      MAYBE_UNUSED unsigned sf_entry_size)
 {
    assert(batch->blorp->driver_ctx == batch->driver_batch);
    struct brw_context *brw = batch->driver_batch;
@@ -207,6 +270,20 @@ genX(blorp_exec)(struct blorp_batch *batch,
    struct gl_context *ctx = &brw->ctx;
    bool check_aperture_failed_once = false;
 
+#if GEN_GEN >= 11
+   /* The PIPE_CONTROL command description says:
+    *
+    * "Whenever a Binding Table Index (BTI) used by a Render Taget Message
+    *  points to a different RENDER_SURFACE_STATE, SW must issue a Render
+    *  Target Cache Flush by enabling this bit. When render target flush
+    *  is set due to new association of BTI, PS Scoreboard Stall bit must
+    *  be set in this packet."
+    */
+   brw_emit_pipe_control_flush(brw,
+                               PIPE_CONTROL_RENDER_TARGET_FLUSH |
+                               PIPE_CONTROL_STALL_AT_SCOREBOARD);
+#endif
+
    /* Flush the sampler and render caches.  We definitely need to flush the
     * sampler cache so that we get updated contents from the render cache for
     * the glBlitFramebuffer() source.  Also, we are sometimes warned in the
@@ -215,13 +292,21 @@ genX(blorp_exec)(struct blorp_batch *batch,
     * data.
     */
    if (params->src.enabled)
-      brw_render_cache_set_check_flush(brw, params->src.addr.buffer);
-   brw_render_cache_set_check_flush(brw, params->dst.addr.buffer);
+      brw_cache_flush_for_read(brw, params->src.addr.buffer);
+   if (params->dst.enabled) {
+      brw_cache_flush_for_render(brw, params->dst.addr.buffer,
+                                 params->dst.view.format,
+                                 params->dst.aux_usage);
+   }
+   if (params->depth.enabled)
+      brw_cache_flush_for_depth(brw, params->depth.addr.buffer);
+   if (params->stencil.enabled)
+      brw_cache_flush_for_depth(brw, params->stencil.addr.buffer);
 
    brw_select_pipeline(brw, BRW_RENDER_PIPELINE);
 
 retry:
-   intel_batchbuffer_require_space(brw, 1400, RENDER_RING);
+   intel_batchbuffer_require_space(brw, 1400);
    brw_require_statebuffer_space(brw, 600);
    intel_batchbuffer_save_state(brw);
    brw->batch.no_wrap = true;
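
The single brw_render_cache_set_check_flush() call is replaced by
purpose-specific helpers that know which cache domain each surface lives
in: depth and stencil writes land in the depth cache rather than the
render cache, and the render-target path takes the format and aux usage
so the tracking can account for format reinterpretation and auxiliary
(e.g. CCS) data. Declarations as implied by the call sites (a sketch;
the real headers may differ):

   /* Sketch of the assumed helper prototypes. */
   void brw_cache_flush_for_read(struct brw_context *brw,
                                 struct brw_bo *bo);
   void brw_cache_flush_for_render(struct brw_context *brw,
                                   struct brw_bo *bo,
                                   enum isl_format format,
                                   enum isl_aux_usage aux_usage);
   void brw_cache_flush_for_depth(struct brw_context *brw,
                                  struct brw_bo *bo);
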
@@ -282,10 +367,13 @@ retry:
                               !params->stencil.enabled;
    brw->ib.index_size = -1;
 
-   if (params->dst.enabled)
-      brw_render_cache_set_add_bo(brw, params->dst.addr.buffer);
+   if (params->dst.enabled) {
+      brw_render_cache_add_bo(brw, params->dst.addr.buffer,
+                              params->dst.view.format,
+                              params->dst.aux_usage);
+   }
    if (params->depth.enabled)
-      brw_render_cache_set_add_bo(brw, params->depth.addr.buffer);
+      brw_depth_cache_add_bo(brw, params->depth.addr.buffer);
    if (params->stencil.enabled)
-      brw_render_cache_set_add_bo(brw, params->stencil.addr.buffer);
+      brw_depth_cache_add_bo(brw, params->stencil.addr.buffer);
 }
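
These add_bo calls are the producer-side counterparts of the flush
helpers in the previous hunk: once the blorp operation is recorded, the
written BOs are added to the dirty sets that the brw_cache_flush_for_*()
calls consult on the next read. A minimal sketch of the assumed pairing:

   /* Hypothetical usage pattern: a write registers the BO... */
   brw_render_cache_add_bo(brw, bo, format, aux_usage);
   /* ...and a later read of the same BO triggers the needed flush. */
   brw_cache_flush_for_read(brw, bo);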