iris: Implement ALT mode for ARB_{vertex,fragment}_shader
[mesa.git] / src/gallium/drivers/iris/iris_draw.c
index 2494ade77d1d1f17dfd1a37886ed803bfadd7bca..19df5482f5ec494b24de67d7bbb2717d824f3f52 100644
@@ -4,22 +4,28 @@
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
- * on the rights to use, copy, modify, merge, publish, distribute, sub
- * license, and/or sell copies of the Software, and to permit persons to whom
- * the Software is furnished to do so, subject to the following conditions:
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
  *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
  *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
  */
+
+/**
+ * @file iris_draw.c
+ *
+ * The main driver hooks for drawing and launching compute shaders.
+ */
+
 #include <stdio.h>
 #include <errno.h>
 #include "pipe/p_defines.h"
 #include "pipe/p_screen.h"
 #include "util/u_inlines.h"
 #include "util/u_transfer.h"
+#include "util/u_upload_mgr.h"
 #include "intel/compiler/brw_compiler.h"
 #include "iris_context.h"
+#include "iris_defines.h"
+
+/**
+ * Record the current primitive mode and restart information, flagging
+ * related packets as dirty if necessary.
+ */
+static void
+iris_update_draw_info(struct iris_context *ice,
+                      const struct pipe_draw_info *info)
+{
+   if (ice->state.prim_mode != info->mode) {
+      ice->state.prim_mode = info->mode;
+      ice->state.dirty |= IRIS_DIRTY_VF_TOPOLOGY;
+   }
+
+   if (info->mode == PIPE_PRIM_PATCHES &&
+       ice->state.vertices_per_patch != info->vertices_per_patch) {
+      ice->state.vertices_per_patch = info->vertices_per_patch;
+      ice->state.dirty |= IRIS_DIRTY_VF_TOPOLOGY;
+
+      /* Flag constants dirty for gl_PatchVerticesIn if needed. */
+      const struct shader_info *tcs_info =
+         iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
+      if (tcs_info &&
+          tcs_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
+         ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TCS;
+         ice->state.shaders[MESA_SHADER_TESS_CTRL].cbuf0_needs_upload = true;
+      }
+   }
 
+   if (ice->state.primitive_restart != info->primitive_restart ||
+       ice->state.cut_index != info->restart_index) {
+      ice->state.dirty |= IRIS_DIRTY_VF;
+      ice->state.primitive_restart = info->primitive_restart;
+      ice->state.cut_index = info->restart_index;
+   }
+}
+
+/**
+ * The pipe->draw_vbo() driver hook.  Performs a draw on the GPU.
+ */
 void
 iris_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
 {
    struct iris_context *ice = (struct iris_context *) ctx;
-   struct iris_batch *batch = &ice->render_batch;
+   struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
+
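+   /* Conditional rendering may have decided to skip rendering entirely. */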
+   if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
+      return;
+
+   /* We can't safely re-emit 3DSTATE_SO_BUFFERS because it may zero the
+    * write offsets, changing the behavior.
+    */
+   if (unlikely(INTEL_DEBUG & DEBUG_REEMIT))
+      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER & ~IRIS_DIRTY_SO_BUFFERS;
 
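+   /* Flush the batch now if there isn't roughly enough space left for the
+    * commands this draw will emit.
+    */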
    iris_batch_maybe_flush(batch, 1500);
 
-   // XXX: actually do brw_cache_flush_for_*
-   iris_emit_pipe_control_flush(batch,
-                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
-                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
-                                PIPE_CONTROL_CS_STALL);
+   iris_update_draw_info(ice, info);
+
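+   /* Compile (or look up cached) shader variants for the current state. */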
+   iris_update_compiled_shaders(ice);
 
-   iris_emit_pipe_control_flush(batch,
-                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
-                                PIPE_CONTROL_CONST_CACHE_INVALIDATE);
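+   /* Resolve any compressed or fast-cleared textures and images that the
+    * bound shaders will read, so sampling sees up-to-date data.
+    */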
+   for (gl_shader_stage stage = 0; stage < MESA_SHADER_COMPUTE; stage++) {
+      if (ice->shaders.prog[stage])
+         iris_predraw_resolve_inputs(batch, &ice->state.shaders[stage]);
+   }
 
-   iris_cache_sets_clear(batch);
-   // XXX: ^^^
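+   /* Likewise, prepare the current framebuffer attachments for rendering. */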
+   iris_predraw_resolve_framebuffer(ice, batch);
 
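+   /* Reserve binding table space in the binder for this draw. */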
+   iris_binder_reserve_3d(ice);
 
-   iris_update_compiled_shaders(ice);
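+   /* Update the surface state base address (if needed) so binding table
+    * entries are relative to the current binder.
+    */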
+   ice->vtbl.update_surface_base_address(batch, &ice->state.binder);
    ice->vtbl.upload_render_state(ice, batch, info);
 
-   // XXX: don't flush always
-   iris_batch_flush(batch);
+   ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_RENDER;
+
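+   /* Note which surfaces were written, so future reads know to resolve. */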
+   iris_postdraw_update_resolve_tracking(ice, batch);
+}
+
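+/**
+ * Upload the current grid dimensions (or point at the indirect parameter
+ * buffer) and build a SURFACE_STATE for it, so compute shaders can read
+ * values such as gl_NumWorkGroups.
+ */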
+static void
+iris_update_grid_size_resource(struct iris_context *ice,
+                               const struct pipe_grid_info *grid)
+{
+   const struct iris_screen *screen = (void *) ice->ctx.screen;
+   const struct isl_device *isl_dev = &screen->isl_dev;
+   struct iris_state_ref *grid_ref = &ice->state.grid_size;
+   struct iris_state_ref *state_ref = &ice->state.grid_surf_state;
+
+   // XXX: if the shader doesn't actually care about the grid info,
+   // don't bother uploading the surface?
+
+   if (grid->indirect) {
+      pipe_resource_reference(&grid_ref->res, grid->indirect);
+      grid_ref->offset = grid->indirect_offset;
+
+      /* Zero out the grid size so that the next non-indirect grid launch will
+       * re-upload it properly.
+       */
+      memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid));
+   } else {
+      /* If the size is the same, we don't need to upload anything. */
+      if (memcmp(ice->state.last_grid, grid->grid, sizeof(grid->grid)) == 0)
+         return;
+
+      memcpy(ice->state.last_grid, grid->grid, sizeof(grid->grid));
+
+      u_upload_data(ice->state.dynamic_uploader, 0, sizeof(grid->grid), 4,
+                    grid->grid, &grid_ref->offset, &grid_ref->res);
+   }
+
+   void *surf_map = NULL;
+   u_upload_alloc(ice->state.surface_uploader, 0, isl_dev->ss.size,
+                  isl_dev->ss.align, &state_ref->offset, &state_ref->res,
+                  &surf_map);
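+   /* Binding table entries want offsets from Surface State Base Address. */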
+   state_ref->offset +=
+      iris_bo_offset_from_base_address(iris_resource_bo(state_ref->res));
+   isl_buffer_fill_state(&screen->isl_dev, surf_map,
+                         .address = grid_ref->offset +
+                            iris_resource_bo(grid_ref->res)->gtt_offset,
+                         .size_B = sizeof(grid->grid),
+                         .format = ISL_FORMAT_RAW,
+                         .stride_B = 1,
+                         .mocs = 4); // XXX: MOCS
+
+   ice->state.dirty |= IRIS_DIRTY_BINDINGS_CS;
+}
+
+void
+iris_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *grid)
+{
+   struct iris_context *ice = (struct iris_context *) ctx;
+   struct iris_batch *batch = &ice->batches[IRIS_BATCH_COMPUTE];
+
+   if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
+      return;
+
+   if (unlikely(INTEL_DEBUG & DEBUG_REEMIT))
+      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;
+
+   iris_batch_maybe_flush(batch, 1500);
+
+   // XXX: only do this when IRIS_DIRTY_UNCOMPILED_CS is flagged
+   iris_update_compiled_compute_shader(ice);
+
+   // XXX: predraw resolves / cache flushing
+
+   iris_update_grid_size_resource(ice, grid);
+
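+   /* As in the draw path, reserve binding table space and update the
+    * surface state base address to match the binder.
+    */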
+   iris_binder_reserve_compute(ice);
+   ice->vtbl.update_surface_base_address(batch, &ice->state.binder);
+
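+   /* If conditional rendering left a predicate value for us, load it into
+    * the MI predicate registers before emitting the compute walker.
+    */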
+   if (ice->state.compute_predicate) {
+      ice->vtbl.load_register_mem64(batch, MI_PREDICATE_DATA,
+                                    ice->state.compute_predicate, 0);
+      ice->state.compute_predicate = NULL;
+   }
+
+   ice->vtbl.upload_compute_state(ice, batch, grid);
+
+   ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_COMPUTE;
+
+   /* Note: since compute shaders can't access the framebuffer, there's
+    * no need to call iris_postdraw_update_resolve_tracking.
+    */
 }