nvc0: allow a non-user buffer to be bound at position 0
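
Kernel parameters coming from clover used to be uploaded into a dedicated
screen->parm BO that was always bound at constant buffer slot 0 of the
launch descriptor, so a real (non-user) buffer could never be bound there.
Parameters now go through uniform_bo at NVC0_CB_USR_INFO(5), slot 0 is only
claimed when user uniforms or kernel parameters are present, and the new
nve4_compute_setup_buf_cb() helper binds any non-user constant buffers 0-6
directly in the descriptor. A condensed sketch of the resulting binding
logic, assembled from the hunks below (the gp100 path only swaps in
gp100_cp_launch_desc_set_cb):

    if (nvc0->constbuf[5][0].user || cp->parm_size) {
       /* user uniforms / kernel parameters live in uniform_bo */
       nve4_cp_launch_desc_set_cb(desc, 0, screen->uniform_bo,
                                  NVC0_CB_USR_INFO(5), 1 << 16);
       /* a real buffer must not also be bound at slot 0 */
       assert(nvc0->constbuf[5][0].user || !nvc0->constbuf[5][0].u.buf);
    }
    nve4_cp_launch_desc_set_cb(desc, 7, screen->uniform_bo,   /* driver cb */
                               NVC0_CB_AUX_INFO(5), 1 << 11);
    nve4_compute_setup_buf_cb(nvc0, false, desc);             /* slots 0-6 */

Slot 7 remains reserved for the driver constant buffer.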
diff --git a/src/gallium/drivers/nouveau/nvc0/nve4_compute.c b/src/gallium/drivers/nouveau/nvc0/nve4_compute.c
index 3d0190928e3667c26ec43a36f92d0f21f9861327..022aace73dbc7a802a3722a2cb50fb8a980cda89 100644
--- a/src/gallium/drivers/nouveau/nvc0/nve4_compute.c
+++ b/src/gallium/drivers/nouveau/nvc0/nve4_compute.c
@@ -29,6 +29,7 @@
 
 #ifdef DEBUG
 static void nve4_compute_dump_launch_desc(const struct nve4_cp_launch_desc *);
+static void gp100_compute_dump_launch_desc(const struct gp100_cp_launch_desc *);
 #endif
 
 
@@ -57,6 +58,10 @@ nve4_screen_compute_setup(struct nvc0_screen *screen,
    case 0x120:
       obj_class = GM200_COMPUTE_CLASS;
       break;
+   case 0x130:
+      obj_class = (dev->chipset == 0x130 || dev->chipset == 0x13b) ?
+                      GP100_COMPUTE_CLASS : GP104_COMPUTE_CLASS;
+      break;
    default:
       NOUVEAU_ERR("unsupported chipset: NV%02x\n", dev->chipset);
       return -1;
@@ -69,11 +74,6 @@ nve4_screen_compute_setup(struct nvc0_screen *screen,
       return ret;
    }
 
-   ret = nouveau_bo_new(dev, NV_VRAM_DOMAIN(&screen->base), 0, 1 << 12, NULL,
-                        &screen->parm);
-   if (ret)
-      return ret;
-
    BEGIN_NVC0(push, SUBC_CP(NV01_SUBCHAN_OBJECT), 1);
    PUSH_DATA (push, screen->compute->oclass);
 
@@ -134,8 +134,11 @@ nve4_screen_compute_setup(struct nvc0_screen *screen,
    BEGIN_NVC0(push, NVE4_CP(TEX_CB_INDEX), 1);
    PUSH_DATA (push, 7); /* does not interfere with 3D */
 
+   /* Disabling this UNK command avoids a read fault when using texelFetch()
+    * from a compute shader, for weird reasons.
    if (obj_class == NVF0_COMPUTE_CLASS)
       IMMED_NVC0(push, SUBC_CP(0x02c4), 1);
+   */
 
    address = screen->uniform_bo->offset + NVC0_CB_AUX_INFO(5);
 
@@ -189,75 +192,114 @@ nve4_screen_compute_setup(struct nvc0_screen *screen,
    return 0;
 }
 
-
 static void
-nve4_compute_validate_surfaces(struct nvc0_context *nvc0)
+gm107_compute_validate_surfaces(struct nvc0_context *nvc0,
+                                struct pipe_image_view *view, int slot)
 {
-   struct nvc0_screen *screen = nvc0->screen;
+   struct nv04_resource *res = nv04_resource(view->resource);
    struct nouveau_pushbuf *push = nvc0->base.pushbuf;
-   struct nv50_surface *sf;
-   struct nv04_resource *res;
-   uint32_t mask;
-   unsigned i;
-   const unsigned t = 1;
+   struct nvc0_screen *screen = nvc0->screen;
+   struct nouveau_bo *txc = nvc0->screen->txc;
+   struct nv50_tic_entry *tic;
    uint64_t address;
+   const int s = 5;
 
-   address = screen->uniform_bo->offset + NVC0_CB_AUX_INFO(5);
+   tic = nv50_tic_entry(nvc0->images_tic[s][slot]);
+
+   res = nv04_resource(tic->pipe.texture);
+   nvc0_update_tic(nvc0, tic, res);
 
-   mask = nvc0->surfaces_dirty[t];
-   while (mask) {
-      i = ffs(mask) - 1;
-      mask &= ~(1 << i);
+   if (tic->id < 0) {
+      tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);
 
-      /*
-       * NVE4's surface load/store instructions receive all the information
-       * directly instead of via binding points, so we have to supply them.
-       */
+      /* upload the texture view */
+      PUSH_SPACE(push, 16);
       BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
-      PUSH_DATAh(push, address + NVC0_CB_AUX_BUF_INFO(i));
-      PUSH_DATA (push, address + NVC0_CB_AUX_BUF_INFO(i));
+      PUSH_DATAh(push, txc->offset + (tic->id * 32));
+      PUSH_DATA (push, txc->offset + (tic->id * 32));
       BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
-      PUSH_DATA (push, 64);
+      PUSH_DATA (push, 32);
       PUSH_DATA (push, 1);
-      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 17);
+      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 9);
       PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
+      PUSH_DATAp(push, &tic->tic[0], 8);
+
+      BEGIN_NIC0(push, NVE4_CP(TIC_FLUSH), 1);
+      PUSH_DATA (push, (tic->id << 4) | 1);
+   } else
+   if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
+      BEGIN_NIC0(push, NVE4_CP(TEX_CACHE_CTL), 1);
+      PUSH_DATA (push, (tic->id << 4) | 1);
+   }
+   nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);
 
-      nve4_set_surface_info(push, nvc0->surfaces[t][i], screen);
+   res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
+   res->status |=  NOUVEAU_BUFFER_STATUS_GPU_READING;
 
-      sf = nv50_surface(nvc0->surfaces[t][i]);
-      if (sf) {
-         res = nv04_resource(sf->base.texture);
+   BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RD);
 
-         if (sf->base.writable)
-            BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RDWR);
-         else
-            BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RD);
-      }
-   }
-   if (nvc0->surfaces_dirty[t]) {
-      BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
-      PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);
-   }
+   address = screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s);
+
+   /* upload the texture handle */
+   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
+   PUSH_DATAh(push, address + NVC0_CB_AUX_TEX_INFO(slot + 32));
+   PUSH_DATA (push, address + NVC0_CB_AUX_TEX_INFO(slot + 32));
+   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
+   PUSH_DATA (push, 4);
+   PUSH_DATA (push, 0x1);
+   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 2);
+   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
+   PUSH_DATA (push, tic->id);
+
+   BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
+   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);
+}
+
+static void
+nve4_compute_validate_surfaces(struct nvc0_context *nvc0)
+{
+   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
+   uint64_t address;
+   const int s = 5;
+   int i, j;
+
+   if (!nvc0->images_dirty[s])
+      return;
+
+   address = nvc0->screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s);
+
+   for (i = 0; i < NVC0_MAX_IMAGES; ++i) {
+      struct pipe_image_view *view = &nvc0->images[s][i];
+
+      BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
+      PUSH_DATAh(push, address + NVC0_CB_AUX_SU_INFO(i));
+      PUSH_DATA (push, address + NVC0_CB_AUX_SU_INFO(i));
+      BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
+      PUSH_DATA (push, 16 * 4);
+      PUSH_DATA (push, 0x1);
+      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 16);
+      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
 
-   /* re-reference non-dirty surfaces */
-   mask = nvc0->surfaces_valid[t] & ~nvc0->surfaces_dirty[t];
-   while (mask) {
-      i = ffs(mask) - 1;
-      mask &= ~(1 << i);
+      if (view->resource) {
+         struct nv04_resource *res = nv04_resource(view->resource);
 
-      sf = nv50_surface(nvc0->surfaces[t][i]);
-      res = nv04_resource(sf->base.texture);
+         if (res->base.target == PIPE_BUFFER) {
+            if (view->access & PIPE_IMAGE_ACCESS_WRITE)
+               nvc0_mark_image_range_valid(view);
+         }
 
-      if (sf->base.writable)
+         nve4_set_surface_info(push, view, nvc0);
          BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RDWR);
-      else
-         BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RD);
-   }
 
-   nvc0->surfaces_dirty[t] = 0;
+         if (nvc0->screen->base.class_3d >= GM107_3D_CLASS)
+            gm107_compute_validate_surfaces(nvc0, view, i);
+      } else {
+         for (j = 0; j < 16; j++)
+            PUSH_DATA(push, 0);
+      }
+   }
 }
 
-
 /* Thankfully, textures with samplers follow the normal rules. */
 static void
 nve4_compute_validate_samplers(struct nvc0_context *nvc0)
@@ -267,7 +309,13 @@ nve4_compute_validate_samplers(struct nvc0_context *nvc0)
       BEGIN_NVC0(nvc0->base.pushbuf, NVE4_CP(TSC_FLUSH), 1);
       PUSH_DATA (nvc0->base.pushbuf, 0);
    }
+
+   /* Invalidate all 3D samplers because they are aliased. */
+   for (int s = 0; s < 5; s++)
+      nvc0->samplers_dirty[s] = ~0;
+   nvc0->dirty_3d |= NVC0_NEW_3D_SAMPLERS;
 }
+
 /* (Code duplicated at bottom for various non-convincing reasons.
  *  E.g. we might want to use the COMPUTE subchannel to upload TIC/TSC
  *  entries to avoid a subchannel switch.
@@ -345,23 +393,24 @@ nve4_compute_validate_constbufs(struct nvc0_context *nvc0)
             uint64_t address
                = nvc0->screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s);
 
-            assert(i > 0); /* we really only want uniform buffer objects */
-
-            BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
-            PUSH_DATAh(push, address + NVC0_CB_AUX_UBO_INFO(i - 1));
-            PUSH_DATA (push, address + NVC0_CB_AUX_UBO_INFO(i - 1));
-            BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
-            PUSH_DATA (push, 4 * 4);
-            PUSH_DATA (push, 0x1);
-            BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 4);
-            PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
-
-            PUSH_DATA (push, res->address + nvc0->constbuf[s][i].offset);
-            PUSH_DATAh(push, res->address + nvc0->constbuf[s][i].offset);
-            PUSH_DATA (push, nvc0->constbuf[5][i].size);
-            PUSH_DATA (push, 0);
-            BCTX_REFN(nvc0->bufctx_cp, CP_CB(i), res, RD);
+            /* constbufs above 0 are fetched via ubo info in the shader */
+            if (i > 0) {
+               BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
+               PUSH_DATAh(push, address + NVC0_CB_AUX_UBO_INFO(i - 1));
+               PUSH_DATA (push, address + NVC0_CB_AUX_UBO_INFO(i - 1));
+               BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
+               PUSH_DATA (push, 4 * 4);
+               PUSH_DATA (push, 0x1);
+               BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 4);
+               PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
+
+               PUSH_DATA (push, res->address + nvc0->constbuf[s][i].offset);
+               PUSH_DATAh(push, res->address + nvc0->constbuf[s][i].offset);
+               PUSH_DATA (push, nvc0->constbuf[s][i].size);
+               PUSH_DATA (push, 0);
+            }
 
+            BCTX_REFN(nvc0->bufctx_cp, CP_CB(i), res, RD);
             res->cb_bindings[s] |= 1 << i;
          }
       }
@@ -399,6 +448,10 @@ nve4_compute_validate_buffers(struct nvc0_context *nvc0)
          PUSH_DATA (push, nvc0->buffers[s][i].buffer_size);
          PUSH_DATA (push, 0);
          BCTX_REFN(nvc0->bufctx_cp, CP_BUF, res, RDWR);
+         util_range_add(&res->valid_buffer_range,
+                        nvc0->buffers[s][i].buffer_offset,
+                        nvc0->buffers[s][i].buffer_offset +
+                        nvc0->buffers[s][i].buffer_size);
       } else {
          PUSH_DATA (push, 0);
          PUSH_DATA (push, 0);
@@ -437,7 +490,6 @@ nve4_state_validate_cp(struct nvc0_context *nvc0, uint32_t mask)
 
 static void
 nve4_compute_upload_input(struct nvc0_context *nvc0,
-                          struct nve4_cp_launch_desc *desc,
                           const struct pipe_grid_info *info)
 {
    struct nvc0_screen *screen = nvc0->screen;
@@ -449,46 +501,42 @@ nve4_compute_upload_input(struct nvc0_context *nvc0,
 
    if (cp->parm_size) {
       BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
-      PUSH_DATAh(push, screen->parm->offset);
-      PUSH_DATA (push, screen->parm->offset);
+      PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_USR_INFO(5));
+      PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_USR_INFO(5));
       BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
       PUSH_DATA (push, cp->parm_size);
       PUSH_DATA (push, 0x1);
       BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + (cp->parm_size / 4));
       PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
       PUSH_DATAp(push, info->input, cp->parm_size / 4);
-
-      /* Bind user parameters coming from clover. */
-      /* TODO: This should be harmonized with uniform_bo. */
-      assert(!(desc->cb_mask & (1 << 0)));
-      nve4_cp_launch_desc_set_cb(desc, 0, screen->parm, 0, 1 << 12);
    }
    BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
-   PUSH_DATAh(push, address + NVC0_CB_AUX_GRID_INFO);
-   PUSH_DATA (push, address + NVC0_CB_AUX_GRID_INFO);
+   PUSH_DATAh(push, address + NVC0_CB_AUX_GRID_INFO(0));
+   PUSH_DATA (push, address + NVC0_CB_AUX_GRID_INFO(0));
    BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
-   PUSH_DATA (push, 7 * 4);
+   PUSH_DATA (push, 8 * 4);
    PUSH_DATA (push, 0x1);
 
    if (unlikely(info->indirect)) {
       struct nv04_resource *res = nv04_resource(info->indirect);
       uint32_t offset = res->offset + info->indirect_offset;
 
-      nouveau_pushbuf_space(push, 16, 0, 1);
+      nouveau_pushbuf_space(push, 32, 0, 1);
       PUSH_REFN(push, res->bo, NOUVEAU_BO_RD | res->domain);
 
-      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 7);
+      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 8);
       PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
       PUSH_DATAp(push, info->block, 3);
       nouveau_pushbuf_data(push, res->bo, offset,
                            NVC0_IB_ENTRY_1_NO_PREFETCH | 3 * 4);
    } else {
-      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 7);
+      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 8);
       PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
       PUSH_DATAp(push, info->block, 3);
       PUSH_DATAp(push, info->grid, 3);
    }
    PUSH_DATA (push, 0);
+   PUSH_DATA (push, info->work_dim);
 
    BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
    PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);
@@ -504,6 +552,30 @@ nve4_compute_derive_cache_split(struct nvc0_context *nvc0, uint32_t shared_size)
    return NVC1_3D_CACHE_SPLIT_16K_SHARED_48K_L1;
 }
 
+static void
+nve4_compute_setup_buf_cb(struct nvc0_context *nvc0, bool gp100, void *desc)
+{
+   // only application constant buffers 0-6 can be bound in the descriptor;
+   // the rest are loaded through global memory
+   for (int i = 0; i <= 6; i++) {
+      if (nvc0->constbuf[5][i].user || !nvc0->constbuf[5][i].u.buf)
+         continue;
+
+      struct nv04_resource *res =
+         nv04_resource(nvc0->constbuf[5][i].u.buf);
+
+      uint32_t base = res->offset + nvc0->constbuf[5][i].offset;
+      uint32_t size = nvc0->constbuf[5][i].size;
+      if (gp100)
+         gp100_cp_launch_desc_set_cb(desc, i, res->bo, base, size);
+      else
+         nve4_cp_launch_desc_set_cb(desc, i, res->bo, base, size);
+   }
+
+   // there is no need to do FLUSH(NVE4_COMPUTE_FLUSH_CB) because
+   // nve4_compute_upload_input() does it later
+}
+
 static void
 nve4_compute_setup_launch_desc(struct nvc0_context *nvc0,
                                struct nve4_cp_launch_desc *desc,
@@ -524,7 +596,7 @@ nve4_compute_setup_launch_desc(struct nvc0_context *nvc0,
    desc->blockdim_z = info->block[2];
 
    desc->shared_size = align(cp->cp.smem_size, 0x100);
-   desc->local_size_p = align(cp->cp.lmem_size, 0x10);
+   desc->local_size_p = (cp->hdr[1] & 0xfffff0) + align(cp->cp.lmem_size, 0x10);
    desc->local_size_n = 0;
    desc->cstack_size = 0x800;
    desc->cache_split = nve4_compute_derive_cache_split(nvc0, cp->cp.smem_size);
@@ -532,18 +604,68 @@ nve4_compute_setup_launch_desc(struct nvc0_context *nvc0,
    desc->gpr_alloc = cp->num_gprs;
    desc->bar_alloc = cp->num_barriers;
 
-   // Only bind OpenGL uniforms and the driver constant buffer through the
+   // Only bind user uniforms and the driver constant buffer through the
   // launch descriptor because UBOs are stuck to the driver cb to avoid the
    // limitation of 8 CBs.
-   if (nvc0->constbuf[5][0].user) {
+   if (nvc0->constbuf[5][0].user || cp->parm_size) {
       nve4_cp_launch_desc_set_cb(desc, 0, screen->uniform_bo,
                                  NVC0_CB_USR_INFO(5), 1 << 16);
+
+      // Later logic will attempt to bind a real buffer at position 0. That
+      // should not happen if we've bound a user buffer.
+      assert(nvc0->constbuf[5][0].user || !nvc0->constbuf[5][0].u.buf);
    }
    nve4_cp_launch_desc_set_cb(desc, 7, screen->uniform_bo,
-                              NVC0_CB_AUX_INFO(5), 1 << 10);
+                              NVC0_CB_AUX_INFO(5), 1 << 11);
+
+   nve4_compute_setup_buf_cb(nvc0, false, desc);
 }
 
-static inline struct nve4_cp_launch_desc *
+static void
+gp100_compute_setup_launch_desc(struct nvc0_context *nvc0,
+                                struct gp100_cp_launch_desc *desc,
+                                const struct pipe_grid_info *info)
+{
+   const struct nvc0_screen *screen = nvc0->screen;
+   const struct nvc0_program *cp = nvc0->compprog;
+
+   gp100_cp_launch_desc_init_default(desc);
+
+   desc->entry = nvc0_program_symbol_offset(cp, info->pc);
+
+   desc->griddim_x = info->grid[0];
+   desc->griddim_y = info->grid[1];
+   desc->griddim_z = info->grid[2];
+   desc->blockdim_x = info->block[0];
+   desc->blockdim_y = info->block[1];
+   desc->blockdim_z = info->block[2];
+
+   desc->shared_size = align(cp->cp.smem_size, 0x100);
+   desc->local_size_p = (cp->hdr[1] & 0xfffff0) + align(cp->cp.lmem_size, 0x10);
+   desc->local_size_n = 0;
+   desc->cstack_size = 0x800;
+
+   desc->gpr_alloc = cp->num_gprs;
+   desc->bar_alloc = cp->num_barriers;
+
+   // Only bind user uniforms and the driver constant buffer through the
+   // launch descriptor because UBOs are stuck to the driver cb to avoid the
+   // limitation of 8 CBs.
+   if (nvc0->constbuf[5][0].user || cp->parm_size) {
+      gp100_cp_launch_desc_set_cb(desc, 0, screen->uniform_bo,
+                                  NVC0_CB_USR_INFO(5), 1 << 16);
+
+      // Later logic will attempt to bind a real buffer at position 0. That
+      // should not happen if we've bound a user buffer.
+      assert(nvc0->constbuf[5][0].user || !nvc0->constbuf[5][0].u.buf);
+   }
+   gp100_cp_launch_desc_set_cb(desc, 7, screen->uniform_bo,
+                               NVC0_CB_AUX_INFO(5), 1 << 11);
+
+   nve4_compute_setup_buf_cb(nvc0, true, desc);
+}
+
+static inline void *
 nve4_compute_alloc_launch_desc(struct nouveau_context *nv,
                                struct nouveau_bo **pbo, uint64_t *pgpuaddr)
 {
@@ -555,15 +677,37 @@ nve4_compute_alloc_launch_desc(struct nouveau_context *nv,
       ptr += adj;
       *pgpuaddr += adj;
    }
-   return (struct nve4_cp_launch_desc *)ptr;
+   return ptr;
+}
+
+static void
+nve4_upload_indirect_desc(struct nouveau_pushbuf *push,
+                          struct nv04_resource *res, uint64_t gpuaddr,
+                          uint32_t length, uint32_t bo_offset)
+{
+   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
+   PUSH_DATAh(push, gpuaddr);
+   PUSH_DATA (push, gpuaddr);
+   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
+   PUSH_DATA (push, length);
+   PUSH_DATA (push, 1);
+
+   nouveau_pushbuf_space(push, 32, 0, 1);
+   PUSH_REFN(push, res->bo, NOUVEAU_BO_RD | res->domain);
+
+   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + (length / 4));
+   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x08 << 1));
+   nouveau_pushbuf_data(push, res->bo, bo_offset,
+                        NVC0_IB_ENTRY_1_NO_PREFETCH | length);
 }
 
 void
 nve4_launch_grid(struct pipe_context *pipe, const struct pipe_grid_info *info)
 {
    struct nvc0_context *nvc0 = nvc0_context(pipe);
+   struct nvc0_screen *screen = nvc0->screen;
    struct nouveau_pushbuf *push = nvc0->base.pushbuf;
-   struct nve4_cp_launch_desc *desc;
+   void *desc;
    uint64_t desc_gpuaddr;
    struct nouveau_bo *desc_bo;
    int ret;
@@ -576,17 +720,34 @@ nve4_launch_grid(struct pipe_context *pipe, const struct pipe_grid_info *info)
    BCTX_REFN_bo(nvc0->bufctx_cp, CP_DESC, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                 desc_bo);
 
+   list_for_each_entry(struct nvc0_resident, resident, &nvc0->tex_head, list) {
+      nvc0_add_resident(nvc0->bufctx_cp, NVC0_BIND_CP_BINDLESS, resident->buf,
+                        resident->flags);
+   }
+
+   list_for_each_entry(struct nvc0_resident, resident, &nvc0->img_head, list) {
+      nvc0_add_resident(nvc0->bufctx_cp, NVC0_BIND_CP_BINDLESS, resident->buf,
+                        resident->flags);
+   }
+
    ret = !nve4_state_validate_cp(nvc0, ~0);
    if (ret)
       goto out;
 
-   nve4_compute_setup_launch_desc(nvc0, desc, info);
+   if (nvc0->screen->compute->oclass >= GP100_COMPUTE_CLASS)
+      gp100_compute_setup_launch_desc(nvc0, desc, info);
+   else
+      nve4_compute_setup_launch_desc(nvc0, desc, info);
 
-   nve4_compute_upload_input(nvc0, desc, info);
+   nve4_compute_upload_input(nvc0, info);
 
 #ifdef DEBUG
-   if (debug_get_num_option("NV50_PROG_DEBUG", 0))
-      nve4_compute_dump_launch_desc(desc);
+   if (debug_get_num_option("NV50_PROG_DEBUG", 0)) {
+      if (nvc0->screen->compute->oclass >= GP100_COMPUTE_CLASS)
+         gp100_compute_dump_launch_desc(desc);
+      else
+         nve4_compute_dump_launch_desc(desc);
+   }
 #endif
 
    if (unlikely(info->indirect)) {
@@ -604,38 +765,22 @@ nve4_launch_grid(struct pipe_context *pipe, const struct pipe_grid_info *info)
       PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x08 << 1));
       PUSH_DATAp(push, (const uint32_t *)desc, 256 / 4);
 
-      /* overwrite griddim_x and griddim_y as two 32-bits integers even
-       * if griddim_y must be a 16-bits integer */
-      BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
-      PUSH_DATAh(push, desc_gpuaddr + 48);
-      PUSH_DATA (push, desc_gpuaddr + 48);
-      BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
-      PUSH_DATA (push, 8);
-      PUSH_DATA (push, 1);
-
-      nouveau_pushbuf_space(push, 16, 0, 1);
-      PUSH_REFN(push, res->bo, NOUVEAU_BO_RD | res->domain);
-
-      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + (8 / 4));
-      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x08 << 1));
-      nouveau_pushbuf_data(push, res->bo, offset,
-                           NVC0_IB_ENTRY_1_NO_PREFETCH | 2 * 4);
+      if (nvc0->screen->compute->oclass >= GP100_COMPUTE_CLASS) {
+         nve4_upload_indirect_desc(push, res, desc_gpuaddr + 48, 12, offset);
+      } else {
+         /* overwrite griddim_x and griddim_y as two 32-bit integers even
+          * if griddim_y must be a 16-bit integer */
+         nve4_upload_indirect_desc(push, res, desc_gpuaddr + 48, 8, offset);
 
-      /* overwrite the 16 high bits of griddim_y with griddim_z because
-       * we need (z << 16) | x */
-      BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
-      PUSH_DATAh(push, desc_gpuaddr + 54);
-      PUSH_DATA (push, desc_gpuaddr + 54);
-      BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
-      PUSH_DATA (push, 4);
-      PUSH_DATA (push, 1);
-      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + (4 / 4));
-      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x08 << 1));
-      nouveau_pushbuf_data(push, res->bo, offset + 8,
-                           NVC0_IB_ENTRY_1_NO_PREFETCH | 1 * 4);
+         /* overwrite the high 16 bits of griddim_y with griddim_z because
+          * we need (z << 16) | y */
+         nve4_upload_indirect_desc(push, res, desc_gpuaddr + 54, 4, offset + 8);
+      }
    }
 
    /* upload descriptor and flush */
+   nouveau_pushbuf_space(push, 32, 1, 0);
+   PUSH_REFN(push, screen->text, NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_RD);
    BEGIN_NVC0(push, NVE4_CP(LAUNCH_DESC_ADDRESS), 1);
    PUSH_DATA (push, desc_gpuaddr >> 8);
    BEGIN_NVC0(push, NVE4_CP(LAUNCH), 1);
@@ -643,11 +788,14 @@ nve4_launch_grid(struct pipe_context *pipe, const struct pipe_grid_info *info)
    BEGIN_NVC0(push, SUBC_CP(NV50_GRAPH_SERIALIZE), 1);
    PUSH_DATA (push, 0);
 
+   nvc0_update_compute_invocations_counter(nvc0, info);
+
 out:
    if (ret)
       NOUVEAU_ERR("Failed to launch grid !\n");
    nouveau_scratch_done(&nvc0->base);
    nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_DESC);
+   nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_BINDLESS);
 }
 
 
@@ -673,6 +821,7 @@ nve4_compute_validate_textures(struct nvc0_context *nvc0)
          continue;
       }
       res = nv04_resource(tic->pipe.texture);
+      nvc0_update_tic(nvc0, tic, res);
 
       if (tic->id < 0) {
          tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);
@@ -703,8 +852,10 @@ nve4_compute_validate_textures(struct nvc0_context *nvc0)
       if (dirty)
          BCTX_REFN(nvc0->bufctx_cp, CP_TEX(i), res, RD);
    }
-   for (; i < nvc0->state.num_textures[s]; ++i)
+   for (; i < nvc0->state.num_textures[s]; ++i) {
       nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
+      nvc0->textures_dirty[s] |= 1 << i;
+   }
 
    if (n[0]) {
       BEGIN_NIC0(push, NVE4_CP(TIC_FLUSH), n[0]);
@@ -716,6 +867,14 @@ nve4_compute_validate_textures(struct nvc0_context *nvc0)
    }
 
    nvc0->state.num_textures[s] = nvc0->num_textures[s];
+
+   /* Invalidate all 3D textures because they are aliased. */
+   for (int s = 0; s < 5; s++) {
+      for (int i = 0; i < nvc0->num_textures[s]; i++)
+         nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_TEX(s, i));
+      nvc0->textures_dirty[s] = ~0;
+   }
+   nvc0->dirty_3d |= NVC0_NEW_3D_TEXTURES;
 }
 
 
@@ -763,6 +922,7 @@ nve4_compute_dump_launch_desc(const struct nve4_cp_launch_desc *desc)
    debug_printf("barrier count: %u\n", desc->bar_alloc);
    debug_printf("$r count: %u\n", desc->gpr_alloc);
    debug_printf("cache split: %s\n", nve4_cache_split_name(desc->cache_split));
+   debug_printf("linked tsc: %d\n", desc->linked_tsc);
 
    for (i = 0; i < 8; ++i) {
       uint64_t address;
@@ -777,6 +937,53 @@ nve4_compute_dump_launch_desc(const struct nve4_cp_launch_desc *desc)
                    i, address, size, valid ? "" : "  (invalid)");
    }
 }
+
+static void
+gp100_compute_dump_launch_desc(const struct gp100_cp_launch_desc *desc)
+{
+   const uint32_t *data = (const uint32_t *)desc;
+   unsigned i;
+   bool zero = false;
+
+   debug_printf("COMPUTE LAUNCH DESCRIPTOR:\n");
+
+   for (i = 0; i < sizeof(*desc); i += 4) {
+      if (data[i / 4]) {
+         debug_printf("[%x]: 0x%08x\n", i, data[i / 4]);
+         zero = false;
+      } else
+      if (!zero) {
+         debug_printf("...\n");
+         zero = true;
+      }
+   }
+
+   debug_printf("entry = 0x%x\n", desc->entry);
+   debug_printf("grid dimensions = %ux%ux%u\n",
+                desc->griddim_x, desc->griddim_y, desc->griddim_z);
+   debug_printf("block dimensions = %ux%ux%u\n",
+                desc->blockdim_x, desc->blockdim_y, desc->blockdim_z);
+   debug_printf("s[] size: 0x%x\n", desc->shared_size);
+   debug_printf("l[] size: -0x%x / +0x%x\n",
+                desc->local_size_n, desc->local_size_p);
+   debug_printf("stack size: 0x%x\n", desc->cstack_size);
+   debug_printf("barrier count: %u\n", desc->bar_alloc);
+   debug_printf("$r count: %u\n", desc->gpr_alloc);
+   debug_printf("linked tsc: %d\n", desc->linked_tsc);
+
+   for (i = 0; i < 8; ++i) {
+      uint64_t address;
+      uint32_t size = desc->cb[i].size_sh4 << 4;
+      bool valid = !!(desc->cb_mask & (1 << i));
+
+      address = ((uint64_t)desc->cb[i].address_h << 32) | desc->cb[i].address_l;
+
+      if (!valid && !address && !size)
+         continue;
+      debug_printf("CB[%u]: address = 0x%"PRIx64", size 0x%x%s\n",
+                   i, address, size, valid ? "" : "  (invalid)");
+   }
+}
 #endif
 
 #ifdef NOUVEAU_NVE4_MP_TRAP_HANDLER