+static void
+nvc0_draw_indirect(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
+{
+   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
+   struct nv04_resource *buf = nv04_resource(info->indirect->buffer);
+   struct nv04_resource *buf_count = nv04_resource(info->indirect->indirect_draw_count);
+   unsigned size, macro, count = info->indirect->draw_count, drawid = info->drawid;
+   uint32_t offset = buf->offset + info->indirect->offset;
+   struct nvc0_screen *screen = nvc0->screen;
+
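+   /* worst-case space: 1 dword for the optional serialize below,
+    * 4 for the CB_SIZE setup, 2 for CB_POS */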
+   PUSH_SPACE(push, 7);
+
+   /* must make FIFO wait for engines idle before continuing to process */
+   if ((buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr)) ||
+       (buf_count && buf_count->fence_wr &&
+        !nouveau_fence_signalled(buf_count->fence_wr))) {
+      IMMED_NVC0(push, SUBC_3D(NV10_SUBCHAN_REF_CNT), 0);
+   }
+
+   /* Queue things up to let the macros write params to the driver constbuf */
+   BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
+   PUSH_DATA (push, NVC0_CB_AUX_SIZE);
+   PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
+   PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
+   BEGIN_NVC0(push, NVC0_3D(CB_POS), 1);
+   PUSH_DATA (push, NVC0_CB_AUX_DRAW_INFO);
+
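+   /* Indexed indirect draws consume 5 dwords of parameters per draw
+    * (count, instance count, start, index bias, start instance);
+    * array draws consume 4.
+    */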
+   if (info->index_size) {
+      assert(!info->has_user_indices);
+      assert(nouveau_resource_mapped_by_gpu(info->index.resource));
+      size = 5;
+      if (buf_count)
+         macro = NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT_COUNT;
+      else
+         macro = NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT;
+   } else {
+      if (nvc0->state.index_bias) {
+         /* index_bias is implied 0 if !info->index_size (really ?) */
+         IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 0);
+         IMMED_NVC0(push, NVC0_3D(VERTEX_ID_BASE), 0);
+         nvc0->state.index_bias = 0;
+      }
+      size = 4;
+      if (buf_count)
+         macro = NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT_COUNT;
+      else
+         macro = NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT;
+   }
+
+   /* If the stride is not the natural stride, we have to stick a separate
+    * push data reference for each draw. Otherwise it can all go in as one.
+    * Of course there is a maximum packet size, so we have to break things up
+    * along those borders as well.
+    */
+   while (count) {
+      unsigned draws = count, pushes, i;
+      if (info->indirect->stride == size * 4) {
+         draws = MIN2(draws, (NV04_PFIFO_MAX_PACKET_LEN - 4) / size);
+         pushes = 1;
+      } else {
+         draws = MIN2(draws, 32);
+         pushes = draws;
+      }
+
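+      /* Reserve space for the macro launch (header plus mode, base drawid
+       * and draw count) and one external pushbuf_data entry per pull, then
+       * reference the indirect (and optional count) buffers for reading.
+       */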
+      nouveau_pushbuf_space(push, 16, 0, pushes + !!buf_count);
+      PUSH_REFN(push, buf->bo, NOUVEAU_BO_RD | buf->domain);
+      if (buf_count)
+         PUSH_REFN(push, buf_count->bo, NOUVEAU_BO_RD | buf_count->domain);
+      PUSH_DATA(push,
+                NVC0_FIFO_PKHDR_1I(0, macro, 3 + !!buf_count + draws * size));
+      PUSH_DATA(push, nvc0_prim_gl(info->mode));
+      PUSH_DATA(push, drawid);
+      PUSH_DATA(push, draws);
+      if (buf_count) {
+         nouveau_pushbuf_data(push,
+                              buf_count->bo,
+                              buf_count->offset + info->indirect->indirect_draw_count_offset,
+                              NVC0_IB_ENTRY_1_NO_PREFETCH | 4);
+      }
+      if (pushes == 1) {
+         nouveau_pushbuf_data(push,
+                              buf->bo, offset,
+                              NVC0_IB_ENTRY_1_NO_PREFETCH | (size * 4 * draws));
+         offset += draws * info->indirect->stride;
+      } else {
+         for (i = 0; i < pushes; i++) {
+            nouveau_pushbuf_data(push,
+                                 buf->bo, offset,
+                                 NVC0_IB_ENTRY_1_NO_PREFETCH | (size * 4));
+            offset += info->indirect->stride;
+         }
+      }
+      count -= draws;
+      drawid += draws;
+   }
+}
+
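+/* Emit primitive restart state only when the enable bit changes; the restart
+ * index itself is not tracked, so it is re-sent whenever restart stays
+ * enabled.
+ */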
+static inline void
+nvc0_update_prim_restart(struct nvc0_context *nvc0, bool en, uint32_t index)
+{
+   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
+
+   if (en != nvc0->state.prim_restart) {
+      if (en) {
+         BEGIN_NVC0(push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
+         PUSH_DATA (push, 1);
+         PUSH_DATA (push, index);
+      } else {
+         IMMED_NVC0(push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
+      }
+      nvc0->state.prim_restart = en;
+   } else
+   if (en) {
+      BEGIN_NVC0(push, NVC0_3D(PRIM_RESTART_INDEX), 1);
+      PUSH_DATA (push, index);
+   }
+}
+