nv50,nvc0: handle user index buffers
author      Christoph Bumiller <e0425955@student.tuwien.ac.at>
            Wed, 16 May 2012 18:54:23 +0000 (20:54 +0200)
committer   Christoph Bumiller <e0425955@student.tuwien.ac.at>
            Thu, 17 May 2012 13:24:58 +0000 (15:24 +0200)
src/gallium/drivers/nv50/nv50_push.c
src/gallium/drivers/nv50/nv50_screen.c
src/gallium/drivers/nv50/nv50_state.c
src/gallium/drivers/nv50/nv50_vbo.c
src/gallium/drivers/nvc0/nvc0_screen.c
src/gallium/drivers/nvc0/nvc0_state.c
src/gallium/drivers/nvc0/nvc0_vbo.c
src/gallium/drivers/nvc0/nvc0_vbo_translate.c

diff --git a/src/gallium/drivers/nv50/nv50_push.c b/src/gallium/drivers/nv50/nv50_push.c
index 3abe189e7b522038bbba9a01466f40abbc3c3ce6..1f7bc7679f077b393692b2a70f497bd5504d0a17 100644
--- a/src/gallium/drivers/nv50/nv50_push.c
+++ b/src/gallium/drivers/nv50/nv50_push.c
@@ -13,7 +13,7 @@
 struct push_context {
    struct nouveau_pushbuf *push;
 
-   void *idxbuf;
+   const void *idxbuf;
 
    float edgeflag;
    int edgeflag_attr;
@@ -234,9 +234,13 @@ nv50_push_vbo(struct nv50_context *nv50, const struct pipe_draw_info *info)
    }
 
    if (info->indexed) {
-      ctx.idxbuf = nouveau_resource_map_offset(&nv50->base,
-                                               nv04_resource(nv50->idxbuf.buffer),
-                                               nv50->idxbuf.offset, NOUVEAU_BO_RD);
+      if (nv50->idxbuf.buffer) {
+         ctx.idxbuf = nouveau_resource_map_offset(&nv50->base,
+            nv04_resource(nv50->idxbuf.buffer), nv50->idxbuf.offset,
+            NOUVEAU_BO_RD);
+      } else {
+         ctx.idxbuf = nv50->idxbuf.user_buffer;
+      }
       if (!ctx.idxbuf)
          return;
       index_size = nv50->idxbuf.index_size;
diff --git a/src/gallium/drivers/nv50/nv50_screen.c b/src/gallium/drivers/nv50/nv50_screen.c
index 9f356ff6556e5312e145a588741e41dbef3d02a5..1874f3ea81a693d319cf43fde52a53e2d89f63ea 100644
--- a/src/gallium/drivers/nv50/nv50_screen.c
+++ b/src/gallium/drivers/nv50/nv50_screen.c
@@ -152,9 +152,9 @@ nv50_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
    case PIPE_CAP_TGSI_CAN_COMPACT_VARYINGS:
    case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
    case PIPE_CAP_USER_VERTEX_BUFFERS:
-   case PIPE_CAP_USER_INDEX_BUFFERS:
       return 0; /* state trackers will know better */
    case PIPE_CAP_USER_CONSTANT_BUFFERS:
+   case PIPE_CAP_USER_INDEX_BUFFERS:
       return 1;
    case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
       return 256;
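
Not part of the commit, but for context: moving PIPE_CAP_USER_INDEX_BUFFERS from the "return 0" group into the "return 1" group (the same flip is made for nvc0 below) tells state trackers that they may hand index data to the driver as a plain CPU pointer instead of uploading it into a pipe_resource first. A minimal, hedged sketch of such a capability check, assuming the standard pipe_screen::get_param interface; the helper name is made up for illustration:

#include "pipe/p_defines.h"
#include "pipe/p_screen.h"

/* Hypothetical state-tracker helper (not from this commit): decide whether
 * application-supplied index data can be passed through as a user pointer
 * or has to be copied into a pipe_resource before drawing. */
static boolean
screen_accepts_user_indices(struct pipe_screen *screen)
{
   return screen->get_param(screen, PIPE_CAP_USER_INDEX_BUFFERS) ? TRUE : FALSE;
}
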
diff --git a/src/gallium/drivers/nv50/nv50_state.c b/src/gallium/drivers/nv50/nv50_state.c
index eea3ffd5270942f56b9db9677e7bc8e07fcec48c..1e7d17a71971be8a93868f6a3a76c55d76cb6948 100644
--- a/src/gallium/drivers/nv50/nv50_state.c
+++ b/src/gallium/drivers/nv50/nv50_state.c
@@ -911,12 +911,15 @@ nv50_set_index_buffer(struct pipe_context *pipe,
    if (nv50->idxbuf.buffer)
       nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_INDEX);
 
-   if (ib && ib->buffer) {
+   if (ib) {
       pipe_resource_reference(&nv50->idxbuf.buffer, ib->buffer);
-      nv50->idxbuf.offset = ib->offset;
       nv50->idxbuf.index_size = ib->index_size;
-      if (nouveau_resource_mapped_by_gpu(ib->buffer))
+      if (ib->buffer) {
+         nv50->idxbuf.offset = ib->offset;
          BCTX_REFN(nv50->bufctx_3d, INDEX, nv04_resource(ib->buffer), RD);
+      } else {
+         nv50->idxbuf.user_buffer = ib->user_buffer;
+      }
    } else {
       pipe_resource_reference(&nv50->idxbuf.buffer, NULL);
    }
diff --git a/src/gallium/drivers/nv50/nv50_vbo.c b/src/gallium/drivers/nv50/nv50_vbo.c
index 323677eaf8076abdf6b1e2ecd841f284bdf71f5b..d21d699bebe070b293c8067afa45b77a4030e6c1 100644
--- a/src/gallium/drivers/nv50/nv50_vbo.c
+++ b/src/gallium/drivers/nv50/nv50_vbo.c
@@ -454,7 +454,7 @@ nv50_draw_arrays(struct nv50_context *nv50,
 }
 
 static void
-nv50_draw_elements_inline_u08(struct nouveau_pushbuf *push, uint8_t *map,
+nv50_draw_elements_inline_u08(struct nouveau_pushbuf *push, const uint8_t *map,
                               unsigned start, unsigned count)
 {
    map += start;
@@ -480,7 +480,7 @@ nv50_draw_elements_inline_u08(struct nouveau_pushbuf *push, uint8_t *map,
 }
 
 static void
-nv50_draw_elements_inline_u16(struct nouveau_pushbuf *push, uint16_t *map,
+nv50_draw_elements_inline_u16(struct nouveau_pushbuf *push, const uint16_t *map,
                               unsigned start, unsigned count)
 {
    map += start;
@@ -503,7 +503,7 @@ nv50_draw_elements_inline_u16(struct nouveau_pushbuf *push, uint16_t *map,
 }
 
 static void
-nv50_draw_elements_inline_u32(struct nouveau_pushbuf *push, uint32_t *map,
+nv50_draw_elements_inline_u32(struct nouveau_pushbuf *push, const uint32_t *map,
                               unsigned start, unsigned count)
 {
    map += start;
@@ -520,7 +520,8 @@ nv50_draw_elements_inline_u32(struct nouveau_pushbuf *push, uint32_t *map,
 }
 
 static void
-nv50_draw_elements_inline_u32_short(struct nouveau_pushbuf *push, uint32_t *map,
+nv50_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
+                                    const uint32_t *map,
                                     unsigned start, unsigned count)
 {
    map += start;
@@ -548,8 +549,6 @@ nv50_draw_elements(struct nv50_context *nv50, boolean shorten,
                    unsigned instance_count, int32_t index_bias)
 {
    struct nouveau_pushbuf *push = nv50->base.pushbuf;
-   void *data;
-   struct nv04_resource *buf = nv04_resource(nv50->idxbuf.buffer);
    unsigned prim;
    const unsigned index_size = nv50->idxbuf.index_size;
 
@@ -561,12 +560,13 @@ nv50_draw_elements(struct nv50_context *nv50, boolean shorten,
       nv50->state.index_bias = index_bias;
    }
 
-   if (nouveau_resource_mapped_by_gpu(nv50->idxbuf.buffer)) {
+   if (nv50->idxbuf.buffer) {
+      struct nv04_resource *buf = nv04_resource(nv50->idxbuf.buffer);
       unsigned pb_start;
       unsigned pb_bytes;
-      const unsigned base = buf->offset;
+      const unsigned base = buf->offset + nv50->idxbuf.offset;
 
-      start += nv50->idxbuf.offset >> (index_size >> 1);
+      assert(nouveau_resource_mapped_by_gpu(nv50->idxbuf.buffer));
 
       while (instance_count--) {
          BEGIN_NV04(push, NV50_3D(VERTEX_BEGIN_GL), 1);
@@ -609,10 +609,7 @@ nv50_draw_elements(struct nv50_context *nv50, boolean shorten,
          prim |= NV50_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
       }
    } else {
-      data = nouveau_resource_map_offset(&nv50->base, buf,
-                                         nv50->idxbuf.offset, NOUVEAU_BO_RD);
-      if (!data)
-         return;
+      const void *data = nv50->idxbuf.user_buffer;
 
       while (instance_count--) {
          BEGIN_NV04(push, NV50_3D(VERTEX_BEGIN_GL), 1);
@@ -749,8 +746,6 @@ nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
    if (info->indexed) {
       boolean shorten = info->max_index <= 65535;
 
-      assert(nv50->idxbuf.buffer);
-
       if (info->primitive_restart != nv50->state.prim_restart) {
          if (info->primitive_restart) {
             BEGIN_NV04(push, NV50_3D(PRIM_RESTART_ENABLE), 2);
diff --git a/src/gallium/drivers/nvc0/nvc0_screen.c b/src/gallium/drivers/nvc0/nvc0_screen.c
index 498207f613d04308cd94495f1185176de1802d93..3698e71924f1ca946cc91d2018a7b6ee6fdfa189 100644
--- a/src/gallium/drivers/nvc0/nvc0_screen.c
+++ b/src/gallium/drivers/nvc0/nvc0_screen.c
@@ -140,9 +140,9 @@ nvc0_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
    case PIPE_CAP_TGSI_CAN_COMPACT_VARYINGS:
    case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
    case PIPE_CAP_USER_VERTEX_BUFFERS:
-   case PIPE_CAP_USER_INDEX_BUFFERS:
       return 0; /* state trackers will know better */
    case PIPE_CAP_USER_CONSTANT_BUFFERS:
+   case PIPE_CAP_USER_INDEX_BUFFERS:
       return 1;
    case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
       return 256;
diff --git a/src/gallium/drivers/nvc0/nvc0_state.c b/src/gallium/drivers/nvc0/nvc0_state.c
index cab238e7e06748de4dee695e2bff88b579e59220..5eee9d4bf88fdd1587f31cbdfa26d10af2667ea4 100644
--- a/src/gallium/drivers/nvc0/nvc0_state.c
+++ b/src/gallium/drivers/nvc0/nvc0_state.c
@@ -802,11 +802,16 @@ nvc0_set_index_buffer(struct pipe_context *pipe,
     if (nvc0->idxbuf.buffer)
        nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_IDX);
 
-    if (ib && ib->buffer) {
-       nvc0->dirty |= NVC0_NEW_IDXBUF;
+    if (ib) {
        pipe_resource_reference(&nvc0->idxbuf.buffer, ib->buffer);
-       nvc0->idxbuf.offset = ib->offset;
        nvc0->idxbuf.index_size = ib->index_size;
+       if (ib->buffer) {
+          nvc0->idxbuf.offset = ib->offset;
+          nvc0->dirty |= NVC0_NEW_IDXBUF;
+       } else {
+          nvc0->idxbuf.user_buffer = ib->user_buffer;
+          nvc0->dirty &= ~NVC0_NEW_IDXBUF;
+       }
     } else {
        nvc0->dirty &= ~NVC0_NEW_IDXBUF;
        pipe_resource_reference(&nvc0->idxbuf.buffer, NULL);
diff --git a/src/gallium/drivers/nvc0/nvc0_vbo.c b/src/gallium/drivers/nvc0/nvc0_vbo.c
index 5a6636f51c98302b9ab457433ebd13004797b930..c1c9050209ae3c6923fb9484ad151db940d8a8cd 100644
--- a/src/gallium/drivers/nvc0/nvc0_vbo.c
+++ b/src/gallium/drivers/nvc0/nvc0_vbo.c
@@ -427,8 +427,7 @@ nvc0_idxbuf_validate(struct nvc0_context *nvc0)
    struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
 
    assert(buf);
-   if (!nouveau_resource_mapped_by_gpu(&buf->base))
-      return;
+   assert(nouveau_resource_mapped_by_gpu(&buf->base));
 
    PUSH_SPACE(push, 6);
    BEGIN_NVC0(push, NVC0_3D(INDEX_ARRAY_START_HIGH), 5);
@@ -507,7 +506,7 @@ nvc0_draw_arrays(struct nvc0_context *nvc0,
 }
 
 static void
-nvc0_draw_elements_inline_u08(struct nouveau_pushbuf *push, uint8_t *map,
+nvc0_draw_elements_inline_u08(struct nouveau_pushbuf *push, const uint8_t *map,
                               unsigned start, unsigned count)
 {
    map += start;
@@ -535,7 +534,7 @@ nvc0_draw_elements_inline_u08(struct nouveau_pushbuf *push, uint8_t *map,
 }
 
 static void
-nvc0_draw_elements_inline_u16(struct nouveau_pushbuf *push, uint16_t *map,
+nvc0_draw_elements_inline_u16(struct nouveau_pushbuf *push, const uint16_t *map,
                               unsigned start, unsigned count)
 {
    map += start;
@@ -560,7 +559,7 @@ nvc0_draw_elements_inline_u16(struct nouveau_pushbuf *push, uint16_t *map,
 }
 
 static void
-nvc0_draw_elements_inline_u32(struct nouveau_pushbuf *push, uint32_t *map,
+nvc0_draw_elements_inline_u32(struct nouveau_pushbuf *push, const uint32_t *map,
                               unsigned start, unsigned count)
 {
    map += start;
@@ -578,7 +577,8 @@ nvc0_draw_elements_inline_u32(struct nouveau_pushbuf *push, uint32_t *map,
 }
 
 static void
-nvc0_draw_elements_inline_u32_short(struct nouveau_pushbuf *push, uint32_t *map,
+nvc0_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
+                                    const uint32_t *map,
                                     unsigned start, unsigned count)
 {
    map += start;
@@ -608,7 +608,6 @@ nvc0_draw_elements(struct nvc0_context *nvc0, boolean shorten,
                    unsigned instance_count, int32_t index_bias)
 {
    struct nouveau_pushbuf *push = nvc0->base.pushbuf;
-   void *data;
    unsigned prim;
    const unsigned index_size = nvc0->idxbuf.index_size;
 
@@ -621,7 +620,7 @@ nvc0_draw_elements(struct nvc0_context *nvc0, boolean shorten,
       nvc0->state.index_bias = index_bias;
    }
 
-   if (nouveau_resource_mapped_by_gpu(nvc0->idxbuf.buffer)) {
+   if (nvc0->idxbuf.buffer) {
       PUSH_SPACE(push, 1);
       IMMED_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), prim);
       do {
@@ -637,11 +636,7 @@ nvc0_draw_elements(struct nvc0_context *nvc0, boolean shorten,
       } while (instance_count);
       IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0);
    } else {
-      data = nouveau_resource_map_offset(&nvc0->base,
-                                         nv04_resource(nvc0->idxbuf.buffer),
-                                         nvc0->idxbuf.offset, NOUVEAU_BO_RD);
-      if (!data)
-         return;
+      const void *data = nvc0->idxbuf.user_buffer;
 
       while (instance_count--) {
          PUSH_SPACE(push, 2);
@@ -768,8 +763,6 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
    if (info->indexed) {
       boolean shorten = info->max_index <= 65535;
 
-      assert(nvc0->idxbuf.buffer);
-
       if (info->primitive_restart != nvc0->state.prim_restart) {
          if (info->primitive_restart) {
             BEGIN_NVC0(push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
diff --git a/src/gallium/drivers/nvc0/nvc0_vbo_translate.c b/src/gallium/drivers/nvc0/nvc0_vbo_translate.c
index 26f8cb5fbafed37e250ba8492d432af31ad2693f..6317c21a8d7bbaeab8f48db7884a0eaa1fdfcfaa 100644
--- a/src/gallium/drivers/nvc0/nvc0_vbo_translate.c
+++ b/src/gallium/drivers/nvc0/nvc0_vbo_translate.c
@@ -78,11 +78,13 @@ nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
 static INLINE void
 nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0)
 {
-   struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
-   unsigned offset = nvc0->idxbuf.offset;
-
-   ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base,
-                    buf, offset, NOUVEAU_BO_RD);
+   if (nvc0->idxbuf.buffer) {
+      struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
+      ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base,
+         buf, nvc0->idxbuf.offset, NOUVEAU_BO_RD);
+   } else {
+      ctx->idxbuf = nvc0->idxbuf.user_buffer;
+   }
 }
 
 static INLINE void
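
Illustrative usage, not part of the commit: with the driver-side changes above, a state tracker can bind an index array that lives only in CPU memory via pipe_index_buffer::user_buffer and draw from it, and the nv50/nvc0 draw paths will read that pointer directly instead of requiring an uploaded GPU resource. A minimal sketch assuming the gallium interface of this era (pipe_context::set_index_buffer, pipe_draw_info::indexed); vertex buffers, shaders and framebuffer state are assumed to be bound already, and the function name is made up:

#include <stdint.h>
#include <string.h>
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"

/* Hypothetical example, not from the commit: bind a stack-resident index
 * array via pipe_index_buffer::user_buffer and issue an indexed draw. */
static void
draw_quad_with_user_indices(struct pipe_context *pipe)
{
   static const uint16_t indices[6] = { 0, 1, 2, 2, 1, 3 };
   struct pipe_index_buffer ib;
   struct pipe_draw_info info;

   memset(&ib, 0, sizeof(ib));
   ib.index_size = 2;         /* 16-bit indices */
   ib.buffer = NULL;          /* no GPU resource ...    */
   ib.user_buffer = indices;  /* ... just a CPU pointer */
   pipe->set_index_buffer(pipe, &ib);

   memset(&info, 0, sizeof(info));
   info.indexed = TRUE;
   info.mode = PIPE_PRIM_TRIANGLES;
   info.start = 0;
   info.count = 6;
   info.instance_count = 1;
   info.min_index = 0;
   info.max_index = 3;
   pipe->draw_vbo(pipe, &info);
}
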