struct push_context {
struct nouveau_pushbuf *push;
- void *idxbuf;
+ const void *idxbuf;
float edgeflag;
int edgeflag_attr;
};
if (info->indexed) {
- ctx.idxbuf = nouveau_resource_map_offset(&nv50->base,
- nv04_resource(nv50->idxbuf.buffer),
- nv50->idxbuf.offset, NOUVEAU_BO_RD);
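+ /* A resource-backed index buffer must be mapped for CPU access here;
+ * a user index buffer is already a plain CPU pointer. */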
+ if (nv50->idxbuf.buffer) {
+ ctx.idxbuf = nouveau_resource_map_offset(&nv50->base,
+ nv04_resource(nv50->idxbuf.buffer), nv50->idxbuf.offset,
+ NOUVEAU_BO_RD);
+ } else {
+ ctx.idxbuf = nv50->idxbuf.user_buffer;
+ }
if (!ctx.idxbuf)
return;
index_size = nv50->idxbuf.index_size;
case PIPE_CAP_TGSI_CAN_COMPACT_VARYINGS:
case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
case PIPE_CAP_USER_VERTEX_BUFFERS:
- case PIPE_CAP_USER_INDEX_BUFFERS:
return 0; /* state trackers will know better */
case PIPE_CAP_USER_CONSTANT_BUFFERS:
+ case PIPE_CAP_USER_INDEX_BUFFERS:
return 1;
case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
return 256;
if (nv50->idxbuf.buffer)
nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_INDEX);
- if (ib && ib->buffer) {
+ if (ib) {
pipe_resource_reference(&nv50->idxbuf.buffer, ib->buffer);
- nv50->idxbuf.offset = ib->offset;
nv50->idxbuf.index_size = ib->index_size;
- if (nouveau_resource_mapped_by_gpu(ib->buffer))
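+ /* Only resource-backed index buffers get an offset and a bufctx
+ * reference; for a user index buffer just remember the pointer. */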
+ if (ib->buffer) {
+ nv50->idxbuf.offset = ib->offset;
BCTX_REFN(nv50->bufctx_3d, INDEX, nv04_resource(ib->buffer), RD);
+ } else {
+ nv50->idxbuf.user_buffer = ib->user_buffer;
+ }
} else {
pipe_resource_reference(&nv50->idxbuf.buffer, NULL);
}
}
static void
-nv50_draw_elements_inline_u08(struct nouveau_pushbuf *push, uint8_t *map,
+nv50_draw_elements_inline_u08(struct nouveau_pushbuf *push, const uint8_t *map,
unsigned start, unsigned count)
{
map += start;
}
static void
-nv50_draw_elements_inline_u16(struct nouveau_pushbuf *push, uint16_t *map,
+nv50_draw_elements_inline_u16(struct nouveau_pushbuf *push, const uint16_t *map,
unsigned start, unsigned count)
{
map += start;
}
static void
-nv50_draw_elements_inline_u32(struct nouveau_pushbuf *push, uint32_t *map,
+nv50_draw_elements_inline_u32(struct nouveau_pushbuf *push, const uint32_t *map,
unsigned start, unsigned count)
{
map += start;
}
static void
-nv50_draw_elements_inline_u32_short(struct nouveau_pushbuf *push, uint32_t *map,
+nv50_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
+ const uint32_t *map,
unsigned start, unsigned count)
{
map += start;
unsigned instance_count, int32_t index_bias)
{
struct nouveau_pushbuf *push = nv50->base.pushbuf;
- void *data;
- struct nv04_resource *buf = nv04_resource(nv50->idxbuf.buffer);
unsigned prim;
const unsigned index_size = nv50->idxbuf.index_size;
nv50->state.index_bias = index_bias;
}
- if (nouveau_resource_mapped_by_gpu(nv50->idxbuf.buffer)) {
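+ /* With a resource-backed index buffer the indices are read straight
+ * from the BO; user index data is pushed inline in the else branch. */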
+ if (nv50->idxbuf.buffer) {
+ struct nv04_resource *buf = nv04_resource(nv50->idxbuf.buffer);
unsigned pb_start;
unsigned pb_bytes;
- const unsigned base = buf->offset;
+ const unsigned base = buf->offset + nv50->idxbuf.offset;
- start += nv50->idxbuf.offset >> (index_size >> 1);
+ assert(nouveau_resource_mapped_by_gpu(nv50->idxbuf.buffer));
while (instance_count--) {
BEGIN_NV04(push, NV50_3D(VERTEX_BEGIN_GL), 1);
prim |= NV50_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
}
} else {
- data = nouveau_resource_map_offset(&nv50->base, buf,
- nv50->idxbuf.offset, NOUVEAU_BO_RD);
- if (!data)
- return;
+ const void *data = nv50->idxbuf.user_buffer;
while (instance_count--) {
BEGIN_NV04(push, NV50_3D(VERTEX_BEGIN_GL), 1);
if (info->indexed) {
boolean shorten = info->max_index <= 65535;
- assert(nv50->idxbuf.buffer);
-
if (info->primitive_restart != nv50->state.prim_restart) {
if (info->primitive_restart) {
BEGIN_NV04(push, NV50_3D(PRIM_RESTART_ENABLE), 2);
case PIPE_CAP_TGSI_CAN_COMPACT_VARYINGS:
case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
case PIPE_CAP_USER_VERTEX_BUFFERS:
- case PIPE_CAP_USER_INDEX_BUFFERS:
return 0; /* state trackers will know better */
case PIPE_CAP_USER_CONSTANT_BUFFERS:
+ case PIPE_CAP_USER_INDEX_BUFFERS:
return 1;
case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
return 256;
if (nvc0->idxbuf.buffer)
nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_IDX);
- if (ib && ib->buffer) {
- nvc0->dirty |= NVC0_NEW_IDXBUF;
+ if (ib) {
pipe_resource_reference(&nvc0->idxbuf.buffer, ib->buffer);
- nvc0->idxbuf.offset = ib->offset;
nvc0->idxbuf.index_size = ib->index_size;
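+ /* Only resource-backed index buffers go through IDXBUF validation;
+ * user index data is consumed directly at draw time. */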
+ if (ib->buffer) {
+ nvc0->idxbuf.offset = ib->offset;
+ nvc0->dirty |= NVC0_NEW_IDXBUF;
+ } else {
+ nvc0->idxbuf.user_buffer = ib->user_buffer;
+ nvc0->dirty &= ~NVC0_NEW_IDXBUF;
+ }
} else {
nvc0->dirty &= ~NVC0_NEW_IDXBUF;
pipe_resource_reference(&nvc0->idxbuf.buffer, NULL);
struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
assert(buf);
- if (!nouveau_resource_mapped_by_gpu(&buf->base))
- return;
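+ /* set_index_buffer only flags NVC0_NEW_IDXBUF for resource-backed
+ * index buffers, so this path expects a GPU-accessible BO. */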
+ assert(nouveau_resource_mapped_by_gpu(&buf->base));
PUSH_SPACE(push, 6);
BEGIN_NVC0(push, NVC0_3D(INDEX_ARRAY_START_HIGH), 5);
}
static void
-nvc0_draw_elements_inline_u08(struct nouveau_pushbuf *push, uint8_t *map,
+nvc0_draw_elements_inline_u08(struct nouveau_pushbuf *push, const uint8_t *map,
unsigned start, unsigned count)
{
map += start;
}
static void
-nvc0_draw_elements_inline_u16(struct nouveau_pushbuf *push, uint16_t *map,
+nvc0_draw_elements_inline_u16(struct nouveau_pushbuf *push, const uint16_t *map,
unsigned start, unsigned count)
{
map += start;
}
static void
-nvc0_draw_elements_inline_u32(struct nouveau_pushbuf *push, uint32_t *map,
+nvc0_draw_elements_inline_u32(struct nouveau_pushbuf *push, const uint32_t *map,
unsigned start, unsigned count)
{
map += start;
}
static void
-nvc0_draw_elements_inline_u32_short(struct nouveau_pushbuf *push, uint32_t *map,
+nvc0_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
+ const uint32_t *map,
unsigned start, unsigned count)
{
map += start;
unsigned instance_count, int32_t index_bias)
{
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
- void *data;
unsigned prim;
const unsigned index_size = nvc0->idxbuf.index_size;
nvc0->state.index_bias = index_bias;
}
- if (nouveau_resource_mapped_by_gpu(nvc0->idxbuf.buffer)) {
+ if (nvc0->idxbuf.buffer) {
PUSH_SPACE(push, 1);
IMMED_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), prim);
do {
} while (instance_count);
IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0);
} else {
- data = nouveau_resource_map_offset(&nvc0->base,
- nv04_resource(nvc0->idxbuf.buffer),
- nvc0->idxbuf.offset, NOUVEAU_BO_RD);
- if (!data)
- return;
+ const void *data = nvc0->idxbuf.user_buffer;
while (instance_count--) {
PUSH_SPACE(push, 2);
if (info->indexed) {
boolean shorten = info->max_index <= 65535;
- assert(nvc0->idxbuf.buffer);
-
if (info->primitive_restart != nvc0->state.prim_restart) {
if (info->primitive_restart) {
BEGIN_NVC0(push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
static INLINE void
nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0)
{
- struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
- unsigned offset = nvc0->idxbuf.offset;
-
- ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base,
- buf, offset, NOUVEAU_BO_RD);
+ if (nvc0->idxbuf.buffer) {
+ struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
+ ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base,
+ buf, nvc0->idxbuf.offset, NOUVEAU_BO_RD);
+ } else {
+ ctx->idxbuf = nvc0->idxbuf.user_buffer;
+ }
}
static INLINE void