    util_unreference_framebuffer_state(&nv50->framebuffer);
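+   /* nv50->vtxbuf[] holds PIPE_MAX_ATTRIBS entries; num_vtxbufs must not exceed it */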
+   assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
    for (i = 0; i < nv50->num_vtxbufs; ++i)
       pipe_resource_reference(&nv50->vtxbuf[i].buffer, NULL);
    }
    if (res->bind & PIPE_BIND_VERTEX_BUFFER) {
+      assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
       for (i = 0; i < nv50->num_vtxbufs; ++i) {
          if (nv50->vtxbuf[i].buffer == res) {
             nv50->dirty |= NV50_NEW_ARRAYS;
 nv50_user_vbuf_range(struct nv50_context *nv50, int vbi,
                      uint32_t *base, uint32_t *size)
 {
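+   /* vbi indexes nv50->vtxbuf[], which has PIPE_MAX_ATTRIBS entries */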
+   assert(vbi < PIPE_MAX_ATTRIBS);
    if (unlikely(nv50->vertex->instance_bufs & (1 << vbi))) {
       /* TODO: use min and max instance divisor to get a proper range */
       *base = 0;
 {
    unsigned b;
+   assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
    for (b = 0; b < nv50->num_vtxbufs; ++b) {
       struct nouveau_bo *bo;
       const struct pipe_vertex_buffer *vb = &nv50->vtxbuf[b];
    for (i = 0; i < nv50->vertex->num_elements; ++i) {
       struct pipe_vertex_element *ve = &nv50->vertex->element[i].pipe;
       const unsigned b = ve->vertex_buffer_index;
-      struct pipe_vertex_buffer *vb = &nv50->vtxbuf[b];
+      struct pipe_vertex_buffer *vb;
       uint32_t base, size;
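+      /* defer the vtxbuf[b] lookup until b has been bounds-checked */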
+      assert(b < PIPE_MAX_ATTRIBS);
+      vb = &nv50->vtxbuf[b];
+
       if (!(nv50->vbo_user & (1 << b)))
          continue;
    if (!nv50->vbo_fifo) {
       /* if vertex buffer was written by GPU - flush VBO cache */
+      assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
       for (i = 0; i < nv50->num_vtxbufs; ++i) {
          struct nv04_resource *buf = nv04_resource(nv50->vtxbuf[i].buffer);
          if (buf && buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
    }
    for (i = 0; i < vertex->num_elements; ++i) {
       const unsigned b = vertex->element[i].pipe.vertex_buffer_index;
+
+      assert(b < PIPE_MAX_ATTRIBS);
       ve = &vertex->element[i];
       vb = &nv50->vtxbuf[b];
    for (i = 0; i < vertex->num_elements; ++i) {
       uint64_t address, limit;
       const unsigned b = vertex->element[i].pipe.vertex_buffer_index;
+
+      assert(b < PIPE_MAX_ATTRIBS);
       ve = &vertex->element[i];
       vb = &nv50->vtxbuf[b];