X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fauxiliary%2Futil%2Fu_vbuf.c;h=80c30acc76752a4e2a755d843279e257534f0ac9;hb=6fa60b5e409a514315ba787b33c80fd8b2e18d64;hp=650df9f6aef8a665fb7a5ec895db98894846b367;hpb=6cd0dbc415dd092a72a8941c45ba812b9fe74b24;p=mesa.git diff --git a/src/gallium/auxiliary/util/u_vbuf.c b/src/gallium/auxiliary/util/u_vbuf.c index 650df9f6aef..80c30acc767 100644 --- a/src/gallium/auxiliary/util/u_vbuf.c +++ b/src/gallium/auxiliary/util/u_vbuf.c @@ -146,7 +146,6 @@ struct u_vbuf { struct pipe_context *pipe; struct translate_cache *translate_cache; struct cso_cache *cso_cache; - struct u_upload_mgr *uploader; /* This is what was set in set_vertex_buffers. * May contain user buffers. */ @@ -163,9 +162,6 @@ struct u_vbuf { uint32_t dirty_real_vb_mask; /* which buffers are dirty since the last call of set_vertex_buffers */ - /* The index buffer. */ - struct pipe_index_buffer index_buffer; - /* Vertex elements. */ struct u_vbuf_elements *ve, *ve_saved; @@ -256,7 +252,8 @@ static const struct { { PIPE_FORMAT_R8G8B8A8_SSCALED, PIPE_FORMAT_R32G32B32A32_FLOAT }, }; -boolean u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps) +boolean u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps, + unsigned flags) { unsigned i; boolean fallback = FALSE; @@ -269,7 +266,7 @@ boolean u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps) for (i = 0; i < PIPE_FORMAT_COUNT; i++) caps->format_translation[i] = i; - for (i = 0; i < Elements(vbuf_format_fallbacks); i++) { + for (i = 0; i < ARRAY_SIZE(vbuf_format_fallbacks); i++) { enum pipe_format format = vbuf_format_fallbacks[i].from; if (!screen->is_format_supported(screen, format, PIPE_BUFFER, 0, @@ -294,7 +291,7 @@ boolean u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps) if (!caps->buffer_offset_unaligned || !caps->buffer_stride_unaligned || !caps->velem_src_offset_unaligned || - !caps->user_vertex_buffers) { + (!(flags & U_VBUF_FLAG_NO_USER_VBOS) && !caps->user_vertex_buffers)) { fallback = TRUE; } @@ -314,9 +311,6 @@ u_vbuf_create(struct pipe_context *pipe, mgr->translate_cache = translate_cache_create(); memset(mgr->fallback_vbs, ~0, sizeof(mgr->fallback_vbs)); - mgr->uploader = u_upload_create(pipe, 1024 * 1024, 4, - PIPE_BIND_VERTEX_BUFFER); - return mgr; } @@ -357,7 +351,8 @@ u_vbuf_set_vertex_elements_internal(struct u_vbuf *mgr, unsigned count, assert(ve); if (ve != mgr->ve) - pipe->bind_vertex_elements_state(pipe, ve->driver_cso); + pipe->bind_vertex_elements_state(pipe, ve->driver_cso); + return ve; } @@ -371,41 +366,35 @@ void u_vbuf_destroy(struct u_vbuf *mgr) { struct pipe_screen *screen = mgr->pipe->screen; unsigned i; - unsigned num_vb = screen->get_shader_param(screen, PIPE_SHADER_VERTEX, - PIPE_SHADER_CAP_MAX_INPUTS); - - mgr->pipe->set_index_buffer(mgr->pipe, NULL); - pipe_resource_reference(&mgr->index_buffer.buffer, NULL); + const unsigned num_vb = screen->get_shader_param(screen, PIPE_SHADER_VERTEX, + PIPE_SHADER_CAP_MAX_INPUTS); mgr->pipe->set_vertex_buffers(mgr->pipe, 0, num_vb, NULL); - for (i = 0; i < PIPE_MAX_ATTRIBS; i++) { - pipe_resource_reference(&mgr->vertex_buffer[i].buffer, NULL); - } - for (i = 0; i < PIPE_MAX_ATTRIBS; i++) { - pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL); - } - pipe_resource_reference(&mgr->aux_vertex_buffer_saved.buffer, NULL); + for (i = 0; i < PIPE_MAX_ATTRIBS; i++) + pipe_vertex_buffer_unreference(&mgr->vertex_buffer[i]); + for (i = 0; i < PIPE_MAX_ATTRIBS; i++) + 
pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[i]); + + pipe_vertex_buffer_unreference(&mgr->aux_vertex_buffer_saved); translate_cache_destroy(mgr->translate_cache); - u_upload_destroy(mgr->uploader); cso_cache_delete(mgr->cso_cache); FREE(mgr); } static enum pipe_error u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key, + const struct pipe_draw_info *info, unsigned vb_mask, unsigned out_vb, int start_vertex, unsigned num_vertices, - int start_index, unsigned num_indices, int min_index, - boolean unroll_indices) + int min_index, boolean unroll_indices) { struct translate *tr; struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0}; struct pipe_resource *out_buffer = NULL; uint8_t *out_map; unsigned out_offset, mask; - enum pipe_error err; /* Get a translate object. */ tr = translate_cache_find(mgr->translate_cache, key); @@ -421,17 +410,31 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key, vb = &mgr->vertex_buffer[i]; offset = vb->buffer_offset + vb->stride * start_vertex; - if (vb->user_buffer) { - map = (uint8_t*)vb->user_buffer + offset; + if (vb->is_user_buffer) { + map = (uint8_t*)vb->buffer.user + offset; } else { unsigned size = vb->stride ? num_vertices * vb->stride : sizeof(double)*4; - if (offset+size > vb->buffer->width0) { - size = vb->buffer->width0 - offset; + if (offset + size > vb->buffer.resource->width0) { + /* Don't try to map past end of buffer. This often happens when + * we're translating an attribute that's at offset > 0 from the + * start of the vertex. If we'd subtract attrib's offset from + * the size, this probably wouldn't happen. + */ + size = vb->buffer.resource->width0 - offset; + + /* Also adjust num_vertices. A common user error is to call + * glDrawRangeElements() with incorrect 'end' argument. The 'end + * value should be the max index value, but people often + * accidentally add one to this value. This adjustment avoids + * crashing (by reading past the end of a hardware buffer mapping) + * when people do that. + */ + num_vertices = (size + vb->stride - 1) / vb->stride; } - map = pipe_buffer_map_range(mgr->pipe, vb->buffer, offset, size, + map = pipe_buffer_map_range(mgr->pipe, vb->buffer.resource, offset, size, PIPE_TRANSFER_READ, &vb_transfer[i]); } @@ -445,38 +448,35 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key, /* Translate. */ if (unroll_indices) { - struct pipe_index_buffer *ib = &mgr->index_buffer; struct pipe_transfer *transfer = NULL; - unsigned offset = ib->offset + start_index * ib->index_size; + const unsigned offset = info->start * info->index_size; uint8_t *map; - assert((ib->buffer || ib->user_buffer) && ib->index_size); - /* Create and map the output buffer. 
*/ - err = u_upload_alloc(mgr->uploader, 0, - key->output_stride * num_indices, - &out_offset, &out_buffer, - (void**)&out_map); - if (err != PIPE_OK) - return err; - - if (ib->user_buffer) { - map = (uint8_t*)ib->user_buffer + offset; + u_upload_alloc(mgr->pipe->stream_uploader, 0, + key->output_stride * info->count, 4, + &out_offset, &out_buffer, + (void**)&out_map); + if (!out_buffer) + return PIPE_ERROR_OUT_OF_MEMORY; + + if (info->has_user_indices) { + map = (uint8_t*)info->index.user + offset; } else { - map = pipe_buffer_map_range(mgr->pipe, ib->buffer, offset, - num_indices * ib->index_size, + map = pipe_buffer_map_range(mgr->pipe, info->index.resource, offset, + info->count * info->index_size, PIPE_TRANSFER_READ, &transfer); } - switch (ib->index_size) { + switch (info->index_size) { case 4: - tr->run_elts(tr, (unsigned*)map, num_indices, 0, 0, out_map); + tr->run_elts(tr, (unsigned*)map, info->count, 0, 0, out_map); break; case 2: - tr->run_elts16(tr, (uint16_t*)map, num_indices, 0, 0, out_map); + tr->run_elts16(tr, (uint16_t*)map, info->count, 0, 0, out_map); break; case 1: - tr->run_elts8(tr, map, num_indices, 0, 0, out_map); + tr->run_elts8(tr, map, info->count, 0, 0, out_map); break; } @@ -485,13 +485,13 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key, } } else { /* Create and map the output buffer. */ - err = u_upload_alloc(mgr->uploader, - key->output_stride * start_vertex, - key->output_stride * num_vertices, - &out_offset, &out_buffer, - (void**)&out_map); - if (err != PIPE_OK) - return err; + u_upload_alloc(mgr->pipe->stream_uploader, + key->output_stride * start_vertex, + key->output_stride * num_vertices, 4, + &out_offset, &out_buffer, + (void**)&out_map); + if (!out_buffer) + return PIPE_ERROR_OUT_OF_MEMORY; out_offset -= key->output_stride * start_vertex; @@ -513,9 +513,9 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key, mgr->real_vertex_buffer[out_vb].stride = key->output_stride; /* Move the buffer reference. */ - pipe_resource_reference( - &mgr->real_vertex_buffer[out_vb].buffer, NULL); - mgr->real_vertex_buffer[out_vb].buffer = out_buffer; + pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[out_vb]); + mgr->real_vertex_buffer[out_vb].buffer.resource = out_buffer; + mgr->real_vertex_buffer[out_vb].is_user_buffer = false; return PIPE_OK; } @@ -544,6 +544,7 @@ u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr, index = ffs(unused_vb_mask) - 1; fallback_vbs[type] = index; + unused_vb_mask &= ~(1 << index); /*printf("found slot=%i for type=%i\n", index, type);*/ } } @@ -560,28 +561,27 @@ u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr, static boolean u_vbuf_translate_begin(struct u_vbuf *mgr, + const struct pipe_draw_info *info, int start_vertex, unsigned num_vertices, - int start_instance, unsigned num_instances, - int start_index, unsigned num_indices, int min_index, - boolean unroll_indices) + int min_index, boolean unroll_indices) { unsigned mask[VB_NUM] = {0}; struct translate_key key[VB_NUM]; unsigned elem_index[VB_NUM][PIPE_MAX_ATTRIBS]; /* ... 
into key.elements */ unsigned i, type; - unsigned incompatible_vb_mask = mgr->incompatible_vb_mask & - mgr->ve->used_vb_mask; + const unsigned incompatible_vb_mask = mgr->incompatible_vb_mask & + mgr->ve->used_vb_mask; - int start[VB_NUM] = { - start_vertex, /* VERTEX */ - start_instance, /* INSTANCE */ - 0 /* CONST */ + const int start[VB_NUM] = { + start_vertex, /* VERTEX */ + info->start_instance, /* INSTANCE */ + 0 /* CONST */ }; - unsigned num[VB_NUM] = { - num_vertices, /* VERTEX */ - num_instances, /* INSTANCE */ - 1 /* CONST */ + const unsigned num[VB_NUM] = { + num_vertices, /* VERTEX */ + info->instance_count, /* INSTANCE */ + 1 /* CONST */ }; memset(key, 0, sizeof(key)); @@ -625,6 +625,7 @@ u_vbuf_translate_begin(struct u_vbuf *mgr, for (i = 0; i < mgr->ve->count; i++) { struct translate_key *k; struct translate_element *te; + enum pipe_format output_format = mgr->ve->native_format[i]; unsigned bit, vb_index = mgr->ve->ve[i].vertex_buffer_index; bit = 1 << vb_index; @@ -642,7 +643,8 @@ u_vbuf_translate_begin(struct u_vbuf *mgr, } } assert(type < VB_NUM); - assert(translate_is_output_format_supported(mgr->ve->native_format[i])); + if (mgr->ve->ve[i].src_format != output_format) + assert(translate_is_output_format_supported(output_format)); /*printf("velem=%i type=%i\n", i, type);*/ /* Add the vertex element. */ @@ -655,7 +657,7 @@ u_vbuf_translate_begin(struct u_vbuf *mgr, te->input_buffer = vb_index; te->input_format = mgr->ve->ve[i].src_format; te->input_offset = mgr->ve->ve[i].src_offset; - te->output_format = mgr->ve->native_format[i]; + te->output_format = output_format; te->output_offset = k->output_stride; k->output_stride += mgr->ve->native_format_size[i]; @@ -666,10 +668,9 @@ u_vbuf_translate_begin(struct u_vbuf *mgr, for (type = 0; type < VB_NUM; type++) { if (key[type].nr_elements) { enum pipe_error err; - err = u_vbuf_translate_buffers(mgr, &key[type], mask[type], + err = u_vbuf_translate_buffers(mgr, &key[type], info, mask[type], mgr->fallback_vbs[type], - start[type], num[type], - start_index, num_indices, min_index, + start[type], num[type], min_index, unroll_indices && type == VB_VERTEX); if (err != PIPE_OK) return FALSE; @@ -692,8 +693,8 @@ u_vbuf_translate_begin(struct u_vbuf *mgr, mgr->fallback_velems[i].vertex_buffer_index = mgr->fallback_vbs[type]; /* elem_index[type][i] can only be set for one type. */ - assert(type > VB_INSTANCE || elem_index[type+1][i] == ~0); - assert(type > VB_VERTEX || elem_index[type+2][i] == ~0); + assert(type > VB_INSTANCE || elem_index[type+1][i] == ~0u); + assert(type > VB_VERTEX || elem_index[type+2][i] == ~0u); break; } } @@ -721,8 +722,8 @@ static void u_vbuf_translate_end(struct u_vbuf *mgr) /* Unreference the now-unused VBOs. */ for (i = 0; i < VB_NUM; i++) { unsigned vb = mgr->fallback_vbs[i]; - if (vb != ~0) { - pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer, NULL); + if (vb != ~0u) { + pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer.resource, NULL); mgr->fallback_vbs[i] = ~0; /* This will cause the buffer to be unbound in the driver later. */ @@ -780,10 +781,11 @@ u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count, ve->compatible_vb_mask_all = ~ve->incompatible_vb_mask_any & used_buffers; ve->incompatible_vb_mask_all = ~ve->compatible_vb_mask_any & used_buffers; - /* Align the formats to the size of DWORD if needed. */ + /* Align the formats and offsets to the size of DWORD if needed. 
*/ if (!mgr->caps.velem_src_offset_unaligned) { for (i = 0; i < count; i++) { ve->native_format_size[i] = align(ve->native_format_size[i], 4); + driver_attribs[i].src_offset = align(ve->ve[i].src_offset, 4); } } @@ -814,7 +816,7 @@ void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, uint32_t incompatible_vb_mask = 0; /* which buffers have a non-zero stride */ uint32_t nonzero_stride_vb_mask = 0; - uint32_t mask = ~(((1ull << count) - 1) << start_slot); + const uint32_t mask = ~(((1ull << count) - 1) << start_slot); /* Zero out the bits we are going to rewrite completely. */ mgr->user_vb_mask &= mask; @@ -830,9 +832,8 @@ void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, for (i = 0; i < count; i++) { unsigned dst_index = start_slot + i; - pipe_resource_reference(&mgr->vertex_buffer[dst_index].buffer, NULL); - pipe_resource_reference(&mgr->real_vertex_buffer[dst_index].buffer, - NULL); + pipe_vertex_buffer_unreference(&mgr->vertex_buffer[dst_index]); + pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[dst_index]); } pipe->set_vertex_buffers(pipe, start_slot, count, NULL); @@ -845,18 +846,13 @@ void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, struct pipe_vertex_buffer *orig_vb = &mgr->vertex_buffer[dst_index]; struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[dst_index]; - if (!vb->buffer && !vb->user_buffer) { - pipe_resource_reference(&orig_vb->buffer, NULL); - pipe_resource_reference(&real_vb->buffer, NULL); - real_vb->user_buffer = NULL; + if (!vb->buffer.resource) { + pipe_vertex_buffer_unreference(orig_vb); + pipe_vertex_buffer_unreference(real_vb); continue; } - pipe_resource_reference(&orig_vb->buffer, vb->buffer); - orig_vb->user_buffer = vb->user_buffer; - - real_vb->buffer_offset = orig_vb->buffer_offset = vb->buffer_offset; - real_vb->stride = orig_vb->stride = vb->stride; + pipe_vertex_buffer_reference(orig_vb, vb); if (vb->stride) { nonzero_stride_vb_mask |= 1 << dst_index; @@ -866,18 +862,23 @@ void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, if ((!mgr->caps.buffer_offset_unaligned && vb->buffer_offset % 4 != 0) || (!mgr->caps.buffer_stride_unaligned && vb->stride % 4 != 0)) { incompatible_vb_mask |= 1 << dst_index; - pipe_resource_reference(&real_vb->buffer, NULL); + real_vb->buffer_offset = vb->buffer_offset; + real_vb->stride = vb->stride; + pipe_vertex_buffer_unreference(real_vb); + real_vb->is_user_buffer = false; continue; } - if (!mgr->caps.user_vertex_buffers && vb->user_buffer) { + if (!mgr->caps.user_vertex_buffers && vb->is_user_buffer) { user_vb_mask |= 1 << dst_index; - pipe_resource_reference(&real_vb->buffer, NULL); + real_vb->buffer_offset = vb->buffer_offset; + real_vb->stride = vb->stride; + pipe_vertex_buffer_unreference(real_vb); + real_vb->is_user_buffer = false; continue; } - pipe_resource_reference(&real_vb->buffer, vb->buffer); - real_vb->user_buffer = vb->user_buffer; + pipe_vertex_buffer_reference(real_vb, vb); } mgr->user_vb_mask |= user_vb_mask; @@ -890,22 +891,6 @@ void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, mgr->dirty_real_vb_mask |= ~mask; } -void u_vbuf_set_index_buffer(struct u_vbuf *mgr, - const struct pipe_index_buffer *ib) -{ - struct pipe_context *pipe = mgr->pipe; - - if (ib) { - assert(ib->offset % ib->index_size == 0); - pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer); - memcpy(&mgr->index_buffer, ib, sizeof(*ib)); - } else { - pipe_resource_reference(&mgr->index_buffer.buffer, NULL); - } - - pipe->set_index_buffer(pipe, ib); -} - static enum pipe_error u_vbuf_upload_buffers(struct u_vbuf *mgr, 
int start_vertex, unsigned num_vertices, @@ -913,7 +898,7 @@ u_vbuf_upload_buffers(struct u_vbuf *mgr, { unsigned i; unsigned nr_velems = mgr->ve->count; - struct pipe_vertex_element *velems = + const struct pipe_vertex_element *velems = mgr->using_translate ? mgr->fallback_velems : mgr->ve->ve; unsigned start_offset[PIPE_MAX_ATTRIBS]; unsigned end_offset[PIPE_MAX_ATTRIBS]; @@ -921,7 +906,7 @@ u_vbuf_upload_buffers(struct u_vbuf *mgr, /* Determine how much data needs to be uploaded. */ for (i = 0; i < nr_velems; i++) { - struct pipe_vertex_element *velem = &velems[i]; + const struct pipe_vertex_element *velem = &velems[i]; unsigned index = velem->vertex_buffer_index; struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[index]; unsigned instance_div, first, size, index_bit; @@ -933,7 +918,7 @@ u_vbuf_upload_buffers(struct u_vbuf *mgr, continue; } - if (!vb->user_buffer) { + if (!vb->is_user_buffer) { continue; } @@ -975,7 +960,6 @@ u_vbuf_upload_buffers(struct u_vbuf *mgr, unsigned start, end; struct pipe_vertex_buffer *real_vb; const uint8_t *ptr; - enum pipe_error err; i = u_bit_scan(&buffer_mask); @@ -984,12 +968,12 @@ u_vbuf_upload_buffers(struct u_vbuf *mgr, assert(start < end); real_vb = &mgr->real_vertex_buffer[i]; - ptr = mgr->vertex_buffer[i].user_buffer; + ptr = mgr->vertex_buffer[i].buffer.user; - err = u_upload_data(mgr->uploader, start, end - start, ptr + start, - &real_vb->buffer_offset, &real_vb->buffer); - if (err != PIPE_OK) - return err; + u_upload_data(mgr->pipe->stream_uploader, start, end - start, 4, + ptr + start, &real_vb->buffer_offset, &real_vb->buffer.resource); + if (!real_vb->buffer.resource) + return PIPE_ERROR_OUT_OF_MEMORY; real_vb->buffer_offset -= start; } @@ -997,66 +981,66 @@ u_vbuf_upload_buffers(struct u_vbuf *mgr, return PIPE_OK; } -static boolean u_vbuf_need_minmax_index(struct u_vbuf *mgr) +static boolean u_vbuf_need_minmax_index(const struct u_vbuf *mgr) { /* See if there are any per-vertex attribs which will be uploaded or * translated. Use bitmasks to get the info instead of looping over vertex * elements. */ return (mgr->ve->used_vb_mask & - ((mgr->user_vb_mask | mgr->incompatible_vb_mask | + ((mgr->user_vb_mask | + mgr->incompatible_vb_mask | mgr->ve->incompatible_vb_mask_any) & - mgr->ve->noninstance_vb_mask_any & mgr->nonzero_stride_vb_mask)) != 0; + mgr->ve->noninstance_vb_mask_any & + mgr->nonzero_stride_vb_mask)) != 0; } -static boolean u_vbuf_mapping_vertex_buffer_blocks(struct u_vbuf *mgr) +static boolean u_vbuf_mapping_vertex_buffer_blocks(const struct u_vbuf *mgr) { /* Return true if there are hw buffers which don't need to be translated. * * We could query whether each buffer is busy, but that would * be way more costly than this. 
*/ return (mgr->ve->used_vb_mask & - (~mgr->user_vb_mask & ~mgr->incompatible_vb_mask & - mgr->ve->compatible_vb_mask_all & mgr->ve->noninstance_vb_mask_any & + (~mgr->user_vb_mask & + ~mgr->incompatible_vb_mask & + mgr->ve->compatible_vb_mask_all & + mgr->ve->noninstance_vb_mask_any & mgr->nonzero_stride_vb_mask)) != 0; } static void u_vbuf_get_minmax_index(struct pipe_context *pipe, - struct pipe_index_buffer *ib, - boolean primitive_restart, - unsigned restart_index, - unsigned start, unsigned count, - int *out_min_index, - int *out_max_index) + const struct pipe_draw_info *info, + int *out_min_index, int *out_max_index) { struct pipe_transfer *transfer = NULL; const void *indices; unsigned i; - if (ib->user_buffer) { - indices = (uint8_t*)ib->user_buffer + - ib->offset + start * ib->index_size; + if (info->has_user_indices) { + indices = (uint8_t*)info->index.user + + info->start * info->index_size; } else { - indices = pipe_buffer_map_range(pipe, ib->buffer, - ib->offset + start * ib->index_size, - count * ib->index_size, + indices = pipe_buffer_map_range(pipe, info->index.resource, + info->start * info->index_size, + info->count * info->index_size, PIPE_TRANSFER_READ, &transfer); } - switch (ib->index_size) { + switch (info->index_size) { case 4: { const unsigned *ui_indices = (const unsigned*)indices; unsigned max_ui = 0; unsigned min_ui = ~0U; - if (primitive_restart) { - for (i = 0; i < count; i++) { - if (ui_indices[i] != restart_index) { + if (info->primitive_restart) { + for (i = 0; i < info->count; i++) { + if (ui_indices[i] != info->restart_index) { if (ui_indices[i] > max_ui) max_ui = ui_indices[i]; if (ui_indices[i] < min_ui) min_ui = ui_indices[i]; } } } else { - for (i = 0; i < count; i++) { + for (i = 0; i < info->count; i++) { if (ui_indices[i] > max_ui) max_ui = ui_indices[i]; if (ui_indices[i] < min_ui) min_ui = ui_indices[i]; } @@ -1069,16 +1053,16 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe, const unsigned short *us_indices = (const unsigned short*)indices; unsigned max_us = 0; unsigned min_us = ~0U; - if (primitive_restart) { - for (i = 0; i < count; i++) { - if (us_indices[i] != restart_index) { + if (info->primitive_restart) { + for (i = 0; i < info->count; i++) { + if (us_indices[i] != info->restart_index) { if (us_indices[i] > max_us) max_us = us_indices[i]; if (us_indices[i] < min_us) min_us = us_indices[i]; } } } else { - for (i = 0; i < count; i++) { + for (i = 0; i < info->count; i++) { if (us_indices[i] > max_us) max_us = us_indices[i]; if (us_indices[i] < min_us) min_us = us_indices[i]; } @@ -1091,16 +1075,16 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe, const unsigned char *ub_indices = (const unsigned char*)indices; unsigned max_ub = 0; unsigned min_ub = ~0U; - if (primitive_restart) { - for (i = 0; i < count; i++) { - if (ub_indices[i] != restart_index) { + if (info->primitive_restart) { + for (i = 0; i < info->count; i++) { + if (ub_indices[i] != info->restart_index) { if (ub_indices[i] > max_ub) max_ub = ub_indices[i]; if (ub_indices[i] < min_ub) min_ub = ub_indices[i]; } } } else { - for (i = 0; i < count; i++) { + for (i = 0; i < info->count; i++) { if (ub_indices[i] > max_ub) max_ub = ub_indices[i]; if (ub_indices[i] < min_ub) min_ub = ub_indices[i]; } @@ -1139,9 +1123,10 @@ void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info) int start_vertex, min_index; unsigned num_vertices; boolean unroll_indices = FALSE; - uint32_t used_vb_mask = mgr->ve->used_vb_mask; + const uint32_t 
used_vb_mask = mgr->ve->used_vb_mask; uint32_t user_vb_mask = mgr->user_vb_mask & used_vb_mask; - uint32_t incompatible_vb_mask = mgr->incompatible_vb_mask & used_vb_mask; + const uint32_t incompatible_vb_mask = + mgr->incompatible_vb_mask & used_vb_mask; struct pipe_draw_info new_info; /* Normal draw. No fallback and no user buffers. */ @@ -1165,16 +1150,16 @@ void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info) struct pipe_transfer *transfer = NULL; int *data; - if (new_info.indexed) { - data = pipe_buffer_map_range(pipe, new_info.indirect, - new_info.indirect_offset, 20, + if (new_info.index_size) { + data = pipe_buffer_map_range(pipe, new_info.indirect->buffer, + new_info.indirect->offset, 20, PIPE_TRANSFER_READ, &transfer); new_info.index_bias = data[3]; new_info.start_instance = data[4]; } else { - data = pipe_buffer_map_range(pipe, new_info.indirect, - new_info.indirect_offset, 16, + data = pipe_buffer_map_range(pipe, new_info.indirect->buffer, + new_info.indirect->offset, 16, PIPE_TRANSFER_READ, &transfer); new_info.start_instance = data[3]; } @@ -1186,19 +1171,17 @@ void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info) new_info.indirect = NULL; } - if (new_info.indexed) { + if (new_info.index_size) { /* See if anything needs to be done for per-vertex attribs. */ if (u_vbuf_need_minmax_index(mgr)) { int max_index; - if (new_info.max_index != ~0) { + if (new_info.max_index != ~0u) { min_index = new_info.min_index; max_index = new_info.max_index; } else { - u_vbuf_get_minmax_index(mgr->pipe, &mgr->index_buffer, - new_info.primitive_restart, - new_info.restart_index, new_info.start, - new_info.count, &min_index, &max_index); + u_vbuf_get_minmax_index(mgr->pipe, &new_info, + &min_index, &max_index); } assert(min_index <= max_index); @@ -1234,16 +1217,14 @@ void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info) if (unroll_indices || incompatible_vb_mask || mgr->ve->incompatible_elem_mask) { - if (!u_vbuf_translate_begin(mgr, start_vertex, num_vertices, - new_info.start_instance, - new_info.instance_count, new_info.start, - new_info.count, min_index, unroll_indices)) { + if (!u_vbuf_translate_begin(mgr, &new_info, start_vertex, num_vertices, + min_index, unroll_indices)) { debug_warn_once("u_vbuf_translate_begin() failed"); return; } if (unroll_indices) { - new_info.indexed = FALSE; + new_info.index_size = 0; new_info.index_bias = 0; new_info.min_index = 0; new_info.max_index = new_info.count - 1; @@ -1287,7 +1268,7 @@ void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info) } */ - u_upload_unmap(mgr->uploader); + u_upload_unmap(pipe->stream_uploader); u_vbuf_set_driver_vertex_buffers(mgr); pipe->draw_vbo(pipe, &new_info); @@ -1317,16 +1298,13 @@ void u_vbuf_restore_vertex_elements(struct u_vbuf *mgr) void u_vbuf_save_aux_vertex_buffer_slot(struct u_vbuf *mgr) { - struct pipe_vertex_buffer *vb = - &mgr->vertex_buffer[mgr->aux_vertex_buffer_slot]; - - pipe_resource_reference(&mgr->aux_vertex_buffer_saved.buffer, vb->buffer); - memcpy(&mgr->aux_vertex_buffer_saved, vb, sizeof(*vb)); + pipe_vertex_buffer_reference(&mgr->aux_vertex_buffer_saved, + &mgr->vertex_buffer[mgr->aux_vertex_buffer_slot]); } void u_vbuf_restore_aux_vertex_buffer_slot(struct u_vbuf *mgr) { u_vbuf_set_vertex_buffers(mgr, mgr->aux_vertex_buffer_slot, 1, &mgr->aux_vertex_buffer_saved); - pipe_resource_reference(&mgr->aux_vertex_buffer_saved.buffer, NULL); + pipe_vertex_buffer_unreference(&mgr->aux_vertex_buffer_saved); }
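
---
Annotations (reviewer notes, not part of the commit). The sketches below
restate patterns used by the hunks above; any name flagged as hypothetical
does not exist in the tree.

1) Uploader: u_vbuf no longer owns a private u_upload_mgr; it suballocates
from the context's shared pipe->stream_uploader. In this API revision,
u_upload_alloc() and u_upload_data() take an explicit alignment argument and
return void, so failure is detected by checking the returned resource for
NULL. A minimal sketch of that calling convention ("alloc_stream_space" is
hypothetical):

   /* Assumes "util/u_upload_mgr.h"; mirrors the hunks above:
    * min_out_offset = 0, alignment = 4. */
   static enum pipe_error
   alloc_stream_space(struct pipe_context *pipe, unsigned size,
                      unsigned *out_offset, struct pipe_resource **out_buffer,
                      void **out_map)
   {
      u_upload_alloc(pipe->stream_uploader, 0, size, 4,
                     out_offset, out_buffer, out_map);

      /* void return: a NULL buffer is the only failure signal. */
      if (!*out_buffer)
         return PIPE_ERROR_OUT_OF_MEMORY;
      return PIPE_OK;
   }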
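
2) The one-line addition "unused_vb_mask &= ~(1 << index);" in
u_vbuf_translate_find_free_vb_slots() is a bug fix: ffs() always returns the
lowest set bit, so without clearing it, two fallback types (e.g. VB_VERTEX
and VB_INSTANCE) could be handed the same vertex-buffer slot and one
translated buffer would overwrite the other. A worked example with
illustrative values:

   /* Illustrative: slots 0, 1 and 3 are free; the VERTEX and INSTANCE
    * types need fallback slots, CONST does not. */
   unsigned unused_vb_mask = 0xb;
   unsigned mask[VB_NUM] = { 0x4, 0x2, 0 };
   unsigned fallback_vbs[VB_NUM] = { ~0u, ~0u, ~0u };
   unsigned type, index;

   for (type = 0; type < VB_NUM; type++) {
      if (mask[type]) {                     /* this type needs a fallback VB */
         index = ffs(unused_vb_mask) - 1;   /* VB_VERTEX: 0; VB_INSTANCE: 1 */
         fallback_vbs[type] = index;
         unused_vb_mask &= ~(1 << index);   /* without the fix, ffs() keeps
                                               returning slot 0 */
      }
   }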
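
3) struct pipe_vertex_buffer now stores either a real resource
(buffer.resource) or a user pointer (buffer.user) in a union discriminated
by is_user_buffer, which is why the manual pipe_resource_reference() +
memcpy() pairs become pipe_vertex_buffer_reference()/unreference(). The
helpers (from u_inlines.h) are expected to behave roughly like this
paraphrase; the sketch_* names are hypothetical:

   static inline void
   sketch_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
   {
      if (dst->is_user_buffer)
         dst->buffer.user = NULL;           /* user memory: no refcount */
      else
         pipe_resource_reference(&dst->buffer.resource, NULL);
   }

   static inline void
   sketch_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
                                  const struct pipe_vertex_buffer *src)
   {
      sketch_vertex_buffer_unreference(dst);
      if (!src->is_user_buffer)
         pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
      memcpy(dst, src, sizeof(*src));       /* stride, offset, union, flag */
   }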
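
4) With struct pipe_index_buffer and set_index_buffer() gone, index data
travels inside pipe_draw_info: index_size (0 now means a non-indexed draw,
replacing the old "indexed" flag), has_user_indices selecting between
index.user and index.resource, and start counted in index elements (callers
are expected to fold any former per-index-buffer byte offset into it). An
access-pattern sketch ("map_index_data" is hypothetical):

   static const uint8_t *
   map_index_data(struct pipe_context *pipe,
                  const struct pipe_draw_info *info,
                  struct pipe_transfer **transfer)
   {
      const unsigned offset = info->start * info->index_size;

      assert(info->index_size);      /* 0 would be a non-indexed draw */

      if (info->has_user_indices) {
         *transfer = NULL;           /* user memory: nothing to unmap */
         return (const uint8_t*)info->index.user + offset;
      }

      return pipe_buffer_map_range(pipe, info->index.resource, offset,
                                   info->count * info->index_size,
                                   PIPE_TRANSFER_READ, transfer);
   }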
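
5) The indirect-draw readback in u_vbuf_draw_vbo() maps 20 bytes for indexed
draws and 16 for non-indexed ones, and pulls index_bias/start_instance out
of data[3] and data[4], because the buffer follows the standard GL indirect
command layouts (struct names here are illustrative only):

   struct indexed_indirect_cmd {    /* 20 bytes: indexed draws */
      uint32_t count;
      uint32_t instance_count;
      uint32_t start;
      int32_t  index_bias;          /* data[3] */
      uint32_t start_instance;      /* data[4] */
   };

   struct nonindexed_indirect_cmd { /* 16 bytes: non-indexed draws */
      uint32_t count;
      uint32_t instance_count;
      uint32_t start;
      uint32_t start_instance;      /* data[3] */
   };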