diff --git a/src/gallium/auxiliary/util/u_vbuf.c b/src/gallium/auxiliary/util/u_vbuf.c
index a7266136ebf..532e7c004bf 100644
--- a/src/gallium/auxiliary/util/u_vbuf.c
+++ b/src/gallium/auxiliary/util/u_vbuf.c
@@ -25,14 +25,77 @@
  *
  **************************************************************************/
 
+/**
+ * This module uploads user buffers and translates vertex buffers that
+ * contain incompatible vertices (i.e. not supported by the driver/hardware)
+ * into compatible ones, based on the Gallium CAPs.
+ *
+ * It does not upload index buffers.
+ *
+ * The module heavily uses bitmasks to represent per-buffer and
+ * per-vertex-element flags, so that it can avoid looping over the list of
+ * buffers just to see if there's a non-zero stride, a user buffer, an
+ * unsupported format, etc.
+ *
+ * There are 3 categories of vertex elements, which are processed separately:
+ * - per-vertex attribs (stride != 0, instance_divisor == 0)
+ * - instanced attribs (stride != 0, instance_divisor > 0)
+ * - constant attribs (stride == 0)
+ *
+ * All needed uploads and translations are performed for every draw command,
+ * but only the subset of vertices needed for that draw command is uploaded
+ * or translated. (The module never translates whole buffers.)
+ *
+ *
+ * The module consists of two main parts:
+ *
+ *
+ * 1) Translate (u_vbuf_translate_begin/end)
+ *
+ * This is pretty much a vertex fetch fallback. It translates vertices from
+ * one vertex buffer to another in an unused vertex buffer slot. It does
+ * whatever is needed to make the vertices readable by the hardware (changes
+ * vertex formats and aligns offsets and strides). The translate module is
+ * used here.
+ *
+ * Each of the 3 categories is translated to a separate buffer.
+ * Only the [min_index, max_index] range is translated. For instanced attribs,
+ * the range is [start_instance, start_instance+instance_count]. For constant
+ * attribs, the range is [0, 1].
+ *
+ *
+ * 2) User buffer uploading (u_vbuf_upload_buffers)
+ *
+ * Only the [min_index, max_index] range is uploaded (just like Translate)
+ * with a single memcpy.
+ *
+ * This method works best for non-indexed draw operations and for indexed
+ * draw operations where the [min_index, max_index] range is not much
+ * bigger than the vertex count.
+ *
+ * If the range is too big (e.g. one triangle with indices {0, 1, 10000}),
+ * the per-vertex attribs are uploaded via the translate module, all packed
+ * into one vertex buffer, and the indexed draw call is turned into
+ * a non-indexed one in the process. This adds some complexity to the
+ * translate part, but it prevents badly-behaved apps from dragging the
+ * frame rate down.
+ *
+ *
+ * If there is nothing to do, the module forwards every command to the driver.
+ * It also has its own CSO cache of vertex element states.
+ */
+
 #include "util/u_vbuf.h"
 
+#include "util/u_dump.h"
 #include "util/u_format.h"
 #include "util/u_inlines.h"
 #include "util/u_memory.h"
 #include "util/u_upload_mgr.h"
 #include "translate/translate.h"
 #include "translate/translate_cache.h"
+#include "cso_cache/cso_cache.h"
+#include "cso_cache/cso_hash.h"
 
 struct u_vbuf_elements {
    unsigned count;
@@ -46,312 +109,646 @@ struct u_vbuf_elements {
    enum pipe_format native_format[PIPE_MAX_ATTRIBS];
    unsigned native_format_size[PIPE_MAX_ATTRIBS];
 
+   /* Which buffers are used by the vertex element state. */
+   uint32_t used_vb_mask;
+
    /* This might mean two things:
     * - src_format != native_format, as discussed above.
     * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
-   boolean incompatible_layout;
-   /* Per-element flags. */
-   boolean incompatible_layout_elem[PIPE_MAX_ATTRIBS];
+   uint32_t incompatible_elem_mask; /* each bit describes a corresp. attrib */
+   /* Which buffers have at least one incompatible vertex element
+    * referencing them. */
+   uint32_t incompatible_vb_mask_any;
+   /* Which buffers are referenced only by incompatible vertex elements. */
+   uint32_t incompatible_vb_mask_all;
+   /* Which buffers have at least one compatible vertex element
+    * referencing them. */
+   uint32_t compatible_vb_mask_any;
+   /* Which buffers are referenced only by compatible vertex elements. */
+   uint32_t compatible_vb_mask_all;
+
+   /* Which buffers have at least one non-instanced vertex element
+    * referencing them. */
+   uint32_t noninstance_vb_mask_any;
+
+   void *driver_cso;
+};
+
+enum {
+   VB_VERTEX = 0,
+   VB_INSTANCE = 1,
+   VB_CONST = 2,
+   VB_NUM = 3
 };
 
-struct u_vbuf_priv {
-   struct u_vbuf b;
+struct u_vbuf {
+   struct u_vbuf_caps caps;
+
    struct pipe_context *pipe;
    struct translate_cache *translate_cache;
+   struct cso_cache *cso_cache;
+   struct u_upload_mgr *uploader;
 
-   /* Vertex element state bound by the state tracker. */
-   void *saved_ve;
-   /* and its associated helper structure for this module. */
-   struct u_vbuf_elements *ve;
+   /* This is what was set in set_vertex_buffers.
+    * May contain user buffers. */
+   struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
+   uint32_t enabled_vb_mask;
+
+   /* The auxiliary vertex buffer slot and its saved binding. */
+   unsigned aux_vertex_buffer_slot;
+   struct pipe_vertex_buffer aux_vertex_buffer_saved;
+
+   /* Vertex buffers for the driver.
+    * There are usually no user buffers. */
+   struct pipe_vertex_buffer real_vertex_buffer[PIPE_MAX_ATTRIBS];
+   uint32_t dirty_real_vb_mask; /* which buffers are dirty since the last
+                                   call to set_vertex_buffers */
+
+   /* The index buffer. */
+   struct pipe_index_buffer index_buffer;
+
+   /* Vertex elements. */
+   struct u_vbuf_elements *ve, *ve_saved;
 
    /* Vertex elements used for the translate fallback. */
    struct pipe_vertex_element fallback_velems[PIPE_MAX_ATTRIBS];
-   /* If non-NULL, this is a vertex element state used for the translate
-    * fallback and therefore used for rendering too. */
-   void *fallback_ve;
-   /* The vertex buffer slot index where translated vertices have been
-    * stored in. */
-   unsigned fallback_vb_slot;
-   /* When binding the fallback vertex element state, we don't want to
-    * change saved_ve and ve. This is set to TRUE in such cases. */
-   boolean ve_binding_lock;
-
-   /* Whether there is any user buffer. */
-   boolean any_user_vbs;
-   /* Whether there is a buffer with a non-native layout. */
-   boolean incompatible_vb_layout;
-   /* Per-buffer flags. */
-   boolean incompatible_vb[PIPE_MAX_ATTRIBS];
+   /* If TRUE, the fallback vertex element state is currently bound and
+    * therefore used for rendering too. */
+   boolean using_translate;
+   /* The vertex buffer slots where translated vertices are stored,
+    * one per category (VB_VERTEX/VB_INSTANCE/VB_CONST). */
+   unsigned fallback_vbs[VB_NUM];
+
+   /* Which buffers are user buffers. 
*/ + uint32_t user_vb_mask; /* each bit describes a corresp. buffer */ + /* Which buffer is incompatible (unaligned). */ + uint32_t incompatible_vb_mask; /* each bit describes a corresp. buffer */ + /* Which buffer has a non-zero stride. */ + uint32_t nonzero_stride_vb_mask; /* each bit describes a corresp. buffer */ }; -static void u_vbuf_init_format_caps(struct u_vbuf_priv *mgr) +static void * +u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count, + const struct pipe_vertex_element *attribs); +static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso); + +static const struct { + enum pipe_format from, to; +} vbuf_format_fallbacks[] = { + { PIPE_FORMAT_R32_FIXED, PIPE_FORMAT_R32_FLOAT }, + { PIPE_FORMAT_R32G32_FIXED, PIPE_FORMAT_R32G32_FLOAT }, + { PIPE_FORMAT_R32G32B32_FIXED, PIPE_FORMAT_R32G32B32_FLOAT }, + { PIPE_FORMAT_R32G32B32A32_FIXED, PIPE_FORMAT_R32G32B32A32_FLOAT }, + { PIPE_FORMAT_R16_FLOAT, PIPE_FORMAT_R32_FLOAT }, + { PIPE_FORMAT_R16G16_FLOAT, PIPE_FORMAT_R32G32_FLOAT }, + { PIPE_FORMAT_R16G16B16_FLOAT, PIPE_FORMAT_R32G32B32_FLOAT }, + { PIPE_FORMAT_R16G16B16A16_FLOAT, PIPE_FORMAT_R32G32B32A32_FLOAT }, + { PIPE_FORMAT_R64_FLOAT, PIPE_FORMAT_R32_FLOAT }, + { PIPE_FORMAT_R64G64_FLOAT, PIPE_FORMAT_R32G32_FLOAT }, + { PIPE_FORMAT_R64G64B64_FLOAT, PIPE_FORMAT_R32G32B32_FLOAT }, + { PIPE_FORMAT_R64G64B64A64_FLOAT, PIPE_FORMAT_R32G32B32A32_FLOAT }, + { PIPE_FORMAT_R32_UNORM, PIPE_FORMAT_R32_FLOAT }, + { PIPE_FORMAT_R32G32_UNORM, PIPE_FORMAT_R32G32_FLOAT }, + { PIPE_FORMAT_R32G32B32_UNORM, PIPE_FORMAT_R32G32B32_FLOAT }, + { PIPE_FORMAT_R32G32B32A32_UNORM, PIPE_FORMAT_R32G32B32A32_FLOAT }, + { PIPE_FORMAT_R32_SNORM, PIPE_FORMAT_R32_FLOAT }, + { PIPE_FORMAT_R32G32_SNORM, PIPE_FORMAT_R32G32_FLOAT }, + { PIPE_FORMAT_R32G32B32_SNORM, PIPE_FORMAT_R32G32B32_FLOAT }, + { PIPE_FORMAT_R32G32B32A32_SNORM, PIPE_FORMAT_R32G32B32A32_FLOAT }, + { PIPE_FORMAT_R32_USCALED, PIPE_FORMAT_R32_FLOAT }, + { PIPE_FORMAT_R32G32_USCALED, PIPE_FORMAT_R32G32_FLOAT }, + { PIPE_FORMAT_R32G32B32_USCALED, PIPE_FORMAT_R32G32B32_FLOAT }, + { PIPE_FORMAT_R32G32B32A32_USCALED, PIPE_FORMAT_R32G32B32A32_FLOAT }, + { PIPE_FORMAT_R32_SSCALED, PIPE_FORMAT_R32_FLOAT }, + { PIPE_FORMAT_R32G32_SSCALED, PIPE_FORMAT_R32G32_FLOAT }, + { PIPE_FORMAT_R32G32B32_SSCALED, PIPE_FORMAT_R32G32B32_FLOAT }, + { PIPE_FORMAT_R32G32B32A32_SSCALED, PIPE_FORMAT_R32G32B32A32_FLOAT }, + { PIPE_FORMAT_R16_UNORM, PIPE_FORMAT_R32_FLOAT }, + { PIPE_FORMAT_R16G16_UNORM, PIPE_FORMAT_R32G32_FLOAT }, + { PIPE_FORMAT_R16G16B16_UNORM, PIPE_FORMAT_R32G32B32_FLOAT }, + { PIPE_FORMAT_R16G16B16A16_UNORM, PIPE_FORMAT_R32G32B32A32_FLOAT }, + { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R32_FLOAT }, + { PIPE_FORMAT_R16G16_SNORM, PIPE_FORMAT_R32G32_FLOAT }, + { PIPE_FORMAT_R16G16B16_SNORM, PIPE_FORMAT_R32G32B32_FLOAT }, + { PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R32G32B32A32_FLOAT }, + { PIPE_FORMAT_R16_USCALED, PIPE_FORMAT_R32_FLOAT }, + { PIPE_FORMAT_R16G16_USCALED, PIPE_FORMAT_R32G32_FLOAT }, + { PIPE_FORMAT_R16G16B16_USCALED, PIPE_FORMAT_R32G32B32_FLOAT }, + { PIPE_FORMAT_R16G16B16A16_USCALED, PIPE_FORMAT_R32G32B32A32_FLOAT }, + { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R32_FLOAT }, + { PIPE_FORMAT_R16G16_SSCALED, PIPE_FORMAT_R32G32_FLOAT }, + { PIPE_FORMAT_R16G16B16_SSCALED, PIPE_FORMAT_R32G32B32_FLOAT }, + { PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R32G32B32A32_FLOAT }, + { PIPE_FORMAT_R8_UNORM, PIPE_FORMAT_R32_FLOAT }, + { PIPE_FORMAT_R8G8_UNORM, PIPE_FORMAT_R32G32_FLOAT }, + { PIPE_FORMAT_R8G8B8_UNORM, PIPE_FORMAT_R32G32B32_FLOAT 
}, + { PIPE_FORMAT_R8G8B8A8_UNORM, PIPE_FORMAT_R32G32B32A32_FLOAT }, + { PIPE_FORMAT_R8_SNORM, PIPE_FORMAT_R32_FLOAT }, + { PIPE_FORMAT_R8G8_SNORM, PIPE_FORMAT_R32G32_FLOAT }, + { PIPE_FORMAT_R8G8B8_SNORM, PIPE_FORMAT_R32G32B32_FLOAT }, + { PIPE_FORMAT_R8G8B8A8_SNORM, PIPE_FORMAT_R32G32B32A32_FLOAT }, + { PIPE_FORMAT_R8_USCALED, PIPE_FORMAT_R32_FLOAT }, + { PIPE_FORMAT_R8G8_USCALED, PIPE_FORMAT_R32G32_FLOAT }, + { PIPE_FORMAT_R8G8B8_USCALED, PIPE_FORMAT_R32G32B32_FLOAT }, + { PIPE_FORMAT_R8G8B8A8_USCALED, PIPE_FORMAT_R32G32B32A32_FLOAT }, + { PIPE_FORMAT_R8_SSCALED, PIPE_FORMAT_R32_FLOAT }, + { PIPE_FORMAT_R8G8_SSCALED, PIPE_FORMAT_R32G32_FLOAT }, + { PIPE_FORMAT_R8G8B8_SSCALED, PIPE_FORMAT_R32G32B32_FLOAT }, + { PIPE_FORMAT_R8G8B8A8_SSCALED, PIPE_FORMAT_R32G32B32A32_FLOAT }, +}; + +boolean u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps) { - struct pipe_screen *screen = mgr->pipe->screen; + unsigned i; + boolean fallback = FALSE; + + /* I'd rather have a bitfield of which formats are supported and a static + * table of the translations indexed by format, but since we don't have C99 + * we can't easily make a sparsely-populated table indexed by format. So, + * we construct the sparse table here. + */ + for (i = 0; i < PIPE_FORMAT_COUNT; i++) + caps->format_translation[i] = i; + + for (i = 0; i < ARRAY_SIZE(vbuf_format_fallbacks); i++) { + enum pipe_format format = vbuf_format_fallbacks[i].from; + + if (!screen->is_format_supported(screen, format, PIPE_BUFFER, 0, + PIPE_BIND_VERTEX_BUFFER)) { + caps->format_translation[format] = vbuf_format_fallbacks[i].to; + fallback = TRUE; + } + } + + caps->buffer_offset_unaligned = + !screen->get_param(screen, + PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY); + caps->buffer_stride_unaligned = + !screen->get_param(screen, + PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY); + caps->velem_src_offset_unaligned = + !screen->get_param(screen, + PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY); + caps->user_vertex_buffers = + screen->get_param(screen, PIPE_CAP_USER_VERTEX_BUFFERS); + + if (!caps->buffer_offset_unaligned || + !caps->buffer_stride_unaligned || + !caps->velem_src_offset_unaligned || + !caps->user_vertex_buffers) { + fallback = TRUE; + } - mgr->b.caps.format_fixed32 = - screen->is_format_supported(screen, PIPE_FORMAT_R32_FIXED, PIPE_BUFFER, - 0, PIPE_BIND_VERTEX_BUFFER); - - mgr->b.caps.format_float16 = - screen->is_format_supported(screen, PIPE_FORMAT_R16_FLOAT, PIPE_BUFFER, - 0, PIPE_BIND_VERTEX_BUFFER); - - mgr->b.caps.format_float64 = - screen->is_format_supported(screen, PIPE_FORMAT_R64_FLOAT, PIPE_BUFFER, - 0, PIPE_BIND_VERTEX_BUFFER); - - mgr->b.caps.format_norm32 = - screen->is_format_supported(screen, PIPE_FORMAT_R32_UNORM, PIPE_BUFFER, - 0, PIPE_BIND_VERTEX_BUFFER) && - screen->is_format_supported(screen, PIPE_FORMAT_R32_SNORM, PIPE_BUFFER, - 0, PIPE_BIND_VERTEX_BUFFER); - - mgr->b.caps.format_scaled32 = - screen->is_format_supported(screen, PIPE_FORMAT_R32_USCALED, PIPE_BUFFER, - 0, PIPE_BIND_VERTEX_BUFFER) && - screen->is_format_supported(screen, PIPE_FORMAT_R32_SSCALED, PIPE_BUFFER, - 0, PIPE_BIND_VERTEX_BUFFER); + return fallback; } struct u_vbuf * u_vbuf_create(struct pipe_context *pipe, - unsigned upload_buffer_size, - unsigned upload_buffer_alignment, - unsigned upload_buffer_bind, - enum u_fetch_alignment fetch_alignment) + struct u_vbuf_caps *caps, unsigned aux_vertex_buffer_index) { - struct u_vbuf_priv *mgr = CALLOC_STRUCT(u_vbuf_priv); + struct u_vbuf *mgr = CALLOC_STRUCT(u_vbuf); + mgr->caps = 
*caps; + mgr->aux_vertex_buffer_slot = aux_vertex_buffer_index; mgr->pipe = pipe; + mgr->cso_cache = cso_cache_create(); mgr->translate_cache = translate_cache_create(); - mgr->fallback_vb_slot = ~0; + memset(mgr->fallback_vbs, ~0, sizeof(mgr->fallback_vbs)); - mgr->b.uploader = u_upload_create(pipe, upload_buffer_size, - upload_buffer_alignment, - upload_buffer_bind); + mgr->uploader = u_upload_create(pipe, 1024 * 1024, + PIPE_BIND_VERTEX_BUFFER, + PIPE_USAGE_STREAM); - mgr->b.caps.fetch_dword_unaligned = - fetch_alignment == U_VERTEX_FETCH_BYTE_ALIGNED; + return mgr; +} - u_vbuf_init_format_caps(mgr); +/* u_vbuf uses its own caching for vertex elements, because it needs to keep + * its own preprocessed state per vertex element CSO. */ +static struct u_vbuf_elements * +u_vbuf_set_vertex_elements_internal(struct u_vbuf *mgr, unsigned count, + const struct pipe_vertex_element *states) +{ + struct pipe_context *pipe = mgr->pipe; + unsigned key_size, hash_key; + struct cso_hash_iter iter; + struct u_vbuf_elements *ve; + struct cso_velems_state velems_state; + + /* need to include the count into the stored state data too. */ + key_size = sizeof(struct pipe_vertex_element) * count + sizeof(unsigned); + velems_state.count = count; + memcpy(velems_state.velems, states, + sizeof(struct pipe_vertex_element) * count); + hash_key = cso_construct_key((void*)&velems_state, key_size); + iter = cso_find_state_template(mgr->cso_cache, hash_key, CSO_VELEMENTS, + (void*)&velems_state, key_size); + + if (cso_hash_iter_is_null(iter)) { + struct cso_velements *cso = MALLOC_STRUCT(cso_velements); + memcpy(&cso->state, &velems_state, key_size); + cso->data = u_vbuf_create_vertex_elements(mgr, count, states); + cso->delete_state = (cso_state_callback)u_vbuf_delete_vertex_elements; + cso->context = (void*)mgr; + + iter = cso_insert_state(mgr->cso_cache, hash_key, CSO_VELEMENTS, cso); + ve = cso->data; + } else { + ve = ((struct cso_velements *)cso_hash_iter_data(iter))->data; + } - return &mgr->b; + assert(ve); + + if (ve != mgr->ve) + pipe->bind_vertex_elements_state(pipe, ve->driver_cso); + + return ve; } -void u_vbuf_destroy(struct u_vbuf *mgrb) +void u_vbuf_set_vertex_elements(struct u_vbuf *mgr, unsigned count, + const struct pipe_vertex_element *states) { - struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb; + mgr->ve = u_vbuf_set_vertex_elements_internal(mgr, count, states); +} + +void u_vbuf_destroy(struct u_vbuf *mgr) +{ + struct pipe_screen *screen = mgr->pipe->screen; unsigned i; + unsigned num_vb = screen->get_shader_param(screen, PIPE_SHADER_VERTEX, + PIPE_SHADER_CAP_MAX_INPUTS); + + mgr->pipe->set_index_buffer(mgr->pipe, NULL); + pipe_resource_reference(&mgr->index_buffer.buffer, NULL); - for (i = 0; i < mgr->b.nr_vertex_buffers; i++) { - pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL); + mgr->pipe->set_vertex_buffers(mgr->pipe, 0, num_vb, NULL); + + for (i = 0; i < PIPE_MAX_ATTRIBS; i++) { + pipe_resource_reference(&mgr->vertex_buffer[i].buffer, NULL); } - for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) { - pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL); + for (i = 0; i < PIPE_MAX_ATTRIBS; i++) { + pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL); } + pipe_resource_reference(&mgr->aux_vertex_buffer_saved.buffer, NULL); translate_cache_destroy(mgr->translate_cache); - u_upload_destroy(mgr->b.uploader); + u_upload_destroy(mgr->uploader); + cso_cache_delete(mgr->cso_cache); FREE(mgr); } - -static unsigned u_vbuf_get_free_real_vb_slot(struct 
u_vbuf_priv *mgr) +static enum pipe_error +u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key, + unsigned vb_mask, unsigned out_vb, + int start_vertex, unsigned num_vertices, + int start_index, unsigned num_indices, int min_index, + boolean unroll_indices) { - unsigned i, nr = mgr->ve->count; - boolean used_vb[PIPE_MAX_ATTRIBS] = {0}; + struct translate *tr; + struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0}; + struct pipe_resource *out_buffer = NULL; + uint8_t *out_map; + unsigned out_offset, mask; + + /* Get a translate object. */ + tr = translate_cache_find(mgr->translate_cache, key); + + /* Map buffers we want to translate. */ + mask = vb_mask; + while (mask) { + struct pipe_vertex_buffer *vb; + unsigned offset; + uint8_t *map; + unsigned i = u_bit_scan(&mask); + + vb = &mgr->vertex_buffer[i]; + offset = vb->buffer_offset + vb->stride * start_vertex; - for (i = 0; i < nr; i++) { - if (!mgr->ve->incompatible_layout_elem[i]) { - unsigned index = mgr->ve->ve[i].vertex_buffer_index; + if (vb->user_buffer) { + map = (uint8_t*)vb->user_buffer + offset; + } else { + unsigned size = vb->stride ? num_vertices * vb->stride + : sizeof(double)*4; - if (!mgr->incompatible_vb[index]) { - used_vb[index] = TRUE; + if (offset+size > vb->buffer->width0) { + size = vb->buffer->width0 - offset; } + + map = pipe_buffer_map_range(mgr->pipe, vb->buffer, offset, size, + PIPE_TRANSFER_READ, &vb_transfer[i]); } + + /* Subtract min_index so that indexing with the index buffer works. */ + if (unroll_indices) { + map -= (ptrdiff_t)vb->stride * min_index; + } + + tr->set_buffer(tr, i, map, vb->stride, ~0); } - for (i = 0; i < PIPE_MAX_ATTRIBS; i++) { - if (!used_vb[i]) { - if (i >= mgr->b.nr_real_vertex_buffers) { - mgr->b.nr_real_vertex_buffers = i+1; + /* Translate. */ + if (unroll_indices) { + struct pipe_index_buffer *ib = &mgr->index_buffer; + struct pipe_transfer *transfer = NULL; + unsigned offset = ib->offset + start_index * ib->index_size; + uint8_t *map; + + assert((ib->buffer || ib->user_buffer) && ib->index_size); + + /* Create and map the output buffer. */ + u_upload_alloc(mgr->uploader, 0, + key->output_stride * num_indices, 4, + &out_offset, &out_buffer, + (void**)&out_map); + if (!out_buffer) + return PIPE_ERROR_OUT_OF_MEMORY; + + if (ib->user_buffer) { + map = (uint8_t*)ib->user_buffer + offset; + } else { + map = pipe_buffer_map_range(mgr->pipe, ib->buffer, offset, + num_indices * ib->index_size, + PIPE_TRANSFER_READ, &transfer); + } + + switch (ib->index_size) { + case 4: + tr->run_elts(tr, (unsigned*)map, num_indices, 0, 0, out_map); + break; + case 2: + tr->run_elts16(tr, (uint16_t*)map, num_indices, 0, 0, out_map); + break; + case 1: + tr->run_elts8(tr, map, num_indices, 0, 0, out_map); + break; + } + + if (transfer) { + pipe_buffer_unmap(mgr->pipe, transfer); + } + } else { + /* Create and map the output buffer. */ + u_upload_alloc(mgr->uploader, + key->output_stride * start_vertex, + key->output_stride * num_vertices, 4, + &out_offset, &out_buffer, + (void**)&out_map); + if (!out_buffer) + return PIPE_ERROR_OUT_OF_MEMORY; + + out_offset -= key->output_stride * start_vertex; + + tr->run(tr, 0, num_vertices, 0, 0, out_map); + } + + /* Unmap all buffers. */ + mask = vb_mask; + while (mask) { + unsigned i = u_bit_scan(&mask); + + if (vb_transfer[i]) { + pipe_buffer_unmap(mgr->pipe, vb_transfer[i]); + } + } + + /* Setup the new vertex buffer. 
*/ + mgr->real_vertex_buffer[out_vb].buffer_offset = out_offset; + mgr->real_vertex_buffer[out_vb].stride = key->output_stride; + + /* Move the buffer reference. */ + pipe_resource_reference( + &mgr->real_vertex_buffer[out_vb].buffer, NULL); + mgr->real_vertex_buffer[out_vb].buffer = out_buffer; + + return PIPE_OK; +} + +static boolean +u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr, + unsigned mask[VB_NUM]) +{ + unsigned type; + unsigned fallback_vbs[VB_NUM]; + /* Set the bit for each buffer which is incompatible, or isn't set. */ + uint32_t unused_vb_mask = + mgr->ve->incompatible_vb_mask_all | mgr->incompatible_vb_mask | + ~mgr->enabled_vb_mask; + + memset(fallback_vbs, ~0, sizeof(fallback_vbs)); + + /* Find free slots for each type if needed. */ + for (type = 0; type < VB_NUM; type++) { + if (mask[type]) { + uint32_t index; + + if (!unused_vb_mask) { + return FALSE; } - return i; + + index = ffs(unused_vb_mask) - 1; + fallback_vbs[type] = index; + unused_vb_mask &= ~(1 << index); + /*printf("found slot=%i for type=%i\n", index, type);*/ + } + } + + for (type = 0; type < VB_NUM; type++) { + if (mask[type]) { + mgr->dirty_real_vb_mask |= 1 << fallback_vbs[type]; } } - return ~0; + + memcpy(mgr->fallback_vbs, fallback_vbs, sizeof(fallback_vbs)); + return TRUE; } -static void -u_vbuf_translate_begin(struct u_vbuf_priv *mgr, - int min_index, int max_index) +static boolean +u_vbuf_translate_begin(struct u_vbuf *mgr, + int start_vertex, unsigned num_vertices, + int start_instance, unsigned num_instances, + int start_index, unsigned num_indices, int min_index, + boolean unroll_indices) { - struct translate_key key; - struct translate_element *te; - unsigned tr_elem_index[PIPE_MAX_ATTRIBS]; - struct translate *tr; - boolean vb_translated[PIPE_MAX_ATTRIBS] = {0}; - uint8_t *out_map; - struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0}; - struct pipe_resource *out_buffer = NULL; - unsigned i, out_offset, num_verts = max_index + 1 - min_index; + unsigned mask[VB_NUM] = {0}; + struct translate_key key[VB_NUM]; + unsigned elem_index[VB_NUM][PIPE_MAX_ATTRIBS]; /* ... into key.elements */ + unsigned i, type; + unsigned incompatible_vb_mask = mgr->incompatible_vb_mask & + mgr->ve->used_vb_mask; + + int start[VB_NUM] = { + start_vertex, /* VERTEX */ + start_instance, /* INSTANCE */ + 0 /* CONST */ + }; + + unsigned num[VB_NUM] = { + num_vertices, /* VERTEX */ + num_instances, /* INSTANCE */ + 1 /* CONST */ + }; + + memset(key, 0, sizeof(key)); + memset(elem_index, ~0, sizeof(elem_index)); + + /* See if there are vertex attribs of each type to translate and + * which ones. */ + for (i = 0; i < mgr->ve->count; i++) { + unsigned vb_index = mgr->ve->ve[i].vertex_buffer_index; - memset(&key, 0, sizeof(key)); - memset(tr_elem_index, 0xff, sizeof(tr_elem_index)); + if (!mgr->vertex_buffer[vb_index].stride) { + if (!(mgr->ve->incompatible_elem_mask & (1 << i)) && + !(incompatible_vb_mask & (1 << vb_index))) { + continue; + } + mask[VB_CONST] |= 1 << vb_index; + } else if (mgr->ve->ve[i].instance_divisor) { + if (!(mgr->ve->incompatible_elem_mask & (1 << i)) && + !(incompatible_vb_mask & (1 << vb_index))) { + continue; + } + mask[VB_INSTANCE] |= 1 << vb_index; + } else { + if (!unroll_indices && + !(mgr->ve->incompatible_elem_mask & (1 << i)) && + !(incompatible_vb_mask & (1 << vb_index))) { + continue; + } + mask[VB_VERTEX] |= 1 << vb_index; + } + } - /* Get a new vertex buffer slot. 
*/ - mgr->fallback_vb_slot = u_vbuf_get_free_real_vb_slot(mgr); + assert(mask[VB_VERTEX] || mask[VB_INSTANCE] || mask[VB_CONST]); - if (mgr->fallback_vb_slot == ~0) { - return; /* XXX error, not enough attribs */ + /* Find free vertex buffer slots. */ + if (!u_vbuf_translate_find_free_vb_slots(mgr, mask)) { + return FALSE; } - /* Initialize the description of how vertices should be translated. */ + /* Initialize the translate keys. */ for (i = 0; i < mgr->ve->count; i++) { + struct translate_key *k; + struct translate_element *te; enum pipe_format output_format = mgr->ve->native_format[i]; - unsigned output_format_size = mgr->ve->native_format_size[i]; + unsigned bit, vb_index = mgr->ve->ve[i].vertex_buffer_index; + bit = 1 << vb_index; - /* Check for support. */ - if (!mgr->ve->incompatible_layout_elem[i] && - !mgr->incompatible_vb[mgr->ve->ve[i].vertex_buffer_index]) { + if (!(mgr->ve->incompatible_elem_mask & (1 << i)) && + !(incompatible_vb_mask & (1 << vb_index)) && + (!unroll_indices || !(mask[VB_VERTEX] & bit))) { continue; } - assert(translate_is_output_format_supported(output_format)); + /* Set type to what we will translate. + * Whether vertex, instance, or constant attribs. */ + for (type = 0; type < VB_NUM; type++) { + if (mask[type] & bit) { + break; + } + } + assert(type < VB_NUM); + if (mgr->ve->ve[i].src_format != output_format) + assert(translate_is_output_format_supported(output_format)); + /*printf("velem=%i type=%i\n", i, type);*/ + + /* Add the vertex element. */ + k = &key[type]; + elem_index[type][i] = k->nr_elements; - /* Add this vertex element. */ - te = &key.element[key.nr_elements]; + te = &k->element[k->nr_elements]; te->type = TRANSLATE_ELEMENT_NORMAL; te->instance_divisor = 0; - te->input_buffer = mgr->ve->ve[i].vertex_buffer_index; + te->input_buffer = vb_index; te->input_format = mgr->ve->ve[i].src_format; te->input_offset = mgr->ve->ve[i].src_offset; te->output_format = output_format; - te->output_offset = key.output_stride; + te->output_offset = k->output_stride; - key.output_stride += output_format_size; - vb_translated[mgr->ve->ve[i].vertex_buffer_index] = TRUE; - tr_elem_index[i] = key.nr_elements; - key.nr_elements++; + k->output_stride += mgr->ve->native_format_size[i]; + k->nr_elements++; } - /* Get a translate object. */ - tr = translate_cache_find(mgr->translate_cache, &key); - - /* Map buffers we want to translate. */ - for (i = 0; i < mgr->b.nr_vertex_buffers; i++) { - if (vb_translated[i]) { - struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i]; - unsigned offset = vb->buffer_offset + vb->stride * min_index; - unsigned size = vb->stride ? num_verts * vb->stride - : vb->buffer->width0 - offset; - uint8_t *map; - - if (u_vbuf_resource(vb->buffer)->user_ptr) { - map = u_vbuf_resource(vb->buffer)->user_ptr + offset; - } else { - map = pipe_buffer_map_range(mgr->pipe, vb->buffer, offset, size, - PIPE_TRANSFER_READ, &vb_transfer[i]); + /* Translate buffers. */ + for (type = 0; type < VB_NUM; type++) { + if (key[type].nr_elements) { + enum pipe_error err; + err = u_vbuf_translate_buffers(mgr, &key[type], mask[type], + mgr->fallback_vbs[type], + start[type], num[type], + start_index, num_indices, min_index, + unroll_indices && type == VB_VERTEX); + if (err != PIPE_OK) + return FALSE; + + /* Fixup the stride for constant attribs. */ + if (type == VB_CONST) { + mgr->real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0; } - - tr->set_buffer(tr, i, map, vb->stride, ~0); } } - /* Create and map the output buffer. 
*/ - u_upload_alloc(mgr->b.uploader, - key.output_stride * min_index, - key.output_stride * num_verts, - &out_offset, &out_buffer, - (void**)&out_map); - - out_offset -= key.output_stride * min_index; - - /* Translate. */ - tr->run(tr, 0, num_verts, 0, out_map); - - /* Unmap all buffers. */ - for (i = 0; i < mgr->b.nr_vertex_buffers; i++) { - if (vb_transfer[i]) { - pipe_buffer_unmap(mgr->pipe, vb_transfer[i]); - } - } - - /* Setup the new vertex buffer. */ - mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer_offset = out_offset; - mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].stride = key.output_stride; - - /* Move the buffer reference. */ - pipe_resource_reference( - &mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer, NULL); - mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer = out_buffer; - out_buffer = NULL; - /* Setup new vertex elements. */ for (i = 0; i < mgr->ve->count; i++) { - if (tr_elem_index[i] < key.nr_elements) { - te = &key.element[tr_elem_index[i]]; - mgr->fallback_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor; - mgr->fallback_velems[i].src_format = te->output_format; - mgr->fallback_velems[i].src_offset = te->output_offset; - mgr->fallback_velems[i].vertex_buffer_index = mgr->fallback_vb_slot; - } else { + for (type = 0; type < VB_NUM; type++) { + if (elem_index[type][i] < key[type].nr_elements) { + struct translate_element *te = &key[type].element[elem_index[type][i]]; + mgr->fallback_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor; + mgr->fallback_velems[i].src_format = te->output_format; + mgr->fallback_velems[i].src_offset = te->output_offset; + mgr->fallback_velems[i].vertex_buffer_index = mgr->fallback_vbs[type]; + + /* elem_index[type][i] can only be set for one type. */ + assert(type > VB_INSTANCE || elem_index[type+1][i] == ~0u); + assert(type > VB_VERTEX || elem_index[type+2][i] == ~0u); + break; + } + } + /* No translating, just copy the original vertex element over. */ + if (type == VB_NUM) { memcpy(&mgr->fallback_velems[i], &mgr->ve->ve[i], sizeof(struct pipe_vertex_element)); } } - - mgr->fallback_ve = - mgr->pipe->create_vertex_elements_state(mgr->pipe, mgr->ve->count, - mgr->fallback_velems); - - /* Preserve saved_ve. */ - mgr->ve_binding_lock = TRUE; - mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->fallback_ve); - mgr->ve_binding_lock = FALSE; + u_vbuf_set_vertex_elements_internal(mgr, mgr->ve->count, + mgr->fallback_velems); + mgr->using_translate = TRUE; + return TRUE; } -static void u_vbuf_translate_end(struct u_vbuf_priv *mgr) +static void u_vbuf_translate_end(struct u_vbuf *mgr) { - if (mgr->fallback_ve == NULL) { - return; - } + unsigned i; /* Restore vertex elements. */ - /* Note that saved_ve will be overwritten in bind_vertex_elements_state. */ - mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->saved_ve); - mgr->pipe->delete_vertex_elements_state(mgr->pipe, mgr->fallback_ve); - mgr->fallback_ve = NULL; - - /* Delete the now-unused VBO. */ - pipe_resource_reference(&mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer, - NULL); - mgr->fallback_vb_slot = ~0; - mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers; + mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->ve->driver_cso); + mgr->using_translate = FALSE; + + /* Unreference the now-unused VBOs. 
*/ + for (i = 0; i < VB_NUM; i++) { + unsigned vb = mgr->fallback_vbs[i]; + if (vb != ~0u) { + pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer, NULL); + mgr->fallback_vbs[i] = ~0; + + /* This will cause the buffer to be unbound in the driver later. */ + mgr->dirty_real_vb_mask |= 1 << vb; + } + } } -#define FORMAT_REPLACE(what, withwhat) \ - case PIPE_FORMAT_##what: format = PIPE_FORMAT_##withwhat; break - -struct u_vbuf_elements * -u_vbuf_create_vertex_elements(struct u_vbuf *mgrb, - unsigned count, - const struct pipe_vertex_element *attribs, - struct pipe_vertex_element *native_attribs) +static void * +u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count, + const struct pipe_vertex_element *attribs) { - struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb; + struct pipe_context *pipe = mgr->pipe; unsigned i; + struct pipe_vertex_element driver_attribs[PIPE_MAX_ATTRIBS]; struct u_vbuf_elements *ve = CALLOC_STRUCT(u_vbuf_elements); + uint32_t used_buffers = 0; ve->count = count; - if (!count) { - return ve; - } - memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count); - memcpy(native_attribs, attribs, sizeof(struct pipe_vertex_element) * count); + memcpy(driver_attribs, attribs, sizeof(struct pipe_vertex_element) * count); /* Set the best native format in case the original format is not * supported. */ @@ -360,211 +757,188 @@ u_vbuf_create_vertex_elements(struct u_vbuf *mgrb, ve->src_format_size[i] = util_format_get_blocksize(format); - /* Choose a native format. - * For now we don't care about the alignment, that's going to - * be sorted out later. */ - if (!mgr->b.caps.format_fixed32) { - switch (format) { - FORMAT_REPLACE(R32_FIXED, R32_FLOAT); - FORMAT_REPLACE(R32G32_FIXED, R32G32_FLOAT); - FORMAT_REPLACE(R32G32B32_FIXED, R32G32B32_FLOAT); - FORMAT_REPLACE(R32G32B32A32_FIXED, R32G32B32A32_FLOAT); - default:; - } - } - if (!mgr->b.caps.format_float16) { - switch (format) { - FORMAT_REPLACE(R16_FLOAT, R32_FLOAT); - FORMAT_REPLACE(R16G16_FLOAT, R32G32_FLOAT); - FORMAT_REPLACE(R16G16B16_FLOAT, R32G32B32_FLOAT); - FORMAT_REPLACE(R16G16B16A16_FLOAT, R32G32B32A32_FLOAT); - default:; - } - } - if (!mgr->b.caps.format_float64) { - switch (format) { - FORMAT_REPLACE(R64_FLOAT, R32_FLOAT); - FORMAT_REPLACE(R64G64_FLOAT, R32G32_FLOAT); - FORMAT_REPLACE(R64G64B64_FLOAT, R32G32B32_FLOAT); - FORMAT_REPLACE(R64G64B64A64_FLOAT, R32G32B32A32_FLOAT); - default:; - } - } - if (!mgr->b.caps.format_norm32) { - switch (format) { - FORMAT_REPLACE(R32_UNORM, R32_FLOAT); - FORMAT_REPLACE(R32G32_UNORM, R32G32_FLOAT); - FORMAT_REPLACE(R32G32B32_UNORM, R32G32B32_FLOAT); - FORMAT_REPLACE(R32G32B32A32_UNORM, R32G32B32A32_FLOAT); - FORMAT_REPLACE(R32_SNORM, R32_FLOAT); - FORMAT_REPLACE(R32G32_SNORM, R32G32_FLOAT); - FORMAT_REPLACE(R32G32B32_SNORM, R32G32B32_FLOAT); - FORMAT_REPLACE(R32G32B32A32_SNORM, R32G32B32A32_FLOAT); - default:; - } - } - if (!mgr->b.caps.format_scaled32) { - switch (format) { - FORMAT_REPLACE(R32_USCALED, R32_FLOAT); - FORMAT_REPLACE(R32G32_USCALED, R32G32_FLOAT); - FORMAT_REPLACE(R32G32B32_USCALED, R32G32B32_FLOAT); - FORMAT_REPLACE(R32G32B32A32_USCALED,R32G32B32A32_FLOAT); - FORMAT_REPLACE(R32_SSCALED, R32_FLOAT); - FORMAT_REPLACE(R32G32_SSCALED, R32G32_FLOAT); - FORMAT_REPLACE(R32G32B32_SSCALED, R32G32B32_FLOAT); - FORMAT_REPLACE(R32G32B32A32_SSCALED,R32G32B32A32_FLOAT); - default:; - } + used_buffers |= 1 << ve->ve[i].vertex_buffer_index; + + if (!ve->ve[i].instance_divisor) { + ve->noninstance_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index; } - 
native_attribs[i].src_format = format; + format = mgr->caps.format_translation[format]; + + driver_attribs[i].src_format = format; ve->native_format[i] = format; ve->native_format_size[i] = util_format_get_blocksize(ve->native_format[i]); - ve->incompatible_layout_elem[i] = - ve->ve[i].src_format != ve->native_format[i] || - (!mgr->b.caps.fetch_dword_unaligned && ve->ve[i].src_offset % 4 != 0); - ve->incompatible_layout = - ve->incompatible_layout || - ve->incompatible_layout_elem[i]; + if (ve->ve[i].src_format != format || + (!mgr->caps.velem_src_offset_unaligned && + ve->ve[i].src_offset % 4 != 0)) { + ve->incompatible_elem_mask |= 1 << i; + ve->incompatible_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index; + } else { + ve->compatible_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index; + } } - /* Align the formats to the size of DWORD if needed. */ - if (!mgr->b.caps.fetch_dword_unaligned) { + ve->used_vb_mask = used_buffers; + ve->compatible_vb_mask_all = ~ve->incompatible_vb_mask_any & used_buffers; + ve->incompatible_vb_mask_all = ~ve->compatible_vb_mask_any & used_buffers; + + /* Align the formats and offsets to the size of DWORD if needed. */ + if (!mgr->caps.velem_src_offset_unaligned) { for (i = 0; i < count; i++) { ve->native_format_size[i] = align(ve->native_format_size[i], 4); + driver_attribs[i].src_offset = align(ve->ve[i].src_offset, 4); } } + ve->driver_cso = + pipe->create_vertex_elements_state(pipe, count, driver_attribs); return ve; } -void u_vbuf_bind_vertex_elements(struct u_vbuf *mgrb, - void *cso, - struct u_vbuf_elements *ve) +static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso) { - struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb; + struct pipe_context *pipe = mgr->pipe; + struct u_vbuf_elements *ve = cso; - if (!cso) { - return; - } - - if (!mgr->ve_binding_lock) { - mgr->saved_ve = cso; - mgr->ve = ve; - } -} - -void u_vbuf_destroy_vertex_elements(struct u_vbuf *mgr, - struct u_vbuf_elements *ve) -{ + pipe->delete_vertex_elements_state(pipe, ve->driver_cso); FREE(ve); } -void u_vbuf_set_vertex_buffers(struct u_vbuf *mgrb, - unsigned count, +void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, + unsigned start_slot, unsigned count, const struct pipe_vertex_buffer *bufs) { - struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb; unsigned i; + /* which buffers are enabled */ + uint32_t enabled_vb_mask = 0; + /* which buffers are in user memory */ + uint32_t user_vb_mask = 0; + /* which buffers are incompatible with the driver */ + uint32_t incompatible_vb_mask = 0; + /* which buffers have a non-zero stride */ + uint32_t nonzero_stride_vb_mask = 0; + uint32_t mask = ~(((1ull << count) - 1) << start_slot); + + /* Zero out the bits we are going to rewrite completely. */ + mgr->user_vb_mask &= mask; + mgr->incompatible_vb_mask &= mask; + mgr->nonzero_stride_vb_mask &= mask; + mgr->enabled_vb_mask &= mask; + + if (!bufs) { + struct pipe_context *pipe = mgr->pipe; + /* Unbind. */ + mgr->dirty_real_vb_mask &= mask; - mgr->any_user_vbs = FALSE; - mgr->incompatible_vb_layout = FALSE; - memset(mgr->incompatible_vb, 0, sizeof(mgr->incompatible_vb)); - - if (!mgr->b.caps.fetch_dword_unaligned) { - /* Check if the strides and offsets are aligned to the size of DWORD. 
*/ for (i = 0; i < count; i++) { - if (bufs[i].buffer) { - if (bufs[i].stride % 4 != 0 || - bufs[i].buffer_offset % 4 != 0) { - mgr->incompatible_vb_layout = TRUE; - mgr->incompatible_vb[i] = TRUE; - } - } + unsigned dst_index = start_slot + i; + + pipe_resource_reference(&mgr->vertex_buffer[dst_index].buffer, NULL); + pipe_resource_reference(&mgr->real_vertex_buffer[dst_index].buffer, + NULL); } + + pipe->set_vertex_buffers(pipe, start_slot, count, NULL); + return; } for (i = 0; i < count; i++) { + unsigned dst_index = start_slot + i; const struct pipe_vertex_buffer *vb = &bufs[i]; + struct pipe_vertex_buffer *orig_vb = &mgr->vertex_buffer[dst_index]; + struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[dst_index]; - pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer); + if (!vb->buffer && !vb->user_buffer) { + pipe_resource_reference(&orig_vb->buffer, NULL); + pipe_resource_reference(&real_vb->buffer, NULL); + real_vb->user_buffer = NULL; + continue; + } + + pipe_resource_reference(&orig_vb->buffer, vb->buffer); + orig_vb->user_buffer = vb->user_buffer; - mgr->b.real_vertex_buffer[i].buffer_offset = - mgr->b.vertex_buffer[i].buffer_offset = vb->buffer_offset; + real_vb->buffer_offset = orig_vb->buffer_offset = vb->buffer_offset; + real_vb->stride = orig_vb->stride = vb->stride; - mgr->b.real_vertex_buffer[i].stride = - mgr->b.vertex_buffer[i].stride = vb->stride; + if (vb->stride) { + nonzero_stride_vb_mask |= 1 << dst_index; + } + enabled_vb_mask |= 1 << dst_index; - if (!vb->buffer || - mgr->incompatible_vb[i]) { - pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL); + if ((!mgr->caps.buffer_offset_unaligned && vb->buffer_offset % 4 != 0) || + (!mgr->caps.buffer_stride_unaligned && vb->stride % 4 != 0)) { + incompatible_vb_mask |= 1 << dst_index; + pipe_resource_reference(&real_vb->buffer, NULL); continue; } - if (u_vbuf_resource(vb->buffer)->user_ptr) { - pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL); - mgr->any_user_vbs = TRUE; + if (!mgr->caps.user_vertex_buffers && vb->user_buffer) { + user_vb_mask |= 1 << dst_index; + pipe_resource_reference(&real_vb->buffer, NULL); continue; } - pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, vb->buffer); + pipe_resource_reference(&real_vb->buffer, vb->buffer); + real_vb->user_buffer = vb->user_buffer; } - for (i = count; i < mgr->b.nr_vertex_buffers; i++) { - pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL); - } - for (i = count; i < mgr->b.nr_real_vertex_buffers; i++) { - pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL); - } + mgr->user_vb_mask |= user_vb_mask; + mgr->incompatible_vb_mask |= incompatible_vb_mask; + mgr->nonzero_stride_vb_mask |= nonzero_stride_vb_mask; + mgr->enabled_vb_mask |= enabled_vb_mask; - mgr->b.nr_vertex_buffers = count; - mgr->b.nr_real_vertex_buffers = count; + /* All changed buffers are marked as dirty, even the NULL ones, + * which will cause the NULL buffers to be unbound in the driver later. 
*/ + mgr->dirty_real_vb_mask |= ~mask; } void u_vbuf_set_index_buffer(struct u_vbuf *mgr, const struct pipe_index_buffer *ib) { - if (ib && ib->buffer) { + struct pipe_context *pipe = mgr->pipe; + + if (ib) { assert(ib->offset % ib->index_size == 0); pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer); - mgr->index_buffer.offset = ib->offset; - mgr->index_buffer.index_size = ib->index_size; + memcpy(&mgr->index_buffer, ib, sizeof(*ib)); } else { pipe_resource_reference(&mgr->index_buffer.buffer, NULL); } + + pipe->set_index_buffer(pipe, ib); } -static void -u_vbuf_upload_buffers(struct u_vbuf_priv *mgr, - int min_index, int max_index, - unsigned start_instance, unsigned instance_count) +static enum pipe_error +u_vbuf_upload_buffers(struct u_vbuf *mgr, + int start_vertex, unsigned num_vertices, + int start_instance, unsigned num_instances) { unsigned i; - unsigned count = max_index + 1 - min_index; unsigned nr_velems = mgr->ve->count; - unsigned nr_vbufs = mgr->b.nr_vertex_buffers; struct pipe_vertex_element *velems = - mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve; + mgr->using_translate ? mgr->fallback_velems : mgr->ve->ve; unsigned start_offset[PIPE_MAX_ATTRIBS]; - unsigned end_offset[PIPE_MAX_ATTRIBS] = {0}; + unsigned end_offset[PIPE_MAX_ATTRIBS]; + uint32_t buffer_mask = 0; /* Determine how much data needs to be uploaded. */ for (i = 0; i < nr_velems; i++) { struct pipe_vertex_element *velem = &velems[i]; unsigned index = velem->vertex_buffer_index; - struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[index]; - unsigned instance_div, first, size; + struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[index]; + unsigned instance_div, first, size, index_bit; - /* Skip the buffer generated by translate. */ - if (index == mgr->fallback_vb_slot) { + /* Skip the buffers generated by translate. */ + if (index == mgr->fallback_vbs[VB_VERTEX] || + index == mgr->fallback_vbs[VB_INSTANCE] || + index == mgr->fallback_vbs[VB_CONST]) { continue; } - assert(vb->buffer); - - if (!u_vbuf_resource(vb->buffer)->user_ptr) { + if (!vb->user_buffer) { continue; } @@ -576,17 +950,19 @@ u_vbuf_upload_buffers(struct u_vbuf_priv *mgr, size = mgr->ve->src_format_size[i]; } else if (instance_div) { /* Per-instance attrib. */ - unsigned count = (instance_count + instance_div - 1) / instance_div; + unsigned count = (num_instances + instance_div - 1) / instance_div; first += vb->stride * start_instance; size = vb->stride * (count - 1) + mgr->ve->src_format_size[i]; } else { /* Per-vertex attrib. */ - first += vb->stride * min_index; - size = vb->stride * (count - 1) + mgr->ve->src_format_size[i]; + first += vb->stride * start_vertex; + size = vb->stride * (num_vertices - 1) + mgr->ve->src_format_size[i]; } + index_bit = 1 << index; + /* Update offsets. */ - if (!end_offset[index]) { + if (!(buffer_mask & index_bit)) { start_offset[index] = first; end_offset[index] = first + size; } else { @@ -595,131 +971,82 @@ u_vbuf_upload_buffers(struct u_vbuf_priv *mgr, if (first + size > end_offset[index]) end_offset[index] = first + size; } + + buffer_mask |= index_bit; } /* Upload buffers. 
*/ - for (i = 0; i < nr_vbufs; i++) { - unsigned start, end = end_offset[i]; + while (buffer_mask) { + unsigned start, end; struct pipe_vertex_buffer *real_vb; - uint8_t *ptr; + const uint8_t *ptr; - if (!end) { - continue; - } + i = u_bit_scan(&buffer_mask); start = start_offset[i]; + end = end_offset[i]; assert(start < end); - real_vb = &mgr->b.real_vertex_buffer[i]; - ptr = u_vbuf_resource(mgr->b.vertex_buffer[i].buffer)->user_ptr; + real_vb = &mgr->real_vertex_buffer[i]; + ptr = mgr->vertex_buffer[i].user_buffer; - u_upload_data(mgr->b.uploader, start, end - start, ptr + start, + u_upload_data(mgr->uploader, start, end - start, 4, ptr + start, &real_vb->buffer_offset, &real_vb->buffer); + if (!real_vb->buffer) + return PIPE_ERROR_OUT_OF_MEMORY; real_vb->buffer_offset -= start; } + + return PIPE_OK; } -unsigned u_vbuf_draw_max_vertex_count(struct u_vbuf *mgrb) +static boolean u_vbuf_need_minmax_index(const struct u_vbuf *mgr) { - struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb; - unsigned i, nr = mgr->ve->count; - struct pipe_vertex_element *velems = - mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve; - unsigned result = ~0; - - for (i = 0; i < nr; i++) { - struct pipe_vertex_buffer *vb = - &mgr->b.real_vertex_buffer[velems[i].vertex_buffer_index]; - unsigned size, max_count, value; - - /* We're not interested in constant and per-instance attribs. */ - if (!vb->buffer || - !vb->stride || - velems[i].instance_divisor) { - continue; - } - - size = vb->buffer->width0; - - /* Subtract buffer_offset. */ - value = vb->buffer_offset; - if (value >= size) { - return 0; - } - size -= value; - - /* Subtract src_offset. */ - value = velems[i].src_offset; - if (value >= size) { - return 0; - } - size -= value; - - /* Subtract format_size. */ - value = mgr->ve->native_format_size[i]; - if (value >= size) { - return 0; - } - size -= value; - - /* Compute the max count. */ - max_count = 1 + size / vb->stride; - result = MIN2(result, max_count); - } - return result; + /* See if there are any per-vertex attribs which will be uploaded or + * translated. Use bitmasks to get the info instead of looping over vertex + * elements. */ + return (mgr->ve->used_vb_mask & + ((mgr->user_vb_mask | + mgr->incompatible_vb_mask | + mgr->ve->incompatible_vb_mask_any) & + mgr->ve->noninstance_vb_mask_any & + mgr->nonzero_stride_vb_mask)) != 0; } -static boolean u_vbuf_need_minmax_index(struct u_vbuf_priv *mgr) +static boolean u_vbuf_mapping_vertex_buffer_blocks(const struct u_vbuf *mgr) { - unsigned i, nr = mgr->ve->count; - - for (i = 0; i < nr; i++) { - struct pipe_vertex_buffer *vb; - unsigned index; - - /* Per-instance attribs don't need min/max_index. */ - if (mgr->ve->ve[i].instance_divisor) { - continue; - } - - index = mgr->ve->ve[i].vertex_buffer_index; - vb = &mgr->b.vertex_buffer[index]; - - /* Constant attribs don't need min/max_index. */ - if (!vb->stride) { - continue; - } - - /* Per-vertex attribs need min/max_index. */ - if (u_vbuf_resource(vb->buffer)->user_ptr || - mgr->ve->incompatible_layout_elem[i] || - mgr->incompatible_vb[index]) { - return TRUE; - } - } - - return FALSE; + /* Return true if there are hw buffers which don't need to be translated. + * + * We could query whether each buffer is busy, but that would + * be way more costly than this. 
*/ + return (mgr->ve->used_vb_mask & + (~mgr->user_vb_mask & + ~mgr->incompatible_vb_mask & + mgr->ve->compatible_vb_mask_all & + mgr->ve->noninstance_vb_mask_any & + mgr->nonzero_stride_vb_mask)) != 0; } static void u_vbuf_get_minmax_index(struct pipe_context *pipe, struct pipe_index_buffer *ib, - const struct pipe_draw_info *info, + boolean primitive_restart, + unsigned restart_index, + unsigned start, unsigned count, int *out_min_index, int *out_max_index) { struct pipe_transfer *transfer = NULL; const void *indices; unsigned i; - unsigned restart_index = info->restart_index; - if (u_vbuf_resource(ib->buffer)->user_ptr) { - indices = u_vbuf_resource(ib->buffer)->user_ptr + - ib->offset + info->start * ib->index_size; + if (ib->user_buffer) { + indices = (uint8_t*)ib->user_buffer + + ib->offset + start * ib->index_size; } else { indices = pipe_buffer_map_range(pipe, ib->buffer, - ib->offset + info->start * ib->index_size, - info->count * ib->index_size, + ib->offset + start * ib->index_size, + count * ib->index_size, PIPE_TRANSFER_READ, &transfer); } @@ -728,8 +1055,8 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe, const unsigned *ui_indices = (const unsigned*)indices; unsigned max_ui = 0; unsigned min_ui = ~0U; - if (info->primitive_restart) { - for (i = 0; i < info->count; i++) { + if (primitive_restart) { + for (i = 0; i < count; i++) { if (ui_indices[i] != restart_index) { if (ui_indices[i] > max_ui) max_ui = ui_indices[i]; if (ui_indices[i] < min_ui) min_ui = ui_indices[i]; @@ -737,7 +1064,7 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe, } } else { - for (i = 0; i < info->count; i++) { + for (i = 0; i < count; i++) { if (ui_indices[i] > max_ui) max_ui = ui_indices[i]; if (ui_indices[i] < min_ui) min_ui = ui_indices[i]; } @@ -750,8 +1077,8 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe, const unsigned short *us_indices = (const unsigned short*)indices; unsigned max_us = 0; unsigned min_us = ~0U; - if (info->primitive_restart) { - for (i = 0; i < info->count; i++) { + if (primitive_restart) { + for (i = 0; i < count; i++) { if (us_indices[i] != restart_index) { if (us_indices[i] > max_us) max_us = us_indices[i]; if (us_indices[i] < min_us) min_us = us_indices[i]; @@ -759,7 +1086,7 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe, } } else { - for (i = 0; i < info->count; i++) { + for (i = 0; i < count; i++) { if (us_indices[i] > max_us) max_us = us_indices[i]; if (us_indices[i] < min_us) min_us = us_indices[i]; } @@ -772,8 +1099,8 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe, const unsigned char *ub_indices = (const unsigned char*)indices; unsigned max_ub = 0; unsigned min_ub = ~0U; - if (info->primitive_restart) { - for (i = 0; i < info->count; i++) { + if (primitive_restart) { + for (i = 0; i < count; i++) { if (ub_indices[i] != restart_index) { if (ub_indices[i] > max_ub) max_ub = ub_indices[i]; if (ub_indices[i] < min_ub) min_ub = ub_indices[i]; @@ -781,7 +1108,7 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe, } } else { - for (i = 0; i < info->count; i++) { + for (i = 0; i < count; i++) { if (ub_indices[i] > max_ub) max_ub = ub_indices[i]; if (ub_indices[i] < min_ub) min_ub = ub_indices[i]; } @@ -801,55 +1128,213 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe, } } -enum u_vbuf_return_flags -u_vbuf_draw_begin(struct u_vbuf *mgrb, - const struct pipe_draw_info *info) +static void u_vbuf_set_driver_vertex_buffers(struct u_vbuf *mgr) +{ + struct pipe_context 
*pipe = mgr->pipe; + unsigned start_slot, count; + + start_slot = ffs(mgr->dirty_real_vb_mask) - 1; + count = util_last_bit(mgr->dirty_real_vb_mask >> start_slot); + + pipe->set_vertex_buffers(pipe, start_slot, count, + mgr->real_vertex_buffer + start_slot); + mgr->dirty_real_vb_mask = 0; +} + +void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info) { - struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb; - int min_index, max_index; - - if (!mgr->incompatible_vb_layout && - !mgr->ve->incompatible_layout && - !mgr->any_user_vbs) { - return 0; - } - - if (info->indexed) { - if (info->max_index != ~0) { - min_index = info->min_index + info->index_bias; - max_index = info->max_index + info->index_bias; - } else if (u_vbuf_need_minmax_index(mgr)) { - u_vbuf_get_minmax_index(mgr->pipe, &mgr->b.index_buffer, info, - &min_index, &max_index); - min_index += info->index_bias; - max_index += info->index_bias; + struct pipe_context *pipe = mgr->pipe; + int start_vertex, min_index; + unsigned num_vertices; + boolean unroll_indices = FALSE; + uint32_t used_vb_mask = mgr->ve->used_vb_mask; + uint32_t user_vb_mask = mgr->user_vb_mask & used_vb_mask; + uint32_t incompatible_vb_mask = mgr->incompatible_vb_mask & used_vb_mask; + struct pipe_draw_info new_info; + + /* Normal draw. No fallback and no user buffers. */ + if (!incompatible_vb_mask && + !mgr->ve->incompatible_elem_mask && + !user_vb_mask) { + + /* Set vertex buffers if needed. */ + if (mgr->dirty_real_vb_mask & used_vb_mask) { + u_vbuf_set_driver_vertex_buffers(mgr); + } + + pipe->draw_vbo(pipe, info); + return; + } + + new_info = *info; + + /* Fallback. We need to know all the parameters. */ + if (new_info.indirect) { + struct pipe_transfer *transfer = NULL; + int *data; + + if (new_info.indexed) { + data = pipe_buffer_map_range(pipe, new_info.indirect, + new_info.indirect_offset, 20, + PIPE_TRANSFER_READ, &transfer); + new_info.index_bias = data[3]; + new_info.start_instance = data[4]; + } + else { + data = pipe_buffer_map_range(pipe, new_info.indirect, + new_info.indirect_offset, 16, + PIPE_TRANSFER_READ, &transfer); + new_info.start_instance = data[3]; + } + + new_info.count = data[0]; + new_info.instance_count = data[1]; + new_info.start = data[2]; + pipe_buffer_unmap(pipe, transfer); + new_info.indirect = NULL; + } + + if (new_info.indexed) { + /* See if anything needs to be done for per-vertex attribs. */ + if (u_vbuf_need_minmax_index(mgr)) { + int max_index; + + if (new_info.max_index != ~0u) { + min_index = new_info.min_index; + max_index = new_info.max_index; + } else { + u_vbuf_get_minmax_index(mgr->pipe, &mgr->index_buffer, + new_info.primitive_restart, + new_info.restart_index, new_info.start, + new_info.count, &min_index, &max_index); + } + + assert(min_index <= max_index); + + start_vertex = min_index + new_info.index_bias; + num_vertices = max_index + 1 - min_index; + + /* Primitive restart doesn't work when unrolling indices. + * We would have to break this drawing operation into several ones. */ + /* Use some heuristic to see if unrolling indices improves + * performance. */ + if (!new_info.primitive_restart && + num_vertices > new_info.count*2 && + num_vertices - new_info.count > 32 && + !u_vbuf_mapping_vertex_buffer_blocks(mgr)) { + unroll_indices = TRUE; + user_vb_mask &= ~(mgr->nonzero_stride_vb_mask & + mgr->ve->noninstance_vb_mask_any); + } } else { + /* Nothing to do for per-vertex attribs. 
*/ + start_vertex = 0; + num_vertices = 0; min_index = 0; - max_index = 0; } } else { - min_index = info->start; - max_index = info->start + info->count - 1; + start_vertex = new_info.start; + num_vertices = new_info.count; + min_index = 0; } /* Translate vertices with non-native layouts or formats. */ - if (mgr->incompatible_vb_layout || mgr->ve->incompatible_layout) { - u_vbuf_translate_begin(mgr, min_index, max_index); + if (unroll_indices || + incompatible_vb_mask || + mgr->ve->incompatible_elem_mask) { + if (!u_vbuf_translate_begin(mgr, start_vertex, num_vertices, + new_info.start_instance, + new_info.instance_count, new_info.start, + new_info.count, min_index, unroll_indices)) { + debug_warn_once("u_vbuf_translate_begin() failed"); + return; + } + + if (unroll_indices) { + new_info.indexed = FALSE; + new_info.index_bias = 0; + new_info.min_index = 0; + new_info.max_index = new_info.count - 1; + new_info.start = 0; + } + + user_vb_mask &= ~(incompatible_vb_mask | + mgr->ve->incompatible_vb_mask_all); } /* Upload user buffers. */ - if (mgr->any_user_vbs) { - u_vbuf_upload_buffers(mgr, min_index, max_index, - info->start_instance, info->instance_count); + if (user_vb_mask) { + if (u_vbuf_upload_buffers(mgr, start_vertex, num_vertices, + new_info.start_instance, + new_info.instance_count) != PIPE_OK) { + debug_warn_once("u_vbuf_upload_buffers() failed"); + return; + } + + mgr->dirty_real_vb_mask |= user_vb_mask; + } + + /* + if (unroll_indices) { + printf("unrolling indices: start_vertex = %i, num_vertices = %i\n", + start_vertex, num_vertices); + util_dump_draw_info(stdout, info); + printf("\n"); + } + + unsigned i; + for (i = 0; i < mgr->nr_vertex_buffers; i++) { + printf("input %i: ", i); + util_dump_vertex_buffer(stdout, mgr->vertex_buffer+i); + printf("\n"); + } + for (i = 0; i < mgr->nr_real_vertex_buffers; i++) { + printf("real %i: ", i); + util_dump_vertex_buffer(stdout, mgr->real_vertex_buffer+i); + printf("\n"); + } + */ + + u_upload_unmap(mgr->uploader); + u_vbuf_set_driver_vertex_buffers(mgr); + + pipe->draw_vbo(pipe, &new_info); + + if (mgr->using_translate) { + u_vbuf_translate_end(mgr); } - return U_VBUF_BUFFERS_UPDATED; } -void u_vbuf_draw_end(struct u_vbuf *mgrb) +void u_vbuf_save_vertex_elements(struct u_vbuf *mgr) { - struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb; + assert(!mgr->ve_saved); + mgr->ve_saved = mgr->ve; +} - if (mgr->fallback_ve) { - u_vbuf_translate_end(mgr); +void u_vbuf_restore_vertex_elements(struct u_vbuf *mgr) +{ + if (mgr->ve != mgr->ve_saved) { + struct pipe_context *pipe = mgr->pipe; + + mgr->ve = mgr->ve_saved; + pipe->bind_vertex_elements_state(pipe, + mgr->ve ? mgr->ve->driver_cso : NULL); } + mgr->ve_saved = NULL; +} + +void u_vbuf_save_aux_vertex_buffer_slot(struct u_vbuf *mgr) +{ + struct pipe_vertex_buffer *vb = + &mgr->vertex_buffer[mgr->aux_vertex_buffer_slot]; + + pipe_resource_reference(&mgr->aux_vertex_buffer_saved.buffer, vb->buffer); + memcpy(&mgr->aux_vertex_buffer_saved, vb, sizeof(*vb)); +} + +void u_vbuf_restore_aux_vertex_buffer_slot(struct u_vbuf *mgr) +{ + u_vbuf_set_vertex_buffers(mgr, mgr->aux_vertex_buffer_slot, 1, + &mgr->aux_vertex_buffer_saved); + pipe_resource_reference(&mgr->aux_vertex_buffer_saved.buffer, NULL); }
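
Usage note: u_vbuf sits between the state tracker and the driver, which is why
u_vbuf_draw_vbo above ends by calling pipe->draw_vbo. The sketch below shows one
plausible way a caller could wire the entry points from this patch
(u_vbuf_get_caps, u_vbuf_create, u_vbuf_set_vertex_buffers, u_vbuf_draw_vbo)
into a draw path. The "my_*" names are hypothetical; real callers in Mesa
(e.g. cso_context) differ in the details.

/* Minimal sketch of a u_vbuf caller; "my_*" names are illustrative only.
 * Error handling and the rest of the pipe state are omitted. */
#include "util/u_vbuf.h"

struct my_draw_layer {
   struct pipe_context *pipe;
   struct u_vbuf *vbuf;   /* NULL when the driver needs no fallback */
};

static void my_init(struct my_draw_layer *layer, struct pipe_context *pipe)
{
   struct u_vbuf_caps caps;

   layer->pipe = pipe;
   layer->vbuf = NULL;

   /* u_vbuf_get_caps returns TRUE when some vertex-fetch feature is
    * missing and the fallback is worth instantiating. Slot 0 is chosen
    * here as the auxiliary vertex buffer slot. */
   if (u_vbuf_get_caps(pipe->screen, &caps))
      layer->vbuf = u_vbuf_create(pipe, &caps, 0);
}

static void my_set_vertex_buffers(struct my_draw_layer *layer,
                                  unsigned start_slot, unsigned count,
                                  const struct pipe_vertex_buffer *bufs)
{
   if (layer->vbuf)
      u_vbuf_set_vertex_buffers(layer->vbuf, start_slot, count, bufs);
   else
      layer->pipe->set_vertex_buffers(layer->pipe, start_slot, count, bufs);
}

static void my_draw(struct my_draw_layer *layer,
                    const struct pipe_draw_info *info)
{
   if (layer->vbuf) {
      /* Uploads user buffers and translates incompatible vertices,
       * then calls the driver's draw_vbo itself. */
      u_vbuf_draw_vbo(layer->vbuf, info);
   } else {
      layer->pipe->draw_vbo(layer->pipe, info);
   }
}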
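
The header comment stresses the bitmask-driven design: wherever a subset of
buffers has to be visited (u_vbuf_translate_buffers, u_vbuf_upload_buffers,
u_vbuf_set_driver_vertex_buffers), the code scans a 32-bit mask rather than
iterating over all PIPE_MAX_ATTRIBS slots. A self-contained sketch of that
idiom, using a local stand-in for u_bit_scan from util/u_math.h:

#include <stdint.h>
#include <strings.h>  /* ffs() */

/* Stand-in for u_bit_scan(): return the index of the lowest set bit
 * and clear that bit in *mask. */
static int bit_scan(uint32_t *mask)
{
   int i = ffs(*mask) - 1;
   *mask &= *mask - 1;   /* clear the lowest set bit */
   return i;
}

static void visit_buffers(uint32_t vb_mask)
{
   /* Touch only the buffers whose bits are set; with a sparse mask this
    * takes a handful of iterations instead of PIPE_MAX_ATTRIBS. */
   while (vb_mask) {
      int i = bit_scan(&vb_mask);
      /* ... map/upload/mark-dirty vertex buffer i here ... */
      (void)i;
   }
}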