for (i = 0 ; i < exec->vtx.prim_count ; i++) {
struct _mesa_prim *prim = &exec->vtx.prim[i];
- printf(" prim %d: %s%s %d..%d %s %s\n",
+ printf(" prim %d: %s %d..%d %s %s\n",
i,
_mesa_lookup_prim_by_nr(prim->mode),
- prim->weak ? " (weak)" : "",
prim->start,
prim->start + prim->count,
prim->begin ? "BEGIN" : "(wrap)",
GLintptr buffer_offset;
if (_mesa_is_bufferobj(exec->vtx.bufferobj)) {
assert(exec->vtx.bufferobj->Mappings[MAP_INTERNAL].Pointer);
- buffer_offset = exec->vtx.bufferobj->Mappings[MAP_INTERNAL].Offset;
+ buffer_offset = exec->vtx.bufferobj->Mappings[MAP_INTERNAL].Offset +
+ exec->vtx.buffer_offset;
} else {
/* Ptr into ordinary app memory */
buffer_offset = (GLbyte *)exec->vtx.buffer_map - (GLbyte *)NULL;
GLbitfield vao_enabled = _vbo_get_vao_enabled_from_vbo(mode, exec->vtx.enabled);
/* At first disable arrays no longer needed */
- GLbitfield mask = vao->_Enabled & ~vao_enabled;
- while (mask) {
- const int vao_attr = u_bit_scan(&mask);
- _mesa_disable_vertex_array_attrib(ctx, vao, vao_attr);
- }
- assert((~vao_enabled & vao->_Enabled) == 0);
+ _mesa_disable_vertex_array_attribs(ctx, vao, VERT_BIT_ALL & ~vao_enabled);
+ assert((~vao_enabled & vao->Enabled) == 0);
/* Bind the buffer object */
const GLuint stride = exec->vtx.vertex_size*sizeof(GLfloat);
*/
const GLubyte *const vao_to_vbo_map = _vbo_attribute_alias_map[mode];
/* Now set the enabled arrays */
- mask = vao_enabled;
+ GLbitfield mask = vao_enabled;
while (mask) {
const int vao_attr = u_bit_scan(&mask);
const GLubyte vbo_attr = vao_to_vbo_map[vao_attr];
- const GLubyte size = exec->vtx.attrsz[vbo_attr];
- const GLenum16 type = exec->vtx.attrtype[vbo_attr];
+ const GLubyte size = exec->vtx.attr[vbo_attr].size;
+ const GLenum16 type = exec->vtx.attr[vbo_attr].type;
const GLuint offset = (GLuint)((GLbyte *)exec->vtx.attrptr[vbo_attr] -
(GLbyte *)exec->vtx.vertex);
assert(offset <= ctx->Const.MaxVertexAttribRelativeOffset);
/* Set and enable */
_vbo_set_attrib_format(ctx, vao, vao_attr, buffer_offset,
size, type, offset);
- if ((vao->_Enabled & VERT_BIT(vao_attr)) == 0)
- _mesa_enable_vertex_array_attrib(ctx, vao, vao_attr);
/* The vao is initially created with all bindings set to 0. */
assert(vao->VertexAttrib[vao_attr].BufferBindingIndex == 0);
}
- assert(vao_enabled == vao->_Enabled);
+ _mesa_enable_vertex_array_attribs(ctx, vao, vao_enabled);
+ assert(vao_enabled == vao->Enabled);
assert(!_mesa_is_bufferobj(exec->vtx.bufferobj) ||
(vao_enabled & ~vao->VertexAttribBufferMask) == 0);
if (_mesa_is_bufferobj(exec->vtx.bufferobj)) {
struct gl_context *ctx = exec->ctx;
- if (ctx->Driver.FlushMappedBufferRange) {
+ if (ctx->Driver.FlushMappedBufferRange &&
+ !ctx->Extensions.ARB_buffer_storage) {
GLintptr offset = exec->vtx.buffer_used -
exec->vtx.bufferobj->Mappings[MAP_INTERNAL].Offset;
GLsizeiptr length = (exec->vtx.buffer_ptr - exec->vtx.buffer_map) *
}
}
+/* Return true while the exec vertex buffer has more than 1024 bytes of
+ * unused space remaining (buffer_used counts bytes already consumed).
+ * NOTE(review): the 1024-byte slack presumably reserves headroom for at
+ * least one more vertex before a remap/reallocation is forced — confirm
+ * against the wrap logic in vbo_exec_vtx_map()/vbo_exec_vtx_flush().
+ */
+static bool
+vbo_exec_buffer_has_space(struct vbo_exec_context *exec)
+{
+ return VBO_VERT_BUFFER_SIZE > exec->vtx.buffer_used + 1024;
+}
+
/**
* Map the vertex buffer to begin storing glVertex, glColor, etc data.
vbo_exec_vtx_map(struct vbo_exec_context *exec)
{
struct gl_context *ctx = exec->ctx;
- const GLenum accessRange = GL_MAP_WRITE_BIT | /* for MapBufferRange */
- GL_MAP_INVALIDATE_RANGE_BIT |
- GL_MAP_UNSYNCHRONIZED_BIT |
- GL_MAP_FLUSH_EXPLICIT_BIT |
- MESA_MAP_NOWAIT_BIT;
const GLenum usage = GL_STREAM_DRAW_ARB;
+ GLenum accessRange = GL_MAP_WRITE_BIT | /* for MapBufferRange */
+ GL_MAP_UNSYNCHRONIZED_BIT;
+
+ if (ctx->Extensions.ARB_buffer_storage) {
+ /* We sometimes read from the buffer, so map it for read too.
+ * Only the persistent mapping can do that, because the non-persistent
+ * mapping uses flags that are incompatible with GL_MAP_READ_BIT.
+ */
+ accessRange |= GL_MAP_PERSISTENT_BIT |
+ GL_MAP_COHERENT_BIT |
+ GL_MAP_READ_BIT;
+ } else {
+ accessRange |= GL_MAP_INVALIDATE_RANGE_BIT |
+ GL_MAP_FLUSH_EXPLICIT_BIT |
+ MESA_MAP_NOWAIT_BIT;
+ }
if (!_mesa_is_bufferobj(exec->vtx.bufferobj))
return;
assert(!exec->vtx.buffer_map);
assert(!exec->vtx.buffer_ptr);
- if (VBO_VERT_BUFFER_SIZE > exec->vtx.buffer_used + 1024) {
+ if (vbo_exec_buffer_has_space(exec)) {
/* The VBO exists and there's room for more */
if (exec->vtx.bufferobj->Size > 0) {
exec->vtx.buffer_map = (fi_type *)
VBO_VERT_BUFFER_SIZE,
NULL, usage,
GL_MAP_WRITE_BIT |
+ (ctx->Extensions.ARB_buffer_storage ?
+ GL_MAP_PERSISTENT_BIT |
+ GL_MAP_COHERENT_BIT |
+ GL_MAP_READ_BIT : 0) |
GL_DYNAMIC_STORAGE_BIT |
GL_CLIENT_STORAGE_BIT,
exec->vtx.bufferobj)) {
}
exec->vtx.buffer_ptr = exec->vtx.buffer_map;
+ exec->vtx.buffer_offset = 0;
if (!exec->vtx.buffer_map) {
/* out of memory */
/**
* Execute the buffer and save copied verts.
- * \param keep_unmapped if true, leave the VBO unmapped when we're done.
*/
void
-vbo_exec_vtx_flush(struct vbo_exec_context *exec, GLboolean keepUnmapped)
+vbo_exec_vtx_flush(struct vbo_exec_context *exec)
{
+ /* Only unmap if persistent mappings are unsupported. */
+ bool persistent_mapping = exec->ctx->Extensions.ARB_buffer_storage &&
+ _mesa_is_bufferobj(exec->vtx.bufferobj) &&
+ exec->vtx.buffer_map;
+
if (0)
vbo_exec_debug_verts(exec);
if (exec->vtx.copied.nr != exec->vtx.vert_count) {
struct gl_context *ctx = exec->ctx;
- /* Before the update_state() as this may raise _NEW_VARYING_VP_INPUTS
- * from _mesa_set_varying_vp_inputs().
- */
+ /* Prepare and set the exec draws internal VAO for drawing. */
vbo_exec_bind_arrays(ctx);
if (ctx->NewState)
_mesa_update_state(ctx);
- vbo_exec_vtx_unmap(exec);
+ if (!persistent_mapping)
+ vbo_exec_vtx_unmap(exec);
assert(ctx->NewState == 0);
NULL, 0, NULL);
/* Get new storage -- unless asked not to. */
- if (!keepUnmapped)
+ if (!persistent_mapping)
vbo_exec_vtx_map(exec);
}
}
- /* May have to unmap explicitly if we didn't draw:
- */
- if (keepUnmapped && exec->vtx.buffer_map) {
- vbo_exec_vtx_unmap(exec);
+ if (persistent_mapping) {
+ exec->vtx.buffer_used += (exec->vtx.buffer_ptr - exec->vtx.buffer_map) *
+ sizeof(float);
+ exec->vtx.buffer_map = exec->vtx.buffer_ptr;
+
+ /* Set the buffer offset for the next draw. */
+ exec->vtx.buffer_offset = exec->vtx.buffer_used;
+
+ if (!vbo_exec_buffer_has_space(exec)) {
+ /* This will allocate a new buffer. */
+ vbo_exec_vtx_unmap(exec);
+ vbo_exec_vtx_map(exec);
+ }
}
- if (keepUnmapped || exec->vtx.vertex_size == 0)
+ if (exec->vtx.vertex_size == 0)
exec->vtx.max_vert = 0;
else
exec->vtx.max_vert = vbo_compute_max_verts(exec);