+
+/**
+ * Convert GL_LINE_LOOP primitive into GL_LINE_STRIP so that drivers
+ * don't have to worry about handling the _mesa_prim::begin/end flags.
+ * See https://bugs.freedesktop.org/show_bug.cgi?id=81174
+ */
+static void
+convert_line_loop_to_strip(struct vbo_save_context *save,
+ struct vbo_save_vertex_list *node)
+{
+ struct _mesa_prim *prim = &node->prims[node->prim_count - 1];
+
+ assert(prim->mode == GL_LINE_LOOP);
+
+ if (prim->end) {
+ /* Copy the 0th vertex to end of the buffer and extend the
+ * vertex count by one to finish the line loop.
+ */
+ const GLuint sz = save->vertex_size;
+ /* 0th vertex: */
+ const fi_type *src = save->buffer_map + prim->start * sz;
+ /* end of buffer: */
+ fi_type *dst = save->buffer_map + (prim->start + prim->count) * sz;
+
+ memcpy(dst, src, sz * sizeof(float));
+
+ prim->count++;
+ node->vertex_count++;
+ save->vert_count++;
+ save->buffer_ptr += sz;
+ save->vertex_store->used += sz;
+ }
+
+ if (!prim->begin) {
+ /* Drawing the second or later section of a long line loop.
+ * Skip the 0th vertex.
+ */
+ prim->start++;
+ prim->count--;
+ }
+
+ prim->mode = GL_LINE_STRIP;
+}
+
+
+/* Compare the present vao if it has the same setup. */
+static bool
+compare_vao(gl_vertex_processing_mode mode,
+ const struct gl_vertex_array_object *vao,
+ const struct gl_buffer_object *bo, GLintptr buffer_offset,
+ GLuint stride, GLbitfield64 vao_enabled,
+ const GLubyte size[VBO_ATTRIB_MAX],
+ const GLenum16 type[VBO_ATTRIB_MAX],
+ const GLuint offset[VBO_ATTRIB_MAX])
+{
+ if (!vao)
+ return false;
+
+ /* If the enabled arrays are not the same we are not equal. */
+ if (vao_enabled != vao->_Enabled)
+ return false;
+
+ /* Check the buffer binding at 0 */
+ if (vao->BufferBinding[0].BufferObj != bo)
+ return false;
+ /* BufferBinding[0].Offset != buffer_offset is checked per attribute */
+ if (vao->BufferBinding[0].Stride != stride)
+ return false;
+ assert(vao->BufferBinding[0].InstanceDivisor == 0);
+
+ /* Retrieve the mapping from VBO_ATTRIB to VERT_ATTRIB space */
+ const GLubyte *const vao_to_vbo_map = _vbo_attribute_alias_map[mode];
+
+ /* Now check the enabled arrays */
+ GLbitfield mask = vao_enabled;
+ while (mask) {
+ const int attr = u_bit_scan(&mask);
+ const unsigned char vbo_attr = vao_to_vbo_map[attr];
+ const GLenum16 tp = type[vbo_attr];
+ const GLintptr off = offset[vbo_attr] + buffer_offset;
+ const struct gl_array_attributes *attrib = &vao->VertexAttrib[attr];
+ if (attrib->RelativeOffset + vao->BufferBinding[0].Offset != off)
+ return false;
+ if (attrib->Type != tp)
+ return false;
+ if (attrib->Size != size[vbo_attr])
+ return false;
+ assert(attrib->Format == GL_RGBA);
+ assert(attrib->Enabled == GL_TRUE);
+ assert(attrib->Normalized == GL_FALSE);
+ assert(attrib->Integer == vbo_attrtype_to_integer_flag(tp));
+ assert(attrib->Doubles == vbo_attrtype_to_double_flag(tp));
+ assert(attrib->BufferBindingIndex == 0);
+ }
+
+ return true;
+}
+
+
+/* Create or reuse the vao for the vertex processing mode. */
+static void
+update_vao(struct gl_context *ctx,
+ gl_vertex_processing_mode mode,
+ struct gl_vertex_array_object **vao,
+ struct gl_buffer_object *bo, GLintptr buffer_offset,
+ GLuint stride, GLbitfield64 vbo_enabled,
+ const GLubyte size[VBO_ATTRIB_MAX],
+ const GLenum16 type[VBO_ATTRIB_MAX],
+ const GLuint offset[VBO_ATTRIB_MAX])
+{
+ /* Compute the bitmasks of vao_enabled arrays */
+ GLbitfield vao_enabled = _vbo_get_vao_enabled_from_vbo(mode, vbo_enabled);
+
+ /*
+ * Check if we can possibly reuse the exisiting one.
+ * In the long term we should reset them when something changes.
+ */
+ if (compare_vao(mode, *vao, bo, buffer_offset, stride,
+ vao_enabled, size, type, offset))
+ return;
+
+ /* The initial refcount is 1 */
+ _mesa_reference_vao(ctx, vao, NULL);
+ *vao = _mesa_new_vao(ctx, ~((GLuint)0));
+
+ /*
+ * assert(stride <= ctx->Const.MaxVertexAttribStride);
+ * MaxVertexAttribStride is not set for drivers that does not
+ * expose GL 44 or GLES 31.
+ */
+
+ /* Bind the buffer object at binding point 0 */
+ _mesa_bind_vertex_buffer(ctx, *vao, 0, bo, buffer_offset, stride);
+
+ /* Retrieve the mapping from VBO_ATTRIB to VERT_ATTRIB space
+ * Note that the position/generic0 aliasing is done in the VAO.
+ */
+ const GLubyte *const vao_to_vbo_map = _vbo_attribute_alias_map[mode];
+ /* Now set the enable arrays */
+ GLbitfield mask = vao_enabled;
+ while (mask) {
+ const int vao_attr = u_bit_scan(&mask);
+ const GLubyte vbo_attr = vao_to_vbo_map[vao_attr];
+ assert(offset[vbo_attr] <= ctx->Const.MaxVertexAttribRelativeOffset);
+
+ _vbo_set_attrib_format(ctx, *vao, vao_attr, buffer_offset,
+ size[vbo_attr], type[vbo_attr], offset[vbo_attr]);
+ _mesa_vertex_attrib_binding(ctx, *vao, vao_attr, 0);
+ _mesa_enable_vertex_array_attrib(ctx, *vao, vao_attr);
+ }
+ assert(vao_enabled == (*vao)->_Enabled);
+ assert((vao_enabled & ~(*vao)->VertexAttribBufferMask) == 0);
+
+ /* Finalize and freeze the VAO */
+ _mesa_set_vao_immutable(ctx, *vao);
+}
+
+