uint16_t cmd_id,
int size)
{
- struct glthread_state *glthread = ctx->GLThread;
- struct glthread_batch *next = &glthread->batches[glthread->next];
+ struct glthread_state *glthread = &ctx->GLThread;
+ struct glthread_batch *next = glthread->next_batch;
struct marshal_cmd_base *cmd_base;
- const int aligned_size = ALIGN(size, 8);
if (unlikely(next->used + size > MARSHAL_MAX_CMD_SIZE)) {
_mesa_glthread_flush_batch(ctx);
- next = &glthread->batches[glthread->next];
+ next = glthread->next_batch;
}
+ const int aligned_size = align(size, 8);
cmd_base = (struct marshal_cmd_base *)&next->buffer[next->used];
next->used += aligned_size;
cmd_base->cmd_id = cmd_id;
* calls (deprecated and removed in GL core), we just disable threading.
*/
static inline bool
-_mesa_glthread_is_non_vbo_draw_elements(const struct gl_context *ctx)
+_mesa_glthread_has_non_vbo_vertices_or_indices(const struct gl_context *ctx)
{
- struct glthread_state *glthread = ctx->GLThread;
+ const struct glthread_state *glthread = &ctx->GLThread;
+ const struct glthread_vao *vao = glthread->CurrentVAO;
+
+ /* Element buffer name 0 means the indices come from client memory, and
+ * any enabled attrib in UserPointerMask sources a user pointer; either
+ * one makes the draw unsafe for glthread (non-core profiles only).
+ */
return ctx->API != API_OPENGL_CORE &&
- (glthread->CurrentVAO->IndexBufferIsUserPointer ||
- glthread->CurrentVAO->HasUserPointer);
+ (vao->CurrentElementBufferName == 0 ||
+ (vao->UserPointerMask & vao->Enabled));
}
static inline bool
-_mesa_glthread_is_non_vbo_draw_arrays(const struct gl_context *ctx)
+_mesa_glthread_has_non_vbo_vertices(const struct gl_context *ctx)
{
- struct glthread_state *glthread = ctx->GLThread;
+ const struct glthread_state *glthread = &ctx->GLThread;
+ const struct glthread_vao *vao = glthread->CurrentVAO;
+
+ /* True when any currently-enabled vertex attrib sources a user
+ * (client-memory) pointer; core profile never allows this, so it is
+ * excluded up front.
+ */
- return ctx->API != API_OPENGL_CORE && glthread->CurrentVAO->HasUserPointer;
+ return ctx->API != API_OPENGL_CORE &&
+ (vao->UserPointerMask & vao->Enabled);
}
static inline bool
-_mesa_glthread_is_non_vbo_draw_arrays_indirect(const struct gl_context *ctx)
+_mesa_glthread_has_non_vbo_vertices_or_indirect(const struct gl_context *ctx)
{
- struct glthread_state *glthread = ctx->GLThread;
+ const struct glthread_state *glthread = &ctx->GLThread;
+ const struct glthread_vao *vao = glthread->CurrentVAO;
+
+ /* Indirect-buffer name 0 means no GL_DRAW_INDIRECT_BUFFER is bound, so
+ * the indirect parameters would presumably be read from client memory;
+ * that, or any enabled user-pointer attrib, disqualifies the draw
+ * (non-core profiles only).
+ */
return ctx->API != API_OPENGL_CORE &&
- (!glthread->draw_indirect_buffer_is_vbo ||
- glthread->CurrentVAO->HasUserPointer );
+ (glthread->CurrentDrawIndirectBufferName == 0 ||
+ (vao->UserPointerMask & vao->Enabled));
}
static inline bool
-_mesa_glthread_is_non_vbo_draw_elements_indirect(const struct gl_context *ctx)
+_mesa_glthread_has_non_vbo_vertices_or_indices_or_indirect(const struct gl_context *ctx)
{
- struct glthread_state *glthread = ctx->GLThread;
+ const struct glthread_state *glthread = &ctx->GLThread;
+ const struct glthread_vao *vao = glthread->CurrentVAO;
+
+ /* Union of the checks above: unbound indirect buffer, client-memory
+ * index buffer, or any enabled user-pointer attrib makes the draw
+ * unsafe for glthread (non-core profiles only).
+ */
return ctx->API != API_OPENGL_CORE &&
- (!glthread->draw_indirect_buffer_is_vbo ||
- glthread->CurrentVAO->IndexBufferIsUserPointer ||
- glthread->CurrentVAO->HasUserPointer);
+ (glthread->CurrentDrawIndirectBufferName == 0 ||
+ vao->CurrentElementBufferName == 0 ||
+ (vao->UserPointerMask & vao->Enabled));
}
struct _glapi_table *
_mesa_create_marshal_table(const struct gl_context *ctx);
-void
-_mesa_glthread_BindBuffer(struct gl_context *ctx, GLenum target, GLuint buffer);
-
static inline unsigned
_mesa_buffer_enum_to_count(GLenum buffer)
{
}
}
+/* Map a client-state array enum (as passed to gl{Enable,Disable}ClientState
+ * and friends) to its gl_vert_attrib slot. Returns VERT_ATTRIB_MAX for
+ * enums with no mapping.
+ */
+static inline gl_vert_attrib
+_mesa_array_to_attrib(struct gl_context *ctx, GLenum array)
+{
+ switch (array) {
+ case GL_VERTEX_ARRAY:
+ return VERT_ATTRIB_POS;
+ case GL_NORMAL_ARRAY:
+ return VERT_ATTRIB_NORMAL;
+ case GL_COLOR_ARRAY:
+ return VERT_ATTRIB_COLOR0;
+ case GL_INDEX_ARRAY:
+ return VERT_ATTRIB_COLOR_INDEX;
+ case GL_TEXTURE_COORD_ARRAY:
+ /* Texcoord arrays are selected by the client active texture unit
+ * tracked in glthread state.
+ */
+ return VERT_ATTRIB_TEX(ctx->GLThread.ClientActiveTexture);
+ case GL_EDGE_FLAG_ARRAY:
+ return VERT_ATTRIB_EDGEFLAG;
+ case GL_FOG_COORDINATE_ARRAY:
+ return VERT_ATTRIB_FOG;
+ case GL_SECONDARY_COLOR_ARRAY:
+ return VERT_ATTRIB_COLOR1;
+ case GL_POINT_SIZE_ARRAY_OES:
+ return VERT_ATTRIB_POINT_SIZE;
+ default:
+ /* NOTE(review): presumably some callers pass a texture unit enum
+ * directly rather than GL_TEXTURE_COORD_ARRAY — confirm against
+ * callers; only units 0-7 are accepted here.
+ */
+ if (array >= GL_TEXTURE0 && array <= GL_TEXTURE7)
+ return VERT_ATTRIB_TEX(array - GL_TEXTURE0);
+ return VERT_ATTRIB_MAX;
+ }
+}
+
+
#endif /* MARSHAL_H */