X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fmain%2Fmarshal.h;h=676050319f1d8485fa1c40e743b5cd3990b27182;hb=108fdb54c6c1b82ec3131b0c2e00d554b3729cfb;hp=0b69d66d9af34c799f126bc46c4acb89f0ea7243;hpb=154a4f267959f6975647720237a28bae1ae86c8d;p=mesa.git diff --git a/src/mesa/main/marshal.h b/src/mesa/main/marshal.h index 0b69d66d9af..676050319f1 100644 --- a/src/mesa/main/marshal.h +++ b/src/mesa/main/marshal.h @@ -32,6 +32,8 @@ #include "main/glthread.h" #include "main/context.h" +#include "main/macros.h" +#include "marshal_generated.h" struct marshal_cmd_base { @@ -46,70 +48,148 @@ struct marshal_cmd_base uint16_t cmd_size; }; +typedef void (*_mesa_unmarshal_func)(struct gl_context *ctx, const void *cmd); +extern const _mesa_unmarshal_func _mesa_unmarshal_dispatch[NUM_DISPATCH_CMD]; static inline void * _mesa_glthread_allocate_command(struct gl_context *ctx, uint16_t cmd_id, - size_t size) + int size) { struct glthread_state *glthread = ctx->GLThread; + struct glthread_batch *next = &glthread->batches[glthread->next]; struct marshal_cmd_base *cmd_base; + const int aligned_size = ALIGN(size, 8); - if (unlikely(glthread->batch->used + size > MARSHAL_MAX_CMD_SIZE)) + if (unlikely(next->used + size > MARSHAL_MAX_CMD_SIZE)) { _mesa_glthread_flush_batch(ctx); + next = &glthread->batches[glthread->next]; + } - cmd_base = (struct marshal_cmd_base *) - &glthread->batch->buffer[glthread->batch->used]; - glthread->batch->used += size; + cmd_base = (struct marshal_cmd_base *)&next->buffer[next->used]; + next->used += aligned_size; cmd_base->cmd_id = cmd_id; - cmd_base->cmd_size = size; + cmd_base->cmd_size = aligned_size; return cmd_base; } +/** + * Instead of conditionally handling marshaling previously-bound user vertex + * array data in draw calls (deprecated and removed in GL core), we just + * disable threading at the point where the user sets a user vertex array. 
+ */ +static inline bool +_mesa_glthread_is_non_vbo_vertex_attrib_pointer(const struct gl_context *ctx) +{ + struct glthread_state *glthread = ctx->GLThread; + + return ctx->API != API_OPENGL_CORE && !glthread->vertex_array_is_vbo; +} + +/** + * Instead of conditionally handling marshaling immediate index data in draw + * calls (deprecated and removed in GL core), we just disable threading. + */ +static inline bool +_mesa_glthread_is_non_vbo_draw_elements(const struct gl_context *ctx) +{ + struct glthread_state *glthread = ctx->GLThread; + + return ctx->API != API_OPENGL_CORE && !glthread->element_array_is_vbo; +} + +static inline bool +_mesa_glthread_is_non_vbo_draw_arrays_indirect(const struct gl_context *ctx) +{ + struct glthread_state *glthread = ctx->GLThread; + + return ctx->API != API_OPENGL_CORE && + !glthread->draw_indirect_buffer_is_vbo; +} + +static inline bool +_mesa_glthread_is_non_vbo_draw_elements_indirect(const struct gl_context *ctx) +{ + struct glthread_state *glthread = ctx->GLThread; + + return ctx->API != API_OPENGL_CORE && + (!glthread->draw_indirect_buffer_is_vbo || + !glthread->element_array_is_vbo); +} + #define DEBUG_MARSHAL_PRINT_CALLS 0 +/** + * This is printed when we have fallen back to a sync. This can happen when + * MARSHAL_MAX_CMD_SIZE is exceeded. 
+ */
 static inline void
-debug_print_sync(const char *func)
+debug_print_sync_fallback(const char *func)
 {
 #if DEBUG_MARSHAL_PRINT_CALLS
-   printf("sync: %s\n", func);
+   printf("fallback to sync: %s\n", func);
 #endif
 }
 
+static inline void
+debug_print_sync(const char *func)
+{
+#if DEBUG_MARSHAL_PRINT_CALLS
+   printf("sync: %s\n", func);
+#endif
+}
 
 static inline void
-debug_print_unmarshal(const char *func)
+debug_print_marshal(const char *func)
 {
 #if DEBUG_MARSHAL_PRINT_CALLS
-   printf("unmarshal: %s\n", func);
+   printf("marshal: %s\n", func);
 #endif
 }
 
 struct _glapi_table *
 _mesa_create_marshal_table(const struct gl_context *ctx);
 
-size_t
-_mesa_unmarshal_dispatch_cmd(struct gl_context *ctx, const void *cmd);
 
-static inline void
-_mesa_post_marshal_hook(struct gl_context *ctx)
+/**
+ * Checks whether we're on a compat context for code-generated
+ * glBindVertexArray().
+ *
+ * In order to decide whether a draw call uses only VBOs for vertex and index
+ * buffers, we track the current vertex and index buffer bindings by
+ * glBindBuffer(). However, the index buffer binding is stored in the vertex
+ * array as opposed to the context. If we were to accurately track whether
+ * the index buffer was a user pointer or not, we'd have to track it per
+ * vertex array, which would mean synchronizing with the client thread and
+ * looking into the hash table to find the actual vertex array object. That's
+ * more tracking than we'd like to do in the main thread, if possible.
+ *
+ * Instead, just punt for now and disable threading on apps using vertex
+ * arrays and compat contexts. Apps using vertex arrays can probably use a
+ * core context. 
+ */ +static inline bool +_mesa_glthread_is_compat_bind_vertex_array(const struct gl_context *ctx) { - /* This can be enabled for debugging whether a failure is a synchronization - * problem between the main thread and the worker thread, or a failure in - * how we actually marshal. - */ - if (false) - _mesa_glthread_finish(ctx); + return ctx->API != API_OPENGL_CORE; } +struct marshal_cmd_Enable; struct marshal_cmd_ShaderSource; +struct marshal_cmd_Flush; +struct marshal_cmd_BindBuffer; +struct marshal_cmd_BufferData; +struct marshal_cmd_BufferSubData; +struct marshal_cmd_NamedBufferData; +struct marshal_cmd_NamedBufferSubData; + +void +_mesa_unmarshal_Enable(struct gl_context *ctx, + const struct marshal_cmd_Enable *cmd); + +void GLAPIENTRY +_mesa_marshal_Enable(GLenum cap); void GLAPIENTRY _mesa_marshal_ShaderSource(GLuint shader, GLsizei count, @@ -119,4 +199,66 @@ void _mesa_unmarshal_ShaderSource(struct gl_context *ctx, const struct marshal_cmd_ShaderSource *cmd); +void GLAPIENTRY +_mesa_marshal_Flush(void); + +void +_mesa_unmarshal_Flush(struct gl_context *ctx, + const struct marshal_cmd_Flush *cmd); + +void GLAPIENTRY +_mesa_marshal_BindBuffer(GLenum target, GLuint buffer); + +void +_mesa_unmarshal_BindBuffer(struct gl_context *ctx, + const struct marshal_cmd_BindBuffer *cmd); + +void +_mesa_unmarshal_BufferData(struct gl_context *ctx, + const struct marshal_cmd_BufferData *cmd); + +void GLAPIENTRY +_mesa_marshal_BufferData(GLenum target, GLsizeiptr size, const GLvoid * data, + GLenum usage); + +void +_mesa_unmarshal_BufferSubData(struct gl_context *ctx, + const struct marshal_cmd_BufferSubData *cmd); + +void GLAPIENTRY +_mesa_marshal_BufferSubData(GLenum target, GLintptr offset, GLsizeiptr size, + const GLvoid * data); + +void +_mesa_unmarshal_NamedBufferData(struct gl_context *ctx, + const struct marshal_cmd_NamedBufferData *cmd); + +void GLAPIENTRY +_mesa_marshal_NamedBufferData(GLuint buffer, GLsizeiptr size, + const GLvoid * data, GLenum usage); + +void 
+_mesa_unmarshal_NamedBufferSubData(struct gl_context *ctx, + const struct marshal_cmd_NamedBufferSubData *cmd); + +void GLAPIENTRY +_mesa_marshal_NamedBufferSubData(GLuint buffer, GLintptr offset, GLsizeiptr size, + const GLvoid * data); + +static inline unsigned +_mesa_buffer_enum_to_count(GLenum buffer) +{ + switch (buffer) { + case GL_COLOR: + return 4; + case GL_DEPTH_STENCIL: + return 2; + case GL_STENCIL: + case GL_DEPTH: + return 1; + default: + return 0; + } +} + #endif /* MARSHAL_H */