2 * Copyright © 2012 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
26 * Declarations of functions related to marshalling GL calls from a client
27 * thread to a server thread.
33 #include "main/glthread.h"
34 #include "main/context.h"
35 #include "main/macros.h"
36 #include "marshal_generated.h"
/**
 * Header common to every marshalled command; each command struct embeds this
 * as its first member so the unmarshal loop can identify and step over
 * commands in the batch buffer.
 */
struct marshal_cmd_base
{
   /**
    * Type of command.  See enum marshal_dispatch_cmd_id.
    */
   uint16_t cmd_id;

   /**
    * Size of command, in multiples of 4 bytes, including cmd_base.
    * NOTE(review): _mesa_glthread_allocate_command stores the
    * ALIGN(size, 8) byte count here — "multiples of 4 bytes" may be stale;
    * confirm against the unmarshal loop.
    */
   uint16_t cmd_size;
};
/**
 * Signature shared by all unmarshal entry points: executed on the server
 * thread with a pointer to the command payload recorded by the corresponding
 * _mesa_marshal_* function.
 */
typedef void (*_mesa_unmarshal_func)(struct gl_context *ctx, const void *cmd);

/**
 * Table mapping marshal_dispatch_cmd_id to its unmarshal function; the table
 * itself is emitted by the generator (see marshal_generated.h for
 * NUM_DISPATCH_CMD).
 */
extern const _mesa_unmarshal_func _mesa_unmarshal_dispatch[NUM_DISPATCH_CMD];
55 _mesa_glthread_allocate_command(struct gl_context
*ctx
,
59 struct glthread_state
*glthread
= ctx
->GLThread
;
60 struct glthread_batch
*next
= &glthread
->batches
[glthread
->next
];
61 struct marshal_cmd_base
*cmd_base
;
62 const int aligned_size
= ALIGN(size
, 8);
64 if (unlikely(next
->used
+ size
> MARSHAL_MAX_CMD_SIZE
)) {
65 _mesa_glthread_flush_batch(ctx
);
66 next
= &glthread
->batches
[glthread
->next
];
69 cmd_base
= (struct marshal_cmd_base
*)&next
->buffer
[next
->used
];
70 next
->used
+= aligned_size
;
71 cmd_base
->cmd_id
= cmd_id
;
72 cmd_base
->cmd_size
= aligned_size
;
77 * Instead of conditionally handling marshaling previously-bound user vertex
78 * array data in draw calls (deprecated and removed in GL core), we just
79 * disable threading at the point where the user sets a user vertex array.
82 _mesa_glthread_is_non_vbo_vertex_attrib_pointer(const struct gl_context
*ctx
)
84 struct glthread_state
*glthread
= ctx
->GLThread
;
86 return ctx
->API
!= API_OPENGL_CORE
&& !glthread
->vertex_array_is_vbo
;
90 * Instead of conditionally handling marshaling immediate index data in draw
91 * calls (deprecated and removed in GL core), we just disable threading.
94 _mesa_glthread_is_non_vbo_draw_elements(const struct gl_context
*ctx
)
96 struct glthread_state
*glthread
= ctx
->GLThread
;
98 return ctx
->API
!= API_OPENGL_CORE
&& !glthread
->element_array_is_vbo
;
102 _mesa_glthread_is_non_vbo_draw_arrays_indirect(const struct gl_context
*ctx
)
104 struct glthread_state
*glthread
= ctx
->GLThread
;
106 return ctx
->API
!= API_OPENGL_CORE
&&
107 !glthread
->draw_indirect_buffer_is_vbo
;
111 _mesa_glthread_is_non_vbo_draw_elements_indirect(const struct gl_context
*ctx
)
113 struct glthread_state
*glthread
= ctx
->GLThread
;
115 return ctx
->API
!= API_OPENGL_CORE
&&
116 (!glthread
->draw_indirect_buffer_is_vbo
||
117 !glthread
->element_array_is_vbo
);
120 #define DEBUG_MARSHAL_PRINT_CALLS 0
/**
 * Debug helper: announces that we have fallen back to a synchronous call.
 * This can happen when MARSHAL_MAX_CMD_SIZE is exceeded.  Compiled out
 * unless DEBUG_MARSHAL_PRINT_CALLS is non-zero.
 */
static inline void
debug_print_sync_fallback(const char *func)
{
#if DEBUG_MARSHAL_PRINT_CALLS
   printf("fallback to sync: %s\n", func);
#else
   (void) func;
#endif
}
/**
 * Debug helper: announces a deliberately synchronous (non-marshalled) call.
 * Compiled out unless DEBUG_MARSHAL_PRINT_CALLS is non-zero.
 */
static inline void
debug_print_sync(const char *func)
{
#if DEBUG_MARSHAL_PRINT_CALLS
   printf("sync: %s\n", func);
#else
   (void) func;
#endif
}
/**
 * Debug helper: announces a call that was queued for asynchronous execution.
 * Compiled out unless DEBUG_MARSHAL_PRINT_CALLS is non-zero.
 */
static inline void
debug_print_marshal(const char *func)
{
#if DEBUG_MARSHAL_PRINT_CALLS
   printf("marshal: %s\n", func);
#else
   (void) func;
#endif
}
/**
 * Builds the glapi dispatch table used while glthread is active.
 * NOTE(review): presumably its slots point at the _mesa_marshal_* wrappers
 * declared below — implementation is not visible in this header; confirm in
 * marshal.c / the generated marshal code.
 */
struct _glapi_table *
_mesa_create_marshal_table(const struct gl_context *ctx);
156 * Checks whether we're on a compat context for code-generated
157 * glBindVertexArray().
159 * In order to decide whether a draw call uses only VBOs for vertex and index
160 * buffers, we track the current vertex and index buffer bindings by
161 * glBindBuffer(). However, the index buffer binding is stored in the vertex
162 * array as opposed to the context. If we were to accurately track whether
163 the index buffer was a user pointer or not, we'd have to track it per
164 * vertex array, which would mean synchronizing with the client thread and
165 * looking into the hash table to find the actual vertex array object. That's
166 * more tracking than we'd like to do in the main thread, if possible.
168 * Instead, just punt for now and disable threading on apps using vertex
169 * arrays and compat contexts. Apps using vertex arrays can probably use a
173 _mesa_glthread_is_compat_bind_vertex_array(const struct gl_context
*ctx
)
175 return ctx
->API
!= API_OPENGL_CORE
;
/* Per-command payload structs for the hand-written marshal entry points
 * below.  Declared opaquely here; their layouts live with the
 * implementations (not visible in this header). */
struct marshal_cmd_Enable;
struct marshal_cmd_ShaderSource;
struct marshal_cmd_Flush;
struct marshal_cmd_BindBuffer;
struct marshal_cmd_BufferData;
struct marshal_cmd_BufferSubData;
struct marshal_cmd_NamedBufferData;
struct marshal_cmd_NamedBufferSubData;

/* For each command: _mesa_marshal_* runs on the application thread and
 * queues the call; _mesa_unmarshal_* runs on the server thread and replays
 * it from the recorded payload. */

void
_mesa_unmarshal_Enable(struct gl_context *ctx,
                       const struct marshal_cmd_Enable *cmd);

void GLAPIENTRY
_mesa_marshal_Enable(GLenum cap);

void GLAPIENTRY
_mesa_marshal_ShaderSource(GLuint shader, GLsizei count,
                           const GLchar * const *string, const GLint *length);

void
_mesa_unmarshal_ShaderSource(struct gl_context *ctx,
                             const struct marshal_cmd_ShaderSource *cmd);

void GLAPIENTRY
_mesa_marshal_Flush(void);

void
_mesa_unmarshal_Flush(struct gl_context *ctx,
                      const struct marshal_cmd_Flush *cmd);

void GLAPIENTRY
_mesa_marshal_BindBuffer(GLenum target, GLuint buffer);

void
_mesa_unmarshal_BindBuffer(struct gl_context *ctx,
                           const struct marshal_cmd_BindBuffer *cmd);

void
_mesa_unmarshal_BufferData(struct gl_context *ctx,
                           const struct marshal_cmd_BufferData *cmd);

void GLAPIENTRY
_mesa_marshal_BufferData(GLenum target, GLsizeiptr size, const GLvoid * data,
                         GLenum usage);

void
_mesa_unmarshal_BufferSubData(struct gl_context *ctx,
                              const struct marshal_cmd_BufferSubData *cmd);

void GLAPIENTRY
_mesa_marshal_BufferSubData(GLenum target, GLintptr offset, GLsizeiptr size,
                            const GLvoid * data);

void
_mesa_unmarshal_NamedBufferData(struct gl_context *ctx,
                                const struct marshal_cmd_NamedBufferData *cmd);

void GLAPIENTRY
_mesa_marshal_NamedBufferData(GLuint buffer, GLsizeiptr size,
                              const GLvoid * data, GLenum usage);

void
_mesa_unmarshal_NamedBufferSubData(struct gl_context *ctx,
                                   const struct marshal_cmd_NamedBufferSubData *cmd);

void GLAPIENTRY
_mesa_marshal_NamedBufferSubData(GLuint buffer, GLintptr offset, GLsizeiptr size,
                                 const GLvoid * data);
/**
 * Number of values associated with a glClearBuffer-style buffer enum:
 * 4 for GL_COLOR (RGBA), 2 for GL_DEPTH_STENCIL, 1 for GL_DEPTH/GL_STENCIL,
 * 0 for anything unrecognized (callers should treat 0 as "sync instead").
 * NOTE(review): presumably used to size marshalled ClearBuffer payloads —
 * confirm against callers.
 */
static inline unsigned
_mesa_buffer_enum_to_count(GLenum buffer)
{
   switch (buffer) {
   case GL_COLOR:
      return 4;
   case GL_DEPTH_STENCIL:
      return 2;
   case GL_STENCIL:
   case GL_DEPTH:
      return 1;
   default:
      return 0;
   }
}
/**
 * Number of parameter values a glTexParameter pname consumes: 1 for scalar
 * parameters, 4 for the vector ones (crop rect, RGBA swizzle, border color),
 * and 0 for pnames not handled here (callers should treat 0 as "sync
 * instead").
 */
static inline unsigned
_mesa_tex_param_enum_to_count(GLenum pname)
{
   switch (pname) {
   case GL_TEXTURE_MIN_FILTER:
   case GL_TEXTURE_MAG_FILTER:
   case GL_TEXTURE_WRAP_S:
   case GL_TEXTURE_WRAP_T:
   case GL_TEXTURE_WRAP_R:
   case GL_TEXTURE_BASE_LEVEL:
   case GL_TEXTURE_MAX_LEVEL:
   case GL_GENERATE_MIPMAP_SGIS:
   case GL_TEXTURE_COMPARE_MODE_ARB:
   case GL_TEXTURE_COMPARE_FUNC_ARB:
   case GL_DEPTH_TEXTURE_MODE_ARB:
   case GL_DEPTH_STENCIL_TEXTURE_MODE:
   case GL_TEXTURE_SRGB_DECODE_EXT:
   case GL_TEXTURE_CUBE_MAP_SEAMLESS:
   case GL_TEXTURE_SWIZZLE_R:
   case GL_TEXTURE_SWIZZLE_G:
   case GL_TEXTURE_SWIZZLE_B:
   case GL_TEXTURE_SWIZZLE_A:
   case GL_TEXTURE_MIN_LOD:
   case GL_TEXTURE_MAX_LOD:
   case GL_TEXTURE_PRIORITY:
   case GL_TEXTURE_MAX_ANISOTROPY_EXT:
   case GL_TEXTURE_LOD_BIAS:
   case GL_TEXTURE_TILING_EXT:
      return 1;
   case GL_TEXTURE_CROP_RECT_OES:
   case GL_TEXTURE_SWIZZLE_RGBA:
   case GL_TEXTURE_BORDER_COLOR:
      return 4;
   default:
      return 0;
   }
}
302 #endif /* MARSHAL_H */