/*
 * Copyright © 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/* Draw function marshalling for glthread.
 *
 * The purpose of these glDraw wrappers is to upload non-VBO vertex and
 * index data, so that glthread doesn't have to execute synchronously.
 */
30 #include "c99_alloca.h"
32 #include "main/glthread_marshal.h"
33 #include "main/dispatch.h"
34 #include "main/varray.h"
36 static inline unsigned
37 get_index_size(GLenum type
)
39 /* GL_UNSIGNED_BYTE - GL_UNSIGNED_BYTE = 0
40 * GL_UNSIGNED_SHORT - GL_UNSIGNED_BYTE = 2
41 * GL_UNSIGNED_INT - GL_UNSIGNED_BYTE = 4
43 * Divide by 2 to get n=0,1,2, then the index size is: 1 << n
45 return 1 << ((type
- GL_UNSIGNED_BYTE
) >> 1);
49 is_index_type_valid(GLenum type
)
51 /* GL_UNSIGNED_BYTE = 0x1401
52 * GL_UNSIGNED_SHORT = 0x1403
53 * GL_UNSIGNED_INT = 0x1405
55 * The trick is that bit 1 and bit 2 mean USHORT and UINT, respectively.
56 * After clearing those two bits (with ~6), we should get UBYTE.
57 * Both bits can't be set, because the enum would be greater than UINT.
59 return type
<= GL_UNSIGNED_INT
&& (type
& ~6) == GL_UNSIGNED_BYTE
;
62 static ALWAYS_INLINE
struct gl_buffer_object
*
63 upload_indices(struct gl_context
*ctx
, unsigned count
, unsigned index_size
,
64 const GLvoid
**indices
)
66 struct gl_buffer_object
*upload_buffer
= NULL
;
67 unsigned upload_offset
= 0;
71 _mesa_glthread_upload(ctx
, *indices
, index_size
* count
,
72 &upload_offset
, &upload_buffer
, NULL
);
73 assert(upload_buffer
);
74 *indices
= (const GLvoid
*)(intptr_t)upload_offset
;
79 static ALWAYS_INLINE
struct gl_buffer_object
*
80 upload_multi_indices(struct gl_context
*ctx
, unsigned total_count
,
81 unsigned index_size
, unsigned draw_count
,
82 const GLsizei
*count
, const GLvoid
*const *indices
,
83 const GLvoid
**out_indices
)
85 struct gl_buffer_object
*upload_buffer
= NULL
;
86 unsigned upload_offset
= 0;
87 uint8_t *upload_ptr
= NULL
;
91 _mesa_glthread_upload(ctx
, NULL
, index_size
* total_count
,
92 &upload_offset
, &upload_buffer
, &upload_ptr
);
93 assert(upload_buffer
);
95 for (unsigned i
= 0, offset
= 0; i
< draw_count
; i
++) {
99 unsigned size
= count
[i
] * index_size
;
101 memcpy(upload_ptr
+ offset
, indices
[i
], size
);
102 out_indices
[i
] = (const GLvoid
*)(intptr_t)(upload_offset
+ offset
);
106 return upload_buffer
;
109 static ALWAYS_INLINE
bool
110 upload_vertices(struct gl_context
*ctx
, unsigned attrib_mask
,
111 unsigned start_vertex
, unsigned num_vertices
,
112 unsigned start_instance
, unsigned num_instances
,
113 struct glthread_attrib_binding
*attribs
)
115 struct glthread_vao
*vao
= ctx
->GLThread
.CurrentVAO
;
116 unsigned attrib_mask_iter
= attrib_mask
;
117 unsigned num_attribs
= 0;
119 assert((num_vertices
|| !(attrib_mask
& ~vao
->NonZeroDivisorMask
)) &&
120 (num_instances
|| !(attrib_mask
& vao
->NonZeroDivisorMask
)));
122 while (attrib_mask_iter
) {
123 unsigned i
= u_bit_scan(&attrib_mask_iter
);
124 struct gl_buffer_object
*upload_buffer
= NULL
;
125 unsigned upload_offset
= 0;
126 unsigned stride
= vao
->Attrib
[i
].Stride
;
127 unsigned instance_div
= vao
->Attrib
[i
].Divisor
;
128 unsigned element_size
= vao
->Attrib
[i
].ElementSize
;
129 unsigned offset
, size
;
132 /* Per-instance attrib. */
134 /* Figure out how many instances we'll render given instance_div. We
135 * can't use the typical div_round_up() pattern because the CTS uses
136 * instance_div = ~0 for a test, which overflows div_round_up()'s
139 unsigned count
= num_instances
/ instance_div
;
140 if (count
* instance_div
!= num_instances
)
143 offset
= stride
* start_instance
;
144 size
= stride
* (count
- 1) + element_size
;
146 /* Per-vertex attrib. */
147 offset
= stride
* start_vertex
;
148 size
= stride
* (num_vertices
- 1) + element_size
;
151 const void *ptr
= vao
->Attrib
[i
].Pointer
;
152 _mesa_glthread_upload(ctx
, (uint8_t*)ptr
+ offset
,
153 size
, &upload_offset
, &upload_buffer
, NULL
);
154 assert(upload_buffer
);
156 attribs
[num_attribs
].buffer
= upload_buffer
;
157 attribs
[num_attribs
].offset
= upload_offset
- offset
;
158 attribs
[num_attribs
].original_pointer
= ptr
;
164 struct marshal_cmd_DrawArraysInstancedBaseInstance
166 struct marshal_cmd_base cmd_base
;
170 GLsizei instance_count
;
172 GLuint non_vbo_attrib_mask
;
176 _mesa_unmarshal_DrawArraysInstancedBaseInstance(struct gl_context
*ctx
,
177 const struct marshal_cmd_DrawArraysInstancedBaseInstance
*cmd
)
179 const GLenum mode
= cmd
->mode
;
180 const GLint first
= cmd
->first
;
181 const GLsizei count
= cmd
->count
;
182 const GLsizei instance_count
= cmd
->instance_count
;
183 const GLuint baseinstance
= cmd
->baseinstance
;
184 const GLuint non_vbo_attrib_mask
= cmd
->non_vbo_attrib_mask
;
185 const struct glthread_attrib_binding
*attribs
=
186 (const struct glthread_attrib_binding
*)(cmd
+ 1);
188 /* Bind uploaded buffers if needed. */
189 if (non_vbo_attrib_mask
) {
190 _mesa_InternalBindVertexBuffers(ctx
, attribs
, non_vbo_attrib_mask
,
194 CALL_DrawArraysInstancedBaseInstance(ctx
->CurrentServerDispatch
,
195 (mode
, first
, count
, instance_count
,
198 /* Restore states. */
199 if (non_vbo_attrib_mask
) {
200 _mesa_InternalBindVertexBuffers(ctx
, attribs
, non_vbo_attrib_mask
,
205 static ALWAYS_INLINE
void
206 draw_arrays_async(struct gl_context
*ctx
, GLenum mode
, GLint first
,
207 GLsizei count
, GLsizei instance_count
, GLuint baseinstance
,
208 unsigned non_vbo_attrib_mask
,
209 const struct glthread_attrib_binding
*attribs
)
211 int attribs_size
= util_bitcount(non_vbo_attrib_mask
) * sizeof(attribs
[0]);
212 int cmd_size
= sizeof(struct marshal_cmd_DrawArraysInstancedBaseInstance
) +
214 struct marshal_cmd_DrawArraysInstancedBaseInstance
*cmd
;
216 cmd
= _mesa_glthread_allocate_command(ctx
, DISPATCH_CMD_DrawArraysInstancedBaseInstance
,
221 cmd
->instance_count
= instance_count
;
222 cmd
->baseinstance
= baseinstance
;
223 cmd
->non_vbo_attrib_mask
= non_vbo_attrib_mask
;
225 if (non_vbo_attrib_mask
)
226 memcpy(cmd
+ 1, attribs
, attribs_size
);
229 static ALWAYS_INLINE
void
230 draw_arrays(GLenum mode
, GLint first
, GLsizei count
, GLsizei instance_count
,
231 GLuint baseinstance
, bool compiled_into_dlist
)
233 GET_CURRENT_CONTEXT(ctx
);
235 struct glthread_vao
*vao
= ctx
->GLThread
.CurrentVAO
;
236 unsigned non_vbo_attrib_mask
= vao
->UserPointerMask
& vao
->Enabled
;
238 if (compiled_into_dlist
&& ctx
->GLThread
.inside_dlist
) {
239 _mesa_glthread_finish_before(ctx
, "DrawArrays");
240 /* Use the function that's compiled into a display list. */
241 CALL_DrawArrays(ctx
->CurrentServerDispatch
, (mode
, first
, count
));
245 /* Fast path when nothing needs to be done.
247 * This is also an error path. Zero counts should still call the driver
248 * for possible GL errors.
250 if (ctx
->API
== API_OPENGL_CORE
|| !non_vbo_attrib_mask
||
251 count
<= 0 || instance_count
<= 0) {
252 draw_arrays_async(ctx
, mode
, first
, count
, instance_count
, baseinstance
,
257 /* Upload and draw. */
258 struct glthread_attrib_binding attribs
[VERT_ATTRIB_MAX
];
259 if (!ctx
->GLThread
.SupportsNonVBOUploads
||
260 !upload_vertices(ctx
, non_vbo_attrib_mask
, first
, count
, baseinstance
,
261 instance_count
, attribs
)) {
262 _mesa_glthread_finish_before(ctx
, "DrawArrays");
263 CALL_DrawArraysInstancedBaseInstance(ctx
->CurrentServerDispatch
,
264 (mode
, first
, count
, instance_count
,
269 draw_arrays_async(ctx
, mode
, first
, count
, instance_count
, baseinstance
,
270 non_vbo_attrib_mask
, attribs
);
273 struct marshal_cmd_MultiDrawArrays
275 struct marshal_cmd_base cmd_base
;
278 GLuint non_vbo_attrib_mask
;
282 _mesa_unmarshal_MultiDrawArrays(struct gl_context
*ctx
,
283 const struct marshal_cmd_MultiDrawArrays
*cmd
)
285 const GLenum mode
= cmd
->mode
;
286 const GLsizei draw_count
= cmd
->draw_count
;
287 const GLuint non_vbo_attrib_mask
= cmd
->non_vbo_attrib_mask
;
289 const char *variable_data
= (const char *)(cmd
+ 1);
290 const GLint
*first
= (GLint
*)variable_data
;
291 variable_data
+= sizeof(GLint
) * draw_count
;
292 const GLsizei
*count
= (GLsizei
*)variable_data
;
293 variable_data
+= sizeof(GLsizei
) * draw_count
;
294 const struct glthread_attrib_binding
*attribs
=
295 (const struct glthread_attrib_binding
*)variable_data
;
297 /* Bind uploaded buffers if needed. */
298 if (non_vbo_attrib_mask
) {
299 _mesa_InternalBindVertexBuffers(ctx
, attribs
, non_vbo_attrib_mask
,
303 CALL_MultiDrawArrays(ctx
->CurrentServerDispatch
,
304 (mode
, first
, count
, draw_count
));
306 /* Restore states. */
307 if (non_vbo_attrib_mask
) {
308 _mesa_InternalBindVertexBuffers(ctx
, attribs
, non_vbo_attrib_mask
,
313 static ALWAYS_INLINE
void
314 multi_draw_arrays_async(struct gl_context
*ctx
, GLenum mode
,
315 const GLint
*first
, const GLsizei
*count
,
316 GLsizei draw_count
, unsigned non_vbo_attrib_mask
,
317 const struct glthread_attrib_binding
*attribs
)
319 int first_size
= sizeof(GLint
) * draw_count
;
320 int count_size
= sizeof(GLsizei
) * draw_count
;
321 int attribs_size
= util_bitcount(non_vbo_attrib_mask
) * sizeof(attribs
[0]);
322 int cmd_size
= sizeof(struct marshal_cmd_MultiDrawArrays
) +
323 first_size
+ count_size
+ attribs_size
;
324 struct marshal_cmd_MultiDrawArrays
*cmd
;
326 cmd
= _mesa_glthread_allocate_command(ctx
, DISPATCH_CMD_MultiDrawArrays
,
329 cmd
->draw_count
= draw_count
;
330 cmd
->non_vbo_attrib_mask
= non_vbo_attrib_mask
;
332 char *variable_data
= (char*)(cmd
+ 1);
333 memcpy(variable_data
, first
, first_size
);
334 variable_data
+= first_size
;
335 memcpy(variable_data
, count
, count_size
);
337 if (non_vbo_attrib_mask
) {
338 variable_data
+= count_size
;
339 memcpy(variable_data
, attribs
, attribs_size
);
344 _mesa_marshal_MultiDrawArrays(GLenum mode
, const GLint
*first
,
345 const GLsizei
*count
, GLsizei draw_count
)
347 GET_CURRENT_CONTEXT(ctx
);
349 struct glthread_vao
*vao
= ctx
->GLThread
.CurrentVAO
;
350 unsigned non_vbo_attrib_mask
= vao
->UserPointerMask
& vao
->Enabled
;
352 if (ctx
->GLThread
.inside_dlist
)
355 if (draw_count
>= 0 &&
356 (ctx
->API
== API_OPENGL_CORE
|| !non_vbo_attrib_mask
)) {
357 multi_draw_arrays_async(ctx
, mode
, first
, count
, draw_count
, 0, NULL
);
361 /* If the draw count is too high or negative, the queue can't be used. */
362 if (!ctx
->GLThread
.SupportsNonVBOUploads
||
363 draw_count
< 0 || draw_count
> MARSHAL_MAX_CMD_SIZE
/ 16)
366 unsigned min_index
= ~0;
367 unsigned max_index_exclusive
= 0;
369 for (unsigned i
= 0; i
< draw_count
; i
++) {
370 GLsizei vertex_count
= count
[i
];
372 if (vertex_count
< 0) {
373 /* Just call the driver to set the error. */
374 multi_draw_arrays_async(ctx
, mode
, first
, count
, draw_count
, 0, NULL
);
377 if (vertex_count
== 0)
380 min_index
= MIN2(min_index
, first
[i
]);
381 max_index_exclusive
= MAX2(max_index_exclusive
, first
[i
] + vertex_count
);
384 unsigned num_vertices
= max_index_exclusive
- min_index
;
385 if (num_vertices
== 0) {
386 /* Nothing to do, but call the driver to set possible GL errors. */
387 multi_draw_arrays_async(ctx
, mode
, first
, count
, draw_count
, 0, NULL
);
391 /* Upload and draw. */
392 struct glthread_attrib_binding attribs
[VERT_ATTRIB_MAX
];
393 if (!upload_vertices(ctx
, non_vbo_attrib_mask
, min_index
, num_vertices
,
397 multi_draw_arrays_async(ctx
, mode
, first
, count
, draw_count
,
398 non_vbo_attrib_mask
, attribs
);
402 _mesa_glthread_finish_before(ctx
, "MultiDrawArrays");
403 CALL_MultiDrawArrays(ctx
->CurrentServerDispatch
,
404 (mode
, first
, count
, draw_count
));
407 struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstance
409 struct marshal_cmd_base cmd_base
;
410 bool index_bounds_valid
;
414 GLsizei instance_count
;
419 GLuint non_vbo_attrib_mask
;
420 const GLvoid
*indices
;
421 struct gl_buffer_object
*index_buffer
;
425 _mesa_unmarshal_DrawElementsInstancedBaseVertexBaseInstance(struct gl_context
*ctx
,
426 const struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstance
*cmd
)
428 const GLenum mode
= cmd
->mode
;
429 const GLsizei count
= cmd
->count
;
430 const GLenum type
= cmd
->type
;
431 const GLvoid
*indices
= cmd
->indices
;
432 const GLsizei instance_count
= cmd
->instance_count
;
433 const GLint basevertex
= cmd
->basevertex
;
434 const GLuint baseinstance
= cmd
->baseinstance
;
435 const GLuint min_index
= cmd
->min_index
;
436 const GLuint max_index
= cmd
->max_index
;
437 const GLuint non_vbo_attrib_mask
= cmd
->non_vbo_attrib_mask
;
438 struct gl_buffer_object
*index_buffer
= cmd
->index_buffer
;
439 const struct glthread_attrib_binding
*attribs
=
440 (const struct glthread_attrib_binding
*)(cmd
+ 1);
442 /* Bind uploaded buffers if needed. */
443 if (non_vbo_attrib_mask
) {
444 _mesa_InternalBindVertexBuffers(ctx
, attribs
, non_vbo_attrib_mask
,
448 _mesa_InternalBindElementBuffer(ctx
, index_buffer
);
452 if (cmd
->index_bounds_valid
&& instance_count
== 1 && baseinstance
== 0) {
453 CALL_DrawRangeElementsBaseVertex(ctx
->CurrentServerDispatch
,
454 (mode
, min_index
, max_index
, count
,
455 type
, indices
, basevertex
));
457 CALL_DrawElementsInstancedBaseVertexBaseInstance(ctx
->CurrentServerDispatch
,
458 (mode
, count
, type
, indices
,
459 instance_count
, basevertex
,
463 /* Restore states. */
465 _mesa_InternalBindElementBuffer(ctx
, NULL
);
467 if (non_vbo_attrib_mask
) {
468 _mesa_InternalBindVertexBuffers(ctx
, attribs
, non_vbo_attrib_mask
,
473 static ALWAYS_INLINE
void
474 draw_elements_async(struct gl_context
*ctx
, GLenum mode
, GLsizei count
,
475 GLenum type
, const GLvoid
*indices
, GLsizei instance_count
,
476 GLint basevertex
, GLuint baseinstance
,
477 bool index_bounds_valid
, GLuint min_index
, GLuint max_index
,
478 struct gl_buffer_object
*index_buffer
,
479 unsigned non_vbo_attrib_mask
,
480 const struct glthread_attrib_binding
*attribs
)
482 int attribs_size
= util_bitcount(non_vbo_attrib_mask
) * sizeof(attribs
[0]);
483 int cmd_size
= sizeof(struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstance
) +
485 struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstance
*cmd
;
487 cmd
= _mesa_glthread_allocate_command(ctx
, DISPATCH_CMD_DrawElementsInstancedBaseVertexBaseInstance
, cmd_size
);
491 cmd
->indices
= indices
;
492 cmd
->instance_count
= instance_count
;
493 cmd
->basevertex
= basevertex
;
494 cmd
->baseinstance
= baseinstance
;
495 cmd
->min_index
= min_index
;
496 cmd
->max_index
= max_index
;
497 cmd
->non_vbo_attrib_mask
= non_vbo_attrib_mask
;
498 cmd
->index_bounds_valid
= index_bounds_valid
;
499 cmd
->index_buffer
= index_buffer
;
501 if (non_vbo_attrib_mask
)
502 memcpy(cmd
+ 1, attribs
, attribs_size
);
506 draw_elements(GLenum mode
, GLsizei count
, GLenum type
, const GLvoid
*indices
,
507 GLsizei instance_count
, GLint basevertex
, GLuint baseinstance
,
508 bool index_bounds_valid
, GLuint min_index
, GLuint max_index
,
509 bool compiled_into_dlist
)
511 GET_CURRENT_CONTEXT(ctx
);
513 struct glthread_vao
*vao
= ctx
->GLThread
.CurrentVAO
;
514 unsigned non_vbo_attrib_mask
= vao
->UserPointerMask
& vao
->Enabled
;
515 bool has_user_indices
= vao
->CurrentElementBufferName
== 0;
517 if (compiled_into_dlist
&& ctx
->GLThread
.inside_dlist
)
520 /* Fast path when nothing needs to be done.
522 * This is also an error path. Zero counts should still call the driver
523 * for possible GL errors.
525 if (ctx
->API
== API_OPENGL_CORE
||
526 count
<= 0 || instance_count
<= 0 || max_index
< min_index
||
527 !is_index_type_valid(type
) ||
528 (!non_vbo_attrib_mask
&& !has_user_indices
)) {
529 draw_elements_async(ctx
, mode
, count
, type
, indices
, instance_count
,
530 basevertex
, baseinstance
, index_bounds_valid
,
531 min_index
, max_index
, 0, 0, NULL
);
535 if (!ctx
->GLThread
.SupportsNonVBOUploads
)
538 bool need_index_bounds
= non_vbo_attrib_mask
& ~vao
->NonZeroDivisorMask
;
539 unsigned index_size
= get_index_size(type
);
541 if (need_index_bounds
&& !index_bounds_valid
) {
542 /* Sync if indices come from a buffer and vertices come from memory
543 * and index bounds are not valid.
545 * We would have to map the indices to compute the index bounds, and
546 * for that we would have to sync anyway.
548 if (!has_user_indices
)
551 /* Compute the index bounds. */
554 vbo_get_minmax_index_mapped(count
, index_size
,
555 ctx
->GLThread
._RestartIndex
[index_size
- 1],
556 ctx
->GLThread
._PrimitiveRestart
, indices
,
557 &min_index
, &max_index
);
558 index_bounds_valid
= true;
561 unsigned start_vertex
= min_index
+ basevertex
;
562 unsigned num_vertices
= max_index
+ 1 - min_index
;
564 /* If there is too much data to upload, sync and let the driver unroll
566 if (util_is_vbo_upload_ratio_too_large(count
, num_vertices
))
569 struct glthread_attrib_binding attribs
[VERT_ATTRIB_MAX
];
570 if (non_vbo_attrib_mask
&&
571 !upload_vertices(ctx
, non_vbo_attrib_mask
, start_vertex
, num_vertices
,
572 baseinstance
, instance_count
, attribs
))
575 /* Upload indices. */
576 struct gl_buffer_object
*index_buffer
= NULL
;
577 if (has_user_indices
)
578 index_buffer
= upload_indices(ctx
, count
, index_size
, &indices
);
580 /* Draw asynchronously. */
581 draw_elements_async(ctx
, mode
, count
, type
, indices
, instance_count
,
582 basevertex
, baseinstance
, index_bounds_valid
,
583 min_index
, max_index
, index_buffer
,
584 non_vbo_attrib_mask
, attribs
);
588 _mesa_glthread_finish_before(ctx
, "DrawElements");
590 if (compiled_into_dlist
&& ctx
->GLThread
.inside_dlist
) {
591 /* Only use the ones that are compiled into display lists. */
593 CALL_DrawElementsBaseVertex(ctx
->CurrentServerDispatch
,
594 (mode
, count
, type
, indices
, basevertex
));
595 } else if (index_bounds_valid
) {
596 CALL_DrawRangeElements(ctx
->CurrentServerDispatch
,
597 (mode
, min_index
, max_index
, count
, type
, indices
));
599 CALL_DrawElements(ctx
->CurrentServerDispatch
, (mode
, count
, type
, indices
));
601 } else if (index_bounds_valid
&& instance_count
== 1 && baseinstance
== 0) {
602 CALL_DrawRangeElementsBaseVertex(ctx
->CurrentServerDispatch
,
603 (mode
, min_index
, max_index
, count
,
604 type
, indices
, basevertex
));
606 CALL_DrawElementsInstancedBaseVertexBaseInstance(ctx
->CurrentServerDispatch
,
607 (mode
, count
, type
, indices
,
608 instance_count
, basevertex
,
613 struct marshal_cmd_MultiDrawElementsBaseVertex
615 struct marshal_cmd_base cmd_base
;
616 bool has_base_vertex
;
620 GLuint non_vbo_attrib_mask
;
621 struct gl_buffer_object
*index_buffer
;
625 _mesa_unmarshal_MultiDrawElementsBaseVertex(struct gl_context
*ctx
,
626 const struct marshal_cmd_MultiDrawElementsBaseVertex
*cmd
)
628 const GLenum mode
= cmd
->mode
;
629 const GLenum type
= cmd
->type
;
630 const GLsizei draw_count
= cmd
->draw_count
;
631 const GLuint non_vbo_attrib_mask
= cmd
->non_vbo_attrib_mask
;
632 struct gl_buffer_object
*index_buffer
= cmd
->index_buffer
;
633 const bool has_base_vertex
= cmd
->has_base_vertex
;
635 const char *variable_data
= (const char *)(cmd
+ 1);
636 const GLsizei
*count
= (GLsizei
*)variable_data
;
637 variable_data
+= sizeof(GLsizei
) * draw_count
;
638 const GLvoid
*const *indices
= (const GLvoid
*const *)variable_data
;
639 variable_data
+= sizeof(const GLvoid
*const *) * draw_count
;
640 const GLsizei
*basevertex
= NULL
;
641 if (has_base_vertex
) {
642 basevertex
= (GLsizei
*)variable_data
;
643 variable_data
+= sizeof(GLsizei
) * draw_count
;
645 const struct glthread_attrib_binding
*attribs
=
646 (const struct glthread_attrib_binding
*)variable_data
;
648 /* Bind uploaded buffers if needed. */
649 if (non_vbo_attrib_mask
) {
650 _mesa_InternalBindVertexBuffers(ctx
, attribs
, non_vbo_attrib_mask
,
654 _mesa_InternalBindElementBuffer(ctx
, index_buffer
);
658 if (has_base_vertex
) {
659 CALL_MultiDrawElementsBaseVertex(ctx
->CurrentServerDispatch
,
660 (mode
, count
, type
, indices
, draw_count
,
663 CALL_MultiDrawElementsEXT(ctx
->CurrentServerDispatch
,
664 (mode
, count
, type
, indices
, draw_count
));
667 /* Restore states. */
669 _mesa_InternalBindElementBuffer(ctx
, NULL
);
671 if (non_vbo_attrib_mask
) {
672 _mesa_InternalBindVertexBuffers(ctx
, attribs
, non_vbo_attrib_mask
,
677 static ALWAYS_INLINE
void
678 multi_draw_elements_async(struct gl_context
*ctx
, GLenum mode
,
679 const GLsizei
*count
, GLenum type
,
680 const GLvoid
*const *indices
, GLsizei draw_count
,
681 const GLsizei
*basevertex
,
682 struct gl_buffer_object
*index_buffer
,
683 unsigned non_vbo_attrib_mask
,
684 const struct glthread_attrib_binding
*attribs
)
686 int count_size
= sizeof(GLsizei
) * draw_count
;
687 int indices_size
= sizeof(indices
[0]) * draw_count
;
688 int basevertex_size
= basevertex
? sizeof(GLsizei
) * draw_count
: 0;
689 int attribs_size
= util_bitcount(non_vbo_attrib_mask
) * sizeof(attribs
[0]);
690 int cmd_size
= sizeof(struct marshal_cmd_MultiDrawElementsBaseVertex
) +
691 count_size
+ indices_size
+ basevertex_size
+ attribs_size
;
692 struct marshal_cmd_MultiDrawElementsBaseVertex
*cmd
;
694 cmd
= _mesa_glthread_allocate_command(ctx
, DISPATCH_CMD_MultiDrawElementsBaseVertex
, cmd_size
);
697 cmd
->draw_count
= draw_count
;
698 cmd
->non_vbo_attrib_mask
= non_vbo_attrib_mask
;
699 cmd
->index_buffer
= index_buffer
;
700 cmd
->has_base_vertex
= basevertex
!= NULL
;
702 char *variable_data
= (char*)(cmd
+ 1);
703 memcpy(variable_data
, count
, count_size
);
704 variable_data
+= count_size
;
705 memcpy(variable_data
, indices
, indices_size
);
706 variable_data
+= indices_size
;
709 memcpy(variable_data
, basevertex
, basevertex_size
);
710 variable_data
+= basevertex_size
;
713 if (non_vbo_attrib_mask
)
714 memcpy(variable_data
, attribs
, attribs_size
);
718 _mesa_marshal_MultiDrawElementsBaseVertex(GLenum mode
, const GLsizei
*count
,
720 const GLvoid
*const *indices
,
722 const GLsizei
*basevertex
)
724 GET_CURRENT_CONTEXT(ctx
);
726 struct glthread_vao
*vao
= ctx
->GLThread
.CurrentVAO
;
727 unsigned non_vbo_attrib_mask
= vao
->UserPointerMask
& vao
->Enabled
;
728 bool has_user_indices
= vao
->CurrentElementBufferName
== 0;
730 if (ctx
->GLThread
.inside_dlist
)
733 /* Fast path when nothing needs to be done. */
734 if (draw_count
>= 0 &&
735 (ctx
->API
== API_OPENGL_CORE
||
736 !is_index_type_valid(type
) ||
737 (!non_vbo_attrib_mask
&& !has_user_indices
))) {
738 multi_draw_elements_async(ctx
, mode
, count
, type
, indices
, draw_count
,
739 basevertex
, 0, 0, NULL
);
743 bool need_index_bounds
= non_vbo_attrib_mask
& ~vao
->NonZeroDivisorMask
;
745 /* If the draw count is too high or negative, the queue can't be used.
747 * Sync if indices come from a buffer and vertices come from memory
748 * and index bounds are not valid. We would have to map the indices
749 * to compute the index bounds, and for that we would have to sync anyway.
751 if (!ctx
->GLThread
.SupportsNonVBOUploads
||
752 draw_count
< 0 || draw_count
> MARSHAL_MAX_CMD_SIZE
/ 32 ||
753 (need_index_bounds
&& !has_user_indices
))
756 unsigned index_size
= get_index_size(type
);
757 unsigned min_index
= ~0;
758 unsigned max_index
= 0;
759 unsigned total_count
= 0;
760 unsigned num_vertices
= 0;
762 /* This is always true if there is per-vertex data that needs to be
765 if (need_index_bounds
) {
766 /* Compute the index bounds. */
767 for (unsigned i
= 0; i
< draw_count
; i
++) {
768 GLsizei vertex_count
= count
[i
];
770 if (vertex_count
< 0) {
771 /* Just call the driver to set the error. */
772 multi_draw_elements_async(ctx
, mode
, count
, type
, indices
, draw_count
,
773 basevertex
, 0, 0, NULL
);
776 if (vertex_count
== 0)
779 unsigned min
= ~0, max
= 0;
780 vbo_get_minmax_index_mapped(vertex_count
, index_size
,
781 ctx
->GLThread
._RestartIndex
[index_size
- 1],
782 ctx
->GLThread
._PrimitiveRestart
, indices
[i
],
785 min
+= basevertex
[i
];
786 max
+= basevertex
[i
];
788 min_index
= MIN2(min_index
, min
);
789 max_index
= MAX2(max_index
, max
);
790 total_count
+= vertex_count
;
793 num_vertices
= max_index
+ 1 - min_index
;
795 if (total_count
== 0 || num_vertices
== 0) {
796 /* Nothing to do, but call the driver to set possible GL errors. */
797 multi_draw_elements_async(ctx
, mode
, count
, type
, indices
, draw_count
,
798 basevertex
, 0, 0, NULL
);
802 /* If there is too much data to upload, sync and let the driver unroll
804 if (util_is_vbo_upload_ratio_too_large(total_count
, num_vertices
))
806 } else if (has_user_indices
) {
807 /* Only compute total_count for the upload of indices. */
808 for (unsigned i
= 0; i
< draw_count
; i
++) {
809 GLsizei vertex_count
= count
[i
];
811 if (vertex_count
< 0) {
812 /* Just call the driver to set the error. */
813 multi_draw_elements_async(ctx
, mode
, count
, type
, indices
, draw_count
,
814 basevertex
, 0, 0, NULL
);
817 if (vertex_count
== 0)
820 total_count
+= vertex_count
;
823 if (total_count
== 0) {
824 /* Nothing to do, but call the driver to set possible GL errors. */
825 multi_draw_elements_async(ctx
, mode
, count
, type
, indices
, draw_count
,
826 basevertex
, 0, 0, NULL
);
831 /* Upload vertices. */
832 struct glthread_attrib_binding attribs
[VERT_ATTRIB_MAX
];
833 if (non_vbo_attrib_mask
&&
834 !upload_vertices(ctx
, non_vbo_attrib_mask
, min_index
, num_vertices
,
838 /* Upload indices. */
839 struct gl_buffer_object
*index_buffer
= NULL
;
840 if (has_user_indices
) {
841 const GLvoid
**out_indices
= alloca(sizeof(indices
[0]) * draw_count
);
843 index_buffer
= upload_multi_indices(ctx
, total_count
, index_size
,
844 draw_count
, count
, indices
,
846 indices
= out_indices
;
849 /* Draw asynchronously. */
850 multi_draw_elements_async(ctx
, mode
, count
, type
, indices
, draw_count
,
851 basevertex
, index_buffer
, non_vbo_attrib_mask
,
856 _mesa_glthread_finish_before(ctx
, "DrawElements");
859 CALL_MultiDrawElementsBaseVertex(ctx
->CurrentServerDispatch
,
860 (mode
, count
, type
, indices
, draw_count
,
863 CALL_MultiDrawElementsEXT(ctx
->CurrentServerDispatch
,
864 (mode
, count
, type
, indices
, draw_count
));
869 _mesa_marshal_DrawArrays(GLenum mode
, GLint first
, GLsizei count
)
871 draw_arrays(mode
, first
, count
, 1, 0, true);
875 _mesa_marshal_DrawArraysInstancedARB(GLenum mode
, GLint first
, GLsizei count
,
876 GLsizei instance_count
)
878 draw_arrays(mode
, first
, count
, instance_count
, 0, false);
882 _mesa_marshal_DrawArraysInstancedBaseInstance(GLenum mode
, GLint first
,
883 GLsizei count
, GLsizei instance_count
,
886 draw_arrays(mode
, first
, count
, instance_count
, baseinstance
, false);
890 _mesa_marshal_DrawElements(GLenum mode
, GLsizei count
, GLenum type
,
891 const GLvoid
*indices
)
893 draw_elements(mode
, count
, type
, indices
, 1, 0, 0, false, 0, 0, true);
897 _mesa_marshal_DrawRangeElements(GLenum mode
, GLuint start
, GLuint end
,
898 GLsizei count
, GLenum type
,
899 const GLvoid
*indices
)
901 draw_elements(mode
, count
, type
, indices
, 1, 0, 0, true, start
, end
, true);
905 _mesa_marshal_DrawElementsInstancedARB(GLenum mode
, GLsizei count
, GLenum type
,
906 const GLvoid
*indices
, GLsizei instance_count
)
908 draw_elements(mode
, count
, type
, indices
, instance_count
, 0, 0, false, 0, 0, false);
912 _mesa_marshal_DrawElementsBaseVertex(GLenum mode
, GLsizei count
, GLenum type
,
913 const GLvoid
*indices
, GLint basevertex
)
915 draw_elements(mode
, count
, type
, indices
, 1, basevertex
, 0, false, 0, 0, true);
919 _mesa_marshal_DrawRangeElementsBaseVertex(GLenum mode
, GLuint start
, GLuint end
,
920 GLsizei count
, GLenum type
,
921 const GLvoid
*indices
, GLint basevertex
)
923 draw_elements(mode
, count
, type
, indices
, 1, basevertex
, 0, true, start
, end
, false);
927 _mesa_marshal_DrawElementsInstancedBaseVertex(GLenum mode
, GLsizei count
,
928 GLenum type
, const GLvoid
*indices
,
929 GLsizei instance_count
, GLint basevertex
)
931 draw_elements(mode
, count
, type
, indices
, instance_count
, basevertex
, 0, false, 0, 0, false);
935 _mesa_marshal_DrawElementsInstancedBaseInstance(GLenum mode
, GLsizei count
,
936 GLenum type
, const GLvoid
*indices
,
937 GLsizei instance_count
, GLuint baseinstance
)
939 draw_elements(mode
, count
, type
, indices
, instance_count
, 0, baseinstance
, false, 0, 0, false);
943 _mesa_marshal_DrawElementsInstancedBaseVertexBaseInstance(GLenum mode
, GLsizei count
,
944 GLenum type
, const GLvoid
*indices
,
945 GLsizei instance_count
, GLint basevertex
,
948 draw_elements(mode
, count
, type
, indices
, instance_count
, basevertex
, baseinstance
, false, 0, 0, false);
952 _mesa_marshal_MultiDrawElementsEXT(GLenum mode
, const GLsizei
*count
,
953 GLenum type
, const GLvoid
*const *indices
,
956 _mesa_marshal_MultiDrawElementsBaseVertex(mode
, count
, type
, indices
,
/* Stub: this command is never enqueued (the marshal path always uses the
 * DrawArraysInstancedBaseInstance command).
 */
void
_mesa_unmarshal_DrawArrays(struct gl_context *ctx, const struct marshal_cmd_DrawArrays *cmd)
{
   unreachable("never used - DrawArraysInstancedBaseInstance is used instead");
}
/* Stub: never enqueued; see DrawArraysInstancedBaseInstance. */
void
_mesa_unmarshal_DrawArraysInstancedARB(struct gl_context *ctx, const struct marshal_cmd_DrawArraysInstancedARB *cmd)
{
   unreachable("never used - DrawArraysInstancedBaseInstance is used instead");
}
/* Stub: never enqueued; see DrawElementsInstancedBaseVertexBaseInstance. */
void
_mesa_unmarshal_DrawElements(struct gl_context *ctx, const struct marshal_cmd_DrawElements *cmd)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
}
/* Stub: never enqueued; see DrawElementsInstancedBaseVertexBaseInstance. */
void
_mesa_unmarshal_DrawRangeElements(struct gl_context *ctx, const struct marshal_cmd_DrawRangeElements *cmd)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
}
/* Stub: never enqueued; see DrawElementsInstancedBaseVertexBaseInstance. */
void
_mesa_unmarshal_DrawElementsInstancedARB(struct gl_context *ctx, const struct marshal_cmd_DrawElementsInstancedARB *cmd)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
}
/* Stub: never enqueued; see DrawElementsInstancedBaseVertexBaseInstance. */
void
_mesa_unmarshal_DrawElementsBaseVertex(struct gl_context *ctx, const struct marshal_cmd_DrawElementsBaseVertex *cmd)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
}
/* Stub: never enqueued; see DrawElementsInstancedBaseVertexBaseInstance. */
void
_mesa_unmarshal_DrawRangeElementsBaseVertex(struct gl_context *ctx, const struct marshal_cmd_DrawRangeElementsBaseVertex *cmd)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
}
/* Stub: never enqueued; see DrawElementsInstancedBaseVertexBaseInstance. */
void
_mesa_unmarshal_DrawElementsInstancedBaseVertex(struct gl_context *ctx, const struct marshal_cmd_DrawElementsInstancedBaseVertex *cmd)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
}
/* Stub: never enqueued; see DrawElementsInstancedBaseVertexBaseInstance. */
void
_mesa_unmarshal_DrawElementsInstancedBaseInstance(struct gl_context *ctx, const struct marshal_cmd_DrawElementsInstancedBaseInstance *cmd)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
}
/* Stub: never enqueued; see MultiDrawElementsBaseVertex. */
void
_mesa_unmarshal_MultiDrawElementsEXT(struct gl_context *ctx, const struct marshal_cmd_MultiDrawElementsEXT *cmd)
{
   unreachable("never used - MultiDrawElementsBaseVertex is used instead");
}