gallium/u_threaded: remove 16 bytes from tc_batch
src/gallium/auxiliary/util/u_threaded_context.c
/**************************************************************************
 *
 * Copyright 2017 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "util/u_threaded_context.h"
#include "util/u_cpu_detect.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

/* 0 = disabled, 1 = assertions, 2 = printfs */
#define TC_DEBUG 0

#if TC_DEBUG >= 1
#define tc_assert assert
#else
#define tc_assert(x)
#endif

#if TC_DEBUG >= 2
#define tc_printf printf
#define tc_asprintf asprintf
#define tc_strcmp strcmp
#else
#define tc_printf(...)
#define tc_asprintf(...) 0
#define tc_strcmp(...) 0
#endif

#define TC_SENTINEL 0x5ca1ab1e

enum tc_call_id {
#define CALL(name) TC_CALL_##name,
#include "u_threaded_context_calls.h"
#undef CALL
   TC_NUM_CALLS,
};

typedef void (*tc_execute)(struct pipe_context *pipe, union tc_payload *payload);

static const tc_execute execute_func[TC_NUM_CALLS];

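/* Note: execute_func is presumably defined later in this file with the same
 * CALL() X-macro list that generates enum tc_call_id above, which keeps the
 * enum values and the dispatch table entries in sync automatically. */
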
static void
tc_batch_check(struct tc_batch *batch)
{
   tc_assert(batch->sentinel == TC_SENTINEL);
   tc_assert(batch->num_total_call_slots <= TC_CALLS_PER_BATCH);
}

static void
tc_debug_check(struct threaded_context *tc)
{
   for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
      tc_batch_check(&tc->batch_slots[i]);
      tc_assert(tc->batch_slots[i].pipe == tc->pipe);
   }
}

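/* Execute all queued calls in a batch. This runs on the driver thread when
 * the batch was submitted through util_queue, and on the application thread
 * when tc_sync executes unflushed calls directly (thread_index is unused).
 * Calls are laid out back to back in the slot array; each call's
 * num_call_slots advances the iterator to the next call. */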
static void
tc_batch_execute(void *job, int thread_index)
{
   struct tc_batch *batch = job;
   struct pipe_context *pipe = batch->pipe;
   struct tc_call *last = &batch->call[batch->num_total_call_slots];

   tc_batch_check(batch);

   for (struct tc_call *iter = batch->call; iter != last;
        iter += iter->num_call_slots) {
      tc_assert(iter->sentinel == TC_SENTINEL);
      execute_func[iter->call_id](pipe, &iter->payload);
   }

   tc_batch_check(batch);
   batch->num_total_call_slots = 0;
}

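/* Submit the current batch to the worker thread and advance tc->next within
 * the ring of TC_MAX_BATCHES batch slots. tc->last remembers the most
 * recently submitted batch so that tc_sync can wait on its fence. */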
static void
tc_batch_flush(struct threaded_context *tc)
{
   struct tc_batch *next = &tc->batch_slots[tc->next];

   tc_assert(next->num_total_call_slots != 0);
   tc_batch_check(next);
   tc_debug_check(tc);
   p_atomic_add(&tc->num_offloaded_slots, next->num_total_call_slots);

   util_queue_add_job(&tc->queue, next, &next->fence, tc_batch_execute,
                      NULL);
   tc->last = tc->next;
   tc->next = (tc->next + 1) % TC_MAX_BATCHES;
}

/* This is the function that adds variable-sized calls into the current
 * batch. It also flushes the batch if there is not enough space left in it.
 * All other higher-level "add" functions use it.
 */
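/* Illustrative example (the real sizes live in u_threaded_context.h):
 * if sizeof(struct tc_call) were 16 bytes with the payload at offset 8,
 * a 24-byte payload would need DIV_ROUND_UP(8 + 24, 16) = 2 call slots. */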
static union tc_payload *
tc_add_sized_call(struct threaded_context *tc, enum tc_call_id id,
                  unsigned payload_size)
{
   struct tc_batch *next = &tc->batch_slots[tc->next];
   unsigned total_size = offsetof(struct tc_call, payload) + payload_size;
   unsigned num_call_slots = DIV_ROUND_UP(total_size, sizeof(struct tc_call));

   tc_debug_check(tc);

   if (unlikely(next->num_total_call_slots + num_call_slots > TC_CALLS_PER_BATCH)) {
      tc_batch_flush(tc);
      next = &tc->batch_slots[tc->next];
      tc_assert(next->num_total_call_slots == 0);
   }

   tc_assert(util_queue_fence_is_signalled(&next->fence));

   struct tc_call *call = &next->call[next->num_total_call_slots];
   next->num_total_call_slots += num_call_slots;

   call->sentinel = TC_SENTINEL;
   call->call_id = id;
   call->num_call_slots = num_call_slots;

   tc_debug_check(tc);
   return &call->payload;
}

#define tc_add_struct_typed_call(tc, execute, type) \
   ((struct type*)tc_add_sized_call(tc, execute, sizeof(struct type)))

#define tc_add_slot_based_call(tc, execute, type, num_slots) \
   ((struct type*)tc_add_sized_call(tc, execute, \
                                    sizeof(struct type) + \
                                    sizeof(((struct type*)NULL)->slot[0]) * \
                                    (num_slots)))
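
/* tc_add_struct_typed_call allocates a payload holding exactly one struct of
 * the given type; tc_add_slot_based_call additionally reserves room for
 * num_slots elements of the struct's trailing slot[] array. */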

static union tc_payload *
tc_add_small_call(struct threaded_context *tc, enum tc_call_id id)
{
   return tc_add_sized_call(tc, id, 0);
}

static void
_tc_sync(struct threaded_context *tc, const char *info, const char *func)
{
   struct tc_batch *last = &tc->batch_slots[tc->last];
   struct tc_batch *next = &tc->batch_slots[tc->next];
   bool synced = false;

   tc_debug_check(tc);

   /* Only wait for queued calls... */
   if (!util_queue_fence_is_signalled(&last->fence)) {
      util_queue_fence_wait(&last->fence);
      synced = true;
   }

   tc_debug_check(tc);

   /* ... and execute unflushed calls directly. */
   if (next->num_total_call_slots) {
      p_atomic_add(&tc->num_direct_slots, next->num_total_call_slots);
      tc_batch_execute(next, 0);
      synced = true;
   }

   if (synced) {
      p_atomic_inc(&tc->num_syncs);

      if (tc_strcmp(func, "tc_destroy") != 0)
         tc_printf("sync %s %s\n", func, info);
   }

   tc_debug_check(tc);
}

#define tc_sync(tc) _tc_sync(tc, "", __func__)
#define tc_sync_msg(tc, info) _tc_sync(tc, info, __func__)

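/* Set an uninitialized resource pointer to a new reference. The destination
 * is NULLed first because call payloads start out as uninitialized memory,
 * and pipe_resource_reference would otherwise try to unreference whatever
 * garbage pointer happened to be there. */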
static void
tc_set_resource_reference(struct pipe_resource **dst, struct pipe_resource *src)
{
   *dst = NULL;
   pipe_resource_reference(dst, src);
}

void
threaded_resource_init(struct pipe_resource *res)
{
   struct threaded_resource *tres = threaded_resource(res);

   tres->latest = &tres->b;
   util_range_init(&tres->valid_buffer_range);
   tres->base_valid_buffer_range = &tres->valid_buffer_range;
   tres->is_shared = false;
   tres->is_user_ptr = false;
}

void
threaded_resource_deinit(struct pipe_resource *res)
{
   struct threaded_resource *tres = threaded_resource(res);

   if (tres->latest != &tres->b)
      pipe_resource_reference(&tres->latest, NULL);
   util_range_destroy(&tres->valid_buffer_range);
}

struct pipe_context *
threaded_context_unwrap_sync(struct pipe_context *pipe)
{
   if (!pipe || !pipe->priv)
      return pipe;

   tc_sync(threaded_context(pipe));
   return (struct pipe_context*)pipe->priv;
}


/********************************************************************
 * simple functions
 */

#define TC_FUNC1(func, m_payload, qualifier, type, deref, deref2) \
   static void \
   tc_call_##func(struct pipe_context *pipe, union tc_payload *payload) \
   { \
      pipe->func(pipe, deref2((type*)payload)); \
   } \
   \
   static void \
   tc_##func(struct pipe_context *_pipe, qualifier type deref param) \
   { \
      struct threaded_context *tc = threaded_context(_pipe); \
      type *p = (type*)tc_add_sized_call(tc, TC_CALL_##func, sizeof(type)); \
      *p = deref(param); \
   }

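/* For illustration, TC_FUNC1(set_blend_color, blend_color, const,
 * struct pipe_blend_color, *, ) expands roughly to:
 *
 *    static void
 *    tc_call_set_blend_color(struct pipe_context *pipe,
 *                            union tc_payload *payload)
 *    {
 *       pipe->set_blend_color(pipe, (struct pipe_blend_color*)payload);
 *    }
 *
 *    static void
 *    tc_set_blend_color(struct pipe_context *_pipe,
 *                       const struct pipe_blend_color *param)
 *    {
 *       struct threaded_context *tc = threaded_context(_pipe);
 *       struct pipe_blend_color *p = (struct pipe_blend_color*)
 *          tc_add_sized_call(tc, TC_CALL_set_blend_color,
 *                            sizeof(struct pipe_blend_color));
 *       *p = *param;
 *    }
 */
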
TC_FUNC1(set_active_query_state, flags, , boolean, , *)

TC_FUNC1(set_blend_color, blend_color, const, struct pipe_blend_color, *, )
TC_FUNC1(set_stencil_ref, stencil_ref, const, struct pipe_stencil_ref, *, )
TC_FUNC1(set_clip_state, clip_state, const, struct pipe_clip_state, *, )
TC_FUNC1(set_sample_mask, sample_mask, , unsigned, , *)
TC_FUNC1(set_min_samples, min_samples, , unsigned, , *)
TC_FUNC1(set_polygon_stipple, polygon_stipple, const, struct pipe_poly_stipple, *, )

TC_FUNC1(texture_barrier, flags, , unsigned, , *)
TC_FUNC1(memory_barrier, flags, , unsigned, , *)


/********************************************************************
 * queries
 */

static struct pipe_query *
tc_create_query(struct pipe_context *_pipe, unsigned query_type,
                unsigned index)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   return pipe->create_query(pipe, query_type, index);
}

static struct pipe_query *
tc_create_batch_query(struct pipe_context *_pipe, unsigned num_queries,
                      unsigned *query_types)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   return pipe->create_batch_query(pipe, num_queries, query_types);
}

static void
tc_call_destroy_query(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->destroy_query(pipe, payload->query);
}

static void
tc_destroy_query(struct pipe_context *_pipe, struct pipe_query *query)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_query *tq = threaded_query(query);

   if (tq->head_unflushed.next)
      LIST_DEL(&tq->head_unflushed);

   tc_add_small_call(tc, TC_CALL_destroy_query)->query = query;
}

static void
tc_call_begin_query(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->begin_query(pipe, payload->query);
}

static boolean
tc_begin_query(struct pipe_context *_pipe, struct pipe_query *query)
{
   struct threaded_context *tc = threaded_context(_pipe);
   union tc_payload *payload = tc_add_small_call(tc, TC_CALL_begin_query);

   payload->query = query;
   return true; /* we don't care about the return value for this call */
}

static void
tc_call_end_query(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->end_query(pipe, payload->query);
}

static bool
tc_end_query(struct pipe_context *_pipe, struct pipe_query *query)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_query *tq = threaded_query(query);
   union tc_payload *payload = tc_add_small_call(tc, TC_CALL_end_query);

   payload->query = query;

   tq->flushed = false;
   if (!tq->head_unflushed.next)
      LIST_ADD(&tq->head_unflushed, &tc->unflushed_queries);

   return true; /* we don't care about the return value for this call */
}

static boolean
tc_get_query_result(struct pipe_context *_pipe,
                    struct pipe_query *query, boolean wait,
                    union pipe_query_result *result)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_query *tq = threaded_query(query);
   struct pipe_context *pipe = tc->pipe;

   if (!tq->flushed)
      tc_sync_msg(tc, wait ? "wait" : "nowait");

   bool success = pipe->get_query_result(pipe, query, wait, result);

   if (success) {
      tq->flushed = true;
      if (tq->head_unflushed.next)
         LIST_DEL(&tq->head_unflushed);
   }
   return success;
}

struct tc_query_result_resource {
   struct pipe_query *query;
   boolean wait;
   enum pipe_query_value_type result_type;
   int index;
   struct pipe_resource *resource;
   unsigned offset;
};

static void
tc_call_get_query_result_resource(struct pipe_context *pipe,
                                  union tc_payload *payload)
{
   struct tc_query_result_resource *p = (struct tc_query_result_resource *)payload;

   pipe->get_query_result_resource(pipe, p->query, p->wait, p->result_type,
                                   p->index, p->resource, p->offset);
   pipe_resource_reference(&p->resource, NULL);
}

static void
tc_get_query_result_resource(struct pipe_context *_pipe,
                             struct pipe_query *query, boolean wait,
                             enum pipe_query_value_type result_type, int index,
                             struct pipe_resource *resource, unsigned offset)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_query_result_resource *p =
      tc_add_struct_typed_call(tc, TC_CALL_get_query_result_resource,
                               tc_query_result_resource);

   p->query = query;
   p->wait = wait;
   p->result_type = result_type;
   p->index = index;
   tc_set_resource_reference(&p->resource, resource);
   p->offset = offset;
}

struct tc_render_condition {
   struct pipe_query *query;
   bool condition;
   unsigned mode;
};

static void
tc_call_render_condition(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_render_condition *p = (struct tc_render_condition *)payload;
   pipe->render_condition(pipe, p->query, p->condition, p->mode);
}

static void
tc_render_condition(struct pipe_context *_pipe,
                    struct pipe_query *query, boolean condition,
                    uint mode)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_render_condition *p =
      tc_add_struct_typed_call(tc, TC_CALL_render_condition, tc_render_condition);

   p->query = query;
   p->condition = condition;
   p->mode = mode;
}


/********************************************************************
 * constant (immutable) states
 */

#define TC_CSO_CREATE(name, sname) \
   static void * \
   tc_create_##name##_state(struct pipe_context *_pipe, \
                            const struct pipe_##sname##_state *state) \
   { \
      struct pipe_context *pipe = threaded_context(_pipe)->pipe; \
      return pipe->create_##name##_state(pipe, state); \
   }

#define TC_CSO_BIND(name) TC_FUNC1(bind_##name##_state, cso, , void *, , *)
#define TC_CSO_DELETE(name) TC_FUNC1(delete_##name##_state, cso, , void *, , *)

#define TC_CSO_WHOLE2(name, sname) \
   TC_CSO_CREATE(name, sname) \
   TC_CSO_BIND(name) \
   TC_CSO_DELETE(name)

#define TC_CSO_WHOLE(name) TC_CSO_WHOLE2(name, name)

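/* For example, TC_CSO_WHOLE(blend) defines tc_create_blend_state(),
 * tc_bind_blend_state() and tc_delete_blend_state(). Creation is
 * synchronous because it must return the CSO pointer immediately, while
 * bind and delete are queued through TC_FUNC1. */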
TC_CSO_WHOLE(blend)
TC_CSO_WHOLE(rasterizer)
TC_CSO_WHOLE(depth_stencil_alpha)
TC_CSO_WHOLE(compute)
TC_CSO_WHOLE2(fs, shader)
TC_CSO_WHOLE2(vs, shader)
TC_CSO_WHOLE2(gs, shader)
TC_CSO_WHOLE2(tcs, shader)
TC_CSO_WHOLE2(tes, shader)
TC_CSO_CREATE(sampler, sampler)
TC_CSO_DELETE(sampler)
TC_CSO_BIND(vertex_elements)
TC_CSO_DELETE(vertex_elements)

static void *
tc_create_vertex_elements_state(struct pipe_context *_pipe, unsigned count,
                                const struct pipe_vertex_element *elems)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;

   return pipe->create_vertex_elements_state(pipe, count, elems);
}

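/* The trailing slot[0] member used by the payload structs below is a
 * zero-length array acting as a flexible array member:
 * tc_add_slot_based_call() allocates sizeof(struct) plus
 * count * sizeof(slot[0]) bytes, so "count" elements are stored inline
 * in the batch. */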
struct tc_sampler_states {
   ubyte shader, start, count;
   void *slot[0]; /* more will be allocated if needed */
};

static void
tc_call_bind_sampler_states(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_sampler_states *p = (struct tc_sampler_states *)payload;
   pipe->bind_sampler_states(pipe, p->shader, p->start, p->count, p->slot);
}

static void
tc_bind_sampler_states(struct pipe_context *_pipe,
                       enum pipe_shader_type shader,
                       unsigned start, unsigned count, void **states)
{
   if (!count)
      return;

   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_sampler_states *p =
      tc_add_slot_based_call(tc, TC_CALL_bind_sampler_states, tc_sampler_states, count);

   p->shader = shader;
   p->start = start;
   p->count = count;
   memcpy(p->slot, states, count * sizeof(states[0]));
}


/********************************************************************
 * immediate states
 */

static void
tc_call_set_framebuffer_state(struct pipe_context *pipe, union tc_payload *payload)
{
   struct pipe_framebuffer_state *p = (struct pipe_framebuffer_state *)payload;

   pipe->set_framebuffer_state(pipe, p);

   unsigned nr_cbufs = p->nr_cbufs;
   for (unsigned i = 0; i < nr_cbufs; i++)
      pipe_surface_reference(&p->cbufs[i], NULL);
   pipe_surface_reference(&p->zsbuf, NULL);
}

static void
tc_set_framebuffer_state(struct pipe_context *_pipe,
                         const struct pipe_framebuffer_state *fb)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_framebuffer_state *p =
      tc_add_struct_typed_call(tc, TC_CALL_set_framebuffer_state,
                               pipe_framebuffer_state);
   unsigned nr_cbufs = fb->nr_cbufs;

   p->width = fb->width;
   p->height = fb->height;
   p->samples = fb->samples;
   p->layers = fb->layers;
   p->nr_cbufs = nr_cbufs;

   for (unsigned i = 0; i < nr_cbufs; i++) {
      p->cbufs[i] = NULL;
      pipe_surface_reference(&p->cbufs[i], fb->cbufs[i]);
   }
   p->zsbuf = NULL;
   pipe_surface_reference(&p->zsbuf, fb->zsbuf);
}

static void
tc_call_set_tess_state(struct pipe_context *pipe, union tc_payload *payload)
{
   float *p = (float*)payload;
   pipe->set_tess_state(pipe, p, p + 4);
}

static void
tc_set_tess_state(struct pipe_context *_pipe,
                  const float default_outer_level[4],
                  const float default_inner_level[2])
{
   struct threaded_context *tc = threaded_context(_pipe);
   float *p = (float*)tc_add_sized_call(tc, TC_CALL_set_tess_state,
                                        sizeof(float) * 6);

   memcpy(p, default_outer_level, 4 * sizeof(float));
   memcpy(p + 4, default_inner_level, 2 * sizeof(float));
}

struct tc_constant_buffer {
   ubyte shader, index;
   struct pipe_constant_buffer cb;
};

static void
tc_call_set_constant_buffer(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_constant_buffer *p = (struct tc_constant_buffer *)payload;

   pipe->set_constant_buffer(pipe,
                             p->shader,
                             p->index,
                             &p->cb);
   pipe_resource_reference(&p->cb.buffer, NULL);
}

static void
tc_set_constant_buffer(struct pipe_context *_pipe,
                       uint shader, uint index,
                       const struct pipe_constant_buffer *cb)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_resource *buffer = NULL;
   unsigned offset;

   /* This must be done before adding set_constant_buffer, because the
    * upload could generate e.g. transfer_unmap and flush a
    * partially-uninitialized set_constant_buffer call to the driver
    * if it were done afterwards.
    */
   if (cb && cb->user_buffer) {
      u_upload_data(tc->base.const_uploader, 0, cb->buffer_size, 64,
                    cb->user_buffer, &offset, &buffer);
   }

   struct tc_constant_buffer *p =
      tc_add_struct_typed_call(tc, TC_CALL_set_constant_buffer,
                               tc_constant_buffer);
   p->shader = shader;
   p->index = index;

   if (cb) {
      if (cb->user_buffer) {
         p->cb.buffer_size = cb->buffer_size;
         p->cb.user_buffer = NULL;
         p->cb.buffer_offset = offset;
         p->cb.buffer = buffer;
      } else {
         tc_set_resource_reference(&p->cb.buffer,
                                   cb->buffer);
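         /* The memcpy below rewrites p->cb.buffer with the same pointer
          * value that was just referenced, so the reference stays valid. */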
         memcpy(&p->cb, cb, sizeof(*cb));
      }
   } else {
      memset(&p->cb, 0, sizeof(*cb));
   }
}

struct tc_scissors {
   ubyte start, count;
   struct pipe_scissor_state slot[0]; /* more will be allocated if needed */
};

static void
tc_call_set_scissor_states(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_scissors *p = (struct tc_scissors *)payload;
   pipe->set_scissor_states(pipe, p->start, p->count, p->slot);
}

static void
tc_set_scissor_states(struct pipe_context *_pipe,
                      unsigned start, unsigned count,
                      const struct pipe_scissor_state *states)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_scissors *p =
      tc_add_slot_based_call(tc, TC_CALL_set_scissor_states, tc_scissors, count);

   p->start = start;
   p->count = count;
   memcpy(&p->slot, states, count * sizeof(states[0]));
}

struct tc_viewports {
   ubyte start, count;
   struct pipe_viewport_state slot[0]; /* more will be allocated if needed */
};

static void
tc_call_set_viewport_states(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_viewports *p = (struct tc_viewports *)payload;
   pipe->set_viewport_states(pipe, p->start, p->count, p->slot);
}

static void
tc_set_viewport_states(struct pipe_context *_pipe,
                       unsigned start, unsigned count,
                       const struct pipe_viewport_state *states)
{
   if (!count)
      return;

   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_viewports *p =
      tc_add_slot_based_call(tc, TC_CALL_set_viewport_states, tc_viewports, count);

   p->start = start;
   p->count = count;
   memcpy(&p->slot, states, count * sizeof(states[0]));
}

struct tc_window_rects {
   bool include;
   ubyte count;
   struct pipe_scissor_state slot[0]; /* more will be allocated if needed */
};

static void
tc_call_set_window_rectangles(struct pipe_context *pipe,
                              union tc_payload *payload)
{
   struct tc_window_rects *p = (struct tc_window_rects *)payload;
   pipe->set_window_rectangles(pipe, p->include, p->count, p->slot);
}

static void
tc_set_window_rectangles(struct pipe_context *_pipe, boolean include,
                         unsigned count,
                         const struct pipe_scissor_state *rects)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_window_rects *p =
      tc_add_slot_based_call(tc, TC_CALL_set_window_rectangles, tc_window_rects, count);

   p->include = include;
   p->count = count;
   memcpy(p->slot, rects, count * sizeof(rects[0]));
}

struct tc_sampler_views {
   ubyte shader, start, count;
   struct pipe_sampler_view *slot[0]; /* more will be allocated if needed */
};

static void
tc_call_set_sampler_views(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_sampler_views *p = (struct tc_sampler_views *)payload;
   unsigned count = p->count;

   pipe->set_sampler_views(pipe, p->shader, p->start, p->count, p->slot);
   for (unsigned i = 0; i < count; i++)
      pipe_sampler_view_reference(&p->slot[i], NULL);
}

static void
tc_set_sampler_views(struct pipe_context *_pipe,
                     enum pipe_shader_type shader,
                     unsigned start, unsigned count,
                     struct pipe_sampler_view **views)
{
   if (!count)
      return;

   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_sampler_views *p =
      tc_add_slot_based_call(tc, TC_CALL_set_sampler_views, tc_sampler_views, count);

   p->shader = shader;
   p->start = start;
   p->count = count;

   if (views) {
      for (unsigned i = 0; i < count; i++) {
         p->slot[i] = NULL;
         pipe_sampler_view_reference(&p->slot[i], views[i]);
      }
   } else {
      memset(p->slot, 0, count * sizeof(views[0]));
   }
}

struct tc_shader_images {
   ubyte shader, start, count;
   bool unbind;
   struct pipe_image_view slot[0]; /* more will be allocated if needed */
};

static void
tc_call_set_shader_images(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_shader_images *p = (struct tc_shader_images *)payload;
   unsigned count = p->count;

   if (p->unbind) {
      pipe->set_shader_images(pipe, p->shader, p->start, p->count, NULL);
      return;
   }

   pipe->set_shader_images(pipe, p->shader, p->start, p->count, p->slot);

   for (unsigned i = 0; i < count; i++)
      pipe_resource_reference(&p->slot[i].resource, NULL);
}

static void
tc_set_shader_images(struct pipe_context *_pipe,
                     enum pipe_shader_type shader,
                     unsigned start, unsigned count,
                     const struct pipe_image_view *images)
{
   if (!count)
      return;

   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_shader_images *p =
      tc_add_slot_based_call(tc, TC_CALL_set_shader_images, tc_shader_images,
                             images ? count : 0);

   p->shader = shader;
   p->start = start;
   p->count = count;
   p->unbind = images == NULL;

   if (images) {
      for (unsigned i = 0; i < count; i++) {
         tc_set_resource_reference(&p->slot[i].resource, images[i].resource);

         if (images[i].access & PIPE_IMAGE_ACCESS_WRITE &&
             images[i].resource &&
             images[i].resource->target == PIPE_BUFFER) {
            struct threaded_resource *tres =
               threaded_resource(images[i].resource);

            util_range_add(&tres->valid_buffer_range, images[i].u.buf.offset,
                           images[i].u.buf.offset + images[i].u.buf.size);
         }
      }
      memcpy(p->slot, images, count * sizeof(images[0]));
   }
}

struct tc_shader_buffers {
   ubyte shader, start, count;
   bool unbind;
   struct pipe_shader_buffer slot[0]; /* more will be allocated if needed */
};

static void
tc_call_set_shader_buffers(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_shader_buffers *p = (struct tc_shader_buffers *)payload;
   unsigned count = p->count;

   if (p->unbind) {
      pipe->set_shader_buffers(pipe, p->shader, p->start, p->count, NULL);
      return;
   }

   pipe->set_shader_buffers(pipe, p->shader, p->start, p->count, p->slot);

   for (unsigned i = 0; i < count; i++)
      pipe_resource_reference(&p->slot[i].buffer, NULL);
}

static void
tc_set_shader_buffers(struct pipe_context *_pipe, unsigned shader,
                      unsigned start, unsigned count,
                      const struct pipe_shader_buffer *buffers)
{
   if (!count)
      return;

   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_shader_buffers *p =
      tc_add_slot_based_call(tc, TC_CALL_set_shader_buffers, tc_shader_buffers,
                             buffers ? count : 0);

   p->shader = shader;
   p->start = start;
   p->count = count;
   p->unbind = buffers == NULL;

   if (buffers) {
      for (unsigned i = 0; i < count; i++) {
         struct pipe_shader_buffer *dst = &p->slot[i];
         const struct pipe_shader_buffer *src = buffers + i;

         tc_set_resource_reference(&dst->buffer, src->buffer);
         dst->buffer_offset = src->buffer_offset;
         dst->buffer_size = src->buffer_size;

         if (src->buffer) {
            struct threaded_resource *tres = threaded_resource(src->buffer);

            util_range_add(&tres->valid_buffer_range, src->buffer_offset,
                           src->buffer_offset + src->buffer_size);
         }
      }
   }
}

struct tc_vertex_buffers {
   ubyte start, count;
   bool unbind;
   struct pipe_vertex_buffer slot[0]; /* more will be allocated if needed */
};

static void
tc_call_set_vertex_buffers(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_vertex_buffers *p = (struct tc_vertex_buffers *)payload;
   unsigned count = p->count;

   if (p->unbind) {
      pipe->set_vertex_buffers(pipe, p->start, count, NULL);
      return;
   }

   for (unsigned i = 0; i < count; i++)
      tc_assert(!p->slot[i].is_user_buffer);

   pipe->set_vertex_buffers(pipe, p->start, count, p->slot);
   for (unsigned i = 0; i < count; i++)
      pipe_resource_reference(&p->slot[i].buffer.resource, NULL);
}

static void
tc_set_vertex_buffers(struct pipe_context *_pipe,
                      unsigned start, unsigned count,
                      const struct pipe_vertex_buffer *buffers)
{
   struct threaded_context *tc = threaded_context(_pipe);

   if (!count)
      return;

   if (buffers) {
      struct tc_vertex_buffers *p =
         tc_add_slot_based_call(tc, TC_CALL_set_vertex_buffers, tc_vertex_buffers, count);
      p->start = start;
      p->count = count;
      p->unbind = false;

      for (unsigned i = 0; i < count; i++) {
         struct pipe_vertex_buffer *dst = &p->slot[i];
         const struct pipe_vertex_buffer *src = buffers + i;

         tc_assert(!src->is_user_buffer);
         dst->stride = src->stride;
         dst->is_user_buffer = false;
         tc_set_resource_reference(&dst->buffer.resource,
                                   src->buffer.resource);
         dst->buffer_offset = src->buffer_offset;
      }
   } else {
      struct tc_vertex_buffers *p =
         tc_add_slot_based_call(tc, TC_CALL_set_vertex_buffers, tc_vertex_buffers, 0);
      p->start = start;
      p->count = count;
      p->unbind = true;
   }
}

struct tc_stream_outputs {
   unsigned count;
   struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
   unsigned offsets[PIPE_MAX_SO_BUFFERS];
};

static void
tc_call_set_stream_output_targets(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_stream_outputs *p = (struct tc_stream_outputs *)payload;
   unsigned count = p->count;

   pipe->set_stream_output_targets(pipe, count, p->targets, p->offsets);
   for (unsigned i = 0; i < count; i++)
      pipe_so_target_reference(&p->targets[i], NULL);
}

static void
tc_set_stream_output_targets(struct pipe_context *_pipe,
                             unsigned count,
                             struct pipe_stream_output_target **tgs,
                             const unsigned *offsets)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_stream_outputs *p =
      tc_add_struct_typed_call(tc, TC_CALL_set_stream_output_targets,
                               tc_stream_outputs);

   for (unsigned i = 0; i < count; i++) {
      p->targets[i] = NULL;
      pipe_so_target_reference(&p->targets[i], tgs[i]);
   }
   p->count = count;
   memcpy(p->offsets, offsets, count * sizeof(unsigned));
}

static void
tc_set_compute_resources(struct pipe_context *_pipe, unsigned start,
                         unsigned count, struct pipe_surface **resources)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->set_compute_resources(pipe, start, count, resources);
}

static void
tc_set_global_binding(struct pipe_context *_pipe, unsigned first,
                      unsigned count, struct pipe_resource **resources,
                      uint32_t **handles)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->set_global_binding(pipe, first, count, resources, handles);
}


/********************************************************************
 * views
 */

static struct pipe_surface *
tc_create_surface(struct pipe_context *_pipe,
                  struct pipe_resource *resource,
                  const struct pipe_surface *surf_tmpl)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;
   struct pipe_surface *view =
      pipe->create_surface(pipe, resource, surf_tmpl);

   if (view)
      view->context = _pipe;
   return view;
}

static void
tc_surface_destroy(struct pipe_context *_pipe,
                   struct pipe_surface *surf)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;

   pipe->surface_destroy(pipe, surf);
}

static struct pipe_sampler_view *
tc_create_sampler_view(struct pipe_context *_pipe,
                       struct pipe_resource *resource,
                       const struct pipe_sampler_view *templ)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;
   struct pipe_sampler_view *view =
      pipe->create_sampler_view(pipe, resource, templ);

   if (view)
      view->context = _pipe;
   return view;
}

static void
tc_sampler_view_destroy(struct pipe_context *_pipe,
                        struct pipe_sampler_view *view)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;

   pipe->sampler_view_destroy(pipe, view);
}

static struct pipe_stream_output_target *
tc_create_stream_output_target(struct pipe_context *_pipe,
                               struct pipe_resource *res,
                               unsigned buffer_offset,
                               unsigned buffer_size)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;
   struct threaded_resource *tres = threaded_resource(res);
   struct pipe_stream_output_target *view;

   tc_sync(threaded_context(_pipe));
   util_range_add(&tres->valid_buffer_range, buffer_offset,
                  buffer_offset + buffer_size);

   view = pipe->create_stream_output_target(pipe, res, buffer_offset,
                                            buffer_size);
   if (view)
      view->context = _pipe;
   return view;
}

static void
tc_stream_output_target_destroy(struct pipe_context *_pipe,
                                struct pipe_stream_output_target *target)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;

   pipe->stream_output_target_destroy(pipe, target);
}


/********************************************************************
 * transfer
 */

struct tc_replace_buffer_storage {
   struct pipe_resource *dst;
   struct pipe_resource *src;
   tc_replace_buffer_storage_func func;
};

static void
tc_call_replace_buffer_storage(struct pipe_context *pipe,
                               union tc_payload *payload)
{
   struct tc_replace_buffer_storage *p =
      (struct tc_replace_buffer_storage *)payload;

   p->func(pipe, p->dst, p->src);
   pipe_resource_reference(&p->dst, NULL);
   pipe_resource_reference(&p->src, NULL);
}

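/* Invalidate a buffer by allocating fresh storage on the application thread
 * and queuing a call that tells the driver to swap the old storage for the
 * new one. This is how DISCARD_WHOLE_RESOURCE is handled without stalling
 * on the driver thread. */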
static bool
tc_invalidate_buffer(struct threaded_context *tc,
                     struct threaded_resource *tbuf)
{
   /* We can't check if the buffer is idle, so we invalidate it
    * unconditionally. */
   struct pipe_screen *screen = tc->base.screen;
   struct pipe_resource *new_buf;

   /* Shared, pinned, and sparse buffers can't be reallocated. */
   if (tbuf->is_shared ||
       tbuf->is_user_ptr ||
       tbuf->b.flags & PIPE_RESOURCE_FLAG_SPARSE)
      return false;

   /* Allocate a new one. */
   new_buf = screen->resource_create(screen, &tbuf->b);
   if (!new_buf)
      return false;

   /* Replace the "latest" pointer. */
   if (tbuf->latest != &tbuf->b)
      pipe_resource_reference(&tbuf->latest, NULL);

   tbuf->latest = new_buf;
   util_range_set_empty(&tbuf->valid_buffer_range);

   /* The valid range should point to the original buffer. */
   threaded_resource(new_buf)->base_valid_buffer_range =
      &tbuf->valid_buffer_range;

   /* Enqueue storage replacement of the original buffer. */
   struct tc_replace_buffer_storage *p =
      tc_add_struct_typed_call(tc, TC_CALL_replace_buffer_storage,
                               tc_replace_buffer_storage);

   p->func = tc->replace_buffer_storage;
   tc_set_resource_reference(&p->dst, &tbuf->b);
   tc_set_resource_reference(&p->src, new_buf);
   return true;
}

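/* Rewrite the transfer usage flags so that the mapping can be done safely,
 * and where possible without syncing, in the application thread: reads never
 * invalidate, never-written ranges are mapped unsynchronized, and
 * whole-resource discards become buffer invalidations via
 * tc_invalidate_buffer above. */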
static unsigned
tc_improve_map_buffer_flags(struct threaded_context *tc,
                            struct threaded_resource *tres, unsigned usage,
                            unsigned offset, unsigned size)
{
   /* Sparse buffers can't be mapped directly and can't be reallocated
    * (fully invalidated). That may just be a radeonsi limitation, but
    * the threaded context must obey it with radeonsi.
    */
   if (tres->b.flags & PIPE_RESOURCE_FLAG_SPARSE) {
      /* We can use DISCARD_RANGE instead of a full discard. This is the only
       * fast path for sparse buffers that doesn't need thread synchronization.
       */
      if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
         usage |= PIPE_TRANSFER_DISCARD_RANGE;

      /* Allow DISCARD_WHOLE_RESOURCE and inferring UNSYNCHRONIZED in drivers.
       * The threaded context doesn't do unsynchronized mappings and
       * invalidations of sparse buffers, therefore correct driver behavior
       * won't result in incorrect behavior with the threaded context.
       */
      return usage;
   }

   /* Handle CPU reads trivially. */
   if (usage & PIPE_TRANSFER_READ) {
      /* Drivers aren't allowed to do buffer invalidations. */
      return (usage & ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) |
             TC_TRANSFER_MAP_NO_INVALIDATE |
             TC_TRANSFER_MAP_IGNORE_VALID_RANGE;
   }

   /* See if the buffer range being mapped has never been initialized,
    * in which case it can be mapped unsynchronized. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
       !tres->is_shared &&
       !util_ranges_intersect(&tres->valid_buffer_range, offset, offset + size))
      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* If discarding the entire range, discard the whole resource instead. */
      if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
          offset == 0 && size == tres->b.width0)
         usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;

      /* Discard the whole resource if needed. */
      if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         if (tc_invalidate_buffer(tc, tres))
            usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
         else
            usage |= PIPE_TRANSFER_DISCARD_RANGE; /* fallback */
      }
   }

   /* We won't need this flag anymore. */
   /* TODO: We might not need TC_TRANSFER_MAP_NO_INVALIDATE with this. */
   usage &= ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;

   /* GL_AMD_pinned_memory and persistent mappings can't use staging
    * buffers. */
   if (usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
                PIPE_TRANSFER_PERSISTENT) ||
       tres->is_user_ptr)
      usage &= ~PIPE_TRANSFER_DISCARD_RANGE;

   /* Unsynchronized buffer mappings don't have to synchronize the thread. */
   if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
      usage |= TC_TRANSFER_MAP_THREADED_UNSYNC; /* notify the driver */

   /* Never invalidate inside the driver and never infer "unsynchronized". */
   return usage |
          TC_TRANSFER_MAP_NO_INVALIDATE |
          TC_TRANSFER_MAP_IGNORE_VALID_RANGE;
}

static void *
tc_transfer_map(struct pipe_context *_pipe,
                struct pipe_resource *resource, unsigned level,
                unsigned usage, const struct pipe_box *box,
                struct pipe_transfer **transfer)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_resource *tres = threaded_resource(resource);
   struct pipe_context *pipe = tc->pipe;

   if (resource->target == PIPE_BUFFER) {
      usage = tc_improve_map_buffer_flags(tc, tres, usage, box->x, box->width);

      /* Do a staging transfer within the threaded context. The driver should
       * only get resource_copy_region.
       */
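      /* Note: the upload below over-allocates by (box->x % alignment) bytes
       * and the same offset is added to the returned pointer, so the CPU
       * mapping keeps the misalignment of the original buffer offset.
       * tc_buffer_do_flush_region() applies the same offset when copying the
       * staging buffer back into the real one. */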
      if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
         struct threaded_transfer *ttrans = slab_alloc(&tc->pool_transfers);
         uint8_t *map;

         ttrans->staging = NULL;

         u_upload_alloc(tc->base.stream_uploader, 0,
                        box->width + (box->x % tc->map_buffer_alignment),
                        64, &ttrans->offset, &ttrans->staging, (void**)&map);
         if (!map) {
            slab_free(&tc->pool_transfers, ttrans);
            return NULL;
         }

         tc_set_resource_reference(&ttrans->b.resource, resource);
         ttrans->b.level = 0;
         ttrans->b.usage = usage;
         ttrans->b.box = *box;
         ttrans->b.stride = 0;
         ttrans->b.layer_stride = 0;
         *transfer = &ttrans->b;
         return map + (box->x % tc->map_buffer_alignment);
      }
   }

   /* Unsynchronized buffer mappings don't have to synchronize the thread. */
   if (!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC))
      tc_sync_msg(tc, resource->target != PIPE_BUFFER ? " texture" :
                      usage & PIPE_TRANSFER_DISCARD_RANGE ? " discard_range" :
                      usage & PIPE_TRANSFER_READ ? " read" : " ??");

   return pipe->transfer_map(pipe, tres->latest ? tres->latest : resource,
                             level, usage, box, transfer);
}

struct tc_transfer_flush_region {
   struct pipe_transfer *transfer;
   struct pipe_box box;
};

static void
tc_call_transfer_flush_region(struct pipe_context *pipe,
                              union tc_payload *payload)
{
   struct tc_transfer_flush_region *p =
      (struct tc_transfer_flush_region *)payload;

   pipe->transfer_flush_region(pipe, p->transfer, &p->box);
}

struct tc_resource_copy_region {
   struct pipe_resource *dst;
   unsigned dst_level;
   unsigned dstx, dsty, dstz;
   struct pipe_resource *src;
   unsigned src_level;
   struct pipe_box src_box;
};

static void
tc_resource_copy_region(struct pipe_context *_pipe,
                        struct pipe_resource *dst, unsigned dst_level,
                        unsigned dstx, unsigned dsty, unsigned dstz,
                        struct pipe_resource *src, unsigned src_level,
                        const struct pipe_box *src_box);

static void
tc_buffer_do_flush_region(struct threaded_context *tc,
                          struct threaded_transfer *ttrans,
                          const struct pipe_box *box)
{
   struct threaded_resource *tres = threaded_resource(ttrans->b.resource);

   if (ttrans->staging) {
      struct pipe_box src_box;

      u_box_1d(ttrans->offset + box->x % tc->map_buffer_alignment,
               box->width, &src_box);

      /* Copy the staging buffer into the original one. */
      tc_resource_copy_region(&tc->base, ttrans->b.resource, 0, box->x, 0, 0,
                              ttrans->staging, 0, &src_box);
   }

   util_range_add(tres->base_valid_buffer_range, box->x, box->x + box->width);
}

static void
tc_transfer_flush_region(struct pipe_context *_pipe,
                         struct pipe_transfer *transfer,
                         const struct pipe_box *rel_box)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_transfer *ttrans = threaded_transfer(transfer);
   struct threaded_resource *tres = threaded_resource(transfer->resource);
   unsigned required_usage = PIPE_TRANSFER_WRITE |
                             PIPE_TRANSFER_FLUSH_EXPLICIT;

   if (tres->b.target == PIPE_BUFFER) {
      if ((transfer->usage & required_usage) == required_usage) {
         struct pipe_box box;

         u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
         tc_buffer_do_flush_region(tc, ttrans, &box);
      }

      /* Staging transfers don't send the call to the driver. */
      if (ttrans->staging)
         return;
   }

   struct tc_transfer_flush_region *p =
      tc_add_struct_typed_call(tc, TC_CALL_transfer_flush_region,
                               tc_transfer_flush_region);
   p->transfer = transfer;
   p->box = *rel_box;
}

static void
tc_call_transfer_unmap(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->transfer_unmap(pipe, payload->transfer);
}

static void
tc_transfer_unmap(struct pipe_context *_pipe, struct pipe_transfer *transfer)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_transfer *ttrans = threaded_transfer(transfer);
   struct threaded_resource *tres = threaded_resource(transfer->resource);

   if (tres->b.target == PIPE_BUFFER) {
      if (transfer->usage & PIPE_TRANSFER_WRITE &&
          !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
         tc_buffer_do_flush_region(tc, ttrans, &transfer->box);

      /* Staging transfers don't send the call to the driver. */
      if (ttrans->staging) {
         pipe_resource_reference(&ttrans->staging, NULL);
         pipe_resource_reference(&ttrans->b.resource, NULL);
         slab_free(&tc->pool_transfers, ttrans);
         return;
      }
   }

   tc_add_small_call(tc, TC_CALL_transfer_unmap)->transfer = transfer;
}

struct tc_buffer_subdata {
   struct pipe_resource *resource;
   unsigned usage, offset, size;
   char slot[0]; /* more will be allocated if needed */
};

static void
tc_call_buffer_subdata(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_buffer_subdata *p = (struct tc_buffer_subdata *)payload;

   pipe->buffer_subdata(pipe, p->resource, p->usage, p->offset, p->size,
                        p->slot);
   pipe_resource_reference(&p->resource, NULL);
}

static void
tc_buffer_subdata(struct pipe_context *_pipe,
                  struct pipe_resource *resource,
                  unsigned usage, unsigned offset,
                  unsigned size, const void *data)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_resource *tres = threaded_resource(resource);

   if (!size)
      return;

   usage |= PIPE_TRANSFER_WRITE |
            PIPE_TRANSFER_DISCARD_RANGE;

   usage = tc_improve_map_buffer_flags(tc, tres, usage, offset, size);

   /* Unsynchronized and big transfers should use transfer_map. Also handle
    * full invalidations, because drivers aren't allowed to do them.
    */
   if (usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
                PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) ||
       size > TC_MAX_SUBDATA_BYTES) {
      struct pipe_transfer *transfer;
      struct pipe_box box;
      uint8_t *map = NULL;

      u_box_1d(offset, size, &box);

      map = tc_transfer_map(_pipe, resource, 0, usage, &box, &transfer);
      if (map) {
         memcpy(map, data, size);
         tc_transfer_unmap(_pipe, transfer);
      }
      return;
   }

   util_range_add(&tres->valid_buffer_range, offset, offset + size);

   /* The upload is small. Enqueue it. */
   struct tc_buffer_subdata *p =
      tc_add_slot_based_call(tc, TC_CALL_buffer_subdata, tc_buffer_subdata, size);

   tc_set_resource_reference(&p->resource, resource);
   p->usage = usage;
   p->offset = offset;
   p->size = size;
   memcpy(p->slot, data, size);
}

struct tc_texture_subdata {
   struct pipe_resource *resource;
   unsigned level, usage, stride, layer_stride;
   struct pipe_box box;
   char slot[0]; /* more will be allocated if needed */
};

static void
tc_call_texture_subdata(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_texture_subdata *p = (struct tc_texture_subdata *)payload;

   pipe->texture_subdata(pipe, p->resource, p->level, p->usage, &p->box,
                         p->slot, p->stride, p->layer_stride);
   pipe_resource_reference(&p->resource, NULL);
}

static void
tc_texture_subdata(struct pipe_context *_pipe,
                   struct pipe_resource *resource,
                   unsigned level, unsigned usage,
                   const struct pipe_box *box,
                   const void *data, unsigned stride,
                   unsigned layer_stride)
{
   struct threaded_context *tc = threaded_context(_pipe);
   unsigned size;

   assert(box->height >= 1);
   assert(box->depth >= 1);

   size = (box->depth - 1) * layer_stride +
          (box->height - 1) * stride +
          box->width * util_format_get_blocksize(resource->format);
   if (!size)
      return;

   /* Small uploads can be enqueued, big uploads must sync. */
   if (size <= TC_MAX_SUBDATA_BYTES) {
      struct tc_texture_subdata *p =
         tc_add_slot_based_call(tc, TC_CALL_texture_subdata, tc_texture_subdata, size);

      tc_set_resource_reference(&p->resource, resource);
      p->level = level;
      p->usage = usage;
      p->box = *box;
      p->stride = stride;
      p->layer_stride = layer_stride;
      memcpy(p->slot, data, size);
   } else {
      struct pipe_context *pipe = tc->pipe;

      tc_sync(tc);
      pipe->texture_subdata(pipe, resource, level, usage, box, data,
                            stride, layer_stride);
   }
}


/********************************************************************
 * miscellaneous
 */

#define TC_FUNC_SYNC_RET0(ret_type, func) \
   static ret_type \
   tc_##func(struct pipe_context *_pipe) \
   { \
      struct threaded_context *tc = threaded_context(_pipe); \
      struct pipe_context *pipe = tc->pipe; \
      tc_sync(tc); \
      return pipe->func(pipe); \
   }

TC_FUNC_SYNC_RET0(enum pipe_reset_status, get_device_reset_status)
TC_FUNC_SYNC_RET0(uint64_t, get_timestamp)

static void
tc_get_sample_position(struct pipe_context *_pipe,
                       unsigned sample_count, unsigned sample_index,
                       float *out_value)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->get_sample_position(pipe, sample_count, sample_index,
                             out_value);
}

static void
tc_set_device_reset_callback(struct pipe_context *_pipe,
                             const struct pipe_device_reset_callback *cb)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->set_device_reset_callback(pipe, cb);
}

struct tc_string_marker {
   int len;
   char slot[0]; /* more will be allocated if needed */
};

static void
tc_call_emit_string_marker(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_string_marker *p = (struct tc_string_marker *)payload;
   pipe->emit_string_marker(pipe, p->slot, p->len);
}

static void
tc_emit_string_marker(struct pipe_context *_pipe,
                      const char *string, int len)
{
   struct threaded_context *tc = threaded_context(_pipe);

   if (len <= TC_MAX_STRING_MARKER_BYTES) {
      struct tc_string_marker *p =
         tc_add_slot_based_call(tc, TC_CALL_emit_string_marker, tc_string_marker, len);

      memcpy(p->slot, string, len);
      p->len = len;
   } else {
      struct pipe_context *pipe = tc->pipe;

      tc_sync(tc);
      pipe->emit_string_marker(pipe, string, len);
   }
}

static void
tc_dump_debug_state(struct pipe_context *_pipe, FILE *stream,
                    unsigned flags)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->dump_debug_state(pipe, stream, flags);
}

static void
tc_set_debug_callback(struct pipe_context *_pipe,
                      const struct pipe_debug_callback *cb)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   /* Drop all synchronous debug callbacks. Drivers are expected to be OK
    * with this. shader-db will use an environment variable to disable
    * the threaded context.
    */
   if (cb && cb->debug_message && !cb->async)
      return;

   tc_sync(tc);
   pipe->set_debug_callback(pipe, cb);
}

static void
tc_create_fence_fd(struct pipe_context *_pipe,
                   struct pipe_fence_handle **fence, int fd)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->create_fence_fd(pipe, fence, fd);
}

static void
tc_fence_server_sync(struct pipe_context *_pipe,
                     struct pipe_fence_handle *fence)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->fence_server_sync(pipe, fence);
}

static struct pipe_video_codec *
tc_create_video_codec(struct pipe_context *_pipe,
                      const struct pipe_video_codec *templ)
{
   unreachable("Threaded context should not be enabled for video APIs");
   return NULL;
}

static struct pipe_video_buffer *
tc_create_video_buffer(struct pipe_context *_pipe,
                       const struct pipe_video_buffer *templ)
{
   unreachable("Threaded context should not be enabled for video APIs");
   return NULL;
}


/********************************************************************
 * draw, launch, clear, blit, copy, flush
 */

static void
tc_flush(struct pipe_context *_pipe, struct pipe_fence_handle **fence,
         unsigned flags)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;
   struct threaded_query *tq, *tmp;

   LIST_FOR_EACH_ENTRY_SAFE(tq, tmp, &tc->unflushed_queries, head_unflushed) {
      tq->flushed = true;
      LIST_DEL(&tq->head_unflushed);
   }

   /* TODO: deferred flushes? */
   tc_sync_msg(tc, flags & PIPE_FLUSH_END_OF_FRAME ? "end of frame" :
                   flags & PIPE_FLUSH_DEFERRED ? "deferred fence" : "normal");
   pipe->flush(pipe, fence, flags);
}

/* This is actually variable-sized, because indirect isn't allocated if it's
 * not needed. */
struct tc_full_draw_info {
   struct pipe_draw_info draw;
   struct pipe_draw_indirect_info indirect;
};

static void
tc_call_draw_vbo(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_full_draw_info *info = (struct tc_full_draw_info*)payload;

   pipe->draw_vbo(pipe, &info->draw);
   pipe_so_target_reference(&info->draw.count_from_stream_output, NULL);
   if (info->draw.index_size)
      pipe_resource_reference(&info->draw.index.resource, NULL);
   if (info->draw.indirect) {
      pipe_resource_reference(&info->indirect.buffer, NULL);
      pipe_resource_reference(&info->indirect.indirect_draw_count, NULL);
   }
}

static struct tc_full_draw_info *
tc_add_draw_vbo(struct pipe_context *_pipe, bool indirect)
{
   return (struct tc_full_draw_info*)
          tc_add_sized_call(threaded_context(_pipe), TC_CALL_draw_vbo,
                            indirect ? sizeof(struct tc_full_draw_info) :
                                       sizeof(struct pipe_draw_info));
}

static void
tc_draw_vbo(struct pipe_context *_pipe, const struct pipe_draw_info *info)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_draw_indirect_info *indirect = info->indirect;
   unsigned index_size = info->index_size;
   bool has_user_indices = info->has_user_indices;

   if (index_size && has_user_indices) {
      unsigned size = info->count * index_size;
      struct pipe_resource *buffer = NULL;
      unsigned offset;

      tc_assert(!indirect);

      /* This must be done before adding draw_vbo, because the upload could
       * generate e.g. transfer_unmap and flush a partially-uninitialized
       * draw_vbo call to the driver if it were done afterwards.
       */
      u_upload_data(tc->base.stream_uploader, 0, size, 4, info->index.user,
                    &offset, &buffer);
      if (unlikely(!buffer))
         return;

      struct tc_full_draw_info *p = tc_add_draw_vbo(_pipe, false);
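      /* Take the stream-output reference before the memcpy below; the
       * memcpy rewrites the field with the same pointer value, so the
       * reference count stays correct. */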
1735 p->draw.count_from_stream_output = NULL;
1736 pipe_so_target_reference(&p->draw.count_from_stream_output,
1737 info->count_from_stream_output);
1738 memcpy(&p->draw, info, sizeof(*info));
1739 p->draw.has_user_indices = false;
1740 p->draw.index.resource = buffer;
1741 p->draw.start = offset / index_size;
1742 } else {
1743 /* Non-indexed call or indexed with a real index buffer. */
1744 struct tc_full_draw_info *p = tc_add_draw_vbo(_pipe, indirect != NULL);
1745 p->draw.count_from_stream_output = NULL;
1746 pipe_so_target_reference(&p->draw.count_from_stream_output,
1747 info->count_from_stream_output);
1748 if (index_size) {
1749 tc_set_resource_reference(&p->draw.index.resource,
1750 info->index.resource);
1751 }
1752 memcpy(&p->draw, info, sizeof(*info));
1753
1754 if (indirect) {
1755 tc_set_resource_reference(&p->draw.indirect->buffer, indirect->buffer);
1756 tc_set_resource_reference(&p->indirect.indirect_draw_count,
1757 indirect->indirect_draw_count);
1758 memcpy(&p->indirect, indirect, sizeof(*indirect));
1759 p->draw.indirect = &p->indirect;
1760 }
1761 }
1762 }
1763
1764 static void
1765 tc_call_launch_grid(struct pipe_context *pipe, union tc_payload *payload)
1766 {
1767 struct pipe_grid_info *p = (struct pipe_grid_info *)payload;
1768
1769 pipe->launch_grid(pipe, p);
1770 pipe_resource_reference(&p->indirect, NULL);
1771 }
1772
1773 static void
1774 tc_launch_grid(struct pipe_context *_pipe,
1775 const struct pipe_grid_info *info)
1776 {
1777 struct threaded_context *tc = threaded_context(_pipe);
1778 struct pipe_grid_info *p = tc_add_struct_typed_call(tc, TC_CALL_launch_grid,
1779 pipe_grid_info);
1780 assert(info->input == NULL);
1781
1782 tc_set_resource_reference(&p->indirect, info->indirect);
1783 memcpy(p, info, sizeof(*info));
1784 }
1785
static void
tc_call_resource_copy_region(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_resource_copy_region *p = (struct tc_resource_copy_region *)payload;

   pipe->resource_copy_region(pipe, p->dst, p->dst_level, p->dstx, p->dsty,
                              p->dstz, p->src, p->src_level, &p->src_box);
   pipe_resource_reference(&p->dst, NULL);
   pipe_resource_reference(&p->src, NULL);
}

static void
tc_resource_copy_region(struct pipe_context *_pipe,
                        struct pipe_resource *dst, unsigned dst_level,
                        unsigned dstx, unsigned dsty, unsigned dstz,
                        struct pipe_resource *src, unsigned src_level,
                        const struct pipe_box *src_box)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_resource *tdst = threaded_resource(dst);
   struct tc_resource_copy_region *p =
      tc_add_struct_typed_call(tc, TC_CALL_resource_copy_region,
                               tc_resource_copy_region);

   tc_set_resource_reference(&p->dst, dst);
   p->dst_level = dst_level;
   p->dstx = dstx;
   p->dsty = dsty;
   p->dstz = dstz;
   tc_set_resource_reference(&p->src, src);
   p->src_level = src_level;
   p->src_box = *src_box;

   /* The copy writes this range, so mark it as containing valid data. */
   if (dst->target == PIPE_BUFFER)
      util_range_add(&tdst->valid_buffer_range, dstx, dstx + src_box->width);
}

static void
tc_call_blit(struct pipe_context *pipe, union tc_payload *payload)
{
   struct pipe_blit_info *blit = (struct pipe_blit_info*)payload;

   pipe->blit(pipe, blit);
   pipe_resource_reference(&blit->dst.resource, NULL);
   pipe_resource_reference(&blit->src.resource, NULL);
}

static void
tc_blit(struct pipe_context *_pipe, const struct pipe_blit_info *info)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_blit_info *blit =
      tc_add_struct_typed_call(tc, TC_CALL_blit, pipe_blit_info);

   tc_set_resource_reference(&blit->dst.resource, info->dst.resource);
   tc_set_resource_reference(&blit->src.resource, info->src.resource);
   memcpy(blit, info, sizeof(*info));
}

struct tc_generate_mipmap {
   struct pipe_resource *res;
   enum pipe_format format;
   unsigned base_level;
   unsigned last_level;
   unsigned first_layer;
   unsigned last_layer;
};

static void
tc_call_generate_mipmap(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_generate_mipmap *p = (struct tc_generate_mipmap *)payload;
   bool result = pipe->generate_mipmap(pipe, p->res, p->format, p->base_level,
                                       p->last_level, p->first_layer,
                                       p->last_layer);
   assert(result);
   (void)result; /* silence the unused-variable warning in release builds */
   pipe_resource_reference(&p->res, NULL);
}

static boolean
tc_generate_mipmap(struct pipe_context *_pipe,
                   struct pipe_resource *res,
                   enum pipe_format format,
                   unsigned base_level,
                   unsigned last_level,
                   unsigned first_layer,
                   unsigned last_layer)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;
   struct pipe_screen *screen = pipe->screen;
   unsigned bind;

   if (util_format_is_depth_or_stencil(format))
      bind = PIPE_BIND_DEPTH_STENCIL;
   else
      bind = PIPE_BIND_RENDER_TARGET;

   if (!screen->is_format_supported(screen, format, res->target,
                                    res->nr_samples, bind))
      return false;

   struct tc_generate_mipmap *p =
      tc_add_struct_typed_call(tc, TC_CALL_generate_mipmap, tc_generate_mipmap);

   tc_set_resource_reference(&p->res, res);
   p->format = format;
   p->base_level = base_level;
   p->last_level = last_level;
   p->first_layer = first_layer;
   p->last_layer = last_layer;
   return true;
}

static void
tc_call_flush_resource(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->flush_resource(pipe, payload->resource);
   pipe_resource_reference(&payload->resource, NULL);
}

static void
tc_flush_resource(struct pipe_context *_pipe,
                  struct pipe_resource *resource)
{
   struct threaded_context *tc = threaded_context(_pipe);
   union tc_payload *payload = tc_add_small_call(tc, TC_CALL_flush_resource);

   tc_set_resource_reference(&payload->resource, resource);
}

static void
tc_call_invalidate_resource(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->invalidate_resource(pipe, payload->resource);
   pipe_resource_reference(&payload->resource, NULL);
}

static void
tc_invalidate_resource(struct pipe_context *_pipe,
                       struct pipe_resource *resource)
{
   struct threaded_context *tc = threaded_context(_pipe);

   if (resource->target == PIPE_BUFFER) {
      /* Buffers are invalidated by replacing their backing storage,
       * which doesn't require queueing a call to the driver. */
      tc_invalidate_buffer(tc, threaded_resource(resource));
      return;
   }

   union tc_payload *payload = tc_add_small_call(tc, TC_CALL_invalidate_resource);
   tc_set_resource_reference(&payload->resource, resource);
}

struct tc_clear {
   unsigned buffers;
   union pipe_color_union color;
   double depth;
   unsigned stencil;
};

static void
tc_call_clear(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_clear *p = (struct tc_clear *)payload;
   pipe->clear(pipe, p->buffers, &p->color, p->depth, p->stencil);
}

static void
tc_clear(struct pipe_context *_pipe, unsigned buffers,
         const union pipe_color_union *color, double depth,
         unsigned stencil)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_clear *p = tc_add_struct_typed_call(tc, TC_CALL_clear, tc_clear);

   p->buffers = buffers;
   p->color = *color;
   p->depth = depth;
   p->stencil = stencil;
}

static void
tc_clear_render_target(struct pipe_context *_pipe,
                       struct pipe_surface *dst,
                       const union pipe_color_union *color,
                       unsigned dstx, unsigned dsty,
                       unsigned width, unsigned height,
                       bool render_condition_enabled)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   /* Not queued: sync and execute directly on the driver context. */
   tc_sync(tc);
   pipe->clear_render_target(pipe, dst, color, dstx, dsty, width, height,
                             render_condition_enabled);
}

static void
tc_clear_depth_stencil(struct pipe_context *_pipe,
                       struct pipe_surface *dst, unsigned clear_flags,
                       double depth, unsigned stencil, unsigned dstx,
                       unsigned dsty, unsigned width, unsigned height,
                       bool render_condition_enabled)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->clear_depth_stencil(pipe, dst, clear_flags, depth, stencil,
                             dstx, dsty, width, height,
                             render_condition_enabled);
}

struct tc_clear_buffer {
   struct pipe_resource *res;
   unsigned offset;
   unsigned size;
   char clear_value[16];
   int clear_value_size;
};

static void
tc_call_clear_buffer(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_clear_buffer *p = (struct tc_clear_buffer *)payload;

   pipe->clear_buffer(pipe, p->res, p->offset, p->size, p->clear_value,
                      p->clear_value_size);
   pipe_resource_reference(&p->res, NULL);
}

static void
tc_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
                unsigned offset, unsigned size,
                const void *clear_value, int clear_value_size)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_resource *tres = threaded_resource(res);
   struct tc_clear_buffer *p =
      tc_add_struct_typed_call(tc, TC_CALL_clear_buffer, tc_clear_buffer);

   /* The clear value is at most one 128-bit pixel. */
   assert(clear_value_size <= (int)sizeof(p->clear_value));

   tc_set_resource_reference(&p->res, res);
   p->offset = offset;
   p->size = size;
   memcpy(p->clear_value, clear_value, clear_value_size);
   p->clear_value_size = clear_value_size;

   util_range_add(&tres->valid_buffer_range, offset, offset + size);
}

struct tc_clear_texture {
   struct pipe_resource *res;
   unsigned level;
   struct pipe_box box;
   char data[16]; /* big enough for the largest block size (128 bits) */
};

static void
tc_call_clear_texture(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_clear_texture *p = (struct tc_clear_texture *)payload;

   pipe->clear_texture(pipe, p->res, p->level, &p->box, p->data);
   pipe_resource_reference(&p->res, NULL);
}

static void
tc_clear_texture(struct pipe_context *_pipe, struct pipe_resource *res,
                 unsigned level, const struct pipe_box *box, const void *data)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_clear_texture *p =
      tc_add_struct_typed_call(tc, TC_CALL_clear_texture, tc_clear_texture);

   tc_set_resource_reference(&p->res, res);
   p->level = level;
   p->box = *box;
   /* "data" is exactly one block of the texture's format. */
   memcpy(p->data, data,
          util_format_get_blocksize(res->format));
}

struct tc_resource_commit {
   struct pipe_resource *res;
   unsigned level;
   struct pipe_box box;
   bool commit;
};

static void
tc_call_resource_commit(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_resource_commit *p = (struct tc_resource_commit *)payload;

   pipe->resource_commit(pipe, p->res, p->level, &p->box, p->commit);
   pipe_resource_reference(&p->res, NULL);
}

static bool
tc_resource_commit(struct pipe_context *_pipe, struct pipe_resource *res,
                   unsigned level, struct pipe_box *box, bool commit)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_resource_commit *p =
      tc_add_struct_typed_call(tc, TC_CALL_resource_commit, tc_resource_commit);

   tc_set_resource_reference(&p->res, res);
   p->level = level;
   p->box = *box;
   p->commit = commit;
   return true; /* we don't care about the return value for this call */
}

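/* The pattern above repeats for every queued call: a plain-data payload
 * struct, a tc_call_* function that executes it on the driver thread and
 * releases its references, and a tc_* entry point that records it into the
 * current batch.  A minimal sketch of adding a hypothetical new call (the
 * "frob_resource" name and its payload are illustrative only; a real call
 * would also need a CALL(frob_resource) line in u_threaded_context_calls.h):
 */
#if 0
struct tc_frob_resource {
   struct pipe_resource *res;
   unsigned value;
};

static void
tc_call_frob_resource(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_frob_resource *p = (struct tc_frob_resource *)payload;

   pipe->frob_resource(pipe, p->res, p->value); /* hypothetical driver hook */
   pipe_resource_reference(&p->res, NULL);      /* drop the queued reference */
}

static void
tc_frob_resource(struct pipe_context *_pipe, struct pipe_resource *res,
                 unsigned value)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_frob_resource *p =
      tc_add_struct_typed_call(tc, TC_CALL_frob_resource, tc_frob_resource);

   tc_set_resource_reference(&p->res, res); /* slot memory is uninitialized */
   p->value = value;
}
#endif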
/********************************************************************
 * create & destroy
 */

static void
tc_destroy(struct pipe_context *_pipe)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);

   if (util_queue_is_initialized(&tc->queue)) {
      util_queue_destroy(&tc->queue);

      for (unsigned i = 0; i < TC_MAX_BATCHES; i++)
         util_queue_fence_destroy(&tc->batch_slots[i].fence);
   }

   if (tc->base.const_uploader &&
       tc->base.stream_uploader != tc->base.const_uploader)
      u_upload_destroy(tc->base.const_uploader);

   if (tc->base.stream_uploader)
      u_upload_destroy(tc->base.stream_uploader);

   slab_destroy_child(&tc->pool_transfers);
   pipe->destroy(pipe);
   os_free_aligned(tc);
}

static const tc_execute execute_func[TC_NUM_CALLS] = {
#define CALL(name) tc_call_##name,
#include "u_threaded_context_calls.h"
#undef CALL
};
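
/* The table above is filled with the same X-macro list
 * (u_threaded_context_calls.h) that defines enum tc_call_id, which keeps
 * the call IDs and their execute functions in sync by construction.  For a
 * list containing e.g. CALL(launch_grid), the #include expands roughly to
 * (an illustrative sketch, not actual preprocessor output):
 */
#if 0
static const tc_execute execute_func[TC_NUM_CALLS] = {
   tc_call_launch_grid,   /* indexed by TC_CALL_launch_grid */
   /* ... one entry per CALL() in u_threaded_context_calls.h ... */
};
#endif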

/**
 * Wrap an existing pipe_context into a threaded_context.
 *
 * \param pipe                  pipe_context to wrap
 * \param parent_transfer_pool  parent slab pool set up for creating
 *                              pipe_transfer objects; the driver should have
 *                              one in pipe_screen.
 * \param replace_buffer        callback for replacing a pipe_resource's
 *                              storage with another pipe_resource's storage.
 * \param out                   if non-NULL and the call succeeds, the new
 *                              threaded_context is also stored here.
 */
struct pipe_context *
threaded_context_create(struct pipe_context *pipe,
                        struct slab_parent_pool *parent_transfer_pool,
                        tc_replace_buffer_storage_func replace_buffer,
                        struct threaded_context **out)
{
   struct threaded_context *tc;

   STATIC_ASSERT(sizeof(union tc_payload) <= 8);
   STATIC_ASSERT(sizeof(struct tc_call) <= 16);

   if (!pipe)
      return NULL;

   util_cpu_detect();

   if (!debug_get_bool_option("GALLIUM_THREAD", util_cpu_caps.nr_cpus > 1))
      return pipe;

   tc = os_malloc_aligned(sizeof(struct threaded_context), 16);
   if (!tc) {
      pipe->destroy(pipe);
      return NULL;
   }
   memset(tc, 0, sizeof(*tc));

   assert((uintptr_t)tc % 16 == 0);
   STATIC_ASSERT(offsetof(struct threaded_context, batch_slots[0]) % 16 == 0);
   STATIC_ASSERT(offsetof(struct threaded_context, batch_slots[0].call[0]) % 16 == 0);
   STATIC_ASSERT(offsetof(struct threaded_context, batch_slots[0].call[1]) % 16 == 0);
   STATIC_ASSERT(offsetof(struct threaded_context, batch_slots[1].call[0]) % 16 == 0);

   /* The driver context isn't wrapped, so set its "priv" to NULL. */
   pipe->priv = NULL;

   tc->pipe = pipe;
   tc->replace_buffer_storage = replace_buffer;
   tc->map_buffer_alignment =
      pipe->screen->get_param(pipe->screen, PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT);
   tc->base.priv = pipe; /* priv points to the wrapped driver context */
   tc->base.screen = pipe->screen;
   tc->base.destroy = tc_destroy;

   tc->base.stream_uploader = u_upload_clone(&tc->base, pipe->stream_uploader);
   if (pipe->stream_uploader == pipe->const_uploader)
      tc->base.const_uploader = tc->base.stream_uploader;
   else
      tc->base.const_uploader = u_upload_clone(&tc->base, pipe->const_uploader);

   if (!tc->base.stream_uploader || !tc->base.const_uploader)
      goto fail;

   /* The queue size is the number of batches that can be waiting. A batch is
    * removed from the queue before it is executed, so reserve one tc_batch
    * slot for the batch being executed and another for the batch currently
    * being recorded but not yet flushed.
    */
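   /* An added illustrative compile-time check (not in the original code):
    * the "- 2" below requires at least three batch slots in total. */
   STATIC_ASSERT(TC_MAX_BATCHES >= 3);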
   if (!util_queue_init(&tc->queue, "gallium_drv", TC_MAX_BATCHES - 2, 1))
      goto fail;

   for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
      tc->batch_slots[i].sentinel = TC_SENTINEL;
      tc->batch_slots[i].pipe = pipe;
      util_queue_fence_init(&tc->batch_slots[i].fence);
   }

   LIST_INITHEAD(&tc->unflushed_queries);

   slab_create_child(&tc->pool_transfers, parent_transfer_pool);

   /* Forward a context function to its tc_* implementation only if the
    * wrapped driver implements it; otherwise leave it NULL. */
#define CTX_INIT(_member) \
   tc->base._member = tc->pipe->_member ? tc_##_member : NULL

   CTX_INIT(flush);
   CTX_INIT(draw_vbo);
   CTX_INIT(launch_grid);
   CTX_INIT(resource_copy_region);
   CTX_INIT(blit);
   CTX_INIT(clear);
   CTX_INIT(clear_render_target);
   CTX_INIT(clear_depth_stencil);
   CTX_INIT(clear_buffer);
   CTX_INIT(clear_texture);
   CTX_INIT(flush_resource);
   CTX_INIT(generate_mipmap);
   CTX_INIT(render_condition);
   CTX_INIT(create_query);
   CTX_INIT(create_batch_query);
   CTX_INIT(destroy_query);
   CTX_INIT(begin_query);
   CTX_INIT(end_query);
   CTX_INIT(get_query_result);
   CTX_INIT(get_query_result_resource);
   CTX_INIT(set_active_query_state);
   CTX_INIT(create_blend_state);
   CTX_INIT(bind_blend_state);
   CTX_INIT(delete_blend_state);
   CTX_INIT(create_sampler_state);
   CTX_INIT(bind_sampler_states);
   CTX_INIT(delete_sampler_state);
   CTX_INIT(create_rasterizer_state);
   CTX_INIT(bind_rasterizer_state);
   CTX_INIT(delete_rasterizer_state);
   CTX_INIT(create_depth_stencil_alpha_state);
   CTX_INIT(bind_depth_stencil_alpha_state);
   CTX_INIT(delete_depth_stencil_alpha_state);
   CTX_INIT(create_fs_state);
   CTX_INIT(bind_fs_state);
   CTX_INIT(delete_fs_state);
   CTX_INIT(create_vs_state);
   CTX_INIT(bind_vs_state);
   CTX_INIT(delete_vs_state);
   CTX_INIT(create_gs_state);
   CTX_INIT(bind_gs_state);
   CTX_INIT(delete_gs_state);
   CTX_INIT(create_tcs_state);
   CTX_INIT(bind_tcs_state);
   CTX_INIT(delete_tcs_state);
   CTX_INIT(create_tes_state);
   CTX_INIT(bind_tes_state);
   CTX_INIT(delete_tes_state);
   CTX_INIT(create_compute_state);
   CTX_INIT(bind_compute_state);
   CTX_INIT(delete_compute_state);
   CTX_INIT(create_vertex_elements_state);
   CTX_INIT(bind_vertex_elements_state);
   CTX_INIT(delete_vertex_elements_state);
   CTX_INIT(set_blend_color);
   CTX_INIT(set_stencil_ref);
   CTX_INIT(set_sample_mask);
   CTX_INIT(set_min_samples);
   CTX_INIT(set_clip_state);
   CTX_INIT(set_constant_buffer);
   CTX_INIT(set_framebuffer_state);
   CTX_INIT(set_polygon_stipple);
   CTX_INIT(set_scissor_states);
   CTX_INIT(set_viewport_states);
   CTX_INIT(set_window_rectangles);
   CTX_INIT(set_sampler_views);
   CTX_INIT(set_tess_state);
   CTX_INIT(set_shader_buffers);
   CTX_INIT(set_shader_images);
   CTX_INIT(set_vertex_buffers);
   CTX_INIT(create_stream_output_target);
   CTX_INIT(stream_output_target_destroy);
   CTX_INIT(set_stream_output_targets);
   CTX_INIT(create_sampler_view);
   CTX_INIT(sampler_view_destroy);
   CTX_INIT(create_surface);
   CTX_INIT(surface_destroy);
   CTX_INIT(transfer_map);
   CTX_INIT(transfer_flush_region);
   CTX_INIT(transfer_unmap);
   CTX_INIT(buffer_subdata);
   CTX_INIT(texture_subdata);
   CTX_INIT(texture_barrier);
   CTX_INIT(memory_barrier);
   CTX_INIT(resource_commit);
   CTX_INIT(create_video_codec);
   CTX_INIT(create_video_buffer);
   CTX_INIT(set_compute_resources);
   CTX_INIT(set_global_binding);
   CTX_INIT(get_sample_position);
   CTX_INIT(invalidate_resource);
   CTX_INIT(get_device_reset_status);
   CTX_INIT(set_device_reset_callback);
   CTX_INIT(dump_debug_state);
   CTX_INIT(emit_string_marker);
   CTX_INIT(set_debug_callback);
   CTX_INIT(create_fence_fd);
   CTX_INIT(fence_server_sync);
   CTX_INIT(get_timestamp);
#undef CTX_INIT

   if (out)
      *out = tc;

   return &tc->base;

fail:
   tc_destroy(&tc->base);
   return NULL;
}
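
/* A sketch of how a driver would typically use threaded_context_create
 * (the "mydrv_*" names are illustrative; "transfer_pool" is assumed to be
 * the slab parent pool the driver keeps in its pipe_screen, and
 * "mydrv_replace_buffer_storage" a driver callback matching the
 * tc_replace_buffer_storage_func signature):
 */
#if 0
static struct pipe_context *
mydrv_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
{
   struct pipe_context *pipe = mydrv_create_raw_context(screen, priv, flags);

   /* Wrap the raw context.  On allocation failure the raw context is
    * destroyed for us and NULL is returned; if threading is disabled
    * (e.g. GALLIUM_THREAD=0), the raw context is returned unchanged. */
   return threaded_context_create(pipe,
                                  &mydrv_screen(screen)->transfer_pool,
                                  mydrv_replace_buffer_storage,
                                  NULL /* out */);
}
#endif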