tc: add ARB_bindless_texture support
[mesa.git] / src / gallium / auxiliary / util / u_threaded_context.c
1 /**************************************************************************
2 *
3 * Copyright 2017 Advanced Micro Devices, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * on the rights to use, copy, modify, merge, publish, distribute, sub
10 * license, and/or sell copies of the Software, and to permit persons to whom
11 * the Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 **************************************************************************/
26
27 #include "util/u_threaded_context.h"
28 #include "util/u_cpu_detect.h"
29 #include "util/u_format.h"
30 #include "util/u_inlines.h"
31 #include "util/u_memory.h"
32 #include "util/u_upload_mgr.h"
33
34 /* 0 = disabled, 1 = assertions, 2 = printfs */
35 #define TC_DEBUG 0
36
37 #if TC_DEBUG >= 1
38 #define tc_assert assert
39 #else
40 #define tc_assert(x)
41 #endif
42
43 #if TC_DEBUG >= 2
44 #define tc_printf printf
45 #define tc_asprintf asprintf
46 #define tc_strcmp strcmp
47 #else
48 #define tc_printf(...)
49 #define tc_asprintf(...) 0
50 #define tc_strcmp(...) 0
51 #endif
52
53 #define TC_SENTINEL 0x5ca1ab1e
54
55 enum tc_call_id {
56 #define CALL(name) TC_CALL_##name,
57 #include "u_threaded_context_calls.h"
58 #undef CALL
59 TC_NUM_CALLS,
60 };
61
62 typedef void (*tc_execute)(struct pipe_context *pipe, union tc_payload *payload);
63
64 static const tc_execute execute_func[TC_NUM_CALLS];
65
66 static void
67 tc_batch_check(struct tc_batch *batch)
68 {
69 tc_assert(batch->sentinel == TC_SENTINEL);
70 tc_assert(batch->num_total_call_slots <= TC_CALLS_PER_BATCH);
71 }
72
73 static void
74 tc_debug_check(struct threaded_context *tc)
75 {
76 for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
77 tc_batch_check(&tc->batch_slots[i]);
78 tc_assert(tc->batch_slots[i].pipe == tc->pipe);
79 }
80 }
81
82 static void
83 tc_batch_execute(void *job, int thread_index)
84 {
85 struct tc_batch *batch = job;
86 struct pipe_context *pipe = batch->pipe;
87 struct tc_call *last = &batch->call[batch->num_total_call_slots];
88
89 tc_batch_check(batch);
90
91 for (struct tc_call *iter = batch->call; iter != last;
92 iter += iter->num_call_slots) {
93 tc_assert(iter->sentinel == TC_SENTINEL);
94 execute_func[iter->call_id](pipe, &iter->payload);
95 }
96
97 tc_batch_check(batch);
98 batch->num_total_call_slots = 0;
99 }
100
101 static void
102 tc_batch_flush(struct threaded_context *tc)
103 {
104 struct tc_batch *next = &tc->batch_slots[tc->next];
105
106 tc_assert(next->num_total_call_slots != 0);
107 tc_batch_check(next);
108 tc_debug_check(tc);
109 p_atomic_add(&tc->num_offloaded_slots, next->num_total_call_slots);
110
111 util_queue_add_job(&tc->queue, next, &next->fence, tc_batch_execute,
112 NULL);
113 tc->last = tc->next;
114 tc->next = (tc->next + 1) % TC_MAX_BATCHES;
115 }
116
117 /* This is the function that adds variable-sized calls into the current
118 * batch. It also flushes the batch if there is not enough space in it.
119 * All other higher-level "add" functions use it.
120 */
121 static union tc_payload *
122 tc_add_sized_call(struct threaded_context *tc, enum tc_call_id id,
123 unsigned payload_size)
124 {
125 struct tc_batch *next = &tc->batch_slots[tc->next];
126 unsigned total_size = offsetof(struct tc_call, payload) + payload_size;
127 unsigned num_call_slots = DIV_ROUND_UP(total_size, sizeof(struct tc_call));
128
129 tc_debug_check(tc);
130
131 if (unlikely(next->num_total_call_slots + num_call_slots > TC_CALLS_PER_BATCH)) {
132 tc_batch_flush(tc);
133 next = &tc->batch_slots[tc->next];
134 tc_assert(next->num_total_call_slots == 0);
135 }
136
137 tc_assert(util_queue_fence_is_signalled(&next->fence));
138
139 struct tc_call *call = &next->call[next->num_total_call_slots];
140 next->num_total_call_slots += num_call_slots;
141
142 call->sentinel = TC_SENTINEL;
143 call->call_id = id;
144 call->num_call_slots = num_call_slots;
145
146 tc_debug_check(tc);
147 return &call->payload;
148 }
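
/* Worked example (illustrative; the real slot size is defined by struct
 * tc_call in u_threaded_context.h): assuming 64-byte call slots, an 80-byte
 * payload is packed as:
 *
 *   unsigned total_size = offsetof(struct tc_call, payload) + 80;
 *   unsigned num_call_slots = DIV_ROUND_UP(total_size, sizeof(struct tc_call));
 *
 * tc_batch_execute later skips over the whole call at once with
 * "iter += iter->num_call_slots", which is how variable-sized payloads
 * coexist within the fixed-size slot array of a batch.
 */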
149
150 #define tc_add_struct_typed_call(tc, execute, type) \
151 ((struct type*)tc_add_sized_call(tc, execute, sizeof(struct type)))
152
153 #define tc_add_slot_based_call(tc, execute, type, num_slots) \
154 ((struct type*)tc_add_sized_call(tc, execute, \
155 sizeof(struct type) + \
156 sizeof(((struct type*)NULL)->slot[0]) * \
157 (num_slots)))
158
159 static union tc_payload *
160 tc_add_small_call(struct threaded_context *tc, enum tc_call_id id)
161 {
162 return tc_add_sized_call(tc, id, 0);
163 }
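
/* Illustrative sketch of the pattern this file uses to wrap a pipe_context
 * entry point ("set_foo", struct tc_foo, and TC_CALL_set_foo are
 * hypothetical names):
 *
 *   struct tc_foo {
 *      unsigned value;
 *   };
 *
 *   static void
 *   tc_call_set_foo(struct pipe_context *pipe, union tc_payload *payload)
 *   {
 *      struct tc_foo *p = (struct tc_foo *)payload;
 *      pipe->set_foo(pipe, p->value);
 *   }
 *
 *   static void
 *   tc_set_foo(struct pipe_context *_pipe, unsigned value)
 *   {
 *      struct threaded_context *tc = threaded_context(_pipe);
 *      struct tc_foo *p =
 *         tc_add_struct_typed_call(tc, TC_CALL_set_foo, tc_foo);
 *
 *      p->value = value;
 *   }
 *
 * The application thread only records the call; tc_batch_execute invokes
 * tc_call_set_foo on the driver thread later.
 */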
164
165 static void
166 _tc_sync(struct threaded_context *tc, const char *info, const char *func)
167 {
168 struct tc_batch *last = &tc->batch_slots[tc->last];
169 struct tc_batch *next = &tc->batch_slots[tc->next];
170 bool synced = false;
171
172 tc_debug_check(tc);
173
174 /* Only wait for queued calls... */
175 if (!util_queue_fence_is_signalled(&last->fence)) {
176 util_queue_fence_wait(&last->fence);
177 synced = true;
178 }
179
180 tc_debug_check(tc);
181
182 /* ... and execute unflushed calls directly. */
183 if (next->num_total_call_slots) {
184 p_atomic_add(&tc->num_direct_slots, next->num_total_call_slots);
185 tc_batch_execute(next, 0);
186 synced = true;
187 }
188
189 if (synced) {
190 p_atomic_inc(&tc->num_syncs);
191
192 if (tc_strcmp(func, "tc_destroy") != 0)
193 tc_printf("sync %s %s\n", func, info);
194 }
195
196 tc_debug_check(tc);
197 }
198
199 #define tc_sync(tc) _tc_sync(tc, "", __func__)
200 #define tc_sync_msg(tc, info) _tc_sync(tc, info, __func__)
201
202 static void
203 tc_set_resource_reference(struct pipe_resource **dst, struct pipe_resource *src)
204 {
205 *dst = NULL;
206 pipe_resource_reference(dst, src);
207 }
208
209 void
210 threaded_resource_init(struct pipe_resource *res)
211 {
212 struct threaded_resource *tres = threaded_resource(res);
213
214 tres->latest = &tres->b;
215 util_range_init(&tres->valid_buffer_range);
216 tres->base_valid_buffer_range = &tres->valid_buffer_range;
217 tres->is_shared = false;
218 tres->is_user_ptr = false;
219 }
220
221 void
222 threaded_resource_deinit(struct pipe_resource *res)
223 {
224 struct threaded_resource *tres = threaded_resource(res);
225
226 if (tres->latest != &tres->b)
227 pipe_resource_reference(&tres->latest, NULL);
228 util_range_destroy(&tres->valid_buffer_range);
229 }
230
231 struct pipe_context *
232 threaded_context_unwrap_sync(struct pipe_context *pipe)
233 {
234 if (!pipe || !pipe->priv)
235 return pipe;
236
237 tc_sync(threaded_context(pipe));
238 return (struct pipe_context*)pipe->priv;
239 }
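
/* Usage sketch (illustrative; "ctx" is a hypothetical pipe_context that may
 * or may not be a threaded_context wrapper):
 *
 *   struct pipe_context *driver_ctx = threaded_context_unwrap_sync(ctx);
 *   // driver_ctx == ctx when ctx isn't wrapped; otherwise all queued
 *   // calls have been executed before the unwrapped context is returned.
 */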
240
241
242 /********************************************************************
243 * simple functions
244 */
245
246 #define TC_FUNC1(func, m_payload, qualifier, type, deref, deref2) \
247 static void \
248 tc_call_##func(struct pipe_context *pipe, union tc_payload *payload) \
249 { \
250 pipe->func(pipe, deref2((type*)payload)); \
251 } \
252 \
253 static void \
254 tc_##func(struct pipe_context *_pipe, qualifier type deref param) \
255 { \
256 struct threaded_context *tc = threaded_context(_pipe); \
257 type *p = (type*)tc_add_sized_call(tc, TC_CALL_##func, sizeof(type)); \
258 *p = deref(param); \
259 }
260
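/* Illustrative expansion, for readability of the macro above:
 * TC_FUNC1(set_blend_color, blend_color, const, struct pipe_blend_color, *, )
 * expands to roughly:
 *
 *   static void
 *   tc_call_set_blend_color(struct pipe_context *pipe,
 *                           union tc_payload *payload)
 *   {
 *      pipe->set_blend_color(pipe, (struct pipe_blend_color*)payload);
 *   }
 *
 *   static void
 *   tc_set_blend_color(struct pipe_context *_pipe,
 *                      const struct pipe_blend_color *param)
 *   {
 *      struct threaded_context *tc = threaded_context(_pipe);
 *      struct pipe_blend_color *p = (struct pipe_blend_color*)
 *         tc_add_sized_call(tc, TC_CALL_set_blend_color,
 *                           sizeof(struct pipe_blend_color));
 *      *p = *param;
 *   }
 */
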
261 TC_FUNC1(set_active_query_state, flags, , boolean, , *)
262
263 TC_FUNC1(set_blend_color, blend_color, const, struct pipe_blend_color, *, )
264 TC_FUNC1(set_stencil_ref, stencil_ref, const, struct pipe_stencil_ref, *, )
265 TC_FUNC1(set_clip_state, clip_state, const, struct pipe_clip_state, *, )
266 TC_FUNC1(set_sample_mask, sample_mask, , unsigned, , *)
267 TC_FUNC1(set_min_samples, min_samples, , unsigned, , *)
268 TC_FUNC1(set_polygon_stipple, polygon_stipple, const, struct pipe_poly_stipple, *, )
269
270 TC_FUNC1(texture_barrier, flags, , unsigned, , *)
271 TC_FUNC1(memory_barrier, flags, , unsigned, , *)
272
273
274 /********************************************************************
275 * queries
276 */
277
278 static struct pipe_query *
279 tc_create_query(struct pipe_context *_pipe, unsigned query_type,
280 unsigned index)
281 {
282 struct threaded_context *tc = threaded_context(_pipe);
283 struct pipe_context *pipe = tc->pipe;
284
285 return pipe->create_query(pipe, query_type, index);
286 }
287
288 static struct pipe_query *
289 tc_create_batch_query(struct pipe_context *_pipe, unsigned num_queries,
290 unsigned *query_types)
291 {
292 struct threaded_context *tc = threaded_context(_pipe);
293 struct pipe_context *pipe = tc->pipe;
294
295 return pipe->create_batch_query(pipe, num_queries, query_types);
296 }
297
298 static void
299 tc_call_destroy_query(struct pipe_context *pipe, union tc_payload *payload)
300 {
301 pipe->destroy_query(pipe, payload->query);
302 }
303
304 static void
305 tc_destroy_query(struct pipe_context *_pipe, struct pipe_query *query)
306 {
307 struct threaded_context *tc = threaded_context(_pipe);
308 struct threaded_query *tq = threaded_query(query);
309
310 if (tq->head_unflushed.next)
311 LIST_DEL(&tq->head_unflushed);
312
313 tc_add_small_call(tc, TC_CALL_destroy_query)->query = query;
314 }
315
316 static void
317 tc_call_begin_query(struct pipe_context *pipe, union tc_payload *payload)
318 {
319 pipe->begin_query(pipe, payload->query);
320 }
321
322 static boolean
323 tc_begin_query(struct pipe_context *_pipe, struct pipe_query *query)
324 {
325 struct threaded_context *tc = threaded_context(_pipe);
326 union tc_payload *payload = tc_add_small_call(tc, TC_CALL_begin_query);
327
328 payload->query = query;
329 return true; /* we don't care about the return value for this call */
330 }
331
332 static void
333 tc_call_end_query(struct pipe_context *pipe, union tc_payload *payload)
334 {
335 pipe->end_query(pipe, payload->query);
336 }
337
338 static bool
339 tc_end_query(struct pipe_context *_pipe, struct pipe_query *query)
340 {
341 struct threaded_context *tc = threaded_context(_pipe);
342 struct threaded_query *tq = threaded_query(query);
343 union tc_payload *payload = tc_add_small_call(tc, TC_CALL_end_query);
344
345 payload->query = query;
346
347 tq->flushed = false;
348 if (!tq->head_unflushed.next)
349 LIST_ADD(&tq->head_unflushed, &tc->unflushed_queries);
350
351 return true; /* we don't care about the return value for this call */
352 }
353
354 static boolean
355 tc_get_query_result(struct pipe_context *_pipe,
356 struct pipe_query *query, boolean wait,
357 union pipe_query_result *result)
358 {
359 struct threaded_context *tc = threaded_context(_pipe);
360 struct threaded_query *tq = threaded_query(query);
361 struct pipe_context *pipe = tc->pipe;
362
363 if (!tq->flushed)
364 tc_sync_msg(tc, wait ? "wait" : "nowait");
365
366 bool success = pipe->get_query_result(pipe, query, wait, result);
367
368 if (success) {
369 tq->flushed = true;
370 if (tq->head_unflushed.next)
371 LIST_DEL(&tq->head_unflushed);
372 }
373 return success;
374 }
375
376 struct tc_query_result_resource {
377 struct pipe_query *query;
378 boolean wait;
379 enum pipe_query_value_type result_type;
380 int index;
381 struct pipe_resource *resource;
382 unsigned offset;
383 };
384
385 static void
386 tc_call_get_query_result_resource(struct pipe_context *pipe,
387 union tc_payload *payload)
388 {
389 struct tc_query_result_resource *p = (struct tc_query_result_resource *)payload;
390
391 pipe->get_query_result_resource(pipe, p->query, p->wait, p->result_type,
392 p->index, p->resource, p->offset);
393 pipe_resource_reference(&p->resource, NULL);
394 }
395
396 static void
397 tc_get_query_result_resource(struct pipe_context *_pipe,
398 struct pipe_query *query, boolean wait,
399 enum pipe_query_value_type result_type, int index,
400 struct pipe_resource *resource, unsigned offset)
401 {
402 struct threaded_context *tc = threaded_context(_pipe);
403 struct tc_query_result_resource *p =
404 tc_add_struct_typed_call(tc, TC_CALL_get_query_result_resource,
405 tc_query_result_resource);
406
407 p->query = query;
408 p->wait = wait;
409 p->result_type = result_type;
410 p->index = index;
411 tc_set_resource_reference(&p->resource, resource);
412 p->offset = offset;
413 }
414
415 struct tc_render_condition {
416 struct pipe_query *query;
417 bool condition;
418 unsigned mode;
419 };
420
421 static void
422 tc_call_render_condition(struct pipe_context *pipe, union tc_payload *payload)
423 {
424 struct tc_render_condition *p = (struct tc_render_condition *)payload;
425 pipe->render_condition(pipe, p->query, p->condition, p->mode);
426 }
427
428 static void
429 tc_render_condition(struct pipe_context *_pipe,
430 struct pipe_query *query, boolean condition,
431 enum pipe_render_cond_flag mode)
432 {
433 struct threaded_context *tc = threaded_context(_pipe);
434 struct tc_render_condition *p =
435 tc_add_struct_typed_call(tc, TC_CALL_render_condition, tc_render_condition);
436
437 p->query = query;
438 p->condition = condition;
439 p->mode = mode;
440 }
441
442
443 /********************************************************************
444 * constant (immutable) states
445 */
446
447 #define TC_CSO_CREATE(name, sname) \
448 static void * \
449 tc_create_##name##_state(struct pipe_context *_pipe, \
450 const struct pipe_##sname##_state *state) \
451 { \
452 struct pipe_context *pipe = threaded_context(_pipe)->pipe; \
453 return pipe->create_##name##_state(pipe, state); \
454 }
455
456 #define TC_CSO_BIND(name) TC_FUNC1(bind_##name##_state, cso, , void *, , *)
457 #define TC_CSO_DELETE(name) TC_FUNC1(delete_##name##_state, cso, , void *, , *)
458
459 #define TC_CSO_WHOLE2(name, sname) \
460 TC_CSO_CREATE(name, sname) \
461 TC_CSO_BIND(name) \
462 TC_CSO_DELETE(name)
463
464 #define TC_CSO_WHOLE(name) TC_CSO_WHOLE2(name, name)
465
466 TC_CSO_WHOLE(blend)
467 TC_CSO_WHOLE(rasterizer)
468 TC_CSO_WHOLE(depth_stencil_alpha)
469 TC_CSO_WHOLE(compute)
470 TC_CSO_WHOLE2(fs, shader)
471 TC_CSO_WHOLE2(vs, shader)
472 TC_CSO_WHOLE2(gs, shader)
473 TC_CSO_WHOLE2(tcs, shader)
474 TC_CSO_WHOLE2(tes, shader)
475 TC_CSO_CREATE(sampler, sampler)
476 TC_CSO_DELETE(sampler)
477 TC_CSO_BIND(vertex_elements)
478 TC_CSO_DELETE(vertex_elements)
479
480 static void *
481 tc_create_vertex_elements_state(struct pipe_context *_pipe, unsigned count,
482 const struct pipe_vertex_element *elems)
483 {
484 struct pipe_context *pipe = threaded_context(_pipe)->pipe;
485
486 return pipe->create_vertex_elements_state(pipe, count, elems);
487 }
488
489 struct tc_sampler_states {
490 ubyte shader, start, count;
491 void *slot[0]; /* more will be allocated if needed */
492 };
493
494 static void
495 tc_call_bind_sampler_states(struct pipe_context *pipe, union tc_payload *payload)
496 {
497 struct tc_sampler_states *p = (struct tc_sampler_states *)payload;
498 pipe->bind_sampler_states(pipe, p->shader, p->start, p->count, p->slot);
499 }
500
501 static void
502 tc_bind_sampler_states(struct pipe_context *_pipe,
503 enum pipe_shader_type shader,
504 unsigned start, unsigned count, void **states)
505 {
506 if (!count)
507 return;
508
509 struct threaded_context *tc = threaded_context(_pipe);
510 struct tc_sampler_states *p =
511 tc_add_slot_based_call(tc, TC_CALL_bind_sampler_states, tc_sampler_states, count);
512
513 p->shader = shader;
514 p->start = start;
515 p->count = count;
516 memcpy(p->slot, states, count * sizeof(states[0]));
517 }
518
519
520 /********************************************************************
521 * immediate states
522 */
523
524 static void
525 tc_call_set_framebuffer_state(struct pipe_context *pipe, union tc_payload *payload)
526 {
527 struct pipe_framebuffer_state *p = (struct pipe_framebuffer_state *)payload;
528
529 pipe->set_framebuffer_state(pipe, p);
530
531 unsigned nr_cbufs = p->nr_cbufs;
532 for (unsigned i = 0; i < nr_cbufs; i++)
533 pipe_surface_reference(&p->cbufs[i], NULL);
534 pipe_surface_reference(&p->zsbuf, NULL);
535 }
536
537 static void
538 tc_set_framebuffer_state(struct pipe_context *_pipe,
539 const struct pipe_framebuffer_state *fb)
540 {
541 struct threaded_context *tc = threaded_context(_pipe);
542 struct pipe_framebuffer_state *p =
543 tc_add_struct_typed_call(tc, TC_CALL_set_framebuffer_state,
544 pipe_framebuffer_state);
545 unsigned nr_cbufs = fb->nr_cbufs;
546
547 p->width = fb->width;
548 p->height = fb->height;
549 p->samples = fb->samples;
550 p->layers = fb->layers;
551 p->nr_cbufs = nr_cbufs;
552
553 for (unsigned i = 0; i < nr_cbufs; i++) {
554 p->cbufs[i] = NULL;
555 pipe_surface_reference(&p->cbufs[i], fb->cbufs[i]);
556 }
557 p->zsbuf = NULL;
558 pipe_surface_reference(&p->zsbuf, fb->zsbuf);
559 }
560
561 static void
562 tc_call_set_tess_state(struct pipe_context *pipe, union tc_payload *payload)
563 {
564 float *p = (float*)payload;
565 pipe->set_tess_state(pipe, p, p + 4);
566 }
567
568 static void
569 tc_set_tess_state(struct pipe_context *_pipe,
570 const float default_outer_level[4],
571 const float default_inner_level[2])
572 {
573 struct threaded_context *tc = threaded_context(_pipe);
574 float *p = (float*)tc_add_sized_call(tc, TC_CALL_set_tess_state,
575 sizeof(float) * 6);
576
577 memcpy(p, default_outer_level, 4 * sizeof(float));
578 memcpy(p + 4, default_inner_level, 2 * sizeof(float));
579 }
580
581 struct tc_constant_buffer {
582 ubyte shader, index;
583 struct pipe_constant_buffer cb;
584 };
585
586 static void
587 tc_call_set_constant_buffer(struct pipe_context *pipe, union tc_payload *payload)
588 {
589 struct tc_constant_buffer *p = (struct tc_constant_buffer *)payload;
590
591 pipe->set_constant_buffer(pipe,
592 p->shader,
593 p->index,
594 &p->cb);
595 pipe_resource_reference(&p->cb.buffer, NULL);
596 }
597
598 static void
599 tc_set_constant_buffer(struct pipe_context *_pipe,
600 enum pipe_shader_type shader, uint index,
601 const struct pipe_constant_buffer *cb)
602 {
603 struct threaded_context *tc = threaded_context(_pipe);
604 struct pipe_resource *buffer = NULL;
605 unsigned offset;
606
607 /* The upload must be done before adding set_constant_buffer, because it
608 * could generate e.g. transfer_unmap and flush a partially-initialized
609 * set_constant_buffer call to the driver if it were done afterwards.
610 */
611 if (cb && cb->user_buffer) {
612 u_upload_data(tc->base.const_uploader, 0, cb->buffer_size, 64,
613 cb->user_buffer, &offset, &buffer);
614 }
615
616 struct tc_constant_buffer *p =
617 tc_add_struct_typed_call(tc, TC_CALL_set_constant_buffer,
618 tc_constant_buffer);
619 p->shader = shader;
620 p->index = index;
621
622 if (cb) {
623 if (cb->user_buffer) {
624 p->cb.buffer_size = cb->buffer_size;
625 p->cb.user_buffer = NULL;
626 p->cb.buffer_offset = offset;
627 p->cb.buffer = buffer;
628 } else {
629 tc_set_resource_reference(&p->cb.buffer,
630 cb->buffer);
631 memcpy(&p->cb, cb, sizeof(*cb));
632 }
633 } else {
634 memset(&p->cb, 0, sizeof(*cb));
635 }
636 }
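
/* Usage sketch (illustrative; "ctx" is a hypothetical pipe_context): with a
 * user-pointer constant buffer, the data is uploaded on the application
 * thread above, so the driver thread only ever sees a real buffer and an
 * offset:
 *
 *   float consts[4] = {0.0f, 1.0f, 2.0f, 3.0f};
 *   struct pipe_constant_buffer cb = {0};
 *
 *   cb.user_buffer = consts;
 *   cb.buffer_size = sizeof(consts);
 *   ctx->set_constant_buffer(ctx, PIPE_SHADER_FRAGMENT, 0, &cb);
 */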
637
638 struct tc_scissors {
639 ubyte start, count;
640 struct pipe_scissor_state slot[0]; /* more will be allocated if needed */
641 };
642
643 static void
644 tc_call_set_scissor_states(struct pipe_context *pipe, union tc_payload *payload)
645 {
646 struct tc_scissors *p = (struct tc_scissors *)payload;
647 pipe->set_scissor_states(pipe, p->start, p->count, p->slot);
648 }
649
650 static void
651 tc_set_scissor_states(struct pipe_context *_pipe,
652 unsigned start, unsigned count,
653 const struct pipe_scissor_state *states)
654 {
655 struct threaded_context *tc = threaded_context(_pipe);
656 struct tc_scissors *p =
657 tc_add_slot_based_call(tc, TC_CALL_set_scissor_states, tc_scissors, count);
658
659 p->start = start;
660 p->count = count;
661 memcpy(&p->slot, states, count * sizeof(states[0]));
662 }
663
664 struct tc_viewports {
665 ubyte start, count;
666 struct pipe_viewport_state slot[0]; /* more will be allocated if needed */
667 };
668
669 static void
670 tc_call_set_viewport_states(struct pipe_context *pipe, union tc_payload *payload)
671 {
672 struct tc_viewports *p = (struct tc_viewports *)payload;
673 pipe->set_viewport_states(pipe, p->start, p->count, p->slot);
674 }
675
676 static void
677 tc_set_viewport_states(struct pipe_context *_pipe,
678 unsigned start, unsigned count,
679 const struct pipe_viewport_state *states)
680 {
681 if (!count)
682 return;
683
684 struct threaded_context *tc = threaded_context(_pipe);
685 struct tc_viewports *p =
686 tc_add_slot_based_call(tc, TC_CALL_set_viewport_states, tc_viewports, count);
687
688 p->start = start;
689 p->count = count;
690 memcpy(&p->slot, states, count * sizeof(states[0]));
691 }
692
693 struct tc_window_rects {
694 bool include;
695 ubyte count;
696 struct pipe_scissor_state slot[0]; /* more will be allocated if needed */
697 };
698
699 static void
700 tc_call_set_window_rectangles(struct pipe_context *pipe,
701 union tc_payload *payload)
702 {
703 struct tc_window_rects *p = (struct tc_window_rects *)payload;
704 pipe->set_window_rectangles(pipe, p->include, p->count, p->slot);
705 }
706
707 static void
708 tc_set_window_rectangles(struct pipe_context *_pipe, boolean include,
709 unsigned count,
710 const struct pipe_scissor_state *rects)
711 {
712 struct threaded_context *tc = threaded_context(_pipe);
713 struct tc_window_rects *p =
714 tc_add_slot_based_call(tc, TC_CALL_set_window_rectangles, tc_window_rects, count);
715
716 p->include = include;
717 p->count = count;
718 memcpy(p->slot, rects, count * sizeof(rects[0]));
719 }
720
721 struct tc_sampler_views {
722 ubyte shader, start, count;
723 struct pipe_sampler_view *slot[0]; /* more will be allocated if needed */
724 };
725
726 static void
727 tc_call_set_sampler_views(struct pipe_context *pipe, union tc_payload *payload)
728 {
729 struct tc_sampler_views *p = (struct tc_sampler_views *)payload;
730 unsigned count = p->count;
731
732 pipe->set_sampler_views(pipe, p->shader, p->start, p->count, p->slot);
733 for (unsigned i = 0; i < count; i++)
734 pipe_sampler_view_reference(&p->slot[i], NULL);
735 }
736
737 static void
738 tc_set_sampler_views(struct pipe_context *_pipe,
739 enum pipe_shader_type shader,
740 unsigned start, unsigned count,
741 struct pipe_sampler_view **views)
742 {
743 if (!count)
744 return;
745
746 struct threaded_context *tc = threaded_context(_pipe);
747 struct tc_sampler_views *p =
748 tc_add_slot_based_call(tc, TC_CALL_set_sampler_views, tc_sampler_views, count);
749
750 p->shader = shader;
751 p->start = start;
752 p->count = count;
753
754 if (views) {
755 for (unsigned i = 0; i < count; i++) {
756 p->slot[i] = NULL;
757 pipe_sampler_view_reference(&p->slot[i], views[i]);
758 }
759 } else {
760 memset(p->slot, 0, count * sizeof(views[0]));
761 }
762 }
763
764 struct tc_shader_images {
765 ubyte shader, start, count;
766 bool unbind;
767 struct pipe_image_view slot[0]; /* more will be allocated if needed */
768 };
769
770 static void
771 tc_call_set_shader_images(struct pipe_context *pipe, union tc_payload *payload)
772 {
773 struct tc_shader_images *p = (struct tc_shader_images *)payload;
774 unsigned count = p->count;
775
776 if (p->unbind) {
777 pipe->set_shader_images(pipe, p->shader, p->start, p->count, NULL);
778 return;
779 }
780
781 pipe->set_shader_images(pipe, p->shader, p->start, p->count, p->slot);
782
783 for (unsigned i = 0; i < count; i++)
784 pipe_resource_reference(&p->slot[i].resource, NULL);
785 }
786
787 static void
788 tc_set_shader_images(struct pipe_context *_pipe,
789 enum pipe_shader_type shader,
790 unsigned start, unsigned count,
791 const struct pipe_image_view *images)
792 {
793 if (!count)
794 return;
795
796 struct threaded_context *tc = threaded_context(_pipe);
797 struct tc_shader_images *p =
798 tc_add_slot_based_call(tc, TC_CALL_set_shader_images, tc_shader_images,
799 images ? count : 0);
800
801 p->shader = shader;
802 p->start = start;
803 p->count = count;
804 p->unbind = images == NULL;
805
806 if (images) {
807 for (unsigned i = 0; i < count; i++) {
808 tc_set_resource_reference(&p->slot[i].resource, images[i].resource);
809
810 if (images[i].access & PIPE_IMAGE_ACCESS_WRITE &&
811 images[i].resource &&
812 images[i].resource->target == PIPE_BUFFER) {
813 struct threaded_resource *tres =
814 threaded_resource(images[i].resource);
815
816 util_range_add(&tres->valid_buffer_range, images[i].u.buf.offset,
817 images[i].u.buf.offset + images[i].u.buf.size);
818 }
819 }
820 memcpy(p->slot, images, count * sizeof(images[0]));
821 }
822 }
823
824 struct tc_shader_buffers {
825 ubyte shader, start, count;
826 bool unbind;
827 struct pipe_shader_buffer slot[0]; /* more will be allocated if needed */
828 };
829
830 static void
831 tc_call_set_shader_buffers(struct pipe_context *pipe, union tc_payload *payload)
832 {
833 struct tc_shader_buffers *p = (struct tc_shader_buffers *)payload;
834 unsigned count = p->count;
835
836 if (p->unbind) {
837 pipe->set_shader_buffers(pipe, p->shader, p->start, p->count, NULL);
838 return;
839 }
840
841 pipe->set_shader_buffers(pipe, p->shader, p->start, p->count, p->slot);
842
843 for (unsigned i = 0; i < count; i++)
844 pipe_resource_reference(&p->slot[i].buffer, NULL);
845 }
846
847 static void
848 tc_set_shader_buffers(struct pipe_context *_pipe,
849 enum pipe_shader_type shader,
850 unsigned start, unsigned count,
851 const struct pipe_shader_buffer *buffers)
852 {
853 if (!count)
854 return;
855
856 struct threaded_context *tc = threaded_context(_pipe);
857 struct tc_shader_buffers *p =
858 tc_add_slot_based_call(tc, TC_CALL_set_shader_buffers, tc_shader_buffers,
859 buffers ? count : 0);
860
861 p->shader = shader;
862 p->start = start;
863 p->count = count;
864 p->unbind = buffers == NULL;
865
866 if (buffers) {
867 for (unsigned i = 0; i < count; i++) {
868 struct pipe_shader_buffer *dst = &p->slot[i];
869 const struct pipe_shader_buffer *src = buffers + i;
870
871 tc_set_resource_reference(&dst->buffer, src->buffer);
872 dst->buffer_offset = src->buffer_offset;
873 dst->buffer_size = src->buffer_size;
874
875 if (src->buffer) {
876 struct threaded_resource *tres = threaded_resource(src->buffer);
877
878 util_range_add(&tres->valid_buffer_range, src->buffer_offset,
879 src->buffer_offset + src->buffer_size);
880 }
881 }
882 }
883 }
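
/* Note: writable buffer bindings extend the resource's valid range on the
 * application thread even though the driver processes the bind later. This
 * keeps tc_improve_map_buffer_flags from treating a range the GPU may write
 * as never-initialized and mapping it unsynchronized.
 */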
884
885 struct tc_vertex_buffers {
886 ubyte start, count;
887 bool unbind;
888 struct pipe_vertex_buffer slot[0]; /* more will be allocated if needed */
889 };
890
891 static void
892 tc_call_set_vertex_buffers(struct pipe_context *pipe, union tc_payload *payload)
893 {
894 struct tc_vertex_buffers *p = (struct tc_vertex_buffers *)payload;
895 unsigned count = p->count;
896
897 if (p->unbind) {
898 pipe->set_vertex_buffers(pipe, p->start, count, NULL);
899 return;
900 }
901
902 for (unsigned i = 0; i < count; i++)
903 tc_assert(!p->slot[i].is_user_buffer);
904
905 pipe->set_vertex_buffers(pipe, p->start, count, p->slot);
906 for (unsigned i = 0; i < count; i++)
907 pipe_resource_reference(&p->slot[i].buffer.resource, NULL);
908 }
909
910 static void
911 tc_set_vertex_buffers(struct pipe_context *_pipe,
912 unsigned start, unsigned count,
913 const struct pipe_vertex_buffer *buffers)
914 {
915 struct threaded_context *tc = threaded_context(_pipe);
916
917 if (!count)
918 return;
919
920 if (buffers) {
921 struct tc_vertex_buffers *p =
922 tc_add_slot_based_call(tc, TC_CALL_set_vertex_buffers, tc_vertex_buffers, count);
923 p->start = start;
924 p->count = count;
925 p->unbind = false;
926
927 for (unsigned i = 0; i < count; i++) {
928 struct pipe_vertex_buffer *dst = &p->slot[i];
929 const struct pipe_vertex_buffer *src = buffers + i;
930
931 tc_assert(!src->is_user_buffer);
932 dst->stride = src->stride;
933 dst->is_user_buffer = false;
934 tc_set_resource_reference(&dst->buffer.resource,
935 src->buffer.resource);
936 dst->buffer_offset = src->buffer_offset;
937 }
938 } else {
939 struct tc_vertex_buffers *p =
940 tc_add_slot_based_call(tc, TC_CALL_set_vertex_buffers, tc_vertex_buffers, 0);
941 p->start = start;
942 p->count = count;
943 p->unbind = true;
944 }
945 }
946
947 struct tc_stream_outputs {
948 unsigned count;
949 struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
950 unsigned offsets[PIPE_MAX_SO_BUFFERS];
951 };
952
953 static void
954 tc_call_set_stream_output_targets(struct pipe_context *pipe, union tc_payload *payload)
955 {
956 struct tc_stream_outputs *p = (struct tc_stream_outputs *)payload;
957 unsigned count = p->count;
958
959 pipe->set_stream_output_targets(pipe, count, p->targets, p->offsets);
960 for (unsigned i = 0; i < count; i++)
961 pipe_so_target_reference(&p->targets[i], NULL);
962 }
963
964 static void
965 tc_set_stream_output_targets(struct pipe_context *_pipe,
966 unsigned count,
967 struct pipe_stream_output_target **tgs,
968 const unsigned *offsets)
969 {
970 struct threaded_context *tc = threaded_context(_pipe);
971 struct tc_stream_outputs *p =
972 tc_add_struct_typed_call(tc, TC_CALL_set_stream_output_targets,
973 tc_stream_outputs);
974
975 for (unsigned i = 0; i < count; i++) {
976 p->targets[i] = NULL;
977 pipe_so_target_reference(&p->targets[i], tgs[i]);
978 }
979 p->count = count;
980 memcpy(p->offsets, offsets, count * sizeof(unsigned));
981 }
982
983 static void
984 tc_set_compute_resources(struct pipe_context *_pipe, unsigned start,
985 unsigned count, struct pipe_surface **resources)
986 {
987 struct threaded_context *tc = threaded_context(_pipe);
988 struct pipe_context *pipe = tc->pipe;
989
990 tc_sync(tc);
991 pipe->set_compute_resources(pipe, start, count, resources);
992 }
993
994 static void
995 tc_set_global_binding(struct pipe_context *_pipe, unsigned first,
996 unsigned count, struct pipe_resource **resources,
997 uint32_t **handles)
998 {
999 struct threaded_context *tc = threaded_context(_pipe);
1000 struct pipe_context *pipe = tc->pipe;
1001
1002 tc_sync(tc);
1003 pipe->set_global_binding(pipe, first, count, resources, handles);
1004 }
1005
1006
1007 /********************************************************************
1008 * views
1009 */
1010
1011 static struct pipe_surface *
1012 tc_create_surface(struct pipe_context *_pipe,
1013 struct pipe_resource *resource,
1014 const struct pipe_surface *surf_tmpl)
1015 {
1016 struct pipe_context *pipe = threaded_context(_pipe)->pipe;
1017 struct pipe_surface *view =
1018 pipe->create_surface(pipe, resource, surf_tmpl);
1019
1020 if (view)
1021 view->context = _pipe;
1022 return view;
1023 }
1024
1025 static void
1026 tc_surface_destroy(struct pipe_context *_pipe,
1027 struct pipe_surface *surf)
1028 {
1029 struct pipe_context *pipe = threaded_context(_pipe)->pipe;
1030
1031 pipe->surface_destroy(pipe, surf);
1032 }
1033
1034 static struct pipe_sampler_view *
1035 tc_create_sampler_view(struct pipe_context *_pipe,
1036 struct pipe_resource *resource,
1037 const struct pipe_sampler_view *templ)
1038 {
1039 struct pipe_context *pipe = threaded_context(_pipe)->pipe;
1040 struct pipe_sampler_view *view =
1041 pipe->create_sampler_view(pipe, resource, templ);
1042
1043 if (view)
1044 view->context = _pipe;
1045 return view;
1046 }
1047
1048 static void
1049 tc_sampler_view_destroy(struct pipe_context *_pipe,
1050 struct pipe_sampler_view *view)
1051 {
1052 struct pipe_context *pipe = threaded_context(_pipe)->pipe;
1053
1054 pipe->sampler_view_destroy(pipe, view);
1055 }
1056
1057 static struct pipe_stream_output_target *
1058 tc_create_stream_output_target(struct pipe_context *_pipe,
1059 struct pipe_resource *res,
1060 unsigned buffer_offset,
1061 unsigned buffer_size)
1062 {
1063 struct pipe_context *pipe = threaded_context(_pipe)->pipe;
1064 struct threaded_resource *tres = threaded_resource(res);
1065 struct pipe_stream_output_target *view;
1066
1067 tc_sync(threaded_context(_pipe));
1068 util_range_add(&tres->valid_buffer_range, buffer_offset,
1069 buffer_offset + buffer_size);
1070
1071 view = pipe->create_stream_output_target(pipe, res, buffer_offset,
1072 buffer_size);
1073 if (view)
1074 view->context = _pipe;
1075 return view;
1076 }
1077
1078 static void
1079 tc_stream_output_target_destroy(struct pipe_context *_pipe,
1080 struct pipe_stream_output_target *target)
1081 {
1082 struct pipe_context *pipe = threaded_context(_pipe)->pipe;
1083
1084 pipe->stream_output_target_destroy(pipe, target);
1085 }
1086
1087
1088 /********************************************************************
1089 * bindless
1090 */
1091
1092 static uint64_t
1093 tc_create_texture_handle(struct pipe_context *_pipe,
1094 struct pipe_sampler_view *view,
1095 const struct pipe_sampler_state *state)
1096 {
1097 struct threaded_context *tc = threaded_context(_pipe);
1098 struct pipe_context *pipe = tc->pipe;
1099
1100 tc_sync(tc);
1101 return pipe->create_texture_handle(pipe, view, state);
1102 }
1103
1104 static void
1105 tc_call_delete_texture_handle(struct pipe_context *pipe,
1106 union tc_payload *payload)
1107 {
1108 pipe->delete_texture_handle(pipe, payload->handle);
1109 }
1110
1111 static void
1112 tc_delete_texture_handle(struct pipe_context *_pipe, uint64_t handle)
1113 {
1114 struct threaded_context *tc = threaded_context(_pipe);
1115 union tc_payload *payload =
1116 tc_add_small_call(tc, TC_CALL_delete_texture_handle);
1117
1118 payload->handle = handle;
1119 }
1120
1121 struct tc_make_texture_handle_resident
1122 {
1123 uint64_t handle;
1124 bool resident;
1125 };
1126
1127 static void
1128 tc_call_make_texture_handle_resident(struct pipe_context *pipe,
1129 union tc_payload *payload)
1130 {
1131 struct tc_make_texture_handle_resident *p =
1132 (struct tc_make_texture_handle_resident *)payload;
1133
1134 pipe->make_texture_handle_resident(pipe, p->handle, p->resident);
1135 }
1136
1137 static void
1138 tc_make_texture_handle_resident(struct pipe_context *_pipe, uint64_t handle,
1139 bool resident)
1140 {
1141 struct threaded_context *tc = threaded_context(_pipe);
1142 struct tc_make_texture_handle_resident *p =
1143 tc_add_struct_typed_call(tc, TC_CALL_make_texture_handle_resident,
1144 tc_make_texture_handle_resident);
1145
1146 p->handle = handle;
1147 p->resident = resident;
1148 }
1149
1150 static uint64_t
1151 tc_create_image_handle(struct pipe_context *_pipe,
1152 const struct pipe_image_view *image)
1153 {
1154 struct threaded_context *tc = threaded_context(_pipe);
1155 struct pipe_context *pipe = tc->pipe;
1156
1157 tc_sync(tc);
1158 return pipe->create_image_handle(pipe, image);
1159 }
1160
1161 static void
1162 tc_call_delete_image_handle(struct pipe_context *pipe,
1163 union tc_payload *payload)
1164 {
1165 pipe->delete_image_handle(pipe, payload->handle);
1166 }
1167
1168 static void
1169 tc_delete_image_handle(struct pipe_context *_pipe, uint64_t handle)
1170 {
1171 struct threaded_context *tc = threaded_context(_pipe);
1172 union tc_payload *payload =
1173 tc_add_small_call(tc, TC_CALL_delete_image_handle);
1174
1175 payload->handle = handle;
1176 }
1177
1178 struct tc_make_image_handle_resident
1179 {
1180 uint64_t handle;
1181 unsigned access;
1182 bool resident;
1183 };
1184
1185 static void
1186 tc_call_make_image_handle_resident(struct pipe_context *pipe,
1187 union tc_payload *payload)
1188 {
1189 struct tc_make_image_handle_resident *p =
1190 (struct tc_make_image_handle_resident *)payload;
1191
1192 pipe->make_image_handle_resident(pipe, p->handle, p->access, p->resident);
1193 }
1194
1195 static void
1196 tc_make_image_handle_resident(struct pipe_context *_pipe, uint64_t handle,
1197 unsigned access, bool resident)
1198 {
1199 struct threaded_context *tc = threaded_context(_pipe);
1200 struct tc_make_image_handle_resident *p =
1201 tc_add_struct_typed_call(tc, TC_CALL_make_image_handle_resident,
1202 tc_make_image_handle_resident);
1203
1204 p->handle = handle;
1205 p->access = access;
1206 p->resident = resident;
1207 }
1208
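/* Note on the bindless entry points above: create_texture_handle and
 * create_image_handle must return the 64-bit handle to the caller
 * immediately, so they sync and call the driver directly, whereas deletion
 * and residency changes return nothing and can be enqueued like any other
 * call.
 */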
1209
1210 /********************************************************************
1211 * transfer
1212 */
1213
1214 struct tc_replace_buffer_storage {
1215 struct pipe_resource *dst;
1216 struct pipe_resource *src;
1217 tc_replace_buffer_storage_func func;
1218 };
1219
1220 static void
1221 tc_call_replace_buffer_storage(struct pipe_context *pipe,
1222 union tc_payload *payload)
1223 {
1224 struct tc_replace_buffer_storage *p =
1225 (struct tc_replace_buffer_storage *)payload;
1226
1227 p->func(pipe, p->dst, p->src);
1228 pipe_resource_reference(&p->dst, NULL);
1229 pipe_resource_reference(&p->src, NULL);
1230 }
1231
1232 static bool
1233 tc_invalidate_buffer(struct threaded_context *tc,
1234 struct threaded_resource *tbuf)
1235 {
1236 /* We can't check if the buffer is idle, so we invalidate it
1237 * unconditionally. */
1238 struct pipe_screen *screen = tc->base.screen;
1239 struct pipe_resource *new_buf;
1240
1241 /* Shared, pinned, and sparse buffers can't be reallocated. */
1242 if (tbuf->is_shared ||
1243 tbuf->is_user_ptr ||
1244 tbuf->b.flags & PIPE_RESOURCE_FLAG_SPARSE)
1245 return false;
1246
1247 /* Allocate a new one. */
1248 new_buf = screen->resource_create(screen, &tbuf->b);
1249 if (!new_buf)
1250 return false;
1251
1252 /* Replace the "latest" pointer. */
1253 if (tbuf->latest != &tbuf->b)
1254 pipe_resource_reference(&tbuf->latest, NULL);
1255
1256 tbuf->latest = new_buf;
1257 util_range_set_empty(&tbuf->valid_buffer_range);
1258
1259 /* The valid range should point to the original buffer. */
1260 threaded_resource(new_buf)->base_valid_buffer_range =
1261 &tbuf->valid_buffer_range;
1262
1263 /* Enqueue storage replacement of the original buffer. */
1264 struct tc_replace_buffer_storage *p =
1265 tc_add_struct_typed_call(tc, TC_CALL_replace_buffer_storage,
1266 tc_replace_buffer_storage);
1267
1268 p->func = tc->replace_buffer_storage;
1269 tc_set_resource_reference(&p->dst, &tbuf->b);
1270 tc_set_resource_reference(&p->src, new_buf);
1271 return true;
1272 }
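
/* Illustrative driver-side counterpart (hypothetical "mydrv" names; the real
 * signature is tc_replace_buffer_storage_func): the callback runs on the
 * driver thread and makes "dst" alias the storage of "src":
 *
 *   static void
 *   mydrv_replace_buffer_storage(struct pipe_context *pipe,
 *                                struct pipe_resource *dst,
 *                                struct pipe_resource *src)
 *   {
 *      struct mydrv_resource *mdst = mydrv_resource(dst);
 *      struct mydrv_resource *msrc = mydrv_resource(src);
 *
 *      // Point dst at src's buffer object; dst's old storage is released.
 *      mydrv_bo_reference(&mdst->bo, msrc->bo);
 *   }
 */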
1273
1274 static unsigned
1275 tc_improve_map_buffer_flags(struct threaded_context *tc,
1276 struct threaded_resource *tres, unsigned usage,
1277 unsigned offset, unsigned size)
1278 {
1279 /* Sparse buffers can't be mapped directly and can't be reallocated
1280 * (fully invalidated). That may just be a radeonsi limitation, but
1281 * the threaded context must obey it with radeonsi.
1282 */
1283 if (tres->b.flags & PIPE_RESOURCE_FLAG_SPARSE) {
1284 /* We can use DISCARD_RANGE instead of full discard. This is the only
1285 * fast path for sparse buffers that doesn't need thread synchronization.
1286 */
1287 if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
1288 usage |= PIPE_TRANSFER_DISCARD_RANGE;
1289
1290 * Allow DISCARD_WHOLE_RESOURCE and inferring UNSYNCHRONIZED in drivers.
1291 * The threaded context doesn't do unsynchronized mappings and
1292 * invalidations of sparse buffers, therefore correct driver behavior
1293 * won't result in incorrect behavior with the threaded context.
1294 */
1295 return usage;
1296 }
1297
1298 /* Handle CPU reads trivially. */
1299 if (usage & PIPE_TRANSFER_READ) {
1300 /* Drivers aren't allowed to do buffer invalidations. */
1301 return (usage & ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) |
1302 TC_TRANSFER_MAP_NO_INVALIDATE |
1303 TC_TRANSFER_MAP_IGNORE_VALID_RANGE;
1304 }
1305
1306 /* See if the buffer range being mapped has never been initialized,
1307 * in which case it can be mapped unsynchronized. */
1308 if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
1309 !tres->is_shared &&
1310 !util_ranges_intersect(&tres->valid_buffer_range, offset, offset + size))
1311 usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
1312
1313 if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
1314 /* If discarding the entire range, discard the whole resource instead. */
1315 if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
1316 offset == 0 && size == tres->b.width0)
1317 usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
1318
1319 /* Discard the whole resource if needed. */
1320 if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
1321 if (tc_invalidate_buffer(tc, tres))
1322 usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
1323 else
1324 usage |= PIPE_TRANSFER_DISCARD_RANGE; /* fallback */
1325 }
1326 }
1327
1328 /* We won't need this flag anymore. */
1329 /* TODO: We might not need TC_TRANSFER_MAP_NO_INVALIDATE with this. */
1330 usage &= ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
1331
1332 /* GL_AMD_pinned_memory and persistent mappings can't use staging
1333 * buffers. */
1334 if (usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
1335 PIPE_TRANSFER_PERSISTENT) ||
1336 tres->is_user_ptr)
1337 usage &= ~PIPE_TRANSFER_DISCARD_RANGE;
1338
1339 /* Unsynchronized buffer mappings don't have to synchronize the thread. */
1340 if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
1341 usage |= TC_TRANSFER_MAP_THREADED_UNSYNC; /* notify the driver */
1342
1343 /* Never invalidate inside the driver and never infer "unsynchronized". */
1344 return usage |
1345 TC_TRANSFER_MAP_NO_INVALIDATE |
1346 TC_TRANSFER_MAP_IGNORE_VALID_RANGE;
1347 }
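
/* Worked example (illustrative): a PIPE_TRANSFER_WRITE | DISCARD_RANGE map
 * of bytes [256, 512) of a non-shared, non-sparse buffer whose valid range
 * doesn't intersect [256, 512) gains PIPE_TRANSFER_UNSYNCHRONIZED, loses
 * DISCARD_RANGE (unsynchronized maps don't need a staging copy), and comes
 * back with TC_TRANSFER_MAP_THREADED_UNSYNC, TC_TRANSFER_MAP_NO_INVALIDATE
 * and TC_TRANSFER_MAP_IGNORE_VALID_RANGE set, i.e. no thread sync needed.
 */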
1348
1349 static void *
1350 tc_transfer_map(struct pipe_context *_pipe,
1351 struct pipe_resource *resource, unsigned level,
1352 unsigned usage, const struct pipe_box *box,
1353 struct pipe_transfer **transfer)
1354 {
1355 struct threaded_context *tc = threaded_context(_pipe);
1356 struct threaded_resource *tres = threaded_resource(resource);
1357 struct pipe_context *pipe = tc->pipe;
1358
1359 if (resource->target == PIPE_BUFFER) {
1360 usage = tc_improve_map_buffer_flags(tc, tres, usage, box->x, box->width);
1361
1362 /* Do a staging transfer within the threaded context. The driver should
1363 * only get resource_copy_region.
1364 */
1365 if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
1366 struct threaded_transfer *ttrans = slab_alloc(&tc->pool_transfers);
1367 uint8_t *map;
1368
1369 ttrans->staging = NULL;
1370
1371 u_upload_alloc(tc->base.stream_uploader, 0,
1372 box->width + (box->x % tc->map_buffer_alignment),
1373 64, &ttrans->offset, &ttrans->staging, (void**)&map);
1374 if (!map) {
1375 slab_free(&tc->pool_transfers, ttrans);
1376 return NULL;
1377 }
1378
1379 tc_set_resource_reference(&ttrans->b.resource, resource);
1380 ttrans->b.level = 0;
1381 ttrans->b.usage = usage;
1382 ttrans->b.box = *box;
1383 ttrans->b.stride = 0;
1384 ttrans->b.layer_stride = 0;
1385 *transfer = &ttrans->b;
1386 return map + (box->x % tc->map_buffer_alignment);
1387 }
1388 }
1389
1390 /* Unsynchronized buffer mappings don't have to synchronize the thread. */
1391 if (!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC))
1392 tc_sync_msg(tc, resource->target != PIPE_BUFFER ? " texture" :
1393 usage & PIPE_TRANSFER_DISCARD_RANGE ? " discard_range" :
1394 usage & PIPE_TRANSFER_READ ? " read" : " ??");
1395
1396 return pipe->transfer_map(pipe, tres->latest ? tres->latest : resource,
1397 level, usage, box, transfer);
1398 }
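
/* Usage sketch (illustrative; "ctx" and "buf" are hypothetical): a
 * write-discard buffer map never syncs the driver thread; it is either
 * promoted to an unsynchronized map or served by the staging path above,
 * in which case the unmap enqueues a copy back into the buffer:
 *
 *   struct pipe_transfer *t;
 *   struct pipe_box box;
 *
 *   u_box_1d(0, 16, &box);
 *   uint32_t *map = ctx->transfer_map(ctx, buf, 0,
 *                                     PIPE_TRANSFER_WRITE |
 *                                     PIPE_TRANSFER_DISCARD_RANGE,
 *                                     &box, &t);
 *   map[0] = 1;
 *   ctx->transfer_unmap(ctx, t);
 */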
1399
1400 struct tc_transfer_flush_region {
1401 struct pipe_transfer *transfer;
1402 struct pipe_box box;
1403 };
1404
1405 static void
1406 tc_call_transfer_flush_region(struct pipe_context *pipe,
1407 union tc_payload *payload)
1408 {
1409 struct tc_transfer_flush_region *p =
1410 (struct tc_transfer_flush_region *)payload;
1411
1412 pipe->transfer_flush_region(pipe, p->transfer, &p->box);
1413 }
1414
1415 struct tc_resource_copy_region {
1416 struct pipe_resource *dst;
1417 unsigned dst_level;
1418 unsigned dstx, dsty, dstz;
1419 struct pipe_resource *src;
1420 unsigned src_level;
1421 struct pipe_box src_box;
1422 };
1423
1424 static void
1425 tc_resource_copy_region(struct pipe_context *_pipe,
1426 struct pipe_resource *dst, unsigned dst_level,
1427 unsigned dstx, unsigned dsty, unsigned dstz,
1428 struct pipe_resource *src, unsigned src_level,
1429 const struct pipe_box *src_box);
1430
1431 static void
1432 tc_buffer_do_flush_region(struct threaded_context *tc,
1433 struct threaded_transfer *ttrans,
1434 const struct pipe_box *box)
1435 {
1436 struct threaded_resource *tres = threaded_resource(ttrans->b.resource);
1437
1438 if (ttrans->staging) {
1439 struct pipe_box src_box;
1440
1441 u_box_1d(ttrans->offset + box->x % tc->map_buffer_alignment,
1442 box->width, &src_box);
1443
1444 /* Copy the staging buffer into the original one. */
1445 tc_resource_copy_region(&tc->base, ttrans->b.resource, 0, box->x, 0, 0,
1446 ttrans->staging, 0, &src_box);
1447 }
1448
1449 util_range_add(tres->base_valid_buffer_range, box->x, box->x + box->width);
1450 }
1451
1452 static void
1453 tc_transfer_flush_region(struct pipe_context *_pipe,
1454 struct pipe_transfer *transfer,
1455 const struct pipe_box *rel_box)
1456 {
1457 struct threaded_context *tc = threaded_context(_pipe);
1458 struct threaded_transfer *ttrans = threaded_transfer(transfer);
1459 struct threaded_resource *tres = threaded_resource(transfer->resource);
1460 unsigned required_usage = PIPE_TRANSFER_WRITE |
1461 PIPE_TRANSFER_FLUSH_EXPLICIT;
1462
1463 if (tres->b.target == PIPE_BUFFER) {
1464 if ((transfer->usage & required_usage) == required_usage) {
1465 struct pipe_box box;
1466
1467 u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
1468 tc_buffer_do_flush_region(tc, ttrans, &box);
1469 }
1470
1471 /* Staging transfers don't send the call to the driver. */
1472 if (ttrans->staging)
1473 return;
1474 }
1475
1476 struct tc_transfer_flush_region *p =
1477 tc_add_struct_typed_call(tc, TC_CALL_transfer_flush_region,
1478 tc_transfer_flush_region);
1479 p->transfer = transfer;
1480 p->box = *rel_box;
1481 }
1482
1483 static void
1484 tc_call_transfer_unmap(struct pipe_context *pipe, union tc_payload *payload)
1485 {
1486 pipe->transfer_unmap(pipe, payload->transfer);
1487 }
1488
1489 static void
1490 tc_transfer_unmap(struct pipe_context *_pipe, struct pipe_transfer *transfer)
1491 {
1492 struct threaded_context *tc = threaded_context(_pipe);
1493 struct threaded_transfer *ttrans = threaded_transfer(transfer);
1494 struct threaded_resource *tres = threaded_resource(transfer->resource);
1495
1496 if (tres->b.target == PIPE_BUFFER) {
1497 if (transfer->usage & PIPE_TRANSFER_WRITE &&
1498 !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
1499 tc_buffer_do_flush_region(tc, ttrans, &transfer->box);
1500
1501 /* Staging transfers don't send the call to the driver. */
1502 if (ttrans->staging) {
1503 pipe_resource_reference(&ttrans->staging, NULL);
1504 pipe_resource_reference(&ttrans->b.resource, NULL);
1505 slab_free(&tc->pool_transfers, ttrans);
1506 return;
1507 }
1508 }
1509
1510 tc_add_small_call(tc, TC_CALL_transfer_unmap)->transfer = transfer;
1511 }
1512
1513 struct tc_buffer_subdata {
1514 struct pipe_resource *resource;
1515 unsigned usage, offset, size;
1516 char slot[0]; /* more will be allocated if needed */
1517 };
1518
1519 static void
1520 tc_call_buffer_subdata(struct pipe_context *pipe, union tc_payload *payload)
1521 {
1522 struct tc_buffer_subdata *p = (struct tc_buffer_subdata *)payload;
1523
1524 pipe->buffer_subdata(pipe, p->resource, p->usage, p->offset, p->size,
1525 p->slot);
1526 pipe_resource_reference(&p->resource, NULL);
1527 }
1528
1529 static void
1530 tc_buffer_subdata(struct pipe_context *_pipe,
1531 struct pipe_resource *resource,
1532 unsigned usage, unsigned offset,
1533 unsigned size, const void *data)
1534 {
1535 struct threaded_context *tc = threaded_context(_pipe);
1536 struct threaded_resource *tres = threaded_resource(resource);
1537
1538 if (!size)
1539 return;
1540
1541 usage |= PIPE_TRANSFER_WRITE |
1542 PIPE_TRANSFER_DISCARD_RANGE;
1543
1544 usage = tc_improve_map_buffer_flags(tc, tres, usage, offset, size);
1545
1546 /* Unsynchronized and big transfers should use transfer_map. Also handle
1547 * full invalidations, because drivers aren't allowed to do them.
1548 */
1549 if (usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
1550 PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) ||
1551 size > TC_MAX_SUBDATA_BYTES) {
1552 struct pipe_transfer *transfer;
1553 struct pipe_box box;
1554 uint8_t *map = NULL;
1555
1556 u_box_1d(offset, size, &box);
1557
1558 map = tc_transfer_map(_pipe, resource, 0, usage, &box, &transfer);
1559 if (map) {
1560 memcpy(map, data, size);
1561 tc_transfer_unmap(_pipe, transfer);
1562 }
1563 return;
1564 }
1565
1566 util_range_add(&tres->valid_buffer_range, offset, offset + size);
1567
1568 /* The upload is small. Enqueue it. */
1569 struct tc_buffer_subdata *p =
1570 tc_add_slot_based_call(tc, TC_CALL_buffer_subdata, tc_buffer_subdata, size);
1571
1572 tc_set_resource_reference(&p->resource, resource);
1573 p->usage = usage;
1574 p->offset = offset;
1575 p->size = size;
1576 memcpy(p->slot, data, size);
1577 }
1578
1579 struct tc_texture_subdata {
1580 struct pipe_resource *resource;
1581 unsigned level, usage, stride, layer_stride;
1582 struct pipe_box box;
1583 char slot[0]; /* more will be allocated if needed */
1584 };
1585
1586 static void
1587 tc_call_texture_subdata(struct pipe_context *pipe, union tc_payload *payload)
1588 {
1589 struct tc_texture_subdata *p = (struct tc_texture_subdata *)payload;
1590
1591 pipe->texture_subdata(pipe, p->resource, p->level, p->usage, &p->box,
1592 p->slot, p->stride, p->layer_stride);
1593 pipe_resource_reference(&p->resource, NULL);
1594 }
1595
1596 static void
1597 tc_texture_subdata(struct pipe_context *_pipe,
1598 struct pipe_resource *resource,
1599 unsigned level, unsigned usage,
1600 const struct pipe_box *box,
1601 const void *data, unsigned stride,
1602 unsigned layer_stride)
1603 {
1604 struct threaded_context *tc = threaded_context(_pipe);
1605 unsigned size;
1606
1607 assert(box->height >= 1);
1608 assert(box->depth >= 1);
1609
1610 size = (box->depth - 1) * layer_stride +
1611 (box->height - 1) * stride +
1612 box->width * util_format_get_blocksize(resource->format);
1613 if (!size)
1614 return;
1615
1616 /* Small uploads can be enqueued, big uploads must sync. */
1617 if (size <= TC_MAX_SUBDATA_BYTES) {
1618 struct tc_texture_subdata *p =
1619 tc_add_slot_based_call(tc, TC_CALL_texture_subdata, tc_texture_subdata, size);
1620
1621 tc_set_resource_reference(&p->resource, resource);
1622 p->level = level;
1623 p->usage = usage;
1624 p->box = *box;
1625 p->stride = stride;
1626 p->layer_stride = layer_stride;
1627 memcpy(p->slot, data, size);
1628 } else {
1629 struct pipe_context *pipe = tc->pipe;
1630
1631 tc_sync(tc);
1632 pipe->texture_subdata(pipe, resource, level, usage, box, data,
1633 stride, layer_stride);
1634 }
1635 }
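
/* Worked example (illustrative): a 4x4 PIPE_FORMAT_R8G8B8A8_UNORM upload
 * with stride = 16 and layer_stride = 0 gives
 * size = (1 - 1) * 0 + (4 - 1) * 16 + 4 * 4 = 64 bytes, well under
 * TC_MAX_SUBDATA_BYTES, so it is enqueued instead of syncing.
 */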
1636
1637
1638 /********************************************************************
1639 * miscellaneous
1640 */
1641
1642 #define TC_FUNC_SYNC_RET0(ret_type, func) \
1643 static ret_type \
1644 tc_##func(struct pipe_context *_pipe) \
1645 { \
1646 struct threaded_context *tc = threaded_context(_pipe); \
1647 struct pipe_context *pipe = tc->pipe; \
1648 tc_sync(tc); \
1649 return pipe->func(pipe); \
1650 }
1651
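/* Illustrative expansion: TC_FUNC_SYNC_RET0(uint64_t, get_timestamp) defines
 * the following synchronous wrapper:
 *
 *   static uint64_t
 *   tc_get_timestamp(struct pipe_context *_pipe)
 *   {
 *      struct threaded_context *tc = threaded_context(_pipe);
 *      struct pipe_context *pipe = tc->pipe;
 *      tc_sync(tc);
 *      return pipe->get_timestamp(pipe);
 *   }
 */
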
1652 TC_FUNC_SYNC_RET0(enum pipe_reset_status, get_device_reset_status)
1653 TC_FUNC_SYNC_RET0(uint64_t, get_timestamp)
1654
1655 static void
1656 tc_get_sample_position(struct pipe_context *_pipe,
1657 unsigned sample_count, unsigned sample_index,
1658 float *out_value)
1659 {
1660 struct threaded_context *tc = threaded_context(_pipe);
1661 struct pipe_context *pipe = tc->pipe;
1662
1663 tc_sync(tc);
1664 pipe->get_sample_position(pipe, sample_count, sample_index,
1665 out_value);
1666 }
1667
1668 static void
1669 tc_set_device_reset_callback(struct pipe_context *_pipe,
1670 const struct pipe_device_reset_callback *cb)
1671 {
1672 struct threaded_context *tc = threaded_context(_pipe);
1673 struct pipe_context *pipe = tc->pipe;
1674
1675 tc_sync(tc);
1676 pipe->set_device_reset_callback(pipe, cb);
1677 }
1678
1679 struct tc_string_marker {
1680 int len;
1681 char slot[0]; /* more will be allocated if needed */
1682 };
1683
1684 static void
1685 tc_call_emit_string_marker(struct pipe_context *pipe, union tc_payload *payload)
1686 {
1687 struct tc_string_marker *p = (struct tc_string_marker *)payload;
1688 pipe->emit_string_marker(pipe, p->slot, p->len);
1689 }
1690
1691 static void
1692 tc_emit_string_marker(struct pipe_context *_pipe,
1693 const char *string, int len)
1694 {
1695 struct threaded_context *tc = threaded_context(_pipe);
1696
1697 if (len <= TC_MAX_STRING_MARKER_BYTES) {
1698 struct tc_string_marker *p =
1699 tc_add_slot_based_call(tc, TC_CALL_emit_string_marker, tc_string_marker, len);
1700
1701 memcpy(p->slot, string, len);
1702 p->len = len;
1703 } else {
1704 struct pipe_context *pipe = tc->pipe;
1705
1706 tc_sync(tc);
1707 pipe->emit_string_marker(pipe, string, len);
1708 }
1709 }
1710
1711 static void
1712 tc_dump_debug_state(struct pipe_context *_pipe, FILE *stream,
1713 unsigned flags)
1714 {
1715 struct threaded_context *tc = threaded_context(_pipe);
1716 struct pipe_context *pipe = tc->pipe;
1717
1718 tc_sync(tc);
1719 pipe->dump_debug_state(pipe, stream, flags);
1720 }
1721
1722 static void
1723 tc_set_debug_callback(struct pipe_context *_pipe,
1724 const struct pipe_debug_callback *cb)
1725 {
1726 struct threaded_context *tc = threaded_context(_pipe);
1727 struct pipe_context *pipe = tc->pipe;
1728
1729 /* Drop all synchronous debug callbacks. Drivers are expected to be OK
1730 * with this. shader-db will use an environment variable to disable
1731 * the threaded context.
1732 */
1733 if (cb && cb->debug_message && !cb->async)
1734 return;
1735
1736 tc_sync(tc);
1737 pipe->set_debug_callback(pipe, cb);
1738 }
1739
1740 static void
1741 tc_create_fence_fd(struct pipe_context *_pipe,
1742 struct pipe_fence_handle **fence, int fd)
1743 {
1744 struct threaded_context *tc = threaded_context(_pipe);
1745 struct pipe_context *pipe = tc->pipe;
1746
1747 tc_sync(tc);
1748 pipe->create_fence_fd(pipe, fence, fd);
1749 }
1750
1751 static void
1752 tc_fence_server_sync(struct pipe_context *_pipe,
1753 struct pipe_fence_handle *fence)
1754 {
1755 struct threaded_context *tc = threaded_context(_pipe);
1756 struct pipe_context *pipe = tc->pipe;
1757
1758 tc_sync(tc);
1759 pipe->fence_server_sync(pipe, fence);
1760 }
1761
1762 static struct pipe_video_codec *
1763 tc_create_video_codec(struct pipe_context *_pipe,
1764 const struct pipe_video_codec *templ)
1765 {
1766 unreachable("Threaded context should not be enabled for video APIs");
1767 return NULL;
1768 }
1769
1770 static struct pipe_video_buffer *
1771 tc_create_video_buffer(struct pipe_context *_pipe,
1772 const struct pipe_video_buffer *templ)
1773 {
1774 unreachable("Threaded context should not be enabled for video APIs");
1775 return NULL;
1776 }
1777
1778
1779 /********************************************************************
1780 * draw, launch, clear, blit, copy, flush
1781 */
1782
1783 static void
1784 tc_flush(struct pipe_context *_pipe, struct pipe_fence_handle **fence,
1785 unsigned flags)
1786 {
1787 struct threaded_context *tc = threaded_context(_pipe);
1788 struct pipe_context *pipe = tc->pipe;
1789 struct threaded_query *tq, *tmp;
1790
1791 LIST_FOR_EACH_ENTRY_SAFE(tq, tmp, &tc->unflushed_queries, head_unflushed) {
1792 tq->flushed = true;
1793 LIST_DEL(&tq->head_unflushed);
1794 }
1795
1796 /* TODO: deferred flushes? */
1797 tc_sync_msg(tc, flags & PIPE_FLUSH_END_OF_FRAME ? "end of frame" :
1798 flags & PIPE_FLUSH_DEFERRED ? "deferred fence" : "normal");
1799 pipe->flush(pipe, fence, flags);
1800 }
1801
1802 /* This is actually variable-sized, because indirect isn't allocated if it's
1803 * not needed. */
1804 struct tc_full_draw_info {
1805 struct pipe_draw_info draw;
1806 struct pipe_draw_indirect_info indirect;
1807 };
1808
1809 static void
1810 tc_call_draw_vbo(struct pipe_context *pipe, union tc_payload *payload)
1811 {
1812 struct tc_full_draw_info *info = (struct tc_full_draw_info*)payload;
1813
1814 pipe->draw_vbo(pipe, &info->draw);
1815 pipe_so_target_reference(&info->draw.count_from_stream_output, NULL);
1816 if (info->draw.index_size)
1817 pipe_resource_reference(&info->draw.index.resource, NULL);
1818 if (info->draw.indirect) {
1819 pipe_resource_reference(&info->indirect.buffer, NULL);
1820 pipe_resource_reference(&info->indirect.indirect_draw_count, NULL);
1821 }
1822 }
1823
1824 static struct tc_full_draw_info *
1825 tc_add_draw_vbo(struct pipe_context *_pipe, bool indirect)
1826 {
1827 return (struct tc_full_draw_info*)
1828 tc_add_sized_call(threaded_context(_pipe), TC_CALL_draw_vbo,
1829 indirect ? sizeof(struct tc_full_draw_info) :
1830 sizeof(struct pipe_draw_info));
1831 }
1832
static void
tc_draw_vbo(struct pipe_context *_pipe, const struct pipe_draw_info *info)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_draw_indirect_info *indirect = info->indirect;
   unsigned index_size = info->index_size;
   bool has_user_indices = info->has_user_indices;

   if (index_size && has_user_indices) {
      unsigned size = info->count * index_size;
      struct pipe_resource *buffer = NULL;
      unsigned offset;

      tc_assert(!indirect);

      /* This must be done before adding draw_vbo, because it could generate
       * e.g. transfer_unmap and flush partially-uninitialized draw_vbo
       * to the driver if it was done afterwards.
       */
      u_upload_data(tc->base.stream_uploader, 0, size, 4, info->index.user,
                    &offset, &buffer);
      if (unlikely(!buffer))
         return;

      struct tc_full_draw_info *p = tc_add_draw_vbo(_pipe, false);
      p->draw.count_from_stream_output = NULL;
      pipe_so_target_reference(&p->draw.count_from_stream_output,
                               info->count_from_stream_output);
      memcpy(&p->draw, info, sizeof(*info));
      p->draw.has_user_indices = false;
      p->draw.index.resource = buffer;
      p->draw.start = offset / index_size;
   } else {
      /* Non-indexed call or indexed with a real index buffer. */
      struct tc_full_draw_info *p = tc_add_draw_vbo(_pipe, indirect != NULL);
      p->draw.count_from_stream_output = NULL;
      pipe_so_target_reference(&p->draw.count_from_stream_output,
                               info->count_from_stream_output);
      if (index_size) {
         tc_set_resource_reference(&p->draw.index.resource,
                                   info->index.resource);
      }
      memcpy(&p->draw, info, sizeof(*info));

      if (indirect) {
         /* Take the references on the local copy; p->draw.indirect still
          * points into the caller's (const) draw info at this point.
          */
         tc_set_resource_reference(&p->indirect.buffer, indirect->buffer);
         tc_set_resource_reference(&p->indirect.indirect_draw_count,
                                   indirect->indirect_draw_count);
         memcpy(&p->indirect, indirect, sizeof(*indirect));
         p->draw.indirect = &p->indirect;
      }
   }
}

static void
tc_call_launch_grid(struct pipe_context *pipe, union tc_payload *payload)
{
   struct pipe_grid_info *p = (struct pipe_grid_info *)payload;

   pipe->launch_grid(pipe, p);
   pipe_resource_reference(&p->indirect, NULL);
}

static void
tc_launch_grid(struct pipe_context *_pipe,
               const struct pipe_grid_info *info)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_grid_info *p = tc_add_struct_typed_call(tc, TC_CALL_launch_grid,
                                                       pipe_grid_info);
   assert(info->input == NULL);

   tc_set_resource_reference(&p->indirect, info->indirect);
   memcpy(p, info, sizeof(*info));
}

static void
tc_call_resource_copy_region(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_resource_copy_region *p = (struct tc_resource_copy_region *)payload;

   pipe->resource_copy_region(pipe, p->dst, p->dst_level, p->dstx, p->dsty,
                              p->dstz, p->src, p->src_level, &p->src_box);
   pipe_resource_reference(&p->dst, NULL);
   pipe_resource_reference(&p->src, NULL);
}

static void
tc_resource_copy_region(struct pipe_context *_pipe,
                        struct pipe_resource *dst, unsigned dst_level,
                        unsigned dstx, unsigned dsty, unsigned dstz,
                        struct pipe_resource *src, unsigned src_level,
                        const struct pipe_box *src_box)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_resource *tdst = threaded_resource(dst);
   struct tc_resource_copy_region *p =
      tc_add_struct_typed_call(tc, TC_CALL_resource_copy_region,
                               tc_resource_copy_region);

   tc_set_resource_reference(&p->dst, dst);
   p->dst_level = dst_level;
   p->dstx = dstx;
   p->dsty = dsty;
   p->dstz = dstz;
   tc_set_resource_reference(&p->src, src);
   p->src_level = src_level;
   p->src_box = *src_box;

   if (dst->target == PIPE_BUFFER)
      util_range_add(&tdst->valid_buffer_range, dstx, dstx + src_box->width);
}

static void
tc_call_blit(struct pipe_context *pipe, union tc_payload *payload)
{
   struct pipe_blit_info *blit = (struct pipe_blit_info*)payload;

   pipe->blit(pipe, blit);
   pipe_resource_reference(&blit->dst.resource, NULL);
   pipe_resource_reference(&blit->src.resource, NULL);
}

static void
tc_blit(struct pipe_context *_pipe, const struct pipe_blit_info *info)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_blit_info *blit =
      tc_add_struct_typed_call(tc, TC_CALL_blit, pipe_blit_info);

   tc_set_resource_reference(&blit->dst.resource, info->dst.resource);
   tc_set_resource_reference(&blit->src.resource, info->src.resource);
   memcpy(blit, info, sizeof(*info));
}

struct tc_generate_mipmap {
   struct pipe_resource *res;
   enum pipe_format format;
   unsigned base_level;
   unsigned last_level;
   unsigned first_layer;
   unsigned last_layer;
};

static void
tc_call_generate_mipmap(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_generate_mipmap *p = (struct tc_generate_mipmap *)payload;
   bool result = pipe->generate_mipmap(pipe, p->res, p->format, p->base_level,
                                       p->last_level, p->first_layer,
                                       p->last_layer);
   assert(result);
   (void)result; /* silence unused-variable warnings when asserts are off */
   pipe_resource_reference(&p->res, NULL);
}

static boolean
tc_generate_mipmap(struct pipe_context *_pipe,
                   struct pipe_resource *res,
                   enum pipe_format format,
                   unsigned base_level,
                   unsigned last_level,
                   unsigned first_layer,
                   unsigned last_layer)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;
   struct pipe_screen *screen = pipe->screen;
   unsigned bind;

   if (util_format_is_depth_or_stencil(format))
      bind = PIPE_BIND_DEPTH_STENCIL;
   else
      bind = PIPE_BIND_RENDER_TARGET;

   if (!screen->is_format_supported(screen, format, res->target,
                                    res->nr_samples, bind))
      return false;

   struct tc_generate_mipmap *p =
      tc_add_struct_typed_call(tc, TC_CALL_generate_mipmap, tc_generate_mipmap);

   tc_set_resource_reference(&p->res, res);
   p->format = format;
   p->base_level = base_level;
   p->last_level = last_level;
   p->first_layer = first_layer;
   p->last_layer = last_layer;
   return true;
}

static void
tc_call_flush_resource(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->flush_resource(pipe, payload->resource);
   pipe_resource_reference(&payload->resource, NULL);
}

static void
tc_flush_resource(struct pipe_context *_pipe,
                  struct pipe_resource *resource)
{
   struct threaded_context *tc = threaded_context(_pipe);
   union tc_payload *payload = tc_add_small_call(tc, TC_CALL_flush_resource);

   tc_set_resource_reference(&payload->resource, resource);
}

static void
tc_call_invalidate_resource(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->invalidate_resource(pipe, payload->resource);
   pipe_resource_reference(&payload->resource, NULL);
}

static void
tc_invalidate_resource(struct pipe_context *_pipe,
                       struct pipe_resource *resource)
{
   struct threaded_context *tc = threaded_context(_pipe);

   if (resource->target == PIPE_BUFFER) {
      tc_invalidate_buffer(tc, threaded_resource(resource));
      return;
   }

   union tc_payload *payload = tc_add_small_call(tc, TC_CALL_invalidate_resource);
   tc_set_resource_reference(&payload->resource, resource);
}

struct tc_clear {
   unsigned buffers;
   union pipe_color_union color;
   double depth;
   unsigned stencil;
};

static void
tc_call_clear(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_clear *p = (struct tc_clear *)payload;
   pipe->clear(pipe, p->buffers, &p->color, p->depth, p->stencil);
}

static void
tc_clear(struct pipe_context *_pipe, unsigned buffers,
         const union pipe_color_union *color, double depth,
         unsigned stencil)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_clear *p = tc_add_struct_typed_call(tc, TC_CALL_clear, tc_clear);

   p->buffers = buffers;
   p->color = *color;
   p->depth = depth;
   p->stencil = stencil;
}

static void
tc_clear_render_target(struct pipe_context *_pipe,
                       struct pipe_surface *dst,
                       const union pipe_color_union *color,
                       unsigned dstx, unsigned dsty,
                       unsigned width, unsigned height,
                       bool render_condition_enabled)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->clear_render_target(pipe, dst, color, dstx, dsty, width, height,
                             render_condition_enabled);
}

static void
tc_clear_depth_stencil(struct pipe_context *_pipe,
                       struct pipe_surface *dst, unsigned clear_flags,
                       double depth, unsigned stencil, unsigned dstx,
                       unsigned dsty, unsigned width, unsigned height,
                       bool render_condition_enabled)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->clear_depth_stencil(pipe, dst, clear_flags, depth, stencil,
                             dstx, dsty, width, height,
                             render_condition_enabled);
}

struct tc_clear_buffer {
   struct pipe_resource *res;
   unsigned offset;
   unsigned size;
   char clear_value[16];
   int clear_value_size;
};

static void
tc_call_clear_buffer(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_clear_buffer *p = (struct tc_clear_buffer *)payload;

   pipe->clear_buffer(pipe, p->res, p->offset, p->size, p->clear_value,
                      p->clear_value_size);
   pipe_resource_reference(&p->res, NULL);
}

static void
tc_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
                unsigned offset, unsigned size,
                const void *clear_value, int clear_value_size)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_resource *tres = threaded_resource(res);
   struct tc_clear_buffer *p =
      tc_add_struct_typed_call(tc, TC_CALL_clear_buffer, tc_clear_buffer);

   /* The payload stores the clear value inline. */
   assert(clear_value_size <= (int)sizeof(p->clear_value));

   tc_set_resource_reference(&p->res, res);
   p->offset = offset;
   p->size = size;
   memcpy(p->clear_value, clear_value, clear_value_size);
   p->clear_value_size = clear_value_size;

   util_range_add(&tres->valid_buffer_range, offset, offset + size);
}

struct tc_clear_texture {
   struct pipe_resource *res;
   unsigned level;
   struct pipe_box box;
   char data[16];
};

static void
tc_call_clear_texture(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_clear_texture *p = (struct tc_clear_texture *)payload;

   pipe->clear_texture(pipe, p->res, p->level, &p->box, p->data);
   pipe_resource_reference(&p->res, NULL);
}

static void
tc_clear_texture(struct pipe_context *_pipe, struct pipe_resource *res,
                 unsigned level, const struct pipe_box *box, const void *data)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_clear_texture *p =
      tc_add_struct_typed_call(tc, TC_CALL_clear_texture, tc_clear_texture);

   tc_set_resource_reference(&p->res, res);
   p->level = level;
   p->box = *box;
   memcpy(p->data, data,
          util_format_get_blocksize(res->format));
}

struct tc_resource_commit {
   struct pipe_resource *res;
   unsigned level;
   struct pipe_box box;
   bool commit;
};

static void
tc_call_resource_commit(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_resource_commit *p = (struct tc_resource_commit *)payload;

   pipe->resource_commit(pipe, p->res, p->level, &p->box, p->commit);
   pipe_resource_reference(&p->res, NULL);
}

static bool
tc_resource_commit(struct pipe_context *_pipe, struct pipe_resource *res,
                   unsigned level, struct pipe_box *box, bool commit)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_resource_commit *p =
      tc_add_struct_typed_call(tc, TC_CALL_resource_commit, tc_resource_commit);

   tc_set_resource_reference(&p->res, res);
   p->level = level;
   p->box = *box;
   p->commit = commit;
   return true; /* we don't care about the return value for this call */
}


/********************************************************************
 * create & destroy
 */

static void
tc_destroy(struct pipe_context *_pipe)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);

   if (util_queue_is_initialized(&tc->queue)) {
      util_queue_destroy(&tc->queue);

      for (unsigned i = 0; i < TC_MAX_BATCHES; i++)
         util_queue_fence_destroy(&tc->batch_slots[i].fence);
   }

   if (tc->base.const_uploader &&
       tc->base.stream_uploader != tc->base.const_uploader)
      u_upload_destroy(tc->base.const_uploader);

   if (tc->base.stream_uploader)
      u_upload_destroy(tc->base.stream_uploader);

   slab_destroy_child(&tc->pool_transfers);
   pipe->destroy(pipe);
   os_free_aligned(tc);
}

static const tc_execute execute_func[TC_NUM_CALLS] = {
#define CALL(name) tc_call_##name,
#include "u_threaded_context_calls.h"
#undef CALL
};
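
/* For example, CALL(draw_vbo) in u_threaded_context_calls.h adds
 * "tc_call_draw_vbo," to this table at index TC_CALL_draw_vbo, so the table
 * stays in sync with enum tc_call_id by construction.
 */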

/**
 * Wrap an existing pipe_context into a threaded_context.
 *
 * \param pipe   pipe_context to wrap
 * \param parent_transfer_pool  parent slab pool set up for creating
 *                              pipe_transfer objects; the driver should have
 *                              one in pipe_screen.
 * \param replace_buffer  callback for replacing a pipe_resource's storage
 *                        with another pipe_resource's storage.
 * \param out  if non-NULL and wrapping succeeds, the threaded_context is
 *             also returned here in addition to the return value.
 */
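/* Usage sketch (illustrative, with hypothetical my_* driver names): a driver
 * typically wraps its real context as the last step of context creation,
 * e.g.
 *
 *    struct pipe_context *ctx = my_driver_create_context(screen, priv);
 *    return threaded_context_create(ctx, &my_screen->transfer_pool,
 *                                   my_replace_buffer_storage, NULL);
 *
 * If threading is disabled (e.g. GALLIUM_THREAD=0 or a single-CPU system),
 * the unwrapped context is returned; on failure the wrapped context is
 * destroyed and NULL is returned, so callers can use the result directly.
 */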
struct pipe_context *
threaded_context_create(struct pipe_context *pipe,
                        struct slab_parent_pool *parent_transfer_pool,
                        tc_replace_buffer_storage_func replace_buffer,
                        struct threaded_context **out)
{
   struct threaded_context *tc;

   STATIC_ASSERT(sizeof(union tc_payload) <= 8);
   STATIC_ASSERT(sizeof(struct tc_call) <= 16);

   if (!pipe)
      return NULL;

   util_cpu_detect();

   if (!debug_get_bool_option("GALLIUM_THREAD", util_cpu_caps.nr_cpus > 1))
      return pipe;

   tc = os_malloc_aligned(sizeof(struct threaded_context), 16);
   if (!tc) {
      pipe->destroy(pipe);
      return NULL;
   }
   memset(tc, 0, sizeof(*tc));

   assert((uintptr_t)tc % 16 == 0);
   /* These should be static asserts, but they don't work with MSVC */
   assert(offsetof(struct threaded_context, batch_slots) % 16 == 0);
   assert(offsetof(struct threaded_context, batch_slots[0].call) % 16 == 0);
   assert(offsetof(struct threaded_context, batch_slots[0].call[1]) % 16 == 0);
   assert(offsetof(struct threaded_context, batch_slots[1].call) % 16 == 0);

   /* The driver context isn't wrapped, so set its "priv" to NULL. */
   pipe->priv = NULL;

   tc->pipe = pipe;
   tc->replace_buffer_storage = replace_buffer;
   tc->map_buffer_alignment =
      pipe->screen->get_param(pipe->screen, PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT);
   tc->base.priv = pipe; /* priv points to the wrapped driver context */
   tc->base.screen = pipe->screen;
   tc->base.destroy = tc_destroy;

   tc->base.stream_uploader = u_upload_clone(&tc->base, pipe->stream_uploader);
   if (pipe->stream_uploader == pipe->const_uploader)
      tc->base.const_uploader = tc->base.stream_uploader;
   else
      tc->base.const_uploader = u_upload_clone(&tc->base, pipe->const_uploader);

   if (!tc->base.stream_uploader || !tc->base.const_uploader)
      goto fail;

   /* The queue size is the number of batches "waiting". Batches are removed
    * from the queue before being executed, so keep one tc_batch slot for that
    * execution. Also, keep one unused slot for an unflushed batch.
    */
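   /* Worked example (illustrative): assuming TC_MAX_BATCHES is 4, as defined
    * in u_threaded_context.h, the queue below holds 2 batches, leaving one
    * slot for the batch currently executing and one for the batch being
    * filled.
    */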
   if (!util_queue_init(&tc->queue, "gallium_drv", TC_MAX_BATCHES - 2, 1, 0))
      goto fail;

   for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
      tc->batch_slots[i].sentinel = TC_SENTINEL;
      tc->batch_slots[i].pipe = pipe;
      util_queue_fence_init(&tc->batch_slots[i].fence);
   }

   LIST_INITHEAD(&tc->unflushed_queries);

   slab_create_child(&tc->pool_transfers, parent_transfer_pool);

#define CTX_INIT(_member) \
   tc->base._member = tc->pipe->_member ? tc_##_member : NULL
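
/* For example, CTX_INIT(flush) expands to
 *
 *    tc->base.flush = tc->pipe->flush ? tc_flush : NULL;
 *
 * so entry points the driver doesn't implement remain NULL in the wrapper.
 */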

   CTX_INIT(flush);
   CTX_INIT(draw_vbo);
   CTX_INIT(launch_grid);
   CTX_INIT(resource_copy_region);
   CTX_INIT(blit);
   CTX_INIT(clear);
   CTX_INIT(clear_render_target);
   CTX_INIT(clear_depth_stencil);
   CTX_INIT(clear_buffer);
   CTX_INIT(clear_texture);
   CTX_INIT(flush_resource);
   CTX_INIT(generate_mipmap);
   CTX_INIT(render_condition);
   CTX_INIT(create_query);
   CTX_INIT(create_batch_query);
   CTX_INIT(destroy_query);
   CTX_INIT(begin_query);
   CTX_INIT(end_query);
   CTX_INIT(get_query_result);
   CTX_INIT(get_query_result_resource);
   CTX_INIT(set_active_query_state);
   CTX_INIT(create_blend_state);
   CTX_INIT(bind_blend_state);
   CTX_INIT(delete_blend_state);
   CTX_INIT(create_sampler_state);
   CTX_INIT(bind_sampler_states);
   CTX_INIT(delete_sampler_state);
   CTX_INIT(create_rasterizer_state);
   CTX_INIT(bind_rasterizer_state);
   CTX_INIT(delete_rasterizer_state);
   CTX_INIT(create_depth_stencil_alpha_state);
   CTX_INIT(bind_depth_stencil_alpha_state);
   CTX_INIT(delete_depth_stencil_alpha_state);
   CTX_INIT(create_fs_state);
   CTX_INIT(bind_fs_state);
   CTX_INIT(delete_fs_state);
   CTX_INIT(create_vs_state);
   CTX_INIT(bind_vs_state);
   CTX_INIT(delete_vs_state);
   CTX_INIT(create_gs_state);
   CTX_INIT(bind_gs_state);
   CTX_INIT(delete_gs_state);
   CTX_INIT(create_tcs_state);
   CTX_INIT(bind_tcs_state);
   CTX_INIT(delete_tcs_state);
   CTX_INIT(create_tes_state);
   CTX_INIT(bind_tes_state);
   CTX_INIT(delete_tes_state);
   CTX_INIT(create_compute_state);
   CTX_INIT(bind_compute_state);
   CTX_INIT(delete_compute_state);
   CTX_INIT(create_vertex_elements_state);
   CTX_INIT(bind_vertex_elements_state);
   CTX_INIT(delete_vertex_elements_state);
   CTX_INIT(set_blend_color);
   CTX_INIT(set_stencil_ref);
   CTX_INIT(set_sample_mask);
   CTX_INIT(set_min_samples);
   CTX_INIT(set_clip_state);
   CTX_INIT(set_constant_buffer);
   CTX_INIT(set_framebuffer_state);
   CTX_INIT(set_polygon_stipple);
   CTX_INIT(set_scissor_states);
   CTX_INIT(set_viewport_states);
   CTX_INIT(set_window_rectangles);
   CTX_INIT(set_sampler_views);
   CTX_INIT(set_tess_state);
   CTX_INIT(set_shader_buffers);
   CTX_INIT(set_shader_images);
   CTX_INIT(set_vertex_buffers);
   CTX_INIT(create_stream_output_target);
   CTX_INIT(stream_output_target_destroy);
   CTX_INIT(set_stream_output_targets);
   CTX_INIT(create_sampler_view);
   CTX_INIT(sampler_view_destroy);
   CTX_INIT(create_surface);
   CTX_INIT(surface_destroy);
   CTX_INIT(transfer_map);
   CTX_INIT(transfer_flush_region);
   CTX_INIT(transfer_unmap);
   CTX_INIT(buffer_subdata);
   CTX_INIT(texture_subdata);
   CTX_INIT(texture_barrier);
   CTX_INIT(memory_barrier);
   CTX_INIT(resource_commit);
   CTX_INIT(create_video_codec);
   CTX_INIT(create_video_buffer);
   CTX_INIT(set_compute_resources);
   CTX_INIT(set_global_binding);
   CTX_INIT(get_sample_position);
   CTX_INIT(invalidate_resource);
   CTX_INIT(get_device_reset_status);
   CTX_INIT(set_device_reset_callback);
   CTX_INIT(dump_debug_state);
   CTX_INIT(emit_string_marker);
   CTX_INIT(set_debug_callback);
   CTX_INIT(create_fence_fd);
   CTX_INIT(fence_server_sync);
   CTX_INIT(get_timestamp);
   CTX_INIT(create_texture_handle);
   CTX_INIT(delete_texture_handle);
   CTX_INIT(make_texture_handle_resident);
   CTX_INIT(create_image_handle);
   CTX_INIT(delete_image_handle);
   CTX_INIT(make_image_handle_resident);
#undef CTX_INIT

   if (out)
      *out = tc;

   return &tc->base;

fail:
   tc_destroy(&tc->base);
   return NULL;
}