[mesa.git] / src / gallium / auxiliary / util / u_threaded_context.c
/**************************************************************************
 *
 * Copyright 2017 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "util/u_threaded_context.h"
#include "util/u_cpu_detect.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

/* 0 = disabled, 1 = assertions, 2 = printfs */
#define TC_DEBUG 0

#if TC_DEBUG >= 1
#define tc_assert assert
#else
#define tc_assert(x)
#endif

#if TC_DEBUG >= 2
#define tc_printf printf
#define tc_asprintf asprintf
#define tc_strcmp strcmp
#else
#define tc_printf(...)
#define tc_asprintf(...) 0
#define tc_strcmp(...) 0
#endif

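/* Sentinel value written into every call header and verified by
 * tc_batch_check()/tc_batch_execute() to catch batch memory corruption
 * (read as "scalable" in hexspeak).
 */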
#define TC_SENTINEL 0x5ca1ab1e

enum tc_call_id {
#define CALL(name) TC_CALL_##name,
#include "u_threaded_context_calls.h"
#undef CALL
   TC_NUM_CALLS,
};
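/* Each CALL(name) entry in u_threaded_context_calls.h expands to one
 * TC_CALL_##name enumerator above; the corresponding tc_call_##name()
 * functions below are dispatched through execute_func[] by call ID.
 */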

typedef void (*tc_execute)(struct pipe_context *pipe, union tc_payload *payload);

static const tc_execute execute_func[TC_NUM_CALLS];

static void
tc_batch_check(MAYBE_UNUSED struct tc_batch *batch)
{
   tc_assert(batch->sentinel == TC_SENTINEL);
   tc_assert(batch->num_total_call_slots <= TC_CALLS_PER_BATCH);
}

static void
tc_debug_check(struct threaded_context *tc)
{
   for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
      tc_batch_check(&tc->batch_slots[i]);
      tc_assert(tc->batch_slots[i].pipe == tc->pipe);
   }
}

static void
tc_batch_execute(void *job, UNUSED int thread_index)
{
   struct tc_batch *batch = job;
   struct pipe_context *pipe = batch->pipe;
   struct tc_call *last = &batch->call[batch->num_total_call_slots];

   tc_batch_check(batch);

   assert(!batch->token);

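   /* Calls are variable-sized, so each iteration advances by the call's own
    * num_call_slots rather than by one fixed-size element.
    */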
   for (struct tc_call *iter = batch->call; iter != last;
        iter += iter->num_call_slots) {
      tc_assert(iter->sentinel == TC_SENTINEL);
      execute_func[iter->call_id](pipe, &iter->payload);
   }

   tc_batch_check(batch);
   batch->num_total_call_slots = 0;
}

static void
tc_batch_flush(struct threaded_context *tc)
{
   struct tc_batch *next = &tc->batch_slots[tc->next];

   tc_assert(next->num_total_call_slots != 0);
   tc_batch_check(next);
   tc_debug_check(tc);
   p_atomic_add(&tc->num_offloaded_slots, next->num_total_call_slots);

   if (next->token) {
      next->token->tc = NULL;
      tc_unflushed_batch_token_reference(&next->token, NULL);
   }

   util_queue_add_job(&tc->queue, next, &next->fence, tc_batch_execute,
                      NULL);
   tc->last = tc->next;
   tc->next = (tc->next + 1) % TC_MAX_BATCHES;
}

/* This is the function that adds variable-sized calls into the current
 * batch. It also flushes the batch if there is not enough space in it.
 * All other higher-level "add" functions use it.
 */
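/* For example (illustrative): a 20-byte payload together with the tc_call
 * header occupies DIV_ROUND_UP(offsetof(struct tc_call, payload) + 20,
 * sizeof(struct tc_call)) call slots in the batch, per the math below.
 */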
static union tc_payload *
tc_add_sized_call(struct threaded_context *tc, enum tc_call_id id,
                  unsigned payload_size)
{
   struct tc_batch *next = &tc->batch_slots[tc->next];
   unsigned total_size = offsetof(struct tc_call, payload) + payload_size;
   unsigned num_call_slots = DIV_ROUND_UP(total_size, sizeof(struct tc_call));

   tc_debug_check(tc);

   if (unlikely(next->num_total_call_slots + num_call_slots > TC_CALLS_PER_BATCH)) {
      tc_batch_flush(tc);
      next = &tc->batch_slots[tc->next];
      tc_assert(next->num_total_call_slots == 0);
   }

   tc_assert(util_queue_fence_is_signalled(&next->fence));

   struct tc_call *call = &next->call[next->num_total_call_slots];
   next->num_total_call_slots += num_call_slots;

   call->sentinel = TC_SENTINEL;
   call->call_id = id;
   call->num_call_slots = num_call_slots;

   tc_debug_check(tc);
   return &call->payload;
}

#define tc_add_struct_typed_call(tc, execute, type) \
   ((struct type*)tc_add_sized_call(tc, execute, sizeof(struct type)))

#define tc_add_slot_based_call(tc, execute, type, num_slots) \
   ((struct type*)tc_add_sized_call(tc, execute, \
                                    sizeof(struct type) + \
                                    sizeof(((struct type*)NULL)->slot[0]) * \
                                    (num_slots)))
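
/* tc_add_slot_based_call() sizes the allocation for a struct that ends in a
 * flexible "slot[0]" array, reserving room for num_slots trailing elements.
 */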

static union tc_payload *
tc_add_small_call(struct threaded_context *tc, enum tc_call_id id)
{
   return tc_add_sized_call(tc, id, 0);
}

static bool
tc_is_sync(struct threaded_context *tc)
{
   struct tc_batch *last = &tc->batch_slots[tc->last];
   struct tc_batch *next = &tc->batch_slots[tc->next];

   return util_queue_fence_is_signalled(&last->fence) &&
          !next->num_total_call_slots;
}

static void
_tc_sync(struct threaded_context *tc, MAYBE_UNUSED const char *info, MAYBE_UNUSED const char *func)
{
   struct tc_batch *last = &tc->batch_slots[tc->last];
   struct tc_batch *next = &tc->batch_slots[tc->next];
   bool synced = false;

   tc_debug_check(tc);

   /* Only wait for queued calls... */
   if (!util_queue_fence_is_signalled(&last->fence)) {
      util_queue_fence_wait(&last->fence);
      synced = true;
   }

   tc_debug_check(tc);

   if (next->token) {
      next->token->tc = NULL;
      tc_unflushed_batch_token_reference(&next->token, NULL);
   }

   /* .. and execute unflushed calls directly. */
   if (next->num_total_call_slots) {
      p_atomic_add(&tc->num_direct_slots, next->num_total_call_slots);
      tc_batch_execute(next, 0);
      synced = true;
   }

   if (synced) {
      p_atomic_inc(&tc->num_syncs);

      if (tc_strcmp(func, "tc_destroy") != 0) {
         tc_printf("sync %s %s\n", func, info);
      }
   }

   tc_debug_check(tc);
}

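/* Convenience wrappers that pass the caller's function name through for the
 * TC_DEBUG printf path above.
 */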
#define tc_sync(tc) _tc_sync(tc, "", __func__)
#define tc_sync_msg(tc, info) _tc_sync(tc, info, __func__)

/**
 * Call this from fence_finish for same-context fence waits of deferred fences
 * that haven't been flushed yet.
 *
 * The passed pipe_context must be the one passed to pipe_screen::fence_finish,
 * i.e., the wrapped one.
 */
void
threaded_context_flush(struct pipe_context *_pipe,
                       struct tc_unflushed_batch_token *token,
                       bool prefer_async)
{
   struct threaded_context *tc = threaded_context(_pipe);

   /* This is called from the state-tracker / application thread. */
   if (token->tc && token->tc == tc) {
      struct tc_batch *last = &tc->batch_slots[tc->last];

      /* Prefer to do the flush in the driver thread if it is already
       * running. That should be better for cache locality.
       */
      if (prefer_async || !util_queue_fence_is_signalled(&last->fence))
         tc_batch_flush(tc);
      else
         tc_sync(token->tc);
   }
}

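/* Initialize a reference in freshly allocated, uninitialized payload memory:
 * *dst must be NULLed first so that pipe_resource_reference() doesn't try to
 * unreference a garbage pointer.
 */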
static void
tc_set_resource_reference(struct pipe_resource **dst, struct pipe_resource *src)
{
   *dst = NULL;
   pipe_resource_reference(dst, src);
}

void
threaded_resource_init(struct pipe_resource *res)
{
   struct threaded_resource *tres = threaded_resource(res);

   tres->latest = &tres->b;
   util_range_init(&tres->valid_buffer_range);
   tres->base_valid_buffer_range = &tres->valid_buffer_range;
   tres->is_shared = false;
   tres->is_user_ptr = false;
}

void
threaded_resource_deinit(struct pipe_resource *res)
{
   struct threaded_resource *tres = threaded_resource(res);

   if (tres->latest != &tres->b)
      pipe_resource_reference(&tres->latest, NULL);
   util_range_destroy(&tres->valid_buffer_range);
}

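/* If the context is a threaded_context wrapper, sync it and return the
 * wrapped driver context (stored in pipe->priv); otherwise return the
 * context unchanged.
 */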
struct pipe_context *
threaded_context_unwrap_sync(struct pipe_context *pipe)
{
   if (!pipe || !pipe->priv)
      return pipe;

   tc_sync(threaded_context(pipe));
   return (struct pipe_context*)pipe->priv;
}


/********************************************************************
 * simple functions
 */

#define TC_FUNC1(func, m_payload, qualifier, type, deref, deref2) \
   static void \
   tc_call_##func(struct pipe_context *pipe, union tc_payload *payload) \
   { \
      pipe->func(pipe, deref2((type*)payload)); \
   } \
   \
   static void \
   tc_##func(struct pipe_context *_pipe, qualifier type deref param) \
   { \
      struct threaded_context *tc = threaded_context(_pipe); \
      type *p = (type*)tc_add_sized_call(tc, TC_CALL_##func, sizeof(type)); \
      *p = deref(param); \
   }
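
/* For example, TC_FUNC1(set_blend_color, ...) below generates a queued
 * tc_set_blend_color() that copies the pipe_blend_color into the batch, plus
 * a tc_call_set_blend_color() that replays it on the driver thread.
 */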

TC_FUNC1(set_active_query_state, flags, , bool, , *)

TC_FUNC1(set_blend_color, blend_color, const, struct pipe_blend_color, *, )
TC_FUNC1(set_stencil_ref, stencil_ref, const, struct pipe_stencil_ref, *, )
TC_FUNC1(set_clip_state, clip_state, const, struct pipe_clip_state, *, )
TC_FUNC1(set_sample_mask, sample_mask, , unsigned, , *)
TC_FUNC1(set_min_samples, min_samples, , unsigned, , *)
TC_FUNC1(set_polygon_stipple, polygon_stipple, const, struct pipe_poly_stipple, *, )

TC_FUNC1(texture_barrier, flags, , unsigned, , *)
TC_FUNC1(memory_barrier, flags, , unsigned, , *)


/********************************************************************
 * queries
 */

static struct pipe_query *
tc_create_query(struct pipe_context *_pipe, unsigned query_type,
                unsigned index)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   return pipe->create_query(pipe, query_type, index);
}

static struct pipe_query *
tc_create_batch_query(struct pipe_context *_pipe, unsigned num_queries,
                      unsigned *query_types)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   return pipe->create_batch_query(pipe, num_queries, query_types);
}

static void
tc_call_destroy_query(struct pipe_context *pipe, union tc_payload *payload)
{
   struct threaded_query *tq = threaded_query(payload->query);

   if (tq->head_unflushed.next)
      LIST_DEL(&tq->head_unflushed);

   pipe->destroy_query(pipe, payload->query);
}

static void
tc_destroy_query(struct pipe_context *_pipe, struct pipe_query *query)
{
   struct threaded_context *tc = threaded_context(_pipe);

   tc_add_small_call(tc, TC_CALL_destroy_query)->query = query;
}

static void
tc_call_begin_query(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->begin_query(pipe, payload->query);
}

static bool
tc_begin_query(struct pipe_context *_pipe, struct pipe_query *query)
{
   struct threaded_context *tc = threaded_context(_pipe);
   union tc_payload *payload = tc_add_small_call(tc, TC_CALL_begin_query);

   payload->query = query;
   return true; /* we don't care about the return value for this call */
}

struct tc_end_query_payload {
   struct threaded_context *tc;
   struct pipe_query *query;
};

static void
tc_call_end_query(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_end_query_payload *p = (struct tc_end_query_payload *)payload;
   struct threaded_query *tq = threaded_query(p->query);

   if (!tq->head_unflushed.next)
      LIST_ADD(&tq->head_unflushed, &p->tc->unflushed_queries);

   pipe->end_query(pipe, p->query);
}

static bool
tc_end_query(struct pipe_context *_pipe, struct pipe_query *query)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_query *tq = threaded_query(query);
   struct tc_end_query_payload *payload =
      tc_add_struct_typed_call(tc, TC_CALL_end_query, tc_end_query_payload);

   payload->tc = tc;
   payload->query = query;

   tq->flushed = false;

   return true; /* we don't care about the return value for this call */
}

static bool
tc_get_query_result(struct pipe_context *_pipe,
                    struct pipe_query *query, bool wait,
                    union pipe_query_result *result)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_query *tq = threaded_query(query);
   struct pipe_context *pipe = tc->pipe;

   if (!tq->flushed)
      tc_sync_msg(tc, wait ? "wait" : "nowait");

   bool success = pipe->get_query_result(pipe, query, wait, result);

   if (success) {
      tq->flushed = true;
      if (tq->head_unflushed.next) {
         /* This is safe because it can only happen after we sync'd. */
         LIST_DEL(&tq->head_unflushed);
      }
   }
   return success;
}

struct tc_query_result_resource {
   struct pipe_query *query;
   bool wait;
   enum pipe_query_value_type result_type;
   int index;
   struct pipe_resource *resource;
   unsigned offset;
};

static void
tc_call_get_query_result_resource(struct pipe_context *pipe,
                                  union tc_payload *payload)
{
   struct tc_query_result_resource *p = (struct tc_query_result_resource *)payload;

   pipe->get_query_result_resource(pipe, p->query, p->wait, p->result_type,
                                   p->index, p->resource, p->offset);
   pipe_resource_reference(&p->resource, NULL);
}

static void
tc_get_query_result_resource(struct pipe_context *_pipe,
                             struct pipe_query *query, bool wait,
                             enum pipe_query_value_type result_type, int index,
                             struct pipe_resource *resource, unsigned offset)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_query_result_resource *p =
      tc_add_struct_typed_call(tc, TC_CALL_get_query_result_resource,
                               tc_query_result_resource);

   p->query = query;
   p->wait = wait;
   p->result_type = result_type;
   p->index = index;
   tc_set_resource_reference(&p->resource, resource);
   p->offset = offset;
}

struct tc_render_condition {
   struct pipe_query *query;
   bool condition;
   unsigned mode;
};

static void
tc_call_render_condition(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_render_condition *p = (struct tc_render_condition *)payload;
   pipe->render_condition(pipe, p->query, p->condition, p->mode);
}

static void
tc_render_condition(struct pipe_context *_pipe,
                    struct pipe_query *query, bool condition,
                    enum pipe_render_cond_flag mode)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_render_condition *p =
      tc_add_struct_typed_call(tc, TC_CALL_render_condition, tc_render_condition);

   p->query = query;
   p->condition = condition;
   p->mode = mode;
}


/********************************************************************
 * constant (immutable) states
 */

#define TC_CSO_CREATE(name, sname) \
   static void * \
   tc_create_##name##_state(struct pipe_context *_pipe, \
                            const struct pipe_##sname##_state *state) \
   { \
      struct pipe_context *pipe = threaded_context(_pipe)->pipe; \
      return pipe->create_##name##_state(pipe, state); \
   }

#define TC_CSO_BIND(name) TC_FUNC1(bind_##name##_state, cso, , void *, , *)
#define TC_CSO_DELETE(name) TC_FUNC1(delete_##name##_state, cso, , void *, , *)

#define TC_CSO_WHOLE2(name, sname) \
   TC_CSO_CREATE(name, sname) \
   TC_CSO_BIND(name) \
   TC_CSO_DELETE(name)

#define TC_CSO_WHOLE(name) TC_CSO_WHOLE2(name, name)

TC_CSO_WHOLE(blend)
TC_CSO_WHOLE(rasterizer)
TC_CSO_WHOLE(depth_stencil_alpha)
TC_CSO_WHOLE(compute)
TC_CSO_WHOLE2(fs, shader)
TC_CSO_WHOLE2(vs, shader)
TC_CSO_WHOLE2(gs, shader)
TC_CSO_WHOLE2(tcs, shader)
TC_CSO_WHOLE2(tes, shader)
TC_CSO_CREATE(sampler, sampler)
TC_CSO_DELETE(sampler)
TC_CSO_BIND(vertex_elements)
TC_CSO_DELETE(vertex_elements)

static void *
tc_create_vertex_elements_state(struct pipe_context *_pipe, unsigned count,
                                const struct pipe_vertex_element *elems)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;

   return pipe->create_vertex_elements_state(pipe, count, elems);
}

struct tc_sampler_states {
   ubyte shader, start, count;
   void *slot[0]; /* more will be allocated if needed */
};

static void
tc_call_bind_sampler_states(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_sampler_states *p = (struct tc_sampler_states *)payload;
   pipe->bind_sampler_states(pipe, p->shader, p->start, p->count, p->slot);
}

static void
tc_bind_sampler_states(struct pipe_context *_pipe,
                       enum pipe_shader_type shader,
                       unsigned start, unsigned count, void **states)
{
   if (!count)
      return;

   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_sampler_states *p =
      tc_add_slot_based_call(tc, TC_CALL_bind_sampler_states, tc_sampler_states, count);

   p->shader = shader;
   p->start = start;
   p->count = count;
   memcpy(p->slot, states, count * sizeof(states[0]));
}


/********************************************************************
 * immediate states
 */

static void
tc_call_set_framebuffer_state(struct pipe_context *pipe, union tc_payload *payload)
{
   struct pipe_framebuffer_state *p = (struct pipe_framebuffer_state *)payload;

   pipe->set_framebuffer_state(pipe, p);

   unsigned nr_cbufs = p->nr_cbufs;
   for (unsigned i = 0; i < nr_cbufs; i++)
      pipe_surface_reference(&p->cbufs[i], NULL);
   pipe_surface_reference(&p->zsbuf, NULL);
}

static void
tc_set_framebuffer_state(struct pipe_context *_pipe,
                         const struct pipe_framebuffer_state *fb)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_framebuffer_state *p =
      tc_add_struct_typed_call(tc, TC_CALL_set_framebuffer_state,
                               pipe_framebuffer_state);
   unsigned nr_cbufs = fb->nr_cbufs;

   p->width = fb->width;
   p->height = fb->height;
   p->samples = fb->samples;
   p->layers = fb->layers;
   p->nr_cbufs = nr_cbufs;

   for (unsigned i = 0; i < nr_cbufs; i++) {
      p->cbufs[i] = NULL;
      pipe_surface_reference(&p->cbufs[i], fb->cbufs[i]);
   }
   p->zsbuf = NULL;
   pipe_surface_reference(&p->zsbuf, fb->zsbuf);
}

static void
tc_call_set_tess_state(struct pipe_context *pipe, union tc_payload *payload)
{
   float *p = (float*)payload;
   pipe->set_tess_state(pipe, p, p + 4);
}

static void
tc_set_tess_state(struct pipe_context *_pipe,
                  const float default_outer_level[4],
                  const float default_inner_level[2])
{
   struct threaded_context *tc = threaded_context(_pipe);
   float *p = (float*)tc_add_sized_call(tc, TC_CALL_set_tess_state,
                                        sizeof(float) * 6);

   memcpy(p, default_outer_level, 4 * sizeof(float));
   memcpy(p + 4, default_inner_level, 2 * sizeof(float));
}

struct tc_constant_buffer {
   ubyte shader, index;
   struct pipe_constant_buffer cb;
};

static void
tc_call_set_constant_buffer(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_constant_buffer *p = (struct tc_constant_buffer *)payload;

   pipe->set_constant_buffer(pipe,
                             p->shader,
                             p->index,
                             &p->cb);
   pipe_resource_reference(&p->cb.buffer, NULL);
}

static void
tc_set_constant_buffer(struct pipe_context *_pipe,
                       enum pipe_shader_type shader, uint index,
                       const struct pipe_constant_buffer *cb)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_resource *buffer = NULL;
   unsigned offset;

   /* This must be done before adding set_constant_buffer, because it could
    * generate e.g. transfer_unmap and flush partially-uninitialized
    * set_constant_buffer to the driver if it was done afterwards.
    */
   if (cb && cb->user_buffer) {
      u_upload_data(tc->base.const_uploader, 0, cb->buffer_size, 64,
                    cb->user_buffer, &offset, &buffer);
      u_upload_unmap(tc->base.const_uploader);
   }

   struct tc_constant_buffer *p =
      tc_add_struct_typed_call(tc, TC_CALL_set_constant_buffer,
                               tc_constant_buffer);
   p->shader = shader;
   p->index = index;

   if (cb) {
      if (cb->user_buffer) {
         p->cb.buffer_size = cb->buffer_size;
         p->cb.user_buffer = NULL;
         p->cb.buffer_offset = offset;
         p->cb.buffer = buffer;
      } else {
         tc_set_resource_reference(&p->cb.buffer,
                                   cb->buffer);
         memcpy(&p->cb, cb, sizeof(*cb));
      }
   } else {
      memset(&p->cb, 0, sizeof(*cb));
   }
}

struct tc_scissors {
   ubyte start, count;
   struct pipe_scissor_state slot[0]; /* more will be allocated if needed */
};

static void
tc_call_set_scissor_states(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_scissors *p = (struct tc_scissors *)payload;
   pipe->set_scissor_states(pipe, p->start, p->count, p->slot);
}

static void
tc_set_scissor_states(struct pipe_context *_pipe,
                      unsigned start, unsigned count,
                      const struct pipe_scissor_state *states)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_scissors *p =
      tc_add_slot_based_call(tc, TC_CALL_set_scissor_states, tc_scissors, count);

   p->start = start;
   p->count = count;
   memcpy(&p->slot, states, count * sizeof(states[0]));
}

struct tc_viewports {
   ubyte start, count;
   struct pipe_viewport_state slot[0]; /* more will be allocated if needed */
};

static void
tc_call_set_viewport_states(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_viewports *p = (struct tc_viewports *)payload;
   pipe->set_viewport_states(pipe, p->start, p->count, p->slot);
}

static void
tc_set_viewport_states(struct pipe_context *_pipe,
                       unsigned start, unsigned count,
                       const struct pipe_viewport_state *states)
{
   if (!count)
      return;

   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_viewports *p =
      tc_add_slot_based_call(tc, TC_CALL_set_viewport_states, tc_viewports, count);

   p->start = start;
   p->count = count;
   memcpy(&p->slot, states, count * sizeof(states[0]));
}

struct tc_window_rects {
   bool include;
   ubyte count;
   struct pipe_scissor_state slot[0]; /* more will be allocated if needed */
};

static void
tc_call_set_window_rectangles(struct pipe_context *pipe,
                              union tc_payload *payload)
{
   struct tc_window_rects *p = (struct tc_window_rects *)payload;
   pipe->set_window_rectangles(pipe, p->include, p->count, p->slot);
}

static void
tc_set_window_rectangles(struct pipe_context *_pipe, bool include,
                         unsigned count,
                         const struct pipe_scissor_state *rects)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_window_rects *p =
      tc_add_slot_based_call(tc, TC_CALL_set_window_rectangles, tc_window_rects, count);

   p->include = include;
   p->count = count;
   memcpy(p->slot, rects, count * sizeof(rects[0]));
}

struct tc_sampler_views {
   ubyte shader, start, count;
   struct pipe_sampler_view *slot[0]; /* more will be allocated if needed */
};

static void
tc_call_set_sampler_views(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_sampler_views *p = (struct tc_sampler_views *)payload;
   unsigned count = p->count;

   pipe->set_sampler_views(pipe, p->shader, p->start, p->count, p->slot);
   for (unsigned i = 0; i < count; i++)
      pipe_sampler_view_reference(&p->slot[i], NULL);
}

static void
tc_set_sampler_views(struct pipe_context *_pipe,
                     enum pipe_shader_type shader,
                     unsigned start, unsigned count,
                     struct pipe_sampler_view **views)
{
   if (!count)
      return;

   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_sampler_views *p =
      tc_add_slot_based_call(tc, TC_CALL_set_sampler_views, tc_sampler_views, count);

   p->shader = shader;
   p->start = start;
   p->count = count;

   if (views) {
      for (unsigned i = 0; i < count; i++) {
         p->slot[i] = NULL;
         pipe_sampler_view_reference(&p->slot[i], views[i]);
      }
   } else {
      memset(p->slot, 0, count * sizeof(views[0]));
   }
}

struct tc_shader_images {
   ubyte shader, start, count;
   bool unbind;
   struct pipe_image_view slot[0]; /* more will be allocated if needed */
};

static void
tc_call_set_shader_images(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_shader_images *p = (struct tc_shader_images *)payload;
   unsigned count = p->count;

   if (p->unbind) {
      pipe->set_shader_images(pipe, p->shader, p->start, p->count, NULL);
      return;
   }

   pipe->set_shader_images(pipe, p->shader, p->start, p->count, p->slot);

   for (unsigned i = 0; i < count; i++)
      pipe_resource_reference(&p->slot[i].resource, NULL);
}

static void
tc_set_shader_images(struct pipe_context *_pipe,
                     enum pipe_shader_type shader,
                     unsigned start, unsigned count,
                     const struct pipe_image_view *images)
{
   if (!count)
      return;

   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_shader_images *p =
      tc_add_slot_based_call(tc, TC_CALL_set_shader_images, tc_shader_images,
                             images ? count : 0);

   p->shader = shader;
   p->start = start;
   p->count = count;
   p->unbind = images == NULL;

   if (images) {
      for (unsigned i = 0; i < count; i++) {
         tc_set_resource_reference(&p->slot[i].resource, images[i].resource);

         if (images[i].access & PIPE_IMAGE_ACCESS_WRITE &&
             images[i].resource &&
             images[i].resource->target == PIPE_BUFFER) {
            struct threaded_resource *tres =
               threaded_resource(images[i].resource);

            util_range_add(&tres->valid_buffer_range, images[i].u.buf.offset,
                           images[i].u.buf.offset + images[i].u.buf.size);
         }
      }
      memcpy(p->slot, images, count * sizeof(images[0]));
   }
}

struct tc_shader_buffers {
   ubyte shader, start, count;
   bool unbind;
   unsigned writable_bitmask;
   struct pipe_shader_buffer slot[0]; /* more will be allocated if needed */
};

static void
tc_call_set_shader_buffers(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_shader_buffers *p = (struct tc_shader_buffers *)payload;
   unsigned count = p->count;

   if (p->unbind) {
      pipe->set_shader_buffers(pipe, p->shader, p->start, p->count, NULL, 0);
      return;
   }

   pipe->set_shader_buffers(pipe, p->shader, p->start, p->count, p->slot,
                            p->writable_bitmask);

   for (unsigned i = 0; i < count; i++)
      pipe_resource_reference(&p->slot[i].buffer, NULL);
}

static void
tc_set_shader_buffers(struct pipe_context *_pipe,
                      enum pipe_shader_type shader,
                      unsigned start, unsigned count,
                      const struct pipe_shader_buffer *buffers,
                      unsigned writable_bitmask)
{
   if (!count)
      return;

   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_shader_buffers *p =
      tc_add_slot_based_call(tc, TC_CALL_set_shader_buffers, tc_shader_buffers,
                             buffers ? count : 0);

   p->shader = shader;
   p->start = start;
   p->count = count;
   p->unbind = buffers == NULL;
   p->writable_bitmask = writable_bitmask;

   if (buffers) {
      for (unsigned i = 0; i < count; i++) {
         struct pipe_shader_buffer *dst = &p->slot[i];
         const struct pipe_shader_buffer *src = buffers + i;

         tc_set_resource_reference(&dst->buffer, src->buffer);
         dst->buffer_offset = src->buffer_offset;
         dst->buffer_size = src->buffer_size;

         if (src->buffer) {
            struct threaded_resource *tres = threaded_resource(src->buffer);

            util_range_add(&tres->valid_buffer_range, src->buffer_offset,
                           src->buffer_offset + src->buffer_size);
         }
      }
   }
}

struct tc_vertex_buffers {
   ubyte start, count;
   bool unbind;
   struct pipe_vertex_buffer slot[0]; /* more will be allocated if needed */
};

static void
tc_call_set_vertex_buffers(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_vertex_buffers *p = (struct tc_vertex_buffers *)payload;
   unsigned count = p->count;

   if (p->unbind) {
      pipe->set_vertex_buffers(pipe, p->start, count, NULL);
      return;
   }

   for (unsigned i = 0; i < count; i++)
      tc_assert(!p->slot[i].is_user_buffer);

   pipe->set_vertex_buffers(pipe, p->start, count, p->slot);
   for (unsigned i = 0; i < count; i++)
      pipe_resource_reference(&p->slot[i].buffer.resource, NULL);
}

static void
tc_set_vertex_buffers(struct pipe_context *_pipe,
                      unsigned start, unsigned count,
                      const struct pipe_vertex_buffer *buffers)
{
   struct threaded_context *tc = threaded_context(_pipe);

   if (!count)
      return;

   if (buffers) {
      struct tc_vertex_buffers *p =
         tc_add_slot_based_call(tc, TC_CALL_set_vertex_buffers, tc_vertex_buffers, count);
      p->start = start;
      p->count = count;
      p->unbind = false;

      for (unsigned i = 0; i < count; i++) {
         struct pipe_vertex_buffer *dst = &p->slot[i];
         const struct pipe_vertex_buffer *src = buffers + i;

         tc_assert(!src->is_user_buffer);
         dst->stride = src->stride;
         dst->is_user_buffer = false;
         tc_set_resource_reference(&dst->buffer.resource,
                                   src->buffer.resource);
         dst->buffer_offset = src->buffer_offset;
      }
   } else {
      struct tc_vertex_buffers *p =
         tc_add_slot_based_call(tc, TC_CALL_set_vertex_buffers, tc_vertex_buffers, 0);
      p->start = start;
      p->count = count;
      p->unbind = true;
   }
}

struct tc_stream_outputs {
   unsigned count;
   struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
   unsigned offsets[PIPE_MAX_SO_BUFFERS];
};

static void
tc_call_set_stream_output_targets(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_stream_outputs *p = (struct tc_stream_outputs *)payload;
   unsigned count = p->count;

   pipe->set_stream_output_targets(pipe, count, p->targets, p->offsets);
   for (unsigned i = 0; i < count; i++)
      pipe_so_target_reference(&p->targets[i], NULL);
}

static void
tc_set_stream_output_targets(struct pipe_context *_pipe,
                             unsigned count,
                             struct pipe_stream_output_target **tgs,
                             const unsigned *offsets)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_stream_outputs *p =
      tc_add_struct_typed_call(tc, TC_CALL_set_stream_output_targets,
                               tc_stream_outputs);

   for (unsigned i = 0; i < count; i++) {
      p->targets[i] = NULL;
      pipe_so_target_reference(&p->targets[i], tgs[i]);
   }
   p->count = count;
   memcpy(p->offsets, offsets, count * sizeof(unsigned));
}

static void
tc_set_compute_resources(struct pipe_context *_pipe, unsigned start,
                         unsigned count, struct pipe_surface **resources)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->set_compute_resources(pipe, start, count, resources);
}

static void
tc_set_global_binding(struct pipe_context *_pipe, unsigned first,
                      unsigned count, struct pipe_resource **resources,
                      uint32_t **handles)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->set_global_binding(pipe, first, count, resources, handles);
}


/********************************************************************
 * views
 */

static struct pipe_surface *
tc_create_surface(struct pipe_context *_pipe,
                  struct pipe_resource *resource,
                  const struct pipe_surface *surf_tmpl)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;
   struct pipe_surface *view =
      pipe->create_surface(pipe, resource, surf_tmpl);

   if (view)
      view->context = _pipe;
   return view;
}

static void
tc_surface_destroy(struct pipe_context *_pipe,
                   struct pipe_surface *surf)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;

   pipe->surface_destroy(pipe, surf);
}

static struct pipe_sampler_view *
tc_create_sampler_view(struct pipe_context *_pipe,
                       struct pipe_resource *resource,
                       const struct pipe_sampler_view *templ)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;
   struct pipe_sampler_view *view =
      pipe->create_sampler_view(pipe, resource, templ);

   if (view)
      view->context = _pipe;
   return view;
}

static void
tc_sampler_view_destroy(struct pipe_context *_pipe,
                        struct pipe_sampler_view *view)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;

   pipe->sampler_view_destroy(pipe, view);
}

static struct pipe_stream_output_target *
tc_create_stream_output_target(struct pipe_context *_pipe,
                               struct pipe_resource *res,
                               unsigned buffer_offset,
                               unsigned buffer_size)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;
   struct threaded_resource *tres = threaded_resource(res);
   struct pipe_stream_output_target *view;

   tc_sync(threaded_context(_pipe));
   util_range_add(&tres->valid_buffer_range, buffer_offset,
                  buffer_offset + buffer_size);

   view = pipe->create_stream_output_target(pipe, res, buffer_offset,
                                            buffer_size);
   if (view)
      view->context = _pipe;
   return view;
}

static void
tc_stream_output_target_destroy(struct pipe_context *_pipe,
                                struct pipe_stream_output_target *target)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;

   pipe->stream_output_target_destroy(pipe, target);
}


/********************************************************************
 * bindless
 */

static uint64_t
tc_create_texture_handle(struct pipe_context *_pipe,
                         struct pipe_sampler_view *view,
                         const struct pipe_sampler_state *state)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   return pipe->create_texture_handle(pipe, view, state);
}

static void
tc_call_delete_texture_handle(struct pipe_context *pipe,
                              union tc_payload *payload)
{
   pipe->delete_texture_handle(pipe, payload->handle);
}

static void
tc_delete_texture_handle(struct pipe_context *_pipe, uint64_t handle)
{
   struct threaded_context *tc = threaded_context(_pipe);
   union tc_payload *payload =
      tc_add_small_call(tc, TC_CALL_delete_texture_handle);

   payload->handle = handle;
}

struct tc_make_texture_handle_resident
{
   uint64_t handle;
   bool resident;
};

static void
tc_call_make_texture_handle_resident(struct pipe_context *pipe,
                                     union tc_payload *payload)
{
   struct tc_make_texture_handle_resident *p =
      (struct tc_make_texture_handle_resident *)payload;

   pipe->make_texture_handle_resident(pipe, p->handle, p->resident);
}

static void
tc_make_texture_handle_resident(struct pipe_context *_pipe, uint64_t handle,
                                bool resident)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_make_texture_handle_resident *p =
      tc_add_struct_typed_call(tc, TC_CALL_make_texture_handle_resident,
                               tc_make_texture_handle_resident);

   p->handle = handle;
   p->resident = resident;
}

static uint64_t
tc_create_image_handle(struct pipe_context *_pipe,
                       const struct pipe_image_view *image)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   return pipe->create_image_handle(pipe, image);
}

static void
tc_call_delete_image_handle(struct pipe_context *pipe,
                            union tc_payload *payload)
{
   pipe->delete_image_handle(pipe, payload->handle);
}

static void
tc_delete_image_handle(struct pipe_context *_pipe, uint64_t handle)
{
   struct threaded_context *tc = threaded_context(_pipe);
   union tc_payload *payload =
      tc_add_small_call(tc, TC_CALL_delete_image_handle);

   payload->handle = handle;
}

struct tc_make_image_handle_resident
{
   uint64_t handle;
   unsigned access;
   bool resident;
};

static void
tc_call_make_image_handle_resident(struct pipe_context *pipe,
                                   union tc_payload *payload)
{
   struct tc_make_image_handle_resident *p =
      (struct tc_make_image_handle_resident *)payload;

   pipe->make_image_handle_resident(pipe, p->handle, p->access, p->resident);
}

static void
tc_make_image_handle_resident(struct pipe_context *_pipe, uint64_t handle,
                              unsigned access, bool resident)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_make_image_handle_resident *p =
      tc_add_struct_typed_call(tc, TC_CALL_make_image_handle_resident,
                               tc_make_image_handle_resident);

   p->handle = handle;
   p->access = access;
   p->resident = resident;
}


/********************************************************************
 * transfer
 */

struct tc_replace_buffer_storage {
   struct pipe_resource *dst;
   struct pipe_resource *src;
   tc_replace_buffer_storage_func func;
};

static void
tc_call_replace_buffer_storage(struct pipe_context *pipe,
                               union tc_payload *payload)
{
   struct tc_replace_buffer_storage *p =
      (struct tc_replace_buffer_storage *)payload;

   p->func(pipe, p->dst, p->src);
   pipe_resource_reference(&p->dst, NULL);
   pipe_resource_reference(&p->src, NULL);
}

static bool
tc_invalidate_buffer(struct threaded_context *tc,
                     struct threaded_resource *tbuf)
{
   /* We can't check if the buffer is idle, so we invalidate it
    * unconditionally. */
   struct pipe_screen *screen = tc->base.screen;
   struct pipe_resource *new_buf;

   /* Shared, pinned, and sparse buffers can't be reallocated. */
   if (tbuf->is_shared ||
       tbuf->is_user_ptr ||
       tbuf->b.flags & PIPE_RESOURCE_FLAG_SPARSE)
      return false;

   /* Allocate a new one. */
   new_buf = screen->resource_create(screen, &tbuf->b);
   if (!new_buf)
      return false;

   /* Replace the "latest" pointer. */
   if (tbuf->latest != &tbuf->b)
      pipe_resource_reference(&tbuf->latest, NULL);

   tbuf->latest = new_buf;
   util_range_set_empty(&tbuf->valid_buffer_range);

   /* The valid range should point to the original buffer. */
   threaded_resource(new_buf)->base_valid_buffer_range =
      &tbuf->valid_buffer_range;

   /* Enqueue storage replacement of the original buffer. */
   struct tc_replace_buffer_storage *p =
      tc_add_struct_typed_call(tc, TC_CALL_replace_buffer_storage,
                               tc_replace_buffer_storage);

   p->func = tc->replace_buffer_storage;
   tc_set_resource_reference(&p->dst, &tbuf->b);
   tc_set_resource_reference(&p->src, new_buf);
   return true;
}

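/* Rewrite transfer-map flags so that as many buffer mappings as possible can
 * proceed without synchronizing with the driver thread.
 */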
static unsigned
tc_improve_map_buffer_flags(struct threaded_context *tc,
                            struct threaded_resource *tres, unsigned usage,
                            unsigned offset, unsigned size)
{
   /* Never invalidate inside the driver and never infer "unsynchronized". */
   unsigned tc_flags = TC_TRANSFER_MAP_NO_INVALIDATE |
                       TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED;

   /* Prevent a reentry. */
   if (usage & tc_flags)
      return usage;

   /* Use the staging upload if it's preferred. */
   if (usage & (PIPE_TRANSFER_DISCARD_RANGE |
                PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) &&
       !(usage & PIPE_TRANSFER_PERSISTENT) &&
       /* Try not to decrement the counter if it's not positive. Still racy,
        * but it makes it harder to wrap the counter from INT_MIN to INT_MAX. */
       tres->max_forced_staging_uploads > 0 &&
       p_atomic_dec_return(&tres->max_forced_staging_uploads) >= 0) {
      usage &= ~(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
                 PIPE_TRANSFER_UNSYNCHRONIZED);

      return usage | tc_flags | PIPE_TRANSFER_DISCARD_RANGE;
   }

   /* Sparse buffers can't be mapped directly and can't be reallocated
    * (fully invalidated). That may just be a radeonsi limitation, but
    * the threaded context must obey it with radeonsi.
    */
   if (tres->b.flags & PIPE_RESOURCE_FLAG_SPARSE) {
      /* We can use DISCARD_RANGE instead of full discard. This is the only
       * fast path for sparse buffers that doesn't need thread synchronization.
       */
      if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
         usage |= PIPE_TRANSFER_DISCARD_RANGE;

      /* Allow DISCARD_WHOLE_RESOURCE and inferring UNSYNCHRONIZED in drivers.
       * The threaded context doesn't do unsynchronized mappings and
       * invalidations of sparse buffers, therefore a correct driver behavior
       * won't result in an incorrect behavior with the threaded context.
       */
      return usage;
   }

   usage |= tc_flags;

   /* Handle CPU reads trivially. */
   if (usage & PIPE_TRANSFER_READ) {
      /* Drivers aren't allowed to do buffer invalidations. */
      return usage & ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   }

   /* See if the buffer range being mapped has never been initialized,
    * in which case it can be mapped unsynchronized. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
       !tres->is_shared &&
       !util_ranges_intersect(&tres->valid_buffer_range, offset, offset + size))
      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* If discarding the entire range, discard the whole resource instead. */
      if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
          offset == 0 && size == tres->b.width0)
         usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;

      /* Discard the whole resource if needed. */
      if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         if (tc_invalidate_buffer(tc, tres))
            usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
         else
            usage |= PIPE_TRANSFER_DISCARD_RANGE; /* fallback */
      }
   }

   /* We won't need this flag anymore. */
   /* TODO: We might not need TC_TRANSFER_MAP_NO_INVALIDATE with this. */
   usage &= ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;

   /* GL_AMD_pinned_memory and persistent mappings can't use staging
    * buffers. */
   if (usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
                PIPE_TRANSFER_PERSISTENT) ||
       tres->is_user_ptr)
      usage &= ~PIPE_TRANSFER_DISCARD_RANGE;

   /* Unsynchronized buffer mappings don't have to synchronize the thread. */
   if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
      usage &= ~PIPE_TRANSFER_DISCARD_RANGE;
      usage |= TC_TRANSFER_MAP_THREADED_UNSYNC; /* notify the driver */
   }

   return usage;
}

static void *
tc_transfer_map(struct pipe_context *_pipe,
                struct pipe_resource *resource, unsigned level,
                unsigned usage, const struct pipe_box *box,
                struct pipe_transfer **transfer)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_resource *tres = threaded_resource(resource);
   struct pipe_context *pipe = tc->pipe;

   if (resource->target == PIPE_BUFFER) {
      usage = tc_improve_map_buffer_flags(tc, tres, usage, box->x, box->width);

      /* Do a staging transfer within the threaded context. The driver should
       * only get resource_copy_region.
       */
      if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
         struct threaded_transfer *ttrans = slab_alloc(&tc->pool_transfers);
         uint8_t *map;

         ttrans->staging = NULL;

         u_upload_alloc(tc->base.stream_uploader, 0,
                        box->width + (box->x % tc->map_buffer_alignment),
                        64, &ttrans->offset, &ttrans->staging, (void**)&map);
         if (!map) {
            slab_free(&tc->pool_transfers, ttrans);
            return NULL;
         }

         tc_set_resource_reference(&ttrans->b.resource, resource);
         ttrans->b.level = 0;
         ttrans->b.usage = usage;
         ttrans->b.box = *box;
         ttrans->b.stride = 0;
         ttrans->b.layer_stride = 0;
         *transfer = &ttrans->b;
         return map + (box->x % tc->map_buffer_alignment);
      }
   }

   /* Unsynchronized buffer mappings don't have to synchronize the thread. */
   if (!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC))
      tc_sync_msg(tc, resource->target != PIPE_BUFFER ? " texture" :
                      usage & PIPE_TRANSFER_DISCARD_RANGE ? " discard_range" :
                      usage & PIPE_TRANSFER_READ ? " read" : " ??");

   return pipe->transfer_map(pipe, tres->latest ? tres->latest : resource,
                             level, usage, box, transfer);
}

struct tc_transfer_flush_region {
   struct pipe_transfer *transfer;
   struct pipe_box box;
};

static void
tc_call_transfer_flush_region(struct pipe_context *pipe,
                              union tc_payload *payload)
{
   struct tc_transfer_flush_region *p =
      (struct tc_transfer_flush_region *)payload;

   pipe->transfer_flush_region(pipe, p->transfer, &p->box);
}

struct tc_resource_copy_region {
   struct pipe_resource *dst;
   unsigned dst_level;
   unsigned dstx, dsty, dstz;
   struct pipe_resource *src;
   unsigned src_level;
   struct pipe_box src_box;
};

static void
tc_resource_copy_region(struct pipe_context *_pipe,
                        struct pipe_resource *dst, unsigned dst_level,
                        unsigned dstx, unsigned dsty, unsigned dstz,
                        struct pipe_resource *src, unsigned src_level,
                        const struct pipe_box *src_box);

static void
tc_buffer_do_flush_region(struct threaded_context *tc,
                          struct threaded_transfer *ttrans,
                          const struct pipe_box *box)
{
   struct threaded_resource *tres = threaded_resource(ttrans->b.resource);

   if (ttrans->staging) {
      struct pipe_box src_box;

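      /* Source range within the staging buffer: the upload offset, plus the
       * alignment padding of the original mapping, plus the position of the
       * flushed range relative to the mapped box.
       */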
      u_box_1d(ttrans->offset + ttrans->b.box.x % tc->map_buffer_alignment +
               (box->x - ttrans->b.box.x),
               box->width, &src_box);

      /* Copy the staging buffer into the original one. */
      tc_resource_copy_region(&tc->base, ttrans->b.resource, 0, box->x, 0, 0,
                              ttrans->staging, 0, &src_box);
   }

   util_range_add(tres->base_valid_buffer_range, box->x, box->x + box->width);
}

static void
tc_transfer_flush_region(struct pipe_context *_pipe,
                         struct pipe_transfer *transfer,
                         const struct pipe_box *rel_box)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_transfer *ttrans = threaded_transfer(transfer);
   struct threaded_resource *tres = threaded_resource(transfer->resource);
   unsigned required_usage = PIPE_TRANSFER_WRITE |
                             PIPE_TRANSFER_FLUSH_EXPLICIT;

   if (tres->b.target == PIPE_BUFFER) {
      if ((transfer->usage & required_usage) == required_usage) {
         struct pipe_box box;

         u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
         tc_buffer_do_flush_region(tc, ttrans, &box);
      }

      /* Staging transfers don't send the call to the driver. */
      if (ttrans->staging)
         return;
   }

   struct tc_transfer_flush_region *p =
      tc_add_struct_typed_call(tc, TC_CALL_transfer_flush_region,
                               tc_transfer_flush_region);
   p->transfer = transfer;
   p->box = *rel_box;
}

static void
tc_call_transfer_unmap(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->transfer_unmap(pipe, payload->transfer);
}

static void
tc_transfer_unmap(struct pipe_context *_pipe, struct pipe_transfer *transfer)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_transfer *ttrans = threaded_transfer(transfer);
   struct threaded_resource *tres = threaded_resource(transfer->resource);

   if (tres->b.target == PIPE_BUFFER) {
      if (transfer->usage & PIPE_TRANSFER_WRITE &&
          !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
         tc_buffer_do_flush_region(tc, ttrans, &transfer->box);

      /* Staging transfers don't send the call to the driver. */
      if (ttrans->staging) {
         pipe_resource_reference(&ttrans->staging, NULL);
         pipe_resource_reference(&ttrans->b.resource, NULL);
         slab_free(&tc->pool_transfers, ttrans);
         return;
      }
   }

   tc_add_small_call(tc, TC_CALL_transfer_unmap)->transfer = transfer;
}

struct tc_buffer_subdata {
   struct pipe_resource *resource;
   unsigned usage, offset, size;
   char slot[0]; /* more will be allocated if needed */
};

static void
tc_call_buffer_subdata(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_buffer_subdata *p = (struct tc_buffer_subdata *)payload;

   pipe->buffer_subdata(pipe, p->resource, p->usage, p->offset, p->size,
                        p->slot);
   pipe_resource_reference(&p->resource, NULL);
}

static void
tc_buffer_subdata(struct pipe_context *_pipe,
                  struct pipe_resource *resource,
                  unsigned usage, unsigned offset,
                  unsigned size, const void *data)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_resource *tres = threaded_resource(resource);

   if (!size)
      return;

   usage |= PIPE_TRANSFER_WRITE;

   /* PIPE_TRANSFER_MAP_DIRECTLY suppresses implicit DISCARD_RANGE. */
   if (!(usage & PIPE_TRANSFER_MAP_DIRECTLY))
      usage |= PIPE_TRANSFER_DISCARD_RANGE;

   usage = tc_improve_map_buffer_flags(tc, tres, usage, offset, size);

   /* Unsynchronized and big transfers should use transfer_map. Also handle
    * full invalidations, because drivers aren't allowed to do them.
    */
   if (usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
                PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) ||
       size > TC_MAX_SUBDATA_BYTES) {
      struct pipe_transfer *transfer;
      struct pipe_box box;
      uint8_t *map = NULL;

      u_box_1d(offset, size, &box);

      map = tc_transfer_map(_pipe, resource, 0, usage, &box, &transfer);
      if (map) {
         memcpy(map, data, size);
         tc_transfer_unmap(_pipe, transfer);
      }
      return;
   }

   util_range_add(&tres->valid_buffer_range, offset, offset + size);

   /* The upload is small. Enqueue it. */
   struct tc_buffer_subdata *p =
      tc_add_slot_based_call(tc, TC_CALL_buffer_subdata, tc_buffer_subdata, size);

   tc_set_resource_reference(&p->resource, resource);
   p->usage = usage;
   p->offset = offset;
   p->size = size;
   memcpy(p->slot, data, size);
}

struct tc_texture_subdata {
   struct pipe_resource *resource;
   unsigned level, usage, stride, layer_stride;
   struct pipe_box box;
   char slot[0]; /* more will be allocated if needed */
};

static void
tc_call_texture_subdata(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_texture_subdata *p = (struct tc_texture_subdata *)payload;

   pipe->texture_subdata(pipe, p->resource, p->level, p->usage, &p->box,
                         p->slot, p->stride, p->layer_stride);
   pipe_resource_reference(&p->resource, NULL);
}

static void
tc_texture_subdata(struct pipe_context *_pipe,
                   struct pipe_resource *resource,
                   unsigned level, unsigned usage,
                   const struct pipe_box *box,
                   const void *data, unsigned stride,
                   unsigned layer_stride)
{
   struct threaded_context *tc = threaded_context(_pipe);
   unsigned size;

   assert(box->height >= 1);
   assert(box->depth >= 1);

   size = (box->depth - 1) * layer_stride +
          (box->height - 1) * stride +
          box->width * util_format_get_blocksize(resource->format);
   if (!size)
      return;

   /* Small uploads can be enqueued, big uploads must sync. */
   if (size <= TC_MAX_SUBDATA_BYTES) {
      struct tc_texture_subdata *p =
         tc_add_slot_based_call(tc, TC_CALL_texture_subdata, tc_texture_subdata, size);

      tc_set_resource_reference(&p->resource, resource);
      p->level = level;
      p->usage = usage;
      p->box = *box;
      p->stride = stride;
      p->layer_stride = layer_stride;
      memcpy(p->slot, data, size);
   } else {
      struct pipe_context *pipe = tc->pipe;

      tc_sync(tc);
      pipe->texture_subdata(pipe, resource, level, usage, box, data,
                            stride, layer_stride);
   }
}


/********************************************************************
 * miscellaneous
 */

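/* Generates a fully synchronous wrapper: wait for the driver thread to go
 * idle, then forward the call and return the driver's result directly.
 */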
1737 #define TC_FUNC_SYNC_RET0(ret_type, func) \
1738 static ret_type \
1739 tc_##func(struct pipe_context *_pipe) \
1740 { \
1741 struct threaded_context *tc = threaded_context(_pipe); \
1742 struct pipe_context *pipe = tc->pipe; \
1743 tc_sync(tc); \
1744 return pipe->func(pipe); \
1745 }
1746
1747 TC_FUNC_SYNC_RET0(enum pipe_reset_status, get_device_reset_status)
1748 TC_FUNC_SYNC_RET0(uint64_t, get_timestamp)
1749
1750 static void
1751 tc_get_sample_position(struct pipe_context *_pipe,
1752 unsigned sample_count, unsigned sample_index,
1753 float *out_value)
1754 {
1755 struct threaded_context *tc = threaded_context(_pipe);
1756 struct pipe_context *pipe = tc->pipe;
1757
1758 tc_sync(tc);
1759 pipe->get_sample_position(pipe, sample_count, sample_index,
1760 out_value);
1761 }
1762
1763 static void
1764 tc_set_device_reset_callback(struct pipe_context *_pipe,
1765 const struct pipe_device_reset_callback *cb)
1766 {
1767 struct threaded_context *tc = threaded_context(_pipe);
1768 struct pipe_context *pipe = tc->pipe;
1769
1770 tc_sync(tc);
1771 pipe->set_device_reset_callback(pipe, cb);
1772 }
1773
1774 struct tc_string_marker {
1775 int len;
1776 char slot[0]; /* more will be allocated if needed */
1777 };
1778
1779 static void
1780 tc_call_emit_string_marker(struct pipe_context *pipe, union tc_payload *payload)
1781 {
1782 struct tc_string_marker *p = (struct tc_string_marker *)payload;
1783 pipe->emit_string_marker(pipe, p->slot, p->len);
1784 }
1785
1786 static void
1787 tc_emit_string_marker(struct pipe_context *_pipe,
1788 const char *string, int len)
1789 {
1790 struct threaded_context *tc = threaded_context(_pipe);
1791
1792 if (len <= TC_MAX_STRING_MARKER_BYTES) {
1793 struct tc_string_marker *p =
1794 tc_add_slot_based_call(tc, TC_CALL_emit_string_marker, tc_string_marker, len);
1795
1796 memcpy(p->slot, string, len);
1797 p->len = len;
1798 } else {
1799 struct pipe_context *pipe = tc->pipe;
1800
1801 tc_sync(tc);
1802 pipe->emit_string_marker(pipe, string, len);
1803 }
1804 }
1805
1806 static void
1807 tc_dump_debug_state(struct pipe_context *_pipe, FILE *stream,
1808 unsigned flags)
1809 {
1810 struct threaded_context *tc = threaded_context(_pipe);
1811 struct pipe_context *pipe = tc->pipe;
1812
1813 tc_sync(tc);
1814 pipe->dump_debug_state(pipe, stream, flags);
1815 }
1816
1817 static void
1818 tc_set_debug_callback(struct pipe_context *_pipe,
1819 const struct pipe_debug_callback *cb)
1820 {
1821 struct threaded_context *tc = threaded_context(_pipe);
1822 struct pipe_context *pipe = tc->pipe;
1823
1824 /* Drop all synchronous debug callbacks. Drivers are expected to be OK
1825 * with this. shader-db will use an environment variable to disable
1826 * the threaded context.
1827 */
1828 if (cb && cb->debug_message && !cb->async)
1829 return;
1830
1831 tc_sync(tc);
1832 pipe->set_debug_callback(pipe, cb);
1833 }
1834
1835 static void
1836 tc_set_log_context(struct pipe_context *_pipe, struct u_log_context *log)
1837 {
1838 struct threaded_context *tc = threaded_context(_pipe);
1839 struct pipe_context *pipe = tc->pipe;
1840
1841 tc_sync(tc);
1842 pipe->set_log_context(pipe, log);
1843 }
1844
1845 static void
1846 tc_create_fence_fd(struct pipe_context *_pipe,
1847 struct pipe_fence_handle **fence, int fd,
1848 enum pipe_fd_type type)
1849 {
1850 struct threaded_context *tc = threaded_context(_pipe);
1851 struct pipe_context *pipe = tc->pipe;
1852
1853 tc_sync(tc);
1854 pipe->create_fence_fd(pipe, fence, fd, type);
1855 }
1856
1857 static void
1858 tc_call_fence_server_sync(struct pipe_context *pipe, union tc_payload *payload)
1859 {
1860 pipe->fence_server_sync(pipe, payload->fence);
1861 pipe->screen->fence_reference(pipe->screen, &payload->fence, NULL);
1862 }
1863
1864 static void
1865 tc_fence_server_sync(struct pipe_context *_pipe,
1866 struct pipe_fence_handle *fence)
1867 {
1868 struct threaded_context *tc = threaded_context(_pipe);
1869 struct pipe_screen *screen = tc->pipe->screen;
1870 union tc_payload *payload = tc_add_small_call(tc, TC_CALL_fence_server_sync);
1871
1872 payload->fence = NULL;
1873 screen->fence_reference(screen, &payload->fence, fence);
1874 }
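
/* The payload owns a fence reference from this point: the application
 * thread takes it above, and tc_call_fence_server_sync drops it after the
 * driver has consumed the fence, so the fence stays alive even if the
 * caller releases its own reference before the batch executes. */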
1875
1876 static void
1877 tc_call_fence_server_signal(struct pipe_context *pipe, union tc_payload *payload)
1878 {
1879 pipe->fence_server_signal(pipe, payload->fence);
1880 pipe->screen->fence_reference(pipe->screen, &payload->fence, NULL);
1881 }
1882
1883 static void
1884 tc_fence_server_signal(struct pipe_context *_pipe,
1885 struct pipe_fence_handle *fence)
1886 {
1887 struct threaded_context *tc = threaded_context(_pipe);
1888 struct pipe_screen *screen = tc->pipe->screen;
1889 union tc_payload *payload = tc_add_small_call(tc, TC_CALL_fence_server_signal);
1890
1891 payload->fence = NULL;
1892 screen->fence_reference(screen, &payload->fence, fence);
1893 }
1894
1895 static struct pipe_video_codec *
1896 tc_create_video_codec(UNUSED struct pipe_context *_pipe,
1897 UNUSED const struct pipe_video_codec *templ)
1898 {
1899 unreachable("Threaded context should not be enabled for video APIs");
1900 return NULL;
1901 }
1902
1903 static struct pipe_video_buffer *
1904 tc_create_video_buffer(UNUSED struct pipe_context *_pipe,
1905 UNUSED const struct pipe_video_buffer *templ)
1906 {
1907 unreachable("Threaded context should not be enabled for video APIs");
1908 return NULL;
1909 }
1910
1911 struct tc_context_param {
1912 enum pipe_context_param param;
1913 unsigned value;
1914 };
1915
1916 static void
1917 tc_call_set_context_param(struct pipe_context *pipe,
1918 union tc_payload *payload)
1919 {
1920 struct tc_context_param *p = (struct tc_context_param*)payload;
1921
1922 if (pipe->set_context_param)
1923 pipe->set_context_param(pipe, p->param, p->value);
1924 }
1925
1926 static void
1927 tc_set_context_param(struct pipe_context *_pipe,
1928 enum pipe_context_param param,
1929 unsigned value)
1930 {
1931 struct threaded_context *tc = threaded_context(_pipe);
1932
1933 if (tc->pipe->set_context_param) {
1934 struct tc_context_param *payload =
1935 tc_add_struct_typed_call(tc, TC_CALL_set_context_param,
1936 tc_context_param);
1937
1938 payload->param = param;
1939 payload->value = value;
1940 }
1941
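   /* The enqueued call above forwards the parameter to the driver; the
    * threaded context's own queue thread is handled here directly, since
    * the driver knows nothing about that thread. */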
1942 if (param == PIPE_CONTEXT_PARAM_PIN_THREADS_TO_L3_CACHE) {
1943 /* Pin the gallium thread as requested. */
1944 util_pin_thread_to_L3(tc->queue.threads[0], value,
1945 util_cpu_caps.cores_per_L3);
1946 }
1947 }
1948
1949
1950 /********************************************************************
1951 * draw, launch, clear, blit, copy, flush
1952 */
1953
1954 struct tc_flush_payload {
1955 struct threaded_context *tc;
1956 struct pipe_fence_handle *fence;
1957 unsigned flags;
1958 };
1959
1960 static void
1961 tc_flush_queries(struct threaded_context *tc)
1962 {
1963 struct threaded_query *tq, *tmp;
1964 LIST_FOR_EACH_ENTRY_SAFE(tq, tmp, &tc->unflushed_queries, head_unflushed) {
1965 LIST_DEL(&tq->head_unflushed);
1966
1967 /* Memory release semantics: due to a possible race with
1968 * tc_get_query_result, we must ensure that the linked list changes
1969 * are visible before setting tq->flushed.
1970 */
1971 p_atomic_set(&tq->flushed, true);
1972 }
1973 }
1974
1975 static void
1976 tc_call_flush(struct pipe_context *pipe, union tc_payload *payload)
1977 {
1978 struct tc_flush_payload *p = (struct tc_flush_payload *)payload;
1979 struct pipe_screen *screen = pipe->screen;
1980
1981 pipe->flush(pipe, p->fence ? &p->fence : NULL, p->flags);
1982 screen->fence_reference(screen, &p->fence, NULL);
1983
1984 if (!(p->flags & PIPE_FLUSH_DEFERRED))
1985 tc_flush_queries(p->tc);
1986 }
1987
1988 static void
1989 tc_flush(struct pipe_context *_pipe, struct pipe_fence_handle **fence,
1990 unsigned flags)
1991 {
1992 struct threaded_context *tc = threaded_context(_pipe);
1993 struct pipe_context *pipe = tc->pipe;
1994 struct pipe_screen *screen = pipe->screen;
1995 bool async = flags & PIPE_FLUSH_DEFERRED;
1996
1997 if (flags & PIPE_FLUSH_ASYNC) {
1998 struct tc_batch *last = &tc->batch_slots[tc->last];
1999
2000 /* Prefer to do the flush in the driver thread, but avoid the inter-thread
2001 * communication overhead if the driver thread is currently idle and the
2002 * caller is going to wait for the fence immediately anyway.
2003 */
2004 if (!(util_queue_fence_is_signalled(&last->fence) &&
2005 (flags & PIPE_FLUSH_HINT_FINISH)))
2006 async = true;
2007 }
2008
2009 if (async && tc->create_fence) {
2010 if (fence) {
2011 struct tc_batch *next = &tc->batch_slots[tc->next];
2012
2013 if (!next->token) {
2014 next->token = malloc(sizeof(*next->token));
2015 if (!next->token)
2016 goto out_of_memory;
2017
2018 pipe_reference_init(&next->token->ref, 1);
2019 next->token->tc = tc;
2020 }
2021
2022 screen->fence_reference(screen, fence, tc->create_fence(pipe, next->token));
2023 if (!*fence)
2024 goto out_of_memory;
2025 }
2026
2027 struct tc_flush_payload *p =
2028 tc_add_struct_typed_call(tc, TC_CALL_flush, tc_flush_payload);
2029 p->tc = tc;
2030 p->fence = fence ? *fence : NULL;
2031 p->flags = flags | TC_FLUSH_ASYNC;
2032
2033 if (!(flags & PIPE_FLUSH_DEFERRED))
2034 tc_batch_flush(tc);
2035 return;
2036 }
2037
2038 out_of_memory:
2039 tc_sync_msg(tc, flags & PIPE_FLUSH_END_OF_FRAME ? "end of frame" :
2040 flags & PIPE_FLUSH_DEFERRED ? "deferred fence" : "normal");
2041
2042 if (!(flags & PIPE_FLUSH_DEFERRED))
2043 tc_flush_queries(tc);
2044 pipe->flush(pipe, fence, flags);
2045 }
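
/* To summarize the paths above: an async flush on a driver with fence
 * support is enqueued, creating the fence from the next batch's token up
 * front when the caller wants one; every other case syncs and flushes
 * directly in the application thread. TC_FLUSH_ASYNC marks the replayed
 * call as coming from the driver thread. */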
2046
2047 /* The enqueued payload is actually variable-sized: the trailing indirect
2048 * member is only allocated when the draw is indirect. */
2049 struct tc_full_draw_info {
2050 struct pipe_draw_info draw;
2051 struct pipe_draw_indirect_info indirect;
2052 };
2053
2054 static void
2055 tc_call_draw_vbo(struct pipe_context *pipe, union tc_payload *payload)
2056 {
2057 struct tc_full_draw_info *info = (struct tc_full_draw_info*)payload;
2058
2059 pipe->draw_vbo(pipe, &info->draw);
2060 pipe_so_target_reference(&info->draw.count_from_stream_output, NULL);
2061 if (info->draw.index_size)
2062 pipe_resource_reference(&info->draw.index.resource, NULL);
2063 if (info->draw.indirect) {
2064 pipe_resource_reference(&info->indirect.buffer, NULL);
2065 pipe_resource_reference(&info->indirect.indirect_draw_count, NULL);
2066 }
2067 }
2068
2069 static struct tc_full_draw_info *
2070 tc_add_draw_vbo(struct pipe_context *_pipe, bool indirect)
2071 {
2072 return (struct tc_full_draw_info*)
2073 tc_add_sized_call(threaded_context(_pipe), TC_CALL_draw_vbo,
2074 indirect ? sizeof(struct tc_full_draw_info) :
2075 sizeof(struct pipe_draw_info));
2076 }
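
/* The size trick above relies on "draw" being the first member of
 * tc_full_draw_info: a payload sized for pipe_draw_info alone is still a
 * valid prefix of the full struct. */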
2077
2078 static void
2079 tc_draw_vbo(struct pipe_context *_pipe, const struct pipe_draw_info *info)
2080 {
2081 struct threaded_context *tc = threaded_context(_pipe);
2082 struct pipe_draw_indirect_info *indirect = info->indirect;
2083 unsigned index_size = info->index_size;
2084 bool has_user_indices = info->has_user_indices;
2085
2086 if (index_size && has_user_indices) {
2087 unsigned size = info->count * index_size;
2088 struct pipe_resource *buffer = NULL;
2089 unsigned offset;
2090
2091 tc_assert(!indirect);
2092
2093 /* This must be done before adding draw_vbo, because the upload may
2094 * trigger calls such as transfer_unmap, which could flush a partially
2095 * initialized draw_vbo call to the driver if it happened afterwards.
2096 */
2097 u_upload_data(tc->base.stream_uploader, 0, size, 4, info->index.user,
2098 &offset, &buffer);
2099 if (unlikely(!buffer))
2100 return;
2101
2102 struct tc_full_draw_info *p = tc_add_draw_vbo(_pipe, false);
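      /* count_from_stream_output is NULL-initialized because
       * pipe_so_target_reference unreferences the old value; the reference
       * call then bumps the refcount, and the memcpy of *info stores the
       * same pointer value, leaving a correctly referenced copy. */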
2103 p->draw.count_from_stream_output = NULL;
2104 pipe_so_target_reference(&p->draw.count_from_stream_output,
2105 info->count_from_stream_output);
2106 memcpy(&p->draw, info, sizeof(*info));
2107 p->draw.has_user_indices = false;
2108 p->draw.index.resource = buffer;
2109 p->draw.start = offset / index_size;
2110 } else {
2111 /* Non-indexed call or indexed with a real index buffer. */
2112 struct tc_full_draw_info *p = tc_add_draw_vbo(_pipe, indirect != NULL);
2113 p->draw.count_from_stream_output = NULL;
2114 pipe_so_target_reference(&p->draw.count_from_stream_output,
2115 info->count_from_stream_output);
2116 if (index_size) {
2117 tc_set_resource_reference(&p->draw.index.resource,
2118 info->index.resource);
2119 }
2120 memcpy(&p->draw, info, sizeof(*info));
2121
2122 if (indirect) {
2123 tc_set_resource_reference(&p->indirect.buffer, indirect->buffer);
2124 tc_set_resource_reference(&p->indirect.indirect_draw_count,
2125 indirect->indirect_draw_count);
2126 memcpy(&p->indirect, indirect, sizeof(*indirect));
2127 p->draw.indirect = &p->indirect;
2128 }
2129 }
2130 }
2131
2132 static void
2133 tc_call_launch_grid(struct pipe_context *pipe, union tc_payload *payload)
2134 {
2135 struct pipe_grid_info *p = (struct pipe_grid_info *)payload;
2136
2137 pipe->launch_grid(pipe, p);
2138 pipe_resource_reference(&p->indirect, NULL);
2139 }
2140
2141 static void
2142 tc_launch_grid(struct pipe_context *_pipe,
2143 const struct pipe_grid_info *info)
2144 {
2145 struct threaded_context *tc = threaded_context(_pipe);
2146 struct pipe_grid_info *p = tc_add_struct_typed_call(tc, TC_CALL_launch_grid,
2147 pipe_grid_info);
2148 assert(info->input == NULL);
2149
2150 tc_set_resource_reference(&p->indirect, info->indirect);
2151 memcpy(p, info, sizeof(*info));
2152 }
2153
2154 static void
2155 tc_call_resource_copy_region(struct pipe_context *pipe, union tc_payload *payload)
2156 {
2157 struct tc_resource_copy_region *p = (struct tc_resource_copy_region *)payload;
2158
2159 pipe->resource_copy_region(pipe, p->dst, p->dst_level, p->dstx, p->dsty,
2160 p->dstz, p->src, p->src_level, &p->src_box);
2161 pipe_resource_reference(&p->dst, NULL);
2162 pipe_resource_reference(&p->src, NULL);
2163 }
2164
2165 static void
2166 tc_resource_copy_region(struct pipe_context *_pipe,
2167 struct pipe_resource *dst, unsigned dst_level,
2168 unsigned dstx, unsigned dsty, unsigned dstz,
2169 struct pipe_resource *src, unsigned src_level,
2170 const struct pipe_box *src_box)
2171 {
2172 struct threaded_context *tc = threaded_context(_pipe);
2173 struct threaded_resource *tdst = threaded_resource(dst);
2174 struct tc_resource_copy_region *p =
2175 tc_add_struct_typed_call(tc, TC_CALL_resource_copy_region,
2176 tc_resource_copy_region);
2177
2178 tc_set_resource_reference(&p->dst, dst);
2179 p->dst_level = dst_level;
2180 p->dstx = dstx;
2181 p->dsty = dsty;
2182 p->dstz = dstz;
2183 tc_set_resource_reference(&p->src, src);
2184 p->src_level = src_level;
2185 p->src_box = *src_box;
2186
2187 if (dst->target == PIPE_BUFFER)
2188 util_range_add(&tdst->valid_buffer_range, dstx, dstx + src_box->width);
2189 }
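
/* For buffer destinations, the written range is added to valid_buffer_range
 * so later transfer_map calls know which parts of the buffer contain defined
 * data; that range feeds the unsynchronized-mapping heuristics. */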
2190
2191 static void
2192 tc_call_blit(struct pipe_context *pipe, union tc_payload *payload)
2193 {
2194 struct pipe_blit_info *blit = (struct pipe_blit_info*)payload;
2195
2196 pipe->blit(pipe, blit);
2197 pipe_resource_reference(&blit->dst.resource, NULL);
2198 pipe_resource_reference(&blit->src.resource, NULL);
2199 }
2200
2201 static void
2202 tc_blit(struct pipe_context *_pipe, const struct pipe_blit_info *info)
2203 {
2204 struct threaded_context *tc = threaded_context(_pipe);
2205 struct pipe_blit_info *blit =
2206 tc_add_struct_typed_call(tc, TC_CALL_blit, pipe_blit_info);
2207
2208 tc_set_resource_reference(&blit->dst.resource, info->dst.resource);
2209 tc_set_resource_reference(&blit->src.resource, info->src.resource);
2210 memcpy(blit, info, sizeof(*info));
2211 }
2212
2213 struct tc_generate_mipmap {
2214 struct pipe_resource *res;
2215 enum pipe_format format;
2216 unsigned base_level;
2217 unsigned last_level;
2218 unsigned first_layer;
2219 unsigned last_layer;
2220 };
2221
2222 static void
2223 tc_call_generate_mipmap(struct pipe_context *pipe, union tc_payload *payload)
2224 {
2225 struct tc_generate_mipmap *p = (struct tc_generate_mipmap *)payload;
2226 MAYBE_UNUSED bool result = pipe->generate_mipmap(pipe, p->res, p->format,
2227 p->base_level,
2228 p->last_level,
2229 p->first_layer,
2230 p->last_layer);
2231 assert(result);
2232 pipe_resource_reference(&p->res, NULL);
2233 }
2234
2235 static bool
2236 tc_generate_mipmap(struct pipe_context *_pipe,
2237 struct pipe_resource *res,
2238 enum pipe_format format,
2239 unsigned base_level,
2240 unsigned last_level,
2241 unsigned first_layer,
2242 unsigned last_layer)
2243 {
2244 struct threaded_context *tc = threaded_context(_pipe);
2245 struct pipe_context *pipe = tc->pipe;
2246 struct pipe_screen *screen = pipe->screen;
2247 unsigned bind;
2248
2249 if (util_format_is_depth_or_stencil(format))
2250 bind = PIPE_BIND_DEPTH_STENCIL;
2251 else
2252 bind = PIPE_BIND_RENDER_TARGET;
2253
2254 if (!screen->is_format_supported(screen, format, res->target,
2255 res->nr_samples, res->nr_storage_samples,
2256 bind))
2257 return false;
2258
2259 struct tc_generate_mipmap *p =
2260 tc_add_struct_typed_call(tc, TC_CALL_generate_mipmap, tc_generate_mipmap);
2261
2262 tc_set_resource_reference(&p->res, res);
2263 p->format = format;
2264 p->base_level = base_level;
2265 p->last_level = last_level;
2266 p->first_layer = first_layer;
2267 p->last_layer = last_layer;
2268 return true;
2269 }
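
/* The is_format_supported check must stay in the application thread because
 * its result decides the return value; once the call is enqueued it is
 * fire-and-forget, which is why tc_call_generate_mipmap merely asserts that
 * the driver succeeded. */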
2270
2271 static void
2272 tc_call_flush_resource(struct pipe_context *pipe, union tc_payload *payload)
2273 {
2274 pipe->flush_resource(pipe, payload->resource);
2275 pipe_resource_reference(&payload->resource, NULL);
2276 }
2277
2278 static void
2279 tc_flush_resource(struct pipe_context *_pipe,
2280 struct pipe_resource *resource)
2281 {
2282 struct threaded_context *tc = threaded_context(_pipe);
2283 union tc_payload *payload = tc_add_small_call(tc, TC_CALL_flush_resource);
2284
2285 tc_set_resource_reference(&payload->resource, resource);
2286 }
2287
2288 static void
2289 tc_call_invalidate_resource(struct pipe_context *pipe, union tc_payload *payload)
2290 {
2291 pipe->invalidate_resource(pipe, payload->resource);
2292 pipe_resource_reference(&payload->resource, NULL);
2293 }
2294
2295 static void
2296 tc_invalidate_resource(struct pipe_context *_pipe,
2297 struct pipe_resource *resource)
2298 {
2299 struct threaded_context *tc = threaded_context(_pipe);
2300
2301 if (resource->target == PIPE_BUFFER) {
2302 tc_invalidate_buffer(tc, threaded_resource(resource));
2303 return;
2304 }
2305
2306 union tc_payload *payload = tc_add_small_call(tc, TC_CALL_invalidate_resource);
2307 tc_set_resource_reference(&payload->resource, resource);
2308 }
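
/* Buffer invalidation is special-cased: tc_invalidate_buffer swaps in fresh
 * storage via the driver's replace_buffer_storage callback instead of
 * calling pipe->invalidate_resource; only non-buffer resources take the
 * generic enqueued path. */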
2309
2310 struct tc_clear {
2311 unsigned buffers;
2312 union pipe_color_union color;
2313 double depth;
2314 unsigned stencil;
2315 };
2316
2317 static void
2318 tc_call_clear(struct pipe_context *pipe, union tc_payload *payload)
2319 {
2320 struct tc_clear *p = (struct tc_clear *)payload;
2321 pipe->clear(pipe, p->buffers, &p->color, p->depth, p->stencil);
2322 }
2323
2324 static void
2325 tc_clear(struct pipe_context *_pipe, unsigned buffers,
2326 const union pipe_color_union *color, double depth,
2327 unsigned stencil)
2328 {
2329 struct threaded_context *tc = threaded_context(_pipe);
2330 struct tc_clear *p = tc_add_struct_typed_call(tc, TC_CALL_clear, tc_clear);
2331
2332 p->buffers = buffers;
2333 p->color = *color;
2334 p->depth = depth;
2335 p->stencil = stencil;
2336 }
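
/* pipe->clear only captures by-value arguments, so it can be enqueued. The
 * surface-based clears below are handled conservatively with tc_sync plus a
 * direct driver call instead. */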
2337
2338 static void
2339 tc_clear_render_target(struct pipe_context *_pipe,
2340 struct pipe_surface *dst,
2341 const union pipe_color_union *color,
2342 unsigned dstx, unsigned dsty,
2343 unsigned width, unsigned height,
2344 bool render_condition_enabled)
2345 {
2346 struct threaded_context *tc = threaded_context(_pipe);
2347 struct pipe_context *pipe = tc->pipe;
2348
2349 tc_sync(tc);
2350 pipe->clear_render_target(pipe, dst, color, dstx, dsty, width, height,
2351 render_condition_enabled);
2352 }
2353
2354 static void
2355 tc_clear_depth_stencil(struct pipe_context *_pipe,
2356 struct pipe_surface *dst, unsigned clear_flags,
2357 double depth, unsigned stencil, unsigned dstx,
2358 unsigned dsty, unsigned width, unsigned height,
2359 bool render_condition_enabled)
2360 {
2361 struct threaded_context *tc = threaded_context(_pipe);
2362 struct pipe_context *pipe = tc->pipe;
2363
2364 tc_sync(tc);
2365 pipe->clear_depth_stencil(pipe, dst, clear_flags, depth, stencil,
2366 dstx, dsty, width, height,
2367 render_condition_enabled);
2368 }
2369
2370 struct tc_clear_buffer {
2371 struct pipe_resource *res;
2372 unsigned offset;
2373 unsigned size;
2374 char clear_value[16];
2375 int clear_value_size;
2376 };
2377
2378 static void
2379 tc_call_clear_buffer(struct pipe_context *pipe, union tc_payload *payload)
2380 {
2381 struct tc_clear_buffer *p = (struct tc_clear_buffer *)payload;
2382
2383 pipe->clear_buffer(pipe, p->res, p->offset, p->size, p->clear_value,
2384 p->clear_value_size);
2385 pipe_resource_reference(&p->res, NULL);
2386 }
2387
2388 static void
2389 tc_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
2390 unsigned offset, unsigned size,
2391 const void *clear_value, int clear_value_size)
2392 {
2393 struct threaded_context *tc = threaded_context(_pipe);
2394 struct threaded_resource *tres = threaded_resource(res);
2395 struct tc_clear_buffer *p =
2396 tc_add_struct_typed_call(tc, TC_CALL_clear_buffer, tc_clear_buffer);
2397
2398 tc_set_resource_reference(&p->res, res);
2399 p->offset = offset;
2400 p->size = size;
2401 memcpy(p->clear_value, clear_value, clear_value_size);
2402 p->clear_value_size = clear_value_size;
2403
2404 util_range_add(&tres->valid_buffer_range, offset, offset + size);
2405 }
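
/* Note that clear_value_size must not exceed sizeof(p->clear_value), i.e.
 * 16 bytes, the largest pipe format block size, because the value is copied
 * by value into the call slot. */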
2406
2407 struct tc_clear_texture {
2408 struct pipe_resource *res;
2409 unsigned level;
2410 struct pipe_box box;
2411 char data[16];
2412 };
2413
2414 static void
2415 tc_call_clear_texture(struct pipe_context *pipe, union tc_payload *payload)
2416 {
2417 struct tc_clear_texture *p = (struct tc_clear_texture *)payload;
2418
2419 pipe->clear_texture(pipe, p->res, p->level, &p->box, p->data);
2420 pipe_resource_reference(&p->res, NULL);
2421 }
2422
2423 static void
2424 tc_clear_texture(struct pipe_context *_pipe, struct pipe_resource *res,
2425 unsigned level, const struct pipe_box *box, const void *data)
2426 {
2427 struct threaded_context *tc = threaded_context(_pipe);
2428 struct tc_clear_texture *p =
2429 tc_add_struct_typed_call(tc, TC_CALL_clear_texture, tc_clear_texture);
2430
2431 tc_set_resource_reference(&p->res, res);
2432 p->level = level;
2433 p->box = *box;
2434 memcpy(p->data, data,
2435 util_format_get_blocksize(res->format));
2436 }
2437
2438 struct tc_resource_commit {
2439 struct pipe_resource *res;
2440 unsigned level;
2441 struct pipe_box box;
2442 bool commit;
2443 };
2444
2445 static void
2446 tc_call_resource_commit(struct pipe_context *pipe, union tc_payload *payload)
2447 {
2448 struct tc_resource_commit *p = (struct tc_resource_commit *)payload;
2449
2450 pipe->resource_commit(pipe, p->res, p->level, &p->box, p->commit);
2451 pipe_resource_reference(&p->res, NULL);
2452 }
2453
2454 static bool
2455 tc_resource_commit(struct pipe_context *_pipe, struct pipe_resource *res,
2456 unsigned level, struct pipe_box *box, bool commit)
2457 {
2458 struct threaded_context *tc = threaded_context(_pipe);
2459 struct tc_resource_commit *p =
2460 tc_add_struct_typed_call(tc, TC_CALL_resource_commit, tc_resource_commit);
2461
2462 tc_set_resource_reference(&p->res, res);
2463 p->level = level;
2464 p->box = *box;
2465 p->commit = commit;
2466 return true; /* the driver's result isn't available without syncing; assume success */
2467 }
2468
2469
2470 /********************************************************************
2471 * callback
2472 */
2473
2474 struct tc_callback_payload {
2475 void (*fn)(void *data);
2476 void *data;
2477 };
2478
2479 static void
2480 tc_call_callback(UNUSED struct pipe_context *pipe, union tc_payload *payload)
2481 {
2482 struct tc_callback_payload *p = (struct tc_callback_payload *)payload;
2483
2484 p->fn(p->data);
2485 }
2486
2487 static void
2488 tc_callback(struct pipe_context *_pipe, void (*fn)(void *), void *data,
2489 bool asap)
2490 {
2491 struct threaded_context *tc = threaded_context(_pipe);
2492
2493 if (asap && tc_is_sync(tc)) {
2494 fn(data);
2495 return;
2496 }
2497
2498 struct tc_callback_payload *p =
2499 tc_add_struct_typed_call(tc, TC_CALL_callback, tc_callback_payload);
2500 p->fn = fn;
2501 p->data = data;
2502 }
2503
2504
2505 /********************************************************************
2506 * create & destroy
2507 */
2508
2509 static void
2510 tc_destroy(struct pipe_context *_pipe)
2511 {
2512 struct threaded_context *tc = threaded_context(_pipe);
2513 struct pipe_context *pipe = tc->pipe;
2514
2515 if (tc->base.const_uploader &&
2516 tc->base.stream_uploader != tc->base.const_uploader)
2517 u_upload_destroy(tc->base.const_uploader);
2518
2519 if (tc->base.stream_uploader)
2520 u_upload_destroy(tc->base.stream_uploader);
2521
2522 tc_sync(tc);
2523
2524 if (util_queue_is_initialized(&tc->queue)) {
2525 util_queue_destroy(&tc->queue);
2526
2527 for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
2528 util_queue_fence_destroy(&tc->batch_slots[i].fence);
2529 assert(!tc->batch_slots[i].token);
2530 }
2531 }
2532
2533 slab_destroy_child(&tc->pool_transfers);
2534 assert(tc->batch_slots[tc->next].num_total_call_slots == 0);
2535 pipe->destroy(pipe);
2536 os_free_aligned(tc);
2537 }
2538
2539 static const tc_execute execute_func[TC_NUM_CALLS] = {
2540 #define CALL(name) tc_call_##name,
2541 #include "u_threaded_context_calls.h"
2542 #undef CALL
2543 };
2544
2545 /**
2546 * Wrap an existing pipe_context into a threaded_context.
2547 *
2548 * \param pipe pipe_context to wrap
2549 * \param parent_transfer_pool parent slab pool set up for creating pipe_-
2550 * transfer objects; the driver should have one
2551 * in pipe_screen.
2552 * \param replace_buffer callback for replacing a pipe_resource's storage
2553 * with another pipe_resource's storage.
2554 * \param out if non-NULL and creation succeeds, the threaded_context is
2555 * also stored here in addition to being returned
2556 */
2557 struct pipe_context *
2558 threaded_context_create(struct pipe_context *pipe,
2559 struct slab_parent_pool *parent_transfer_pool,
2560 tc_replace_buffer_storage_func replace_buffer,
2561 tc_create_fence_func create_fence,
2562 struct threaded_context **out)
2563 {
2564 struct threaded_context *tc;
2565
2566 STATIC_ASSERT(sizeof(union tc_payload) <= 8);
2567 STATIC_ASSERT(sizeof(struct tc_call) <= 16);
2568
2569 if (!pipe)
2570 return NULL;
2571
2572 util_cpu_detect();
2573
2574 if (!debug_get_bool_option("GALLIUM_THREAD", util_cpu_caps.nr_cpus > 1))
2575 return pipe;
2576
2577 tc = os_malloc_aligned(sizeof(struct threaded_context), 16);
2578 if (!tc) {
2579 pipe->destroy(pipe);
2580 return NULL;
2581 }
2582 memset(tc, 0, sizeof(*tc));
2583
2584 assert((uintptr_t)tc % 16 == 0);
2585 /* These should be static asserts, but they don't work with MSVC */
2586 assert(offsetof(struct threaded_context, batch_slots) % 16 == 0);
2587 assert(offsetof(struct threaded_context, batch_slots[0].call) % 16 == 0);
2588 assert(offsetof(struct threaded_context, batch_slots[0].call[1]) % 16 == 0);
2589 assert(offsetof(struct threaded_context, batch_slots[1].call) % 16 == 0);
2590
2591 /* The driver context isn't wrapped, so set its "priv" to NULL. */
2592 pipe->priv = NULL;
2593
2594 tc->pipe = pipe;
2595 tc->replace_buffer_storage = replace_buffer;
2596 tc->create_fence = create_fence;
2597 tc->map_buffer_alignment =
2598 pipe->screen->get_param(pipe->screen, PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT);
2599 tc->base.priv = pipe; /* priv points to the wrapped driver context */
2600 tc->base.screen = pipe->screen;
2601 tc->base.destroy = tc_destroy;
2602 tc->base.callback = tc_callback;
2603
2604 tc->base.stream_uploader = u_upload_clone(&tc->base, pipe->stream_uploader);
2605 if (pipe->stream_uploader == pipe->const_uploader)
2606 tc->base.const_uploader = tc->base.stream_uploader;
2607 else
2608 tc->base.const_uploader = u_upload_clone(&tc->base, pipe->const_uploader);
2609
2610 if (!tc->base.stream_uploader || !tc->base.const_uploader)
2611 goto fail;
2612
2613 /* The queue size is the number of batches "waiting". Batches are removed
2614 * from the queue before being executed, so keep one tc_batch slot for that
2615 * execution. Also, keep one unused slot for an unflushed batch.
2616 */
2617 if (!util_queue_init(&tc->queue, "gdrv", TC_MAX_BATCHES - 2, 1, 0))
2618 goto fail;
2619
2620 for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
2621 tc->batch_slots[i].sentinel = TC_SENTINEL;
2622 tc->batch_slots[i].pipe = pipe;
2623 util_queue_fence_init(&tc->batch_slots[i].fence);
2624 }
2625
2626 LIST_INITHEAD(&tc->unflushed_queries);
2627
2628 slab_create_child(&tc->pool_transfers, parent_transfer_pool);
2629
2630 tc->base.set_context_param = tc_set_context_param; /* always set this */
2631
2632 #define CTX_INIT(_member) \
2633 tc->base._member = tc->pipe->_member ? tc_##_member : NULL
2634
2635 CTX_INIT(flush);
2636 CTX_INIT(draw_vbo);
2637 CTX_INIT(launch_grid);
2638 CTX_INIT(resource_copy_region);
2639 CTX_INIT(blit);
2640 CTX_INIT(clear);
2641 CTX_INIT(clear_render_target);
2642 CTX_INIT(clear_depth_stencil);
2643 CTX_INIT(clear_buffer);
2644 CTX_INIT(clear_texture);
2645 CTX_INIT(flush_resource);
2646 CTX_INIT(generate_mipmap);
2647 CTX_INIT(render_condition);
2648 CTX_INIT(create_query);
2649 CTX_INIT(create_batch_query);
2650 CTX_INIT(destroy_query);
2651 CTX_INIT(begin_query);
2652 CTX_INIT(end_query);
2653 CTX_INIT(get_query_result);
2654 CTX_INIT(get_query_result_resource);
2655 CTX_INIT(set_active_query_state);
2656 CTX_INIT(create_blend_state);
2657 CTX_INIT(bind_blend_state);
2658 CTX_INIT(delete_blend_state);
2659 CTX_INIT(create_sampler_state);
2660 CTX_INIT(bind_sampler_states);
2661 CTX_INIT(delete_sampler_state);
2662 CTX_INIT(create_rasterizer_state);
2663 CTX_INIT(bind_rasterizer_state);
2664 CTX_INIT(delete_rasterizer_state);
2665 CTX_INIT(create_depth_stencil_alpha_state);
2666 CTX_INIT(bind_depth_stencil_alpha_state);
2667 CTX_INIT(delete_depth_stencil_alpha_state);
2668 CTX_INIT(create_fs_state);
2669 CTX_INIT(bind_fs_state);
2670 CTX_INIT(delete_fs_state);
2671 CTX_INIT(create_vs_state);
2672 CTX_INIT(bind_vs_state);
2673 CTX_INIT(delete_vs_state);
2674 CTX_INIT(create_gs_state);
2675 CTX_INIT(bind_gs_state);
2676 CTX_INIT(delete_gs_state);
2677 CTX_INIT(create_tcs_state);
2678 CTX_INIT(bind_tcs_state);
2679 CTX_INIT(delete_tcs_state);
2680 CTX_INIT(create_tes_state);
2681 CTX_INIT(bind_tes_state);
2682 CTX_INIT(delete_tes_state);
2683 CTX_INIT(create_compute_state);
2684 CTX_INIT(bind_compute_state);
2685 CTX_INIT(delete_compute_state);
2686 CTX_INIT(create_vertex_elements_state);
2687 CTX_INIT(bind_vertex_elements_state);
2688 CTX_INIT(delete_vertex_elements_state);
2689 CTX_INIT(set_blend_color);
2690 CTX_INIT(set_stencil_ref);
2691 CTX_INIT(set_sample_mask);
2692 CTX_INIT(set_min_samples);
2693 CTX_INIT(set_clip_state);
2694 CTX_INIT(set_constant_buffer);
2695 CTX_INIT(set_framebuffer_state);
2696 CTX_INIT(set_polygon_stipple);
2697 CTX_INIT(set_scissor_states);
2698 CTX_INIT(set_viewport_states);
2699 CTX_INIT(set_window_rectangles);
2700 CTX_INIT(set_sampler_views);
2701 CTX_INIT(set_tess_state);
2702 CTX_INIT(set_shader_buffers);
2703 CTX_INIT(set_shader_images);
2704 CTX_INIT(set_vertex_buffers);
2705 CTX_INIT(create_stream_output_target);
2706 CTX_INIT(stream_output_target_destroy);
2707 CTX_INIT(set_stream_output_targets);
2708 CTX_INIT(create_sampler_view);
2709 CTX_INIT(sampler_view_destroy);
2710 CTX_INIT(create_surface);
2711 CTX_INIT(surface_destroy);
2712 CTX_INIT(transfer_map);
2713 CTX_INIT(transfer_flush_region);
2714 CTX_INIT(transfer_unmap);
2715 CTX_INIT(buffer_subdata);
2716 CTX_INIT(texture_subdata);
2717 CTX_INIT(texture_barrier);
2718 CTX_INIT(memory_barrier);
2719 CTX_INIT(resource_commit);
2720 CTX_INIT(create_video_codec);
2721 CTX_INIT(create_video_buffer);
2722 CTX_INIT(set_compute_resources);
2723 CTX_INIT(set_global_binding);
2724 CTX_INIT(get_sample_position);
2725 CTX_INIT(invalidate_resource);
2726 CTX_INIT(get_device_reset_status);
2727 CTX_INIT(set_device_reset_callback);
2728 CTX_INIT(dump_debug_state);
2729 CTX_INIT(set_log_context);
2730 CTX_INIT(emit_string_marker);
2731 CTX_INIT(set_debug_callback);
2732 CTX_INIT(create_fence_fd);
2733 CTX_INIT(fence_server_sync);
2734 CTX_INIT(fence_server_signal);
2735 CTX_INIT(get_timestamp);
2736 CTX_INIT(create_texture_handle);
2737 CTX_INIT(delete_texture_handle);
2738 CTX_INIT(make_texture_handle_resident);
2739 CTX_INIT(create_image_handle);
2740 CTX_INIT(delete_image_handle);
2741 CTX_INIT(make_image_handle_resident);
2742 #undef CTX_INIT
2743
2744 if (out)
2745 *out = tc;
2746
2747 return &tc->base;
2748
2749 fail:
2750 tc_destroy(&tc->base);
2751 return NULL;
2752 }
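
/* A minimal usage sketch, assuming a hypothetical driver with mydrv_*
 * helpers and a mydrv_replace_buffer_storage callback (all of these names
 * are invented for illustration):
 *
 *    static struct pipe_context *
 *    mydrv_context_create(struct pipe_screen *screen, void *priv,
 *                         unsigned flags)
 *    {
 *       struct pipe_context *ctx =
 *          mydrv_create_raw_context(screen, priv, flags);
 *
 *       if (!ctx)
 *          return NULL;
 *
 *       return threaded_context_create(ctx,
 *                                      &mydrv_screen(screen)->transfer_pool,
 *                                      mydrv_replace_buffer_storage,
 *                                      NULL,  // no async fences in this sketch
 *                                      NULL);
 *    }
 *
 * Note that threaded_context_create returns the unwrapped context when
 * GALLIUM_THREAD is disabled, so callers must not assume the result is a
 * threaded_context. */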