/**************************************************************************
 *
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "util/u_threaded_context.h"
#include "util/u_cpu_detect.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
/* 0 = disabled, 1 = assertions, 2 = printfs */
#define TC_DEBUG 0

#if TC_DEBUG >= 1
#define tc_assert assert
#else
#define tc_assert(x)
#endif

#if TC_DEBUG >= 2
#define tc_printf printf
#define tc_asprintf asprintf
#define tc_strcmp strcmp
#else
#define tc_printf(...)
#define tc_asprintf(...) 0
#define tc_strcmp(...) 0
#endif
#define TC_SENTINEL 0x5ca1ab1e
enum tc_call_id {
#define CALL(name) TC_CALL_##name,
#include "u_threaded_context_calls.h"
#undef CALL
   TC_NUM_CALLS,
};
typedef void (*tc_execute)(struct pipe_context *pipe, union tc_payload *payload);
static const tc_execute execute_func[TC_NUM_CALLS];
static void
tc_batch_check(UNUSED struct tc_batch *batch)
{
   tc_assert(batch->sentinel == TC_SENTINEL);
   tc_assert(batch->num_total_call_slots <= TC_CALLS_PER_BATCH);
}
static void
tc_debug_check(struct threaded_context *tc)
{
   for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
      tc_batch_check(&tc->batch_slots[i]);
      tc_assert(tc->batch_slots[i].pipe == tc->pipe);
   }
}
static void
tc_batch_execute(void *job, UNUSED int thread_index)
{
   struct tc_batch *batch = job;
   struct pipe_context *pipe = batch->pipe;
   struct tc_call *last = &batch->call[batch->num_total_call_slots];

   tc_batch_check(batch);

   assert(!batch->token);

   for (struct tc_call *iter = batch->call; iter != last;
        iter += iter->num_call_slots) {
      tc_assert(iter->sentinel == TC_SENTINEL);
      execute_func[iter->call_id](pipe, &iter->payload);
   }

   tc_batch_check(batch);
   batch->num_total_call_slots = 0;
}
static void
tc_batch_flush(struct threaded_context *tc)
{
   struct tc_batch *next = &tc->batch_slots[tc->next];

   tc_assert(next->num_total_call_slots != 0);
   tc_batch_check(next);
   tc_debug_check(tc);
   p_atomic_add(&tc->num_offloaded_slots, next->num_total_call_slots);

   if (next->token) {
      next->token->tc = NULL;
      tc_unflushed_batch_token_reference(&next->token, NULL);
   }

   util_queue_add_job(&tc->queue, next, &next->fence, tc_batch_execute,
                      NULL);
   tc->last = tc->next;
   tc->next = (tc->next + 1) % TC_MAX_BATCHES;
}
/* This is the function that adds variable-sized calls into the current
 * batch. It also flushes the batch if there is not enough space there.
 * All other higher-level "add" functions use it.
 */
static union tc_payload *
tc_add_sized_call(struct threaded_context *tc, enum tc_call_id id,
                  unsigned payload_size)
{
   struct tc_batch *next = &tc->batch_slots[tc->next];
   unsigned total_size = offsetof(struct tc_call, payload) + payload_size;
   unsigned num_call_slots = DIV_ROUND_UP(total_size, sizeof(struct tc_call));

   tc_debug_check(tc);

   if (unlikely(next->num_total_call_slots + num_call_slots > TC_CALLS_PER_BATCH)) {
      tc_batch_flush(tc);
      next = &tc->batch_slots[tc->next];
      tc_assert(next->num_total_call_slots == 0);
   }

   tc_assert(util_queue_fence_is_signalled(&next->fence));

   struct tc_call *call = &next->call[next->num_total_call_slots];
   next->num_total_call_slots += num_call_slots;

   call->sentinel = TC_SENTINEL;
   call->call_id = id;
   call->num_call_slots = num_call_slots;

   tc_debug_check(tc);
   return &call->payload;
}
#define tc_add_struct_typed_call(tc, execute, type) \
   ((struct type*)tc_add_sized_call(tc, execute, sizeof(struct type)))

#define tc_add_slot_based_call(tc, execute, type, num_slots) \
   ((struct type*)tc_add_sized_call(tc, execute, \
                                    sizeof(struct type) + \
                                    sizeof(((struct type*)NULL)->slot[0]) * \
                                    (num_slots)))
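/* Usage sketch for the helpers above ("tc_foo"/"TC_CALL_foo" are
 * illustrative names only; the real wrappers below follow this pattern):
 *
 *    struct tc_foo {
 *       ubyte start, count;
 *       struct pipe_scissor_state slot[0]; (allocated past the struct)
 *    };
 *
 *    struct tc_foo *p =
 *       tc_add_slot_based_call(tc, TC_CALL_foo, tc_foo, count);
 *    p->start = start;
 *    p->count = count;
 *    memcpy(p->slot, states, count * sizeof(states[0]));
 */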
static union tc_payload *
tc_add_small_call(struct threaded_context *tc, enum tc_call_id id)
{
   return tc_add_sized_call(tc, id, 0);
}
static bool
tc_is_sync(struct threaded_context *tc)
{
   struct tc_batch *last = &tc->batch_slots[tc->last];
   struct tc_batch *next = &tc->batch_slots[tc->next];

   return util_queue_fence_is_signalled(&last->fence) &&
          !next->num_total_call_slots;
}
static void
_tc_sync(struct threaded_context *tc, UNUSED const char *info,
         UNUSED const char *func)
{
   struct tc_batch *last = &tc->batch_slots[tc->last];
   struct tc_batch *next = &tc->batch_slots[tc->next];
   bool synced = false;

   tc_debug_check(tc);

   /* Only wait for queued calls... */
   if (!util_queue_fence_is_signalled(&last->fence)) {
      util_queue_fence_wait(&last->fence);
      synced = true;
   }

   tc_debug_check(tc);

   if (next->token) {
      next->token->tc = NULL;
      tc_unflushed_batch_token_reference(&next->token, NULL);
   }

   /* .. and execute unflushed calls directly. */
   if (next->num_total_call_slots) {
      p_atomic_add(&tc->num_direct_slots, next->num_total_call_slots);
      tc_batch_execute(next, 0);
      synced = true;
   }

   if (synced) {
      p_atomic_inc(&tc->num_syncs);

      if (tc_strcmp(func, "tc_destroy") != 0) {
         tc_printf("sync %s %s\n", func, info);
      }
   }

   tc_debug_check(tc);
}
#define tc_sync(tc) _tc_sync(tc, "", __func__)
#define tc_sync_msg(tc, info) _tc_sync(tc, info, __func__)
/**
 * Call this from fence_finish for same-context fence waits of deferred fences
 * that haven't been flushed yet.
 *
 * The passed pipe_context must be the one passed to pipe_screen::fence_finish,
 * i.e., the wrapped one.
 */
void
threaded_context_flush(struct pipe_context *_pipe,
                       struct tc_unflushed_batch_token *token,
                       bool prefer_async)
{
   struct threaded_context *tc = threaded_context(_pipe);

   /* This is called from the state-tracker / application thread. */
   if (token->tc && token->tc == tc) {
      struct tc_batch *last = &tc->batch_slots[tc->last];

      /* Prefer to do the flush in the driver thread if it is already
       * running. That should be better for cache locality.
       */
      if (prefer_async || !util_queue_fence_is_signalled(&last->fence))
         tc_batch_flush(tc);
      else
         tc_sync(token->tc);
   }
}
static void
tc_set_resource_reference(struct pipe_resource **dst, struct pipe_resource *src)
{
   *dst = NULL; /* the payload memory is uninitialized */
   pipe_resource_reference(dst, src);
}
void
threaded_resource_init(struct pipe_resource *res)
{
   struct threaded_resource *tres = threaded_resource(res);

   tres->latest = &tres->b;
   util_range_init(&tres->valid_buffer_range);
   tres->base_valid_buffer_range = &tres->valid_buffer_range;
   tres->is_shared = false;
   tres->is_user_ptr = false;
}
void
threaded_resource_deinit(struct pipe_resource *res)
{
   struct threaded_resource *tres = threaded_resource(res);

   if (tres->latest != &tres->b)
      pipe_resource_reference(&tres->latest, NULL);
   util_range_destroy(&tres->valid_buffer_range);
}
struct pipe_context *
threaded_context_unwrap_sync(struct pipe_context *pipe)
{
   if (!pipe || !pipe->priv)
      return pipe;

   tc_sync(threaded_context(pipe));
   return (struct pipe_context*)pipe->priv;
}
/********************************************************************
 * simple functions
 */
#define TC_FUNC1(func, m_payload, qualifier, type, deref, deref2) \
   static void \
   tc_call_##func(struct pipe_context *pipe, union tc_payload *payload) \
   { \
      pipe->func(pipe, deref2((type*)payload)); \
   } \
   \
   static void \
   tc_##func(struct pipe_context *_pipe, qualifier type deref param) \
   { \
      struct threaded_context *tc = threaded_context(_pipe); \
      type *p = (type*)tc_add_sized_call(tc, TC_CALL_##func, sizeof(type)); \
      *p = deref(param); \
   }
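/* For reference, TC_FUNC1(set_sample_mask, sample_mask, , unsigned, , *)
 * below expands to approximately:
 *
 *    static void
 *    tc_call_set_sample_mask(struct pipe_context *pipe,
 *                            union tc_payload *payload)
 *    {
 *       pipe->set_sample_mask(pipe, *(unsigned*)payload);
 *    }
 *
 *    static void
 *    tc_set_sample_mask(struct pipe_context *_pipe, unsigned param)
 *    {
 *       struct threaded_context *tc = threaded_context(_pipe);
 *       unsigned *p = (unsigned*)
 *          tc_add_sized_call(tc, TC_CALL_set_sample_mask, sizeof(unsigned));
 *       *p = param;
 *    }
 */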
TC_FUNC1(set_active_query_state, flags, , bool, , *)

TC_FUNC1(set_blend_color, blend_color, const, struct pipe_blend_color, *, )
TC_FUNC1(set_stencil_ref, stencil_ref, const, struct pipe_stencil_ref, *, )
TC_FUNC1(set_clip_state, clip_state, const, struct pipe_clip_state, *, )
TC_FUNC1(set_sample_mask, sample_mask, , unsigned, , *)
TC_FUNC1(set_min_samples, min_samples, , unsigned, , *)
TC_FUNC1(set_polygon_stipple, polygon_stipple, const, struct pipe_poly_stipple, *, )

TC_FUNC1(texture_barrier, flags, , unsigned, , *)
TC_FUNC1(memory_barrier, flags, , unsigned, , *)
/********************************************************************
 * queries
 */
static struct pipe_query *
tc_create_query(struct pipe_context *_pipe, unsigned query_type,
                unsigned index)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   return pipe->create_query(pipe, query_type, index);
}
static struct pipe_query *
tc_create_batch_query(struct pipe_context *_pipe, unsigned num_queries,
                      unsigned *query_types)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   return pipe->create_batch_query(pipe, num_queries, query_types);
}
static void
tc_call_destroy_query(struct pipe_context *pipe, union tc_payload *payload)
{
   struct threaded_query *tq = threaded_query(payload->query);

   if (tq->head_unflushed.next)
      LIST_DEL(&tq->head_unflushed);

   pipe->destroy_query(pipe, payload->query);
}
static void
tc_destroy_query(struct pipe_context *_pipe, struct pipe_query *query)
{
   struct threaded_context *tc = threaded_context(_pipe);

   tc_add_small_call(tc, TC_CALL_destroy_query)->query = query;
}
static void
tc_call_begin_query(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->begin_query(pipe, payload->query);
}
static bool
tc_begin_query(struct pipe_context *_pipe, struct pipe_query *query)
{
   struct threaded_context *tc = threaded_context(_pipe);
   union tc_payload *payload = tc_add_small_call(tc, TC_CALL_begin_query);

   payload->query = query;
   return true; /* we don't care about the return value for this call */
}
struct tc_end_query_payload {
   struct threaded_context *tc;
   struct pipe_query *query;
};
static void
tc_call_end_query(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_end_query_payload *p = (struct tc_end_query_payload *)payload;
   struct threaded_query *tq = threaded_query(p->query);

   if (!tq->head_unflushed.next)
      list_add(&tq->head_unflushed, &p->tc->unflushed_queries);

   pipe->end_query(pipe, p->query);
}
static bool
tc_end_query(struct pipe_context *_pipe, struct pipe_query *query)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_query *tq = threaded_query(query);
   struct tc_end_query_payload *payload =
      tc_add_struct_typed_call(tc, TC_CALL_end_query, tc_end_query_payload);

   payload->tc = tc;
   payload->query = query;

   tq->flushed = false;

   return true; /* we don't care about the return value for this call */
}
static bool
tc_get_query_result(struct pipe_context *_pipe,
                    struct pipe_query *query, bool wait,
                    union pipe_query_result *result)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_query *tq = threaded_query(query);
   struct pipe_context *pipe = tc->pipe;

   if (!tq->flushed)
      tc_sync_msg(tc, wait ? "wait" : "nowait");

   bool success = pipe->get_query_result(pipe, query, wait, result);

   if (success) {
      tq->flushed = true;
      if (tq->head_unflushed.next) {
         /* This is safe because it can only happen after we sync'd. */
         LIST_DEL(&tq->head_unflushed);
      }
   }
   return success;
}
struct tc_query_result_resource {
   struct pipe_query *query;
   bool wait;
   enum pipe_query_value_type result_type;
   int index;
   struct pipe_resource *resource;
   unsigned offset;
};
static void
tc_call_get_query_result_resource(struct pipe_context *pipe,
                                  union tc_payload *payload)
{
   struct tc_query_result_resource *p =
      (struct tc_query_result_resource *)payload;

   pipe->get_query_result_resource(pipe, p->query, p->wait, p->result_type,
                                   p->index, p->resource, p->offset);
   pipe_resource_reference(&p->resource, NULL);
}
static void
tc_get_query_result_resource(struct pipe_context *_pipe,
                             struct pipe_query *query, bool wait,
                             enum pipe_query_value_type result_type, int index,
                             struct pipe_resource *resource, unsigned offset)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_query_result_resource *p =
      tc_add_struct_typed_call(tc, TC_CALL_get_query_result_resource,
                               tc_query_result_resource);

   p->query = query;
   p->wait = wait;
   p->result_type = result_type;
   p->index = index;
   tc_set_resource_reference(&p->resource, resource);
   p->offset = offset;
}
struct tc_render_condition {
   struct pipe_query *query;
   bool condition;
   unsigned mode;
};
static void
tc_call_render_condition(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_render_condition *p = (struct tc_render_condition *)payload;
   pipe->render_condition(pipe, p->query, p->condition, p->mode);
}
static void
tc_render_condition(struct pipe_context *_pipe,
                    struct pipe_query *query, bool condition,
                    enum pipe_render_cond_flag mode)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_render_condition *p =
      tc_add_struct_typed_call(tc, TC_CALL_render_condition, tc_render_condition);

   p->query = query;
   p->condition = condition;
   p->mode = mode;
}
/********************************************************************
 * constant (immutable) states
 */
#define TC_CSO_CREATE(name, sname) \
   static void * \
   tc_create_##name##_state(struct pipe_context *_pipe, \
                            const struct pipe_##sname##_state *state) \
   { \
      struct pipe_context *pipe = threaded_context(_pipe)->pipe; \
      return pipe->create_##name##_state(pipe, state); \
   }
#define TC_CSO_BIND(name) TC_FUNC1(bind_##name##_state, cso, , void *, , *)
#define TC_CSO_DELETE(name) TC_FUNC1(delete_##name##_state, cso, , void *, , *)
#define TC_CSO_WHOLE2(name, sname) \
   TC_CSO_CREATE(name, sname) \
   TC_CSO_BIND(name) \
   TC_CSO_DELETE(name)

#define TC_CSO_WHOLE(name) TC_CSO_WHOLE2(name, name)
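/* E.g. TC_CSO_WHOLE(rasterizer) below generates tc_create_rasterizer_state,
 * tc_bind_rasterizer_state and tc_delete_rasterizer_state.  Note that the
 * create hook calls the wrapped driver context directly from the
 * application thread, while bind/delete are enqueued like any other call;
 * drivers opting into the threaded context are assumed to tolerate
 * concurrent CSO creation.
 */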
TC_CSO_WHOLE(rasterizer)
TC_CSO_WHOLE(depth_stencil_alpha)
TC_CSO_WHOLE(compute)
TC_CSO_WHOLE2(fs, shader)
TC_CSO_WHOLE2(vs, shader)
TC_CSO_WHOLE2(gs, shader)
TC_CSO_WHOLE2(tcs, shader)
TC_CSO_WHOLE2(tes, shader)
TC_CSO_CREATE(sampler, sampler)
TC_CSO_DELETE(sampler)
TC_CSO_BIND(vertex_elements)
TC_CSO_DELETE(vertex_elements)
static void *
tc_create_vertex_elements_state(struct pipe_context *_pipe, unsigned count,
                                const struct pipe_vertex_element *elems)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;

   return pipe->create_vertex_elements_state(pipe, count, elems);
}
struct tc_sampler_states {
   ubyte shader, start, count;
   void *slot[0]; /* more will be allocated if needed */
};
static void
tc_call_bind_sampler_states(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_sampler_states *p = (struct tc_sampler_states *)payload;
   pipe->bind_sampler_states(pipe, p->shader, p->start, p->count, p->slot);
}
static void
tc_bind_sampler_states(struct pipe_context *_pipe,
                       enum pipe_shader_type shader,
                       unsigned start, unsigned count, void **states)
{
   if (!count)
      return;

   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_sampler_states *p =
      tc_add_slot_based_call(tc, TC_CALL_bind_sampler_states, tc_sampler_states, count);

   p->shader = shader;
   p->start = start;
   p->count = count;
   memcpy(p->slot, states, count * sizeof(states[0]));
}
/********************************************************************
 * immediate states
 */
static void
tc_call_set_framebuffer_state(struct pipe_context *pipe, union tc_payload *payload)
{
   struct pipe_framebuffer_state *p = (struct pipe_framebuffer_state *)payload;

   pipe->set_framebuffer_state(pipe, p);

   unsigned nr_cbufs = p->nr_cbufs;
   for (unsigned i = 0; i < nr_cbufs; i++)
      pipe_surface_reference(&p->cbufs[i], NULL);
   pipe_surface_reference(&p->zsbuf, NULL);
}
static void
tc_set_framebuffer_state(struct pipe_context *_pipe,
                         const struct pipe_framebuffer_state *fb)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_framebuffer_state *p =
      tc_add_struct_typed_call(tc, TC_CALL_set_framebuffer_state,
                               pipe_framebuffer_state);
   unsigned nr_cbufs = fb->nr_cbufs;

   p->width = fb->width;
   p->height = fb->height;
   p->samples = fb->samples;
   p->layers = fb->layers;
   p->nr_cbufs = nr_cbufs;

   for (unsigned i = 0; i < nr_cbufs; i++) {
      p->cbufs[i] = NULL;
      pipe_surface_reference(&p->cbufs[i], fb->cbufs[i]);
   }
   p->zsbuf = NULL;
   pipe_surface_reference(&p->zsbuf, fb->zsbuf);
}
static void
tc_call_set_tess_state(struct pipe_context *pipe, union tc_payload *payload)
{
   float *p = (float*)payload;
   pipe->set_tess_state(pipe, p, p + 4);
}
static void
tc_set_tess_state(struct pipe_context *_pipe,
                  const float default_outer_level[4],
                  const float default_inner_level[2])
{
   struct threaded_context *tc = threaded_context(_pipe);
   float *p = (float*)tc_add_sized_call(tc, TC_CALL_set_tess_state,
                                        sizeof(float) * 6);

   memcpy(p, default_outer_level, 4 * sizeof(float));
   memcpy(p + 4, default_inner_level, 2 * sizeof(float));
}
struct tc_constant_buffer {
   ubyte shader;
   uint index;
   struct pipe_constant_buffer cb;
};
static void
tc_call_set_constant_buffer(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_constant_buffer *p = (struct tc_constant_buffer *)payload;

   pipe->set_constant_buffer(pipe,
                             p->shader, p->index, &p->cb);
   pipe_resource_reference(&p->cb.buffer, NULL);
}
static void
tc_set_constant_buffer(struct pipe_context *_pipe,
                       enum pipe_shader_type shader, uint index,
                       const struct pipe_constant_buffer *cb)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_resource *buffer = NULL;
   unsigned offset;

   /* This must be done before adding set_constant_buffer, because it could
    * generate e.g. transfer_unmap and flush partially-uninitialized
    * set_constant_buffer to the driver if it was done afterwards.
    */
   if (cb && cb->user_buffer) {
      u_upload_data(tc->base.const_uploader, 0, cb->buffer_size, 64,
                    cb->user_buffer, &offset, &buffer);
      u_upload_unmap(tc->base.const_uploader);
   }

   struct tc_constant_buffer *p =
      tc_add_struct_typed_call(tc, TC_CALL_set_constant_buffer,
                               tc_constant_buffer);
   p->shader = shader;
   p->index = index;

   if (cb) {
      if (cb->user_buffer) {
         p->cb.buffer_size = cb->buffer_size;
         p->cb.user_buffer = NULL;
         p->cb.buffer_offset = offset;
         p->cb.buffer = buffer;
      } else {
         tc_set_resource_reference(&p->cb.buffer,
                                   cb->buffer);
         memcpy(&p->cb, cb, sizeof(*cb));
      }
   } else {
      memset(&p->cb, 0, sizeof(*cb));
   }
}
struct tc_scissors {
   ubyte start, count;
   struct pipe_scissor_state slot[0]; /* more will be allocated if needed */
};
static void
tc_call_set_scissor_states(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_scissors *p = (struct tc_scissors *)payload;
   pipe->set_scissor_states(pipe, p->start, p->count, p->slot);
}
static void
tc_set_scissor_states(struct pipe_context *_pipe,
                      unsigned start, unsigned count,
                      const struct pipe_scissor_state *states)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_scissors *p =
      tc_add_slot_based_call(tc, TC_CALL_set_scissor_states, tc_scissors, count);

   p->start = start;
   p->count = count;
   memcpy(&p->slot, states, count * sizeof(states[0]));
}
struct tc_viewports {
   ubyte start, count;
   struct pipe_viewport_state slot[0]; /* more will be allocated if needed */
};
static void
tc_call_set_viewport_states(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_viewports *p = (struct tc_viewports *)payload;
   pipe->set_viewport_states(pipe, p->start, p->count, p->slot);
}
static void
tc_set_viewport_states(struct pipe_context *_pipe,
                       unsigned start, unsigned count,
                       const struct pipe_viewport_state *states)
{
   if (!count)
      return;

   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_viewports *p =
      tc_add_slot_based_call(tc, TC_CALL_set_viewport_states, tc_viewports, count);

   p->start = start;
   p->count = count;
   memcpy(&p->slot, states, count * sizeof(states[0]));
}
struct tc_window_rects {
   bool include;
   ubyte count;
   struct pipe_scissor_state slot[0]; /* more will be allocated if needed */
};
static void
tc_call_set_window_rectangles(struct pipe_context *pipe,
                              union tc_payload *payload)
{
   struct tc_window_rects *p = (struct tc_window_rects *)payload;
   pipe->set_window_rectangles(pipe, p->include, p->count, p->slot);
}
static void
tc_set_window_rectangles(struct pipe_context *_pipe, bool include,
                         unsigned count,
                         const struct pipe_scissor_state *rects)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_window_rects *p =
      tc_add_slot_based_call(tc, TC_CALL_set_window_rectangles, tc_window_rects, count);

   p->include = include;
   p->count = count;
   memcpy(p->slot, rects, count * sizeof(rects[0]));
}
struct tc_sampler_views {
   ubyte shader, start, count;
   struct pipe_sampler_view *slot[0]; /* more will be allocated if needed */
};
static void
tc_call_set_sampler_views(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_sampler_views *p = (struct tc_sampler_views *)payload;
   unsigned count = p->count;

   pipe->set_sampler_views(pipe, p->shader, p->start, p->count, p->slot);
   for (unsigned i = 0; i < count; i++)
      pipe_sampler_view_reference(&p->slot[i], NULL);
}
static void
tc_set_sampler_views(struct pipe_context *_pipe,
                     enum pipe_shader_type shader,
                     unsigned start, unsigned count,
                     struct pipe_sampler_view **views)
{
   if (!count)
      return;

   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_sampler_views *p =
      tc_add_slot_based_call(tc, TC_CALL_set_sampler_views, tc_sampler_views, count);

   p->shader = shader;
   p->start = start;
   p->count = count;

   if (views) {
      for (unsigned i = 0; i < count; i++) {
         p->slot[i] = NULL;
         pipe_sampler_view_reference(&p->slot[i], views[i]);
      }
   } else {
      memset(p->slot, 0, count * sizeof(views[0]));
   }
}
struct tc_shader_images {
   ubyte shader, start, count;
   bool unbind;
   struct pipe_image_view slot[0]; /* more will be allocated if needed */
};
static void
tc_call_set_shader_images(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_shader_images *p = (struct tc_shader_images *)payload;
   unsigned count = p->count;

   if (p->unbind) {
      pipe->set_shader_images(pipe, p->shader, p->start, p->count, NULL);
      return;
   }

   pipe->set_shader_images(pipe, p->shader, p->start, p->count, p->slot);

   for (unsigned i = 0; i < count; i++)
      pipe_resource_reference(&p->slot[i].resource, NULL);
}
static void
tc_set_shader_images(struct pipe_context *_pipe,
                     enum pipe_shader_type shader,
                     unsigned start, unsigned count,
                     const struct pipe_image_view *images)
{
   if (!count)
      return;

   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_shader_images *p =
      tc_add_slot_based_call(tc, TC_CALL_set_shader_images, tc_shader_images,
                             images ? count : 0);

   p->shader = shader;
   p->start = start;
   p->count = count;
   p->unbind = images == NULL;

   if (images) {
      for (unsigned i = 0; i < count; i++) {
         tc_set_resource_reference(&p->slot[i].resource, images[i].resource);

         if (images[i].access & PIPE_IMAGE_ACCESS_WRITE &&
             images[i].resource &&
             images[i].resource->target == PIPE_BUFFER) {
            struct threaded_resource *tres =
               threaded_resource(images[i].resource);

            util_range_add(&tres->b, &tres->valid_buffer_range,
                           images[i].u.buf.offset,
                           images[i].u.buf.offset + images[i].u.buf.size);
         }
      }
      memcpy(p->slot, images, count * sizeof(images[0]));
   }
}
struct tc_shader_buffers {
   ubyte shader, start, count;
   bool unbind;
   unsigned writable_bitmask;
   struct pipe_shader_buffer slot[0]; /* more will be allocated if needed */
};
static void
tc_call_set_shader_buffers(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_shader_buffers *p = (struct tc_shader_buffers *)payload;
   unsigned count = p->count;

   if (p->unbind) {
      pipe->set_shader_buffers(pipe, p->shader, p->start, p->count, NULL, 0);
      return;
   }

   pipe->set_shader_buffers(pipe, p->shader, p->start, p->count, p->slot,
                            p->writable_bitmask);

   for (unsigned i = 0; i < count; i++)
      pipe_resource_reference(&p->slot[i].buffer, NULL);
}
static void
tc_set_shader_buffers(struct pipe_context *_pipe,
                      enum pipe_shader_type shader,
                      unsigned start, unsigned count,
                      const struct pipe_shader_buffer *buffers,
                      unsigned writable_bitmask)
{
   if (!count)
      return;

   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_shader_buffers *p =
      tc_add_slot_based_call(tc, TC_CALL_set_shader_buffers, tc_shader_buffers,
                             buffers ? count : 0);

   p->shader = shader;
   p->start = start;
   p->count = count;
   p->unbind = buffers == NULL;
   p->writable_bitmask = writable_bitmask;

   if (buffers) {
      for (unsigned i = 0; i < count; i++) {
         struct pipe_shader_buffer *dst = &p->slot[i];
         const struct pipe_shader_buffer *src = buffers + i;

         tc_set_resource_reference(&dst->buffer, src->buffer);
         dst->buffer_offset = src->buffer_offset;
         dst->buffer_size = src->buffer_size;

         if (src->buffer) {
            struct threaded_resource *tres = threaded_resource(src->buffer);

            util_range_add(&tres->b, &tres->valid_buffer_range,
                           src->buffer_offset,
                           src->buffer_offset + src->buffer_size);
         }
      }
   }
}
struct tc_vertex_buffers {
   ubyte start, count;
   bool unbind;
   struct pipe_vertex_buffer slot[0]; /* more will be allocated if needed */
};
static void
tc_call_set_vertex_buffers(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_vertex_buffers *p = (struct tc_vertex_buffers *)payload;
   unsigned count = p->count;

   if (p->unbind) {
      pipe->set_vertex_buffers(pipe, p->start, count, NULL);
      return;
   }

   for (unsigned i = 0; i < count; i++)
      tc_assert(!p->slot[i].is_user_buffer);

   pipe->set_vertex_buffers(pipe, p->start, count, p->slot);
   for (unsigned i = 0; i < count; i++)
      pipe_resource_reference(&p->slot[i].buffer.resource, NULL);
}
static void
tc_set_vertex_buffers(struct pipe_context *_pipe,
                      unsigned start, unsigned count,
                      const struct pipe_vertex_buffer *buffers)
{
   struct threaded_context *tc = threaded_context(_pipe);

   if (!count)
      return;

   if (buffers) {
      struct tc_vertex_buffers *p =
         tc_add_slot_based_call(tc, TC_CALL_set_vertex_buffers, tc_vertex_buffers, count);
      p->start = start;
      p->count = count;
      p->unbind = false;

      for (unsigned i = 0; i < count; i++) {
         struct pipe_vertex_buffer *dst = &p->slot[i];
         const struct pipe_vertex_buffer *src = buffers + i;

         tc_assert(!src->is_user_buffer);
         dst->stride = src->stride;
         dst->is_user_buffer = false;
         tc_set_resource_reference(&dst->buffer.resource,
                                   src->buffer.resource);
         dst->buffer_offset = src->buffer_offset;
      }
   } else {
      struct tc_vertex_buffers *p =
         tc_add_slot_based_call(tc, TC_CALL_set_vertex_buffers, tc_vertex_buffers, 0);
      p->start = start;
      p->count = count;
      p->unbind = true;
   }
}
struct tc_stream_outputs {
   unsigned count;
   struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
   unsigned offsets[PIPE_MAX_SO_BUFFERS];
};
static void
tc_call_set_stream_output_targets(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_stream_outputs *p = (struct tc_stream_outputs *)payload;
   unsigned count = p->count;

   pipe->set_stream_output_targets(pipe, count, p->targets, p->offsets);
   for (unsigned i = 0; i < count; i++)
      pipe_so_target_reference(&p->targets[i], NULL);
}
static void
tc_set_stream_output_targets(struct pipe_context *_pipe,
                             unsigned count,
                             struct pipe_stream_output_target **tgs,
                             const unsigned *offsets)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_stream_outputs *p =
      tc_add_struct_typed_call(tc, TC_CALL_set_stream_output_targets,
                               tc_stream_outputs);

   for (unsigned i = 0; i < count; i++) {
      p->targets[i] = NULL;
      pipe_so_target_reference(&p->targets[i], tgs[i]);
   }
   p->count = count;
   memcpy(p->offsets, offsets, count * sizeof(unsigned));
}
static void
tc_set_compute_resources(struct pipe_context *_pipe, unsigned start,
                         unsigned count, struct pipe_surface **resources)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->set_compute_resources(pipe, start, count, resources);
}
static void
tc_set_global_binding(struct pipe_context *_pipe, unsigned first,
                      unsigned count, struct pipe_resource **resources,
                      uint32_t **handles)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->set_global_binding(pipe, first, count, resources, handles);
}
/********************************************************************
 * views
 */
static struct pipe_surface *
tc_create_surface(struct pipe_context *_pipe,
                  struct pipe_resource *resource,
                  const struct pipe_surface *surf_tmpl)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;
   struct pipe_surface *view =
      pipe->create_surface(pipe, resource, surf_tmpl);

   if (view)
      view->context = _pipe;
   return view;
}
static void
tc_surface_destroy(struct pipe_context *_pipe,
                   struct pipe_surface *surf)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;

   pipe->surface_destroy(pipe, surf);
}
static struct pipe_sampler_view *
tc_create_sampler_view(struct pipe_context *_pipe,
                       struct pipe_resource *resource,
                       const struct pipe_sampler_view *templ)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;
   struct pipe_sampler_view *view =
      pipe->create_sampler_view(pipe, resource, templ);

   if (view)
      view->context = _pipe;
   return view;
}
static void
tc_sampler_view_destroy(struct pipe_context *_pipe,
                        struct pipe_sampler_view *view)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;

   pipe->sampler_view_destroy(pipe, view);
}
static struct pipe_stream_output_target *
tc_create_stream_output_target(struct pipe_context *_pipe,
                               struct pipe_resource *res,
                               unsigned buffer_offset,
                               unsigned buffer_size)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;
   struct threaded_resource *tres = threaded_resource(res);
   struct pipe_stream_output_target *view;

   tc_sync(threaded_context(_pipe));
   util_range_add(&tres->b, &tres->valid_buffer_range, buffer_offset,
                  buffer_offset + buffer_size);

   view = pipe->create_stream_output_target(pipe, res, buffer_offset,
                                            buffer_size);
   if (view)
      view->context = _pipe;
   return view;
}
static void
tc_stream_output_target_destroy(struct pipe_context *_pipe,
                                struct pipe_stream_output_target *target)
{
   struct pipe_context *pipe = threaded_context(_pipe)->pipe;

   pipe->stream_output_target_destroy(pipe, target);
}
/********************************************************************
 * bindless
 */
static uint64_t
tc_create_texture_handle(struct pipe_context *_pipe,
                         struct pipe_sampler_view *view,
                         const struct pipe_sampler_state *state)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   return pipe->create_texture_handle(pipe, view, state);
}
static void
tc_call_delete_texture_handle(struct pipe_context *pipe,
                              union tc_payload *payload)
{
   pipe->delete_texture_handle(pipe, payload->handle);
}
static void
tc_delete_texture_handle(struct pipe_context *_pipe, uint64_t handle)
{
   struct threaded_context *tc = threaded_context(_pipe);
   union tc_payload *payload =
      tc_add_small_call(tc, TC_CALL_delete_texture_handle);

   payload->handle = handle;
}
struct tc_make_texture_handle_resident {
   uint64_t handle;
   bool resident;
};
static void
tc_call_make_texture_handle_resident(struct pipe_context *pipe,
                                     union tc_payload *payload)
{
   struct tc_make_texture_handle_resident *p =
      (struct tc_make_texture_handle_resident *)payload;

   pipe->make_texture_handle_resident(pipe, p->handle, p->resident);
}
static void
tc_make_texture_handle_resident(struct pipe_context *_pipe, uint64_t handle,
                                bool resident)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_make_texture_handle_resident *p =
      tc_add_struct_typed_call(tc, TC_CALL_make_texture_handle_resident,
                               tc_make_texture_handle_resident);

   p->handle = handle;
   p->resident = resident;
}
static uint64_t
tc_create_image_handle(struct pipe_context *_pipe,
                       const struct pipe_image_view *image)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   return pipe->create_image_handle(pipe, image);
}
static void
tc_call_delete_image_handle(struct pipe_context *pipe,
                            union tc_payload *payload)
{
   pipe->delete_image_handle(pipe, payload->handle);
}
static void
tc_delete_image_handle(struct pipe_context *_pipe, uint64_t handle)
{
   struct threaded_context *tc = threaded_context(_pipe);
   union tc_payload *payload =
      tc_add_small_call(tc, TC_CALL_delete_image_handle);

   payload->handle = handle;
}
struct tc_make_image_handle_resident {
   uint64_t handle;
   unsigned access;
   bool resident;
};
static void
tc_call_make_image_handle_resident(struct pipe_context *pipe,
                                   union tc_payload *payload)
{
   struct tc_make_image_handle_resident *p =
      (struct tc_make_image_handle_resident *)payload;

   pipe->make_image_handle_resident(pipe, p->handle, p->access, p->resident);
}
static void
tc_make_image_handle_resident(struct pipe_context *_pipe, uint64_t handle,
                              unsigned access, bool resident)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_make_image_handle_resident *p =
      tc_add_struct_typed_call(tc, TC_CALL_make_image_handle_resident,
                               tc_make_image_handle_resident);

   p->handle = handle;
   p->access = access;
   p->resident = resident;
}
/********************************************************************
 * transfer
 */
struct tc_replace_buffer_storage {
   struct pipe_resource *dst;
   struct pipe_resource *src;
   tc_replace_buffer_storage_func func;
};
static void
tc_call_replace_buffer_storage(struct pipe_context *pipe,
                               union tc_payload *payload)
{
   struct tc_replace_buffer_storage *p =
      (struct tc_replace_buffer_storage *)payload;

   p->func(pipe, p->dst, p->src);
   pipe_resource_reference(&p->dst, NULL);
   pipe_resource_reference(&p->src, NULL);
}
static bool
tc_invalidate_buffer(struct threaded_context *tc,
                     struct threaded_resource *tbuf)
{
   /* We can't check if the buffer is idle, so we invalidate it
    * unconditionally. */
   struct pipe_screen *screen = tc->base.screen;
   struct pipe_resource *new_buf;

   /* Shared, pinned, and sparse buffers can't be reallocated. */
   if (tbuf->is_shared ||
       tbuf->is_user_ptr ||
       tbuf->b.flags & PIPE_RESOURCE_FLAG_SPARSE)
      return false;

   /* Allocate a new one. */
   new_buf = screen->resource_create(screen, &tbuf->b);
   if (!new_buf)
      return false;

   /* Replace the "latest" pointer. */
   if (tbuf->latest != &tbuf->b)
      pipe_resource_reference(&tbuf->latest, NULL);

   tbuf->latest = new_buf;
   util_range_set_empty(&tbuf->valid_buffer_range);

   /* The valid range should point to the original buffer. */
   threaded_resource(new_buf)->base_valid_buffer_range =
      &tbuf->valid_buffer_range;

   /* Enqueue storage replacement of the original buffer. */
   struct tc_replace_buffer_storage *p =
      tc_add_struct_typed_call(tc, TC_CALL_replace_buffer_storage,
                               tc_replace_buffer_storage);

   p->func = tc->replace_buffer_storage;
   tc_set_resource_reference(&p->dst, &tbuf->b);
   tc_set_resource_reference(&p->src, new_buf);
   return true;
}
static unsigned
tc_improve_map_buffer_flags(struct threaded_context *tc,
                            struct threaded_resource *tres, unsigned usage,
                            unsigned offset, unsigned size)
{
   /* Never invalidate inside the driver and never infer "unsynchronized". */
   unsigned tc_flags = TC_TRANSFER_MAP_NO_INVALIDATE |
                       TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED;

   /* Prevent a reentry. */
   if (usage & tc_flags)
      return usage;

   /* Use the staging upload if it's preferred. */
   if (usage & (PIPE_TRANSFER_DISCARD_RANGE |
                PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) &&
       !(usage & PIPE_TRANSFER_PERSISTENT) &&
       /* Try not to decrement the counter if it's not positive. Still racy,
        * but it makes it harder to wrap the counter from INT_MIN to INT_MAX. */
       tres->max_forced_staging_uploads > 0 &&
       p_atomic_dec_return(&tres->max_forced_staging_uploads) >= 0) {
      usage &= ~(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
                 PIPE_TRANSFER_UNSYNCHRONIZED);

      return usage | tc_flags | PIPE_TRANSFER_DISCARD_RANGE;
   }

   /* Sparse buffers can't be mapped directly and can't be reallocated
    * (fully invalidated). That may just be a radeonsi limitation, but
    * the threaded context must obey it with radeonsi.
    */
   if (tres->b.flags & PIPE_RESOURCE_FLAG_SPARSE) {
      /* We can use DISCARD_RANGE instead of full discard. This is the only
       * fast path for sparse buffers that doesn't need thread synchronization.
       */
      if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
         usage |= PIPE_TRANSFER_DISCARD_RANGE;

      /* Allow DISCARD_WHOLE_RESOURCE and inferring UNSYNCHRONIZED in drivers.
       * The threaded context doesn't do unsynchronized mappings and
       * invalidations of sparse buffers, therefore a correct driver behavior
       * won't result in an incorrect behavior with the threaded context.
       */
      return usage;
   }

   usage |= tc_flags;

   /* Handle CPU reads trivially. */
   if (usage & PIPE_TRANSFER_READ) {
      /* Drivers aren't allowed to do buffer invalidations. */
      return usage & ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   }

   /* See if the buffer range being mapped has never been initialized,
    * in which case it can be mapped unsynchronized. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
       !tres->is_shared &&
       !util_ranges_intersect(&tres->valid_buffer_range, offset, offset + size))
      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* If discarding the entire range, discard the whole resource instead. */
      if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
          offset == 0 && size == tres->b.width0)
         usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;

      /* Discard the whole resource if needed. */
      if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         if (tc_invalidate_buffer(tc, tres))
            usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
         else
            usage |= PIPE_TRANSFER_DISCARD_RANGE; /* fallback */
      }
   }

   /* We won't need this flag anymore. */
   /* TODO: We might not need TC_TRANSFER_MAP_NO_INVALIDATE with this. */
   usage &= ~PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;

   /* GL_AMD_pinned_memory and persistent mappings can't use staging
    * buffers. */
   if (usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
                PIPE_TRANSFER_PERSISTENT) ||
       tres->is_user_ptr)
      usage &= ~PIPE_TRANSFER_DISCARD_RANGE;

   /* Unsynchronized buffer mappings don't have to synchronize the thread. */
   if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
      usage &= ~PIPE_TRANSFER_DISCARD_RANGE;
      usage |= TC_TRANSFER_MAP_THREADED_UNSYNC; /* notify the driver */
   }

   return usage;
}
static void *
tc_transfer_map(struct pipe_context *_pipe,
                struct pipe_resource *resource, unsigned level,
                unsigned usage, const struct pipe_box *box,
                struct pipe_transfer **transfer)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_resource *tres = threaded_resource(resource);
   struct pipe_context *pipe = tc->pipe;

   if (resource->target == PIPE_BUFFER) {
      usage = tc_improve_map_buffer_flags(tc, tres, usage, box->x, box->width);

      /* Do a staging transfer within the threaded context. The driver should
       * only get resource_copy_region.
       */
      if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
         struct threaded_transfer *ttrans = slab_alloc(&tc->pool_transfers);
         uint8_t *map;

         ttrans->staging = NULL;

         u_upload_alloc(tc->base.stream_uploader, 0,
                        box->width + (box->x % tc->map_buffer_alignment),
                        64, &ttrans->offset, &ttrans->staging, (void**)&map);
         if (!map) {
            slab_free(&tc->pool_transfers, ttrans);
            return NULL;
         }

         tc_set_resource_reference(&ttrans->b.resource, resource);
         ttrans->b.level = 0;
         ttrans->b.usage = usage;
         ttrans->b.box = *box;
         ttrans->b.stride = 0;
         ttrans->b.layer_stride = 0;
         *transfer = &ttrans->b;
         return map + (box->x % tc->map_buffer_alignment);
      }
   }

   /* Unsynchronized buffer mappings don't have to synchronize the thread. */
   if (!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC))
      tc_sync_msg(tc, resource->target != PIPE_BUFFER ? " texture" :
                      usage & PIPE_TRANSFER_DISCARD_RANGE ? " discard_range" :
                      usage & PIPE_TRANSFER_READ ? " read" : " ??");

   return pipe->transfer_map(pipe, tres->latest ? tres->latest : resource,
                             level, usage, box, transfer);
}
struct tc_transfer_flush_region {
   struct pipe_transfer *transfer;
   struct pipe_box box;
};

static void
tc_call_transfer_flush_region(struct pipe_context *pipe,
                              union tc_payload *payload)
{
   struct tc_transfer_flush_region *p =
      (struct tc_transfer_flush_region *)payload;

   pipe->transfer_flush_region(pipe, p->transfer, &p->box);
}
struct tc_resource_copy_region {
   struct pipe_resource *dst;
   unsigned dst_level;
   unsigned dstx, dsty, dstz;
   struct pipe_resource *src;
   unsigned src_level;
   struct pipe_box src_box;
};
static void
tc_resource_copy_region(struct pipe_context *_pipe,
                        struct pipe_resource *dst, unsigned dst_level,
                        unsigned dstx, unsigned dsty, unsigned dstz,
                        struct pipe_resource *src, unsigned src_level,
                        const struct pipe_box *src_box);
static void
tc_buffer_do_flush_region(struct threaded_context *tc,
                          struct threaded_transfer *ttrans,
                          const struct pipe_box *box)
{
   struct threaded_resource *tres = threaded_resource(ttrans->b.resource);

   if (ttrans->staging) {
      struct pipe_box src_box;

      u_box_1d(ttrans->offset + ttrans->b.box.x % tc->map_buffer_alignment +
               (box->x - ttrans->b.box.x),
               box->width, &src_box);

      /* Copy the staging buffer into the original one. */
      tc_resource_copy_region(&tc->base, ttrans->b.resource, 0, box->x, 0, 0,
                              ttrans->staging, 0, &src_box);
   }

   util_range_add(&tres->b, tres->base_valid_buffer_range,
                  box->x, box->x + box->width);
}
static void
tc_transfer_flush_region(struct pipe_context *_pipe,
                         struct pipe_transfer *transfer,
                         const struct pipe_box *rel_box)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_transfer *ttrans = threaded_transfer(transfer);
   struct threaded_resource *tres = threaded_resource(transfer->resource);
   unsigned required_usage = PIPE_TRANSFER_WRITE |
                             PIPE_TRANSFER_FLUSH_EXPLICIT;

   if (tres->b.target == PIPE_BUFFER) {
      if ((transfer->usage & required_usage) == required_usage) {
         struct pipe_box box;

         u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
         tc_buffer_do_flush_region(tc, ttrans, &box);
      }

      /* Staging transfers don't send the call to the driver. */
      if (ttrans->staging)
         return;
   }

   struct tc_transfer_flush_region *p =
      tc_add_struct_typed_call(tc, TC_CALL_transfer_flush_region,
                               tc_transfer_flush_region);
   p->transfer = transfer;
   p->box = *rel_box;
}
static void
tc_call_transfer_unmap(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->transfer_unmap(pipe, payload->transfer);
}
static void
tc_transfer_unmap(struct pipe_context *_pipe, struct pipe_transfer *transfer)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_transfer *ttrans = threaded_transfer(transfer);
   struct threaded_resource *tres = threaded_resource(transfer->resource);

   if (tres->b.target == PIPE_BUFFER) {
      if (transfer->usage & PIPE_TRANSFER_WRITE &&
          !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
         tc_buffer_do_flush_region(tc, ttrans, &transfer->box);

      /* Staging transfers don't send the call to the driver. */
      if (ttrans->staging) {
         pipe_resource_reference(&ttrans->staging, NULL);
         pipe_resource_reference(&ttrans->b.resource, NULL);
         slab_free(&tc->pool_transfers, ttrans);
         return;
      }
   }

   tc_add_small_call(tc, TC_CALL_transfer_unmap)->transfer = transfer;
}
struct tc_buffer_subdata {
   struct pipe_resource *resource;
   unsigned usage, offset, size;
   char slot[0]; /* more will be allocated if needed */
};
static void
tc_call_buffer_subdata(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_buffer_subdata *p = (struct tc_buffer_subdata *)payload;

   pipe->buffer_subdata(pipe, p->resource, p->usage, p->offset, p->size,
                        p->slot);
   pipe_resource_reference(&p->resource, NULL);
}
static void
tc_buffer_subdata(struct pipe_context *_pipe,
                  struct pipe_resource *resource,
                  unsigned usage, unsigned offset,
                  unsigned size, const void *data)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_resource *tres = threaded_resource(resource);

   if (!size)
      return;

   usage |= PIPE_TRANSFER_WRITE;

   /* PIPE_TRANSFER_MAP_DIRECTLY suppresses implicit DISCARD_RANGE. */
   if (!(usage & PIPE_TRANSFER_MAP_DIRECTLY))
      usage |= PIPE_TRANSFER_DISCARD_RANGE;

   usage = tc_improve_map_buffer_flags(tc, tres, usage, offset, size);

   /* Unsynchronized and big transfers should use transfer_map. Also handle
    * full invalidations, because drivers aren't allowed to do them.
    */
   if (usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
                PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) ||
       size > TC_MAX_SUBDATA_BYTES) {
      struct pipe_transfer *transfer;
      struct pipe_box box;
      uint8_t *map = NULL;

      u_box_1d(offset, size, &box);

      map = tc_transfer_map(_pipe, resource, 0, usage, &box, &transfer);
      if (map) {
         memcpy(map, data, size);
         tc_transfer_unmap(_pipe, transfer);
      }
      return;
   }

   util_range_add(&tres->b, &tres->valid_buffer_range, offset, offset + size);

   /* The upload is small. Enqueue it. */
   struct tc_buffer_subdata *p =
      tc_add_slot_based_call(tc, TC_CALL_buffer_subdata, tc_buffer_subdata, size);

   tc_set_resource_reference(&p->resource, resource);
   p->usage = usage;
   p->offset = offset;
   p->size = size;
   memcpy(p->slot, data, size);
}
struct tc_texture_subdata {
   struct pipe_resource *resource;
   unsigned level, usage, stride, layer_stride;
   struct pipe_box box;
   char slot[0]; /* more will be allocated if needed */
};
static void
tc_call_texture_subdata(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_texture_subdata *p = (struct tc_texture_subdata *)payload;

   pipe->texture_subdata(pipe, p->resource, p->level, p->usage, &p->box,
                         p->slot, p->stride, p->layer_stride);
   pipe_resource_reference(&p->resource, NULL);
}
static void
tc_texture_subdata(struct pipe_context *_pipe,
                   struct pipe_resource *resource,
                   unsigned level, unsigned usage,
                   const struct pipe_box *box,
                   const void *data, unsigned stride,
                   unsigned layer_stride)
{
   struct threaded_context *tc = threaded_context(_pipe);
   unsigned size;

   assert(box->height >= 1);
   assert(box->depth >= 1);

   size = (box->depth - 1) * layer_stride +
          (box->height - 1) * stride +
          box->width * util_format_get_blocksize(resource->format);
   if (!size)
      return;

   /* Small uploads can be enqueued, big uploads must sync. */
   if (size <= TC_MAX_SUBDATA_BYTES) {
      struct tc_texture_subdata *p =
         tc_add_slot_based_call(tc, TC_CALL_texture_subdata, tc_texture_subdata, size);

      tc_set_resource_reference(&p->resource, resource);
      p->level = level;
      p->usage = usage;
      p->box = *box;
      p->stride = stride;
      p->layer_stride = layer_stride;
      memcpy(p->slot, data, size);
   } else {
      struct pipe_context *pipe = tc->pipe;

      tc_sync(tc);
      pipe->texture_subdata(pipe, resource, level, usage, box, data,
                            stride, layer_stride);
   }
}
/********************************************************************
 * miscellaneous
 */
#define TC_FUNC_SYNC_RET0(ret_type, func) \
   static ret_type \
   tc_##func(struct pipe_context *_pipe) \
   { \
      struct threaded_context *tc = threaded_context(_pipe); \
      struct pipe_context *pipe = tc->pipe; \
   \
      tc_sync(tc); \
      return pipe->func(pipe); \
   }
TC_FUNC_SYNC_RET0(enum pipe_reset_status, get_device_reset_status)
TC_FUNC_SYNC_RET0(uint64_t, get_timestamp)
static void
tc_get_sample_position(struct pipe_context *_pipe,
                       unsigned sample_count, unsigned sample_index,
                       float *out_value)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->get_sample_position(pipe, sample_count, sample_index,
                             out_value);
}
static void
tc_set_device_reset_callback(struct pipe_context *_pipe,
                             const struct pipe_device_reset_callback *cb)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->set_device_reset_callback(pipe, cb);
}
struct tc_string_marker {
   int len;
   char slot[0]; /* more will be allocated if needed */
};
static void
tc_call_emit_string_marker(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_string_marker *p = (struct tc_string_marker *)payload;
   pipe->emit_string_marker(pipe, p->slot, p->len);
}
static void
tc_emit_string_marker(struct pipe_context *_pipe,
                      const char *string, int len)
{
   struct threaded_context *tc = threaded_context(_pipe);

   if (len <= TC_MAX_STRING_MARKER_BYTES) {
      struct tc_string_marker *p =
         tc_add_slot_based_call(tc, TC_CALL_emit_string_marker, tc_string_marker, len);

      memcpy(p->slot, string, len);
      p->len = len;
   } else {
      struct pipe_context *pipe = tc->pipe;

      tc_sync(tc);
      pipe->emit_string_marker(pipe, string, len);
   }
}
static void
tc_dump_debug_state(struct pipe_context *_pipe, FILE *stream,
                    unsigned flags)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->dump_debug_state(pipe, stream, flags);
}
static void
tc_set_debug_callback(struct pipe_context *_pipe,
                      const struct pipe_debug_callback *cb)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   /* Drop all synchronous debug callbacks. Drivers are expected to be OK
    * with this. shader-db will use an environment variable to disable
    * the threaded context.
    */
   if (cb && cb->debug_message && !cb->async)
      return;

   tc_sync(tc);
   pipe->set_debug_callback(pipe, cb);
}
static void
tc_set_log_context(struct pipe_context *_pipe, struct u_log_context *log)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->set_log_context(pipe, log);
}
static void
tc_create_fence_fd(struct pipe_context *_pipe,
                   struct pipe_fence_handle **fence, int fd,
                   enum pipe_fd_type type)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->create_fence_fd(pipe, fence, fd, type);
}
static void
tc_call_fence_server_sync(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->fence_server_sync(pipe, payload->fence);
   pipe->screen->fence_reference(pipe->screen, &payload->fence, NULL);
}
static void
tc_fence_server_sync(struct pipe_context *_pipe,
                     struct pipe_fence_handle *fence)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_screen *screen = tc->pipe->screen;
   union tc_payload *payload = tc_add_small_call(tc, TC_CALL_fence_server_sync);

   payload->fence = NULL;
   screen->fence_reference(screen, &payload->fence, fence);
}
static void
tc_call_fence_server_signal(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->fence_server_signal(pipe, payload->fence);
   pipe->screen->fence_reference(pipe->screen, &payload->fence, NULL);
}
static void
tc_fence_server_signal(struct pipe_context *_pipe,
                       struct pipe_fence_handle *fence)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_screen *screen = tc->pipe->screen;
   union tc_payload *payload = tc_add_small_call(tc, TC_CALL_fence_server_signal);

   payload->fence = NULL;
   screen->fence_reference(screen, &payload->fence, fence);
}
static struct pipe_video_codec *
tc_create_video_codec(UNUSED struct pipe_context *_pipe,
                      UNUSED const struct pipe_video_codec *templ)
{
   unreachable("Threaded context should not be enabled for video APIs");
   return NULL;
}

static struct pipe_video_buffer *
tc_create_video_buffer(UNUSED struct pipe_context *_pipe,
                       UNUSED const struct pipe_video_buffer *templ)
{
   unreachable("Threaded context should not be enabled for video APIs");
   return NULL;
}
struct tc_context_param {
   enum pipe_context_param param;
   unsigned value;
};
static void
tc_call_set_context_param(struct pipe_context *pipe,
                          union tc_payload *payload)
{
   struct tc_context_param *p = (struct tc_context_param *)payload;

   if (pipe->set_context_param)
      pipe->set_context_param(pipe, p->param, p->value);
}
static void
tc_set_context_param(struct pipe_context *_pipe,
                     enum pipe_context_param param,
                     unsigned value)
{
   struct threaded_context *tc = threaded_context(_pipe);

   if (tc->pipe->set_context_param) {
      struct tc_context_param *payload =
         tc_add_struct_typed_call(tc, TC_CALL_set_context_param,
                                  tc_context_param);

      payload->param = param;
      payload->value = value;
   }

   if (param == PIPE_CONTEXT_PARAM_PIN_THREADS_TO_L3_CACHE) {
      /* Pin the gallium thread as requested. */
      util_pin_thread_to_L3(tc->queue.threads[0], value,
                            util_cpu_caps.cores_per_L3);
   }
}
/********************************************************************
 * draw, launch, clear, blit, copy, flush
 */
struct tc_flush_payload {
   struct threaded_context *tc;
   struct pipe_fence_handle *fence;
   unsigned flags;
};
static void
tc_flush_queries(struct threaded_context *tc)
{
   struct threaded_query *tq, *tmp;
   LIST_FOR_EACH_ENTRY_SAFE(tq, tmp, &tc->unflushed_queries, head_unflushed) {
      LIST_DEL(&tq->head_unflushed);

      /* Memory release semantics: due to a possible race with
       * tc_get_query_result, we must ensure that the linked list changes
       * are visible before setting tq->flushed.
       */
      p_atomic_set(&tq->flushed, true);
   }
}
static void
tc_call_flush(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_flush_payload *p = (struct tc_flush_payload *)payload;
   struct pipe_screen *screen = pipe->screen;

   pipe->flush(pipe, p->fence ? &p->fence : NULL, p->flags);
   screen->fence_reference(screen, &p->fence, NULL);

   if (!(p->flags & PIPE_FLUSH_DEFERRED))
      tc_flush_queries(p->tc);
}
static void
tc_flush(struct pipe_context *_pipe, struct pipe_fence_handle **fence,
         unsigned flags)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;
   struct pipe_screen *screen = pipe->screen;
   bool async = flags & PIPE_FLUSH_DEFERRED;

   if (flags & PIPE_FLUSH_ASYNC) {
      struct tc_batch *last = &tc->batch_slots[tc->last];

      /* Prefer to do the flush in the driver thread, but avoid the inter-thread
       * communication overhead if the driver thread is currently idle and the
       * caller is going to wait for the fence immediately anyway.
       */
      if (!(util_queue_fence_is_signalled(&last->fence) &&
            (flags & PIPE_FLUSH_HINT_FINISH)))
         async = true;
   }

   if (async && tc->create_fence) {
      if (fence) {
         struct tc_batch *next = &tc->batch_slots[tc->next];

         if (!next->token) {
            next->token = malloc(sizeof(*next->token));
            if (!next->token)
               goto out_of_memory;

            pipe_reference_init(&next->token->ref, 1);
            next->token->tc = tc;
         }

         screen->fence_reference(screen, fence, tc->create_fence(pipe, next->token));
         if (!*fence)
            goto out_of_memory;
      }

      struct tc_flush_payload *p =
         tc_add_struct_typed_call(tc, TC_CALL_flush, tc_flush_payload);
      p->tc = tc;
      p->fence = fence ? *fence : NULL;
      p->flags = flags | TC_FLUSH_ASYNC;

      if (!(flags & PIPE_FLUSH_DEFERRED))
         tc_batch_flush(tc);
      return;
   }

out_of_memory:
   tc_sync_msg(tc, flags & PIPE_FLUSH_END_OF_FRAME ? "end of frame" :
                   flags & PIPE_FLUSH_DEFERRED ? "deferred fence" : "normal");

   if (!(flags & PIPE_FLUSH_DEFERRED))
      tc_flush_queries(tc);
   pipe->flush(pipe, fence, flags);
}
/* This is actually variable-sized, because indirect isn't allocated if it's
 * not needed. */
struct tc_full_draw_info {
   struct pipe_draw_info draw;
   struct pipe_draw_indirect_info indirect;
};
static void
tc_call_draw_vbo(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_full_draw_info *info = (struct tc_full_draw_info*)payload;

   pipe->draw_vbo(pipe, &info->draw);
   pipe_so_target_reference(&info->draw.count_from_stream_output, NULL);
   if (info->draw.index_size)
      pipe_resource_reference(&info->draw.index.resource, NULL);
   if (info->draw.indirect) {
      pipe_resource_reference(&info->indirect.buffer, NULL);
      pipe_resource_reference(&info->indirect.indirect_draw_count, NULL);
   }
}
static struct tc_full_draw_info *
tc_add_draw_vbo(struct pipe_context *_pipe, bool indirect)
{
   return (struct tc_full_draw_info*)
          tc_add_sized_call(threaded_context(_pipe), TC_CALL_draw_vbo,
                            indirect ? sizeof(struct tc_full_draw_info) :
                                       sizeof(struct pipe_draw_info));
}

static void
tc_draw_vbo(struct pipe_context *_pipe, const struct pipe_draw_info *info)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_draw_indirect_info *indirect = info->indirect;
   unsigned index_size = info->index_size;
   bool has_user_indices = info->has_user_indices;

   if (index_size && has_user_indices) {
      unsigned size = info->count * index_size;
      struct pipe_resource *buffer = NULL;
      unsigned offset;

      tc_assert(!indirect);

      /* This must be done before adding draw_vbo, because it could generate
       * e.g. transfer_unmap and flush partially-uninitialized draw_vbo
       * to the driver if it was done afterwards.
       */
      u_upload_data(tc->base.stream_uploader, 0, size, 4, info->index.user,
                    &offset, &buffer);
      if (unlikely(!buffer))
         return;

      struct tc_full_draw_info *p = tc_add_draw_vbo(_pipe, false);
      p->draw.count_from_stream_output = NULL;
      pipe_so_target_reference(&p->draw.count_from_stream_output,
                               info->count_from_stream_output);
      memcpy(&p->draw, info, sizeof(*info));
      p->draw.has_user_indices = false;
      p->draw.index.resource = buffer;
      p->draw.start = offset / index_size;
   } else {
      /* Non-indexed call or indexed with a real index buffer. */
      struct tc_full_draw_info *p = tc_add_draw_vbo(_pipe, indirect != NULL);
      p->draw.count_from_stream_output = NULL;
      pipe_so_target_reference(&p->draw.count_from_stream_output,
                               info->count_from_stream_output);
      if (index_size) {
         tc_set_resource_reference(&p->draw.index.resource,
                                   info->index.resource);
      }
      memcpy(&p->draw, info, sizeof(*info));

      if (indirect) {
         tc_set_resource_reference(&p->indirect.buffer, indirect->buffer);
         tc_set_resource_reference(&p->indirect.indirect_draw_count,
                                   indirect->indirect_draw_count);
         memcpy(&p->indirect, indirect, sizeof(*indirect));
         p->draw.indirect = &p->indirect;
      }
   }
}
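
/* Compute dispatch. pipe_grid_info is small enough to be stored directly as
 * the call payload; only the indirect dispatch buffer needs a reference that
 * survives until the driver thread executes the call.
 */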

static void
tc_call_launch_grid(struct pipe_context *pipe, union tc_payload *payload)
{
   struct pipe_grid_info *p = (struct pipe_grid_info *)payload;

   pipe->launch_grid(pipe, p);
   pipe_resource_reference(&p->indirect, NULL);
}

static void
tc_launch_grid(struct pipe_context *_pipe,
               const struct pipe_grid_info *info)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_grid_info *p = tc_add_struct_typed_call(tc, TC_CALL_launch_grid,
                                                       pipe_grid_info);

   assert(info->input == NULL);

   tc_set_resource_reference(&p->indirect, info->indirect);
   memcpy(p, info, sizeof(*info));
}

static void
tc_call_resource_copy_region(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_resource_copy_region *p = (struct tc_resource_copy_region *)payload;

   pipe->resource_copy_region(pipe, p->dst, p->dst_level, p->dstx, p->dsty,
                              p->dstz, p->src, p->src_level, &p->src_box);
   pipe_resource_reference(&p->dst, NULL);
   pipe_resource_reference(&p->src, NULL);
}

static void
tc_resource_copy_region(struct pipe_context *_pipe,
                        struct pipe_resource *dst, unsigned dst_level,
                        unsigned dstx, unsigned dsty, unsigned dstz,
                        struct pipe_resource *src, unsigned src_level,
                        const struct pipe_box *src_box)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_resource *tdst = threaded_resource(dst);
   struct tc_resource_copy_region *p =
      tc_add_struct_typed_call(tc, TC_CALL_resource_copy_region,
                               tc_resource_copy_region);

   tc_set_resource_reference(&p->dst, dst);
   p->dst_level = dst_level;
   p->dstx = dstx;
   p->dsty = dsty;
   p->dstz = dstz;
   tc_set_resource_reference(&p->src, src);
   p->src_level = src_level;
   p->src_box = *src_box;

   if (dst->target == PIPE_BUFFER)
      util_range_add(&tdst->b, &tdst->valid_buffer_range,
                     dstx, dstx + src_box->width);
}
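
/* Tracking the valid range of buffer destinations lets later transfer_map
 * calls on the same buffer skip synchronization when they don't overlap any
 * previously written region.
 */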

static void
tc_call_blit(struct pipe_context *pipe, union tc_payload *payload)
{
   struct pipe_blit_info *blit = (struct pipe_blit_info *)payload;

   pipe->blit(pipe, blit);
   pipe_resource_reference(&blit->dst.resource, NULL);
   pipe_resource_reference(&blit->src.resource, NULL);
}

static void
tc_blit(struct pipe_context *_pipe, const struct pipe_blit_info *info)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_blit_info *blit =
      tc_add_struct_typed_call(tc, TC_CALL_blit, pipe_blit_info);

   tc_set_resource_reference(&blit->dst.resource, info->dst.resource);
   tc_set_resource_reference(&blit->src.resource, info->src.resource);
   memcpy(blit, info, sizeof(*info));
}
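
/* generate_mipmap below validates format support on the application thread;
 * pipe_screen entry points are expected to be thread-safe, unlike
 * pipe_context ones, so no sync with the driver thread is needed for that.
 */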

struct tc_generate_mipmap {
   struct pipe_resource *res;
   enum pipe_format format;
   unsigned base_level;
   unsigned last_level;
   unsigned first_layer;
   unsigned last_layer;
};

static void
tc_call_generate_mipmap(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_generate_mipmap *p = (struct tc_generate_mipmap *)payload;
   ASSERTED bool result = pipe->generate_mipmap(pipe, p->res, p->format,
                                                p->base_level, p->last_level,
                                                p->first_layer, p->last_layer);

   assert(result);
   pipe_resource_reference(&p->res, NULL);
}

static bool
tc_generate_mipmap(struct pipe_context *_pipe,
                   struct pipe_resource *res,
                   enum pipe_format format,
                   unsigned base_level,
                   unsigned last_level,
                   unsigned first_layer,
                   unsigned last_layer)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;
   struct pipe_screen *screen = pipe->screen;
   unsigned bind = PIPE_BIND_SAMPLER_VIEW;

   if (util_format_is_depth_or_stencil(format))
      bind = PIPE_BIND_DEPTH_STENCIL;
   else
      bind = PIPE_BIND_RENDER_TARGET;

   if (!screen->is_format_supported(screen, format, res->target,
                                    res->nr_samples, res->nr_storage_samples,
                                    bind))
      return false;

   struct tc_generate_mipmap *p =
      tc_add_struct_typed_call(tc, TC_CALL_generate_mipmap, tc_generate_mipmap);

   tc_set_resource_reference(&p->res, res);
   p->format = format;
   p->base_level = base_level;
   p->last_level = last_level;
   p->first_layer = first_layer;
   p->last_layer = last_layer;
   return true;
}

static void
tc_call_flush_resource(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->flush_resource(pipe, payload->resource);
   pipe_resource_reference(&payload->resource, NULL);
}

static void
tc_flush_resource(struct pipe_context *_pipe,
                  struct pipe_resource *resource)
{
   struct threaded_context *tc = threaded_context(_pipe);
   union tc_payload *payload = tc_add_small_call(tc, TC_CALL_flush_resource);

   tc_set_resource_reference(&payload->resource, resource);
}
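
/* Buffer invalidation is handled entirely on the application thread when
 * possible (by swapping in fresh storage via tc_invalidate_buffer), so only
 * non-buffer resources need to reach the driver thread.
 */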

static void
tc_call_invalidate_resource(struct pipe_context *pipe, union tc_payload *payload)
{
   pipe->invalidate_resource(pipe, payload->resource);
   pipe_resource_reference(&payload->resource, NULL);
}

static void
tc_invalidate_resource(struct pipe_context *_pipe,
                       struct pipe_resource *resource)
{
   struct threaded_context *tc = threaded_context(_pipe);

   if (resource->target == PIPE_BUFFER) {
      tc_invalidate_buffer(tc, threaded_resource(resource));
      return;
   }

   union tc_payload *payload = tc_add_small_call(tc, TC_CALL_invalidate_resource);
   tc_set_resource_reference(&payload->resource, resource);
}

struct tc_clear {
   unsigned buffers;
   union pipe_color_union color;
   double depth;
   unsigned stencil;
};

static void
tc_call_clear(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_clear *p = (struct tc_clear *)payload;

   pipe->clear(pipe, p->buffers, &p->color, p->depth, p->stencil);
}

static void
tc_clear(struct pipe_context *_pipe, unsigned buffers,
         const union pipe_color_union *color, double depth,
         unsigned stencil)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_clear *p = tc_add_struct_typed_call(tc, TC_CALL_clear, tc_clear);

   p->buffers = buffers;
   p->color = *color;
   p->depth = depth;
   p->stencil = stencil;
}
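
/* The two surface clears below take a pipe_surface and are executed
 * synchronously: tc waits for the driver thread to go idle, then calls the
 * driver directly on the application thread.
 */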

static void
tc_clear_render_target(struct pipe_context *_pipe,
                       struct pipe_surface *dst,
                       const union pipe_color_union *color,
                       unsigned dstx, unsigned dsty,
                       unsigned width, unsigned height,
                       bool render_condition_enabled)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->clear_render_target(pipe, dst, color, dstx, dsty, width, height,
                             render_condition_enabled);
}

static void
tc_clear_depth_stencil(struct pipe_context *_pipe,
                       struct pipe_surface *dst, unsigned clear_flags,
                       double depth, unsigned stencil, unsigned dstx,
                       unsigned dsty, unsigned width, unsigned height,
                       bool render_condition_enabled)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   tc_sync(tc);
   pipe->clear_depth_stencil(pipe, dst, clear_flags, depth, stencil,
                             dstx, dsty, width, height,
                             render_condition_enabled);
}

struct tc_clear_buffer {
   struct pipe_resource *res;
   unsigned offset;
   unsigned size;
   char clear_value[16];
   int clear_value_size;
};

static void
tc_call_clear_buffer(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_clear_buffer *p = (struct tc_clear_buffer *)payload;

   pipe->clear_buffer(pipe, p->res, p->offset, p->size, p->clear_value,
                      p->clear_value_size);
   pipe_resource_reference(&p->res, NULL);
}

static void
tc_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
                unsigned offset, unsigned size,
                const void *clear_value, int clear_value_size)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct threaded_resource *tres = threaded_resource(res);
   struct tc_clear_buffer *p =
      tc_add_struct_typed_call(tc, TC_CALL_clear_buffer, tc_clear_buffer);

   tc_set_resource_reference(&p->res, res);
   p->offset = offset;
   p->size = size;
   memcpy(p->clear_value, clear_value, clear_value_size);
   p->clear_value_size = clear_value_size;

   util_range_add(&tres->b, &tres->valid_buffer_range, offset, offset + size);
}
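
/* clear_value is copied by value into the payload; the 16-byte array covers
 * the largest clear value gallium passes here (a four-component 32-bit
 * color).
 */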

struct tc_clear_texture {
   struct pipe_resource *res;
   unsigned level;
   struct pipe_box box;
   char data[16];
};

static void
tc_call_clear_texture(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_clear_texture *p = (struct tc_clear_texture *)payload;

   pipe->clear_texture(pipe, p->res, p->level, &p->box, p->data);
   pipe_resource_reference(&p->res, NULL);
}

static void
tc_clear_texture(struct pipe_context *_pipe, struct pipe_resource *res,
                 unsigned level, const struct pipe_box *box, const void *data)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_clear_texture *p =
      tc_add_struct_typed_call(tc, TC_CALL_clear_texture, tc_clear_texture);

   tc_set_resource_reference(&p->res, res);
   p->level = level;
   p->box = *box;
   memcpy(p->data, data,
          util_format_get_blocksize(res->format));
}

struct tc_resource_commit {
   struct pipe_resource *res;
   unsigned level;
   struct pipe_box box;
   bool commit;
};

static void
tc_call_resource_commit(struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_resource_commit *p = (struct tc_resource_commit *)payload;

   pipe->resource_commit(pipe, p->res, p->level, &p->box, p->commit);
   pipe_resource_reference(&p->res, NULL);
}

static bool
tc_resource_commit(struct pipe_context *_pipe, struct pipe_resource *res,
                   unsigned level, struct pipe_box *box, bool commit)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct tc_resource_commit *p =
      tc_add_struct_typed_call(tc, TC_CALL_resource_commit, tc_resource_commit);

   tc_set_resource_reference(&p->res, res);
   p->level = level;
   p->box = *box;
   p->commit = commit;
   return true; /* we don't care about the return value for this call */
}

/********************************************************************
 * callback
 */

struct tc_callback_payload {
   void (*fn)(void *data);
   void *data;
};

static void
tc_call_callback(UNUSED struct pipe_context *pipe, union tc_payload *payload)
{
   struct tc_callback_payload *p = (struct tc_callback_payload *)payload;

   p->fn(p->data);
}

static void
tc_callback(struct pipe_context *_pipe, void (*fn)(void *), void *data,
            bool asap)
{
   struct threaded_context *tc = threaded_context(_pipe);

   if (asap && tc_is_sync(tc)) {
      fn(data);
      return;
   }

   struct tc_callback_payload *p =
      tc_add_struct_typed_call(tc, TC_CALL_callback, tc_callback_payload);
   p->fn = fn;
   p->data = data;
}
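
/* Note: with asap=true and an idle driver thread, the callback above runs
 * immediately on the application thread; otherwise it is queued and runs on
 * the driver thread in submission order with the other calls.
 */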

/********************************************************************
 * create & destroy
 */

static void
tc_destroy(struct pipe_context *_pipe)
{
   struct threaded_context *tc = threaded_context(_pipe);
   struct pipe_context *pipe = tc->pipe;

   if (tc->base.const_uploader &&
       tc->base.stream_uploader != tc->base.const_uploader)
      u_upload_destroy(tc->base.const_uploader);

   if (tc->base.stream_uploader)
      u_upload_destroy(tc->base.stream_uploader);

   tc_sync(tc);

   if (util_queue_is_initialized(&tc->queue)) {
      util_queue_destroy(&tc->queue);

      for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
         util_queue_fence_destroy(&tc->batch_slots[i].fence);
         assert(!tc->batch_slots[i].token);
      }
   }

   slab_destroy_child(&tc->pool_transfers);
   assert(tc->batch_slots[tc->next].num_total_call_slots == 0);
   pipe->destroy(pipe);
   os_free_aligned(tc);
}

static const tc_execute execute_func[TC_NUM_CALLS] = {
#define CALL(name) tc_call_##name,
#include "u_threaded_context_calls.h"
#undef CALL
};
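
/* The table above is built from the same X-macro list that generated the
 * TC_CALL_* enum, so a call ID always indexes its matching tc_call_*
 * handler.
 */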

/**
 * Wrap an existing pipe_context into a threaded_context.
 *
 * \param pipe                 pipe_context to wrap
 * \param parent_transfer_pool parent slab pool set up for creating pipe_-
 *                             transfer objects; the driver should have one
 *                             in pipe_screen
 * \param replace_buffer  callback for replacing a pipe_resource's storage
 *                        with another pipe_resource's storage
 * \param create_fence  callback for creating a fence that wraps a not-yet-
 *                      flushed batch; used by async/deferred flushes
 * \param out  if successful, the threaded_context will be returned here in
 *             addition to the return value if "out" != NULL
 */
struct pipe_context *
threaded_context_create(struct pipe_context *pipe,
                        struct slab_parent_pool *parent_transfer_pool,
                        tc_replace_buffer_storage_func replace_buffer,
                        tc_create_fence_func create_fence,
                        struct threaded_context **out)
{
   struct threaded_context *tc;

   STATIC_ASSERT(sizeof(union tc_payload) <= 8);
   STATIC_ASSERT(sizeof(struct tc_call) <= 16);

   if (!pipe)
      return NULL;

   util_cpu_detect();

   if (!debug_get_bool_option("GALLIUM_THREAD", util_cpu_caps.nr_cpus > 1))
      return pipe;

   tc = os_malloc_aligned(sizeof(struct threaded_context), 16);
   if (!tc) {
      pipe->destroy(pipe);
      return NULL;
   }
   memset(tc, 0, sizeof(*tc));

   assert((uintptr_t)tc % 16 == 0);
   /* These should be static asserts, but they don't work with MSVC */
   assert(offsetof(struct threaded_context, batch_slots) % 16 == 0);
   assert(offsetof(struct threaded_context, batch_slots[0].call) % 16 == 0);
   assert(offsetof(struct threaded_context, batch_slots[0].call[1]) % 16 == 0);
   assert(offsetof(struct threaded_context, batch_slots[1].call) % 16 == 0);

   /* The driver context isn't wrapped, so set its "priv" to NULL. */
   pipe->priv = NULL;

   tc->pipe = pipe;
   tc->replace_buffer_storage = replace_buffer;
   tc->create_fence = create_fence;
   tc->map_buffer_alignment =
      pipe->screen->get_param(pipe->screen, PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT);
   tc->base.priv = pipe; /* priv points to the wrapped driver context */
   tc->base.screen = pipe->screen;
   tc->base.destroy = tc_destroy;
   tc->base.callback = tc_callback;

   tc->base.stream_uploader = u_upload_clone(&tc->base, pipe->stream_uploader);
   if (pipe->stream_uploader == pipe->const_uploader)
      tc->base.const_uploader = tc->base.stream_uploader;
   else
      tc->base.const_uploader = u_upload_clone(&tc->base, pipe->const_uploader);

   if (!tc->base.stream_uploader || !tc->base.const_uploader)
      goto fail;

   /* The queue size is the number of batches "waiting". Batches are removed
    * from the queue before being executed, so keep one tc_batch slot for that
    * execution. Also, keep one unused slot for an unflushed batch.
    */
   if (!util_queue_init(&tc->queue, "gdrv", TC_MAX_BATCHES - 2, 1, 0))
      goto fail;

   for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
      tc->batch_slots[i].sentinel = TC_SENTINEL;
      tc->batch_slots[i].pipe = pipe;
      util_queue_fence_init(&tc->batch_slots[i].fence);
   }

   list_inithead(&tc->unflushed_queries);

   slab_create_child(&tc->pool_transfers, parent_transfer_pool);

   tc->base.set_context_param = tc_set_context_param; /* always set this */
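
/* Forward each pipe_context entry point only if the wrapped driver
 * implements it: CTX_INIT installs the tc_* wrapper, or NULL when the driver
 * leaves the hook unset.
 */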
2636 #define CTX_INIT(_member) \
2637 tc->base._member = tc->pipe->_member ? tc_##_member : NULL

   CTX_INIT(flush);
   CTX_INIT(draw_vbo);
   CTX_INIT(launch_grid);
   CTX_INIT(resource_copy_region);
   CTX_INIT(blit);
   CTX_INIT(clear);
   CTX_INIT(clear_render_target);
   CTX_INIT(clear_depth_stencil);
   CTX_INIT(clear_buffer);
   CTX_INIT(clear_texture);
   CTX_INIT(flush_resource);
   CTX_INIT(generate_mipmap);
   CTX_INIT(render_condition);
   CTX_INIT(create_query);
   CTX_INIT(create_batch_query);
   CTX_INIT(destroy_query);
   CTX_INIT(begin_query);
   CTX_INIT(end_query);
   CTX_INIT(get_query_result);
   CTX_INIT(get_query_result_resource);
   CTX_INIT(set_active_query_state);
   CTX_INIT(create_blend_state);
   CTX_INIT(bind_blend_state);
   CTX_INIT(delete_blend_state);
   CTX_INIT(create_sampler_state);
   CTX_INIT(bind_sampler_states);
   CTX_INIT(delete_sampler_state);
   CTX_INIT(create_rasterizer_state);
   CTX_INIT(bind_rasterizer_state);
   CTX_INIT(delete_rasterizer_state);
   CTX_INIT(create_depth_stencil_alpha_state);
   CTX_INIT(bind_depth_stencil_alpha_state);
   CTX_INIT(delete_depth_stencil_alpha_state);
   CTX_INIT(create_fs_state);
   CTX_INIT(bind_fs_state);
   CTX_INIT(delete_fs_state);
   CTX_INIT(create_vs_state);
   CTX_INIT(bind_vs_state);
   CTX_INIT(delete_vs_state);
   CTX_INIT(create_gs_state);
   CTX_INIT(bind_gs_state);
   CTX_INIT(delete_gs_state);
   CTX_INIT(create_tcs_state);
   CTX_INIT(bind_tcs_state);
   CTX_INIT(delete_tcs_state);
   CTX_INIT(create_tes_state);
   CTX_INIT(bind_tes_state);
   CTX_INIT(delete_tes_state);
   CTX_INIT(create_compute_state);
   CTX_INIT(bind_compute_state);
   CTX_INIT(delete_compute_state);
   CTX_INIT(create_vertex_elements_state);
   CTX_INIT(bind_vertex_elements_state);
   CTX_INIT(delete_vertex_elements_state);
   CTX_INIT(set_blend_color);
   CTX_INIT(set_stencil_ref);
   CTX_INIT(set_sample_mask);
   CTX_INIT(set_min_samples);
   CTX_INIT(set_clip_state);
   CTX_INIT(set_constant_buffer);
   CTX_INIT(set_framebuffer_state);
   CTX_INIT(set_polygon_stipple);
   CTX_INIT(set_scissor_states);
   CTX_INIT(set_viewport_states);
   CTX_INIT(set_window_rectangles);
   CTX_INIT(set_sampler_views);
   CTX_INIT(set_tess_state);
   CTX_INIT(set_shader_buffers);
   CTX_INIT(set_shader_images);
   CTX_INIT(set_vertex_buffers);
   CTX_INIT(create_stream_output_target);
   CTX_INIT(stream_output_target_destroy);
   CTX_INIT(set_stream_output_targets);
   CTX_INIT(create_sampler_view);
   CTX_INIT(sampler_view_destroy);
   CTX_INIT(create_surface);
   CTX_INIT(surface_destroy);
   CTX_INIT(transfer_map);
   CTX_INIT(transfer_flush_region);
   CTX_INIT(transfer_unmap);
   CTX_INIT(buffer_subdata);
   CTX_INIT(texture_subdata);
   CTX_INIT(texture_barrier);
   CTX_INIT(memory_barrier);
   CTX_INIT(resource_commit);
   CTX_INIT(create_video_codec);
   CTX_INIT(create_video_buffer);
   CTX_INIT(set_compute_resources);
   CTX_INIT(set_global_binding);
   CTX_INIT(get_sample_position);
   CTX_INIT(invalidate_resource);
   CTX_INIT(get_device_reset_status);
   CTX_INIT(set_device_reset_callback);
   CTX_INIT(dump_debug_state);
   CTX_INIT(set_log_context);
   CTX_INIT(emit_string_marker);
   CTX_INIT(set_debug_callback);
   CTX_INIT(create_fence_fd);
   CTX_INIT(fence_server_sync);
   CTX_INIT(fence_server_signal);
   CTX_INIT(get_timestamp);
   CTX_INIT(create_texture_handle);
   CTX_INIT(delete_texture_handle);
   CTX_INIT(make_texture_handle_resident);
   CTX_INIT(create_image_handle);
   CTX_INIT(delete_image_handle);
   CTX_INIT(make_image_handle_resident);
#undef CTX_INIT

   if (out)
      *out = tc;

   return &tc->base;

fail:
   tc_destroy(&tc->base);
   return NULL;
}
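
/* Usage sketch (hypothetical driver code, not part of this file): a driver
 * typically wraps its context at creation time. drv_create_raw_context,
 * drv_screen, drv_replace_buffer_storage and drv_create_fence are assumed
 * names for illustration:
 *
 *    struct pipe_context *
 *    drv_context_create(struct pipe_screen *screen, void *priv, unsigned flags)
 *    {
 *       struct pipe_context *ctx = drv_create_raw_context(screen);
 *
 *       return threaded_context_create(ctx,
 *                                      &drv_screen(screen)->transfer_pool,
 *                                      drv_replace_buffer_storage,
 *                                      drv_create_fence,
 *                                      NULL);
 *    }
 */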