static const tc_execute execute_func[TC_NUM_CALLS];
static void
-tc_batch_check(struct tc_batch *batch)
+tc_batch_check(MAYBE_UNUSED struct tc_batch *batch)
{
tc_assert(batch->sentinel == TC_SENTINEL);
tc_assert(batch->num_total_call_slots <= TC_CALLS_PER_BATCH);
}
static void
-tc_batch_execute(void *job, int thread_index)
+tc_batch_execute(void *job, UNUSED int thread_index)
{
struct tc_batch *batch = job;
struct pipe_context *pipe = batch->pipe;
return tc_add_sized_call(tc, id, 0);
}
+/* Return true when the threaded context is effectively synchronous:
+ * the last submitted batch's fence has signalled (driver thread done with it)
+ * and the batch currently being recorded contains no calls.
+ * NOTE(review): "driver thread is idle" is inferred from these two checks;
+ * confirm against struct tc_batch / util_queue semantics.
+ */
+static bool
+tc_is_sync(struct threaded_context *tc)
+{
+ struct tc_batch *last = &tc->batch_slots[tc->last];
+ struct tc_batch *next = &tc->batch_slots[tc->next];
+
+ return util_queue_fence_is_signalled(&last->fence) &&
+ !next->num_total_call_slots;
+}
+
static void
-_tc_sync(struct threaded_context *tc, const char *info, const char *func)
+_tc_sync(struct threaded_context *tc, MAYBE_UNUSED const char *info, MAYBE_UNUSED const char *func)
{
struct tc_batch *last = &tc->batch_slots[tc->last];
struct tc_batch *next = &tc->batch_slots[tc->next];
if (synced) {
p_atomic_inc(&tc->num_syncs);
- if (tc_strcmp(func, "tc_destroy") != 0)
+ if (tc_strcmp(func, "tc_destroy") != 0) {
tc_printf("sync %s %s\n", func, info);
+ }
}
tc_debug_check(tc);
*/
+/* Flush entry point called on the application thread.  The new prefer_async
+ * parameter lets the caller request that the flush be performed on the
+ * driver thread instead of synchronously here.
+ */
void
threaded_context_flush(struct pipe_context *_pipe,
- struct tc_unflushed_batch_token *token)
+ struct tc_unflushed_batch_token *token,
+ bool prefer_async)
{
struct threaded_context *tc = threaded_context(_pipe);
/* This is called from the state-tracker / application thread. */
- if (token->tc && token->tc == tc)
- tc_sync(token->tc);
+ if (token->tc && token->tc == tc) {
+ struct tc_batch *last = &tc->batch_slots[tc->last];
+
+ /* Prefer to do the flush in the driver thread if it is already
+ * running. That should be better for cache locality.
+ */
+ if (prefer_async || !util_queue_fence_is_signalled(&last->fence))
+ tc_batch_flush(tc);
+ else
+ tc_sync(token->tc);
+ }
}
static void
struct tc_end_query_payload *payload =
tc_add_struct_typed_call(tc, TC_CALL_end_query, tc_end_query_payload);
- tc_add_small_call(tc, TC_CALL_end_query);
-
payload->tc = tc;
payload->query = query;
pipe->set_debug_callback(pipe, cb);
}
+/* Synchronize with the driver thread, then forward set_log_context to the
+ * wrapped pipe context.  Done synchronously because the log context change
+ * must not race with queued calls.
+ */
+static void
+tc_set_log_context(struct pipe_context *_pipe, struct u_log_context *log)
+{
+ struct threaded_context *tc = threaded_context(_pipe);
+ struct pipe_context *pipe = tc->pipe;
+
+ tc_sync(tc);
+ pipe->set_log_context(pipe, log);
+}
+
+/* Import a fence from a file descriptor.  Runs synchronously (tc_sync) and
+ * now forwards the new pipe_fd_type so the driver knows how to interpret fd.
+ */
static void
tc_create_fence_fd(struct pipe_context *_pipe,
- struct pipe_fence_handle **fence, int fd)
+ struct pipe_fence_handle **fence, int fd,
+ enum pipe_fd_type type)
{
struct threaded_context *tc = threaded_context(_pipe);
struct pipe_context *pipe = tc->pipe;
tc_sync(tc);
- pipe->create_fence_fd(pipe, fence, fd);
+ pipe->create_fence_fd(pipe, fence, fd, type);
+}
+
+/* Executed on the driver thread: perform the server-side wait on the queued
+ * fence, then release the reference taken when the call was recorded.
+ */
+static void
+tc_call_fence_server_sync(struct pipe_context *pipe, union tc_payload *payload)
+{
+ pipe->fence_server_sync(pipe, payload->fence);
+ pipe->screen->fence_reference(pipe->screen, &payload->fence, NULL);
}
static void
struct pipe_fence_handle *fence)
{
struct threaded_context *tc = threaded_context(_pipe);
- struct pipe_context *pipe = tc->pipe;
+ struct pipe_screen *screen = tc->pipe->screen;
+ union tc_payload *payload = tc_add_small_call(tc, TC_CALL_fence_server_sync);
- tc_sync(tc);
- pipe->fence_server_sync(pipe, fence);
+ payload->fence = NULL;
+ screen->fence_reference(screen, &payload->fence, fence);
+}
+
+/* Executed on the driver thread: signal the queued fence server-side, then
+ * release the reference taken when the call was recorded.
+ */
+static void
+tc_call_fence_server_signal(struct pipe_context *pipe, union tc_payload *payload)
+{
+ pipe->fence_server_signal(pipe, payload->fence);
+ pipe->screen->fence_reference(pipe->screen, &payload->fence, NULL);
+}
+
+/* Application-thread side: queue a server-side fence signal.  A screen-level
+ * fence reference is taken here and released by tc_call_fence_server_signal
+ * on the driver thread, keeping the fence alive until the call executes.
+ */
+static void
+tc_fence_server_signal(struct pipe_context *_pipe,
+ struct pipe_fence_handle *fence)
+{
+ struct threaded_context *tc = threaded_context(_pipe);
+ struct pipe_screen *screen = tc->pipe->screen;
+ union tc_payload *payload = tc_add_small_call(tc, TC_CALL_fence_server_signal);
+
+ payload->fence = NULL;
+ screen->fence_reference(screen, &payload->fence, fence);
+}
+/* Video APIs never run under the threaded context; parameters are marked
+ * UNUSED only to silence -Wunused-parameter warnings.
+ */
static struct pipe_video_codec *
-tc_create_video_codec(struct pipe_context *_pipe,
- const struct pipe_video_codec *templ)
+tc_create_video_codec(UNUSED struct pipe_context *_pipe,
+ UNUSED const struct pipe_video_codec *templ)
{
unreachable("Threaded context should not be enabled for video APIs");
return NULL;
}
+/* Video APIs never run under the threaded context; parameters are marked
+ * UNUSED only to silence -Wunused-parameter warnings.
+ */
static struct pipe_video_buffer *
-tc_create_video_buffer(struct pipe_context *_pipe,
- const struct pipe_video_buffer *templ)
+tc_create_video_buffer(UNUSED struct pipe_context *_pipe,
+ UNUSED const struct pipe_video_buffer *templ)
{
unreachable("Threaded context should not be enabled for video APIs");
return NULL;
}
+/* Payload for TC_CALL_set_context_param: the parameter id and its value. */
+struct tc_context_param {
+ enum pipe_context_param param;
+ unsigned value;
+};
+
+/* Executed on the driver thread: forward the parameter to the wrapped
+ * context if the driver implements set_context_param.
+ */
+static void
+tc_call_set_context_param(struct pipe_context *pipe,
+ union tc_payload *payload)
+{
+ struct tc_context_param *p = (struct tc_context_param*)payload;
+
+ if (pipe->set_context_param)
+ pipe->set_context_param(pipe, p->param, p->value);
+}
+
+/* Application-thread side: queue the parameter change for the driver thread
+ * (only when the driver supports it), and additionally handle thread-pinning
+ * requests locally since they target the gallium worker thread itself.
+ */
+static void
+tc_set_context_param(struct pipe_context *_pipe,
+ enum pipe_context_param param,
+ unsigned value)
+{
+ struct threaded_context *tc = threaded_context(_pipe);
+
+ if (tc->pipe->set_context_param) {
+ struct tc_context_param *payload =
+ tc_add_struct_typed_call(tc, TC_CALL_set_context_param,
+ tc_context_param);
+
+ payload->param = param;
+ payload->value = value;
+ }
+
+ if (param == PIPE_CONTEXT_PARAM_PIN_THREADS_TO_L3_CACHE) {
+ /* Pin the gallium thread as requested. */
+ util_pin_thread_to_L3(tc->queue.threads[0], value,
+ util_cpu_caps.cores_per_L3);
+ }
+}
+
/********************************************************************
* draw, launch, clear, blit, copy, flush
if (async && tc->create_fence) {
if (fence) {
- struct tc_unflushed_batch_token *token = NULL;
struct tc_batch *next = &tc->batch_slots[tc->next];
if (!next->token) {
next->token->tc = tc;
}
- screen->fence_reference(screen, fence, tc->create_fence(pipe, token));
+ screen->fence_reference(screen, fence, tc->create_fence(pipe, next->token));
if (!*fence)
goto out_of_memory;
}
tc_call_generate_mipmap(struct pipe_context *pipe, union tc_payload *payload)
{
struct tc_generate_mipmap *p = (struct tc_generate_mipmap *)payload;
- bool MAYBE_UNUSED result = pipe->generate_mipmap(pipe, p->res, p->format,
+ MAYBE_UNUSED bool result = pipe->generate_mipmap(pipe, p->res, p->format,
p->base_level,
p->last_level,
p->first_layer,
bind = PIPE_BIND_RENDER_TARGET;
if (!screen->is_format_supported(screen, format, res->target,
- res->nr_samples, bind))
+ res->nr_samples, res->nr_storage_samples,
+ bind))
return false;
struct tc_generate_mipmap *p =
}
+/********************************************************************
+ * callback
+ */
+
+/* Payload for TC_CALL_callback: the function to invoke and its argument. */
+struct tc_callback_payload {
+ void (*fn)(void *data);
+ void *data;
+};
+
+/* Executed on the driver thread: invoke the queued callback.  The pipe
+ * context is unused; the call exists only to defer fn(data).
+ */
+static void
+tc_call_callback(UNUSED struct pipe_context *pipe, union tc_payload *payload)
+{
+ struct tc_callback_payload *p = (struct tc_callback_payload *)payload;
+
+ p->fn(p->data);
+}
+
+/* Run fn(data) either immediately or from the driver thread.  When asap is
+ * set and the context is currently synchronous (tc_is_sync), the callback
+ * runs right here on the calling thread; otherwise it is queued so it
+ * executes in order with the other recorded calls.
+ */
+static void
+tc_callback(struct pipe_context *_pipe, void (*fn)(void *), void *data,
+ bool asap)
+{
+ struct threaded_context *tc = threaded_context(_pipe);
+
+ if (asap && tc_is_sync(tc)) {
+ fn(data);
+ return;
+ }
+
+ struct tc_callback_payload *p =
+ tc_add_struct_typed_call(tc, TC_CALL_callback, tc_callback_payload);
+ p->fn = fn;
+ p->data = data;
+}
+
+
/********************************************************************
* create & destroy
*/
tc->base.priv = pipe; /* priv points to the wrapped driver context */
tc->base.screen = pipe->screen;
tc->base.destroy = tc_destroy;
+ tc->base.callback = tc_callback;
tc->base.stream_uploader = u_upload_clone(&tc->base, pipe->stream_uploader);
if (pipe->stream_uploader == pipe->const_uploader)
* from the queue before being executed, so keep one tc_batch slot for that
* execution. Also, keep one unused slot for an unflushed batch.
*/
- if (!util_queue_init(&tc->queue, "gallium_drv", TC_MAX_BATCHES - 2, 1, 0))
+ if (!util_queue_init(&tc->queue, "gdrv", TC_MAX_BATCHES - 2, 1, 0))
goto fail;
for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
slab_create_child(&tc->pool_transfers, parent_transfer_pool);
+ tc->base.set_context_param = tc_set_context_param; /* always set this */
+
#define CTX_INIT(_member) \
tc->base._member = tc->pipe->_member ? tc_##_member : NULL
CTX_INIT(get_device_reset_status);
CTX_INIT(set_device_reset_callback);
CTX_INIT(dump_debug_state);
+ CTX_INIT(set_log_context);
CTX_INIT(emit_string_marker);
CTX_INIT(set_debug_callback);
CTX_INIT(create_fence_fd);
CTX_INIT(fence_server_sync);
+ CTX_INIT(fence_server_signal);
CTX_INIT(get_timestamp);
CTX_INIT(create_texture_handle);
CTX_INIT(delete_texture_handle);