X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fauxiliary%2Futil%2Fu_threaded_context.c;h=b50b2c0d24ec4622b60d02764a31d51eb9650c36;hb=0c09df52e10f0339973f44cef68ff5e237720c8a;hp=ccce12b00ce2557e73a8ee5939ad09e7a76b7e8e;hpb=4b0dc098b2561c07c59f7dab2813640a25789bf1;p=mesa.git diff --git a/src/gallium/auxiliary/util/u_threaded_context.c b/src/gallium/auxiliary/util/u_threaded_context.c index ccce12b00ce..b50b2c0d24e 100644 --- a/src/gallium/auxiliary/util/u_threaded_context.c +++ b/src/gallium/auxiliary/util/u_threaded_context.c @@ -64,7 +64,7 @@ typedef void (*tc_execute)(struct pipe_context *pipe, union tc_payload *payload) static const tc_execute execute_func[TC_NUM_CALLS]; static void -tc_batch_check(struct tc_batch *batch) +tc_batch_check(UNUSED struct tc_batch *batch) { tc_assert(batch->sentinel == TC_SENTINEL); tc_assert(batch->num_total_call_slots <= TC_CALLS_PER_BATCH); @@ -80,7 +80,7 @@ tc_debug_check(struct threaded_context *tc) } static void -tc_batch_execute(void *job, int thread_index) +tc_batch_execute(void *job, UNUSED int thread_index) { struct tc_batch *batch = job; struct pipe_context *pipe = batch->pipe; @@ -88,6 +88,8 @@ tc_batch_execute(void *job, int thread_index) tc_batch_check(batch); + assert(!batch->token); + for (struct tc_call *iter = batch->call; iter != last; iter += iter->num_call_slots) { tc_assert(iter->sentinel == TC_SENTINEL); @@ -108,8 +110,13 @@ tc_batch_flush(struct threaded_context *tc) tc_debug_check(tc); p_atomic_add(&tc->num_offloaded_slots, next->num_total_call_slots); + if (next->token) { + next->token->tc = NULL; + tc_unflushed_batch_token_reference(&next->token, NULL); + } + util_queue_add_job(&tc->queue, next, &next->fence, tc_batch_execute, - NULL); + NULL, 0); tc->last = tc->next; tc->next = (tc->next + 1) % TC_MAX_BATCHES; } @@ -162,8 +169,18 @@ tc_add_small_call(struct threaded_context *tc, enum tc_call_id id) return tc_add_sized_call(tc, id, 0); } +static bool +tc_is_sync(struct threaded_context *tc) +{ + struct tc_batch *last = &tc->batch_slots[tc->last]; + struct tc_batch *next = &tc->batch_slots[tc->next]; + + return util_queue_fence_is_signalled(&last->fence) && + !next->num_total_call_slots; +} + static void -_tc_sync(struct threaded_context *tc, const char *info, const char *func) +_tc_sync(struct threaded_context *tc, UNUSED const char *info, UNUSED const char *func) { struct tc_batch *last = &tc->batch_slots[tc->last]; struct tc_batch *next = &tc->batch_slots[tc->next]; @@ -179,6 +196,11 @@ _tc_sync(struct threaded_context *tc, const char *info, const char *func) tc_debug_check(tc); + if (next->token) { + next->token->tc = NULL; + tc_unflushed_batch_token_reference(&next->token, NULL); + } + /* .. and execute unflushed calls directly. */ if (next->num_total_call_slots) { p_atomic_add(&tc->num_direct_slots, next->num_total_call_slots); @@ -189,8 +211,9 @@ _tc_sync(struct threaded_context *tc, const char *info, const char *func) if (synced) { p_atomic_inc(&tc->num_syncs); - if (tc_strcmp(func, "tc_destroy") != 0) + if (tc_strcmp(func, "tc_destroy") != 0) { tc_printf("sync %s %s\n", func, info); + } } tc_debug_check(tc); @@ -199,6 +222,34 @@ _tc_sync(struct threaded_context *tc, const char *info, const char *func) #define tc_sync(tc) _tc_sync(tc, "", __func__) #define tc_sync_msg(tc, info) _tc_sync(tc, info, __func__) +/** + * Call this from fence_finish for same-context fence waits of deferred fences + * that haven't been flushed yet. 
+ * + * The passed pipe_context must be the one passed to pipe_screen::fence_finish, + * i.e., the wrapped one. + */ +void +threaded_context_flush(struct pipe_context *_pipe, + struct tc_unflushed_batch_token *token, + bool prefer_async) +{ + struct threaded_context *tc = threaded_context(_pipe); + + /* This is called from the state-tracker / application thread. */ + if (token->tc && token->tc == tc) { + struct tc_batch *last = &tc->batch_slots[tc->last]; + + /* Prefer to do the flush in the driver thread if it is already + * running. That should be better for cache locality. + */ + if (prefer_async || !util_queue_fence_is_signalled(&last->fence)) + tc_batch_flush(tc); + else + tc_sync(token->tc); + } +} + static void tc_set_resource_reference(struct pipe_resource **dst, struct pipe_resource *src) { @@ -258,7 +309,7 @@ threaded_context_unwrap_sync(struct pipe_context *pipe) *p = deref(param); \ } -TC_FUNC1(set_active_query_state, flags, , boolean, , *) +TC_FUNC1(set_active_query_state, flags, , bool, , *) TC_FUNC1(set_blend_color, blend_color, const, struct pipe_blend_color, *, ) TC_FUNC1(set_stencil_ref, stencil_ref, const, struct pipe_stencil_ref, *, ) @@ -298,6 +349,11 @@ tc_create_batch_query(struct pipe_context *_pipe, unsigned num_queries, static void tc_call_destroy_query(struct pipe_context *pipe, union tc_payload *payload) { + struct threaded_query *tq = threaded_query(payload->query); + + if (tq->head_unflushed.next) + LIST_DEL(&tq->head_unflushed); + pipe->destroy_query(pipe, payload->query); } @@ -305,10 +361,6 @@ static void tc_destroy_query(struct pipe_context *_pipe, struct pipe_query *query) { struct threaded_context *tc = threaded_context(_pipe); - struct threaded_query *tq = threaded_query(query); - - if (tq->head_unflushed.next) - LIST_DEL(&tq->head_unflushed); tc_add_small_call(tc, TC_CALL_destroy_query)->query = query; } @@ -319,7 +371,7 @@ tc_call_begin_query(struct pipe_context *pipe, union tc_payload *payload) pipe->begin_query(pipe, payload->query); } -static boolean +static bool tc_begin_query(struct pipe_context *_pipe, struct pipe_query *query) { struct threaded_context *tc = threaded_context(_pipe); @@ -329,10 +381,21 @@ tc_begin_query(struct pipe_context *_pipe, struct pipe_query *query) return true; /* we don't care about the return value for this call */ } +struct tc_end_query_payload { + struct threaded_context *tc; + struct pipe_query *query; +}; + static void tc_call_end_query(struct pipe_context *pipe, union tc_payload *payload) { - pipe->end_query(pipe, payload->query); + struct tc_end_query_payload *p = (struct tc_end_query_payload *)payload; + struct threaded_query *tq = threaded_query(p->query); + + if (!tq->head_unflushed.next) + LIST_ADD(&tq->head_unflushed, &p->tc->unflushed_queries); + + pipe->end_query(pipe, p->query); } static bool @@ -340,20 +403,20 @@ tc_end_query(struct pipe_context *_pipe, struct pipe_query *query) { struct threaded_context *tc = threaded_context(_pipe); struct threaded_query *tq = threaded_query(query); - union tc_payload *payload = tc_add_small_call(tc, TC_CALL_end_query); + struct tc_end_query_payload *payload = + tc_add_struct_typed_call(tc, TC_CALL_end_query, tc_end_query_payload); + payload->tc = tc; payload->query = query; tq->flushed = false; - if (!tq->head_unflushed.next) - LIST_ADD(&tq->head_unflushed, &tc->unflushed_queries); return true; /* we don't care about the return value for this call */ } -static boolean +static bool tc_get_query_result(struct pipe_context *_pipe, - struct pipe_query *query, boolean 
wait, + struct pipe_query *query, bool wait, union pipe_query_result *result) { struct threaded_context *tc = threaded_context(_pipe); @@ -367,15 +430,17 @@ tc_get_query_result(struct pipe_context *_pipe, if (success) { tq->flushed = true; - if (tq->head_unflushed.next) + if (tq->head_unflushed.next) { + /* This is safe because it can only happen after we sync'd. */ LIST_DEL(&tq->head_unflushed); + } } return success; } struct tc_query_result_resource { struct pipe_query *query; - boolean wait; + bool wait; enum pipe_query_value_type result_type; int index; struct pipe_resource *resource; @@ -395,7 +460,7 @@ tc_call_get_query_result_resource(struct pipe_context *pipe, static void tc_get_query_result_resource(struct pipe_context *_pipe, - struct pipe_query *query, boolean wait, + struct pipe_query *query, bool wait, enum pipe_query_value_type result_type, int index, struct pipe_resource *resource, unsigned offset) { @@ -427,7 +492,7 @@ tc_call_render_condition(struct pipe_context *pipe, union tc_payload *payload) static void tc_render_condition(struct pipe_context *_pipe, - struct pipe_query *query, boolean condition, + struct pipe_query *query, bool condition, enum pipe_render_cond_flag mode) { struct threaded_context *tc = threaded_context(_pipe); @@ -611,6 +676,7 @@ tc_set_constant_buffer(struct pipe_context *_pipe, if (cb && cb->user_buffer) { u_upload_data(tc->base.const_uploader, 0, cb->buffer_size, 64, cb->user_buffer, &offset, &buffer); + u_upload_unmap(tc->base.const_uploader); } struct tc_constant_buffer *p = @@ -705,7 +771,7 @@ tc_call_set_window_rectangles(struct pipe_context *pipe, } static void -tc_set_window_rectangles(struct pipe_context *_pipe, boolean include, +tc_set_window_rectangles(struct pipe_context *_pipe, bool include, unsigned count, const struct pipe_scissor_state *rects) { @@ -813,7 +879,8 @@ tc_set_shader_images(struct pipe_context *_pipe, struct threaded_resource *tres = threaded_resource(images[i].resource); - util_range_add(&tres->valid_buffer_range, images[i].u.buf.offset, + util_range_add(&tres->b, &tres->valid_buffer_range, + images[i].u.buf.offset, images[i].u.buf.offset + images[i].u.buf.size); } } @@ -824,6 +891,7 @@ tc_set_shader_images(struct pipe_context *_pipe, struct tc_shader_buffers { ubyte shader, start, count; bool unbind; + unsigned writable_bitmask; struct pipe_shader_buffer slot[0]; /* more will be allocated if needed */ }; @@ -834,11 +902,12 @@ tc_call_set_shader_buffers(struct pipe_context *pipe, union tc_payload *payload) unsigned count = p->count; if (p->unbind) { - pipe->set_shader_buffers(pipe, p->shader, p->start, p->count, NULL); + pipe->set_shader_buffers(pipe, p->shader, p->start, p->count, NULL, 0); return; } - pipe->set_shader_buffers(pipe, p->shader, p->start, p->count, p->slot); + pipe->set_shader_buffers(pipe, p->shader, p->start, p->count, p->slot, + p->writable_bitmask); for (unsigned i = 0; i < count; i++) pipe_resource_reference(&p->slot[i].buffer, NULL); @@ -848,7 +917,8 @@ static void tc_set_shader_buffers(struct pipe_context *_pipe, enum pipe_shader_type shader, unsigned start, unsigned count, - const struct pipe_shader_buffer *buffers) + const struct pipe_shader_buffer *buffers, + unsigned writable_bitmask) { if (!count) return; @@ -862,6 +932,7 @@ tc_set_shader_buffers(struct pipe_context *_pipe, p->start = start; p->count = count; p->unbind = buffers == NULL; + p->writable_bitmask = writable_bitmask; if (buffers) { for (unsigned i = 0; i < count; i++) { @@ -875,7 +946,8 @@ tc_set_shader_buffers(struct pipe_context 
*_pipe, if (src->buffer) { struct threaded_resource *tres = threaded_resource(src->buffer); - util_range_add(&tres->valid_buffer_range, src->buffer_offset, + util_range_add(&tres->b, &tres->valid_buffer_range, + src->buffer_offset, src->buffer_offset + src->buffer_size); } } @@ -1065,7 +1137,7 @@ tc_create_stream_output_target(struct pipe_context *_pipe, struct pipe_stream_output_target *view; tc_sync(threaded_context(_pipe)); - util_range_add(&tres->valid_buffer_range, buffer_offset, + util_range_add(&tres->b, &tres->valid_buffer_range, buffer_offset, buffer_offset + buffer_size); view = pipe->create_stream_output_target(pipe, res, buffer_offset, @@ -1459,7 +1531,8 @@ tc_buffer_do_flush_region(struct threaded_context *tc, if (ttrans->staging) { struct pipe_box src_box; - u_box_1d(ttrans->offset + box->x % tc->map_buffer_alignment, + u_box_1d(ttrans->offset + ttrans->b.box.x % tc->map_buffer_alignment + + (box->x - ttrans->b.box.x), box->width, &src_box); /* Copy the staging buffer into the original one. */ @@ -1467,7 +1540,8 @@ tc_buffer_do_flush_region(struct threaded_context *tc, ttrans->staging, 0, &src_box); } - util_range_add(tres->base_valid_buffer_range, box->x, box->x + box->width); + util_range_add(&tres->b, tres->base_valid_buffer_range, + box->x, box->x + box->width); } static void @@ -1559,8 +1633,11 @@ tc_buffer_subdata(struct pipe_context *_pipe, if (!size) return; - usage |= PIPE_TRANSFER_WRITE | - PIPE_TRANSFER_DISCARD_RANGE; + usage |= PIPE_TRANSFER_WRITE; + + /* PIPE_TRANSFER_MAP_DIRECTLY supresses implicit DISCARD_RANGE. */ + if (!(usage & PIPE_TRANSFER_MAP_DIRECTLY)) + usage |= PIPE_TRANSFER_DISCARD_RANGE; usage = tc_improve_map_buffer_flags(tc, tres, usage, offset, size); @@ -1584,7 +1661,7 @@ tc_buffer_subdata(struct pipe_context *_pipe, return; } - util_range_add(&tres->valid_buffer_range, offset, offset + size); + util_range_add(&tres->b, &tres->valid_buffer_range, offset, offset + size); /* The upload is small. Enqueue it. 
*/ struct tc_buffer_subdata *p = @@ -1758,15 +1835,33 @@ tc_set_debug_callback(struct pipe_context *_pipe, pipe->set_debug_callback(pipe, cb); } +static void +tc_set_log_context(struct pipe_context *_pipe, struct u_log_context *log) +{ + struct threaded_context *tc = threaded_context(_pipe); + struct pipe_context *pipe = tc->pipe; + + tc_sync(tc); + pipe->set_log_context(pipe, log); +} + static void tc_create_fence_fd(struct pipe_context *_pipe, - struct pipe_fence_handle **fence, int fd) + struct pipe_fence_handle **fence, int fd, + enum pipe_fd_type type) { struct threaded_context *tc = threaded_context(_pipe); struct pipe_context *pipe = tc->pipe; tc_sync(tc); - pipe->create_fence_fd(pipe, fence, fd); + pipe->create_fence_fd(pipe, fence, fd, type); +} + +static void +tc_call_fence_server_sync(struct pipe_context *pipe, union tc_payload *payload) +{ + pipe->fence_server_sync(pipe, payload->fence); + pipe->screen->fence_reference(pipe->screen, &payload->fence, NULL); } static void @@ -1774,49 +1869,181 @@ tc_fence_server_sync(struct pipe_context *_pipe, struct pipe_fence_handle *fence) { struct threaded_context *tc = threaded_context(_pipe); - struct pipe_context *pipe = tc->pipe; + struct pipe_screen *screen = tc->pipe->screen; + union tc_payload *payload = tc_add_small_call(tc, TC_CALL_fence_server_sync); - tc_sync(tc); - pipe->fence_server_sync(pipe, fence); + payload->fence = NULL; + screen->fence_reference(screen, &payload->fence, fence); +} + +static void +tc_call_fence_server_signal(struct pipe_context *pipe, union tc_payload *payload) +{ + pipe->fence_server_signal(pipe, payload->fence); + pipe->screen->fence_reference(pipe->screen, &payload->fence, NULL); +} + +static void +tc_fence_server_signal(struct pipe_context *_pipe, + struct pipe_fence_handle *fence) +{ + struct threaded_context *tc = threaded_context(_pipe); + struct pipe_screen *screen = tc->pipe->screen; + union tc_payload *payload = tc_add_small_call(tc, TC_CALL_fence_server_signal); + + payload->fence = NULL; + screen->fence_reference(screen, &payload->fence, fence); } static struct pipe_video_codec * -tc_create_video_codec(struct pipe_context *_pipe, - const struct pipe_video_codec *templ) +tc_create_video_codec(UNUSED struct pipe_context *_pipe, + UNUSED const struct pipe_video_codec *templ) { unreachable("Threaded context should not be enabled for video APIs"); return NULL; } static struct pipe_video_buffer * -tc_create_video_buffer(struct pipe_context *_pipe, - const struct pipe_video_buffer *templ) +tc_create_video_buffer(UNUSED struct pipe_context *_pipe, + UNUSED const struct pipe_video_buffer *templ) { unreachable("Threaded context should not be enabled for video APIs"); return NULL; } +struct tc_context_param { + enum pipe_context_param param; + unsigned value; +}; + +static void +tc_call_set_context_param(struct pipe_context *pipe, + union tc_payload *payload) +{ + struct tc_context_param *p = (struct tc_context_param*)payload; + + if (pipe->set_context_param) + pipe->set_context_param(pipe, p->param, p->value); +} + +static void +tc_set_context_param(struct pipe_context *_pipe, + enum pipe_context_param param, + unsigned value) +{ + struct threaded_context *tc = threaded_context(_pipe); + + if (tc->pipe->set_context_param) { + struct tc_context_param *payload = + tc_add_struct_typed_call(tc, TC_CALL_set_context_param, + tc_context_param); + + payload->param = param; + payload->value = value; + } + + if (param == PIPE_CONTEXT_PARAM_PIN_THREADS_TO_L3_CACHE) { + /* Pin the gallium thread as requested. 
*/ + util_pin_thread_to_L3(tc->queue.threads[0], value, + util_cpu_caps.cores_per_L3); + } +} + /******************************************************************** * draw, launch, clear, blit, copy, flush */ +struct tc_flush_payload { + struct threaded_context *tc; + struct pipe_fence_handle *fence; + unsigned flags; +}; + +static void +tc_flush_queries(struct threaded_context *tc) +{ + struct threaded_query *tq, *tmp; + LIST_FOR_EACH_ENTRY_SAFE(tq, tmp, &tc->unflushed_queries, head_unflushed) { + LIST_DEL(&tq->head_unflushed); + + /* Memory release semantics: due to a possible race with + * tc_get_query_result, we must ensure that the linked list changes + * are visible before setting tq->flushed. + */ + p_atomic_set(&tq->flushed, true); + } +} + +static void +tc_call_flush(struct pipe_context *pipe, union tc_payload *payload) +{ + struct tc_flush_payload *p = (struct tc_flush_payload *)payload; + struct pipe_screen *screen = pipe->screen; + + pipe->flush(pipe, p->fence ? &p->fence : NULL, p->flags); + screen->fence_reference(screen, &p->fence, NULL); + + if (!(p->flags & PIPE_FLUSH_DEFERRED)) + tc_flush_queries(p->tc); +} + static void tc_flush(struct pipe_context *_pipe, struct pipe_fence_handle **fence, unsigned flags) { struct threaded_context *tc = threaded_context(_pipe); struct pipe_context *pipe = tc->pipe; - struct threaded_query *tq, *tmp; + struct pipe_screen *screen = pipe->screen; + bool async = flags & PIPE_FLUSH_DEFERRED; - LIST_FOR_EACH_ENTRY_SAFE(tq, tmp, &tc->unflushed_queries, head_unflushed) { - tq->flushed = true; - LIST_DEL(&tq->head_unflushed); + if (flags & PIPE_FLUSH_ASYNC) { + struct tc_batch *last = &tc->batch_slots[tc->last]; + + /* Prefer to do the flush in the driver thread, but avoid the inter-thread + * communication overhead if the driver thread is currently idle and the + * caller is going to wait for the fence immediately anyway. + */ + if (!(util_queue_fence_is_signalled(&last->fence) && + (flags & PIPE_FLUSH_HINT_FINISH))) + async = true; + } + + if (async && tc->create_fence) { + if (fence) { + struct tc_batch *next = &tc->batch_slots[tc->next]; + + if (!next->token) { + next->token = malloc(sizeof(*next->token)); + if (!next->token) + goto out_of_memory; + + pipe_reference_init(&next->token->ref, 1); + next->token->tc = tc; + } + + screen->fence_reference(screen, fence, tc->create_fence(pipe, next->token)); + if (!*fence) + goto out_of_memory; + } + + struct tc_flush_payload *p = + tc_add_struct_typed_call(tc, TC_CALL_flush, tc_flush_payload); + p->tc = tc; + p->fence = fence ? *fence : NULL; + p->flags = flags | TC_FLUSH_ASYNC; + + if (!(flags & PIPE_FLUSH_DEFERRED)) + tc_batch_flush(tc); + return; } - /* TODO: deferred flushes? */ +out_of_memory: tc_sync_msg(tc, flags & PIPE_FLUSH_END_OF_FRAME ? "end of frame" : flags & PIPE_FLUSH_DEFERRED ? 
"deferred fence" : "normal"); + + if (!(flags & PIPE_FLUSH_DEFERRED)) + tc_flush_queries(tc); pipe->flush(pipe, fence, flags); } @@ -1961,7 +2188,8 @@ tc_resource_copy_region(struct pipe_context *_pipe, p->src_box = *src_box; if (dst->target == PIPE_BUFFER) - util_range_add(&tdst->valid_buffer_range, dstx, dstx + src_box->width); + util_range_add(&tdst->b, &tdst->valid_buffer_range, + dstx, dstx + src_box->width); } static void @@ -1999,7 +2227,7 @@ static void tc_call_generate_mipmap(struct pipe_context *pipe, union tc_payload *payload) { struct tc_generate_mipmap *p = (struct tc_generate_mipmap *)payload; - bool MAYBE_UNUSED result = pipe->generate_mipmap(pipe, p->res, p->format, + ASSERTED bool result = pipe->generate_mipmap(pipe, p->res, p->format, p->base_level, p->last_level, p->first_layer, @@ -2008,7 +2236,7 @@ tc_call_generate_mipmap(struct pipe_context *pipe, union tc_payload *payload) pipe_resource_reference(&p->res, NULL); } -static boolean +static bool tc_generate_mipmap(struct pipe_context *_pipe, struct pipe_resource *res, enum pipe_format format, @@ -2028,7 +2256,8 @@ tc_generate_mipmap(struct pipe_context *_pipe, bind = PIPE_BIND_RENDER_TARGET; if (!screen->is_format_supported(screen, format, res->target, - res->nr_samples, bind)) + res->nr_samples, res->nr_storage_samples, + bind)) return false; struct tc_generate_mipmap *p = @@ -2176,7 +2405,7 @@ tc_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res, memcpy(p->clear_value, clear_value, clear_value_size); p->clear_value_size = clear_value_size; - util_range_add(&tres->valid_buffer_range, offset, offset + size); + util_range_add(&tres->b, &tres->valid_buffer_range, offset, offset + size); } struct tc_clear_texture { @@ -2242,6 +2471,41 @@ tc_resource_commit(struct pipe_context *_pipe, struct pipe_resource *res, } +/******************************************************************** + * callback + */ + +struct tc_callback_payload { + void (*fn)(void *data); + void *data; +}; + +static void +tc_call_callback(UNUSED struct pipe_context *pipe, union tc_payload *payload) +{ + struct tc_callback_payload *p = (struct tc_callback_payload *)payload; + + p->fn(p->data); +} + +static void +tc_callback(struct pipe_context *_pipe, void (*fn)(void *), void *data, + bool asap) +{ + struct threaded_context *tc = threaded_context(_pipe); + + if (asap && tc_is_sync(tc)) { + fn(data); + return; + } + + struct tc_callback_payload *p = + tc_add_struct_typed_call(tc, TC_CALL_callback, tc_callback_payload); + p->fn = fn; + p->data = data; +} + + /******************************************************************** * create & destroy */ @@ -2264,8 +2528,10 @@ tc_destroy(struct pipe_context *_pipe) if (util_queue_is_initialized(&tc->queue)) { util_queue_destroy(&tc->queue); - for (unsigned i = 0; i < TC_MAX_BATCHES; i++) + for (unsigned i = 0; i < TC_MAX_BATCHES; i++) { util_queue_fence_destroy(&tc->batch_slots[i].fence); + assert(!tc->batch_slots[i].token); + } } slab_destroy_child(&tc->pool_transfers); @@ -2296,6 +2562,7 @@ struct pipe_context * threaded_context_create(struct pipe_context *pipe, struct slab_parent_pool *parent_transfer_pool, tc_replace_buffer_storage_func replace_buffer, + tc_create_fence_func create_fence, struct threaded_context **out) { struct threaded_context *tc; @@ -2330,11 +2597,13 @@ threaded_context_create(struct pipe_context *pipe, tc->pipe = pipe; tc->replace_buffer_storage = replace_buffer; + tc->create_fence = create_fence; tc->map_buffer_alignment = pipe->screen->get_param(pipe->screen, 
PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT); tc->base.priv = pipe; /* priv points to the wrapped driver context */ tc->base.screen = pipe->screen; tc->base.destroy = tc_destroy; + tc->base.callback = tc_callback; tc->base.stream_uploader = u_upload_clone(&tc->base, pipe->stream_uploader); if (pipe->stream_uploader == pipe->const_uploader) @@ -2349,7 +2618,7 @@ threaded_context_create(struct pipe_context *pipe, * from the queue before being executed, so keep one tc_batch slot for that * execution. Also, keep one unused slot for an unflushed batch. */ - if (!util_queue_init(&tc->queue, "gallium_drv", TC_MAX_BATCHES - 2, 1, 0)) + if (!util_queue_init(&tc->queue, "gdrv", TC_MAX_BATCHES - 2, 1, 0)) goto fail; for (unsigned i = 0; i < TC_MAX_BATCHES; i++) { @@ -2362,6 +2631,8 @@ threaded_context_create(struct pipe_context *pipe, slab_create_child(&tc->pool_transfers, parent_transfer_pool); + tc->base.set_context_param = tc_set_context_param; /* always set this */ + #define CTX_INIT(_member) \ tc->base._member = tc->pipe->_member ? tc_##_member : NULL @@ -2459,10 +2730,12 @@ threaded_context_create(struct pipe_context *pipe, CTX_INIT(get_device_reset_status); CTX_INIT(set_device_reset_callback); CTX_INIT(dump_debug_state); + CTX_INIT(set_log_context); CTX_INIT(emit_string_marker); CTX_INIT(set_debug_callback); CTX_INIT(create_fence_fd); CTX_INIT(fence_server_sync); + CTX_INIT(fence_server_signal); CTX_INIT(get_timestamp); CTX_INIT(create_texture_handle); CTX_INIT(delete_texture_handle);
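
Note on using the new hooks introduced by this patch: threaded_context_create() now takes a create_fence callback, and threaded_context_flush() is exported so a driver's fence_finish can flush a still-unflushed deferred fence of the same context. Below is a minimal driver-side sketch, not part of this patch; the "my_*" names and the fence layout are invented for illustration and assume the usual Mesa helpers (CALLOC_STRUCT, pipe_reference_init). Only threaded_context_create(), threaded_context_flush() and tc_unflushed_batch_token_reference() come from u_threaded_context.h; the actual integration in a real driver (e.g. radeonsi) differs in detail.

/* Hypothetical driver-side sketch (not part of this patch). */

struct my_fence {
   struct pipe_reference reference;
   /* Non-NULL while the fence belongs to a tc batch that has not been
    * flushed to the driver thread yet. */
   struct tc_unflushed_batch_token *tc_token;
   /* ... driver-specific fence state ... */
};

/* Passed as the create_fence argument of threaded_context_create().
 * Called while a deferred flush is being recorded. */
static struct pipe_fence_handle *
my_create_fence(struct pipe_context *pipe, struct tc_unflushed_batch_token *token)
{
   struct my_fence *fence = CALLOC_STRUCT(my_fence);

   if (!fence)
      return NULL;

   pipe_reference_init(&fence->reference, 1);
   /* Keep a reference to the token so fence_finish can find and flush the
    * batch that will eventually signal this fence.  A real fence destructor
    * would drop this reference again. */
   tc_unflushed_batch_token_reference(&fence->tc_token, token);
   return (struct pipe_fence_handle *)fence;
}

/* pipe_screen::fence_finish.  "ctx" is the wrapped (threaded) context passed
 * in by the state tracker, which is what threaded_context_flush() expects. */
static bool
my_fence_finish(struct pipe_screen *screen, struct pipe_context *ctx,
                struct pipe_fence_handle *handle, uint64_t timeout)
{
   struct my_fence *fence = (struct my_fence *)handle;

   /* Same-context wait on a deferred fence whose batch may still be
    * unflushed: flush it first, asynchronously when we won't block. */
   if (ctx && fence->tc_token)
      threaded_context_flush(ctx, fence->tc_token, timeout == 0);

   /* ... wait on the driver fence for up to "timeout" ns ... */
   return true;
}

At context-creation time the callback is handed to the threaded context alongside the existing replace-buffer-storage hook, e.g. threaded_context_create(pipe, &screen->pool_transfers, my_replace_buffer_storage, my_create_fence, &tc); passing NULL for create_fence keeps the old synchronous-flush behavior, since tc_flush only takes the asynchronous path when tc->create_fence is set.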