if (!main_part)
return false;
+ /* We can leave the fence as permanently signaled because the
+ * main part becomes visible globally only after it has been
+ * compiled. */
+ util_queue_fence_init(&main_part->ready);
+
main_part->selector = sel;
main_part->key.as_es = key->as_es;
main_part->key.as_ls = key->as_ls;
* variants, it will cost just a computation of the key and this
* test. */
if (likely(current &&
- memcmp(&current->key, key, sizeof(*key)) == 0 &&
- (!current->is_optimized ||
- util_queue_fence_is_signalled(&current->optimized_ready))))
+ memcmp(&current->key, key, sizeof(*key)) == 0)) {
+ if (unlikely(!util_queue_fence_is_signalled(&current->ready))) {
+ if (current->is_optimized) {
+ memset(&key->opt, 0, sizeof(key->opt));
+ goto current_not_ready;
+ }
+
+ util_queue_fence_wait(&current->ready);
+ }
+
return current->compilation_failed ? -1 : 0;
+ }
+current_not_ready:
/* This must be done before the mutex is locked, because async GS
* compilation calls this function too, and therefore must enter
/* Don't check the "current" shader. We checked it above. */
if (current != iter &&
memcmp(&iter->key, key, sizeof(*key)) == 0) {
- /* If it's an optimized shader and its compilation has
- * been started but isn't done, use the unoptimized
- * shader so as not to cause a stall due to compilation.
- */
- if (iter->is_optimized &&
- !util_queue_fence_is_signalled(&iter->optimized_ready)) {
- memset(&key->opt, 0, sizeof(key->opt));
- mtx_unlock(&sel->mutex);
- goto again;
+ if (unlikely(!util_queue_fence_is_signalled(&iter->ready))) {
+ /* If it's an optimized shader and its compilation has
+ * been started but isn't done, use the unoptimized
+ * shader so as not to cause a stall due to compilation.
+ */
+ if (iter->is_optimized) {
+ memset(&key->opt, 0, sizeof(key->opt));
+ mtx_unlock(&sel->mutex);
+ goto again;
+ }
+
+ util_queue_fence_wait(&iter->ready);
}
if (iter->compilation_failed) {
mtx_unlock(&sel->mutex);
return -ENOMEM;
}
+
+ util_queue_fence_init(&shader->ready);
+
shader->selector = sel;
shader->key = *key;
shader->compiler_ctx_state = *compiler_state;
shader->is_optimized =
!is_pure_monolithic &&
memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
- if (shader->is_optimized)
- util_queue_fence_init(&shader->optimized_ready);
-
- if (!sel->last_variant) {
- sel->first_variant = shader;
- sel->last_variant = shader;
- } else {
- sel->last_variant->next_variant = shader;
- sel->last_variant = shader;
- }
/* If it's an optimized shader, compile it asynchronously. */
if (shader->is_optimized &&
thread_index < 0) {
/* Compile it asynchronously. */
util_queue_add_job(&sscreen->shader_compiler_queue_low_priority,
- shader, &shader->optimized_ready,
+ shader, &shader->ready,
si_build_shader_variant_low_priority, NULL);
+ /* Add only after the ready fence was reset, to guard against a
+ * race with si_bind_XX_shader. */
+ if (!sel->last_variant) {
+ sel->first_variant = shader;
+ sel->last_variant = shader;
+ } else {
+ sel->last_variant->next_variant = shader;
+ sel->last_variant = shader;
+ }
+
/* Use the default (unoptimized) shader for now. */
memset(&key->opt, 0, sizeof(key->opt));
mtx_unlock(&sel->mutex);
goto again;
}
+ /* Reset the fence before adding to the variant list. */
+ util_queue_fence_reset(&shader->ready);
+
+ if (!sel->last_variant) {
+ sel->first_variant = shader;
+ sel->last_variant = shader;
+ } else {
+ sel->last_variant->next_variant = shader;
+ sel->last_variant = shader;
+ }
+
assert(!shader->is_optimized);
si_build_shader_variant(shader, thread_index, false);
+ util_queue_fence_signal(&shader->ready);
+
if (!shader->compilation_failed)
state->current = shader;
return;
}
+ /* We can leave the fence signaled because use of the default
+ * main part is guarded by the selector's ready fence. */
+ util_queue_fence_init(&shader->ready);
+
shader->selector = sel;
si_parse_next_shader_property(&sel->info,
sel->so.num_outputs != 0,
{
if (shader->is_optimized) {
util_queue_drop_job(&sctx->screen->shader_compiler_queue_low_priority,
- &shader->optimized_ready);
- util_queue_fence_destroy(&shader->optimized_ready);
+ &shader->ready);
}
+ util_queue_fence_destroy(&shader->ready);
+
if (shader->pm4) {
switch (shader->selector->type) {
case PIPE_SHADER_VERTEX: