void *ir_binary = si_get_ir_binary(sel, false, false);
/* Try to load the shader from the shader cache. */
- mtx_lock(&sscreen->shader_cache_mutex);
+ simple_mtx_lock(&sscreen->shader_cache_mutex);
if (ir_binary &&
si_shader_cache_load_shader(sscreen, ir_binary, shader)) {
- mtx_unlock(&sscreen->shader_cache_mutex);
+ simple_mtx_unlock(&sscreen->shader_cache_mutex);
si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
si_shader_dump(sscreen, shader, debug, stderr, true);
if (!si_shader_binary_upload(sscreen, shader, 0))
program->shader.compilation_failed = true;
} else {
- mtx_unlock(&sscreen->shader_cache_mutex);
+ simple_mtx_unlock(&sscreen->shader_cache_mutex);
if (!si_shader_create(sscreen, compiler, &program->shader, debug)) {
program->shader.compilation_failed = true;
S_00B84C_LDS_SIZE(shader->config.lds_size);
if (ir_binary) {
- mtx_lock(&sscreen->shader_cache_mutex);
+ simple_mtx_lock(&sscreen->shader_cache_mutex);
if (!si_shader_cache_insert_shader(sscreen, ir_binary, shader, true))
FREE(ir_binary);
- mtx_unlock(&sscreen->shader_cache_mutex);
+ simple_mtx_unlock(&sscreen->shader_cache_mutex);
}
}
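Both branches of this hunk drop shader_cache_mutex before the expensive step (binary upload on a hit, full compile on a miss), so the lock only covers the hash-table lookup and insert. A minimal sketch of that pattern, with hypothetical `cache`/`build_expensive` stand-ins for the radeonsi types:

```c
#include <stdbool.h>
#include "util/simple_mtx.h"
#include "util/hash_table.h"

struct cache {                       /* hypothetical stand-in */
   simple_mtx_t mutex;
   struct hash_table *table;
};

void *build_expensive(const void *key);   /* hypothetical compile step */

static void *cache_get_or_build(struct cache *c, const void *key)
{
   simple_mtx_lock(&c->mutex);
   struct hash_entry *e = _mesa_hash_table_search(c->table, key);
   if (e) {
      void *hit = e->data;
      simple_mtx_unlock(&c->mutex);  /* hit: use the result unlocked */
      return hit;
   }
   simple_mtx_unlock(&c->mutex);     /* miss: compile outside the lock */

   void *obj = build_expensive(key);
   if (!obj)
      return NULL;

   simple_mtx_lock(&c->mutex);       /* re-take only for the insert */
   _mesa_hash_table_insert(c->table, key, obj);
   simple_mtx_unlock(&c->mutex);
   return obj;
}
```

Note how the hunk frees `ir_binary` when the insert fails: the cache takes ownership of the key blob only on a successful insert.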
{
struct si_context *ctx = (struct si_context*)sscreen->aux_context;
- mtx_lock(&sscreen->aux_context_lock);
+ simple_mtx_lock(&sscreen->aux_context_lock);
si_sdma_clear_buffer(ctx, dst, offset, size, value);
sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
- mtx_unlock(&sscreen->aux_context_lock);
+ simple_mtx_unlock(&sscreen->aux_context_lock);
}
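This hunk is the aux-context protocol spelled out in the si_pipe.h comment further down (lock before use, flush before unlock), shown here as a self-contained helper; `clear_via_aux_context` is a hypothetical name and the parameter types follow the call in the hunk:

```c
#include "util/simple_mtx.h"

/* Hypothetical wrapper showing the aux-context discipline: all work
 * recorded on sscreen->aux_context happens between lock and flush, so
 * the next lock holder always starts from an idle context. */
static void clear_via_aux_context(struct si_screen *sscreen,
                                  struct pipe_resource *dst,
                                  uint64_t offset, uint64_t size,
                                  unsigned value)
{
   simple_mtx_lock(&sscreen->aux_context_lock);
   struct si_context *ctx = (struct si_context *)sscreen->aux_context;
   si_sdma_clear_buffer(ctx, dst, offset, size, value);
   /* Flush before unlocking, per the rule in si_pipe.h. */
   sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
   simple_mtx_unlock(&sscreen->aux_context_lock);
}
```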
{
/* Start the thread if needed. */
if (!sscreen->gpu_load_thread) {
- mtx_lock(&sscreen->gpu_load_mutex);
+ simple_mtx_lock(&sscreen->gpu_load_mutex);
/* Check again inside the mutex. */
if (!sscreen->gpu_load_thread)
sscreen->gpu_load_thread =
u_thread_create(si_gpu_load_thread, sscreen);
- mtx_unlock(&sscreen->gpu_load_mutex);
+ simple_mtx_unlock(&sscreen->gpu_load_mutex);
}
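The gpu_load_thread start is textbook double-checked locking: the unlocked test keeps the common case (thread already running) lock-free, and the re-check under gpu_load_mutex ensures only one caller actually spawns it. Reduced to its skeleton:

```c
#include "util/simple_mtx.h"
#include "util/u_thread.h"

static void ensure_gpu_load_thread(struct si_screen *sscreen)
{
   if (sscreen->gpu_load_thread)           /* fast path: no lock taken */
      return;

   simple_mtx_lock(&sscreen->gpu_load_mutex);
   /* Re-check: another thread may have won the race while we waited
    * for the mutex. */
   if (!sscreen->gpu_load_thread)
      sscreen->gpu_load_thread =
         u_thread_create(si_gpu_load_thread, sscreen);
   simple_mtx_unlock(&sscreen->gpu_load_mutex);
}
```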
unsigned busy = p_atomic_read(&sscreen->mmio_counters.array[busy_index]);
if (!sscreen->ws->unref(sscreen->ws))
return;
- mtx_destroy(&sscreen->aux_context_lock);
+ simple_mtx_destroy(&sscreen->aux_context_lock);
struct u_log_context *aux_log = ((struct si_context *)sscreen->aux_context)->log;
if (aux_log) {
FREE(part);
}
}
- mtx_destroy(&sscreen->shader_parts_mutex);
+ simple_mtx_destroy(&sscreen->shader_parts_mutex);
si_destroy_shader_cache(sscreen);
si_destroy_perfcounters(sscreen);
si_gpu_load_kill_thread(sscreen);
- mtx_destroy(&sscreen->gpu_load_mutex);
+ simple_mtx_destroy(&sscreen->gpu_load_mutex);
slab_destroy_parent(&sscreen->pool_transfers);
1 << util_logbase2(sscreen->force_aniso));
}
- (void) mtx_init(&sscreen->aux_context_lock, mtx_plain);
- (void) mtx_init(&sscreen->gpu_load_mutex, mtx_plain);
+ (void) simple_mtx_init(&sscreen->aux_context_lock, mtx_plain);
+ (void) simple_mtx_init(&sscreen->gpu_load_mutex, mtx_plain);
si_init_gs_info(sscreen);
if (!si_init_shader_cache(sscreen)) {
sscreen->dcc_msaa_allowed =
!(sscreen->debug_flags & DBG(NO_DCC_MSAA));
- (void) mtx_init(&sscreen->shader_parts_mutex, mtx_plain);
+ (void) simple_mtx_init(&sscreen->shader_parts_mutex, mtx_plain);
sscreen->use_monolithic_shaders =
(sscreen->debug_flags & DBG(MONOLITHIC_SHADERS)) != 0;
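For reference, util/simple_mtx.h (pulled in by the new include below) mirrors the C11 mtx_* calls but supports only mtx_plain; where futexes are available it is a bare futex, and elsewhere it falls back to mtx_t. simple_mtx_init returns void, so the `(void)` casts above are simply carried over from the old mtx_init calls. The full lifecycle in one place:

```c
#include "util/simple_mtx.h"

static simple_mtx_t demo_lock;

static void demo(void)
{
   simple_mtx_init(&demo_lock, mtx_plain); /* only mtx_plain is valid */
   simple_mtx_lock(&demo_lock);
   /* ... critical section ... */
   simple_mtx_unlock(&demo_lock);
   simple_mtx_destroy(&demo_lock);
}
```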
/* Auxiliary context. Mainly used to initialize resources.
* It must be locked prior to using and flushed before unlocking. */
struct pipe_context *aux_context;
- mtx_t aux_context_lock;
+ simple_mtx_t aux_context_lock;
/* This must be in the screen, because UE4 uses one context for
 * compilation and another one for rendering. */
unsigned num_shader_cache_hits;
/* GPU load thread. */
- mtx_t gpu_load_mutex;
+ simple_mtx_t gpu_load_mutex;
thrd_t gpu_load_thread;
union si_mmio_counters mmio_counters;
volatile unsigned gpu_load_stop_thread; /* bool */
unsigned L2_to_cp;
} barrier_flags;
- mtx_t shader_parts_mutex;
+ simple_mtx_t shader_parts_mutex;
struct si_shader_part *vs_prologs;
struct si_shader_part *tcs_epilogs;
struct si_shader_part *gs_prologs;
* - GS and CS aren't cached, but it's certainly possible to cache
* those as well.
*/
- mtx_t shader_cache_mutex;
+ simple_mtx_t shader_cache_mutex;
struct hash_table *shader_cache;
/* Shader compiler queue for multithreaded compilation. */
{
struct si_shader_part *result;
- mtx_lock(&sscreen->shader_parts_mutex);
+ simple_mtx_lock(&sscreen->shader_parts_mutex);
/* Find existing. */
for (result = *list; result; result = result->next) {
if (memcmp(&result->key, key, sizeof(*key)) == 0) {
- mtx_unlock(&sscreen->shader_parts_mutex);
+ simple_mtx_unlock(&sscreen->shader_parts_mutex);
return result;
}
}
out:
si_llvm_dispose(&ctx);
- mtx_unlock(&sscreen->shader_parts_mutex);
+ simple_mtx_unlock(&sscreen->shader_parts_mutex);
return result;
}
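Unlike the shader cache above, the shader-part lists keep shader_parts_mutex held across the whole find-or-create (the `out:` path unlocks only after compilation), so a given key can never be built twice. The shape of it, with hypothetical part types standing in for si_shader_part:

```c
#include <string.h>
#include "util/simple_mtx.h"

struct part_key { unsigned data[4]; };    /* hypothetical */

struct part {                             /* hypothetical */
   struct part_key key;
   struct part *next;
};

struct part *build_part(const struct part_key *key);  /* hypothetical */

static struct part *get_or_create_part(simple_mtx_t *mutex,
                                       struct part **list,
                                       const struct part_key *key)
{
   struct part *p;

   simple_mtx_lock(mutex);
   for (p = *list; p; p = p->next) {
      if (memcmp(&p->key, key, sizeof(*key)) == 0) {
         simple_mtx_unlock(mutex);
         return p;
      }
   }

   /* Build while still holding the lock: slower under contention, but
    * duplicates can never enter the list. */
   p = build_part(key);
   if (p) {
      p->next = *list;                    /* prepend, as the radeonsi lists do */
      *list = p;
   }
   simple_mtx_unlock(mutex);
   return p;
}
```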
#include "tgsi/tgsi_scan.h"
#include "util/u_inlines.h"
#include "util/u_queue.h"
+#include "util/simple_mtx.h"
#include "ac_binary.h"
#include "ac_llvm_build.h"
struct util_queue_fence ready;
struct si_compiler_ctx_state compiler_ctx_state;
- mtx_t mutex;
+ simple_mtx_t mutex;
struct si_shader *first_variant; /* immutable after the first variant */
struct si_shader *last_variant; /* mutable */
bool si_init_shader_cache(struct si_screen *sscreen)
{
- (void) mtx_init(&sscreen->shader_cache_mutex, mtx_plain);
+ (void) simple_mtx_init(&sscreen->shader_cache_mutex, mtx_plain);
sscreen->shader_cache =
_mesa_hash_table_create(NULL,
si_shader_cache_key_hash,
if (sscreen->shader_cache)
_mesa_hash_table_destroy(sscreen->shader_cache,
si_destroy_shader_cache_entry);
- mtx_destroy(&sscreen->shader_cache_mutex);
+ simple_mtx_destroy(&sscreen->shader_cache_mutex);
}
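si_init_shader_cache pairs the mutex with a _mesa_hash_table_create() call taking a hash and an equality callback; si_destroy_shader_cache tears both down in reverse order. A sketch with hypothetical fixed-size keys (the real si_shader_cache_key_hash hashes a variable-size IR blob):

```c
#include <string.h>
#include "util/hash_table.h"
#include "util/simple_mtx.h"

#define KEY_SIZE 16                       /* hypothetical fixed key size */

static uint32_t key_hash(const void *key)
{
   return _mesa_hash_data(key, KEY_SIZE);
}

static bool key_equals(const void *a, const void *b)
{
   return memcmp(a, b, KEY_SIZE) == 0;
}

bool cache_init(simple_mtx_t *mutex, struct hash_table **table)
{
   simple_mtx_init(mutex, mtx_plain);     /* mutex first... */
   *table = _mesa_hash_table_create(NULL, key_hash, key_equals);
   return *table != NULL;
}

void cache_destroy(simple_mtx_t *mutex, struct hash_table *table)
{
   if (table)
      _mesa_hash_table_destroy(table, NULL); /* ...table freed before mutex */
   simple_mtx_destroy(mutex);
}
```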
/* SHADER STATES */
if (thread_index < 0)
util_queue_fence_wait(&sel->ready);
- mtx_lock(&sel->mutex);
+ simple_mtx_lock(&sel->mutex);
/* Find the shader variant. */
for (iter = sel->first_variant; iter; iter = iter->next_variant) {
/* Don't check the "current" shader. We checked it above. */
if (current != iter &&
memcmp(&iter->key, key, sizeof(*key)) == 0) {
- mtx_unlock(&sel->mutex);
+ simple_mtx_unlock(&sel->mutex);
if (unlikely(!util_queue_fence_is_signalled(&iter->ready))) {
/* If it's an optimized shader and its compilation has
/* Build a new shader. */
shader = CALLOC_STRUCT(si_shader);
if (!shader) {
- mtx_unlock(&sel->mutex);
+ simple_mtx_unlock(&sel->mutex);
return -ENOMEM;
}
assert(0);
}
- mtx_lock(&previous_stage_sel->mutex);
+ simple_mtx_lock(&previous_stage_sel->mutex);
ok = si_check_missing_main_part(sscreen,
previous_stage_sel,
compiler_state, &shader1_key);
- mtx_unlock(&previous_stage_sel->mutex);
+ simple_mtx_unlock(&previous_stage_sel->mutex);
}
if (ok) {
if (!ok) {
FREE(shader);
- mtx_unlock(&sel->mutex);
+ simple_mtx_unlock(&sel->mutex);
return -ENOMEM; /* skip the draw call */
}
}
/* Use the default (unoptimized) shader for now. */
memset(&key->opt, 0, sizeof(key->opt));
- mtx_unlock(&sel->mutex);
+ simple_mtx_unlock(&sel->mutex);
if (sscreen->options.sync_compile)
util_queue_fence_wait(&shader->ready);
sel->last_variant = shader;
}
- mtx_unlock(&sel->mutex);
+ simple_mtx_unlock(&sel->mutex);
assert(!shader->is_optimized);
si_build_shader_variant(shader, thread_index, false);
}
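One detail worth noting in the variant search: on a hit, sel->mutex is released before the variant's ready fence is inspected, so a caller never blocks on an in-flight compile while holding the lock every other context needs for its own lookups. Skeleton with hypothetical variant/selector types:

```c
#include <string.h>
#include "util/simple_mtx.h"
#include "util/u_queue.h"

struct variant_key { unsigned data[8]; };   /* hypothetical */

struct variant {                            /* hypothetical */
   struct variant_key key;
   struct util_queue_fence ready;
   struct variant *next;
};

struct selector {                           /* hypothetical */
   simple_mtx_t mutex;
   struct variant *first;
};

static struct variant *find_ready_variant(struct selector *s,
                                          const struct variant_key *key)
{
   simple_mtx_lock(&s->mutex);
   for (struct variant *v = s->first; v; v = v->next) {
      if (memcmp(&v->key, key, sizeof(*key)) == 0) {
         simple_mtx_unlock(&s->mutex);      /* unlock before the fence */
         if (!util_queue_fence_is_signalled(&v->ready))
            return NULL;   /* still compiling; caller uses a fallback */
         return v;
      }
   }
   simple_mtx_unlock(&s->mutex);
   return NULL;
}
```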
/* Try to load the shader from the shader cache. */
- mtx_lock(&sscreen->shader_cache_mutex);
+ simple_mtx_lock(&sscreen->shader_cache_mutex);
if (ir_binary &&
si_shader_cache_load_shader(sscreen, ir_binary, shader)) {
- mtx_unlock(&sscreen->shader_cache_mutex);
+ simple_mtx_unlock(&sscreen->shader_cache_mutex);
si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
} else {
- mtx_unlock(&sscreen->shader_cache_mutex);
+ simple_mtx_unlock(&sscreen->shader_cache_mutex);
/* Compile the shader if it hasn't been loaded from the cache. */
if (si_compile_tgsi_shader(sscreen, compiler, shader,
}
if (ir_binary) {
- mtx_lock(&sscreen->shader_cache_mutex);
+ simple_mtx_lock(&sscreen->shader_cache_mutex);
if (!si_shader_cache_insert_shader(sscreen, ir_binary, shader, true))
FREE(ir_binary);
- mtx_unlock(&sscreen->shader_cache_mutex);
+ simple_mtx_unlock(&sscreen->shader_cache_mutex);
}
}
if (sel->info.properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE])
sel->db_shader_control |= S_02880C_PRE_SHADER_DEPTH_COVERAGE_ENABLE(1);
- (void) mtx_init(&sel->mutex, mtx_plain);
+ (void) simple_mtx_init(&sel->mutex, mtx_plain);
si_schedule_initial_compile(sctx, sel->info.processor, &sel->ready,
&sel->compiler_ctx_state, sel,
si_delete_shader(sctx, sel->gs_copy_shader);
util_queue_fence_destroy(&sel->ready);
- mtx_destroy(&sel->mutex);
+ simple_mtx_destroy(&sel->mutex);
free(sel->tokens);
ralloc_free(sel->nir);
free(sel);
static void si_shader_lock(struct si_shader *shader)
{
- mtx_lock(&shader->selector->mutex);
+ simple_mtx_lock(&shader->selector->mutex);
if (shader->previous_stage_sel) {
assert(shader->previous_stage_sel != shader->selector);
- mtx_lock(&shader->previous_stage_sel->mutex);
+ simple_mtx_lock(&shader->previous_stage_sel->mutex);
}
}
static void si_shader_unlock(struct si_shader *shader)
{
if (shader->previous_stage_sel)
- mtx_unlock(&shader->previous_stage_sel->mutex);
- mtx_unlock(&shader->selector->mutex);
+ simple_mtx_unlock(&shader->previous_stage_sel->mutex);
+ simple_mtx_unlock(&shader->selector->mutex);
}
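si_shader_lock/si_shader_unlock exist so that the two-mutex case (merged shaders, where a variant spans its own selector plus previous_stage_sel) is always acquired in one order and released in the reverse order; as long as every caller goes through this pair, the locks cannot deadlock against each other. Usage sketch with a hypothetical caller:

```c
/* Hypothetical caller: all state guarded by both selectors is mutated
 * strictly between the paired lock/unlock helpers. */
static void patch_shader_variant(struct si_shader *shader)
{
   si_shader_lock(shader);
   /* ... update variant state owned by shader->selector and, for merged
    * shaders, shader->previous_stage_sel ... */
   si_shader_unlock(shader);
}
```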
/**
struct pipe_context *ctx = &sctx->b;
if (ctx == sscreen->aux_context)
- mtx_lock(&sscreen->aux_context_lock);
+ simple_mtx_lock(&sscreen->aux_context_lock);
unsigned n = sctx->num_decompress_calls;
ctx->flush_resource(ctx, &tex->buffer.b.b);
ctx->flush(ctx, NULL, 0);
if (ctx == sscreen->aux_context)
- mtx_unlock(&sscreen->aux_context_lock);
+ simple_mtx_unlock(&sscreen->aux_context_lock);
}
void si_texture_discard_cmask(struct si_screen *sscreen,
return false;
if (&sctx->b == sscreen->aux_context)
- mtx_lock(&sscreen->aux_context_lock);
+ simple_mtx_lock(&sscreen->aux_context_lock);
/* Decompress DCC. */
si_decompress_dcc(sctx, tex);
sctx->b.flush(&sctx->b, NULL, 0);
if (&sctx->b == sscreen->aux_context)
- mtx_unlock(&sscreen->aux_context_lock);
+ simple_mtx_unlock(&sscreen->aux_context_lock);
return si_texture_discard_dcc(sscreen, tex);
}
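The decompress paths take aux_context_lock only when they are actually running on the screen's aux context: regular contexts are already serialized by the gallium API, but the aux context is shared by every context of the screen. The guard, factored out with a hypothetical helper name (the si_decompress_dcc call is taken from the hunk above):

```c
#include "util/simple_mtx.h"

static void decompress_maybe_on_aux(struct si_context *sctx,
                                    struct si_texture *tex)
{
   struct si_screen *sscreen = sctx->screen;
   bool on_aux = (&sctx->b == sscreen->aux_context);

   if (on_aux)
      simple_mtx_lock(&sscreen->aux_context_lock);

   si_decompress_dcc(sctx, tex);
   sctx->b.flush(&sctx->b, NULL, 0);

   if (on_aux)
      simple_mtx_unlock(&sscreen->aux_context_lock);
}
```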
u_box_1d(0, buf->b.b.width0, &box);
assert(tex->surface.dcc_retile_map_offset <= UINT_MAX);
- mtx_lock(&sscreen->aux_context_lock);
+ simple_mtx_lock(&sscreen->aux_context_lock);
sctx->dma_copy(&sctx->b, &tex->buffer.b.b, 0,
tex->surface.dcc_retile_map_offset, 0, 0,
&buf->b.b, 0, &box);
sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
- mtx_unlock(&sscreen->aux_context_lock);
+ simple_mtx_unlock(&sscreen->aux_context_lock);
si_resource_reference(&buf, NULL);
}