diff --git a/src/compiler/nir/nir_lower_atomics_to_ssbo.c b/src/compiler/nir/nir_lower_atomics_to_ssbo.c
index 934ae81d750..b9ba4e4b273 100644
--- a/src/compiler/nir/nir_lower_atomics_to_ssbo.c
+++ b/src/compiler/nir/nir_lower_atomics_to_ssbo.c
@@ -32,46 +32,29 @@
 #endif
 
 /*
- * Remap atomic counters to SSBOs. Atomic counters get remapped to
- * SSBO binding points [0..ssbo_offset) and the original SSBOs are
- * remapped to [ssbo_offset..n) (mostly to align with what mesa/st
- * does.
+ * Remap atomic counters to SSBOs, starting from the shader's next SSBO slot
+ * (info.num_ssbos).
  */
 
 static bool
 lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b)
 {
    nir_intrinsic_op op;
-   int idx_src;
 
    b->cursor = nir_before_instr(&instr->instr);
 
    switch (instr->intrinsic) {
-   case nir_intrinsic_ssbo_atomic_add:
-   case nir_intrinsic_ssbo_atomic_imin:
-   case nir_intrinsic_ssbo_atomic_umin:
-   case nir_intrinsic_ssbo_atomic_imax:
-   case nir_intrinsic_ssbo_atomic_umax:
-   case nir_intrinsic_ssbo_atomic_and:
-   case nir_intrinsic_ssbo_atomic_or:
-   case nir_intrinsic_ssbo_atomic_xor:
-   case nir_intrinsic_ssbo_atomic_exchange:
-   case nir_intrinsic_ssbo_atomic_comp_swap:
-   case nir_intrinsic_store_ssbo:
-   case nir_intrinsic_load_ssbo:
-   case nir_intrinsic_get_buffer_size:
-      /* easy case, keep same opcode and just remap SSBO buffer index: */
-      op = instr->intrinsic;
-      idx_src = (op == nir_intrinsic_store_ssbo) ? 1 : 0;
-      nir_ssa_def *old_idx = nir_ssa_for_src(b, instr->src[idx_src], 1);
-      nir_ssa_def *new_idx = nir_iadd(b, old_idx, nir_imm_int(b, ssbo_offset));
-      nir_instr_rewrite_src(&instr->instr,
-                            &instr->src[idx_src],
-                            nir_src_for_ssa(new_idx));
+   case nir_intrinsic_memory_barrier_atomic_counter:
+      /* Atomic counters are now SSBOs so memoryBarrierAtomicCounter() is now
+       * memoryBarrierBuffer().
+       */
+      instr->intrinsic = nir_intrinsic_memory_barrier_buffer;
       return true;
+
    case nir_intrinsic_atomic_counter_inc:
    case nir_intrinsic_atomic_counter_add:
-   case nir_intrinsic_atomic_counter_dec:
+   case nir_intrinsic_atomic_counter_pre_dec:
+   case nir_intrinsic_atomic_counter_post_dec:
       /* inc and dec get remapped to add: */
       op = nir_intrinsic_ssbo_atomic_add;
       break;
@@ -103,7 +86,7 @@ lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b)
       return false;
    }
 
-   nir_ssa_def *buffer = nir_imm_int(b, nir_intrinsic_base(instr));
+   nir_ssa_def *buffer = nir_imm_int(b, ssbo_offset + nir_intrinsic_base(instr));
    nir_ssa_def *temp = NULL;
    nir_intrinsic_instr *new_instr =
          nir_intrinsic_instr_create(ralloc_parent(instr), op);
@@ -119,7 +102,8 @@ lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b)
       nir_src_copy(&new_instr->src[1], &instr->src[0], new_instr);
       new_instr->src[2] = nir_src_for_ssa(temp);
       break;
-   case nir_intrinsic_atomic_counter_dec:
+   case nir_intrinsic_atomic_counter_pre_dec:
+   case nir_intrinsic_atomic_counter_post_dec:
       /* remapped to ssbo_atomic_add: { buffer_idx, offset, -1 } */
       /* NOTE semantic difference so we adjust the return value below */
       temp = nir_imm_int(b, -1);
@@ -137,18 +121,29 @@ lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b)
       new_instr->src[0] = nir_src_for_ssa(buffer);
       nir_src_copy(&new_instr->src[1], &instr->src[0], new_instr);
       nir_src_copy(&new_instr->src[2], &instr->src[1], new_instr);
-      if (op == nir_intrinsic_ssbo_atomic_comp_swap)
+      if (op == nir_intrinsic_ssbo_atomic_comp_swap ||
+          op == nir_intrinsic_ssbo_atomic_fcomp_swap)
          nir_src_copy(&new_instr->src[3], &instr->src[2], new_instr);
       break;
    }
 
+   if (new_instr->intrinsic == nir_intrinsic_load_ssbo) {
+      nir_intrinsic_set_align(new_instr, 4, 0);
+
+      /* we could be replacing an intrinsic with fixed # of dest
+       * num_components with one that has variable number. So
+       * best to take this from the dest:
+       */
+      new_instr->num_components = instr->dest.ssa.num_components;
+   }
+
    nir_ssa_dest_init(&new_instr->instr, &new_instr->dest,
                      instr->dest.ssa.num_components,
                      instr->dest.ssa.bit_size, NULL);
    nir_instr_insert_before(&instr->instr, &new_instr->instr);
    nir_instr_remove(&instr->instr);
 
-   if (instr->intrinsic == nir_intrinsic_atomic_counter_dec) {
+   if (instr->intrinsic == nir_intrinsic_atomic_counter_pre_dec) {
       b->cursor = nir_after_instr(&new_instr->instr);
       nir_ssa_def *result = nir_iadd(b, &new_instr->dest.ssa, temp);
       nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(result));
@@ -156,11 +151,6 @@ lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b)
       nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&new_instr->dest.ssa));
    }
 
-   /* we could be replacing an intrinsic with fixed # of dest num_components
-    * with one that has variable number. So best to take this from the dest:
-    */
-   new_instr->num_components = instr->dest.ssa.num_components;
-
    return true;
 }
 
@@ -173,8 +163,9 @@ is_atomic_uint(const struct glsl_type *type)
 }
 
 bool
-nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset)
+nir_lower_atomics_to_ssbo(nir_shader *shader)
 {
+   unsigned ssbo_offset = shader->info.num_ssbos;
    bool progress = false;
 
    nir_foreach_function(function, shader) {
@@ -197,7 +188,7 @@ nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset)
    if (progress) {
       /* replace atomic_uint uniforms with ssbo's: */
       unsigned replaced = 0;
-      nir_foreach_variable_safe(var, &shader->uniforms) {
+      nir_foreach_uniform_variable_safe(var, shader) {
         if (is_atomic_uint(var->type)) {
            exec_node_remove(&var->node);
 
@@ -208,13 +199,26 @@ nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset)
               char name[16];
 
               /* A length of 0 is used to denote unsized arrays */
-              const struct glsl_type *type = glsl_array_type(glsl_uint_type(), 0);
+              const struct glsl_type *type = glsl_array_type(glsl_uint_type(), 0, 0);
 
               snprintf(name, sizeof(name), "counter%d", var->data.binding);
 
-              ssbo = nir_variable_create(shader, nir_var_shader_storage,
-                                         type, name);
-              ssbo->data.binding = var->data.binding;
+              ssbo = nir_variable_create(shader, nir_var_mem_ssbo, type, name);
+              ssbo->data.binding = ssbo_offset + var->data.binding;
+
+              /* We can't use num_abos, because it only represents the number of
+               * active atomic counters, and currently unlike SSBO's they aren't
+               * compacted so num_abos actually isn't a bound on the index passed
+               * to nir_intrinsic_atomic_counter_*. e.g. if we have a single atomic
+               * counter declared like:
+               *
+               * layout(binding=1) atomic_uint counter0;
+               *
+               * then when we lower accesses to it the atomic_counter_* intrinsics
+               * will have 1 as the index but num_abos will still be 1.
+               */
+              shader->info.num_ssbos = MAX2(shader->info.num_ssbos,
+                                            ssbo->data.binding + 1);
 
               struct glsl_struct_field field = {
                     .type = type,
@@ -229,6 +233,8 @@ nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset)
           replaced |= (1 << var->data.binding);
        }
     }
+
+      shader->info.num_abos = 0;
  }
 
  return progress;
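
For illustration, a minimal sketch of a caller after this interface change. The
lower_counters() wrapper below is hypothetical and not part of the patch; only
nir_lower_atomics_to_ssbo() itself comes from NIR, with the new one-argument
signature introduced above:

   #include "nir.h"

   static void
   lower_counters(nir_shader *shader)
   {
      /* The pass now derives its SSBO offset from shader->info.num_ssbos
       * internally, so the old ssbo_offset parameter is gone:
       */
      bool progress = nir_lower_atomics_to_ssbo(shader);

      /* On progress, each atomic_uint uniform at binding N has been replaced
       * by an SSBO at binding (old info.num_ssbos + N), info.num_ssbos has
       * grown to cover the new bindings, and info.num_abos is reset to 0.
       *
       * Pre-decrement semantics: ssbo_atomic_add returns the value *before*
       * the add, while atomic_counter_pre_dec must yield the decremented
       * value, which is why lower_instr() above follows the atomic with
       * result = ssbo_atomic_add(buffer, offset, -1) + (-1).
       */
      (void)progress;
   }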