#endif
/*
- * Remap atomic counters to SSBOs. Atomic counters get remapped to
- * SSBO binding points [0..ssbo_offset) and the original SSBOs are
- * remapped to [ssbo_offset..n) (mostly to align with what mesa/st
- * does.
+ * Remap atomic counters to SSBOs, starting from the passed-in ssbo_offset.
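+ *
+ * For example (a sketch of the math below): with ssbo_offset == 4, an
+ * atomic counter at binding 1 becomes the SSBO at binding 4 + 1 == 5,
+ * while the shader's original SSBO bindings [0..4) stay where they are.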
*/
static bool
lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b)
{
nir_intrinsic_op op;
- int idx_src;
b->cursor = nir_before_instr(&instr->instr);
instr->intrinsic = nir_intrinsic_memory_barrier_buffer;
return true;
- case nir_intrinsic_ssbo_atomic_add:
- case nir_intrinsic_ssbo_atomic_imin:
- case nir_intrinsic_ssbo_atomic_umin:
- case nir_intrinsic_ssbo_atomic_imax:
- case nir_intrinsic_ssbo_atomic_umax:
- case nir_intrinsic_ssbo_atomic_and:
- case nir_intrinsic_ssbo_atomic_or:
- case nir_intrinsic_ssbo_atomic_xor:
- case nir_intrinsic_ssbo_atomic_exchange:
- case nir_intrinsic_ssbo_atomic_comp_swap:
- case nir_intrinsic_ssbo_atomic_fadd:
- case nir_intrinsic_ssbo_atomic_fmin:
- case nir_intrinsic_ssbo_atomic_fmax:
- case nir_intrinsic_ssbo_atomic_fcomp_swap:
- case nir_intrinsic_store_ssbo:
- case nir_intrinsic_load_ssbo:
- case nir_intrinsic_get_buffer_size:
- /* easy case, keep same opcode and just remap SSBO buffer index: */
- op = instr->intrinsic;
- idx_src = (op == nir_intrinsic_store_ssbo) ? 1 : 0;
- nir_ssa_def *old_idx = nir_ssa_for_src(b, instr->src[idx_src], 1);
- nir_ssa_def *new_idx = nir_iadd(b, old_idx, nir_imm_int(b, ssbo_offset));
- nir_instr_rewrite_src(&instr->instr,
- &instr->src[idx_src],
- nir_src_for_ssa(new_idx));
- return true;
case nir_intrinsic_atomic_counter_inc:
case nir_intrinsic_atomic_counter_add:
case nir_intrinsic_atomic_counter_pre_dec:
return false;
}
- nir_ssa_def *buffer = nir_imm_int(b, nir_intrinsic_base(instr));
+ nir_ssa_def *buffer = nir_imm_int(b, ssbo_offset + nir_intrinsic_base(instr));
nir_ssa_def *temp = NULL;
nir_intrinsic_instr *new_instr =
nir_intrinsic_instr_create(ralloc_parent(instr), op);
snprintf(name, sizeof(name), "counter%d", var->data.binding);
ssbo = nir_variable_create(shader, nir_var_mem_ssbo, type, name);
- ssbo->data.binding = var->data.binding;
+ ssbo->data.binding = ssbo_offset + var->data.binding;
+
+ /* We can't use num_abos, because it only represents the number of
+ * active atomic counters, and currently, unlike SSBOs, they aren't
+ * compacted, so num_abos isn't actually a bound on the index passed
+ * to nir_intrinsic_atomic_counter_*. E.g. if we have a single atomic
+ * counter declared like:
+ *
+ * layout(binding=1) atomic_uint counter0;
+ *
+ * then when we lower accesses to it, the atomic_counter_* intrinsics
+ * will have 1 as the index, but num_abos will still be 1.
+ */
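+ /* Hence the MAX2 below: with that declaration and ssbo_offset == s,
+ * the counter lands at SSBO binding s + 1, so num_ssbos must cover at
+ * least s + 2 bindings.
+ */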
+ shader->info.num_ssbos = MAX2(shader->info.num_ssbos,
+ ssbo->data.binding + 1);
struct glsl_struct_field field = {
.type = type,
}
}
- /* Make sure that shader->info.num_ssbos still reflects the maximum SSBO
- * index that can be used in the shader.
- */
- if (shader->info.num_ssbos > 0) {
- shader->info.num_ssbos += ssbo_offset;
- } else {
- /* We can't use num_abos, because it only represents the number of
- * active atomic counters, and currently unlike SSBO's they aren't
- * compacted so num_abos actually isn't a bound on the index passed
- * to nir_intrinsic_atomic_counter_*. e.g. if we have a single atomic
- * counter declared like:
- *
- * layout(binding=1) atomic_uint counter0;
- *
- * then when we lower accesses to it the atomic_counter_* intrinsics
- * will have 1 as the index but num_abos will still be 1.
- * */
- shader->info.num_ssbos = util_last_bit(replaced);
- }
shader->info.num_abos = 0;
}
NIR_PASS_V(prog->nir, gl_nir_lower_atomics, shProg, false);
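+ /* Counters get appended after the shader's own SSBOs by passing the
+ * current num_ssbos as the offset; the pass then grows num_ssbos to
+ * cover the new bindings.
+ */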
NIR_PASS_V(prog->nir, nir_lower_atomics_to_ssbo,
- prog->nir->info.num_abos);
+ prog->nir->info.num_ssbos);
nir_sweep(prog->nir);
ISL_FORMAT_R32G32B32A32_FLOAT, 0);
}
- uint32_t *abo_surf_offsets =
+ uint32_t *ssbo_surf_offsets =
&stage_state->surf_offset[prog_data->binding_table.ssbo_start];
- uint32_t *ssbo_surf_offsets = abo_surf_offsets + prog->info.num_abos;
+ uint32_t *abo_surf_offsets = ssbo_surf_offsets + prog->info.num_ssbos;
for (int i = 0; i < prog->info.num_abos; i++) {
struct gl_buffer_binding *binding =
static void
st_bind_atomics(struct st_context *st, struct gl_program *prog,
- enum pipe_shader_type shader_type)
+ gl_shader_stage stage)
{
unsigned i;
+ enum pipe_shader_type shader_type = pipe_shader_type_from_mesa(stage);
if (!prog || !st->pipe->set_shader_buffers || st->has_hw_atomics)
return;
+ /* For !has_hw_atomics, the atomic counters have been rewritten to be above
+ * the SSBO range.
+ */
+ unsigned buffer_base = st->ctx->Const.Program[stage].MaxShaderStorageBlocks;
+
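+ /* A sketch of the slot math: with MaxShaderStorageBlocks == 8, the
+ * counter buffer bound at Binding == 1 goes to shader buffer slot
+ * 8 + 1 == 9, matching the bindings produced by the lowering passes.
+ */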
for (i = 0; i < prog->sh.data->NumAtomicBuffers; i++) {
struct gl_active_atomic_buffer *atomic =
&prog->sh.data->AtomicBuffers[i];
st_binding_to_sb(&st->ctx->AtomicBufferBindings[atomic->Binding], &sb);
st->pipe->set_shader_buffers(st->pipe, shader_type,
- atomic->Binding, 1, &sb, 0x1);
+ buffer_base + atomic->Binding, 1, &sb, 0x1);
}
}
struct gl_program *prog =
st->ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX];
- st_bind_atomics(st, prog, PIPE_SHADER_VERTEX);
+ st_bind_atomics(st, prog, MESA_SHADER_VERTEX);
}
void
struct gl_program *prog =
st->ctx->_Shader->CurrentProgram[MESA_SHADER_FRAGMENT];
- st_bind_atomics(st, prog, PIPE_SHADER_FRAGMENT);
+ st_bind_atomics(st, prog, MESA_SHADER_FRAGMENT);
}
void
struct gl_program *prog =
st->ctx->_Shader->CurrentProgram[MESA_SHADER_GEOMETRY];
- st_bind_atomics(st, prog, PIPE_SHADER_GEOMETRY);
+ st_bind_atomics(st, prog, MESA_SHADER_GEOMETRY);
}
void
struct gl_program *prog =
st->ctx->_Shader->CurrentProgram[MESA_SHADER_TESS_CTRL];
- st_bind_atomics(st, prog, PIPE_SHADER_TESS_CTRL);
+ st_bind_atomics(st, prog, MESA_SHADER_TESS_CTRL);
}
void
struct gl_program *prog =
st->ctx->_Shader->CurrentProgram[MESA_SHADER_TESS_EVAL];
- st_bind_atomics(st, prog, PIPE_SHADER_TESS_EVAL);
+ st_bind_atomics(st, prog, MESA_SHADER_TESS_EVAL);
}
void
struct gl_program *prog =
st->ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
- st_bind_atomics(st, prog, PIPE_SHADER_COMPUTE);
+ st_bind_atomics(st, prog, MESA_SHADER_COMPUTE);
}
void
unsigned i;
struct pipe_shader_buffer buffers[MAX_SHADER_STORAGE_BUFFERS];
struct gl_program_constants *c;
- int buffer_base;
if (!prog || !st->pipe->set_shader_buffers)
return;
c = &st->ctx->Const.Program[prog->info.stage];
- buffer_base = st->has_hw_atomics ? 0 : c->MaxAtomicBuffers;
-
for (i = 0; i < prog->info.num_ssbos; i++) {
struct gl_buffer_binding *binding;
struct st_buffer_object *st_obj;
sb->buffer_size = 0;
}
}
- st->pipe->set_shader_buffers(st->pipe, shader_type, buffer_base,
+ st->pipe->set_shader_buffers(st->pipe, shader_type, 0,
prog->info.num_ssbos, buffers,
prog->sh.ShaderStorageBlocksWriteAccess);
/* clear out any stale shader buffers */
if (prog->info.num_ssbos < c->MaxShaderStorageBlocks)
st->pipe->set_shader_buffers(
st->pipe, shader_type,
- buffer_base + prog->info.num_ssbos,
+ prog->info.num_ssbos,
c->MaxShaderStorageBlocks - prog->info.num_ssbos,
NULL, 0);
}
/* shader buffers */
/* TODO: atomic counter buffers */
struct pipe_transfer *ssbo_transfer[PIPE_MAX_SHADER_BUFFERS] = {0};
- unsigned ssbo_first_slot = st->has_hw_atomics ? 0 :
- st->ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicBuffers;
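+ /* SSBOs now start at slot 0 for the draw module; the lowered atomic
+ * counters would live above MaxShaderStorageBlocks (still TODO above).
+ */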
for (unsigned i = 0; i < prog->info.num_ssbos; i++) {
struct gl_buffer_binding *binding =
PIPE_TRANSFER_READ, &ssbo_transfer[i]);
draw_set_mapped_shader_buffer(draw, PIPE_SHADER_VERTEX,
- ssbo_first_slot + i, ptr, size);
+ i, ptr, size);
}
/* samplers */
if (!st->has_hw_atomics)
NIR_PASS_V(nir, nir_lower_atomics_to_ssbo,
- st->ctx->Const.Program[nir->info.stage].MaxAtomicBuffers);
+ st->ctx->Const.Program[nir->info.stage].MaxShaderStorageBlocks);
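+ /* st uses the fixed MaxShaderStorageBlocks base (rather than the
+ * shader's own num_ssbos) so the slot assignment matches what
+ * st_bind_atomics and st_bind_ssbos bind, regardless of how many
+ * SSBOs a shader declares.
+ */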
st_finalize_nir_before_variants(nir);
case ir_unop_get_buffer_size: {
ir_constant *const_offset = ir->operands[0]->as_constant();
- int buf_base = ctx->st->has_hw_atomics
- ? 0 : ctx->Const.Program[shader->Stage].MaxAtomicBuffers;
st_src_reg buffer(
PROGRAM_BUFFER,
- buf_base + (const_offset ? const_offset->value.u[0] : 0),
+ const_offset ? const_offset->value.u[0] : 0,
GLSL_TYPE_UINT);
if (!const_offset) {
buffer.reladdr = ralloc(mem_ctx, st_src_reg);
resource = buffer;
} else {
- st_src_reg buffer(PROGRAM_BUFFER, location->data.binding,
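+ /* Counters now address the buffer range above the SSBOs, so bias the
+ * index by MaxShaderStorageBlocks (mirroring st_bind_atomics).
+ */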
+ st_src_reg buffer(PROGRAM_BUFFER,
+ ctx->Const.Program[shader->Stage].MaxShaderStorageBlocks +
+ location->data.binding,
GLSL_TYPE_ATOMIC_UINT);
if (offset.file != PROGRAM_UNDEFINED) {
ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();
ir_constant *const_block = block->as_constant();
- int buf_base = st_context(ctx)->has_hw_atomics
- ? 0 : ctx->Const.Program[shader->Stage].MaxAtomicBuffers;
st_src_reg buffer(
PROGRAM_BUFFER,
- buf_base + (const_block ? const_block->value.u[0] : 0),
+ const_block ? const_block->value.u[0] : 0,
GLSL_TYPE_UINT);
if (!const_block) {
if (!st_context(ctx)->has_hw_atomics) {
for (i = 0; i < prog->info.num_abos; i++) {
- unsigned index = prog->sh.AtomicBuffers[i]->Binding;
- assert(index < frag_const->MaxAtomicBuffers);
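+ /* Declare the counter buffers in the slots above the SSBO range so
+ * the TGSI resource indices match what st_bind_atomics binds.
+ */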
+ unsigned index = (frag_const->MaxShaderStorageBlocks +
+ prog->sh.AtomicBuffers[i]->Binding);
+ assert(prog->sh.AtomicBuffers[i]->Binding <
+ frag_const->MaxAtomicBuffers);
t->buffers[index] = ureg_DECL_buffer(ureg, index, true);
}
} else {
assert(prog->info.num_ssbos <= frag_const->MaxShaderStorageBlocks);
for (i = 0; i < prog->info.num_ssbos; i++) {
- unsigned index = i;
- if (!st_context(ctx)->has_hw_atomics)
- index += frag_const->MaxAtomicBuffers;
-
- t->buffers[index] = ureg_DECL_buffer(ureg, index, false);
+ t->buffers[i] = ureg_DECL_buffer(ureg, i, false);
}
}