#include "program/prog_parameter.h"
#include "nir/nir_builder.h"
#include "compiler/brw_nir.h"
+#include "util/mesa-sha1.h"
#include "util/set.h"
/* Sampler tables don't actually have a maximum size but we pick one just so
 * that we don't end up emitting too much state on-the-fly.
 */
nir_shader *shader;
nir_builder builder;
- struct anv_pipeline_layout *layout;
+ const struct anv_pipeline_layout *layout;
bool add_bounds_checks;
nir_address_format ssbo_addr_format;
/* Place to flag lowered instructions so we don't lower them twice */
struct set *lowered_instrs;
- int dynamic_offset_uniform_start;
-
bool uses_constants;
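+ /* True if any binding used by the shader takes a dynamic buffer offset */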
+ bool has_dynamic_buffers;
uint8_t constants_offset;
struct {
bool desc_buffer_used;
nir_intrinsic_instr *desc_load =
   nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
desc_load->src[0] = nir_src_for_ssa(desc_buffer_index);
desc_load->src[1] = nir_src_for_ssa(desc_offset);
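+ /* Descriptors are assumed to be 8-byte aligned within the descriptor
+  * buffer, hence align_mul = 8 and align_offset = 0.
+  */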
+ nir_intrinsic_set_align(desc_load, 8, 0);
desc_load->num_components = 4;
nir_ssa_dest_init(&desc_load->instr, &desc_load->dest, 4, 32, NULL);
nir_builder_instr_insert(b, &desc_load->instr);
if (!state->add_bounds_checks)
desc = nir_pack_64_2x32(b, nir_channels(b, desc, 0x3));
- if (state->dynamic_offset_uniform_start >= 0) {
+ if (state->has_dynamic_buffers) {
/* This shader has dynamic offsets and we have no way of knowing
* (save from the dynamic offset base index) if this buffer has a
* dynamic offset.
*/
}
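+ /* Fetch this buffer's dynamic offset from the dynamic_offsets array in
+  * anv_push_constants.
+  */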
nir_intrinsic_instr *dyn_load =
- nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
- nir_intrinsic_set_base(dyn_load, state->dynamic_offset_uniform_start);
+ nir_intrinsic_instr_create(b->shader,
+ nir_intrinsic_load_push_constant);
+ nir_intrinsic_set_base(dyn_load, offsetof(struct anv_push_constants,
+ dynamic_offsets));
nir_intrinsic_set_range(dyn_load, MAX_DYNAMIC_BUFFERS * 4);
dyn_load->src[0] = nir_src_for_ssa(nir_imul_imm(b, dyn_offset_idx, 4));
dyn_load->num_components = 1;
nir_intrinsic_instr *desc_load =
   nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
desc_load->src[0] = nir_src_for_ssa(desc_buffer_index);
desc_load->src[1] = nir_src_for_ssa(desc_offset);
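+ /* As above, assume 8-byte alignment; the constant part of the offset
+  * carries any misalignment.
+  */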
+ nir_intrinsic_set_align(desc_load, 8, offset % 8);
desc_load->num_components = num_components;
nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
num_components, bit_size, NULL);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(desc));
} else if (binding_offset > MAX_BINDING_TABLE_SIZE) {
const bool write_only =
- (var->data.image.access & ACCESS_NON_READABLE) != 0;
+ (var->data.access & ACCESS_NON_READABLE) != 0;
nir_ssa_def *desc =
build_descriptor_load(deref, 0, 2, 32, state);
nir_ssa_def *handle = nir_channel(b, desc, write_only ? 1 : 0);
b->cursor = nir_before_instr(&intrin->instr);
+ /* Any constant-offset load_constant instructions should have been removed
+ * by constant folding.
+ */
+ assert(!nir_src_is_const(intrin->src[0]));
+
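+ /* state->constants_offset is the binding table entry reserved for this
+  * shader's constant data.
+  */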
nir_ssa_def *index = nir_imm_int(b, state->constants_offset);
nir_ssa_def *offset = nir_iadd(b, nir_ssa_for_src(b, intrin->src[0], 1),
nir_imm_int(b, nir_intrinsic_base(intrin)));
load_ubo->num_components = intrin->num_components;
load_ubo->src[0] = nir_src_for_ssa(index);
load_ubo->src[1] = nir_src_for_ssa(offset);
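+ /* Constant data loads are assumed naturally aligned for the destination
+  * bit size.
+  */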
+ nir_intrinsic_set_align(load_ubo, intrin->dest.ssa.bit_size / 8, 0);
nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
intrin->dest.ssa.num_components,
intrin->dest.ssa.bit_size, NULL);
lower_tex_deref(tex, nir_tex_src_sampler_deref,
&tex->sampler_index, plane, state);
-
- /* The backend only ever uses this to mark used surfaces. We don't care
- * about that little optimization so it just needs to be non-zero.
- */
- tex->texture_array_size = 1;
}
static void
void
anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
bool robust_buffer_access,
- struct anv_pipeline_layout *layout,
+ const struct anv_pipeline_layout *layout,
nir_shader *shader,
- struct brw_stage_prog_data *prog_data,
struct anv_pipeline_bind_map *map)
{
void *mem_ctx = ralloc_context(NULL);
struct apply_pipeline_layout_state state = {
   .add_bounds_checks = robust_buffer_access,
.ssbo_addr_format = anv_nir_ssbo_addr_format(pdevice, robust_buffer_access),
.lowered_instrs = _mesa_pointer_set_create(mem_ctx),
- .dynamic_offset_uniform_start = -1,
};
for (unsigned s = 0; s < layout->num_sets; s++) {
struct binding_info *infos =
   rzalloc_array(mem_ctx, struct binding_info, used_binding_count);
used_binding_count = 0;
for (uint32_t set = 0; set < layout->num_sets; set++) {
- struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
+ const struct anv_descriptor_set_layout *set_layout =
+    layout->set[set].layout;
for (unsigned b = 0; b < set_layout->binding_count; b++) {
if (state.set[set].use_count[b] == 0)
continue;
- struct anv_descriptor_set_binding_layout *binding =
+ const struct anv_descriptor_set_binding_layout *binding =
&layout->set[set].layout->binding[b];
/* Do a fixed-point calculation to generate a score based on the
 * number of uses and the binding array size.
 */
qsort(infos, used_binding_count, sizeof(struct binding_info),
compare_binding_infos);
- bool have_dynamic_buffers = false;
-
for (unsigned i = 0; i < used_binding_count; i++) {
unsigned set = infos[i].set, b = infos[i].binding;
- struct anv_descriptor_set_binding_layout *binding =
+ const struct anv_descriptor_set_binding_layout *binding =
&layout->set[set].layout->binding[b];
- if (binding->dynamic_offset_index >= 0)
- have_dynamic_buffers = true;
-
const uint32_t array_size = binding->array_size;
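+ /* Remember whether any used binding takes a dynamic offset */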
+ if (binding->dynamic_offset_index >= 0)
+ state.has_dynamic_buffers = true;
+
if (binding->data & ANV_DESCRIPTOR_SURFACE_STATE) {
if (map->surface_count + array_size > MAX_BINDING_TABLE_SIZE ||
anv_descriptor_requires_bindless(pdevice, binding, false)) {
}
}
- if (have_dynamic_buffers) {
- state.dynamic_offset_uniform_start = shader->num_uniforms;
- uint32_t *param = brw_stage_prog_data_add_params(prog_data,
- MAX_DYNAMIC_BUFFERS);
- for (unsigned i = 0; i < MAX_DYNAMIC_BUFFERS; i++)
- param[i] = ANV_PARAM_DYN_OFFSET(i);
- shader->num_uniforms += MAX_DYNAMIC_BUFFERS * 4;
- assert(shader->num_uniforms == prog_data->nr_params * 4);
- }
-
nir_foreach_variable(var, &shader->uniforms) {
const struct glsl_type *glsl_type = glsl_without_array(var->type);
const uint32_t set = var->data.descriptor_set;
const uint32_t binding = var->data.binding;
- struct anv_descriptor_set_binding_layout *bind_layout =
+ const struct anv_descriptor_set_binding_layout *bind_layout =
&layout->set[set].layout->binding[binding];
const uint32_t array_size = bind_layout->array_size;
if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
    dim == GLSL_SAMPLER_DIM_SUBPASS_MS)
pipe_binding[i].input_attachment_index = var->data.index + i;
+ /* NOTE: This is a uint8_t so we really do need the != 0 here */
pipe_binding[i].write_only =
- (var->data.image.access & ACCESS_NON_READABLE) != 0;
+ (var->data.access & ACCESS_NON_READABLE) != 0;
}
}
if (!function->impl)
continue;
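+ /* Initialize the builder up front; lower_direct_buffer_access below may
+  * emit new instructions through it.
+  */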
+ nir_builder_init(&state.builder, function->impl);
+
/* Before we do the normal lowering, we look for any SSBO operations
* that we can lower to the BTI model and lower them up-front. The BTI
* model can perform better than the A64 model for a couple reasons:
*/
lower_direct_buffer_access(function->impl, &state);
- nir_builder_init(&state.builder, function->impl);
nir_foreach_block(block, function->impl)
apply_pipeline_layout_block(block, &state);
nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                      nir_metadata_dominance);
}
ralloc_free(mem_ctx);
+
+ /* Now that we're done computing the surface and sampler portions of the
+ * bind map, hash them. This lets us quickly determine if the actual
+ * mapping has changed and not just a no-op pipeline change.
+ */
+ _mesa_sha1_compute(map->surface_to_descriptor,
+ map->surface_count * sizeof(struct anv_pipeline_binding),
+ map->surface_sha1);
+ _mesa_sha1_compute(map->sampler_to_descriptor,
+ map->sampler_count * sizeof(struct anv_pipeline_binding),
+ map->sampler_sha1);
}