X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fintel%2Fvulkan%2Fanv_nir_apply_pipeline_layout.c;h=67bcf5e29ef1dd6bc4ae15730cc1bd2d45323085;hb=db37c0be13e64cce70491d6c6c0090a8f1d3d1d6;hp=91f4322f83a61a75a450262fac3d601828622d41;hpb=9464d8c49813aba77285e7465b96e92a91ed327c;p=mesa.git

diff --git a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
index 91f4322f83a..67bcf5e29ef 100644
--- a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
+++ b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
@@ -29,6 +29,9 @@ struct apply_pipeline_layout_state {
    nir_shader *shader;
    nir_builder builder;
 
+   struct anv_pipeline_layout *layout;
+   bool add_bounds_checks;
+
    struct {
       BITSET_WORD *used;
       uint8_t *surface_offsets;
@@ -110,17 +113,15 @@ lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
    uint32_t binding = nir_intrinsic_binding(intrin);
 
    uint32_t surface_index = state->set[set].surface_offsets[binding];
+   uint32_t array_size =
+      state->layout->set[set].layout->binding[binding].array_size;
 
-   nir_const_value *const_block_idx =
-      nir_src_as_const_value(intrin->src[0]);
+   nir_ssa_def *block_index = nir_ssa_for_src(b, intrin->src[0], 1);
 
-   nir_ssa_def *block_index;
-   if (const_block_idx) {
-      block_index = nir_imm_int(b, surface_index + const_block_idx->u32[0]);
-   } else {
-      block_index = nir_iadd(b, nir_imm_int(b, surface_index),
-                             nir_ssa_for_src(b, intrin->src[0], 1));
-   }
+   if (state->add_bounds_checks)
+      block_index = nir_umin(b, block_index, nir_imm_int(b, array_size - 1));
+
+   block_index = nir_iadd(b, nir_imm_int(b, surface_index), block_index);
 
    assert(intrin->dest.is_ssa);
    nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(block_index));
@@ -129,16 +130,24 @@ lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
 
 static void
 lower_tex_deref(nir_tex_instr *tex, nir_deref_var *deref,
-                unsigned *const_index, nir_tex_src_type src_type,
+                unsigned *const_index, unsigned array_size,
+                nir_tex_src_type src_type,
                 struct apply_pipeline_layout_state *state)
 {
+   nir_builder *b = &state->builder;
+
    if (deref->deref.child) {
       assert(deref->deref.child->deref_type == nir_deref_type_array);
       nir_deref_array *deref_array = nir_deref_as_array(deref->deref.child);
 
-      *const_index += deref_array->base_offset;
-
       if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
+         nir_ssa_def *index =
+            nir_iadd(b, nir_imm_int(b, deref_array->base_offset),
+                        nir_ssa_for_src(b, deref_array->indirect, 1));
+
+         if (state->add_bounds_checks)
+            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
+
          nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
                                                tex->num_srcs + 1);
 
@@ -154,10 +163,11 @@ lower_tex_deref(nir_tex_instr *tex, nir_deref_var *deref,
           * first-class texture source.
           */
          tex->src[tex->num_srcs].src_type = src_type;
+         nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs].src,
+                               nir_src_for_ssa(index));
          tex->num_srcs++;
-         assert(deref_array->indirect.is_ssa);
-         nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs - 1].src,
-                               deref_array->indirect);
+      } else {
+         *const_index += MIN2(deref_array->base_offset, array_size - 1);
       }
    }
 }
@@ -182,17 +192,23 @@ lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
    /* No one should have come by and lowered it already */
    assert(tex->texture);
 
+   state->builder.cursor = nir_before_instr(&tex->instr);
+
    unsigned set = tex->texture->var->data.descriptor_set;
    unsigned binding = tex->texture->var->data.binding;
+   unsigned array_size =
+      state->layout->set[set].layout->binding[binding].array_size;
    tex->texture_index = state->set[set].surface_offsets[binding];
-   lower_tex_deref(tex, tex->texture, &tex->texture_index,
+   lower_tex_deref(tex, tex->texture, &tex->texture_index, array_size,
                    nir_tex_src_texture_offset, state);
 
    if (tex->sampler) {
       unsigned set = tex->sampler->var->data.descriptor_set;
       unsigned binding = tex->sampler->var->data.binding;
+      unsigned array_size =
+         state->layout->set[set].layout->binding[binding].array_size;
       tex->sampler_index = state->set[set].sampler_offsets[binding];
-      lower_tex_deref(tex, tex->sampler, &tex->sampler_index,
+      lower_tex_deref(tex, tex->sampler, &tex->sampler_index, array_size,
                       nir_tex_src_sampler_offset, state);
    }
 
@@ -254,6 +270,8 @@ anv_nir_apply_pipeline_layout(struct anv_pipeline *pipeline,
 
    struct apply_pipeline_layout_state state = {
       .shader = shader,
+      .layout = layout,
+      .add_bounds_checks = pipeline->device->robust_buffer_access,
    };
 
    void *mem_ctx = ralloc_context(NULL);
@@ -300,13 +318,13 @@ anv_nir_apply_pipeline_layout(struct anv_pipeline *pipeline,
       BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
                          set_layout->binding_count) {
          unsigned array_size = set_layout->binding[b].array_size;
-         unsigned set_offset = set_layout->binding[b].descriptor_index;
 
          if (set_layout->binding[b].stage[shader->stage].surface_index >= 0) {
            state.set[set].surface_offsets[b] = surface;
            for (unsigned i = 0; i < array_size; i++) {
               map->surface_to_descriptor[surface + i].set = set;
-              map->surface_to_descriptor[surface + i].offset = set_offset + i;
+              map->surface_to_descriptor[surface + i].binding = b;
+              map->surface_to_descriptor[surface + i].index = i;
            }
            surface += array_size;
         }
@@ -315,7 +333,8 @@ anv_nir_apply_pipeline_layout(struct anv_pipeline *pipeline,
            state.set[set].sampler_offsets[b] = sampler;
            for (unsigned i = 0; i < array_size; i++) {
               map->sampler_to_descriptor[sampler + i].set = set;
-              map->sampler_to_descriptor[sampler + i].offset = set_offset + i;
+              map->sampler_to_descriptor[sampler + i].binding = b;
+              map->sampler_to_descriptor[sampler + i].index = i;
            }
            sampler += array_size;
        }
@@ -327,6 +346,35 @@ anv_nir_apply_pipeline_layout(struct anv_pipeline *pipeline,
       }
    }
 
+   nir_foreach_variable(var, &shader->uniforms) {
+      if (!glsl_type_is_image(var->interface_type))
+         continue;
+
+      enum glsl_sampler_dim dim = glsl_get_sampler_dim(var->interface_type);
+
+      const uint32_t set = var->data.descriptor_set;
+      const uint32_t binding = var->data.binding;
+      const uint32_t array_size =
+         layout->set[set].layout->binding[binding].array_size;
+
+      if (!BITSET_TEST(state.set[set].used, binding))
+         continue;
+
+      struct anv_pipeline_binding *pipe_binding =
+         &map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
+      for (unsigned i = 0; i < array_size; i++) {
+         assert(pipe_binding[i].set == set);
+         assert(pipe_binding[i].binding == binding);
+         assert(pipe_binding[i].index == i);
+
+         if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
+             dim == GLSL_SAMPLER_DIM_SUBPASS_MS)
+            pipe_binding[i].input_attachment_index = var->data.index + i;
+
+         pipe_binding[i].write_only = var->data.image.write_only;
+      }
+   }
+
    nir_foreach_function(function, shader) {
       if (!function->impl)
          continue;
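
Note for readers of the patch: the hunks above repeat one pattern, namely clamping a dynamic descriptor-array index to the binding's array bounds when robust buffer access is enabled, then adding the binding's base offset. A minimal standalone sketch of that pattern follows; it only assumes the NIR builder helpers the patch itself uses (nir_imm_int, nir_umin, nir_iadd), and the helper name clamp_and_offset_index is hypothetical, not part of the patch.

/* Hypothetical illustration of the bounds-check pattern above: clamp a
 * dynamic index into a descriptor array to [0, array_size - 1] when
 * bounds checks are requested, then add the binding's base offset so it
 * indexes the flat binding table.
 */
static nir_ssa_def *
clamp_and_offset_index(nir_builder *b, nir_ssa_def *index,
                       uint32_t base_offset, uint32_t array_size,
                       bool add_bounds_checks)
{
   if (add_bounds_checks)
      index = nir_umin(b, index, nir_imm_int(b, array_size - 1));

   return nir_iadd(b, nir_imm_int(b, base_offset), index);
}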