+ return 0;
+}
+
+/* If one side of a divergent IF ends in a branch and the other doesn't, we
+ * might have to emit the contents of the side without the branch at the merge
+ * block instead. This lets us use any SGPR that is live-out of the non-branching
+ * side without creating a linear phi in the invert or merge block. */
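+/* An illustrative sketch of the transformation (pseudocode, not real NIR):
+ *
+ *    before:  loop { if (divergent) { break; } else { ssa_1 = ...; } use(ssa_1); }
+ *    after:   loop { if (divergent) { break; } ssa_1 = ...; use(ssa_1); }
+ *
+ * so ssa_1 can stay in an SGPR without a linear phi at the merge point. */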
+bool
+sanitize_if(nir_function_impl *impl, nir_if *nif)
+{
+ //TODO: skip this if the condition is uniform and there are no divergent breaks/continues?
+
+ nir_block *then_block = nir_if_last_then_block(nif);
+ nir_block *else_block = nir_if_last_else_block(nif);
+ bool then_jump = nir_block_ends_in_jump(then_block) || nir_block_is_unreachable(then_block);
+ bool else_jump = nir_block_ends_in_jump(else_block) || nir_block_is_unreachable(else_block);
+ if (then_jump == else_jump)
+ return false;
+
+ /* If the continue-from block is empty then return, as there is nothing to
+ * move.
+ */
+ if (nir_cf_list_is_empty_block(else_jump ? &nif->then_list : &nif->else_list))
+ return false;
+
+ /* Even though this if statement has a jump on one side, we may still have
+ * phis afterwards. Single-source phis can be produced by loop unrolling
+ * or dead control-flow passes and are perfectly legal. Run a quick phi
+ * removal on the block after the if to clean up any such phis.
+ */
+ nir_opt_remove_phis_block(nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node)));
+
+ /* Finally, move the contents of the continue-from branch after the if-statement. */
+ nir_block *last_continue_from_blk = else_jump ? then_block : else_block;
+ nir_block *first_continue_from_blk = else_jump ?
+ nir_if_first_then_block(nif) : nir_if_first_else_block(nif);
+
+ nir_cf_list tmp;
+ nir_cf_extract(&tmp, nir_before_block(first_continue_from_blk),
+ nir_after_block(last_continue_from_blk));
+ nir_cf_reinsert(&tmp, nir_after_cf_node(&nif->cf_node));
+
+ /* nir_cf_extract() invalidates dominance metadata, but it should still be
+ * correct because of the specific type of transformation we did. Block
+ * indices are not valid except for block_0's, which is all we care about for
+ * nir_block_is_unreachable(). */
+ impl->valid_metadata =
+ (nir_metadata)(impl->valid_metadata | nir_metadata_dominance | nir_metadata_block_index);
+
+ return true;
+}
+
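+/* Recursively run sanitize_if() on every if-statement in the given CF list,
+ * innermost first. */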
+bool
+sanitize_cf_list(nir_function_impl *impl, struct exec_list *cf_list)
+{
+ bool progress = false;
+ foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
+ switch (cf_node->type) {
+ case nir_cf_node_block:
+ break;
+ case nir_cf_node_if: {
+ nir_if *nif = nir_cf_node_as_if(cf_node);
+ progress |= sanitize_cf_list(impl, &nif->then_list);
+ progress |= sanitize_cf_list(impl, &nif->else_list);
+ progress |= sanitize_if(impl, nif);
+ break;
+ }
+ case nir_cf_node_loop: {
+ nir_loop *loop = nir_cf_node_as_loop(cf_node);
+ progress |= sanitize_cf_list(impl, &loop->body);
+ break;
+ }
+ case nir_cf_node_function:
+ unreachable("Invalid cf type");
+ }
+ }
+
+ return progress;
+}
+
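+/* buffer_resource_flags is a flat array of per-resource flags. flags[0] is a
+ * shared slot for everything that may alias (global pointers and non-restrict
+ * buffers); restrict buffers get their own slot at
+ * resource_flag_offsets[desc_set] + binding.
+ *
+ * Illustrative layout for two descriptor sets with 3 and 2 bindings:
+ *    flags[0]     aliased / non-restrict resources
+ *    flags[1..3]  set 0, bindings 0..2
+ *    flags[4..5]  set 1, bindings 0..1
+ */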
+void get_buffer_resource_flags(isel_context *ctx, nir_ssa_def *def, unsigned access,
+ uint8_t **flags, uint32_t *count)
+{
+ int desc_set = -1;
+ unsigned binding = 0;
+
+ if (!def) {
+ /* Global resources are considered to alias all other buffers and
+ * buffer images. */
+ // TODO: only merge flags of resources which can really alias.
+ } else if (def->parent_instr->type == nir_instr_type_intrinsic) {
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(def->parent_instr);
+ if (intrin->intrinsic == nir_intrinsic_vulkan_resource_index) {
+ desc_set = nir_intrinsic_desc_set(intrin);
+ binding = nir_intrinsic_binding(intrin);
+ }
+ } else if (def->parent_instr->type == nir_instr_type_deref) {
+ nir_deref_instr *deref = nir_instr_as_deref(def->parent_instr);
+ assert(deref->type->is_image());
+ if (deref->type->sampler_dimensionality != GLSL_SAMPLER_DIM_BUF) {
+ *flags = NULL;
+ *count = 0;
+ return;
+ }
+
+ nir_variable *var = nir_deref_instr_get_variable(deref);
+ desc_set = var->data.descriptor_set;
+ binding = var->data.binding;
+ }
+
+ if (desc_set < 0) {
+ *flags = ctx->buffer_resource_flags.data();
+ *count = ctx->buffer_resource_flags.size();
+ return;
+ }
+
+ unsigned set_offset = ctx->resource_flag_offsets[desc_set];
+
+ if (!(ctx->buffer_resource_flags[set_offset + binding] & buffer_is_restrict)) {
+ /* Non-restrict buffers alias only with other non-restrict buffers.
+ * We reserve flags[0] for these. */
+ *flags = ctx->buffer_resource_flags.data();
+ *count = 1;
+ return;
+ }
+
+ *flags = ctx->buffer_resource_flags.data() + set_offset + binding;
+ *count = 1;
+}
+
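+/* Convenience wrapper: OR together the flags of every slot 'def' can map to. */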
+uint8_t get_all_buffer_resource_flags(isel_context *ctx, nir_ssa_def *def, unsigned access)
+{
+ uint8_t *flags;
+ uint32_t count;
+ get_buffer_resource_flags(ctx, def, access, &flags, &count);
+
+ uint8_t res = 0;
+ for (unsigned i = 0; i < count; i++)
+ res |= flags[i];
+ return res;
+}
+
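+/* SMEM buffer stores write whole dwords, so a sub-dword SSBO store can only use
+ * SMEM when every component is written through a contiguous write mask, the
+ * total written size is a whole number of dwords and the store is dword-aligned,
+ * which is what the checks below verify. */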
+bool can_subdword_ssbo_store_use_smem(nir_intrinsic_instr *intrin)
+{
+ unsigned wrmask = nir_intrinsic_write_mask(intrin);
+ if (util_last_bit(wrmask) != util_bitcount(wrmask) ||
+ util_bitcount(wrmask) * intrin->src[0].ssa->bit_size % 32 ||
+ util_bitcount(wrmask) != intrin->src[0].ssa->num_components)
+ return false;
+
+ if (nir_intrinsic_align_mul(intrin) % 4 || nir_intrinsic_align_offset(intrin) % 4)
+ return false;
+
+ return true;
+}
+
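+/* Set up the buffer resource flag slots for the pipeline layout, then record,
+ * for every buffer resource, which kinds of VMEM accesses (GLC vs. non-GLC
+ * loads and stores) the shader performs. */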
+void fill_desc_set_info(isel_context *ctx, nir_function_impl *impl)
+{
+ radv_pipeline_layout *pipeline_layout = ctx->options->layout;
+
+ unsigned resource_flag_count = 1; /* +1 to reserve flags[0] for aliased resources */
+ for (unsigned i = 0; i < pipeline_layout->num_sets; i++) {
+ radv_descriptor_set_layout *layout = pipeline_layout->set[i].layout;
+ ctx->resource_flag_offsets[i] = resource_flag_count;
+ resource_flag_count += layout->binding_count;
+ }
+ ctx->buffer_resource_flags = std::vector<uint8_t>(resource_flag_count);
+
+ nir_foreach_variable_with_modes(var, impl->function->shader, nir_var_mem_ssbo) {
+ if (var->data.access & ACCESS_RESTRICT) {
+ uint32_t offset = ctx->resource_flag_offsets[var->data.descriptor_set];
+ ctx->buffer_resource_flags[offset + var->data.binding] |= buffer_is_restrict;
+ }
+ }
+
+ nir_foreach_block(block, impl) {
+ nir_foreach_instr(instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+ if (!(nir_intrinsic_infos[intrin->intrinsic].index_map[NIR_INTRINSIC_ACCESS]))
+ continue;
+
+ nir_ssa_def *res = NULL;
+ unsigned access = nir_intrinsic_access(intrin);
+ unsigned flags = 0;
+ bool glc = access & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
+ switch (intrin->intrinsic) {
+ case nir_intrinsic_load_ssbo: {
+ /* Uniform loads use SMEM unless glc is set on pre-GFX8, so only mark VMEM loads. */
+ if (nir_dest_is_divergent(intrin->dest) || (glc && ctx->program->chip_class < GFX8))
+ flags |= glc ? has_glc_vmem_load : has_nonglc_vmem_load;
+ res = intrin->src[0].ssa;
+ break;
+ }
+ case nir_intrinsic_ssbo_atomic_add:
+ case nir_intrinsic_ssbo_atomic_imin:
+ case nir_intrinsic_ssbo_atomic_umin:
+ case nir_intrinsic_ssbo_atomic_imax:
+ case nir_intrinsic_ssbo_atomic_umax:
+ case nir_intrinsic_ssbo_atomic_and:
+ case nir_intrinsic_ssbo_atomic_or:
+ case nir_intrinsic_ssbo_atomic_xor:
+ case nir_intrinsic_ssbo_atomic_exchange:
+ case nir_intrinsic_ssbo_atomic_comp_swap:
+ flags |= has_glc_vmem_load | has_glc_vmem_store;
+ res = intrin->src[0].ssa;
+ break;
+ case nir_intrinsic_store_ssbo:
+ if (nir_src_is_divergent(intrin->src[2]) ||
+ ctx->program->chip_class < GFX8 || ctx->program->chip_class >= GFX10_3 ||
+ (intrin->src[0].ssa->bit_size < 32 && !can_subdword_ssbo_store_use_smem(intrin)))
+ flags |= glc ? has_glc_vmem_store : has_nonglc_vmem_store;
+ res = intrin->src[1].ssa;
+ break;
+ case nir_intrinsic_load_global:
+ if (!(access & ACCESS_NON_WRITEABLE))
+ flags |= glc ? has_glc_vmem_load : has_nonglc_vmem_load;
+ break;
+ case nir_intrinsic_store_global:
+ flags |= glc ? has_glc_vmem_store : has_nonglc_vmem_store;
+ break;
+ case nir_intrinsic_global_atomic_add:
+ case nir_intrinsic_global_atomic_imin:
+ case nir_intrinsic_global_atomic_umin:
+ case nir_intrinsic_global_atomic_imax:
+ case nir_intrinsic_global_atomic_umax:
+ case nir_intrinsic_global_atomic_and:
+ case nir_intrinsic_global_atomic_or:
+ case nir_intrinsic_global_atomic_xor:
+ case nir_intrinsic_global_atomic_exchange:
+ case nir_intrinsic_global_atomic_comp_swap:
+ flags |= has_glc_vmem_load | has_glc_vmem_store;
+ break;
+ case nir_intrinsic_image_deref_load:
+ res = intrin->src[0].ssa;
+ flags |= glc ? has_glc_vmem_load : has_nonglc_vmem_load;
+ break;
+ case nir_intrinsic_image_deref_store:
+ res = intrin->src[0].ssa;
+ flags |= (glc || ctx->program->chip_class == GFX6) ? has_glc_vmem_store : has_nonglc_vmem_store;
+ break;
+ case nir_intrinsic_image_deref_atomic_add:
+ case nir_intrinsic_image_deref_atomic_umin:
+ case nir_intrinsic_image_deref_atomic_imin:
+ case nir_intrinsic_image_deref_atomic_umax:
+ case nir_intrinsic_image_deref_atomic_imax:
+ case nir_intrinsic_image_deref_atomic_and:
+ case nir_intrinsic_image_deref_atomic_or:
+ case nir_intrinsic_image_deref_atomic_xor:
+ case nir_intrinsic_image_deref_atomic_exchange:
+ case nir_intrinsic_image_deref_atomic_comp_swap:
+ res = intrin->src[0].ssa;
+ flags |= has_glc_vmem_load | has_glc_vmem_store;
+ break;
+ default:
+ continue;
+ }
+
+ uint8_t *flags_ptr;
+ uint32_t count;
+ get_buffer_resource_flags(ctx, res, access, &flags_ptr, &count);
+
+ for (unsigned i = 0; i < count; i++)
+ flags_ptr[i] |= flags;
+ }
+ }
+}
+
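+/* If 'ssa' is an iadd used as a memory offset, try to prove via range analysis
+ * that the addition cannot wrap around 2^32 and mark it no_unsigned_wrap.
+ * Illustrative example: for 'base + 16' where 'base' is proven to be at most
+ * 2^32 - 17, the nuw flag lets later code split the addition (e.g. fold the
+ * constant into an instruction's immediate offset) without changing the address. */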
+void apply_nuw_to_ssa(nir_shader *shader, struct hash_table *range_ht, nir_ssa_def *ssa,
+ const nir_unsigned_upper_bound_config *config)
+{
+ nir_ssa_scalar scalar;
+ scalar.def = ssa;
+ scalar.comp = 0;
+
+ if (!nir_ssa_scalar_is_alu(scalar) || nir_ssa_scalar_alu_op(scalar) != nir_op_iadd)
+ return;
+
+ nir_alu_instr *add = nir_instr_as_alu(ssa->parent_instr);
+
+ if (add->no_unsigned_wrap)
+ return;
+
+ nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
+ nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);
+
+ if (nir_ssa_scalar_is_const(src0)) {
+ nir_ssa_scalar tmp = src0;
+ src0 = src1;
+ src1 = tmp;
+ }
+
+ uint32_t src1_ub = nir_unsigned_upper_bound(shader, range_ht, src1, config);
+ add->no_unsigned_wrap = !nir_addition_might_overflow(shader, range_ht, src0, src1_ub, config);
+}
+
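+/* Apply apply_nuw_to_ssa() to the uniform offset sources of UBO, SSBO and
+ * push-constant accesses, using conservative upper bounds for subgroup size,
+ * workgroup dimensions and vertex attribute values for the range analysis. */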
+void apply_nuw_to_offsets(isel_context *ctx, nir_function_impl *impl)
+{
+ nir_unsigned_upper_bound_config config;
+ config.min_subgroup_size = 64;
+ config.max_subgroup_size = 64;
+ if (ctx->shader->info.stage == MESA_SHADER_COMPUTE && ctx->options->key.cs.subgroup_size) {
+ config.min_subgroup_size = ctx->options->key.cs.subgroup_size;
+ config.max_subgroup_size = ctx->options->key.cs.subgroup_size;
+ }
+ config.max_work_group_invocations = 2048;
+ config.max_work_group_count[0] = 65535;
+ config.max_work_group_count[1] = 65535;
+ config.max_work_group_count[2] = 65535;
+ config.max_work_group_size[0] = 2048;
+ config.max_work_group_size[1] = 2048;
+ config.max_work_group_size[2] = 2048;
+ for (unsigned i = 0; i < MAX_VERTEX_ATTRIBS; i++) {
+ unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[i];
+ unsigned dfmt = attrib_format & 0xf;
+ unsigned nfmt = (attrib_format >> 4) & 0x7;
+
+ uint32_t max = UINT32_MAX;
+ if (nfmt == V_008F0C_BUF_NUM_FORMAT_UNORM) {
+ max = 0x3f800000u;
+ } else if (nfmt == V_008F0C_BUF_NUM_FORMAT_UINT ||
+ nfmt == V_008F0C_BUF_NUM_FORMAT_USCALED) {
+ bool uscaled = nfmt == V_008F0C_BUF_NUM_FORMAT_USCALED;
+ /* For USCALED formats, the bound is the float bit pattern of the largest
+ * representable integer value (e.g. 0x437f0000u == 255.0f). */
+ switch (dfmt) {
+ case V_008F0C_BUF_DATA_FORMAT_8:
+ case V_008F0C_BUF_DATA_FORMAT_8_8:
+ case V_008F0C_BUF_DATA_FORMAT_8_8_8_8:
+ max = uscaled ? 0x437f0000u : UINT8_MAX;
+ break;
+ case V_008F0C_BUF_DATA_FORMAT_10_10_10_2:
+ case V_008F0C_BUF_DATA_FORMAT_2_10_10_10:
+ max = uscaled ? 0x447fc000u : 1023;
+ break;
+ case V_008F0C_BUF_DATA_FORMAT_10_11_11:
+ case V_008F0C_BUF_DATA_FORMAT_11_11_10:
+ max = uscaled ? 0x44ffe000u : 2047;
+ break;
+ case V_008F0C_BUF_DATA_FORMAT_16:
+ case V_008F0C_BUF_DATA_FORMAT_16_16:
+ case V_008F0C_BUF_DATA_FORMAT_16_16_16_16:
+ max = uscaled ? 0x477fff00u : UINT16_MAX;
+ break;
+ case V_008F0C_BUF_DATA_FORMAT_32:
+ case V_008F0C_BUF_DATA_FORMAT_32_32:
+ case V_008F0C_BUF_DATA_FORMAT_32_32_32:
+ case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
+ max = uscaled ? 0x4f800000u : UINT32_MAX;
+ break;
+ }
+ }
+ config.vertex_attrib_max[i] = max;
+ }
+
+ struct hash_table *range_ht = _mesa_pointer_hash_table_create(NULL);
+
+ nir_foreach_block(block, impl) {
+ nir_foreach_instr(instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+ switch (intrin->intrinsic) {
+ case nir_intrinsic_load_constant:
+ case nir_intrinsic_load_uniform:
+ case nir_intrinsic_load_push_constant:
+ if (!nir_src_is_divergent(intrin->src[0]))
+ apply_nuw_to_ssa(ctx->shader, range_ht, intrin->src[0].ssa, &config);
+ break;
+ case nir_intrinsic_load_ubo:
+ case nir_intrinsic_load_ssbo:
+ if (!nir_src_is_divergent(intrin->src[1]))
+ apply_nuw_to_ssa(ctx->shader, range_ht, intrin->src[1].ssa, &config);
+ break;
+ case nir_intrinsic_store_ssbo:
+ if (!nir_src_is_divergent(intrin->src[2]))
+ apply_nuw_to_ssa(ctx->shader, range_ht, intrin->src[2].ssa, &config);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ _mesa_hash_table_destroy(range_ht, NULL);
+}
+
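+/* Map a NIR value's register type, component count and bit size to an ACO
+ * RegClass. 1-bit booleans become one lane mask per component; everything else
+ * is sized in bytes. */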
+RegClass get_reg_class(isel_context *ctx, RegType type, unsigned components, unsigned bitsize)
+{
+ if (bitsize == 1)
+ return RegClass(RegType::sgpr, ctx->program->lane_mask.size() * components);
+ else
+ return RegClass::get(type, components * bitsize / 8u);