 * creating duplicate variants.
*/
- if (ir3_key_lowers_nir(&so->key)) {
- nir_shader *s = nir_shader_clone(ctx, so->shader->nir);
- ctx->s = ir3_optimize_nir(so->shader, s, &so->key);
- } else {
- /* fast-path for shader key that lowers nothing in NIR: */
- ctx->s = nir_shader_clone(ctx, so->shader->nir);
- }
+ ctx->s = nir_shader_clone(ctx, so->shader->nir);
+ if (ir3_key_lowers_nir(&so->key))
+ ir3_optimize_nir(so->shader, ctx->s, &so->key);
/* this needs to be the last pass run, so do this here instead of
* in ir3_optimize_nir():
*/
- NIR_PASS_V(ctx->s, nir_lower_bool_to_int32);
- NIR_PASS_V(ctx->s, nir_lower_locals_to_regs);
- NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);
+ NIR_PASS_V(ctx->s, nir_lower_bool_to_bitsize);
+ bool progress = false;
+ NIR_PASS(progress, ctx->s, nir_lower_locals_to_regs);
+
+ /* we may need cleanup after lower_locals_to_regs */
+ while (progress) {
+ progress = false;
+ NIR_PASS(progress, ctx->s, nir_opt_algebraic);
+ NIR_PASS(progress, ctx->s, nir_opt_constant_folding);
+ }
- if (ir3_shader_debug & IR3_DBG_DISASM) {
- DBG("dump nir%dv%d: type=%d, k={cts=%u,hp=%u}",
- so->shader->id, so->id, so->type,
- so->key.color_two_side, so->key.half_precision);
- nir_print_shader(ctx->s, stdout);
+ /* We want to lower nir_op_imul as late as possible, so that we also
+ * catch those generated by earlier passes (e.g., nir_lower_locals_to_regs).
+ * However, we want a final swing of a few passes to have a chance
+ * at optimizing the result.
+ */
+ progress = false;
+ NIR_PASS(progress, ctx->s, ir3_nir_lower_imul);
+ while (progress) {
+ progress = false;
+ NIR_PASS(progress, ctx->s, nir_opt_algebraic);
+ NIR_PASS(progress, ctx->s, nir_opt_copy_prop_vars);
+ NIR_PASS(progress, ctx->s, nir_opt_dead_write_vars);
+ NIR_PASS(progress, ctx->s, nir_opt_dce);
+ NIR_PASS(progress, ctx->s, nir_opt_constant_folding);
}
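
For reference, a standalone sketch of the arithmetic a late imul lowering like this can rely on, assuming a split into 16-bit partial products (an illustration of the identity only, not the actual ir3_nir_lower_imul pass):

#include <assert.h>
#include <stdint.h>

/* 32x32->32 multiply built from 16-bit halves: the high*high partial
 * product only affects bits >= 32 and can be dropped entirely. */
static uint32_t mul32_from_16(uint32_t a, uint32_t b)
{
	uint32_t al = a & 0xffff, ah = a >> 16;
	uint32_t bl = b & 0xffff, bh = b >> 16;
	return al * bl + ((ah * bl + al * bh) << 16);
}

int main(void)
{
	assert(mul32_from_16(123456789u, 987654321u) ==
	       123456789u * 987654321u);
	return 0;
}
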
+ /* Enable the texture pre-fetch feature only on a4xx onwards. But
+ * only enable it on generations that have been tested:
+ */
+ if ((so->type == MESA_SHADER_FRAGMENT) && (compiler->gpu_id >= 600))
+ NIR_PASS_V(ctx->s, ir3_nir_lower_tex_prefetch);
+
+ NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);
+
if (shader_debug_enabled(so->type)) {
- fprintf(stderr, "NIR (final form) for %s shader:\n",
- _mesa_shader_stage_to_string(so->type));
- nir_print_shader(ctx->s, stderr);
+ fprintf(stdout, "NIR (final form) for %s shader %s:\n",
+ ir3_shader_stage(so), so->shader->nir->info.name);
+ nir_print_shader(ctx->s, stdout);
}
ir3_ibo_mapping_init(&so->image_mapping, ctx->s->info.num_textures);
ralloc_array(ctx, struct ir3_instruction *, num_components);
if (src->reg.indirect)
- addr = ir3_get_addr(ctx, ir3_get_src(ctx, src->reg.indirect)[0],
+ addr = ir3_get_addr0(ctx, ir3_get_src(ctx, src->reg.indirect)[0],
reg->num_components);
for (unsigned i = 0; i < num_components; i++) {
for (unsigned i = 0; i < ctx->last_dst_n; i++) {
struct ir3_instruction *dst = ctx->last_dst[i];
dst->regs[0]->flags |= IR3_REG_HALF;
- if (ctx->last_dst[i]->opc == OPC_META_FO)
+ if (ctx->last_dst[i]->opc == OPC_META_SPLIT)
dst->regs[1]->instr->regs[0]->flags |= IR3_REG_HALF;
}
}
struct ir3_instruction *addr = NULL;
if (dst->reg.indirect)
- addr = ir3_get_addr(ctx, ir3_get_src(ctx, dst->reg.indirect)[0],
+ addr = ir3_get_addr0(ctx, ir3_get_src(ctx, dst->reg.indirect)[0],
reg->num_components);
for (unsigned i = 0; i < num_components; i++) {
ctx->last_dst_n = 0;
}
+static unsigned
+dest_flags(struct ir3_instruction *instr)
+{
+ return instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH);
+}
+
struct ir3_instruction *
ir3_create_collect(struct ir3_context *ctx, struct ir3_instruction *const *arr,
unsigned arrsz)
if (arrsz == 0)
return NULL;
- unsigned flags = arr[0]->regs[0]->flags & IR3_REG_HALF;
+ unsigned flags = dest_flags(arr[0]);
- collect = ir3_instr_create2(block, OPC_META_FI, 1 + arrsz);
- ir3_reg_create(collect, 0, flags); /* dst */
+ collect = ir3_instr_create2(block, OPC_META_COLLECT, 1 + arrsz);
+ __ssa_dst(collect)->flags |= flags;
for (unsigned i = 0; i < arrsz; i++) {
struct ir3_instruction *elem = arr[i];
elem = ir3_MOV(block, elem, type);
}
- compile_assert(ctx, (elem->regs[0]->flags & IR3_REG_HALF) == flags);
- ir3_reg_create(collect, 0, IR3_REG_SSA | flags)->instr = elem;
+ compile_assert(ctx, dest_flags(elem) == flags);
+ __ssa_src(collect, elem, flags);
}
collect->regs[0]->wrmask = MASK(arrsz);
}
/* helper for instructions that produce multiple consecutive scalar
- * outputs which need to have a split/fanout meta instruction inserted
+ * outputs which need to have a split meta instruction inserted
*/
void
ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
return;
}
- unsigned flags = src->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH);
+ if (src->opc == OPC_META_COLLECT) {
+ debug_assert((base + n) < src->regs_count);
+
+ for (int i = 0; i < n; i++) {
+ dst[i] = ssa(src->regs[i + base + 1]);
+ }
+
+ return;
+ }
+
+ unsigned flags = dest_flags(src);
for (int i = 0, j = 0; i < n; i++) {
- struct ir3_instruction *split = ir3_instr_create(block, OPC_META_FO);
- ir3_reg_create(split, 0, IR3_REG_SSA | flags);
- ir3_reg_create(split, 0, IR3_REG_SSA | flags)->instr = src;
- split->fo.off = i + base;
+ struct ir3_instruction *split =
+ ir3_instr_create(block, OPC_META_SPLIT);
+ __ssa_dst(split)->flags |= flags;
+ __ssa_src(split, src, flags);
+ split->split.off = i + base;
if (prev) {
split->cp.left = prev;
}
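
A self-contained model of the collect/split shortcut above, with hypothetical stand-in types rather than ir3's: when a split reads the result of a collect, the collect's original sources can be handed back directly and no OPC_META_SPLIT instructions need to be emitted.

#include <assert.h>
#include <stddef.h>

struct val { int id; };

/* stand-in for OPC_META_COLLECT: remembers the values it gathered */
struct collect { struct val *srcs[8]; size_t nsrcs; };

/* stand-in for the fast path added to ir3_split_dest() */
static void split_dest(struct val **dst, const struct collect *c,
		       size_t base, size_t n)
{
	assert(base + n <= c->nsrcs);
	for (size_t i = 0; i < n; i++)
		dst[i] = c->srcs[base + i];	/* reuse, don't re-split */
}

int main(void)
{
	struct val a = {0}, b = {1}, c = {2}, d = {3};
	struct collect col = { { &a, &b, &c, &d }, 4 };
	struct val *out[2];
	split_dest(out, &col, 1, 2);
	assert(out[0] == &b && out[1] == &c);
	return 0;
}
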
static struct ir3_instruction *
-create_addr(struct ir3_block *block, struct ir3_instruction *src, int align)
+create_addr0(struct ir3_block *block, struct ir3_instruction *src, int align)
{
struct ir3_instruction *instr, *immed;
instr = ir3_MOV(block, instr, TYPE_S16);
instr->regs[0]->num = regid(REG_A0, 0);
+ instr->regs[0]->flags &= ~IR3_REG_SSA;
instr->regs[0]->flags |= IR3_REG_HALF;
instr->regs[1]->flags |= IR3_REG_HALF;
return instr;
}
+static struct ir3_instruction *
+create_addr1(struct ir3_block *block, unsigned const_val)
+{
+ struct ir3_instruction *immed = create_immed(block, const_val);
+ struct ir3_instruction *instr = ir3_MOV(block, immed, TYPE_S16);
+ instr->regs[0]->num = regid(REG_A0, 1);
+ instr->regs[0]->flags &= ~IR3_REG_SSA;
+ instr->regs[0]->flags |= IR3_REG_HALF;
+ instr->regs[1]->flags |= IR3_REG_HALF;
+ return instr;
+}
+
/* caches addr values to avoid generating multiple cov/shl/mova
 * sequences for each use of a given NIR-level src as an address
 */
struct ir3_instruction *
-ir3_get_addr(struct ir3_context *ctx, struct ir3_instruction *src, int align)
+ir3_get_addr0(struct ir3_context *ctx, struct ir3_instruction *src, int align)
{
struct ir3_instruction *addr;
unsigned idx = align - 1;
- compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr_ht));
+ compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr0_ht));
- if (!ctx->addr_ht[idx]) {
- ctx->addr_ht[idx] = _mesa_hash_table_create(ctx,
+ if (!ctx->addr0_ht[idx]) {
+ ctx->addr0_ht[idx] = _mesa_hash_table_create(ctx,
_mesa_hash_pointer, _mesa_key_pointer_equal);
} else {
struct hash_entry *entry;
- entry = _mesa_hash_table_search(ctx->addr_ht[idx], src);
+ entry = _mesa_hash_table_search(ctx->addr0_ht[idx], src);
if (entry)
return entry->data;
}
- addr = create_addr(ctx->block, src, align);
- _mesa_hash_table_insert(ctx->addr_ht[idx], src, addr);
+ addr = create_addr0(ctx->block, src, align);
+ _mesa_hash_table_insert(ctx->addr0_ht[idx], src, addr);
+
+ return addr;
+}
+
+/* Similar to ir3_get_addr0, but for a1.x. */
+struct ir3_instruction *
+ir3_get_addr1(struct ir3_context *ctx, unsigned const_val)
+{
+ struct ir3_instruction *addr;
+
+ if (!ctx->addr1_ht) {
+ ctx->addr1_ht = _mesa_hash_table_u64_create(ctx);
+ } else {
+ addr = _mesa_hash_table_u64_search(ctx->addr1_ht, const_val);
+ if (addr)
+ return addr;
+ }
+
+ addr = create_addr1(ctx->block, const_val);
+ _mesa_hash_table_u64_insert(ctx->addr1_ht, const_val, addr);
return addr;
}
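
The two helpers above are plain memoization: a0.x values are cached per alignment, keyed by the source instruction pointer, and a1.x values are keyed by the immediate. A toy standalone version of the same pattern, using a linear array in place of Mesa's _mesa_hash_table:

#include <assert.h>
#include <stddef.h>

struct entry { const void *key; int addr; };

static struct entry cache[64];
static size_t cache_n;
static int next_addr;

/* return the cached address for key, creating it on first use */
static int get_addr(const void *key)
{
	for (size_t i = 0; i < cache_n; i++)
		if (cache[i].key == key)
			return cache[i].addr;	/* hit: reuse */
	int addr = next_addr++;			/* miss: "create_addr0" */
	cache[cache_n++] = (struct entry){ key, addr };
	return addr;
}

int main(void)
{
	int x, y;
	assert(get_addr(&x) == get_addr(&x));	/* same src -> same addr */
	assert(get_addr(&x) != get_addr(&y));	/* different src */
	return 0;
}
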
/* condition always goes in predicate register: */
cond->regs[0]->num = regid(REG_P0, 0);
+ cond->regs[0]->flags &= ~IR3_REG_SSA;
return cond;
}
struct ir3_array *
ir3_get_array(struct ir3_context *ctx, nir_register *reg)
{
- list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
+ foreach_array (arr, &ctx->ir->array_list) {
if (arr->r == reg)
return arr;
}
mov->cat1.src_type = TYPE_U16;
mov->cat1.dst_type = TYPE_U16;
flags |= IR3_REG_HALF;
+ arr->half = true;
} else {
mov->cat1.src_type = TYPE_U32;
mov->cat1.dst_type = TYPE_U32;
mov->barrier_class = IR3_BARRIER_ARRAY_R;
mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
- ir3_reg_create(mov, 0, flags);
+ __ssa_dst(mov)->flags |= flags;
src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
COND(address, IR3_REG_RELATIV) | flags);
src->instr = arr->last_write;
/* if not relative store, don't create an extra mov, since that
* ends up being difficult for cp to remove.
+ *
+ * Also, don't skip the mov if the src is meta (like fanout/split),
+ * since that creates a situation that RA can't really handle properly.
*/
- if (!address) {
+ if (!address && !is_meta(src)) {
dst = src->regs[0];
src->barrier_class |= IR3_BARRIER_ARRAY_W;