X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Ffreedreno%2Fir3%2Fir3_compiler_nir.c;h=5f55596bb6382bf07b2788a0dc50ed13d30d43c5;hb=0e51082cfa733b3b8255bbd77fc4af46f4108c1d;hp=2d3668804da19147cc7625a5aa9e3fdd2dc9c12b;hpb=7b2166785a98a4fa8492fe1f99d1707919945a8e;p=mesa.git diff --git a/src/freedreno/ir3/ir3_compiler_nir.c b/src/freedreno/ir3/ir3_compiler_nir.c index 2d3668804da..5f55596bb63 100644 --- a/src/freedreno/ir3/ir3_compiler_nir.c +++ b/src/freedreno/ir3/ir3_compiler_nir.c @@ -62,7 +62,7 @@ create_indirect_load(struct ir3_context *ctx, unsigned arrsz, int n, } static struct ir3_instruction * -create_input_compmask(struct ir3_context *ctx, unsigned n, unsigned compmask) +create_input(struct ir3_context *ctx, unsigned compmask) { struct ir3_instruction *in; @@ -70,13 +70,9 @@ create_input_compmask(struct ir3_context *ctx, unsigned n, unsigned compmask) in->input.sysval = ~0; __ssa_dst(in)->wrmask = compmask; - return in; -} + array_insert(ctx->ir, ctx->ir->inputs, in); -static struct ir3_instruction * -create_input(struct ir3_context *ctx, unsigned n) -{ - return create_input_compmask(ctx, n, 0x1); + return in; } static struct ir3_instruction * @@ -111,48 +107,13 @@ create_driver_param(struct ir3_context *ctx, enum ir3_driver_param dp) } /* - * Adreno uses uint rather than having dedicated bool type, - * which (potentially) requires some conversion, in particular - * when using output of an bool instr to int input, or visa - * versa. - * - * | Adreno | NIR | - * -------+---------+-------+- - * true | 1 | ~0 | - * false | 0 | 0 | - * - * To convert from an adreno bool (uint) to nir, use: - * - * absneg.s dst, (neg)src - * - * To convert back in the other direction: - * - * absneg.s dst, (abs)arc - * - * The CP step can clean up the absneg.s that cancel each other - * out, and with a slight bit of extra cleverness (to recognize - * the instructions which produce either a 0 or 1) can eliminate - * the absneg.s's completely when an instruction that wants - * 0/1 consumes the result. For example, when a nir 'bcsel' - * consumes the result of 'feq'. So we should be able to get by - * without a boolean resolve step, and without incuring any - * extra penalty in instruction count. + * Adreno's comparisons produce a 1 for true and 0 for false, in either 16 or + * 32-bit registers. We use NIR's 1-bit integers to represent bools, and + * trust that we will only see and/or/xor on those 1-bit values, so we can + * safely store NIR i1s in a 32-bit reg while always containing either a 1 or + * 0. 
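+ *
+ * For example (SSA and register names illustrative), a NIR sequence like
+ *
+ *    %b = flt %x, %y
+ *    %c = iand %b, %a
+ *
+ * can become, roughly:
+ *
+ *    cmps.f.lt  Rb, Rx, Ry    ; writes 1 or 0
+ *    and.b      Rc, Rb, Ra    ; result stays 1 or 0
+ *
+ * with no boolean resolve step needed in between.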
*/ -/* NIR bool -> native (adreno): */ -static struct ir3_instruction * -ir3_b2n(struct ir3_block *block, struct ir3_instruction *instr) -{ - return ir3_ABSNEG_S(block, instr, IR3_REG_SABS); -} - -/* native (adreno) -> NIR bool: */ -static struct ir3_instruction * -ir3_n2b(struct ir3_block *block, struct ir3_instruction *instr) -{ - return ir3_ABSNEG_S(block, instr, IR3_REG_SNEG); -} - /* * alu/sfu instructions: */ @@ -226,6 +187,14 @@ create_cov(struct ir3_context *ctx, struct ir3_instruction *src, } break; + case nir_op_b2f16: + case nir_op_b2f32: + case nir_op_b2i8: + case nir_op_b2i16: + case nir_op_b2i32: + src_type = TYPE_U32; + break; + default: ir3_context_error(ctx, "invalid conversion op: %u", op); } @@ -234,30 +203,34 @@ create_cov(struct ir3_context *ctx, struct ir3_instruction *src, case nir_op_f2f32: case nir_op_i2f32: case nir_op_u2f32: + case nir_op_b2f32: dst_type = TYPE_F32; break; case nir_op_f2f16_rtne: case nir_op_f2f16_rtz: case nir_op_f2f16: - /* TODO how to handle rounding mode? */ case nir_op_i2f16: case nir_op_u2f16: + case nir_op_b2f16: dst_type = TYPE_F16; break; case nir_op_f2i32: case nir_op_i2i32: + case nir_op_b2i32: dst_type = TYPE_S32; break; case nir_op_f2i16: case nir_op_i2i16: + case nir_op_b2i16: dst_type = TYPE_S16; break; case nir_op_f2i8: case nir_op_i2i8: + case nir_op_b2i8: dst_type = TYPE_S8; break; @@ -280,7 +253,16 @@ create_cov(struct ir3_context *ctx, struct ir3_instruction *src, ir3_context_error(ctx, "invalid conversion op: %u", op); } - return ir3_COV(ctx->block, src, src_type, dst_type); + if (src_type == dst_type) + return src; + + struct ir3_instruction *cov = + ir3_COV(ctx->block, src, src_type, dst_type); + + if (op == nir_op_f2f16_rtne) + cov->regs[0]->flags |= IR3_REG_EVEN; + + return cov; } static void @@ -291,7 +273,7 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) unsigned bs[info->num_inputs]; /* bit size */ struct ir3_block *b = ctx->block; unsigned dst_sz, wrmask; - type_t dst_type = nir_dest_bit_size(alu->dest.dest) < 32 ? + type_t dst_type = nir_dest_bit_size(alu->dest.dest) == 16 ? TYPE_U16 : TYPE_U32; if (alu->dest.dest.is_ssa) { @@ -382,33 +364,52 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) case nir_op_u2u32: case nir_op_u2u16: case nir_op_u2u8: + case nir_op_b2f16: + case nir_op_b2f32: + case nir_op_b2i8: + case nir_op_b2i16: + case nir_op_b2i32: dst[0] = create_cov(ctx, src[0], bs[0], alu->op); break; + case nir_op_fquantize2f16: dst[0] = create_cov(ctx, create_cov(ctx, src[0], 32, nir_op_f2f16), 16, nir_op_f2f32); break; - case nir_op_f2b32: - dst[0] = ir3_CMPS_F(b, src[0], 0, create_immed(b, fui(0.0)), 0); + case nir_op_f2b1: + dst[0] = ir3_CMPS_F(b, + src[0], 0, + create_immed_typed(b, 0, bs[0] == 16 ? TYPE_F16 : TYPE_F32), 0); dst[0]->cat2.condition = IR3_COND_NE; - dst[0] = ir3_n2b(b, dst[0]); - break; - case nir_op_b2f16: - dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F16); - break; - case nir_op_b2f32: - dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F32); - break; - case nir_op_b2i8: - case nir_op_b2i16: - case nir_op_b2i32: - dst[0] = ir3_b2n(b, src[0]); break; - case nir_op_i2b32: + + case nir_op_i2b1: + /* i2b1 will appear when translating from nir_load_ubo or + * nir_intrinsic_load_ssbo, where any non-zero value is true. 
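+	 * The cmps.s.ne against zero below normalizes any such non-zero
+	 * value (e.g. a 0/~0 bool read back from memory) to the 1/0
+	 * convention described at the top of this file.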
+ */ dst[0] = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0); dst[0]->cat2.condition = IR3_COND_NE; - dst[0] = ir3_n2b(b, dst[0]); + break; + + case nir_op_b2b1: + /* b2b1 will appear when translating from + * + * - nir_intrinsic_load_shared of a 32-bit 0/~0 value. + * - nir_intrinsic_load_constant of a 32-bit 0/~0 value + * + * A negate can turn those into a 1 or 0 for us. + */ + dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG); + break; + + case nir_op_b2b32: + /* b2b32 will appear when converting our 1-bit bools to a store_shared + * argument. + * + * A negate can turn those into a ~0 for us. + */ + dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG); break; case nir_op_fneg: @@ -461,31 +462,35 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) dst[0] = ir3_DSX(b, src[0], 0); dst[0]->cat5.type = TYPE_F32; break; + case nir_op_fddx_fine: + dst[0] = ir3_DSXPP_1(b, src[0], 0); + dst[0]->cat5.type = TYPE_F32; + break; case nir_op_fddy: case nir_op_fddy_coarse: dst[0] = ir3_DSY(b, src[0], 0); dst[0]->cat5.type = TYPE_F32; break; break; - case nir_op_flt32: + case nir_op_fddy_fine: + dst[0] = ir3_DSYPP_1(b, src[0], 0); + dst[0]->cat5.type = TYPE_F32; + break; + case nir_op_flt: dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_LT; - dst[0] = ir3_n2b(b, dst[0]); break; - case nir_op_fge32: + case nir_op_fge: dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_GE; - dst[0] = ir3_n2b(b, dst[0]); break; - case nir_op_feq32: + case nir_op_feq: dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_EQ; - dst[0] = ir3_n2b(b, dst[0]); break; - case nir_op_fne32: + case nir_op_fne: dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_NE; - dst[0] = ir3_n2b(b, dst[0]); break; case nir_op_fceil: dst[0] = ir3_CEIL_F(b, src[0], 0); @@ -562,7 +567,11 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG); break; case nir_op_inot: - dst[0] = ir3_NOT_B(b, src[0], 0); + if (bs[0] == 1) { + dst[0] = ir3_SUB_U(b, create_immed(ctx->block, 1), 0, src[0], 0); + } else { + dst[0] = ir3_NOT_B(b, src[0], 0); + } break; case nir_op_ior: dst[0] = ir3_OR_B(b, src[0], 0, src[1], 0); @@ -582,47 +591,63 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) case nir_op_ushr: dst[0] = ir3_SHR_B(b, src[0], 0, src[1], 0); break; - case nir_op_ilt32: + case nir_op_ilt: dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_LT; - dst[0] = ir3_n2b(b, dst[0]); break; - case nir_op_ige32: + case nir_op_ige: dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_GE; - dst[0] = ir3_n2b(b, dst[0]); break; - case nir_op_ieq32: + case nir_op_ieq: dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_EQ; - dst[0] = ir3_n2b(b, dst[0]); break; - case nir_op_ine32: + case nir_op_ine: dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_NE; - dst[0] = ir3_n2b(b, dst[0]); break; - case nir_op_ult32: + case nir_op_ult: dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_LT; - dst[0] = ir3_n2b(b, dst[0]); break; - case nir_op_uge32: + case nir_op_uge: dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_GE; - dst[0] = ir3_n2b(b, dst[0]); break; - case nir_op_b32csel: { - struct ir3_instruction *cond = ir3_b2n(b, src[0]); + case nir_op_bcsel: { + struct ir3_instruction *cond = src[0]; + + /* If src[0] is a negation (likely as a 
result of an ir3_b2n(cond)), + * we can ignore that and use original cond, since the nonzero-ness of + * cond stays the same. + */ + if (cond->opc == OPC_ABSNEG_S && + cond->flags == 0 && + (cond->regs[1]->flags & (IR3_REG_SNEG | IR3_REG_SABS)) == IR3_REG_SNEG) { + cond = cond->regs[1]->instr; + } + compile_assert(ctx, bs[1] == bs[2]); - /* the boolean condition is 32b even if src[1] and src[2] are - * half-precision, but sel.b16 wants all three src's to be the - * same type. + /* The condition's size has to match the other two arguments' size, so + * convert down if necessary. */ - if (bs[1] < 32) - cond = ir3_COV(b, cond, TYPE_U32, TYPE_U16); - dst[0] = ir3_SEL_B32(b, src[1], 0, cond, 0, src[2], 0); + if (bs[1] == 16) { + struct hash_entry *prev_entry = + _mesa_hash_table_search(ctx->sel_cond_conversions, src[0]); + if (prev_entry) { + cond = prev_entry->data; + } else { + cond = ir3_COV(b, cond, TYPE_U32, TYPE_U16); + _mesa_hash_table_insert(ctx->sel_cond_conversions, src[0], cond); + } + } + + if (bs[1] != 16) + dst[0] = ir3_SEL_B32(b, src[1], 0, cond, 0, src[2], 0); + else + dst[0] = ir3_SEL_B16(b, src[1], 0, cond, 0, src[2], 0); break; } case nir_op_bit_count: { @@ -679,9 +704,55 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) break; } + if (nir_alu_type_get_base_type(info->output_type) == nir_type_bool) { + assert(nir_dest_bit_size(alu->dest.dest) == 1 || + alu->op == nir_op_b2b32); + assert(dst_sz == 1); + } else { + /* 1-bit values stored in 32-bit registers are only valid for certain + * ALU ops. + */ + switch (alu->op) { + case nir_op_iand: + case nir_op_ior: + case nir_op_ixor: + case nir_op_inot: + case nir_op_bcsel: + break; + default: + compile_assert(ctx, nir_dest_bit_size(alu->dest.dest) != 1); + } + } + ir3_put_dst(ctx, &alu->dest.dest); } +static void +emit_intrinsic_load_ubo_ldc(struct ir3_context *ctx, nir_intrinsic_instr *intr, + struct ir3_instruction **dst) +{ + struct ir3_block *b = ctx->block; + + unsigned ncomp = intr->num_components; + struct ir3_instruction *offset = ir3_get_src(ctx, &intr->src[1])[0]; + struct ir3_instruction *idx = ir3_get_src(ctx, &intr->src[0])[0]; + struct ir3_instruction *ldc = ir3_LDC(b, idx, 0, offset, 0); + ldc->regs[0]->wrmask = MASK(ncomp); + ldc->cat6.iim_val = ncomp; + ldc->cat6.d = nir_intrinsic_base(intr); + ldc->cat6.type = TYPE_U32; + + nir_intrinsic_instr *bindless = ir3_bindless_resource(intr->src[0]); + if (bindless) { + ldc->flags |= IR3_INSTR_B; + ldc->cat6.base = nir_intrinsic_desc_set(bindless); + ctx->so->bindless_ubo = true; + } + + ir3_split_dest(b, dst, ldc, 0, ncomp); +} + + /* handles direct/indirect UBO reads: */ static void emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr, @@ -705,8 +776,8 @@ emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr, base_lo = create_uniform(b, ubo + (src0->regs[1]->iim_val * ptrsz)); base_hi = create_uniform(b, ubo + (src0->regs[1]->iim_val * ptrsz) + 1); } else { - base_lo = create_uniform_indirect(b, ubo, ir3_get_addr(ctx, src0, ptrsz)); - base_hi = create_uniform_indirect(b, ubo + 1, ir3_get_addr(ctx, src0, ptrsz)); + base_lo = create_uniform_indirect(b, ubo, ir3_get_addr0(ctx, src0, ptrsz)); + base_hi = create_uniform_indirect(b, ubo + 1, ir3_get_addr0(ctx, src0, ptrsz)); /* NOTE: since relative addressing is used, make sure constlen is * at least big enough to cover all the UBO addresses, since the @@ -995,23 +1066,102 @@ emit_intrinsic_atomic_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr) return atomic; } 
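+/* How a texture/sampler pair reaches the hardware depends on what is
+ * known at compile time; roughly, the helpers below choose between:
+ *
+ *  - constant tex/samp indices < 16: packed directly into the
+ *    instruction (base + combined_idx),
+ *  - constant indices < 256: the overflow carried in a1.x (IR3_INSTR_A1EN),
+ *  - anything else: a vec2 collect consumed via s2en (IR3_INSTR_S2EN).
+ */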
+struct tex_src_info { + /* For prefetch */ + unsigned tex_base, samp_base, tex_idx, samp_idx; + /* For normal tex instructions */ + unsigned base, combined_idx, a1_val, flags; + struct ir3_instruction *samp_tex; +}; + /* TODO handle actual indirect/dynamic case.. which is going to be weird * to handle with the image_mapping table.. */ -static struct ir3_instruction * +static struct tex_src_info get_image_samp_tex_src(struct ir3_context *ctx, nir_intrinsic_instr *intr) { - unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0])); - unsigned tex_idx = ir3_image_to_tex(&ctx->so->image_mapping, slot); - struct ir3_instruction *texture, *sampler; + struct ir3_block *b = ctx->block; + struct tex_src_info info = { 0 }; + nir_intrinsic_instr *bindless_tex = ir3_bindless_resource(intr->src[0]); + ctx->so->bindless_tex = true; + + if (bindless_tex) { + /* Bindless case */ + info.flags |= IR3_INSTR_B; + + /* Gather information required to determine which encoding to + * choose as well as for prefetch. + */ + info.tex_base = nir_intrinsic_desc_set(bindless_tex); + bool tex_const = nir_src_is_const(bindless_tex->src[0]); + if (tex_const) + info.tex_idx = nir_src_as_uint(bindless_tex->src[0]); + info.samp_idx = 0; + + /* Choose encoding. */ + if (tex_const && info.tex_idx < 256) { + if (info.tex_idx < 16) { + /* Everything fits within the instruction */ + info.base = info.tex_base; + info.combined_idx = info.samp_idx | (info.tex_idx << 4); + } else { + info.base = info.tex_base; + info.a1_val = info.tex_idx << 3; + info.combined_idx = 0; + info.flags |= IR3_INSTR_A1EN; + } + info.samp_tex = NULL; + } else { + info.flags |= IR3_INSTR_S2EN; + info.base = info.tex_base; + + /* Note: the indirect source is now a vec2 instead of hvec2 */ + struct ir3_instruction *texture, *sampler; + + texture = ir3_get_src(ctx, &intr->src[0])[0]; + sampler = create_immed(b, 0); + info.samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){ + texture, + sampler, + }, 2); + } + } else { + info.flags |= IR3_INSTR_S2EN; + unsigned slot = nir_src_as_uint(intr->src[0]); + unsigned tex_idx = ir3_image_to_tex(&ctx->so->image_mapping, slot); + struct ir3_instruction *texture, *sampler; - texture = create_immed_typed(ctx->block, tex_idx, TYPE_U16); - sampler = create_immed_typed(ctx->block, tex_idx, TYPE_U16); + texture = create_immed_typed(ctx->block, tex_idx, TYPE_U16); + sampler = create_immed_typed(ctx->block, tex_idx, TYPE_U16); + + info.samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){ + sampler, + texture, + }, 2); + } + + return info; +} - return ir3_create_collect(ctx, (struct ir3_instruction*[]){ - sampler, - texture, - }, 2); +static struct ir3_instruction * +emit_sam(struct ir3_context *ctx, opc_t opc, struct tex_src_info info, + type_t type, unsigned wrmask, struct ir3_instruction *src0, + struct ir3_instruction *src1) +{ + struct ir3_instruction *sam, *addr; + if (info.flags & IR3_INSTR_A1EN) { + addr = ir3_get_addr1(ctx, info.a1_val); + } + sam = ir3_SAM(ctx->block, opc, type, 0b1111, info.flags, + info.samp_tex, src0, src1); + if (info.flags & IR3_INSTR_A1EN) { + ir3_instr_set_address(sam, addr); + } + if (info.flags & IR3_INSTR_B) { + sam->cat5.tex_base = info.base; + sam->cat5.samp = info.combined_idx; + } + return sam; } /* src[] = { deref, coord, sample_index }. 
const_index[] = {} */ @@ -1020,13 +1170,12 @@ emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr, struct ir3_instruction **dst) { struct ir3_block *b = ctx->block; - const nir_variable *var = nir_intrinsic_get_var(intr, 0); - struct ir3_instruction *samp_tex = get_image_samp_tex_src(ctx, intr); + struct tex_src_info info = get_image_samp_tex_src(ctx, intr); struct ir3_instruction *sam; struct ir3_instruction * const *src0 = ir3_get_src(ctx, &intr->src[1]); struct ir3_instruction *coords[4]; - unsigned flags, ncoords = ir3_get_image_coords(var, &flags); - type_t type = ir3_get_image_type(var); + unsigned flags, ncoords = ir3_get_image_coords(intr, &flags); + type_t type = ir3_get_type_for_image_intrinsic(intr); /* hmm, this seems a bit odd, but it is what blob does and (at least * a5xx) just faults on bogus addresses otherwise: @@ -1035,6 +1184,7 @@ emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr, flags &= ~IR3_INSTR_3D; flags |= IR3_INSTR_A; } + info.flags |= flags; for (unsigned i = 0; i < ncoords; i++) coords[i] = src0[i]; @@ -1042,8 +1192,8 @@ emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr, if (ncoords == 1) coords[ncoords++] = create_immed(b, 0); - sam = ir3_SAM(b, OPC_ISAM, type, 0b1111, flags, - samp_tex, ir3_create_collect(ctx, coords, ncoords), NULL); + sam = emit_sam(ctx, OPC_ISAM, info, type, 0b1111, + ir3_create_collect(ctx, coords, ncoords), NULL); sam->barrier_class = IR3_BARRIER_IMAGE_R; sam->barrier_conflict = IR3_BARRIER_IMAGE_W; @@ -1056,14 +1206,15 @@ emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr, struct ir3_instruction **dst) { struct ir3_block *b = ctx->block; - const nir_variable *var = nir_intrinsic_get_var(intr, 0); - struct ir3_instruction *samp_tex = get_image_samp_tex_src(ctx, intr); + struct tex_src_info info = get_image_samp_tex_src(ctx, intr); struct ir3_instruction *sam, *lod; - unsigned flags, ncoords = ir3_get_image_coords(var, &flags); + unsigned flags, ncoords = ir3_get_image_coords(intr, &flags); + type_t dst_type = nir_dest_bit_size(intr->dest) == 16 ? + TYPE_U16 : TYPE_U32; + info.flags |= flags; lod = create_immed(b, 0); - sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags, - samp_tex, lod, NULL); + sam = emit_sam(ctx, OPC_GETSIZE, info, dst_type, 0b1111, lod, NULL); /* Array size actually ends up in .w rather than .z. This doesn't * matter for miplevel 0, but for higher mips the value in z is @@ -1085,9 +1236,7 @@ emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr, * * TODO: This is at least true on a5xx. Check other gens. */ - enum glsl_sampler_dim dim = - glsl_get_sampler_dim(glsl_without_array(var->type)); - if (dim == GLSL_SAMPLER_DIM_BUF) { + if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF) { /* Since all the possible values the divisor can take are * power-of-two (4, 8, or 16), the division is implemented * as a shift-right. 
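+	 * The shift amount itself is not computed here; it is read from the
+	 * image_dims driver-param constants (the create_uniform(b, cb + 1)
+	 * just below).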
@@ -1097,7 +1246,7 @@ emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr, */ struct ir3_const_state *const_state = &ctx->so->shader->const_state; unsigned cb = regid(const_state->offsets.image_dims, 0) + - const_state->image_dims.off[var->data.driver_location]; + const_state->image_dims.off[nir_src_as_uint(intr->src[0])]; struct ir3_instruction *aux = create_uniform(b, cb + 1); tmp[0] = ir3_SHR_B(b, tmp[0], 0, aux, 0); @@ -1122,7 +1271,7 @@ emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr) struct ir3_instruction *barrier; switch (intr->intrinsic) { - case nir_intrinsic_barrier: + case nir_intrinsic_control_barrier: barrier = ir3_BAR(b); barrier->cat7.g = true; barrier->cat7.l = true; @@ -1141,7 +1290,6 @@ emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr) IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W | IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W; break; - case nir_intrinsic_memory_barrier_atomic_counter: case nir_intrinsic_memory_barrier_buffer: barrier = ir3_FENCE(b); barrier->cat7.g = true; @@ -1198,27 +1346,27 @@ static void add_sysval_input_compmask(struct ir3_context *ctx, struct ir3_instruction *instr) { struct ir3_shader_variant *so = ctx->so; - unsigned r = regid(so->inputs_count, 0); unsigned n = so->inputs_count++; assert(instr->opc == OPC_META_INPUT); + instr->input.inidx = n; instr->input.sysval = slot; so->inputs[n].sysval = true; so->inputs[n].slot = slot; so->inputs[n].compmask = compmask; - so->inputs[n].regid = r; so->inputs[n].interpolate = INTERP_MODE_FLAT; so->total_in++; - - ctx->ir->ninputs = MAX2(ctx->ir->ninputs, r + 1); - ctx->ir->inputs[r] = instr; } -static void add_sysval_input(struct ir3_context *ctx, gl_system_value slot, - struct ir3_instruction *instr) +static struct ir3_instruction * +create_sysval_input(struct ir3_context *ctx, gl_system_value slot, + unsigned compmask) { - add_sysval_input_compmask(ctx, slot, 0x1, instr); + assert(compmask); + struct ir3_instruction *sysval = create_input(ctx, compmask); + add_sysval_input_compmask(ctx, slot, compmask, sysval); + return sysval; } static struct ir3_instruction * @@ -1228,14 +1376,10 @@ get_barycentric_centroid(struct ir3_context *ctx) struct ir3_instruction *xy[2]; struct ir3_instruction *ij; - ij = create_input_compmask(ctx, 0, 0x3); + ij = create_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID, 0x3); ir3_split_dest(ctx->block, xy, ij, 0, 2); ctx->ij_centroid = ir3_create_collect(ctx, xy, 2); - - add_sysval_input_compmask(ctx, - SYSTEM_VALUE_BARYCENTRIC_CENTROID, - 0x3, ij); } return ctx->ij_centroid; @@ -1248,14 +1392,10 @@ get_barycentric_sample(struct ir3_context *ctx) struct ir3_instruction *xy[2]; struct ir3_instruction *ij; - ij = create_input_compmask(ctx, 0, 0x3); + ij = create_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE, 0x3); ir3_split_dest(ctx->block, xy, ij, 0, 2); ctx->ij_sample = ir3_create_collect(ctx, xy, 2); - - add_sysval_input_compmask(ctx, - SYSTEM_VALUE_BARYCENTRIC_SAMPLE, - 0x3, ij); } return ctx->ij_sample; @@ -1274,12 +1414,12 @@ static struct ir3_instruction * get_frag_coord(struct ir3_context *ctx) { if (!ctx->frag_coord) { - struct ir3_block *b = ctx->block; + struct ir3_block *b = ctx->in_block; struct ir3_instruction *xyzw[4]; struct ir3_instruction *hw_frag_coord; - hw_frag_coord = create_input_compmask(ctx, 0, 0xf); - ir3_split_dest(ctx->block, xyzw, hw_frag_coord, 0, 4); + hw_frag_coord = create_sysval_input(ctx, SYSTEM_VALUE_FRAG_COORD, 0xf); + ir3_split_dest(b, xyzw, 
hw_frag_coord, 0, 4); /* for frag_coord.xy, we get unsigned values.. we need * to subtract (integer) 8 and divide by 16 (right- @@ -1291,19 +1431,11 @@ get_frag_coord(struct ir3_context *ctx) * */ for (int i = 0; i < 2; i++) { - xyzw[i] = ir3_SUB_S(b, xyzw[i], 0, - create_immed(b, 8), 0); - xyzw[i] = ir3_SHR_B(b, xyzw[i], 0, - create_immed(b, 4), 0); xyzw[i] = ir3_COV(b, xyzw[i], TYPE_U32, TYPE_F32); + xyzw[i] = ir3_MUL_F(b, xyzw[i], 0, create_immed(b, fui(1.0 / 16.0)), 0); } ctx->frag_coord = ir3_create_collect(ctx, xyzw, 4); - - add_sysval_input_compmask(ctx, - SYSTEM_VALUE_FRAG_COORD, - 0xf, hw_frag_coord); - ctx->so->frag_coord = true; } @@ -1336,13 +1468,13 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) idx += nir_src_as_uint(intr->src[0]); for (int i = 0; i < intr->num_components; i++) { dst[i] = create_uniform_typed(b, idx + i, - nir_dest_bit_size(intr->dest) < 32 ? TYPE_F16 : TYPE_F32); + nir_dest_bit_size(intr->dest) == 16 ? TYPE_F16 : TYPE_F32); } } else { src = ir3_get_src(ctx, &intr->src[0]); for (int i = 0; i < intr->num_components; i++) { dst[i] = create_uniform_indirect(b, idx + i, - ir3_get_addr(ctx, src[0], 1)); + ir3_get_addr0(ctx, src[0], 1)); } /* NOTE: if relative addressing is used, we set * constlen in the compiler (to worst-case value) @@ -1393,9 +1525,8 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) case nir_intrinsic_load_tess_coord: if (!ctx->tess_coord) { - ctx->tess_coord = create_input_compmask(ctx, 0, 0x3); - add_sysval_input_compmask(ctx, SYSTEM_VALUE_TESS_COORD, - 0x3, ctx->tess_coord); + ctx->tess_coord = + create_sysval_input(ctx, SYSTEM_VALUE_TESS_COORD, 0x3); } ir3_split_dest(b, dst, ctx->tess_coord, 0, 2); @@ -1405,7 +1536,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) case nir_intrinsic_end_patch_ir3: assert(ctx->so->type == MESA_SHADER_TESS_CTRL); - struct ir3_instruction *end = ir3_ENDPATCH(b); + struct ir3_instruction *end = ir3_ENDIF(b); array_insert(b, b->keeps, end); end->barrier_class = IR3_BARRIER_EVERYTHING; @@ -1464,6 +1595,9 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) case nir_intrinsic_load_ubo: emit_intrinsic_load_ubo(ctx, intr, dst); break; + case nir_intrinsic_load_ubo_ir3: + emit_intrinsic_load_ubo_ldc(ctx, intr, dst); + break; case nir_intrinsic_load_frag_coord: ir3_split_dest(b, dst, get_frag_coord(ctx), 0, 4); break; @@ -1482,10 +1616,8 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) } case nir_intrinsic_load_size_ir3: if (!ctx->ij_size) { - ctx->ij_size = create_input(ctx, 0); - - add_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_SIZE, - ctx->ij_size); + ctx->ij_size = + create_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_PERSP_SIZE, 0x1); } dst[0] = ctx->ij_size; break; @@ -1519,7 +1651,8 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) * that is easier than mapping things back to a * nir_variable to figure out what it is. 
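+			 * At this point ctx->inputs[] is laid out as
+			 * (driver_location * 4) + component, which is exactly
+			 * what the inloc value encodes (pack_inlocs() only
+			 * compacts these later).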
*/ - dst[i] = ctx->ir->inputs[inloc]; + dst[i] = ctx->inputs[inloc]; + compile_assert(ctx, dst[i]); } } } else { @@ -1533,17 +1666,17 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) idx += nir_src_as_uint(intr->src[0]); for (int i = 0; i < intr->num_components; i++) { unsigned n = idx * 4 + i + comp; - dst[i] = ctx->ir->inputs[n]; - compile_assert(ctx, ctx->ir->inputs[n]); + dst[i] = ctx->inputs[n]; + compile_assert(ctx, ctx->inputs[n]); } } else { src = ir3_get_src(ctx, &intr->src[0]); struct ir3_instruction *collect = - ir3_create_collect(ctx, ctx->ir->inputs, ctx->ir->ninputs); - struct ir3_instruction *addr = ir3_get_addr(ctx, src[0], 4); + ir3_create_collect(ctx, ctx->ir->inputs, ctx->ninputs); + struct ir3_instruction *addr = ir3_get_addr0(ctx, src[0], 4); for (int i = 0; i < intr->num_components; i++) { unsigned n = idx * 4 + i + comp; - dst[i] = create_indirect_load(ctx, ctx->ir->ninputs, + dst[i] = create_indirect_load(ctx, ctx->ninputs, n, addr, collect); } } @@ -1597,37 +1730,56 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) case nir_intrinsic_shared_atomic_comp_swap: dst[0] = emit_intrinsic_atomic_shared(ctx, intr); break; - case nir_intrinsic_image_deref_load: + case nir_intrinsic_image_load: emit_intrinsic_load_image(ctx, intr, dst); break; - case nir_intrinsic_image_deref_store: + case nir_intrinsic_bindless_image_load: + /* Bindless uses the IBO state, which doesn't have swizzle filled out, + * so using isam doesn't work. + * + * TODO: can we use isam if we fill out more fields? + */ + ctx->funcs->emit_intrinsic_load_image(ctx, intr, dst); + break; + case nir_intrinsic_image_store: + case nir_intrinsic_bindless_image_store: if ((ctx->so->type == MESA_SHADER_FRAGMENT) && !ctx->s->info.fs.early_fragment_tests) ctx->so->no_earlyz = true; ctx->funcs->emit_intrinsic_store_image(ctx, intr); break; - case nir_intrinsic_image_deref_size: + case nir_intrinsic_image_size: + case nir_intrinsic_bindless_image_size: emit_intrinsic_image_size(ctx, intr, dst); break; - case nir_intrinsic_image_deref_atomic_add: - case nir_intrinsic_image_deref_atomic_imin: - case nir_intrinsic_image_deref_atomic_umin: - case nir_intrinsic_image_deref_atomic_imax: - case nir_intrinsic_image_deref_atomic_umax: - case nir_intrinsic_image_deref_atomic_and: - case nir_intrinsic_image_deref_atomic_or: - case nir_intrinsic_image_deref_atomic_xor: - case nir_intrinsic_image_deref_atomic_exchange: - case nir_intrinsic_image_deref_atomic_comp_swap: + case nir_intrinsic_image_atomic_add: + case nir_intrinsic_bindless_image_atomic_add: + case nir_intrinsic_image_atomic_imin: + case nir_intrinsic_bindless_image_atomic_imin: + case nir_intrinsic_image_atomic_umin: + case nir_intrinsic_bindless_image_atomic_umin: + case nir_intrinsic_image_atomic_imax: + case nir_intrinsic_bindless_image_atomic_imax: + case nir_intrinsic_image_atomic_umax: + case nir_intrinsic_bindless_image_atomic_umax: + case nir_intrinsic_image_atomic_and: + case nir_intrinsic_bindless_image_atomic_and: + case nir_intrinsic_image_atomic_or: + case nir_intrinsic_bindless_image_atomic_or: + case nir_intrinsic_image_atomic_xor: + case nir_intrinsic_bindless_image_atomic_xor: + case nir_intrinsic_image_atomic_exchange: + case nir_intrinsic_bindless_image_atomic_exchange: + case nir_intrinsic_image_atomic_comp_swap: + case nir_intrinsic_bindless_image_atomic_comp_swap: if ((ctx->so->type == MESA_SHADER_FRAGMENT) && !ctx->s->info.fs.early_fragment_tests) ctx->so->no_earlyz = true; dst[0] = 
ctx->funcs->emit_intrinsic_atomic_image(ctx, intr); break; - case nir_intrinsic_barrier: + case nir_intrinsic_control_barrier: case nir_intrinsic_memory_barrier: case nir_intrinsic_group_memory_barrier: - case nir_intrinsic_memory_barrier_atomic_counter: case nir_intrinsic_memory_barrier_buffer: case nir_intrinsic_memory_barrier_image: case nir_intrinsic_memory_barrier_shared: @@ -1644,32 +1796,34 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) src = ir3_get_src(ctx, &intr->src[0]); for (int i = 0; i < intr->num_components; i++) { unsigned n = idx * 4 + i + comp; - ctx->ir->outputs[n] = src[i]; + ctx->outputs[n] = src[i]; } break; case nir_intrinsic_load_base_vertex: case nir_intrinsic_load_first_vertex: if (!ctx->basevertex) { ctx->basevertex = create_driver_param(ctx, IR3_DP_VTXID_BASE); - add_sysval_input(ctx, SYSTEM_VALUE_FIRST_VERTEX, ctx->basevertex); } dst[0] = ctx->basevertex; break; + case nir_intrinsic_load_base_instance: + if (!ctx->base_instance) { + ctx->base_instance = create_driver_param(ctx, IR3_DP_INSTID_BASE); + } + dst[0] = ctx->base_instance; + break; case nir_intrinsic_load_vertex_id_zero_base: case nir_intrinsic_load_vertex_id: if (!ctx->vertex_id) { gl_system_value sv = (intr->intrinsic == nir_intrinsic_load_vertex_id) ? SYSTEM_VALUE_VERTEX_ID : SYSTEM_VALUE_VERTEX_ID_ZERO_BASE; - ctx->vertex_id = create_input(ctx, 0); - add_sysval_input(ctx, sv, ctx->vertex_id); + ctx->vertex_id = create_sysval_input(ctx, sv, 0x1); } dst[0] = ctx->vertex_id; break; case nir_intrinsic_load_instance_id: if (!ctx->instance_id) { - ctx->instance_id = create_input(ctx, 0); - add_sysval_input(ctx, SYSTEM_VALUE_INSTANCE_ID, - ctx->instance_id); + ctx->instance_id = create_sysval_input(ctx, SYSTEM_VALUE_INSTANCE_ID, 0x1); } dst[0] = ctx->instance_id; break; @@ -1678,18 +1832,14 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) /* fall-thru */ case nir_intrinsic_load_sample_id_no_per_sample: if (!ctx->samp_id) { - ctx->samp_id = create_input(ctx, 0); + ctx->samp_id = create_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_ID, 0x1); ctx->samp_id->regs[0]->flags |= IR3_REG_HALF; - add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_ID, - ctx->samp_id); } dst[0] = ir3_COV(b, ctx->samp_id, TYPE_U16, TYPE_U32); break; case nir_intrinsic_load_sample_mask_in: if (!ctx->samp_mask_in) { - ctx->samp_mask_in = create_input(ctx, 0); - add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_MASK_IN, - ctx->samp_mask_in); + ctx->samp_mask_in = create_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_MASK_IN, 0x1); } dst[0] = ctx->samp_mask_in; break; @@ -1703,29 +1853,28 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) case nir_intrinsic_load_front_face: if (!ctx->frag_face) { ctx->so->frag_face = true; - ctx->frag_face = create_input(ctx, 0); - add_sysval_input(ctx, SYSTEM_VALUE_FRONT_FACE, ctx->frag_face); + ctx->frag_face = create_sysval_input(ctx, SYSTEM_VALUE_FRONT_FACE, 0x1); ctx->frag_face->regs[0]->flags |= IR3_REG_HALF; } /* for fragface, we get -1 for back and 0 for front. However this is * the inverse of what nir expects (where ~0 is true). 
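+		 * So the cmps.s.eq against 0 below yields 1 for front and 0
+		 * for back, matching the 1/0 bool convention used throughout
+		 * this file.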
*/ - dst[0] = ir3_COV(b, ctx->frag_face, TYPE_S16, TYPE_S32); - dst[0] = ir3_NOT_B(b, dst[0], 0); + dst[0] = ir3_CMPS_S(b, + ctx->frag_face, 0, + create_immed_typed(b, 0, TYPE_U16), 0); + dst[0]->cat2.condition = IR3_COND_EQ; break; case nir_intrinsic_load_local_invocation_id: if (!ctx->local_invocation_id) { - ctx->local_invocation_id = create_input_compmask(ctx, 0, 0x7); - add_sysval_input_compmask(ctx, SYSTEM_VALUE_LOCAL_INVOCATION_ID, - 0x7, ctx->local_invocation_id); + ctx->local_invocation_id = + create_sysval_input(ctx, SYSTEM_VALUE_LOCAL_INVOCATION_ID, 0x7); } ir3_split_dest(b, dst, ctx->local_invocation_id, 0, 3); break; case nir_intrinsic_load_work_group_id: if (!ctx->work_group_id) { - ctx->work_group_id = create_input_compmask(ctx, 0, 0x7); - add_sysval_input_compmask(ctx, SYSTEM_VALUE_WORK_GROUP_ID, - 0x7, ctx->work_group_id); + ctx->work_group_id = + create_sysval_input(ctx, SYSTEM_VALUE_WORK_GROUP_ID, 0x7); ctx->work_group_id->regs[0]->flags |= IR3_REG_HIGH; } ir3_split_dest(b, dst, ctx->work_group_id, 0, 3); @@ -1747,7 +1896,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) if (intr->intrinsic == nir_intrinsic_discard_if) { /* conditional discard: */ src = ir3_get_src(ctx, &intr->src[0]); - cond = ir3_b2n(b, src[0]); + cond = src[0]; } else { /* unconditional discard: */ cond = create_immed(b, 1); @@ -1762,6 +1911,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) cond->regs[0]->flags &= ~IR3_REG_SSA; kill = ir3_KILL(b, cond, 0); + kill->regs[1]->num = regid(REG_P0, 0); array_insert(ctx->ir, ctx->ir->predicates, kill); array_insert(b, b->keeps, kill); @@ -1774,7 +1924,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) struct ir3_instruction *cond, *kill; src = ir3_get_src(ctx, &intr->src[0]); - cond = ir3_b2n(b, src[0]); + cond = src[0]; /* NOTE: only cmps.*.* can write p0.x: */ cond = ir3_CMPS_S(b, cond, 0, create_immed(b, 0), 0); @@ -1783,7 +1933,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) /* condition always goes in predicate register: */ cond->regs[0]->num = regid(REG_P0, 0); - kill = ir3_CONDEND(b, cond, 0); + kill = ir3_IF(b, cond, 0); kill->barrier_class = IR3_BARRIER_EVERYTHING; kill->barrier_conflict = IR3_BARRIER_EVERYTHING; @@ -1799,6 +1949,9 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) case nir_intrinsic_store_shared_ir3: emit_intrinsic_store_shared_ir3(ctx, intr); break; + case nir_intrinsic_bindless_resource_ir3: + dst[0] = ir3_get_src(ctx, &intr->src[0])[0]; + break; default: ir3_context_error(ctx, "Unhandled intrinsic type: %s\n", nir_intrinsic_infos[intr->intrinsic].name); @@ -1815,7 +1968,7 @@ emit_load_const(struct ir3_context *ctx, nir_load_const_instr *instr) struct ir3_instruction **dst = ir3_get_dst_ssa(ctx, &instr->def, instr->def.num_components); - if (instr->def.bit_size < 32) { + if (instr->def.bit_size == 16) { for (int i = 0; i < instr->def.num_components; i++) dst[i] = create_immed_typed(ctx->block, instr->value[i].u16, @@ -1834,7 +1987,7 @@ emit_undef(struct ir3_context *ctx, nir_ssa_undef_instr *undef) { struct ir3_instruction **dst = ir3_get_dst_ssa(ctx, &undef->def, undef->def.num_components); - type_t type = (undef->def.bit_size < 32) ? TYPE_U16 : TYPE_U32; + type_t type = (undef->def.bit_size == 16) ? TYPE_U16 : TYPE_U32; /* backend doesn't want undefined instructions, so just plug * in 0.0.. 
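+	 * (one typed immediate per component is enough, since nothing may
+	 * legitimately read an undefined value anyway.)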
@@ -1847,34 +2000,42 @@ emit_undef(struct ir3_context *ctx, nir_ssa_undef_instr *undef) * texture fetch/sample instructions: */ +static type_t +get_tex_dest_type(nir_tex_instr *tex) +{ + type_t type; + + switch (nir_alu_type_get_base_type(tex->dest_type)) { + case nir_type_invalid: + case nir_type_float: + type = nir_dest_bit_size(tex->dest) == 16 ? TYPE_F16 : TYPE_F32; + break; + case nir_type_int: + type = nir_dest_bit_size(tex->dest) == 16 ? TYPE_S16 : TYPE_S32; + break; + case nir_type_uint: + case nir_type_bool: + type = nir_dest_bit_size(tex->dest) == 16 ? TYPE_U16 : TYPE_U32; + break; + default: + unreachable("bad dest_type"); + } + + return type; +} + static void tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp) { - unsigned coords, flags = 0; + unsigned coords = glsl_get_sampler_dim_coordinate_components(tex->sampler_dim); + unsigned flags = 0; /* note: would use tex->coord_components.. except txs.. also, * since array index goes after shadow ref, we don't want to * count it: */ - switch (tex->sampler_dim) { - case GLSL_SAMPLER_DIM_1D: - case GLSL_SAMPLER_DIM_BUF: - coords = 1; - break; - case GLSL_SAMPLER_DIM_2D: - case GLSL_SAMPLER_DIM_RECT: - case GLSL_SAMPLER_DIM_EXTERNAL: - case GLSL_SAMPLER_DIM_MS: - coords = 2; - break; - case GLSL_SAMPLER_DIM_3D: - case GLSL_SAMPLER_DIM_CUBE: - coords = 3; + if (coords == 3) flags |= IR3_INSTR_3D; - break; - default: - unreachable("bad sampler_dim"); - } if (tex->is_shadow && tex->op != nir_texop_lod) flags |= IR3_INSTR_S; @@ -1890,37 +2051,135 @@ tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp) * or immediate (in which case it will get lowered later to a non .s2en * version of the tex instruction which encode tex/samp as immediates: */ -static struct ir3_instruction * +static struct tex_src_info get_tex_samp_tex_src(struct ir3_context *ctx, nir_tex_instr *tex) { - int texture_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_offset); - int sampler_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_offset); + struct ir3_block *b = ctx->block; + struct tex_src_info info = { 0 }; + int texture_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_handle); + int sampler_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_handle); struct ir3_instruction *texture, *sampler; - if (texture_idx >= 0) { - texture = ir3_get_src(ctx, &tex->src[texture_idx].src)[0]; - texture = ir3_COV(ctx->block, texture, TYPE_U32, TYPE_U16); - } else { - /* TODO what to do for dynamic case? I guess we only need the - * max index for astc srgb workaround so maybe not a problem - * to worry about if we don't enable indirect samplers for - * a4xx? + if (texture_idx >= 0 || sampler_idx >= 0) { + /* Bindless case */ + info.flags |= IR3_INSTR_B; + + /* Gather information required to determine which encoding to + * choose as well as for prefetch. */ - ctx->max_texture_index = MAX2(ctx->max_texture_index, tex->texture_index); - texture = create_immed_typed(ctx->block, tex->texture_index, TYPE_U16); - } + nir_intrinsic_instr *bindless_tex = NULL; + bool tex_const; + if (texture_idx >= 0) { + ctx->so->bindless_tex = true; + bindless_tex = ir3_bindless_resource(tex->src[texture_idx].src); + assert(bindless_tex); + info.tex_base = nir_intrinsic_desc_set(bindless_tex); + tex_const = nir_src_is_const(bindless_tex->src[0]); + if (tex_const) + info.tex_idx = nir_src_as_uint(bindless_tex->src[0]); + } else { + /* To simplify some of the logic below, assume the index is + * constant 0 when it's not enabled. 
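+		 * That way a texture without an explicit sampler handle (or
+		 * vice versa) can still take the tex_const && samp_const
+		 * encoding paths below.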
+ */ + tex_const = true; + info.tex_idx = 0; + } + nir_intrinsic_instr *bindless_samp = NULL; + bool samp_const; + if (sampler_idx >= 0) { + ctx->so->bindless_samp = true; + bindless_samp = ir3_bindless_resource(tex->src[sampler_idx].src); + assert(bindless_samp); + info.samp_base = nir_intrinsic_desc_set(bindless_samp); + samp_const = nir_src_is_const(bindless_samp->src[0]); + if (samp_const) + info.samp_idx = nir_src_as_uint(bindless_samp->src[0]); + } else { + samp_const = true; + info.samp_idx = 0; + } + + /* Choose encoding. */ + if (tex_const && samp_const && info.tex_idx < 256 && info.samp_idx < 256) { + if (info.tex_idx < 16 && info.samp_idx < 16 && + (!bindless_tex || !bindless_samp || info.tex_base == info.samp_base)) { + /* Everything fits within the instruction */ + info.base = info.tex_base; + info.combined_idx = info.samp_idx | (info.tex_idx << 4); + } else { + info.base = info.tex_base; + info.a1_val = info.tex_idx << 3 | info.samp_base; + info.combined_idx = info.samp_idx; + info.flags |= IR3_INSTR_A1EN; + } + info.samp_tex = NULL; + } else { + info.flags |= IR3_INSTR_S2EN; + /* In the indirect case, we only use a1.x to store the sampler + * base if it differs from the texture base. + */ + if (!bindless_tex || !bindless_samp || info.tex_base == info.samp_base) { + info.base = info.tex_base; + } else { + info.base = info.tex_base; + info.a1_val = info.samp_base; + info.flags |= IR3_INSTR_A1EN; + } + + /* Note: the indirect source is now a vec2 instead of hvec2, and + * for some reason the texture and sampler are swapped. + */ + struct ir3_instruction *texture, *sampler; + + if (bindless_tex) { + texture = ir3_get_src(ctx, &tex->src[texture_idx].src)[0]; + } else { + texture = create_immed(b, 0); + } - if (sampler_idx >= 0) { - sampler = ir3_get_src(ctx, &tex->src[sampler_idx].src)[0]; - sampler = ir3_COV(ctx->block, sampler, TYPE_U32, TYPE_U16); + if (bindless_samp) { + sampler = ir3_get_src(ctx, &tex->src[sampler_idx].src)[0]; + } else { + sampler = create_immed(b, 0); + } + info.samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){ + texture, + sampler, + }, 2); + } } else { - sampler = create_immed_typed(ctx->block, tex->sampler_index, TYPE_U16); - } + info.flags |= IR3_INSTR_S2EN; + texture_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_offset); + sampler_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_offset); + if (texture_idx >= 0) { + texture = ir3_get_src(ctx, &tex->src[texture_idx].src)[0]; + texture = ir3_COV(ctx->block, texture, TYPE_U32, TYPE_U16); + } else { + /* TODO what to do for dynamic case? I guess we only need the + * max index for astc srgb workaround so maybe not a problem + * to worry about if we don't enable indirect samplers for + * a4xx? 
+ */ + ctx->max_texture_index = MAX2(ctx->max_texture_index, tex->texture_index); + texture = create_immed_typed(ctx->block, tex->texture_index, TYPE_U16); + info.tex_idx = tex->texture_index; + } + + if (sampler_idx >= 0) { + sampler = ir3_get_src(ctx, &tex->src[sampler_idx].src)[0]; + sampler = ir3_COV(ctx->block, sampler, TYPE_U32, TYPE_U16); + } else { + sampler = create_immed_typed(ctx->block, tex->sampler_index, TYPE_U16); + info.samp_idx = tex->texture_index; + } - return ir3_create_collect(ctx, (struct ir3_instruction*[]){ - sampler, - texture, - }, 2); + info.samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){ + sampler, + texture, + }, 2); + } + + return info; } static void @@ -1930,6 +2189,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) struct ir3_instruction **dst, *sam, *src0[12], *src1[4]; struct ir3_instruction * const *coord, * const *off, * const *ddx, * const *ddy; struct ir3_instruction *lod, *compare, *proj, *sample_index; + struct tex_src_info info = { 0 }; bool has_bias = false, has_lod = false, has_proj = false, has_off = false; unsigned i, coords, flags, ncomp; unsigned nsrc0 = 0, nsrc1 = 0; @@ -1978,6 +2238,8 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) break; case nir_tex_src_texture_offset: case nir_tex_src_sampler_offset: + case nir_tex_src_texture_handle: + case nir_tex_src_sampler_handle: /* handled in get_tex_samp_src() */ break; default: @@ -2143,26 +2405,11 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) src1[nsrc1++] = lod; } - switch (tex->dest_type) { - case nir_type_invalid: - case nir_type_float: - type = TYPE_F32; - break; - case nir_type_int: - type = TYPE_S32; - break; - case nir_type_uint: - case nir_type_bool: - type = TYPE_U32; - break; - default: - unreachable("bad dest_type"); - } + type = get_tex_dest_type(tex); if (opc == OPC_GETLOD) type = TYPE_S32; - struct ir3_instruction *samp_tex; if (tex->op == nir_texop_txf_ms_fb) { /* only expect a single txf_ms_fb per shader: */ @@ -2170,14 +2417,15 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) compile_assert(ctx, ctx->so->type == MESA_SHADER_FRAGMENT); ctx->so->fb_read = true; - samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){ + info.samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){ create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16), create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16), }, 2); + info.flags = IR3_INSTR_S2EN; ctx->so->num_samp++; } else { - samp_tex = get_tex_samp_tex_src(ctx, tex); + info = get_tex_samp_tex_src(ctx, tex); } struct ir3_instruction *col0 = ir3_create_collect(ctx, src0, nsrc0); @@ -2190,13 +2438,18 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) sam = ir3_META_TEX_PREFETCH(b); __ssa_dst(sam)->wrmask = MASK(ncomp); /* dst */ + __ssa_src(sam, get_barycentric_pixel(ctx), 0); sam->prefetch.input_offset = ir3_nir_coord_offset(tex->src[idx].src.ssa); - sam->prefetch.tex = tex->texture_index; - sam->prefetch.samp = tex->sampler_index; + /* make sure not to add irrelevant flags like S2EN */ + sam->flags = flags | (info.flags & IR3_INSTR_B); + sam->prefetch.tex = info.tex_idx; + sam->prefetch.samp = info.samp_idx; + sam->prefetch.tex_base = info.tex_base; + sam->prefetch.samp_base = info.samp_base; } else { - sam = ir3_SAM(b, opc, type, MASK(ncomp), flags, - samp_tex, col0, col1); + info.flags |= flags; + sam = emit_sam(ctx, opc, info, type, MASK(ncomp), col0, col1); } if ((ctx->astc_srgb & (1 << tex->texture_index)) && !nir_tex_instr_is_query(tex)) { @@ -2209,8 +2462,8 
@@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) /* we need to sample the alpha separately with a non-ASTC * texture state: */ - sam = ir3_SAM(b, opc, type, 0b1000, flags, - samp_tex, col0, col1); + sam = ir3_SAM(b, opc, type, 0b1000, flags | info.flags, + info.samp_tex, col0, col1); array_insert(ctx->ir, ctx->ir->astc_srgb, sam); @@ -2240,17 +2493,17 @@ emit_tex_info(struct ir3_context *ctx, nir_tex_instr *tex, unsigned idx) { struct ir3_block *b = ctx->block; struct ir3_instruction **dst, *sam; + type_t dst_type = get_tex_dest_type(tex); + struct tex_src_info info = get_tex_samp_tex_src(ctx, tex); dst = ir3_get_dst(ctx, &tex->dest, 1); - sam = ir3_SAM(b, OPC_GETINFO, TYPE_U32, 1 << idx, 0, - get_tex_samp_tex_src(ctx, tex), NULL, NULL); + sam = emit_sam(ctx, OPC_GETINFO, info, dst_type, 1 << idx, NULL, NULL); /* even though there is only one component, since it ends * up in .y/.z/.w rather than .x, we need a split_dest() */ - if (idx) - ir3_split_dest(b, dst, sam, 0, idx + 1); + ir3_split_dest(b, dst, sam, idx, 1); /* The # of levels comes from getinfo.z. We need to add 1 to it, since * the value in TEX_CONST_0 is zero-based. @@ -2268,8 +2521,11 @@ emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex) struct ir3_instruction **dst, *sam; struct ir3_instruction *lod; unsigned flags, coords; + type_t dst_type = get_tex_dest_type(tex); + struct tex_src_info info = get_tex_samp_tex_src(ctx, tex); tex_info(tex, &flags, &coords); + info.flags |= flags; /* Actually we want the number of dimensions, not coordinates. This * distinction only matters for cubes. @@ -2279,14 +2535,12 @@ emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex) dst = ir3_get_dst(ctx, &tex->dest, 4); - compile_assert(ctx, tex->num_srcs == 1); - compile_assert(ctx, tex->src[0].src_type == nir_tex_src_lod); - - lod = ir3_get_src(ctx, &tex->src[0].src)[0]; + int lod_idx = nir_tex_instr_src_index(tex, nir_tex_src_lod); + compile_assert(ctx, lod_idx >= 0); - sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags, - get_tex_samp_tex_src(ctx, tex), lod, NULL); + lod = ir3_get_src(ctx, &tex->src[lod_idx].src)[0]; + sam = emit_sam(ctx, OPC_GETSIZE, info, dst_type, 0b1111, lod, NULL); ir3_split_dest(b, dst, sam, 0, 4); /* Array size actually ends up in .w rather than .z. 
This doesn't @@ -2414,18 +2668,23 @@ emit_block(struct ir3_context *ctx, nir_block *nblock) list_addtail(&block->node, &ctx->ir->block_list); /* re-emit addr register in each block if needed: */ - for (int i = 0; i < ARRAY_SIZE(ctx->addr_ht); i++) { - _mesa_hash_table_destroy(ctx->addr_ht[i], NULL); - ctx->addr_ht[i] = NULL; + for (int i = 0; i < ARRAY_SIZE(ctx->addr0_ht); i++) { + _mesa_hash_table_destroy(ctx->addr0_ht[i], NULL); + ctx->addr0_ht[i] = NULL; } - nir_foreach_instr(instr, nblock) { + _mesa_hash_table_u64_destroy(ctx->addr1_ht, NULL); + ctx->addr1_ht = NULL; + + nir_foreach_instr (instr, nblock) { ctx->cur_instr = instr; emit_instr(ctx, instr); ctx->cur_instr = NULL; if (ctx->error) return; } + + _mesa_hash_table_clear(ctx->sel_cond_conversions, NULL); } static void emit_cf_list(struct ir3_context *ctx, struct exec_list *list); @@ -2435,8 +2694,7 @@ emit_if(struct ir3_context *ctx, nir_if *nif) { struct ir3_instruction *condition = ir3_get_src(ctx, &nif->condition)[0]; - ctx->block->condition = - ir3_get_predicate(ctx, ir3_b2n(condition->block, condition)); + ctx->block->condition = ir3_get_predicate(ctx, condition); emit_cf_list(ctx, &nif->then_list); emit_cf_list(ctx, &nif->else_list); @@ -2466,7 +2724,7 @@ stack_pop(struct ir3_context *ctx) static void emit_cf_list(struct ir3_context *ctx, struct exec_list *list) { - foreach_list_typed(nir_cf_node, node, node, list) { + foreach_list_typed (nir_cf_node, node, node, list) { switch (node->type) { case nir_cf_node_block: emit_block(ctx, nir_cf_node_as_block(node)); @@ -2520,9 +2778,7 @@ emit_stream_out(struct ir3_context *ctx) * so that it is seen as live over the entire duration * of the shader: */ - vtxcnt = create_input(ctx, 0); - add_sysval_input(ctx, SYSTEM_VALUE_VERTEX_CNT, vtxcnt); - + vtxcnt = create_sysval_input(ctx, SYSTEM_VALUE_VERTEX_CNT, 0x1); maxvtxcnt = create_driver_param(ctx, IR3_DP_VTXCNT_MAX); /* at this point, we are at the original 'end' block, @@ -2682,7 +2938,7 @@ setup_input(struct ir3_context *ctx, nir_variable *in) return; so->inputs[n].slot = slot; - so->inputs[n].compmask = (1 << (ncomp + frac)) - 1; + so->inputs[n].compmask |= (1 << (ncomp + frac)) - 1; so->inputs_count = MAX2(so->inputs_count, n + 1); so->inputs[n].interpolate = in->data.interpolation; @@ -2699,18 +2955,6 @@ setup_input(struct ir3_context *ctx, nir_variable *in) if (slot == VARYING_SLOT_POS) { ir3_context_error(ctx, "fragcoord should be a sysval!\n"); - } else if (slot == VARYING_SLOT_PNTC) { - /* see for example st_nir_fixup_varying_slots().. this is - * maybe a bit mesa/st specific. But we need things to line - * up for this in fdN_program: - * unsigned texmask = 1 << (slot - VARYING_SLOT_VAR0); - * if (emit->sprite_coord_enable & texmask) { - * ... 
- * } - */ - so->inputs[n].slot = VARYING_SLOT_VAR8; - so->inputs[n].bary = true; - instr = create_frag_input(ctx, false, idx); } else { /* detect the special case for front/back colors where * we need to do flat vs smooth shading depending on @@ -2740,15 +2984,35 @@ setup_input(struct ir3_context *ctx, nir_variable *in) instr = create_frag_input(ctx, so->inputs[n].use_ldlv, idx); } - compile_assert(ctx, idx < ctx->ir->ninputs); + compile_assert(ctx, idx < ctx->ninputs); - ctx->ir->inputs[idx] = instr; + ctx->inputs[idx] = instr; } } else if (ctx->so->type == MESA_SHADER_VERTEX) { + struct ir3_instruction *input = NULL, *in; + struct ir3_instruction *components[4]; + unsigned mask = (1 << (ncomp + frac)) - 1; + + foreach_input (in, ctx->ir) { + if (in->input.inidx == n) { + input = in; + break; + } + } + + if (!input) { + input = create_input(ctx, mask); + input->input.inidx = n; + } else { + input->regs[0]->wrmask |= mask; + } + + ir3_split_dest(ctx->block, components, input, frac, ncomp); + for (int i = 0; i < ncomp; i++) { unsigned idx = (n * 4) + i + frac; - compile_assert(ctx, idx < ctx->ir->ninputs); - ctx->ir->inputs[idx] = create_input(ctx, idx); + compile_assert(ctx, idx < ctx->ninputs); + ctx->inputs[idx] = components[i]; } } else { ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type); @@ -2776,8 +3040,8 @@ pack_inlocs(struct ir3_context *ctx) * First Step: scan shader to find which bary.f/ldlv remain: */ - list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) { - list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) { + foreach_block (block, &ctx->ir->block_list) { + foreach_instr (instr, &block->instr_list) { if (is_input(instr)) { unsigned inloc = instr->regs[1]->iim_val; unsigned i = inloc / 4; @@ -2840,14 +3104,18 @@ pack_inlocs(struct ir3_context *ctx) * Third Step: reassign packed inloc's: */ - list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) { - list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) { + foreach_block (block, &ctx->ir->block_list) { + foreach_instr (instr, &block->instr_list) { if (is_input(instr)) { unsigned inloc = instr->regs[1]->iim_val; unsigned i = inloc / 4; unsigned j = inloc % 4; instr->regs[1]->iim_val = so->inputs[i].inloc + j; + } else if (instr->opc == OPC_META_TEX_PREFETCH) { + unsigned i = instr->prefetch.input_offset / 4; + unsigned j = instr->prefetch.input_offset % 4; + instr->prefetch.input_offset = so->inputs[i].inloc + j; } } } @@ -2861,12 +3129,10 @@ setup_output(struct ir3_context *ctx, nir_variable *out) unsigned n = out->data.driver_location; unsigned frac = out->data.location_frac; unsigned slot = out->data.location; - unsigned comp = 0; if (ctx->so->type == MESA_SHADER_FRAGMENT) { switch (slot) { case FRAG_RESULT_DEPTH: - comp = 2; /* tgsi will write to .z component */ so->writes_pos = true; break; case FRAG_RESULT_COLOR: @@ -2924,13 +3190,13 @@ setup_output(struct ir3_context *ctx, nir_variable *out) compile_assert(ctx, n < ARRAY_SIZE(so->outputs)); so->outputs[n].slot = slot; - so->outputs[n].regid = regid(n, comp); + so->outputs[n].regid = regid(n, 0); so->outputs_count = MAX2(so->outputs_count, n + 1); for (int i = 0; i < ncomp; i++) { unsigned idx = (n * 4) + i + frac; - compile_assert(ctx, idx < ctx->ir->noutputs); - ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0)); + compile_assert(ctx, idx < ctx->noutputs); + ctx->outputs[idx] = create_immed(ctx->block, fui(0.0)); } /* if varying packing doesn't happen, we 
could end up in a situation @@ -2943,56 +3209,54 @@ setup_output(struct ir3_context *ctx, nir_variable *out) */ for (int i = 0; i < frac; i++) { unsigned idx = (n * 4) + i; - if (!ctx->ir->outputs[idx]) { - ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0)); + if (!ctx->outputs[idx]) { + ctx->outputs[idx] = create_immed(ctx->block, fui(0.0)); } } } -static int -max_drvloc(struct exec_list *vars) -{ - int drvloc = -1; - nir_foreach_variable(var, vars) { - drvloc = MAX2(drvloc, (int)var->data.driver_location); - } - return drvloc; -} - -static const unsigned max_sysvals[] = { - [MESA_SHADER_VERTEX] = 16, - [MESA_SHADER_TESS_CTRL] = 16, - [MESA_SHADER_TESS_EVAL] = 16, - [MESA_SHADER_GEOMETRY] = 16, - [MESA_SHADER_FRAGMENT] = 24, // TODO - [MESA_SHADER_COMPUTE] = 16, // TODO how many do we actually need? - [MESA_SHADER_KERNEL] = 16, // TODO how many do we actually need? -}; - static void emit_instructions(struct ir3_context *ctx) { - unsigned ninputs, noutputs; nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s); - ninputs = (max_drvloc(&ctx->s->inputs) + 1) * 4; - noutputs = (max_drvloc(&ctx->s->outputs) + 1) * 4; + ctx->ninputs = ctx->s->num_inputs * 4; + ctx->noutputs = ctx->s->num_outputs * 4; + ctx->inputs = rzalloc_array(ctx, struct ir3_instruction *, ctx->ninputs); + ctx->outputs = rzalloc_array(ctx, struct ir3_instruction *, ctx->noutputs); - /* we need to leave room for sysvals: - */ - ninputs += max_sysvals[ctx->so->type]; - if (ctx->so->type == MESA_SHADER_VERTEX || - ctx->so->type == MESA_SHADER_TESS_EVAL) - noutputs += 8; /* gs or tess header + primitive_id */ - - ctx->ir = ir3_create(ctx->compiler, ctx->so->type, ninputs, noutputs); + ctx->ir = ir3_create(ctx->compiler, ctx->so->type); /* Create inputs in first block: */ ctx->block = get_block(ctx, nir_start_block(fxn)); ctx->in_block = ctx->block; - list_addtail(&ctx->block->node, &ctx->ir->block_list); - ninputs -= max_sysvals[ctx->so->type]; + /* for fragment shader, the vcoord input register is used as the + * base for bary.f varying fetch instrs: + * + * TODO defer creating ctx->ij_pixel and corresponding sysvals + * until emit_intrinsic when we know they are actually needed. + * For now, we defer creating ctx->ij_centroid, etc, since we + * only need ij_pixel for "old style" varying inputs (ie. + * tgsi_to_nir) + */ + if (ctx->so->type == MESA_SHADER_FRAGMENT) { + ctx->ij_pixel = create_input(ctx, 0x3); + } + + /* Setup inputs: */ + nir_foreach_variable (var, &ctx->s->inputs) { + setup_input(ctx, var); + } + + /* Defer add_sysval_input() stuff until after setup_inputs(), + * because sysvals need to be appended after varyings: + */ + if (ctx->ij_pixel) { + add_sysval_input_compmask(ctx, SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL, + 0x3, ctx->ij_pixel); + } + /* Tesselation shaders always need primitive ID for indexing the * BO. 
@@ -3004,110 +3268,37 @@ emit_instructions(struct ir3_context *ctx)
 	switch (ctx->so->type) {
 	case MESA_SHADER_VERTEX:
 		if (has_tess) {
-			ctx->tcs_header = create_input(ctx, 0);
-			ctx->primitive_id = create_input(ctx, 0);
+			ctx->tcs_header = create_sysval_input(ctx, SYSTEM_VALUE_TCS_HEADER_IR3, 0x1);
+			ctx->primitive_id = create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
 		} else if (has_gs) {
-			ctx->gs_header = create_input(ctx, 0);
-			ctx->primitive_id = create_input(ctx, 0);
+			ctx->gs_header = create_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, 0x1);
+			ctx->primitive_id = create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
 		}
 		break;
 	case MESA_SHADER_TESS_CTRL:
-		ctx->tcs_header = create_input(ctx, 0);
-		ctx->primitive_id = create_input(ctx, 0);
+		ctx->tcs_header = create_sysval_input(ctx, SYSTEM_VALUE_TCS_HEADER_IR3, 0x1);
+		ctx->primitive_id = create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
 		break;
 	case MESA_SHADER_TESS_EVAL:
 		if (has_gs)
-			ctx->gs_header = create_input(ctx, 0);
-		ctx->primitive_id = create_input(ctx, 0);
+			ctx->gs_header = create_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, 0x1);
+		ctx->primitive_id = create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
 		break;
 	case MESA_SHADER_GEOMETRY:
-		ctx->gs_header = create_input(ctx, 0);
-		ctx->primitive_id = create_input(ctx, 0);
+		ctx->gs_header = create_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, 0x1);
+		ctx->primitive_id = create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
 		break;
 	default:
 		break;
 	}
 
-	/* for fragment shader, the vcoord input register is used as the
-	 * base for bary.f varying fetch instrs:
-	 *
-	 * TODO defer creating ctx->ij_pixel and corresponding sysvals
-	 * until emit_intrinsic when we know they are actually needed.
-	 * For now, we defer creating ctx->ij_centroid, etc, since we
-	 * only need ij_pixel for "old style" varying inputs (ie.
-	 * tgsi_to_nir)
-	 */
-	struct ir3_instruction *vcoord = NULL;
-	if (ctx->so->type == MESA_SHADER_FRAGMENT) {
-		struct ir3_instruction *xy[2];
-
-		vcoord = create_input_compmask(ctx, 0, 0x3);
-		ir3_split_dest(ctx->block, xy, vcoord, 0, 2);
-
-		ctx->ij_pixel = ir3_create_collect(ctx, xy, 2);
-	}
-
-	/* Setup inputs: */
-	nir_foreach_variable(var, &ctx->s->inputs) {
-		setup_input(ctx, var);
-	}
-
-	/* Defer add_sysval_input() stuff until after setup_inputs(),
-	 * because sysvals need to be appended after varyings:
-	 */
-	if (vcoord) {
-		add_sysval_input_compmask(ctx, SYSTEM_VALUE_BARYCENTRIC_PIXEL,
-				0x3, vcoord);
-	}
-
-	if (ctx->primitive_id)
-		add_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, ctx->primitive_id);
-	if (ctx->gs_header)
-		add_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, ctx->gs_header);
-	if (ctx->tcs_header)
-		add_sysval_input(ctx, SYSTEM_VALUE_TCS_HEADER_IR3, ctx->tcs_header);
-
 	/* Setup outputs: */
-	nir_foreach_variable(var, &ctx->s->outputs) {
+	nir_foreach_variable (var, &ctx->s->outputs) {
 		setup_output(ctx, var);
 	}
 
-	/* Set up the shared system values as outputs for the vertex and tess eval
-	 * shaders so they don't clobber them for the next shader in the pipeline.
-	 */
-	if (ctx->so->type == MESA_SHADER_VERTEX ||
-			(has_gs && ctx->so->type == MESA_SHADER_TESS_EVAL)) {
-		struct ir3_shader_variant *so = ctx->so;
-		if (ctx->primitive_id) {
-			unsigned n = so->outputs_count++;
-			so->outputs[n].slot = VARYING_SLOT_PRIMITIVE_ID;
-			so->outputs[n].regid = regid(n, 0);
-			ctx->ir->outputs[n * 4] = ctx->primitive_id;
-
-			compile_assert(ctx, n * 4 < ctx->ir->noutputs);
-		}
-
-		if (ctx->gs_header) {
-			unsigned n = so->outputs_count++;
-			so->outputs[n].slot = VARYING_SLOT_GS_HEADER_IR3;
-			so->outputs[n].regid = regid(n, 0);
-			ctx->ir->outputs[n * 4] = ctx->gs_header;
-
-			compile_assert(ctx, n * 4 < ctx->ir->noutputs);
-		}
-
-		if (ctx->tcs_header) {
-			unsigned n = so->outputs_count++;
-			so->outputs[n].slot = VARYING_SLOT_TCS_HEADER_IR3;
-			so->outputs[n].regid = regid(n, 0);
-			ctx->ir->outputs[n * 4] = ctx->tcs_header;
-
-			compile_assert(ctx, n * 4 < ctx->ir->noutputs);
-		}
-	}
-
 	/* Find # of samplers: */
-	nir_foreach_variable(var, &ctx->s->uniforms) {
+	nir_foreach_variable (var, &ctx->s->uniforms) {
 		ctx->so->num_samp += glsl_type_get_sampler_count(var->type);
 
 		/* just assume that we'll be reading from images.. if it
 		 * is write-only we don't have to count it, but not sure
 		 * anyways at this point we don't really know */
@@ -3117,7 +3308,7 @@ emit_instructions(struct ir3_context *ctx)
 	}
 
 	/* NOTE: need to do something more clever when we support >1 fxn */
-	nir_foreach_register(reg, &fxn->registers) {
+	nir_foreach_register (reg, &fxn->registers) {
 		ir3_declare_array(ctx, reg);
 	}
 	/* And emit the body: */
@@ -3125,28 +3316,6 @@ emit_instructions(struct ir3_context *ctx)
 	emit_function(ctx, fxn);
 }
 
-/* from NIR perspective, we actually have varying inputs.  But the varying
- * inputs, from an IR standpoint, are just bary.f/ldlv instructions.  The
- * only actual inputs are the sysvals.
- */
-static void
-fixup_frag_inputs(struct ir3_context *ctx)
-{
-	struct ir3_shader_variant *so = ctx->so;
-	struct ir3 *ir = ctx->ir;
-	unsigned i = 0;
-
-	/* sysvals should appear at the end of the inputs, drop everything else: */
-	while ((i < so->inputs_count) && !so->inputs[i].sysval)
-		i++;
-
-	/* at IR level, inputs are always blocks of 4 scalars: */
-	i *= 4;
-
-	ir->inputs = &ir->inputs[i];
-	ir->ninputs -= i;
-}
-
 /* Fixup tex sampler state for astc/srgb workaround instructions.  We
  * need to assign the tex state indexes for these after we know the
  * max tex index.
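The patch also converts the remaining open-coded list_for_each_entry loops to the foreach_block/foreach_instr wrappers (see the hunks above and below). Presumably these are thin macros in ir3.h that just bake in the entry type and the link-member name, along these lines (a sketch, not the actual header definitions):

#define foreach_block(__block, __list) \
	list_for_each_entry(struct ir3_block, __block, __list, node)
#define foreach_instr(__instr, __list) \
	list_for_each_entry(struct ir3_instruction, __instr, __list, node)
#define foreach_instr_safe(__instr, __list) \
	list_for_each_entry_safe(struct ir3_instruction, __instr, __list, node)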
@@ -3188,23 +3357,44 @@ fixup_binning_pass(struct ir3_context *ctx)
 	struct ir3 *ir = ctx->ir;
 	unsigned i, j;
 
+	/* first pass, remove unused outputs from the IR level outputs: */
+	for (i = 0, j = 0; i < ir->outputs_count; i++) {
+		struct ir3_instruction *out = ir->outputs[i];
+		assert(out->opc == OPC_META_COLLECT);
+		unsigned outidx = out->collect.outidx;
+		unsigned slot = so->outputs[outidx].slot;
+
+		/* throw away everything but first position/psize */
+		if ((slot == VARYING_SLOT_POS) || (slot == VARYING_SLOT_PSIZ)) {
+			ir->outputs[j] = ir->outputs[i];
+			j++;
+		}
+	}
+	ir->outputs_count = j;
+
+	/* second pass, cleanup the unused slots in ir3_shader_variant::outputs
+	 * table:
+	 */
 	for (i = 0, j = 0; i < so->outputs_count; i++) {
 		unsigned slot = so->outputs[i].slot;
 
 		/* throw away everything but first position/psize */
 		if ((slot == VARYING_SLOT_POS) || (slot == VARYING_SLOT_PSIZ)) {
-			if (i != j) {
-				so->outputs[j] = so->outputs[i];
-				ir->outputs[(j*4)+0] = ir->outputs[(i*4)+0];
-				ir->outputs[(j*4)+1] = ir->outputs[(i*4)+1];
-				ir->outputs[(j*4)+2] = ir->outputs[(i*4)+2];
-				ir->outputs[(j*4)+3] = ir->outputs[(i*4)+3];
+			so->outputs[j] = so->outputs[i];
+
+			/* fixup outidx to point to new output table entry: */
+			struct ir3_instruction *out;
+			foreach_output (out, ir) {
+				if (out->collect.outidx == i) {
+					out->collect.outidx = j;
+					break;
+				}
 			}
+
 			j++;
 		}
 	}
 
 	so->outputs_count = j;
-	ir->noutputs = j * 4;
 }
 
 static void
@@ -3213,19 +3403,27 @@ collect_tex_prefetches(struct ir3_context *ctx, struct ir3 *ir)
 	unsigned idx = 0;
 
 	/* Collect sampling instructions eligible for pre-dispatch. */
-	list_for_each_entry(struct ir3_block, block, &ir->block_list, node) {
-		list_for_each_entry_safe(struct ir3_instruction, instr,
-				&block->instr_list, node) {
+	foreach_block (block, &ir->block_list) {
+		foreach_instr_safe (instr, &block->instr_list) {
 			if (instr->opc == OPC_META_TEX_PREFETCH) {
 				assert(idx < ARRAY_SIZE(ctx->so->sampler_prefetch));
 				struct ir3_sampler_prefetch *fetch =
 					&ctx->so->sampler_prefetch[idx];
 				idx++;
 
-				fetch->cmd = IR3_SAMPLER_PREFETCH_CMD;
+				if (instr->flags & IR3_INSTR_B) {
+					fetch->cmd = IR3_SAMPLER_BINDLESS_PREFETCH_CMD;
+					/* In bindless mode, the index is actually the base */
+					fetch->tex_id = instr->prefetch.tex_base;
+					fetch->samp_id = instr->prefetch.samp_base;
+					fetch->tex_bindless_id = instr->prefetch.tex;
+					fetch->samp_bindless_id = instr->prefetch.samp;
+				} else {
+					fetch->cmd = IR3_SAMPLER_PREFETCH_CMD;
+					fetch->tex_id = instr->prefetch.tex;
+					fetch->samp_id = instr->prefetch.samp;
+				}
 				fetch->wrmask = instr->regs[0]->wrmask;
-				fetch->tex_id = instr->prefetch.tex;
-				fetch->samp_id = instr->prefetch.samp;
 				fetch->dst = instr->regs[0]->num;
 				fetch->src = instr->prefetch.input_offset;
@@ -3233,7 +3431,6 @@ collect_tex_prefetches(struct ir3_context *ctx, struct ir3 *ir)
 					MAX2(ctx->so->total_in, instr->prefetch.input_offset + 2);
 
-				/* Disable half precision until supported. */
-				fetch->half_precision = 0x0;
+				fetch->half_precision = !!(instr->regs[0]->flags & IR3_REG_HALF);
 
 				/* Remove the prefetch placeholder instruction: */
 				list_delinit(&instr->node);
@@ -3248,8 +3446,6 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 {
 	struct ir3_context *ctx;
 	struct ir3 *ir;
-	struct ir3_instruction **inputs;
-	unsigned i;
 	int ret = 0, max_bary;
 
 	assert(!so->ir);
@@ -3271,17 +3467,90 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 	ir = so->ir = ctx->ir;
 
-	/* keep track of the inputs from TGSI perspective.. */
-	inputs = ir->inputs;
+	assert((ctx->noutputs % 4) == 0);
 
-	/* but fixup actual inputs for frag shader: */
-	if (so->type == MESA_SHADER_FRAGMENT)
-		fixup_frag_inputs(ctx);
+	/* Setup IR level outputs, which are "collects" that gather
+	 * the scalar components of outputs.
+	 */
+	for (unsigned i = 0; i < ctx->noutputs; i += 4) {
+		unsigned ncomp = 0;
+		/* figure out the # of components written:
+		 *
+		 * TODO do we need to handle holes, ie. if .x and .z
+		 * components written, but .y component not written?
+		 */
+		for (unsigned j = 0; j < 4; j++) {
+			if (!ctx->outputs[i + j])
+				break;
+			ncomp++;
+		}
+
+		/* Note that in some stages, like TCS, store_output is
+		 * lowered to memory writes, so no components of the
+		 * output are "written" from the PoV of traditional
+		 * store-output instructions:
+		 */
+		if (!ncomp)
+			continue;
+
+		struct ir3_instruction *out =
+			ir3_create_collect(ctx, &ctx->outputs[i], ncomp);
+
+		int outidx = i / 4;
+		assert(outidx < so->outputs_count);
+
+		/* stash index into so->outputs[] so we can map the
+		 * output back to slot/etc later:
+		 */
+		out->collect.outidx = outidx;
+
+		array_insert(ir, ir->outputs, out);
+	}
+
+	/* Set up the gs header as an output for the vertex shader so it won't
+	 * clobber it for the tess ctrl shader.
+	 *
+	 * TODO this could probably be done more cleanly in a nir pass.
+	 */
+	if (ctx->so->type == MESA_SHADER_VERTEX ||
+			(ctx->so->key.has_gs && ctx->so->type == MESA_SHADER_TESS_EVAL)) {
+		if (ctx->primitive_id) {
+			unsigned n = so->outputs_count++;
+			so->outputs[n].slot = VARYING_SLOT_PRIMITIVE_ID;
+
+			struct ir3_instruction *out =
+				ir3_create_collect(ctx, &ctx->primitive_id, 1);
+			out->collect.outidx = n;
+			array_insert(ir, ir->outputs, out);
+		}
+
+		if (ctx->gs_header) {
+			unsigned n = so->outputs_count++;
+			so->outputs[n].slot = VARYING_SLOT_GS_HEADER_IR3;
+			struct ir3_instruction *out =
+				ir3_create_collect(ctx, &ctx->gs_header, 1);
+			out->collect.outidx = n;
+			array_insert(ir, ir->outputs, out);
+		}
+
+		if (ctx->tcs_header) {
+			unsigned n = so->outputs_count++;
+			so->outputs[n].slot = VARYING_SLOT_TCS_HEADER_IR3;
+			struct ir3_instruction *out =
+				ir3_create_collect(ctx, &ctx->tcs_header, 1);
+			out->collect.outidx = n;
+			array_insert(ir, ir->outputs, out);
+		}
+	}
 
 	/* at this point, for binning pass, throw away unneeded outputs: */
 	if (so->binning_pass && (ctx->compiler->gpu_id < 600))
 		fixup_binning_pass(ctx);
 
+	ir3_debug_print(ir, "BEFORE CF");
+
+	ir3_cf(ir);
+
 	ir3_debug_print(ir, "BEFORE CP");
 
 	ir3_cp(ir, so);
@@ -3300,8 +3569,8 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 	 */
 	if (ctx->compiler->gpu_id >= 600 && so->binning_pass &&
 			so->type == MESA_SHADER_VERTEX) {
-		for (int i = 0; i < ir->ninputs; i++) {
-			struct ir3_instruction *in = ir->inputs[i];
+		for (int i = 0; i < ctx->ninputs; i++) {
+			struct ir3_instruction *in = ctx->inputs[i];
 
 			if (!in)
 				continue;
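Since IR-level outputs are now OPC_META_COLLECT instructions carrying an outidx back-reference into so->outputs[] (stashed in the hunk above), recovering the slot for an output is a simple table lookup. A sketch with a hypothetical helper, assuming only the fields this patch uses:

static unsigned
output_slot(const struct ir3_shader_variant *so,
		const struct ir3_instruction *out)
{
	assert(out->opc == OPC_META_COLLECT);
	return so->outputs[out->collect.outidx].slot;
}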
@@ -3320,20 +3589,6 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 		}
 	}
 
-	/* Insert mov if there's same instruction for each output.
-	 * eg. dEQP-GLES31.functional.shaders.opaque_type_indexing.sampler.const_expression.vertex.sampler2dshadow
-	 */
-	for (int i = ir->noutputs - 1; i >= 0; i--) {
-		if (!ir->outputs[i])
-			continue;
-		for (unsigned j = 0; j < i; j++) {
-			if (ir->outputs[i] == ir->outputs[j]) {
-				ir->outputs[i] =
-					ir3_MOV(ir->outputs[i]->block, ir->outputs[i], TYPE_F32);
-			}
-		}
-	}
-
 	ir3_debug_print(ir, "BEFORE GROUPING");
 
 	ir3_sched_add_deps(ir);
@@ -3345,9 +3600,9 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 
 	ir3_debug_print(ir, "AFTER GROUPING");
 
-	ir3_depth(ir, so);
-
-	ir3_debug_print(ir, "AFTER DEPTH");
+	ir3_dce(ir, so);
+
+	ir3_debug_print(ir, "AFTER DCE");
 
 	/* do Sethi–Ullman numbering before scheduling: */
 	ir3_sun(ir);
@@ -3358,10 +3613,6 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 		goto out;
 	}
 
-	if (compiler->gpu_id >= 600) {
-		ir3_a6xx_fixup_atomic_dests(ir, so);
-	}
-
 	ir3_debug_print(ir, "AFTER SCHED");
 
 	/* Pre-assign VS inputs on a6xx+ binning pass shader, to align
@@ -3375,8 +3626,8 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 			so->binning_pass;
 
 	if (pre_assign_inputs) {
-		for (unsigned i = 0; i < ir->ninputs; i++) {
-			struct ir3_instruction *instr = ir->inputs[i];
+		for (unsigned i = 0; i < ctx->ninputs; i++) {
+			struct ir3_instruction *instr = ctx->inputs[i];
 
 			if (!instr)
 				continue;
@@ -3388,7 +3639,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 			instr->regs[0]->num = regid;
 		}
 
-		ret = ir3_ra(so, ir->inputs, ir->ninputs);
+		ret = ir3_ra(so, ctx->inputs, ctx->ninputs);
 	} else if (ctx->tcs_header) {
 		/* We need to have these values in the same registers between VS and TCS
 		 * since the VS chains to TCS and doesn't get the sysvals redelivered.
@@ -3413,8 +3664,8 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 		struct ir3_instruction *instr, *precolor[2];
 		int idx = 0;
 
-		foreach_input(instr, ir) {
-			if (instr->input.sysval != SYSTEM_VALUE_BARYCENTRIC_PIXEL)
+		foreach_input (instr, ir) {
+			if (instr->input.sysval != SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL)
 				continue;
 
 			assert(idx < ARRAY_SIZE(precolor));
@@ -3434,53 +3685,59 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 			goto out;
 	}
 
-	ir3_debug_print(ir, "AFTER RA");
+	ir3_postsched(ctx);
+	ir3_debug_print(ir, "AFTER POSTSCHED");
+
+	if (compiler->gpu_id >= 600) {
+		if (ir3_a6xx_fixup_atomic_dests(ir, so)) {
+			ir3_debug_print(ir, "AFTER ATOMIC FIXUP");
+		}
+	}
 
 	if (so->type == MESA_SHADER_FRAGMENT)
 		pack_inlocs(ctx);
 
-	/* fixup input/outputs: */
-	for (i = 0; i < so->outputs_count; i++) {
-		/* sometimes we get outputs that don't write the .x coord, like:
-		 *
-		 *   decl_var shader_out INTERP_MODE_NONE float Color (VARYING_SLOT_VAR9.z, 1, 0)
-		 *
-		 * Presumably the result of varying packing and then eliminating
-		 * some unneeded varyings?  Just skip head to the first valid
-		 * component of the output.
-		 */
-		for (unsigned j = 0; j < 4; j++) {
-			struct ir3_instruction *instr = ir->outputs[(i*4) + j];
-			if (instr) {
-				so->outputs[i].regid = instr->regs[0]->num;
-				so->outputs[i].half = !!(instr->regs[0]->flags & IR3_REG_HALF);
-				break;
-			}
-		}
-	}
+	/*
+	 * Fixup inputs/outputs to point to the actual registers assigned:
+	 *
+	 * 1) initialize to r63.x (invalid/unused)
+	 * 2) iterate IR level inputs/outputs and update the variant's
+	 *    inputs/outputs table based on the assigned registers for
+	 *    the remaining inputs/outputs.
+	 */
 
-	/* Note that some or all channels of an input may be unused: */
-	for (i = 0; i < so->inputs_count; i++) {
-		unsigned j, reg = regid(63,0);
-		bool half = false;
-		for (j = 0; j < 4; j++) {
-			struct ir3_instruction *in = inputs[(i*4) + j];
+	for (unsigned i = 0; i < so->inputs_count; i++)
+		so->inputs[i].regid = INVALID_REG;
+	for (unsigned i = 0; i < so->outputs_count; i++)
+		so->outputs[i].regid = INVALID_REG;
 
-			if (!in)
-				continue;
+	struct ir3_instruction *out;
+	foreach_output (out, ir) {
+		assert(out->opc == OPC_META_COLLECT);
+		unsigned outidx = out->collect.outidx;
 
-			if (in->flags & IR3_INSTR_UNUSED)
-				continue;
+		so->outputs[outidx].regid = out->regs[0]->num;
+		so->outputs[outidx].half = !!(out->regs[0]->flags & IR3_REG_HALF);
+	}
 
-			reg = in->regs[0]->num - j;
-			if (half) {
-				compile_assert(ctx, in->regs[0]->flags & IR3_REG_HALF);
-			} else {
-				half = !!(in->regs[0]->flags & IR3_REG_HALF);
+	struct ir3_instruction *in;
+	foreach_input (in, ir) {
+		assert(in->opc == OPC_META_INPUT);
+		unsigned inidx = in->input.inidx;
+
+		if (pre_assign_inputs && !so->inputs[inidx].sysval) {
+			if (VALIDREG(so->nonbinning->inputs[inidx].regid)) {
+				compile_assert(ctx, in->regs[0]->num ==
+						so->nonbinning->inputs[inidx].regid);
+				compile_assert(ctx, !!(in->regs[0]->flags & IR3_REG_HALF) ==
+						so->nonbinning->inputs[inidx].half);
 			}
+
+			so->inputs[inidx].regid = so->nonbinning->inputs[inidx].regid;
+			so->inputs[inidx].half = so->nonbinning->inputs[inidx].half;
+		} else {
+			so->inputs[inidx].regid = in->regs[0]->num;
+			so->inputs[inidx].half = !!(in->regs[0]->flags & IR3_REG_HALF);
 		}
-		so->inputs[i].regid = reg;
-		so->inputs[i].half = half;
 	}
 
 	if (ctx->astc_srgb)
@@ -3489,7 +3746,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 	/* We need to do legalize after (for frag shader's) the "bary.f"
 	 * offsets (inloc) have been assigned.
 	 */
-	ir3_legalize(ir, &so->has_ssbo, &so->need_pixlod, &max_bary);
+	ir3_legalize(ir, so, &max_bary);
 
 	ir3_debug_print(ir, "AFTER LEGALIZE");
 
@@ -3498,8 +3755,8 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 	 */
 	if (so->type == MESA_SHADER_TESS_CTRL ||
 			so->type == MESA_SHADER_GEOMETRY ) {
-		list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
-			list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+		foreach_block (block, &ir->block_list) {
+			foreach_instr (instr, &block->instr_list) {
 				instr->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
 				break;
 			}
@@ -3517,6 +3774,10 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 	/* Collect sampling instructions eligible for pre-dispatch. */
 	collect_tex_prefetches(ctx, ir);
 
+	if (so->type == MESA_SHADER_FRAGMENT &&
+			ctx->s->info.fs.needs_helper_invocations)
+		so->need_pixlod = true;
+
 out:
 	if (ret) {
 		if (so->ir)