X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Ffreedreno%2Fir3%2Fir3_compiler_nir.c;h=f1ba10456ed78ad6592c982c9f898bdca6cba3ea;hb=e40b11bbcb02dde1a8f989ca6545e22414c6f4ce;hp=afab76ab8c8c491c3c436e66d7e98d503072b809;hpb=91a1354cd6e2cad77799b5ce68927d954bb0213c;p=mesa.git diff --git a/src/freedreno/ir3/ir3_compiler_nir.c b/src/freedreno/ir3/ir3_compiler_nir.c index afab76ab8c8..f1ba10456ed 100644 --- a/src/freedreno/ir3/ir3_compiler_nir.c +++ b/src/freedreno/ir3/ir3_compiler_nir.c @@ -51,9 +51,8 @@ create_indirect_load(struct ir3_context *ctx, unsigned arrsz, int n, mov = ir3_instr_create(block, OPC_MOV); mov->cat1.src_type = TYPE_U32; mov->cat1.dst_type = TYPE_U32; - ir3_reg_create(mov, 0, 0); - src = ir3_reg_create(mov, 0, IR3_REG_SSA | IR3_REG_RELATIV); - src->instr = collect; + __ssa_dst(mov); + src = __ssa_src(mov, collect, IR3_REG_RELATIV); src->size = arrsz; src->array.offset = n; @@ -63,39 +62,33 @@ create_indirect_load(struct ir3_context *ctx, unsigned arrsz, int n, } static struct ir3_instruction * -create_input_compmask(struct ir3_context *ctx, unsigned n, unsigned compmask) +create_input(struct ir3_context *ctx, unsigned compmask) { struct ir3_instruction *in; in = ir3_instr_create(ctx->in_block, OPC_META_INPUT); - in->inout.block = ctx->in_block; - ir3_reg_create(in, n, 0); + in->input.sysval = ~0; + __ssa_dst(in)->wrmask = compmask; - in->regs[0]->wrmask = compmask; + array_insert(ctx->ir, ctx->ir->inputs, in); return in; } static struct ir3_instruction * -create_input(struct ir3_context *ctx, unsigned n) -{ - return create_input_compmask(ctx, n, 0x1); -} - -static struct ir3_instruction * -create_frag_input(struct ir3_context *ctx, bool use_ldlv) +create_frag_input(struct ir3_context *ctx, bool use_ldlv, unsigned n) { struct ir3_block *block = ctx->block; struct ir3_instruction *instr; - /* actual inloc is assigned and fixed up later: */ - struct ir3_instruction *inloc = create_immed(block, 0); + /* packed inloc is fixed up later: */ + struct ir3_instruction *inloc = create_immed(block, n); if (use_ldlv) { instr = ir3_LDLV(block, inloc, 0, create_immed(block, 1), 0); instr->cat6.type = TYPE_U32; instr->cat6.iim_val = 1; } else { - instr = ir3_BARY_F(block, inloc, 0, ctx->frag_vcoord, 0); + instr = ir3_BARY_F(block, inloc, 0, ctx->ij_pixel, 0); instr->regs[2]->wrmask = 0x3; } @@ -107,7 +100,8 @@ create_driver_param(struct ir3_context *ctx, enum ir3_driver_param dp) { /* first four vec4 sysval's reserved for UBOs: */ /* NOTE: dp is in scalar, but there can be >4 dp components: */ - unsigned n = ctx->so->constbase.driver_param; + struct ir3_const_state *const_state = &ctx->so->shader->const_state; + unsigned n = const_state->offsets.driver_param; unsigned r = regid(n + dp / 4, dp % 4); return create_uniform(ctx->block, r); } @@ -293,6 +287,8 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) unsigned bs[info->num_inputs]; /* bit size */ struct ir3_block *b = ctx->block; unsigned dst_sz, wrmask; + type_t dst_type = nir_dest_bit_size(alu->dest.dest) < 32 ? 
+ TYPE_U16 : TYPE_U32; if (alu->dest.dest.is_ssa) { dst_sz = alu->dest.dest.ssa.num_components; @@ -320,8 +316,8 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) src[i] = ir3_get_src(ctx, &asrc->src)[asrc->swizzle[0]]; if (!src[i]) - src[i] = create_immed(ctx->block, 0); - dst[i] = ir3_MOV(b, src[i], TYPE_U32); + src[i] = create_immed_typed(ctx->block, 0, dst_type); + dst[i] = ir3_MOV(b, src[i], dst_type); } ir3_put_dst(ctx, &alu->dest.dest); @@ -331,14 +327,13 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) /* We also get mov's with more than one component for mov's so * handle those specially: */ - if ((alu->op == nir_op_imov) || (alu->op == nir_op_fmov)) { - type_t type = (alu->op == nir_op_imov) ? TYPE_U32 : TYPE_F32; + if (alu->op == nir_op_mov) { nir_alu_src *asrc = &alu->src[0]; struct ir3_instruction *const *src0 = ir3_get_src(ctx, &asrc->src); for (unsigned i = 0; i < dst_sz; i++) { if (wrmask & (1 << i)) { - dst[i] = ir3_MOV(b, src0[asrc->swizzle[i]], type); + dst[i] = ir3_MOV(b, src0[asrc->swizzle[i]], dst_type); } else { dst[i] = NULL; } @@ -385,12 +380,24 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) case nir_op_u2u8: dst[0] = create_cov(ctx, src[0], bs[0], alu->op); break; + case nir_op_fquantize2f16: + dst[0] = create_cov(ctx, + create_cov(ctx, src[0], 32, nir_op_f2f16), + 16, nir_op_f2f32); + break; + case nir_op_f2b16: { + struct ir3_instruction *zero = create_immed_typed(b, 0, TYPE_F16); + dst[0] = ir3_CMPS_F(b, src[0], 0, zero, 0); + dst[0]->cat2.condition = IR3_COND_NE; + break; + } case nir_op_f2b32: dst[0] = ir3_CMPS_F(b, src[0], 0, create_immed(b, fui(0.0)), 0); dst[0]->cat2.condition = IR3_COND_NE; - dst[0] = ir3_n2b(b, dst[0]); break; case nir_op_b2f16: + dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F16); + break; case nir_op_b2f32: dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F32); break; @@ -399,10 +406,15 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) case nir_op_b2i32: dst[0] = ir3_b2n(b, src[0]); break; + case nir_op_i2b16: { + struct ir3_instruction *zero = create_immed_typed(b, 0, TYPE_S16); + dst[0] = ir3_CMPS_S(b, src[0], 0, zero, 0); + dst[0]->cat2.condition = IR3_COND_NE; + break; + } case nir_op_i2b32: dst[0] = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0); dst[0]->cat2.condition = IR3_COND_NE; - dst[0] = ir3_n2b(b, dst[0]); break; case nir_op_fneg: @@ -429,7 +441,7 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) (list_length(&alu->src[0].src.ssa->uses) == 1) && ((opc_cat(src[0]->opc) == 2) || (opc_cat(src[0]->opc) == 3))) { src[0]->flags |= IR3_INSTR_SAT; - dst[0] = ir3_MOV(b, src[0], TYPE_U32); + dst[0] = ir3_MOV(b, src[0], dst_type); } else { /* otherwise generate a max.f that saturates.. 
blob does * similar (generating a cat2 mov using max.f) @@ -451,33 +463,35 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) dst[0] = ir3_MAD_F32(b, src[0], 0, src[1], 0, src[2], 0); break; case nir_op_fddx: + case nir_op_fddx_coarse: dst[0] = ir3_DSX(b, src[0], 0); dst[0]->cat5.type = TYPE_F32; break; case nir_op_fddy: + case nir_op_fddy_coarse: dst[0] = ir3_DSY(b, src[0], 0); dst[0]->cat5.type = TYPE_F32; break; break; + case nir_op_flt16: case nir_op_flt32: dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_LT; - dst[0] = ir3_n2b(b, dst[0]); break; + case nir_op_fge16: case nir_op_fge32: dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_GE; - dst[0] = ir3_n2b(b, dst[0]); break; + case nir_op_feq16: case nir_op_feq32: dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_EQ; - dst[0] = ir3_n2b(b, dst[0]); break; + case nir_op_fne16: case nir_op_fne32: dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_NE; - dst[0] = ir3_n2b(b, dst[0]); break; case nir_op_fceil: dst[0] = ir3_CEIL_F(b, src[0], 0); @@ -538,16 +552,17 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) case nir_op_umin: dst[0] = ir3_MIN_U(b, src[0], 0, src[1], 0); break; - case nir_op_imul: - /* - * dst = (al * bl) + (ah * bl << 16) + (al * bh << 16) - * mull.u tmp0, a, b ; mul low, i.e. al * bl - * madsh.m16 tmp1, a, b, tmp0 ; mul-add shift high mix, i.e. ah * bl << 16 - * madsh.m16 dst, b, a, tmp1 ; i.e. al * bh << 16 - */ - dst[0] = ir3_MADSH_M16(b, src[1], 0, src[0], 0, - ir3_MADSH_M16(b, src[0], 0, src[1], 0, - ir3_MULL_U(b, src[0], 0, src[1], 0), 0), 0); + case nir_op_umul_low: + dst[0] = ir3_MULL_U(b, src[0], 0, src[1], 0); + break; + case nir_op_imadsh_mix16: + dst[0] = ir3_MADSH_M16(b, src[0], 0, src[1], 0, src[2], 0); + break; + case nir_op_imad24_ir3: + dst[0] = ir3_MAD_S24(b, src[0], 0, src[1], 0, src[2], 0); + break; + case nir_op_imul24: + dst[0] = ir3_MUL_S24(b, src[0], 0, src[1], 0); break; case nir_op_ineg: dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG); @@ -573,47 +588,57 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) case nir_op_ushr: dst[0] = ir3_SHR_B(b, src[0], 0, src[1], 0); break; + case nir_op_ilt16: case nir_op_ilt32: dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_LT; - dst[0] = ir3_n2b(b, dst[0]); break; + case nir_op_ige16: case nir_op_ige32: dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_GE; - dst[0] = ir3_n2b(b, dst[0]); break; + case nir_op_ieq16: case nir_op_ieq32: dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_EQ; - dst[0] = ir3_n2b(b, dst[0]); break; + case nir_op_ine16: case nir_op_ine32: dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_NE; - dst[0] = ir3_n2b(b, dst[0]); break; + case nir_op_ult16: case nir_op_ult32: dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_LT; - dst[0] = ir3_n2b(b, dst[0]); break; + case nir_op_uge16: case nir_op_uge32: dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_GE; - dst[0] = ir3_n2b(b, dst[0]); break; + case nir_op_b16csel: case nir_op_b32csel: { struct ir3_instruction *cond = ir3_b2n(b, src[0]); + + if ((src[0]->regs[0]->flags & IR3_REG_HALF)) + cond->regs[0]->flags |= IR3_REG_HALF; + compile_assert(ctx, bs[1] == bs[2]); - /* the boolean condition is 32b even if src[1] and src[2] are - * half-precision, but sel.b16 wants all three 
src's to be the - * same type. + /* Make sure the boolean condition has the same bit size as the other + * two arguments, adding a conversion if necessary. */ - if (bs[1] < 32) + if (bs[1] < bs[0]) cond = ir3_COV(b, cond, TYPE_U32, TYPE_U16); - dst[0] = ir3_SEL_B32(b, src[1], 0, cond, 0, src[2], 0); + else if (bs[1] > bs[0]) + cond = ir3_COV(b, cond, TYPE_U16, TYPE_U32); + + if (bs[1] > 16) + dst[0] = ir3_SEL_B32(b, src[1], 0, cond, 0, src[2], 0); + else + dst[0] = ir3_SEL_B16(b, src[1], 0, cond, 0, src[2], 0); break; } case nir_op_bit_count: { @@ -670,6 +695,21 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) break; } + if (nir_alu_type_get_base_type(info->output_type) == nir_type_bool) { + assert(dst_sz == 1); + + if (nir_dest_bit_size(alu->dest.dest) < 32) + dst[0]->regs[0]->flags |= IR3_REG_HALF; + + dst[0] = ir3_n2b(b, dst[0]); + } + + if (nir_dest_bit_size(alu->dest.dest) < 32) { + for (unsigned i = 0; i < dst_sz; i++) { + dst[i]->regs[0]->flags |= IR3_REG_HALF; + } + } + ir3_put_dst(ctx, &alu->dest.dest); } @@ -680,12 +720,12 @@ emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr, { struct ir3_block *b = ctx->block; struct ir3_instruction *base_lo, *base_hi, *addr, *src0, *src1; - nir_const_value *const_offset; /* UBO addresses are the first driver params, but subtract 2 here to * account for nir_lower_uniforms_to_ubo rebasing the UBOs such that UBO 0 * is the uniforms: */ - unsigned ubo = regid(ctx->so->constbase.ubo, 0) - 2; - const unsigned ptrsz = ir3_pointer_size(ctx); + struct ir3_const_state *const_state = &ctx->so->shader->const_state; + unsigned ubo = regid(const_state->offsets.ubo, 0) - 2; + const unsigned ptrsz = ir3_pointer_size(ctx->compiler); int off = 0; @@ -698,14 +738,20 @@ emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr, } else { base_lo = create_uniform_indirect(b, ubo, ir3_get_addr(ctx, src0, ptrsz)); base_hi = create_uniform_indirect(b, ubo + 1, ir3_get_addr(ctx, src0, ptrsz)); + + /* NOTE: since relative addressing is used, make sure constlen is + * at least big enough to cover all the UBO addresses, since the + * assembler won't know what the max address reg is. 
+ */ + ctx->so->constlen = MAX2(ctx->so->constlen, + const_state->offsets.ubo + (ctx->s->info.num_ubos * ptrsz)); } /* note: on 32bit gpu's base_hi is ignored and DCE'd */ addr = base_lo; - const_offset = nir_src_as_const_value(intr->src[1]); - if (const_offset) { - off += const_offset->u32[0]; + if (nir_src_is_const(intr->src[1])) { + off += nir_src_as_uint(intr->src[1]); } else { /* For load_ubo_indirect, second src is indirect offset: */ src1 = ir3_get_src(ctx, &intr->src[1])[0]; @@ -740,9 +786,9 @@ emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr, for (int i = 0; i < intr->num_components; i++) { struct ir3_instruction *load = - ir3_LDG(b, addr, 0, create_immed(b, 1), 0); + ir3_LDG(b, addr, 0, create_immed(b, 1), 0, /* num components */ + create_immed(b, off + i * 4), 0); load->cat6.type = TYPE_U32; - load->cat6.src_offset = off + i * 4; /* byte offset */ dst[i] = load; } } @@ -753,11 +799,12 @@ emit_intrinsic_ssbo_size(struct ir3_context *ctx, nir_intrinsic_instr *intr, struct ir3_instruction **dst) { /* SSBO size stored as a const starting at ssbo_sizes: */ - unsigned blk_idx = nir_src_as_const_value(intr->src[0])->u32[0]; - unsigned idx = regid(ctx->so->constbase.ssbo_sizes, 0) + - ctx->so->const_layout.ssbo_size.off[blk_idx]; + struct ir3_const_state *const_state = &ctx->so->shader->const_state; + unsigned blk_idx = nir_src_as_uint(intr->src[0]); + unsigned idx = regid(const_state->offsets.ssbo_sizes, 0) + + const_state->ssbo_size.off[blk_idx]; - debug_assert(ctx->so->const_layout.ssbo_size.mask & (1 << blk_idx)); + debug_assert(const_state->ssbo_size.mask & (1 << blk_idx)); dst[0] = create_uniform(ctx->block, idx); } @@ -774,8 +821,10 @@ emit_intrinsic_load_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr, offset = ir3_get_src(ctx, &intr->src[0])[0]; base = nir_intrinsic_base(intr); - ldl = ir3_LDL(b, offset, 0, create_immed(b, intr->num_components), 0); - ldl->cat6.src_offset = base; + ldl = ir3_LDL(b, offset, 0, + create_immed(b, intr->num_components), 0, + create_immed(b, base), 0); + ldl->cat6.type = utype_dst(intr->dest); ldl->regs[0]->wrmask = MASK(intr->num_components); @@ -828,6 +877,75 @@ emit_intrinsic_store_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr) } } +/* src[] = { offset }. const_index[] = { base } */ +static void +emit_intrinsic_load_shared_ir3(struct ir3_context *ctx, nir_intrinsic_instr *intr, + struct ir3_instruction **dst) +{ + struct ir3_block *b = ctx->block; + struct ir3_instruction *load, *offset; + unsigned base; + + offset = ir3_get_src(ctx, &intr->src[0])[0]; + base = nir_intrinsic_base(intr); + + load = ir3_LDLW(b, offset, 0, + create_immed(b, intr->num_components), 0, + create_immed(b, base), 0); + + load->cat6.type = utype_dst(intr->dest); + load->regs[0]->wrmask = MASK(intr->num_components); + + load->barrier_class = IR3_BARRIER_SHARED_R; + load->barrier_conflict = IR3_BARRIER_SHARED_W; + + ir3_split_dest(b, dst, load, 0, intr->num_components); +} + +/* src[] = { value, offset }. 
const_index[] = { base, write_mask } */ +static void +emit_intrinsic_store_shared_ir3(struct ir3_context *ctx, nir_intrinsic_instr *intr) +{ + struct ir3_block *b = ctx->block; + struct ir3_instruction *store, *offset; + struct ir3_instruction * const *value; + unsigned base, wrmask; + + value = ir3_get_src(ctx, &intr->src[0]); + offset = ir3_get_src(ctx, &intr->src[1])[0]; + + base = nir_intrinsic_base(intr); + wrmask = nir_intrinsic_write_mask(intr); + + /* Combine groups of consecutive enabled channels in one write + * message. We use ffs to find the first enabled channel and then ffs on + * the bit-inverse, down-shifted writemask to determine the length of + * the block of enabled bits. + * + * (trick stolen from i965's fs_visitor::nir_emit_cs_intrinsic()) + */ + while (wrmask) { + unsigned first_component = ffs(wrmask) - 1; + unsigned length = ffs(~(wrmask >> first_component)) - 1; + + store = ir3_STLW(b, offset, 0, + ir3_create_collect(ctx, &value[first_component], length), 0, + create_immed(b, length), 0); + + store->cat6.dst_offset = first_component + base; + store->cat6.type = utype_src(intr->src[0]); + store->barrier_class = IR3_BARRIER_SHARED_W; + store->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W; + + array_insert(b, b->keeps, store); + + /* Clear the bits in the writemask that we just wrote, then try + * again to see if more channels are left. + */ + wrmask &= (15 << (first_component + length)); + } +} + /* * CS shared variable atomic intrinsics * @@ -973,9 +1091,11 @@ emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr, struct ir3_instruction *samp_tex = get_image_samp_tex_src(ctx, intr); struct ir3_instruction *sam, *lod; unsigned flags, ncoords = ir3_get_image_coords(var, &flags); + type_t dst_type = nir_dest_bit_size(intr->dest) < 32 ? + TYPE_U16 : TYPE_U32; lod = create_immed(b, 0); - sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags, + sam = ir3_SAM(b, OPC_GETSIZE, dst_type, 0b1111, flags, samp_tex, lod, NULL); /* Array size actually ends up in .w rather than .z. This doesn't @@ -1008,8 +1128,9 @@ emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr, * bytes-per-pixel should have been emitted in 2nd slot of * image_dims. See ir3_shader::emit_image_dims(). 
*/ - unsigned cb = regid(ctx->so->constbase.image_dims, 0) + - ctx->so->const_layout.image_dims.off[var->data.driver_location]; + struct ir3_const_state *const_state = &ctx->so->shader->const_state; + unsigned cb = regid(const_state->offsets.image_dims, 0) + + const_state->image_dims.off[var->data.driver_location]; struct ir3_instruction *aux = create_uniform(b, cb + 1); tmp[0] = ir3_SHR_B(b, tmp[0], 0, aux, 0); @@ -1034,7 +1155,7 @@ emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr) struct ir3_instruction *barrier; switch (intr->intrinsic) { - case nir_intrinsic_barrier: + case nir_intrinsic_control_barrier: barrier = ir3_BAR(b); barrier->cat7.g = true; barrier->cat7.l = true; @@ -1046,6 +1167,7 @@ emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr) barrier->cat7.g = true; barrier->cat7.r = true; barrier->cat7.w = true; + barrier->cat7.l = true; barrier->barrier_class = IR3_BARRIER_IMAGE_W | IR3_BARRIER_BUFFER_W; barrier->barrier_conflict = @@ -1109,24 +1231,103 @@ static void add_sysval_input_compmask(struct ir3_context *ctx, struct ir3_instruction *instr) { struct ir3_shader_variant *so = ctx->so; - unsigned r = regid(so->inputs_count, 0); unsigned n = so->inputs_count++; + assert(instr->opc == OPC_META_INPUT); + instr->input.inidx = n; + instr->input.sysval = slot; + so->inputs[n].sysval = true; so->inputs[n].slot = slot; so->inputs[n].compmask = compmask; - so->inputs[n].regid = r; so->inputs[n].interpolate = INTERP_MODE_FLAT; so->total_in++; +} - ctx->ir->ninputs = MAX2(ctx->ir->ninputs, r + 1); - ctx->ir->inputs[r] = instr; +static struct ir3_instruction * +create_sysval_input(struct ir3_context *ctx, gl_system_value slot, + unsigned compmask) +{ + assert(compmask); + struct ir3_instruction *sysval = create_input(ctx, compmask); + add_sysval_input_compmask(ctx, slot, compmask, sysval); + return sysval; } -static void add_sysval_input(struct ir3_context *ctx, gl_system_value slot, - struct ir3_instruction *instr) +static struct ir3_instruction * +get_barycentric_centroid(struct ir3_context *ctx) +{ + if (!ctx->ij_centroid) { + struct ir3_instruction *xy[2]; + struct ir3_instruction *ij; + + ij = create_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_CENTROID, 0x3); + ir3_split_dest(ctx->block, xy, ij, 0, 2); + + ctx->ij_centroid = ir3_create_collect(ctx, xy, 2); + } + + return ctx->ij_centroid; +} + +static struct ir3_instruction * +get_barycentric_sample(struct ir3_context *ctx) { - add_sysval_input_compmask(ctx, slot, 0x1, instr); + if (!ctx->ij_sample) { + struct ir3_instruction *xy[2]; + struct ir3_instruction *ij; + + ij = create_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_SAMPLE, 0x3); + ir3_split_dest(ctx->block, xy, ij, 0, 2); + + ctx->ij_sample = ir3_create_collect(ctx, xy, 2); + } + + return ctx->ij_sample; +} + +static struct ir3_instruction * +get_barycentric_pixel(struct ir3_context *ctx) +{ + /* TODO when tgsi_to_nir supports "new-style" FS inputs switch + * this to create ij_pixel only on demand: + */ + return ctx->ij_pixel; +} + +static struct ir3_instruction * +get_frag_coord(struct ir3_context *ctx) +{ + if (!ctx->frag_coord) { + struct ir3_block *b = ctx->block; + struct ir3_instruction *xyzw[4]; + struct ir3_instruction *hw_frag_coord; + + hw_frag_coord = create_sysval_input(ctx, SYSTEM_VALUE_FRAG_COORD, 0xf); + ir3_split_dest(ctx->block, xyzw, hw_frag_coord, 0, 4); + + /* for frag_coord.xy, we get unsigned values.. 
we need + * to subtract (integer) 8 and divide by 16 (right- + * shift by 4) then convert to float: + * + * sub.s tmp, src, 8 + * shr.b tmp, tmp, 4 + * mov.u32f32 dst, tmp + * + */ + for (int i = 0; i < 2; i++) { + xyzw[i] = ir3_SUB_S(b, xyzw[i], 0, + create_immed(b, 8), 0); + xyzw[i] = ir3_SHR_B(b, xyzw[i], 0, + create_immed(b, 4), 0); + xyzw[i] = ir3_COV(b, xyzw[i], TYPE_U32, TYPE_F32); + } + + ctx->frag_coord = ir3_create_collect(ctx, xyzw, 4); + ctx->so->frag_coord = true; + } + + return ctx->frag_coord; } static void @@ -1136,7 +1337,6 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) struct ir3_instruction **dst; struct ir3_instruction * const *src; struct ir3_block *b = ctx->block; - nir_const_value *const_offset; int idx, comp; if (info->has_dest) { @@ -1146,14 +1346,17 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) dst = NULL; } + const unsigned primitive_param = ctx->so->shader->const_state.offsets.primitive_param * 4; + const unsigned primitive_map = ctx->so->shader->const_state.offsets.primitive_map * 4; + switch (intr->intrinsic) { case nir_intrinsic_load_uniform: idx = nir_intrinsic_base(intr); - const_offset = nir_src_as_const_value(intr->src[0]); - if (const_offset) { - idx += const_offset->u32[0]; + if (nir_src_is_const(intr->src[0])) { + idx += nir_src_as_uint(intr->src[0]); for (int i = 0; i < intr->num_components; i++) { - dst[i] = create_uniform(b, idx + i); + dst[i] = create_uniform_typed(b, idx + i, + nir_dest_bit_size(intr->dest) < 32 ? TYPE_F16 : TYPE_F32); } } else { src = ir3_get_src(ctx, &intr->src[0]); @@ -1166,30 +1369,198 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) * since we don't know in the assembler what the max * addr reg value can be: */ - ctx->so->constlen = ctx->s->num_uniforms; + ctx->so->constlen = MAX2(ctx->so->constlen, + ctx->so->shader->ubo_state.size / 16); + } + break; + + case nir_intrinsic_load_vs_primitive_stride_ir3: + dst[0] = create_uniform(b, primitive_param + 0); + break; + case nir_intrinsic_load_vs_vertex_stride_ir3: + dst[0] = create_uniform(b, primitive_param + 1); + break; + case nir_intrinsic_load_hs_patch_stride_ir3: + dst[0] = create_uniform(b, primitive_param + 2); + break; + case nir_intrinsic_load_patch_vertices_in: + dst[0] = create_uniform(b, primitive_param + 3); + break; + case nir_intrinsic_load_tess_param_base_ir3: + dst[0] = create_uniform(b, primitive_param + 4); + dst[1] = create_uniform(b, primitive_param + 5); + break; + case nir_intrinsic_load_tess_factor_base_ir3: + dst[0] = create_uniform(b, primitive_param + 6); + dst[1] = create_uniform(b, primitive_param + 7); + break; + + case nir_intrinsic_load_primitive_location_ir3: + idx = nir_intrinsic_driver_location(intr); + dst[0] = create_uniform(b, primitive_map + idx); + break; + + case nir_intrinsic_load_gs_header_ir3: + dst[0] = ctx->gs_header; + break; + case nir_intrinsic_load_tcs_header_ir3: + dst[0] = ctx->tcs_header; + break; + + case nir_intrinsic_load_primitive_id: + dst[0] = ctx->primitive_id; + break; + + case nir_intrinsic_load_tess_coord: + if (!ctx->tess_coord) { + ctx->tess_coord = + create_sysval_input(ctx, SYSTEM_VALUE_TESS_COORD, 0x3); } + ir3_split_dest(b, dst, ctx->tess_coord, 0, 2); + + /* Unused, but ir3_put_dst() below wants to free something */ + dst[2] = create_immed(b, 0); break; + + case nir_intrinsic_end_patch_ir3: + assert(ctx->so->type == MESA_SHADER_TESS_CTRL); + struct ir3_instruction *end = ir3_ENDPATCH(b); + array_insert(b, b->keeps, end); + + end->barrier_class 
= IR3_BARRIER_EVERYTHING; + end->barrier_conflict = IR3_BARRIER_EVERYTHING; + break; + + case nir_intrinsic_store_global_ir3: { + struct ir3_instruction *value, *addr, *offset; + + addr = ir3_create_collect(ctx, (struct ir3_instruction*[]){ + ir3_get_src(ctx, &intr->src[1])[0], + ir3_get_src(ctx, &intr->src[1])[1] + }, 2); + + offset = ir3_get_src(ctx, &intr->src[2])[0]; + + value = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), + intr->num_components); + + struct ir3_instruction *stg = + ir3_STG_G(ctx->block, addr, 0, value, 0, + create_immed(ctx->block, intr->num_components), 0, offset, 0); + stg->cat6.type = TYPE_U32; + stg->cat6.iim_val = 1; + + array_insert(b, b->keeps, stg); + + stg->barrier_class = IR3_BARRIER_BUFFER_W; + stg->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W; + break; + } + + case nir_intrinsic_load_global_ir3: { + struct ir3_instruction *addr, *offset; + + addr = ir3_create_collect(ctx, (struct ir3_instruction*[]){ + ir3_get_src(ctx, &intr->src[0])[0], + ir3_get_src(ctx, &intr->src[0])[1] + }, 2); + + offset = ir3_get_src(ctx, &intr->src[1])[0]; + + struct ir3_instruction *load = + ir3_LDG(b, addr, 0, create_immed(ctx->block, intr->num_components), + 0, offset, 0); + load->cat6.type = TYPE_U32; + load->regs[0]->wrmask = MASK(intr->num_components); + + load->barrier_class = IR3_BARRIER_BUFFER_R; + load->barrier_conflict = IR3_BARRIER_BUFFER_W; + + ir3_split_dest(b, dst, load, 0, intr->num_components); + break; + } + case nir_intrinsic_load_ubo: emit_intrinsic_load_ubo(ctx, intr, dst); break; + case nir_intrinsic_load_frag_coord: + ir3_split_dest(b, dst, get_frag_coord(ctx), 0, 4); + break; + case nir_intrinsic_load_sample_pos_from_id: { + /* NOTE: blob seems to always use TYPE_F16 and then cov.f16f32, + * but that doesn't seem necessary. + */ + struct ir3_instruction *offset = + ir3_RGETPOS(b, ir3_get_src(ctx, &intr->src[0])[0], 0); + offset->regs[0]->wrmask = 0x3; + offset->cat5.type = TYPE_F32; + + ir3_split_dest(b, dst, offset, 0, 2); + + break; + } + case nir_intrinsic_load_size_ir3: + if (!ctx->ij_size) { + ctx->ij_size = + create_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_SIZE, 0x1); + } + dst[0] = ctx->ij_size; + break; + case nir_intrinsic_load_barycentric_centroid: + ir3_split_dest(b, dst, get_barycentric_centroid(ctx), 0, 2); + break; + case nir_intrinsic_load_barycentric_sample: + if (ctx->so->key.msaa) { + ir3_split_dest(b, dst, get_barycentric_sample(ctx), 0, 2); + } else { + ir3_split_dest(b, dst, get_barycentric_pixel(ctx), 0, 2); + } + break; + case nir_intrinsic_load_barycentric_pixel: + ir3_split_dest(b, dst, get_barycentric_pixel(ctx), 0, 2); + break; + case nir_intrinsic_load_interpolated_input: + idx = nir_intrinsic_base(intr); + comp = nir_intrinsic_component(intr); + src = ir3_get_src(ctx, &intr->src[0]); + if (nir_src_is_const(intr->src[1])) { + struct ir3_instruction *coord = ir3_create_collect(ctx, src, 2); + idx += nir_src_as_uint(intr->src[1]); + for (int i = 0; i < intr->num_components; i++) { + unsigned inloc = idx * 4 + i + comp; + if (ctx->so->inputs[idx].bary && + !ctx->so->inputs[idx].use_ldlv) { + dst[i] = ir3_BARY_F(b, create_immed(b, inloc), 0, coord, 0); + } else { + /* for non-varyings use the pre-setup input, since + * that is easier than mapping things back to a + * nir_variable to figure out what it is. 
+ */ + dst[i] = ctx->inputs[inloc]; + } + } + } else { + ir3_context_error(ctx, "unhandled"); + } + break; case nir_intrinsic_load_input: idx = nir_intrinsic_base(intr); comp = nir_intrinsic_component(intr); - const_offset = nir_src_as_const_value(intr->src[0]); - if (const_offset) { - idx += const_offset->u32[0]; + if (nir_src_is_const(intr->src[0])) { + idx += nir_src_as_uint(intr->src[0]); for (int i = 0; i < intr->num_components; i++) { unsigned n = idx * 4 + i + comp; - dst[i] = ctx->ir->inputs[n]; + dst[i] = ctx->inputs[n]; + compile_assert(ctx, ctx->inputs[n]); } } else { src = ir3_get_src(ctx, &intr->src[0]); struct ir3_instruction *collect = - ir3_create_collect(ctx, ctx->ir->inputs, ctx->ir->ninputs); + ir3_create_collect(ctx, ctx->ir->inputs, ctx->ninputs); struct ir3_instruction *addr = ir3_get_addr(ctx, src[0], 4); for (int i = 0; i < intr->num_components; i++) { unsigned n = idx * 4 + i + comp; - dst[i] = create_indirect_load(ctx, ctx->ir->ninputs, + dst[i] = create_indirect_load(ctx, ctx->ninputs, n, addr, collect); } } @@ -1256,8 +1627,10 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) emit_intrinsic_image_size(ctx, intr, dst); break; case nir_intrinsic_image_deref_atomic_add: - case nir_intrinsic_image_deref_atomic_min: - case nir_intrinsic_image_deref_atomic_max: + case nir_intrinsic_image_deref_atomic_imin: + case nir_intrinsic_image_deref_atomic_umin: + case nir_intrinsic_image_deref_atomic_imax: + case nir_intrinsic_image_deref_atomic_umax: case nir_intrinsic_image_deref_atomic_and: case nir_intrinsic_image_deref_atomic_or: case nir_intrinsic_image_deref_atomic_xor: @@ -1268,7 +1641,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) ctx->so->no_earlyz = true; dst[0] = ctx->funcs->emit_intrinsic_atomic_image(ctx, intr); break; - case nir_intrinsic_barrier: + case nir_intrinsic_control_barrier: case nir_intrinsic_memory_barrier: case nir_intrinsic_group_memory_barrier: case nir_intrinsic_memory_barrier_atomic_counter: @@ -1282,57 +1655,56 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) case nir_intrinsic_store_output: idx = nir_intrinsic_base(intr); comp = nir_intrinsic_component(intr); - const_offset = nir_src_as_const_value(intr->src[1]); - compile_assert(ctx, const_offset != NULL); - idx += const_offset->u32[0]; + compile_assert(ctx, nir_src_is_const(intr->src[1])); + idx += nir_src_as_uint(intr->src[1]); src = ir3_get_src(ctx, &intr->src[0]); for (int i = 0; i < intr->num_components; i++) { unsigned n = idx * 4 + i + comp; - ctx->ir->outputs[n] = src[i]; + ctx->outputs[n] = src[i]; } break; case nir_intrinsic_load_base_vertex: case nir_intrinsic_load_first_vertex: if (!ctx->basevertex) { ctx->basevertex = create_driver_param(ctx, IR3_DP_VTXID_BASE); - add_sysval_input(ctx, SYSTEM_VALUE_FIRST_VERTEX, ctx->basevertex); } dst[0] = ctx->basevertex; break; + case nir_intrinsic_load_base_instance: + if (!ctx->base_instance) { + ctx->base_instance = create_driver_param(ctx, IR3_DP_INSTID_BASE); + } + dst[0] = ctx->base_instance; + break; case nir_intrinsic_load_vertex_id_zero_base: case nir_intrinsic_load_vertex_id: if (!ctx->vertex_id) { gl_system_value sv = (intr->intrinsic == nir_intrinsic_load_vertex_id) ? 
SYSTEM_VALUE_VERTEX_ID : SYSTEM_VALUE_VERTEX_ID_ZERO_BASE; - ctx->vertex_id = create_input(ctx, 0); - add_sysval_input(ctx, sv, ctx->vertex_id); + ctx->vertex_id = create_sysval_input(ctx, sv, 0x1); } dst[0] = ctx->vertex_id; break; case nir_intrinsic_load_instance_id: if (!ctx->instance_id) { - ctx->instance_id = create_input(ctx, 0); - add_sysval_input(ctx, SYSTEM_VALUE_INSTANCE_ID, - ctx->instance_id); + ctx->instance_id = create_sysval_input(ctx, SYSTEM_VALUE_INSTANCE_ID, 0x1); } dst[0] = ctx->instance_id; break; case nir_intrinsic_load_sample_id: + ctx->so->per_samp = true; + /* fall-thru */ case nir_intrinsic_load_sample_id_no_per_sample: if (!ctx->samp_id) { - ctx->samp_id = create_input(ctx, 0); + ctx->samp_id = create_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_ID, 0x1); ctx->samp_id->regs[0]->flags |= IR3_REG_HALF; - add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_ID, - ctx->samp_id); } dst[0] = ir3_COV(b, ctx->samp_id, TYPE_U16, TYPE_U32); break; case nir_intrinsic_load_sample_mask_in: if (!ctx->samp_mask_in) { - ctx->samp_mask_in = create_input(ctx, 0); - add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_MASK_IN, - ctx->samp_mask_in); + ctx->samp_mask_in = create_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_MASK_IN, 0x1); } dst[0] = ctx->samp_mask_in; break; @@ -1346,8 +1718,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) case nir_intrinsic_load_front_face: if (!ctx->frag_face) { ctx->so->frag_face = true; - ctx->frag_face = create_input(ctx, 0); - add_sysval_input(ctx, SYSTEM_VALUE_FRONT_FACE, ctx->frag_face); + ctx->frag_face = create_sysval_input(ctx, SYSTEM_VALUE_FRONT_FACE, 0x1); ctx->frag_face->regs[0]->flags |= IR3_REG_HALF; } /* for fragface, we get -1 for back and 0 for front. However this is @@ -1358,17 +1729,15 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) break; case nir_intrinsic_load_local_invocation_id: if (!ctx->local_invocation_id) { - ctx->local_invocation_id = create_input_compmask(ctx, 0, 0x7); - add_sysval_input_compmask(ctx, SYSTEM_VALUE_LOCAL_INVOCATION_ID, - 0x7, ctx->local_invocation_id); + ctx->local_invocation_id = + create_sysval_input(ctx, SYSTEM_VALUE_LOCAL_INVOCATION_ID, 0x7); } ir3_split_dest(b, dst, ctx->local_invocation_id, 0, 3); break; case nir_intrinsic_load_work_group_id: if (!ctx->work_group_id) { - ctx->work_group_id = create_input_compmask(ctx, 0, 0x7); - add_sysval_input_compmask(ctx, SYSTEM_VALUE_WORK_GROUP_ID, - 0x7, ctx->work_group_id); + ctx->work_group_id = + create_sysval_input(ctx, SYSTEM_VALUE_WORK_GROUP_ID, 0x7); ctx->work_group_id->regs[0]->flags |= IR3_REG_HIGH; } ir3_split_dest(b, dst, ctx->work_group_id, 0, 3); @@ -1402,6 +1771,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) /* condition always goes in predicate register: */ cond->regs[0]->num = regid(REG_P0, 0); + cond->regs[0]->flags &= ~IR3_REG_SSA; kill = ir3_KILL(b, cond, 0); array_insert(ctx->ir, ctx->ir->predicates, kill); @@ -1411,6 +1781,36 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr) break; } + + case nir_intrinsic_cond_end_ir3: { + struct ir3_instruction *cond, *kill; + + src = ir3_get_src(ctx, &intr->src[0]); + cond = ir3_b2n(b, src[0]); + + /* NOTE: only cmps.*.* can write p0.x: */ + cond = ir3_CMPS_S(b, cond, 0, create_immed(b, 0), 0); + cond->cat2.condition = IR3_COND_NE; + + /* condition always goes in predicate register: */ + cond->regs[0]->num = regid(REG_P0, 0); + + kill = ir3_CONDEND(b, cond, 0); + + kill->barrier_class = IR3_BARRIER_EVERYTHING; + kill->barrier_conflict = 
IR3_BARRIER_EVERYTHING; + + array_insert(ctx->ir, ctx->ir->predicates, kill); + array_insert(b, b->keeps, kill); + break; + } + + case nir_intrinsic_load_shared_ir3: + emit_intrinsic_load_shared_ir3(ctx, intr, dst); + break; + case nir_intrinsic_store_shared_ir3: + emit_intrinsic_store_shared_ir3(ctx, intr); + break; default: ir3_context_error(ctx, "Unhandled intrinsic type: %s\n", nir_intrinsic_infos[intr->intrinsic].name); @@ -1426,10 +1826,19 @@ emit_load_const(struct ir3_context *ctx, nir_load_const_instr *instr) { struct ir3_instruction **dst = ir3_get_dst_ssa(ctx, &instr->def, instr->def.num_components); - type_t type = (instr->def.bit_size < 32) ? TYPE_U16 : TYPE_U32; - for (int i = 0; i < instr->def.num_components; i++) - dst[i] = create_immed_typed(ctx->block, instr->value.u32[i], type); + if (instr->def.bit_size < 32) { + for (int i = 0; i < instr->def.num_components; i++) + dst[i] = create_immed_typed(ctx->block, + instr->value[i].u16, + TYPE_U16); + } else { + for (int i = 0; i < instr->def.num_components; i++) + dst[i] = create_immed_typed(ctx->block, + instr->value[i].u32, + TYPE_U32); + } + } static void @@ -1450,6 +1859,30 @@ emit_undef(struct ir3_context *ctx, nir_ssa_undef_instr *undef) * texture fetch/sample instructions: */ +static type_t +get_tex_dest_type(nir_tex_instr *tex) +{ + type_t type; + + switch (nir_alu_type_get_base_type(tex->dest_type)) { + case nir_type_invalid: + case nir_type_float: + type = nir_dest_bit_size(tex->dest) < 32 ? TYPE_F16 : TYPE_F32; + break; + case nir_type_int: + type = nir_dest_bit_size(tex->dest) < 32 ? TYPE_S16 : TYPE_S32; + break; + case nir_type_uint: + case nir_type_bool: + type = nir_dest_bit_size(tex->dest) < 32 ? TYPE_U16 : TYPE_U32; + break; + default: + unreachable("bad dest_type"); + } + + return type; +} + static void tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp) { @@ -1468,6 +1901,8 @@ tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp) case GLSL_SAMPLER_DIM_RECT: case GLSL_SAMPLER_DIM_EXTERNAL: case GLSL_SAMPLER_DIM_MS: + case GLSL_SAMPLER_DIM_SUBPASS: + case GLSL_SAMPLER_DIM_SUBPASS_MS: coords = 2; break; case GLSL_SAMPLER_DIM_3D: @@ -1591,6 +2026,24 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) } switch (tex->op) { + case nir_texop_tex_prefetch: + compile_assert(ctx, !has_bias); + compile_assert(ctx, !has_lod); + compile_assert(ctx, !compare); + compile_assert(ctx, !has_proj); + compile_assert(ctx, !has_off); + compile_assert(ctx, !ddx); + compile_assert(ctx, !ddy); + compile_assert(ctx, !sample_index); + compile_assert(ctx, nir_tex_instr_src_index(tex, nir_tex_src_texture_offset) < 0); + compile_assert(ctx, nir_tex_instr_src_index(tex, nir_tex_src_sampler_offset) < 0); + + if (ctx->so->num_sampler_prefetch < IR3_MAX_SAMPLER_PREFETCH) { + opc = OPC_META_TEX_PREFETCH; + ctx->so->num_sampler_prefetch++; + break; + } + /* fallthru */ case nir_texop_tex: opc = has_lod ? 
OPC_SAML : OPC_SAM; break; case nir_texop_txb: opc = OPC_SAMB; break; case nir_texop_txl: opc = OPC_SAML; break; @@ -1609,12 +2062,9 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) case 3: opc = OPC_GATHER4A; break; } break; + case nir_texop_txf_ms_fb: case nir_texop_txf_ms: opc = OPC_ISAMM; break; - case nir_texop_txs: - case nir_texop_query_levels: - case nir_texop_texture_samples: - case nir_texop_samples_identical: - case nir_texop_txf_ms_mcs: + default: ir3_context_error(ctx, "Unhandled NIR tex type: %d\n", tex->op); return; } @@ -1691,7 +2141,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) /* NOTE a3xx (and possibly a4xx?) might be different, using isaml * with scaled x coord according to requested sample: */ - if (tex->op == nir_texop_txf_ms) { + if (opc == OPC_ISAMM) { if (ctx->compiler->txf_ms_with_isaml) { /* the samples are laid out in x dimension as * 0 1 2 3 @@ -1731,33 +2181,51 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) src1[nsrc1++] = lod; } - switch (tex->dest_type) { - case nir_type_invalid: - case nir_type_float: - type = TYPE_F32; - break; - case nir_type_int: - type = TYPE_S32; - break; - case nir_type_uint: - case nir_type_bool: - type = TYPE_U32; - break; - default: - unreachable("bad dest_type"); - } + type = get_tex_dest_type(tex); if (opc == OPC_GETLOD) - type = TYPE_U32; + type = TYPE_S32; + + struct ir3_instruction *samp_tex; + + if (tex->op == nir_texop_txf_ms_fb) { + /* only expect a single txf_ms_fb per shader: */ + compile_assert(ctx, !ctx->so->fb_read); + compile_assert(ctx, ctx->so->type == MESA_SHADER_FRAGMENT); + + ctx->so->fb_read = true; + samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){ + create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16), + create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16), + }, 2); + + ctx->so->num_samp++; + } else { + samp_tex = get_tex_samp_tex_src(ctx, tex); + } - struct ir3_instruction *samp_tex = get_tex_samp_tex_src(ctx, tex); struct ir3_instruction *col0 = ir3_create_collect(ctx, src0, nsrc0); struct ir3_instruction *col1 = ir3_create_collect(ctx, src1, nsrc1); - sam = ir3_SAM(b, opc, type, MASK(ncomp), flags, - samp_tex, col0, col1); + if (opc == OPC_META_TEX_PREFETCH) { + int idx = nir_tex_instr_src_index(tex, nir_tex_src_coord); + + compile_assert(ctx, tex->src[idx].src.is_ssa); + + sam = ir3_META_TEX_PREFETCH(b); + __ssa_dst(sam)->wrmask = MASK(ncomp); /* dst */ + sam->prefetch.input_offset = + ir3_nir_coord_offset(tex->src[idx].src.ssa); + sam->prefetch.tex = tex->texture_index; + sam->prefetch.samp = tex->sampler_index; + } else { + sam = ir3_SAM(b, opc, type, MASK(ncomp), flags, + samp_tex, col0, col1); + } if ((ctx->astc_srgb & (1 << tex->texture_index)) && !nir_tex_instr_is_query(tex)) { + assert(opc != OPC_META_TEX_PREFETCH); + /* only need first 3 components: */ sam->regs[0]->wrmask = 0x7; ir3_split_dest(b, dst, sam, 0, 3); @@ -1783,7 +2251,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) compile_assert(ctx, tex->dest_type == nir_type_float); for (i = 0; i < 2; i++) { - dst[i] = ir3_MUL_F(b, ir3_COV(b, dst[i], TYPE_U32, TYPE_F32), 0, + dst[i] = ir3_MUL_F(b, ir3_COV(b, dst[i], TYPE_S32, TYPE_F32), 0, factor, 0); } } @@ -1792,20 +2260,22 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex) } static void -emit_tex_query_levels(struct ir3_context *ctx, nir_tex_instr *tex) +emit_tex_info(struct ir3_context *ctx, nir_tex_instr *tex, unsigned idx) { struct ir3_block *b = ctx->block; struct ir3_instruction **dst, *sam; + type_t dst_type = 
get_tex_dest_type(tex); dst = ir3_get_dst(ctx, &tex->dest, 1); - sam = ir3_SAM(b, OPC_GETINFO, TYPE_U32, 0b0100, 0, + sam = ir3_SAM(b, OPC_GETINFO, dst_type, 1 << idx, 0, get_tex_samp_tex_src(ctx, tex), NULL, NULL); /* even though there is only one component, since it ends - * up in .z rather than .x, we need a split_dest() + * up in .y/.z/.w rather than .x, we need a split_dest() */ - ir3_split_dest(b, dst, sam, 0, 3); + if (idx) + ir3_split_dest(b, dst, sam, 0, idx + 1); /* The # of levels comes from getinfo.z. We need to add 1 to it, since * the value in TEX_CONST_0 is zero-based. @@ -1823,6 +2293,7 @@ emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex) struct ir3_instruction **dst, *sam; struct ir3_instruction *lod; unsigned flags, coords; + type_t dst_type = get_tex_dest_type(tex); tex_info(tex, &flags, &coords); @@ -1839,7 +2310,7 @@ emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex) lod = ir3_get_src(ctx, &tex->src[0].src)[0]; - sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags, + sam = ir3_SAM(b, OPC_GETSIZE, dst_type, 0b1111, flags, get_tex_samp_tex_src(ctx, tex), lod, NULL); ir3_split_dest(b, dst, sam, 0, 4); @@ -1906,7 +2377,10 @@ emit_instr(struct ir3_context *ctx, nir_instr *instr) emit_tex_txs(ctx, tex); break; case nir_texop_query_levels: - emit_tex_query_levels(ctx, tex); + emit_tex_info(ctx, tex, 2); + break; + case nir_texop_texture_samples: + emit_tex_info(ctx, tex, 3); break; default: emit_tex(ctx, tex); @@ -1933,7 +2407,6 @@ get_block(struct ir3_context *ctx, const nir_block *nblock) { struct ir3_block *block; struct hash_entry *hentry; - unsigned i; hentry = _mesa_hash_table_search(ctx->block_ht, nblock); if (hentry) @@ -1943,12 +2416,9 @@ get_block(struct ir3_context *ctx, const nir_block *nblock) block->nblock = nblock; _mesa_hash_table_insert(ctx->block_ht, nblock, block); - block->predecessors_count = nblock->predecessors->entries; - block->predecessors = ralloc_array_size(block, - sizeof(block->predecessors[0]), block->predecessors_count); - i = 0; + block->predecessors = _mesa_pointer_set_create(block); set_foreach(nblock->predecessors, sentry) { - block->predecessors[i++] = get_block(ctx, sentry->key); + _mesa_set_add(block->predecessors, get_block(ctx, sentry->key)); } return block; @@ -2002,6 +2472,7 @@ static void emit_loop(struct ir3_context *ctx, nir_loop *nloop) { emit_cf_list(ctx, &nloop->body); + ctx->so->loops++; } static void @@ -2064,7 +2535,6 @@ emit_cf_list(struct ir3_context *ctx, struct exec_list *list) static void emit_stream_out(struct ir3_context *ctx) { - struct ir3_shader_variant *v = ctx->so; struct ir3 *ir = ctx->ir; struct ir3_stream_output_info *strmout = &ctx->so->shader->stream_output; @@ -2076,9 +2546,7 @@ emit_stream_out(struct ir3_context *ctx) * so that it is seen as live over the entire duration * of the shader: */ - vtxcnt = create_input(ctx, 0); - add_sysval_input(ctx, SYSTEM_VALUE_VERTEX_CNT, vtxcnt); - + vtxcnt = create_sysval_input(ctx, SYSTEM_VALUE_VERTEX_CNT, 0x1); maxvtxcnt = create_driver_param(ctx, IR3_DP_VTXCNT_MAX); /* at this point, we are at the original 'end' block, @@ -2104,6 +2572,7 @@ emit_stream_out(struct ir3_context *ctx) /* setup 'if (vtxcnt < maxvtxcnt)' condition: */ cond = ir3_CMPS_S(ctx->block, vtxcnt, 0, maxvtxcnt, 0); cond->regs[0]->num = regid(REG_P0, 0); + cond->regs[0]->flags &= ~IR3_REG_SSA; cond->cat2.condition = IR3_COND_LT; /* condition goes on previous block to the conditional, @@ -2122,13 +2591,14 @@ emit_stream_out(struct ir3_context *ctx) * stripped out in the backend. 
*/ for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) { + struct ir3_const_state *const_state = &ctx->so->shader->const_state; unsigned stride = strmout->stride[i]; struct ir3_instruction *base, *off; - base = create_uniform(ctx->block, regid(v->constbase.tfbo, i)); + base = create_uniform(ctx->block, regid(const_state->offsets.tfbo, i)); /* 24-bit should be enough: */ - off = ir3_MUL_U(ctx->block, vtxcnt, 0, + off = ir3_MUL_U24(ctx->block, vtxcnt, 0, create_immed(ctx->block, stride * 4), 0); bases[i] = ir3_ADD_S(ctx->block, off, 0, base, 0); @@ -2171,7 +2641,7 @@ emit_function(struct ir3_context *ctx, nir_function_impl *impl) /* at this point, we should have a single empty block, * into which we emit the 'end' instruction. */ - compile_assert(ctx, list_empty(&ctx->block->instr_list)); + compile_assert(ctx, list_is_empty(&ctx->block->instr_list)); /* If stream-out (aka transform-feedback) enabled, emit the * stream-out instructions, followed by a new empty block (into @@ -2191,46 +2661,25 @@ emit_function(struct ir3_context *ctx, nir_function_impl *impl) emit_stream_out(ctx); } - ir3_END(ctx->block); -} - -static struct ir3_instruction * -create_frag_coord(struct ir3_context *ctx, unsigned comp) -{ - struct ir3_block *block = ctx->block; - struct ir3_instruction *instr; - - if (!ctx->frag_coord) { - ctx->frag_coord = create_input_compmask(ctx, 0, 0xf); - /* defer add_sysval_input() until after all inputs created */ - } - - ir3_split_dest(block, &instr, ctx->frag_coord, comp, 1); - - switch (comp) { - case 0: /* .x */ - case 1: /* .y */ - /* for frag_coord, we get unsigned values.. we need - * to subtract (integer) 8 and divide by 16 (right- - * shift by 4) then convert to float: - * - * sub.s tmp, src, 8 - * shr.b tmp, tmp, 4 - * mov.u32f32 dst, tmp - * - */ - instr = ir3_SUB_S(block, instr, 0, - create_immed(block, 8), 0); - instr = ir3_SHR_B(block, instr, 0, - create_immed(block, 4), 0); - instr = ir3_COV(block, instr, TYPE_U32, TYPE_F32); - - return instr; - case 2: /* .z */ - case 3: /* .w */ - default: - /* seems that we can use these as-is: */ - return instr; + /* Vertex shaders in a tessellation or geometry pipeline treat END as a + * NOP and has an epilogue that writes the VS outputs to local storage, to + * be read by the HS. Then it resets execution mask (chmask) and chains + * to the next shader (chsh). + */ + if ((ctx->so->type == MESA_SHADER_VERTEX && + (ctx->so->key.has_gs || ctx->so->key.tessellation)) || + (ctx->so->type == MESA_SHADER_TESS_EVAL && ctx->so->key.has_gs)) { + struct ir3_instruction *chmask = + ir3_CHMASK(ctx->block); + chmask->barrier_class = IR3_BARRIER_EVERYTHING; + chmask->barrier_conflict = IR3_BARRIER_EVERYTHING; + + struct ir3_instruction *chsh = + ir3_CHSH(ctx->block); + chsh->barrier_class = IR3_BARRIER_EVERYTHING; + chsh->barrier_conflict = IR3_BARRIER_EVERYTHING; + } else { + ir3_END(ctx->block); } } @@ -2243,6 +2692,12 @@ setup_input(struct ir3_context *ctx, nir_variable *in) unsigned frac = in->data.location_frac; unsigned slot = in->data.location; + /* Inputs are loaded using ldlw or ldg for these stages. */ + if (ctx->so->type == MESA_SHADER_TESS_CTRL || + ctx->so->type == MESA_SHADER_TESS_EVAL || + ctx->so->type == MESA_SHADER_GEOMETRY) + return; + /* skip unread inputs, we could end up with (for example), unsplit * matrix/etc inputs in the case they are not read, so just silently * skip these. 
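
Illustrative sketch (not from this patch): the hunks touching create_frag_input(), setup_input() and the new pack_inlocs() pass all rely on the same scalar-input addressing, which the hypothetical helper below merely restates, assuming the declarations already present in ir3_compiler_nir.c. The real per-component loop is in setup_input() in the following hunk.

/* Hypothetical helper, for illustration only: how one scalar FS varying
 * component is addressed and fetched in this file.
 */
static struct ir3_instruction *
fetch_scalar_varying(struct ir3_context *ctx, unsigned n /* driver_location */,
		unsigned i /* component index */, unsigned frac /* location_frac */)
{
	struct ir3_shader_variant *so = ctx->so;

	/* ctx->inputs[] is a flat array of scalars, four per vec4 slot: */
	unsigned idx = (n * 4) + i + frac;

	/* flat-shaded varyings bypass bary.f on parts with flat_bypass and are
	 * fetched with ldlv instead (the real code also considers rasterflat):
	 */
	bool use_ldlv = ctx->compiler->flat_bypass &&
			(so->inputs[n].interpolate == INTERP_MODE_FLAT);

	/* 'idx' is the provisional packed inloc; pack_inlocs() re-packs it
	 * after compilation, once unused components are known:
	 */
	return create_frag_input(ctx, use_ldlv, idx);
}
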
@@ -2256,14 +2711,18 @@ setup_input(struct ir3_context *ctx, nir_variable *in) so->inputs[n].interpolate = in->data.interpolation; if (ctx->so->type == MESA_SHADER_FRAGMENT) { + + /* if any varyings have 'sample' qualifer, that triggers us + * to run in per-sample mode: + */ + so->per_samp |= in->data.sample; + for (int i = 0; i < ncomp; i++) { struct ir3_instruction *instr = NULL; unsigned idx = (n * 4) + i + frac; if (slot == VARYING_SLOT_POS) { - so->inputs[n].bary = false; - so->frag_coord = true; - instr = create_frag_coord(ctx, i); + ir3_context_error(ctx, "fragcoord should be a sysval!\n"); } else if (slot == VARYING_SLOT_PNTC) { /* see for example st_nir_fixup_varying_slots().. this is * maybe a bit mesa/st specific. But we need things to line @@ -2275,10 +2734,8 @@ setup_input(struct ir3_context *ctx, nir_variable *in) */ so->inputs[n].slot = VARYING_SLOT_VAR8; so->inputs[n].bary = true; - instr = create_frag_input(ctx, false); + instr = create_frag_input(ctx, false, idx); } else { - bool use_ldlv = false; - /* detect the special case for front/back colors where * we need to do flat vs smooth shading depending on * rast state: @@ -2299,23 +2756,35 @@ setup_input(struct ir3_context *ctx, nir_variable *in) if (ctx->compiler->flat_bypass) { if ((so->inputs[n].interpolate == INTERP_MODE_FLAT) || (so->inputs[n].rasterflat && ctx->so->key.rasterflat)) - use_ldlv = true; + so->inputs[n].use_ldlv = true; } so->inputs[n].bary = true; - instr = create_frag_input(ctx, use_ldlv); + instr = create_frag_input(ctx, so->inputs[n].use_ldlv, idx); } - compile_assert(ctx, idx < ctx->ir->ninputs); + compile_assert(ctx, idx < ctx->ninputs); - ctx->ir->inputs[idx] = instr; + ctx->inputs[idx] = instr; } } else if (ctx->so->type == MESA_SHADER_VERTEX) { + /* We shouldn't have fractional input for VS input.. that only shows + * up with varying packing + */ + assert(frac == 0); + + struct ir3_instruction *input = create_input(ctx, (1 << ncomp) - 1); + struct ir3_instruction *components[ncomp]; + + input->input.inidx = n; + + ir3_split_dest(ctx->block, components, input, 0, ncomp); + for (int i = 0; i < ncomp; i++) { unsigned idx = (n * 4) + i + frac; - compile_assert(ctx, idx < ctx->ir->ninputs); - ctx->ir->inputs[idx] = create_input(ctx, idx); + compile_assert(ctx, idx < ctx->ninputs); + ctx->inputs[idx] = components[i]; } } else { ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type); @@ -2326,6 +2795,104 @@ setup_input(struct ir3_context *ctx, nir_variable *in) } } +/* Initially we assign non-packed inloc's for varyings, as we don't really + * know up-front which components will be unused. After all the compilation + * stages we scan the shader to see which components are actually used, and + * re-pack the inlocs to eliminate unneeded varyings. 
+ */ +static void +pack_inlocs(struct ir3_context *ctx) +{ + struct ir3_shader_variant *so = ctx->so; + uint8_t used_components[so->inputs_count]; + + memset(used_components, 0, sizeof(used_components)); + + /* + * First Step: scan shader to find which bary.f/ldlv remain: + */ + + foreach_block (block, &ctx->ir->block_list) { + foreach_instr (instr, &block->instr_list) { + if (is_input(instr)) { + unsigned inloc = instr->regs[1]->iim_val; + unsigned i = inloc / 4; + unsigned j = inloc % 4; + + compile_assert(ctx, instr->regs[1]->flags & IR3_REG_IMMED); + compile_assert(ctx, i < so->inputs_count); + + used_components[i] |= 1 << j; + } else if (instr->opc == OPC_META_TEX_PREFETCH) { + for (int n = 0; n < 2; n++) { + unsigned inloc = instr->prefetch.input_offset + n; + unsigned i = inloc / 4; + unsigned j = inloc % 4; + + compile_assert(ctx, i < so->inputs_count); + + used_components[i] |= 1 << j; + } + } + } + } + + /* + * Second Step: reassign varying inloc/slots: + */ + + unsigned actual_in = 0; + unsigned inloc = 0; + + for (unsigned i = 0; i < so->inputs_count; i++) { + unsigned compmask = 0, maxcomp = 0; + + so->inputs[i].inloc = inloc; + so->inputs[i].bary = false; + + for (unsigned j = 0; j < 4; j++) { + if (!(used_components[i] & (1 << j))) + continue; + + compmask |= (1 << j); + actual_in++; + maxcomp = j + 1; + + /* at this point, since used_components[i] mask is only + * considering varyings (ie. not sysvals) we know this + * is a varying: + */ + so->inputs[i].bary = true; + } + + if (so->inputs[i].bary) { + so->varying_in++; + so->inputs[i].compmask = (1 << maxcomp) - 1; + inloc += maxcomp; + } + } + + /* + * Third Step: reassign packed inloc's: + */ + + foreach_block (block, &ctx->ir->block_list) { + foreach_instr (instr, &block->instr_list) { + if (is_input(instr)) { + unsigned inloc = instr->regs[1]->iim_val; + unsigned i = inloc / 4; + unsigned j = inloc % 4; + + instr->regs[1]->iim_val = so->inputs[i].inloc + j; + } else if (instr->opc == OPC_META_TEX_PREFETCH) { + unsigned i = instr->prefetch.input_offset / 4; + unsigned j = instr->prefetch.input_offset % 4; + instr->prefetch.input_offset = so->inputs[i].inloc + j; + } + } + } +} + static void setup_output(struct ir3_context *ctx, nir_variable *out) { @@ -2345,13 +2912,18 @@ setup_output(struct ir3_context *ctx, nir_variable *out) case FRAG_RESULT_COLOR: so->color0_mrt = 1; break; + case FRAG_RESULT_SAMPLE_MASK: + so->writes_smask = true; + break; default: if (slot >= FRAG_RESULT_DATA0) break; ir3_context_error(ctx, "unknown FS output name: %s\n", gl_frag_result_name(slot)); } - } else if (ctx->so->type == MESA_SHADER_VERTEX) { + } else if (ctx->so->type == MESA_SHADER_VERTEX || + ctx->so->type == MESA_SHADER_TESS_EVAL || + ctx->so->type == MESA_SHADER_GEOMETRY) { switch (slot) { case VARYING_SLOT_POS: so->writes_pos = true; @@ -2359,6 +2931,11 @@ setup_output(struct ir3_context *ctx, nir_variable *out) case VARYING_SLOT_PSIZ: so->writes_psize = true; break; + case VARYING_SLOT_PRIMITIVE_ID: + case VARYING_SLOT_LAYER: + case VARYING_SLOT_GS_VERTEX_FLAGS_IR3: + debug_assert(ctx->so->type == MESA_SHADER_GEOMETRY); + /* fall through */ case VARYING_SLOT_COL0: case VARYING_SLOT_COL1: case VARYING_SLOT_BFC0: @@ -2373,9 +2950,13 @@ setup_output(struct ir3_context *ctx, nir_variable *out) break; if ((VARYING_SLOT_TEX0 <= slot) && (slot <= VARYING_SLOT_TEX7)) break; - ir3_context_error(ctx, "unknown VS output name: %s\n", + ir3_context_error(ctx, "unknown %s shader output name: %s\n", + _mesa_shader_stage_to_string(ctx->so->type), 
gl_varying_slot_name(slot)); } + } else if (ctx->so->type == MESA_SHADER_TESS_CTRL) { + /* output lowered to buffer writes. */ + return; } else { ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type); } @@ -2388,8 +2969,8 @@ setup_output(struct ir3_context *ctx, nir_variable *out) for (int i = 0; i < ncomp; i++) { unsigned idx = (n * 4) + i + frac; - compile_assert(ctx, idx < ctx->ir->noutputs); - ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0)); + compile_assert(ctx, idx < ctx->noutputs); + ctx->outputs[idx] = create_immed(ctx->block, fui(0.0)); } /* if varying packing doesn't happen, we could end up in a situation @@ -2402,8 +2983,8 @@ setup_output(struct ir3_context *ctx, nir_variable *out) */ for (int i = 0; i < frac; i++) { unsigned idx = (n * 4) + i; - if (!ctx->ir->outputs[idx]) { - ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0)); + if (!ctx->outputs[idx]) { + ctx->outputs[idx] = create_immed(ctx->block, fui(0.0)); } } } @@ -2418,46 +2999,41 @@ max_drvloc(struct exec_list *vars) return drvloc; } -static const unsigned max_sysvals[] = { - [MESA_SHADER_FRAGMENT] = 24, // TODO - [MESA_SHADER_VERTEX] = 16, - [MESA_SHADER_COMPUTE] = 16, // TODO how many do we actually need? - [MESA_SHADER_KERNEL] = 16, // TODO how many do we actually need? -}; - static void emit_instructions(struct ir3_context *ctx) { - unsigned ninputs, noutputs; nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s); - ninputs = (max_drvloc(&ctx->s->inputs) + 1) * 4; - noutputs = (max_drvloc(&ctx->s->outputs) + 1) * 4; + ctx->ninputs = (max_drvloc(&ctx->s->inputs) + 1) * 4; + ctx->noutputs = (max_drvloc(&ctx->s->outputs) + 1) * 4; - /* we need to leave room for sysvals: - */ - ninputs += max_sysvals[ctx->so->type]; + ctx->inputs = rzalloc_array(ctx, struct ir3_instruction *, ctx->ninputs); + ctx->outputs = rzalloc_array(ctx, struct ir3_instruction *, ctx->noutputs); - ctx->ir = ir3_create(ctx->compiler, ctx->so->type, ninputs, noutputs); + ctx->ir = ir3_create(ctx->compiler, ctx->so->type); /* Create inputs in first block: */ ctx->block = get_block(ctx, nir_start_block(fxn)); ctx->in_block = ctx->block; list_addtail(&ctx->block->node, &ctx->ir->block_list); - ninputs -= max_sysvals[ctx->so->type]; - /* for fragment shader, the vcoord input register is used as the * base for bary.f varying fetch instrs: + * + * TODO defer creating ctx->ij_pixel and corresponding sysvals + * until emit_intrinsic when we know they are actually needed. + * For now, we defer creating ctx->ij_centroid, etc, since we + * only need ij_pixel for "old style" varying inputs (ie. + * tgsi_to_nir) */ struct ir3_instruction *vcoord = NULL; if (ctx->so->type == MESA_SHADER_FRAGMENT) { struct ir3_instruction *xy[2]; - vcoord = create_input_compmask(ctx, 0, 0x3); + vcoord = create_input(ctx, 0x3); ir3_split_dest(ctx->block, xy, vcoord, 0, 2); - ctx->frag_vcoord = ir3_create_collect(ctx, xy, 2); + ctx->ij_pixel = ir3_create_collect(ctx, xy, 2); } /* Setup inputs: */ @@ -2469,13 +3045,43 @@ emit_instructions(struct ir3_context *ctx) * because sysvals need to be appended after varyings: */ if (vcoord) { - add_sysval_input_compmask(ctx, SYSTEM_VALUE_VARYING_COORD, + add_sysval_input_compmask(ctx, SYSTEM_VALUE_BARYCENTRIC_PIXEL, 0x3, vcoord); } - if (ctx->frag_coord) { - add_sysval_input_compmask(ctx, SYSTEM_VALUE_FRAG_COORD, - 0xf, ctx->frag_coord); + + /* Tesselation shaders always need primitive ID for indexing the + * BO. 
Geometry shaders don't always need it but when they do it has to be
+	 * delivered and unclobbered in the VS.  To make things easy, we always
+	 * make room for it in VS/DS.
+	 */
+	bool has_tess = ctx->so->key.tessellation != IR3_TESS_NONE;
+	bool has_gs = ctx->so->key.has_gs;
+	switch (ctx->so->type) {
+	case MESA_SHADER_VERTEX:
+		if (has_tess) {
+			ctx->tcs_header = create_sysval_input(ctx, SYSTEM_VALUE_TCS_HEADER_IR3, 0x1);
+			ctx->primitive_id = create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
+		} else if (has_gs) {
+			ctx->gs_header = create_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, 0x1);
+			ctx->primitive_id = create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
+		}
+		break;
+	case MESA_SHADER_TESS_CTRL:
+		ctx->tcs_header = create_sysval_input(ctx, SYSTEM_VALUE_TCS_HEADER_IR3, 0x1);
+		ctx->primitive_id = create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
+		break;
+	case MESA_SHADER_TESS_EVAL:
+		if (has_gs)
+			ctx->gs_header = create_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, 0x1);
+		ctx->primitive_id = create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
+		break;
+	case MESA_SHADER_GEOMETRY:
+		ctx->gs_header = create_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, 0x1);
+		ctx->primitive_id = create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
+		break;
+	default:
+		break;
 	}
 
 	/* Setup outputs: */
@@ -2493,11 +3099,6 @@ emit_instructions(struct ir3_context *ctx)
 		ctx->so->num_samp += glsl_type_get_image_count(var->type);
 	}
 
-	/* Setup registers (which should only be arrays): */
-	nir_foreach_register(reg, &ctx->s->registers) {
-		ir3_declare_array(ctx, reg);
-	}
-
 	/* NOTE: need to do something more clever when we support >1 fxn */
 	nir_foreach_register(reg, &fxn->registers) {
 		ir3_declare_array(ctx, reg);
@@ -2507,28 +3108,6 @@ emit_instructions(struct ir3_context *ctx)
 	emit_function(ctx, fxn);
 }
 
-/* from NIR perspective, we actually have varying inputs.  But the varying
- * inputs, from an IR standpoint, are just bary.f/ldlv instructions.  The
- * only actual inputs are the sysvals.
- */
-static void
-fixup_frag_inputs(struct ir3_context *ctx)
-{
-	struct ir3_shader_variant *so = ctx->so;
-	struct ir3 *ir = ctx->ir;
-	unsigned i = 0;
-
-	/* sysvals should appear at the end of the inputs, drop everything else: */
-	while ((i < so->inputs_count) && !so->inputs[i].sysval)
-		i++;
-
-	/* at IR level, inputs are always blocks of 4 scalars: */
-	i *= 4;
-
-	ir->inputs = &ir->inputs[i];
-	ir->ninputs -= i;
-}
-
 /* Fixup tex sampler state for astc/srgb workaround instructions.  We
  * need to assign the tex state indexes for these after we know the
  * max tex index. 
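As an aside on the new pack_inlocs() pass added earlier in this diff: the core of it is plain bitmask arithmetic, turning a per-input mask of components that are actually read into a packed compmask/inloc assignment. The standalone sketch below shows that arithmetic with concrete numbers; the struct and function names here are invented stand-ins for illustration, not part of the ir3 API or of this patch.

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for the inloc-related fields of a shader variant input */
struct demo_input {
	unsigned inloc;
	unsigned compmask;
	int      bary;
};

/* Packs inputs the same way pack_inlocs() does: each used input keeps a
 * contiguous compmask up to its highest read component, and inlocs are
 * handed out consecutively, skipping inputs with no used components.
 */
static unsigned
pack_demo_inputs(struct demo_input *inputs, const uint8_t *used, unsigned count)
{
	unsigned inloc = 0;

	for (unsigned i = 0; i < count; i++) {
		unsigned maxcomp = 0;

		inputs[i].inloc = inloc;
		inputs[i].compmask = 0;
		inputs[i].bary = 0;

		for (unsigned j = 0; j < 4; j++) {
			if (used[i] & (1u << j)) {
				maxcomp = j + 1;
				inputs[i].bary = 1;
			}
		}

		if (inputs[i].bary) {
			inputs[i].compmask = (1u << maxcomp) - 1;
			inloc += maxcomp;
		}
	}

	return inloc; /* total number of packed scalar inlocs consumed */
}

int main(void)
{
	/* input 0 reads .xy, input 1 is unused, input 2 reads .xyzw: */
	uint8_t used[3] = { 0x3, 0x0, 0xf };
	struct demo_input in[3];
	unsigned total = pack_demo_inputs(in, used, 3);

	/* prints: in0 inloc=0 mask=0x3, in2 inloc=2 mask=0xf, total=6 */
	printf("in0 inloc=%u mask=0x%x, in2 inloc=%u mask=0x%x, total=%u\n",
	       in[0].inloc, in[0].compmask, in[2].inloc, in[2].compmask, total);
	return 0;
}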
@@ -2570,23 +3149,78 @@ fixup_binning_pass(struct ir3_context *ctx) struct ir3 *ir = ctx->ir; unsigned i, j; + /* first pass, remove unused outputs from the IR level outputs: */ + for (i = 0, j = 0; i < ir->outputs_count; i++) { + struct ir3_instruction *out = ir->outputs[i]; + assert(out->opc == OPC_META_COLLECT); + unsigned outidx = out->collect.outidx; + unsigned slot = so->outputs[outidx].slot; + + /* throw away everything but first position/psize */ + if ((slot == VARYING_SLOT_POS) || (slot == VARYING_SLOT_PSIZ)) { + ir->outputs[j] = ir->outputs[i]; + j++; + } + } + ir->outputs_count = j; + + /* second pass, cleanup the unused slots in ir3_shader_variant::outputs + * table: + */ for (i = 0, j = 0; i < so->outputs_count; i++) { unsigned slot = so->outputs[i].slot; /* throw away everything but first position/psize */ if ((slot == VARYING_SLOT_POS) || (slot == VARYING_SLOT_PSIZ)) { - if (i != j) { - so->outputs[j] = so->outputs[i]; - ir->outputs[(j*4)+0] = ir->outputs[(i*4)+0]; - ir->outputs[(j*4)+1] = ir->outputs[(i*4)+1]; - ir->outputs[(j*4)+2] = ir->outputs[(i*4)+2]; - ir->outputs[(j*4)+3] = ir->outputs[(i*4)+3]; + so->outputs[j] = so->outputs[i]; + + /* fixup outidx to point to new output table entry: */ + struct ir3_instruction *out; + foreach_output(out, ir) { + if (out->collect.outidx == i) { + out->collect.outidx = j; + break; + } } + j++; } } so->outputs_count = j; - ir->noutputs = j * 4; +} + +static void +collect_tex_prefetches(struct ir3_context *ctx, struct ir3 *ir) +{ + unsigned idx = 0; + + /* Collect sampling instructions eligible for pre-dispatch. */ + foreach_block (block, &ir->block_list) { + foreach_instr_safe (instr, &block->instr_list) { + if (instr->opc == OPC_META_TEX_PREFETCH) { + assert(idx < ARRAY_SIZE(ctx->so->sampler_prefetch)); + struct ir3_sampler_prefetch *fetch = + &ctx->so->sampler_prefetch[idx]; + idx++; + + fetch->cmd = IR3_SAMPLER_PREFETCH_CMD; + fetch->wrmask = instr->regs[0]->wrmask; + fetch->tex_id = instr->prefetch.tex; + fetch->samp_id = instr->prefetch.samp; + fetch->dst = instr->regs[0]->num; + fetch->src = instr->prefetch.input_offset; + + ctx->so->total_in = + MAX2(ctx->so->total_in, instr->prefetch.input_offset + 2); + + /* Disable half precision until supported. */ + fetch->half_precision = !!(instr->regs[0]->flags & IR3_REG_HALF); + + /* Remove the prefetch placeholder instruction: */ + list_delinit(&instr->node); + } + } + } } int @@ -2595,8 +3229,6 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler, { struct ir3_context *ctx; struct ir3 *ir; - struct ir3_instruction **inputs; - unsigned i, actual_in, inloc; int ret = 0, max_bary; assert(!so->ir); @@ -2618,52 +3250,88 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler, ir = so->ir = ctx->ir; - /* keep track of the inputs from TGSI perspective.. */ - inputs = ir->inputs; + assert((ctx->noutputs % 4) == 0); - /* but fixup actual inputs for frag shader: */ - if (so->type == MESA_SHADER_FRAGMENT) - fixup_frag_inputs(ctx); + /* Setup IR level outputs, which are "collects" that gather + * the scalar components of outputs. + */ + for (unsigned i = 0; i < ctx->noutputs; i += 4) { + unsigned ncomp = 0; + /* figure out the # of components written: + * + * TODO do we need to handle holes, ie. if .x and .z + * components written, but .y component not written? 
+		 */
+		for (unsigned j = 0; j < 4; j++) {
+			if (!ctx->outputs[i + j])
+				break;
+			ncomp++;
+		}
 
-	/* at this point, for binning pass, throw away unneeded outputs: */
-	if (so->binning_pass && (ctx->compiler->gpu_id < 600))
-		fixup_binning_pass(ctx);
+		/* Note that in some stages, like TCS, store_output is
+		 * lowered to memory writes, so no components of the
+		 * output are "written" from the PoV of traditional store-
+		 * output instructions:
+		 */
+		if (!ncomp)
+			continue;
 
-	/* if we want half-precision outputs, mark the output registers
-	 * as half:
-	 */
-	if (so->key.half_precision) {
-		for (i = 0; i < ir->noutputs; i++) {
-			struct ir3_instruction *out = ir->outputs[i];
+		struct ir3_instruction *out =
+			ir3_create_collect(ctx, &ctx->outputs[i], ncomp);
 
-			if (!out)
-				continue;
+		int outidx = i / 4;
+		assert(outidx < so->outputs_count);
 
-			/* if frag shader writes z, that needs to be full precision: */
-			if (so->outputs[i/4].slot == FRAG_RESULT_DEPTH)
-				continue;
+		/* stash index into so->outputs[] so we can map the
+		 * output back to slot/etc later:
+		 */
+		out->collect.outidx = outidx;
 
-			out->regs[0]->flags |= IR3_REG_HALF;
-			/* output could be a fanout (ie. texture fetch output)
-			 * in which case we need to propagate the half-reg flag
-			 * up to the definer so that RA sees it:
-			 */
-			if (out->opc == OPC_META_FO) {
-				out = out->regs[1]->instr;
-				out->regs[0]->flags |= IR3_REG_HALF;
-			}
+		array_insert(ir, ir->outputs, out);
+	}
 
-			if (out->opc == OPC_MOV) {
-				out->cat1.dst_type = half_type(out->cat1.dst_type);
-			}
+	/* Set up the gs header as an output for the vertex shader so it won't
+	 * clobber it for the tess ctrl shader.
+	 *
+	 * TODO this could probably be done more cleanly in a nir pass.
+	 */
+	if (ctx->so->type == MESA_SHADER_VERTEX ||
+			(ctx->so->key.has_gs && ctx->so->type == MESA_SHADER_TESS_EVAL)) {
+		if (ctx->primitive_id) {
+			unsigned n = so->outputs_count++;
+			so->outputs[n].slot = VARYING_SLOT_PRIMITIVE_ID;
+
+			struct ir3_instruction *out =
+				ir3_create_collect(ctx, &ctx->primitive_id, 1);
+			out->collect.outidx = n;
+			array_insert(ir, ir->outputs, out);
+		}
+
+		if (ctx->gs_header) {
+			unsigned n = so->outputs_count++;
+			so->outputs[n].slot = VARYING_SLOT_GS_HEADER_IR3;
+			struct ir3_instruction *out =
+				ir3_create_collect(ctx, &ctx->gs_header, 1);
+			out->collect.outidx = n;
+			array_insert(ir, ir->outputs, out);
 		}
-	}
 
-	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
-		printf("BEFORE CP:\n");
-		ir3_print(ir);
+		if (ctx->tcs_header) {
+			unsigned n = so->outputs_count++;
+			so->outputs[n].slot = VARYING_SLOT_TCS_HEADER_IR3;
+			struct ir3_instruction *out =
+				ir3_create_collect(ctx, &ctx->tcs_header, 1);
+			out->collect.outidx = n;
+			array_insert(ir, ir->outputs, out);
+		}
 	}
 
+	/* at this point, for binning pass, throw away unneeded outputs: */
+	if (so->binning_pass && (ctx->compiler->gpu_id < 600))
+		fixup_binning_pass(ctx);
+
+	ir3_debug_print(ir, "BEFORE CP");
+
 	ir3_cp(ir, so);
 
 	/* at this point, for binning pass, throw away unneeded outputs:
@@ -2674,24 +3342,33 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 	if (so->binning_pass && (ctx->compiler->gpu_id >= 600))
 		fixup_binning_pass(ctx);
 
-	/* Insert mov if there's same instruction for each output.
-	 * eg. dEQP-GLES31.functional.shaders.opaque_type_indexing.sampler.const_expression.vertex.sampler2dshadow
+	/* for a6xx+, binning and draw pass VS use same VBO state, so we
+	 * need to make sure not to remove any inputs that are used by
+	 * the nonbinning VS. 
*/ - for (int i = ir->noutputs - 1; i >= 0; i--) { - if (!ir->outputs[i]) - continue; - for (unsigned j = 0; j < i; j++) { - if (ir->outputs[i] == ir->outputs[j]) { - ir->outputs[i] = - ir3_MOV(ir->outputs[i]->block, ir->outputs[i], TYPE_F32); - } + if (ctx->compiler->gpu_id >= 600 && so->binning_pass && + so->type == MESA_SHADER_VERTEX) { + for (int i = 0; i < ctx->ninputs; i++) { + struct ir3_instruction *in = ctx->inputs[i]; + + if (!in) + continue; + + unsigned n = i / 4; + unsigned c = i % 4; + + debug_assert(n < so->nonbinning->inputs_count); + + if (so->nonbinning->inputs[n].sysval) + continue; + + /* be sure to keep inputs, even if only used in VS */ + if (so->nonbinning->inputs[n].compmask & (1 << c)) + array_insert(in->block, in->block->keeps, in); } } - if (ir3_shader_debug & IR3_DBG_OPTMSGS) { - printf("BEFORE GROUPING:\n"); - ir3_print(ir); - } + ir3_debug_print(ir, "BEFORE GROUPING"); ir3_sched_add_deps(ir); @@ -2700,17 +3377,11 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler, */ ir3_group(ir); - if (ir3_shader_debug & IR3_DBG_OPTMSGS) { - printf("AFTER GROUPING:\n"); - ir3_print(ir); - } + ir3_debug_print(ir, "AFTER GROUPING"); - ir3_depth(ir); + ir3_depth(ir, so); - if (ir3_shader_debug & IR3_DBG_OPTMSGS) { - printf("AFTER DEPTH:\n"); - ir3_print(ir); - } + ir3_debug_print(ir, "AFTER DEPTH"); /* do Sethi–Ullman numbering before scheduling: */ ir3_sun(ir); @@ -2725,71 +3396,124 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler, ir3_a6xx_fixup_atomic_dests(ir, so); } - if (ir3_shader_debug & IR3_DBG_OPTMSGS) { - printf("AFTER SCHED:\n"); - ir3_print(ir); + ir3_debug_print(ir, "AFTER SCHED"); + + /* Pre-assign VS inputs on a6xx+ binning pass shader, to align + * with draw pass VS, so binning and draw pass can both use the + * same VBO state. + * + * Note that VS inputs are expected to be full precision. + */ + bool pre_assign_inputs = (ir->compiler->gpu_id >= 600) && + (ir->type == MESA_SHADER_VERTEX) && + so->binning_pass; + + if (pre_assign_inputs) { + for (unsigned i = 0; i < ctx->ninputs; i++) { + struct ir3_instruction *instr = ctx->inputs[i]; + + if (!instr) + continue; + + unsigned n = i / 4; + unsigned c = i % 4; + unsigned regid = so->nonbinning->inputs[n].regid + c; + + instr->regs[0]->num = regid; + } + + ret = ir3_ra(so, ctx->inputs, ctx->ninputs); + } else if (ctx->tcs_header) { + /* We need to have these values in the same registers between VS and TCS + * since the VS chains to TCS and doesn't get the sysvals redelivered. + */ + + ctx->tcs_header->regs[0]->num = regid(0, 0); + ctx->primitive_id->regs[0]->num = regid(0, 1); + struct ir3_instruction *precolor[] = { ctx->tcs_header, ctx->primitive_id }; + ret = ir3_ra(so, precolor, ARRAY_SIZE(precolor)); + } else if (ctx->gs_header) { + /* We need to have these values in the same registers between producer + * (VS or DS) and GS since the producer chains to GS and doesn't get + * the sysvals redelivered. 
+ */ + + ctx->gs_header->regs[0]->num = regid(0, 0); + ctx->primitive_id->regs[0]->num = regid(0, 1); + struct ir3_instruction *precolor[] = { ctx->gs_header, ctx->primitive_id }; + ret = ir3_ra(so, precolor, ARRAY_SIZE(precolor)); + } else if (so->num_sampler_prefetch) { + assert(so->type == MESA_SHADER_FRAGMENT); + struct ir3_instruction *instr, *precolor[2]; + int idx = 0; + + foreach_input(instr, ir) { + if (instr->input.sysval != SYSTEM_VALUE_BARYCENTRIC_PIXEL) + continue; + + assert(idx < ARRAY_SIZE(precolor)); + + precolor[idx] = instr; + instr->regs[0]->num = idx; + + idx++; + } + ret = ir3_ra(so, precolor, idx); + } else { + ret = ir3_ra(so, NULL, 0); } - ret = ir3_ra(ir, so->type, so->frag_coord, so->frag_face); if (ret) { DBG("RA failed!"); goto out; } - if (ir3_shader_debug & IR3_DBG_OPTMSGS) { - printf("AFTER RA:\n"); - ir3_print(ir); - } + ir3_debug_print(ir, "AFTER RA"); - /* fixup input/outputs: */ - for (i = 0; i < so->outputs_count; i++) { - /* sometimes we get outputs that don't write the .x coord, like: - * - * decl_var shader_out INTERP_MODE_NONE float Color (VARYING_SLOT_VAR9.z, 1, 0) - * - * Presumably the result of varying packing and then eliminating - * some unneeded varyings? Just skip head to the first valid - * component of the output. - */ - for (unsigned j = 0; j < 4; j++) { - struct ir3_instruction *instr = ir->outputs[(i*4) + j]; - if (instr) { - so->outputs[i].regid = instr->regs[0]->num; - break; - } - } + if (so->type == MESA_SHADER_FRAGMENT) + pack_inlocs(ctx); + + /* + * Fixup inputs/outputs to point to the actual registers assigned: + * + * 1) initialize to r63.x (invalid/unused) + * 2) iterate IR level inputs/outputs and update the variants + * inputs/outputs table based on the assigned registers for + * the remaining inputs/outputs. 
+ */ + + for (unsigned i = 0; i < so->inputs_count; i++) + so->inputs[i].regid = INVALID_REG; + for (unsigned i = 0; i < so->outputs_count; i++) + so->outputs[i].regid = INVALID_REG; + + struct ir3_instruction *out; + foreach_output(out, ir) { + assert(out->opc == OPC_META_COLLECT); + unsigned outidx = out->collect.outidx; + + so->outputs[outidx].regid = out->regs[0]->num; + so->outputs[outidx].half = !!(out->regs[0]->flags & IR3_REG_HALF); } - /* Note that some or all channels of an input may be unused: */ - actual_in = 0; - inloc = 0; - for (i = 0; i < so->inputs_count; i++) { - unsigned j, reg = regid(63,0), compmask = 0, maxcomp = 0; - so->inputs[i].ncomp = 0; - so->inputs[i].inloc = inloc; - for (j = 0; j < 4; j++) { - struct ir3_instruction *in = inputs[(i*4) + j]; - if (in && !(in->flags & IR3_INSTR_UNUSED)) { - compmask |= (1 << j); - reg = in->regs[0]->num - j; - actual_in++; - so->inputs[i].ncomp++; - if ((so->type == MESA_SHADER_FRAGMENT) && so->inputs[i].bary) { - /* assign inloc: */ - assert(in->regs[1]->flags & IR3_REG_IMMED); - in->regs[1]->iim_val = inloc + j; - maxcomp = j + 1; - } + struct ir3_instruction *in; + foreach_input(in, ir) { + assert(in->opc == OPC_META_INPUT); + unsigned inidx = in->input.inidx; + + if (pre_assign_inputs && !so->inputs[inidx].sysval) { + if (VALIDREG(so->nonbinning->inputs[inidx].regid)) { + compile_assert(ctx, in->regs[0]->num == + so->nonbinning->inputs[inidx].regid); + compile_assert(ctx, !!(in->regs[0]->flags & IR3_REG_HALF) == + so->nonbinning->inputs[inidx].half); } + so->inputs[inidx].regid = so->nonbinning->inputs[inidx].regid; + so->inputs[inidx].half = so->nonbinning->inputs[inidx].half; + } else { + so->inputs[inidx].regid = in->regs[0]->num; + so->inputs[inidx].half = !!(in->regs[0]->flags & IR3_REG_HALF); } - if ((so->type == MESA_SHADER_FRAGMENT) && compmask && so->inputs[i].bary) { - so->varying_in++; - so->inputs[i].compmask = (1 << maxcomp) - 1; - inloc += maxcomp; - } else if (!so->inputs[i].sysval) { - so->inputs[i].compmask = compmask; - } - so->inputs[i].regid = reg; } if (ctx->astc_srgb) @@ -2800,21 +3524,32 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler, */ ir3_legalize(ir, &so->has_ssbo, &so->need_pixlod, &max_bary); - if (ir3_shader_debug & IR3_DBG_OPTMSGS) { - printf("AFTER LEGALIZE:\n"); - ir3_print(ir); + ir3_debug_print(ir, "AFTER LEGALIZE"); + + /* Set (ss)(sy) on first TCS and GEOMETRY instructions, since we don't + * know what we might have to wait on when coming in from VS chsh. + */ + if (so->type == MESA_SHADER_TESS_CTRL || + so->type == MESA_SHADER_GEOMETRY ) { + foreach_block (block, &ir->block_list) { + foreach_instr (instr, &block->instr_list) { + instr->flags |= IR3_INSTR_SS | IR3_INSTR_SY; + break; + } + } } so->branchstack = ctx->max_stack; /* Note that actual_in counts inputs that are not bary.f'd for FS: */ - if (so->type == MESA_SHADER_VERTEX) - so->total_in = actual_in; - else + if (so->type == MESA_SHADER_FRAGMENT) so->total_in = max_bary + 1; so->max_sun = ir->max_sun; + /* Collect sampling instructions eligible for pre-dispatch. */ + collect_tex_prefetches(ctx, ir); + out: if (ret) { if (so->ir)