X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Ffreedreno%2Fir3%2Fir3_a6xx.c;h=b10e1b9502763b6bd1f9e4b58d1c51d9361eb9c0;hb=4f91345f4923134b226ecd94e5636ea81c97e284;hp=00260a4c5343d00b0eafdc2055022d6720978d41;hpb=a06bb486b0441539d46c7746fc45253bbc09b7b2;p=mesa.git diff --git a/src/freedreno/ir3/ir3_a6xx.c b/src/freedreno/ir3/ir3_a6xx.c index 00260a4c534..b10e1b95027 100644 --- a/src/freedreno/ir3/ir3_a6xx.c +++ b/src/freedreno/ir3/ir3_a6xx.c @@ -37,18 +37,6 @@ * encoding compared to a4xx/a5xx. */ - -static struct ir3_instruction * -ssbo_offset(struct ir3_block *b, struct ir3_instruction *byte_offset) -{ - /* TODO hardware wants offset in terms of elements, not bytes. Which - * is kinda nice but opposite of what nir does. It would be nice if - * we had a way to request the units of the offset to avoid the extra - * shift instructions.. - */ - return ir3_SHR_B(b, byte_offset, 0, create_immed(b, 2), 0); -} - /* src[] = { buffer_index, offset }. No const_index */ static void emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr, @@ -57,23 +45,17 @@ emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr, struct ir3_block *b = ctx->block; struct ir3_instruction *offset; struct ir3_instruction *ldib; - nir_const_value *buffer_index; - - /* can this be non-const buffer_index? how do we handle that? */ - buffer_index = nir_src_as_const_value(intr->src[0]); - compile_assert(ctx, buffer_index); - - int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, buffer_index->u32[0]); - offset = ssbo_offset(b, ir3_get_src(ctx, &intr->src[1])[0]); + offset = ir3_get_src(ctx, &intr->src[2])[0]; - ldib = ir3_LDIB(b, create_immed(b, ibo_idx), 0, offset, 0); + ldib = ir3_LDIB(b, ir3_ssbo_to_ibo(ctx, intr->src[0]), 0, offset, 0); ldib->regs[0]->wrmask = MASK(intr->num_components); ldib->cat6.iim_val = intr->num_components; ldib->cat6.d = 1; - ldib->cat6.type = TYPE_U32; + ldib->cat6.type = intr->dest.ssa.bit_size == 16 ? TYPE_U16 : TYPE_U32; ldib->barrier_class = IR3_BARRIER_BUFFER_R; ldib->barrier_conflict = IR3_BARRIER_BUFFER_W; + ir3_handle_bindless_cat6(ldib, intr->src[0]); ir3_split_dest(b, dst, ldib, 0, intr->num_components); } @@ -84,31 +66,23 @@ emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr) { struct ir3_block *b = ctx->block; struct ir3_instruction *stib, *val, *offset; - nir_const_value *buffer_index; - /* TODO handle wrmask properly, see _store_shared().. but I think - * it is more a PITA than that, since blob ends up loading the - * masked components and writing them back out. - */ - unsigned wrmask = intr->const_index[0]; + unsigned wrmask = nir_intrinsic_write_mask(intr); unsigned ncomp = ffs(~wrmask) - 1; - /* can this be non-const buffer_index? how do we handle that? */ - buffer_index = nir_src_as_const_value(intr->src[1]); - compile_assert(ctx, buffer_index); - - int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, buffer_index->u32[0]); + assert(wrmask == BITFIELD_MASK(intr->num_components)); /* src0 is offset, src1 is value: */ val = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), ncomp); - offset = ssbo_offset(b, ir3_get_src(ctx, &intr->src[2])[0]); + offset = ir3_get_src(ctx, &intr->src[3])[0]; - stib = ir3_STIB(b, create_immed(b, ibo_idx), 0, offset, 0, val, 0); + stib = ir3_STIB(b, ir3_ssbo_to_ibo(ctx, intr->src[1]), 0, offset, 0, val, 0); stib->cat6.iim_val = ncomp; stib->cat6.d = 1; - stib->cat6.type = TYPE_U32; + stib->cat6.type = intr->src[0].ssa->bit_size == 16 ? 
TYPE_U16 : TYPE_U32; stib->barrier_class = IR3_BARRIER_BUFFER_W; stib->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W; + ir3_handle_bindless_cat6(stib, intr->src[1]); array_insert(b, b->keeps, stib); } @@ -134,18 +108,11 @@ static struct ir3_instruction * emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr) { struct ir3_block *b = ctx->block; - struct ir3_instruction *atomic, *ibo, *src0, *src1, *offset, *data, *dummy; - nir_const_value *buffer_index; + struct ir3_instruction *atomic, *ibo, *src0, *src1, *data, *dummy; type_t type = TYPE_U32; - /* can this be non-const buffer_index? how do we handle that? */ - buffer_index = nir_src_as_const_value(intr->src[0]); - compile_assert(ctx, buffer_index); + ibo = ir3_ssbo_to_ibo(ctx, intr->src[0]); - int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, buffer_index->u32[0]); - ibo = create_immed(b, ibo_idx); - - offset = ir3_get_src(ctx, &intr->src[1])[0]; data = ir3_get_src(ctx, &intr->src[2])[0]; /* So this gets a bit creative: @@ -163,50 +130,51 @@ emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr) * Note that nir already multiplies the offset by four */ dummy = create_immed(b, 0); - src0 = ssbo_offset(b, offset); - if (intr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) { + if (intr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap_ir3) { + src0 = ir3_get_src(ctx, &intr->src[4])[0]; struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[3])[0]; src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){ dummy, compare, data }, 3); } else { + src0 = ir3_get_src(ctx, &intr->src[3])[0]; src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){ dummy, data }, 2); } switch (intr->intrinsic) { - case nir_intrinsic_ssbo_atomic_add: + case nir_intrinsic_ssbo_atomic_add_ir3: atomic = ir3_ATOMIC_ADD_G(b, ibo, 0, src0, 0, src1, 0); break; - case nir_intrinsic_ssbo_atomic_imin: + case nir_intrinsic_ssbo_atomic_imin_ir3: atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0); type = TYPE_S32; break; - case nir_intrinsic_ssbo_atomic_umin: + case nir_intrinsic_ssbo_atomic_umin_ir3: atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0); break; - case nir_intrinsic_ssbo_atomic_imax: + case nir_intrinsic_ssbo_atomic_imax_ir3: atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0); type = TYPE_S32; break; - case nir_intrinsic_ssbo_atomic_umax: + case nir_intrinsic_ssbo_atomic_umax_ir3: atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0); break; - case nir_intrinsic_ssbo_atomic_and: + case nir_intrinsic_ssbo_atomic_and_ir3: atomic = ir3_ATOMIC_AND_G(b, ibo, 0, src0, 0, src1, 0); break; - case nir_intrinsic_ssbo_atomic_or: + case nir_intrinsic_ssbo_atomic_or_ir3: atomic = ir3_ATOMIC_OR_G(b, ibo, 0, src0, 0, src1, 0); break; - case nir_intrinsic_ssbo_atomic_xor: + case nir_intrinsic_ssbo_atomic_xor_ir3: atomic = ir3_ATOMIC_XOR_G(b, ibo, 0, src0, 0, src1, 0); break; - case nir_intrinsic_ssbo_atomic_exchange: + case nir_intrinsic_ssbo_atomic_exchange_ir3: atomic = ir3_ATOMIC_XCHG_G(b, ibo, 0, src0, 0, src1, 0); break; - case nir_intrinsic_ssbo_atomic_comp_swap: + case nir_intrinsic_ssbo_atomic_comp_swap_ir3: atomic = ir3_ATOMIC_CMPXCHG_G(b, ibo, 0, src0, 0, src1, 0); break; default: @@ -218,6 +186,7 @@ emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr) atomic->cat6.type = type; atomic->barrier_class = IR3_BARRIER_BUFFER_W; atomic->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W; + ir3_handle_bindless_cat6(atomic, intr->src[0]); /* even 
if nothing consume the result, we can't DCE the instruction: */ array_insert(b, b->keeps, atomic); @@ -225,31 +194,54 @@ emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr) return atomic; } +/* src[] = { deref, coord, sample_index }. const_index[] = {} */ +static void +emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr, + struct ir3_instruction **dst) +{ + struct ir3_block *b = ctx->block; + struct ir3_instruction *ldib; + struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]); + unsigned ncoords = ir3_get_image_coords(intr, NULL); + + ldib = ir3_LDIB(b, ir3_image_to_ibo(ctx, intr->src[0]), 0, + ir3_create_collect(ctx, coords, ncoords), 0); + ldib->regs[0]->wrmask = MASK(intr->num_components); + ldib->cat6.iim_val = intr->num_components; + ldib->cat6.d = ncoords; + ldib->cat6.type = ir3_get_type_for_image_intrinsic(intr); + ldib->cat6.typed = true; + ldib->barrier_class = IR3_BARRIER_IMAGE_R; + ldib->barrier_conflict = IR3_BARRIER_IMAGE_W; + ir3_handle_bindless_cat6(ldib, intr->src[0]); + + ir3_split_dest(b, dst, ldib, 0, intr->num_components); +} + /* src[] = { deref, coord, sample_index, value }. const_index[] = {} */ static void emit_intrinsic_store_image(struct ir3_context *ctx, nir_intrinsic_instr *intr) { struct ir3_block *b = ctx->block; - const nir_variable *var = nir_intrinsic_get_var(intr, 0); struct ir3_instruction *stib; struct ir3_instruction * const *value = ir3_get_src(ctx, &intr->src[3]); struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]); - unsigned ncoords = ir3_get_image_coords(var, NULL); - unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0])); - unsigned ibo_idx = ir3_image_to_ibo(&ctx->so->image_mapping, slot); - unsigned ncomp = ir3_get_num_components_for_glformat(var->data.image.format); + unsigned ncoords = ir3_get_image_coords(intr, NULL); + enum pipe_format format = nir_intrinsic_format(intr); + unsigned ncomp = ir3_get_num_components_for_image_format(format); /* src0 is offset, src1 is value: */ - stib = ir3_STIB(b, create_immed(b, ibo_idx), 0, + stib = ir3_STIB(b, ir3_image_to_ibo(ctx, intr->src[0]), 0, ir3_create_collect(ctx, coords, ncoords), 0, ir3_create_collect(ctx, value, ncomp), 0); stib->cat6.iim_val = ncomp; stib->cat6.d = ncoords; - stib->cat6.type = ir3_get_image_type(var); + stib->cat6.type = ir3_get_type_for_image_intrinsic(intr); stib->cat6.typed = true; stib->barrier_class = IR3_BARRIER_IMAGE_W; stib->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W; + ir3_handle_bindless_cat6(stib, intr->src[0]); array_insert(b, b->keeps, stib); } @@ -259,15 +251,12 @@ static struct ir3_instruction * emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr) { struct ir3_block *b = ctx->block; - const nir_variable *var = nir_intrinsic_get_var(intr, 0); struct ir3_instruction *atomic, *ibo, *src0, *src1, *dummy; struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]); struct ir3_instruction *value = ir3_get_src(ctx, &intr->src[3])[0]; - unsigned ncoords = ir3_get_image_coords(var, NULL); - unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0])); - unsigned ibo_idx = ir3_image_to_ibo(&ctx->so->image_mapping, slot); + unsigned ncoords = ir3_get_image_coords(intr, NULL); - ibo = create_immed(b, ibo_idx); + ibo = ir3_image_to_ibo(ctx, intr->src[0]); /* So this gets a bit creative: * @@ -284,7 +273,8 @@ emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr) dummy = 
create_immed(b, 0); src0 = ir3_create_collect(ctx, coords, ncoords); - if (intr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) { + if (intr->intrinsic == nir_intrinsic_image_atomic_comp_swap || + intr->intrinsic == nir_intrinsic_bindless_image_atomic_comp_swap) { struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[4])[0]; src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){ dummy, compare, value @@ -296,28 +286,40 @@ emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr) } switch (intr->intrinsic) { - case nir_intrinsic_image_deref_atomic_add: + case nir_intrinsic_image_atomic_add: + case nir_intrinsic_bindless_image_atomic_add: atomic = ir3_ATOMIC_ADD_G(b, ibo, 0, src0, 0, src1, 0); break; - case nir_intrinsic_image_deref_atomic_min: + case nir_intrinsic_image_atomic_imin: + case nir_intrinsic_image_atomic_umin: + case nir_intrinsic_bindless_image_atomic_imin: + case nir_intrinsic_bindless_image_atomic_umin: atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0); break; - case nir_intrinsic_image_deref_atomic_max: + case nir_intrinsic_image_atomic_imax: + case nir_intrinsic_image_atomic_umax: + case nir_intrinsic_bindless_image_atomic_imax: + case nir_intrinsic_bindless_image_atomic_umax: atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0); break; - case nir_intrinsic_image_deref_atomic_and: + case nir_intrinsic_image_atomic_and: + case nir_intrinsic_bindless_image_atomic_and: atomic = ir3_ATOMIC_AND_G(b, ibo, 0, src0, 0, src1, 0); break; - case nir_intrinsic_image_deref_atomic_or: + case nir_intrinsic_image_atomic_or: + case nir_intrinsic_bindless_image_atomic_or: atomic = ir3_ATOMIC_OR_G(b, ibo, 0, src0, 0, src1, 0); break; - case nir_intrinsic_image_deref_atomic_xor: + case nir_intrinsic_image_atomic_xor: + case nir_intrinsic_bindless_image_atomic_xor: atomic = ir3_ATOMIC_XOR_G(b, ibo, 0, src0, 0, src1, 0); break; - case nir_intrinsic_image_deref_atomic_exchange: + case nir_intrinsic_image_atomic_exchange: + case nir_intrinsic_bindless_image_atomic_exchange: atomic = ir3_ATOMIC_XCHG_G(b, ibo, 0, src0, 0, src1, 0); break; - case nir_intrinsic_image_deref_atomic_comp_swap: + case nir_intrinsic_image_atomic_comp_swap: + case nir_intrinsic_bindless_image_atomic_comp_swap: atomic = ir3_ATOMIC_CMPXCHG_G(b, ibo, 0, src0, 0, src1, 0); break; default: @@ -326,10 +328,11 @@ emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr) atomic->cat6.iim_val = 1; atomic->cat6.d = ncoords; - atomic->cat6.type = ir3_get_image_type(var); + atomic->cat6.type = ir3_get_type_for_image_intrinsic(intr); atomic->cat6.typed = true; atomic->barrier_class = IR3_BARRIER_IMAGE_W; atomic->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W; + ir3_handle_bindless_cat6(atomic, intr->src[0]); /* even if nothing consume the result, we can't DCE the instruction: */ array_insert(b, b->keeps, atomic); @@ -337,12 +340,33 @@ emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr) return atomic; } +static void +emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr, + struct ir3_instruction **dst) +{ + struct ir3_block *b = ctx->block; + struct ir3_instruction *ibo = ir3_image_to_ibo(ctx, intr->src[0]); + struct ir3_instruction *resinfo = ir3_RESINFO(b, ibo, 0); + resinfo->cat6.iim_val = 1; + resinfo->cat6.d = intr->num_components; + resinfo->cat6.type = TYPE_U32; + resinfo->cat6.typed = false; + /* resinfo has no writemask and always writes out 3 components: */ + compile_assert(ctx, 
intr->num_components <= 3); + resinfo->regs[0]->wrmask = MASK(3); + ir3_handle_bindless_cat6(resinfo, intr->src[0]); + + ir3_split_dest(b, dst, resinfo, 0, intr->num_components); +} + const struct ir3_context_funcs ir3_a6xx_funcs = { .emit_intrinsic_load_ssbo = emit_intrinsic_load_ssbo, .emit_intrinsic_store_ssbo = emit_intrinsic_store_ssbo, .emit_intrinsic_atomic_ssbo = emit_intrinsic_atomic_ssbo, + .emit_intrinsic_load_image = emit_intrinsic_load_image, .emit_intrinsic_store_image = emit_intrinsic_store_image, .emit_intrinsic_atomic_image = emit_intrinsic_atomic_image, + .emit_intrinsic_image_size = emit_intrinsic_image_size, }; /* @@ -350,91 +374,80 @@ const struct ir3_context_funcs ir3_a6xx_funcs = { * extra mov from src1.x to dst. This way the other compiler passes * can ignore this quirk of the new instruction encoding. * - * This might cause extra complication in the future when we support - * spilling, as I think we'd want to re-run the scheduling pass. One - * possible alternative might be to do this in the RA pass after - * ra_allocate() but before destroying the SSA links. (Ie. we do - * want to know if anything consumes the result of the atomic instr, - * if there is no consumer then inserting the extra mov is pointless. + * This should run after RA. */ static struct ir3_instruction * get_atomic_dest_mov(struct ir3_instruction *atomic) { + struct ir3_instruction *mov; + /* if we've already created the mov-out, then re-use it: */ if (atomic->data) return atomic->data; + /* We are already out of SSA here, so we can't use the nice builders: */ + mov = ir3_instr_create(atomic->block, OPC_MOV); + ir3_reg_create(mov, 0, 0); /* dst */ + ir3_reg_create(mov, 0, 0); /* src */ + + mov->cat1.src_type = TYPE_U32; + mov->cat1.dst_type = TYPE_U32; + /* extract back out the 'dummy' which serves as stand-in for dest: */ - struct ir3_instruction *src = ssa(atomic->regs[3]); - debug_assert(src->opc == OPC_META_FI); - struct ir3_instruction *dummy = ssa(src->regs[1]); + struct ir3_instruction *src = atomic->regs[3]->instr; + debug_assert(src->opc == OPC_META_COLLECT); - struct ir3_instruction *mov = ir3_MOV(atomic->block, dummy, TYPE_U32); + *mov->regs[0] = *atomic->regs[0]; + *mov->regs[1] = *src->regs[1]->instr->regs[0]; mov->flags |= IR3_INSTR_SY; - if (atomic->regs[0]->flags & IR3_REG_ARRAY) { - mov->regs[0]->flags |= IR3_REG_ARRAY; - mov->regs[0]->array = atomic->regs[0]->array; - } - /* it will have already been appended to the end of the block, which * isn't where we want it, so fix-up the location: */ - list_delinit(&mov->node); - list_add(&mov->node, &atomic->node); - - /* And because this is after instruction scheduling, we don't really - * have a good way to know if extra delay slots are needed. For - * example, if the result is consumed by an stib (storeImage()) there - * would be no extra delay slots in place already, but 5 are needed. 
- * Just plan for the worst and hope nobody looks at the resulting - * code that is generated :-( - */ - struct ir3_instruction *nop = ir3_NOP(atomic->block); - nop->repeat = 5; - - list_delinit(&nop->node); - list_add(&nop->node, &mov->node); + ir3_instr_move_after(mov, atomic); return atomic->data = mov; } -void +bool ir3_a6xx_fixup_atomic_dests(struct ir3 *ir, struct ir3_shader_variant *so) { - if (so->image_mapping.num_ibo == 0) - return; + bool progress = false; + + if (ir3_shader_nibo(so) == 0) + return false; - list_for_each_entry (struct ir3_block, block, &ir->block_list, node) { - list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) { + foreach_block (block, &ir->block_list) { + foreach_instr (instr, &block->instr_list) { instr->data = NULL; } } - list_for_each_entry (struct ir3_block, block, &ir->block_list, node) { - list_for_each_entry_safe (struct ir3_instruction, instr, &block->instr_list, node) { - struct ir3_register *reg; - - foreach_src(reg, instr) { - struct ir3_instruction *src = ssa(reg); + foreach_block (block, &ir->block_list) { + foreach_instr_safe (instr, &block->instr_list) { + foreach_src (reg, instr) { + struct ir3_instruction *src = reg->instr; if (!src) continue; - if (is_atomic(src->opc) && (src->flags & IR3_INSTR_G)) + if (is_atomic(src->opc) && (src->flags & IR3_INSTR_G)) { reg->instr = get_atomic_dest_mov(src); + progress = true; + } } } + } - /* we also need to fixup shader outputs: */ - for (unsigned i = 0; i < ir->noutputs; i++) { - if (!ir->outputs[i]) - continue; - if (is_atomic(ir->outputs[i]->opc) && (ir->outputs[i]->flags & IR3_INSTR_G)) - ir->outputs[i] = get_atomic_dest_mov(ir->outputs[i]); + /* we also need to fixup shader outputs: */ + foreach_output_n (out, n, ir) { + if (is_atomic(out->opc) && (out->flags & IR3_INSTR_G)) { + ir->outputs[n] = get_atomic_dest_mov(out); + progress = true; } } + return progress; }
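
Editor's note (not part of the commit): the hunks above depend on an a6xx quirk that is easy to miss when reading the diff in isolation. The global atomics (the ATOMIC_*_G instructions) return their result through the first component of the src1 collect rather than through the nominal dst, which is why a "dummy" immediate is collected in front of the compare/data values, and why ir3_a6xx_fixup_atomic_dests() runs after RA to place a (sy)-flagged mov right behind each atomic and repoint every consumer at it. The standalone sketch below only illustrates that lazy, cached consumer-rewrite pattern with invented toy types; it is not the real ir3 IR and none of these names exist in mesa.

/*
 * Toy model of the post-RA atomic-dst fixup described in the diff above.
 * All types and names here are hypothetical stand-ins for the ir3 ones.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_instr {
	const char *name;
	bool is_global_atomic;      /* stands in for is_atomic() && IR3_INSTR_G */
	struct toy_instr *srcs[4];  /* stands in for foreach_src() */
	int nsrcs;
	void *data;                 /* cached fixup mov, like instr->data */
};

/* create (or re-use) the mov that forwards the atomic's real result: */
static struct toy_instr *
get_atomic_dest_mov(struct toy_instr *atomic)
{
	if (atomic->data)
		return atomic->data;

	struct toy_instr *mov = calloc(1, sizeof(*mov));
	mov->name = "mov.from.atomic";
	/* the real pass copies the register of the dummy element of the
	 * src1 collect into the mov's src and sets the (sy) flag here: */
	printf("inserting %s after %s\n", mov->name, atomic->name);

	return atomic->data = mov;
}

static bool
fixup_atomic_dests(struct toy_instr **instrs, int n)
{
	bool progress = false;

	for (int i = 0; i < n; i++) {
		for (int s = 0; s < instrs[i]->nsrcs; s++) {
			struct toy_instr *src = instrs[i]->srcs[s];
			/* repoint any consumer of a global atomic at the mov: */
			if (src && src->is_global_atomic) {
				instrs[i]->srcs[s] = get_atomic_dest_mov(src);
				progress = true;
			}
		}
	}

	return progress;
}

int main(void)
{
	struct toy_instr atomic = { .name = "atomic.add.g", .is_global_atomic = true };
	struct toy_instr use1 = { .name = "stib",  .srcs = { &atomic }, .nsrcs = 1 };
	struct toy_instr use2 = { .name = "add.u", .srcs = { &atomic }, .nsrcs = 1 };
	struct toy_instr *block[] = { &atomic, &use1, &use2 };

	/* both consumers end up pointing at the same cached mov: */
	fixup_atomic_dests(block, 3);
	printf("use1 and use2 share the mov: %s\n",
	       use1.srcs[0] == use2.srcs[0] ? "yes" : "no");
	return 0;
}

Running the sketch prints one "inserting ..." line followed by "yes": the mov is created lazily when the first consumer is found and cached through the data pointer, which is exactly the role instr->data plays in get_atomic_dest_mov() above.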