+ enum bifrost_reg_control ctrl = bi_pack_register_ctrl(regs);
+ struct bifrost_regs s = { 0 };
+ uint64_t packed = 0;
+
+ if (regs.enabled[1]) {
+ /* Gotta save that bit!~ Required by the 63-x trick */
+ assert(regs.port[1] > regs.port[0]);
+ assert(regs.enabled[0]);
+
+ /* Do the 63-x trick, see docs/disasm */
+ if (regs.port[0] > 31) {
+ regs.port[0] = 63 - regs.port[0];
+ regs.port[1] = 63 - regs.port[1];
+ }
+
+ assert(regs.port[0] <= 31);
+ assert(regs.port[1] <= 63);
+
+ s.ctrl = ctrl;
+ s.reg1 = regs.port[1];
+ s.reg0 = regs.port[0];
+ } else {
+ /* Port 1 disabled, so set to zero and use port 1 for ctrl */
+ s.ctrl = 0;
+ s.reg1 = ctrl << 2;
+
+ if (regs.enabled[0]) {
+ /* Bit 0 upper bit of port 0 */
+ s.reg1 |= (regs.port[0] >> 5);
+
+ /* Rest of port 0 in usual spot */
+ s.reg0 = (regs.port[0] & 0b11111);
+ } else {
+ /* Bit 1 set if port 0 also disabled */
+ s.reg1 |= (1 << 1);
+ }
+ }
+
+ /* When port 3 isn't used, we have to set it to port 2, and vice versa,
+ * or INSTR_INVALID_ENC is raised. The reason is unknown. */
+
+ bool has_port2 = regs.write_fma || regs.write_add;
+ bool has_port3 = regs.read_port3 || (regs.write_fma && regs.write_add);
+
+ if (!has_port3)
+ regs.port[3] = regs.port[2];
+
+ if (!has_port2)
+ regs.port[2] = regs.port[3];
+
+ s.reg3 = regs.port[3];
+ s.reg2 = regs.port[2];
+ s.uniform_const = regs.uniform_constant;
+
+ memcpy(&packed, &s, sizeof(s));
+ return packed;
+}
+
+static void
+bi_set_data_register(bi_clause *clause, unsigned idx)
+{
+ assert(idx & BIR_INDEX_REGISTER);
+ unsigned reg = idx & ~BIR_INDEX_REGISTER;
+ assert(reg <= 63);
+ clause->data_register = reg;
+}
+
+static void
+bi_read_data_register(bi_clause *clause, bi_instruction *ins)
+{
+ bi_set_data_register(clause, ins->src[0]);
+}
+
+static void
+bi_write_data_register(bi_clause *clause, bi_instruction *ins)
+{
+ bi_set_data_register(clause, ins->dest);
+}
+
+static enum bifrost_packed_src
+bi_get_src_reg_port(struct bi_registers *regs, unsigned src)
+{
+ unsigned reg = src & ~BIR_INDEX_REGISTER;
+
+ if (regs->port[0] == reg && regs->enabled[0])
+ return BIFROST_SRC_PORT0;
+ else if (regs->port[1] == reg && regs->enabled[1])
+ return BIFROST_SRC_PORT1;
+ else if (regs->port[3] == reg && regs->read_port3)
+ return BIFROST_SRC_PORT3;
+ else
+ unreachable("Tried to access register with no port");
+}
+
+static enum bifrost_packed_src
+bi_get_src(bi_instruction *ins, struct bi_registers *regs, unsigned s)
+{
+ unsigned src = ins->src[s];
+
+ if (src & BIR_INDEX_REGISTER)
+ return bi_get_src_reg_port(regs, src);
+ else if (src & BIR_INDEX_PASS)
+ return src & ~BIR_INDEX_PASS;
+ else {
+ bi_print_instruction(ins, stderr);
+ unreachable("Unknown src in above instruction");
+ }
+}
+
+/* Constructs a packed 2-bit swizzle for a 16-bit vec2 source. Source must be
+ * 16-bit and written components must correspond to valid swizzles (component x
+ * or y). */
+
+static unsigned
+bi_swiz16(bi_instruction *ins, unsigned src)
+{
+ assert(nir_alu_type_get_type_size(ins->src_types[src]) == 16);
+ unsigned swizzle = 0;
+
+ for (unsigned c = 0; c < 2; ++c) {
+ if (!bi_writes_component(ins, src)) continue;
+
+ unsigned k = ins->swizzle[src][c];
+ assert(k <= 1);
+ swizzle |= (k << c);
+ }
+
+ return swizzle;
+}
+
+static unsigned
+bi_pack_fma_fma(bi_instruction *ins, struct bi_registers *regs)
+{
+ /* (-a)(-b) = ab, so we only need one negate bit */
+ bool negate_mul = ins->src_neg[0] ^ ins->src_neg[1];
+
+ if (ins->op.mscale) {
+ assert(!(ins->src_abs[0] && ins->src_abs[1]));
+ assert(!ins->src_abs[2] || !ins->src_neg[3] || !ins->src_abs[3]);
+
+ /* We can have exactly one abs, and can flip the multiplication
+ * to make it fit if we have to */
+ bool flip_ab = ins->src_abs[1];
+
+ struct bifrost_fma_mscale pack = {
+ .src0 = bi_get_src(ins, regs, flip_ab ? 1 : 0),
+ .src1 = bi_get_src(ins, regs, flip_ab ? 0 : 1),
+ .src2 = bi_get_src(ins, regs, 2),
+ .src3 = bi_get_src(ins, regs, 3),
+ .mscale_mode = 0,
+ .mode = ins->outmod,
+ .src0_abs = ins->src_abs[0] || ins->src_abs[1],
+ .src1_neg = negate_mul,
+ .src2_neg = ins->src_neg[2],
+ .op = BIFROST_FMA_OP_MSCALE,
+ };
+
+ RETURN_PACKED(pack);
+ } else if (ins->dest_type == nir_type_float32) {
+ struct bifrost_fma_fma pack = {
+ .src0 = bi_get_src(ins, regs, 0),
+ .src1 = bi_get_src(ins, regs, 1),
+ .src2 = bi_get_src(ins, regs, 2),
+ .src0_abs = ins->src_abs[0],
+ .src1_abs = ins->src_abs[1],
+ .src2_abs = ins->src_abs[2],
+ .src0_neg = negate_mul,
+ .src2_neg = ins->src_neg[2],
+ .outmod = ins->outmod,
+ .roundmode = ins->roundmode,
+ .op = BIFROST_FMA_OP_FMA
+ };
+
+ RETURN_PACKED(pack);
+ } else if (ins->dest_type == nir_type_float16) {
+ struct bifrost_fma_fma16 pack = {
+ .src0 = bi_get_src(ins, regs, 0),
+ .src1 = bi_get_src(ins, regs, 1),
+ .src2 = bi_get_src(ins, regs, 2),
+ .swizzle_0 = bi_swiz16(ins, 0),
+ .swizzle_1 = bi_swiz16(ins, 1),
+ .swizzle_2 = bi_swiz16(ins, 2),
+ .src0_neg = negate_mul,
+ .src2_neg = ins->src_neg[2],
+ .outmod = ins->outmod,
+ .roundmode = ins->roundmode,
+ .op = BIFROST_FMA_OP_FMA16
+ };
+
+ RETURN_PACKED(pack);
+ } else {
+ unreachable("Invalid fma dest type");
+ }
+}
+
+static unsigned
+bi_pack_fma_addmin_f32(bi_instruction *ins, struct bi_registers *regs)
+{
+ unsigned op =
+ (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD32 :
+ (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN32 :
+ BIFROST_FMA_OP_FMAX32;
+
+ struct bifrost_fma_add pack = {
+ .src0 = bi_get_src(ins, regs, 0),
+ .src1 = bi_get_src(ins, regs, 1),
+ .src0_abs = ins->src_abs[0],
+ .src1_abs = ins->src_abs[1],
+ .src0_neg = ins->src_neg[0],
+ .src1_neg = ins->src_neg[1],
+ .unk = 0x0,
+ .outmod = ins->outmod,
+ .roundmode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
+ .op = op
+ };
+
+ RETURN_PACKED(pack);
+}
+
/* Computes the auxiliary bit l for fp16 abs encoding and reports via *flip
 * whether the operands must be swapped. Returns l. See the derivation below
 * for why abs is recoverable from (l, operand order) alone. */

static bool
bi_pack_fp16_abs(bi_instruction *ins, struct bi_registers *regs, bool *flip)
{
        /* Absolute values are packed in a quirky way. Let k = src1 < src0. Let
         * l be an auxiliary bit we encode. Then the hardware determines:
         *
         * abs0 = l || k
         * abs1 = l && k
         *
         * Since add/min/max are commutative, this saves a bit by using the
         * order of the operands as a bit (k). To pack this, first note:
         *
         * (l && k) implies (l || k).
         *
         * That is, if the second argument is abs'd, then the first argument
         * also has abs. So there are three cases:
         *
         * Case 0: Neither src has absolute value. Then we have l = k = 0.
         *
         * Case 1: Exactly one src has absolute value. Assign that source to
         * src0 and the other source to src1. Compute k = src1 < src0 based on
         * that assignment. Then l = ~k.
         *
         * Case 2: Both sources have absolute value. Then we have l = k = 1.
         * Note to force k = 1 requires that (src1 < src0) OR (src0 < src1).
         * That is, this encoding is only valid if src1 and src0 are distinct.
         * This is a scheduling restriction (XXX); if an op of this type
         * requires both identical sources to have abs value, then we must
         * schedule to ADD (which does not use this ordering trick).
         */

        unsigned abs_0 = ins->src_abs[0], abs_1 = ins->src_abs[1];
        unsigned src_0 = bi_get_src(ins, regs, 0);
        unsigned src_1 = bi_get_src(ins, regs, 1);

        /* Case 2 with identical sources cannot force k = 1 (see above) */
        assert(!(abs_0 && abs_1 && src_0 == src_1));

        if (!abs_0 && !abs_1) {
                /* Case 0: l = 0, and order operands so k = 0 as well */
                /* Force k = 0 <===> NOT(src1 < src0) */
                *flip = (src_1 < src_0);
                return false;
        } else if (abs_0 && !abs_1) {
                /* Case 1: abs already on src0; keep order, return l = ~k */
                return src_1 >= src_0;
        } else if (abs_1 && !abs_0) {
                /* Case 1: abs on src1, so swap operands, then l = ~k with the
                 * roles of src0/src1 exchanged */
                *flip = true;
                return src_0 >= src_1;
        } else {
                /* Case 2: both abs; swap if needed to force k = 1, set l = 1 */
                *flip = !(src_1 < src_0);
                return true;
        }
}
+
+static unsigned
+bi_pack_fmadd_min_f16(bi_instruction *ins, struct bi_registers *regs, bool FMA)
+{
+ unsigned op =
+ (!FMA) ? ((ins->op.minmax == BI_MINMAX_MIN) ?
+ BIFROST_ADD_OP_FMIN16 : BIFROST_ADD_OP_FMAX16) :
+ (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD16 :
+ (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN16 :
+ BIFROST_FMA_OP_FMAX16;
+
+ bool flip = false;
+ bool l = bi_pack_fp16_abs(ins, regs, &flip);
+ unsigned src_0 = bi_get_src(ins, regs, 0);
+ unsigned src_1 = bi_get_src(ins, regs, 1);
+
+ if (FMA) {
+ struct bifrost_fma_add_minmax16 pack = {
+ .src0 = flip ? src_1 : src_0,
+ .src1 = flip ? src_0 : src_1,
+ .src0_neg = ins->src_neg[flip ? 1 : 0],
+ .src1_neg = ins->src_neg[flip ? 0 : 1],
+ .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
+ .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
+ .abs1 = l,
+ .outmod = ins->outmod,
+ .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
+ .op = op
+ };
+
+ RETURN_PACKED(pack);
+ } else {
+ /* Can't have modes for fp16 */
+ assert(ins->outmod == 0);
+
+ struct bifrost_add_fmin16 pack = {
+ .src0 = flip ? src_1 : src_0,
+ .src1 = flip ? src_0 : src_1,
+ .src0_neg = ins->src_neg[flip ? 1 : 0],
+ .src1_neg = ins->src_neg[flip ? 0 : 1],
+ .abs1 = l,
+ .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
+ .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
+ .mode = ins->minmax,
+ .op = op
+ };
+
+ RETURN_PACKED(pack);
+ }
+}
+
+static unsigned
+bi_pack_fma_addmin(bi_instruction *ins, struct bi_registers *regs)
+{
+ if (ins->dest_type == nir_type_float32)
+ return bi_pack_fma_addmin_f32(ins, regs);
+ else if(ins->dest_type == nir_type_float16)
+ return bi_pack_fmadd_min_f16(ins, regs, true);
+ else
+ unreachable("Unknown FMA/ADD type");
+}
+
+static unsigned
+bi_pack_fma_1src(bi_instruction *ins, struct bi_registers *regs, unsigned op)
+{
+ struct bifrost_fma_inst pack = {
+ .src0 = bi_get_src(ins, regs, 0),
+ .op = op
+ };
+
+ RETURN_PACKED(pack);
+}
+
+static unsigned
+bi_pack_fma_2src(bi_instruction *ins, struct bi_registers *regs, unsigned op)
+{
+ struct bifrost_fma_2src pack = {
+ .src0 = bi_get_src(ins, regs, 0),
+ .src1 = bi_get_src(ins, regs, 1),
+ .op = op
+ };
+
+ RETURN_PACKED(pack);
+}
+
+static unsigned
+bi_pack_add_1src(bi_instruction *ins, struct bi_registers *regs, unsigned op)
+{
+ struct bifrost_add_inst pack = {
+ .src0 = bi_get_src(ins, regs, 0),
+ .op = op
+ };
+
+ RETURN_PACKED(pack);
+}
+
/* Translates a bi_cond into a packed csel condition code. Only GT/GE/EQ have
 * native encodings; LT/LE are realized by flipping the compared operands and
 * NE by inverting the selected results, signalled to the caller via *flip and
 * *invert. T selects the float/signed/unsigned condition variant. */

static enum bifrost_csel_cond
bi_cond_to_csel(enum bi_cond cond, bool *flip, bool *invert, nir_alu_type T)
{
        nir_alu_type B = nir_alu_type_get_base_type(T);

        /* Index into the per-type tables: 0 = float, 1 = int, 2 = unsigned */
        unsigned idx = (B == nir_type_float) ? 0 :
                ((B == nir_type_int) ? 1 : 2);

        switch (cond){
        case BI_COND_LT:
                *flip = true;
                /* fallthrough */
        case BI_COND_GT: {
                const enum bifrost_csel_cond ops[] = {
                        BIFROST_FGT_F,
                        BIFROST_IGT_I,
                        BIFROST_UGT_I
                };

                return ops[idx];
        }
        case BI_COND_LE:
                *flip = true;
                /* fallthrough */
        case BI_COND_GE: {
                const enum bifrost_csel_cond ops[] = {
                        BIFROST_FGE_F,
                        BIFROST_IGE_I,
                        BIFROST_UGE_I
                };

                return ops[idx];
        }
        case BI_COND_NE:
                *invert = true;
                /* fallthrough */
        case BI_COND_EQ: {
                const enum bifrost_csel_cond ops[] = {
                        BIFROST_FEQ_F,
                        BIFROST_IEQ_F,
                        BIFROST_IEQ_F /* sign is irrelevant */
                };

                return ops[idx];
        }
        default:
                unreachable("Invalid op for csel");
        }
}
+
+static unsigned
+bi_pack_fma_csel(bi_instruction *ins, struct bi_registers *regs)
+{
+ /* TODO: Use csel3 as well */
+ bool flip = false, invert = false;
+
+ enum bifrost_csel_cond cond =
+ bi_cond_to_csel(ins->cond, &flip, &invert, ins->src_types[0]);
+
+ unsigned size = nir_alu_type_get_type_size(ins->dest_type);
+
+ unsigned cmp_0 = (flip ? 1 : 0);
+ unsigned cmp_1 = (flip ? 0 : 1);
+ unsigned res_0 = (invert ? 3 : 2);
+ unsigned res_1 = (invert ? 2 : 3);
+
+ struct bifrost_csel4 pack = {
+ .src0 = bi_get_src(ins, regs, cmp_0),
+ .src1 = bi_get_src(ins, regs, cmp_1),
+ .src2 = bi_get_src(ins, regs, res_0),
+ .src3 = bi_get_src(ins, regs, res_1),
+ .cond = cond,
+ .op = (size == 16) ? BIFROST_FMA_OP_CSEL4_V16 :
+ BIFROST_FMA_OP_CSEL4
+ };
+
+ RETURN_PACKED(pack);
+}
+
+static unsigned
+bi_pack_fma_frexp(bi_instruction *ins, struct bi_registers *regs)
+{
+ unsigned op = BIFROST_FMA_OP_FREXPE_LOG;
+ return bi_pack_fma_1src(ins, regs, op);
+}
+
+static unsigned
+bi_pack_fma_reduce(bi_instruction *ins, struct bi_registers *regs)
+{
+ if (ins->op.reduce == BI_REDUCE_ADD_FREXPM) {
+ return bi_pack_fma_2src(ins, regs, BIFROST_FMA_OP_ADD_FREXPM);
+ } else {
+ unreachable("Invalid reduce op");
+ }
+}
+
/* We have a single convert opcode in the IR but a number of opcodes that could
 * come out. In particular we have native opcodes for:
 *
 * [ui]16 --> [fui]32 -- int16_to_32
 * f16 --> f32 -- float16_to_32
 * f32 --> f16 -- float32_to_16
 * f32 --> [ui]32 -- float32_to_int
 * [ui]32 --> f32 -- int_to_float32
 * [fui]16 --> [fui]16 -- f2i_i2f16
 */

static unsigned
bi_pack_convert(bi_instruction *ins, struct bi_registers *regs, bool FMA)
{
        nir_alu_type from_base = nir_alu_type_get_base_type(ins->src_types[0]);
        unsigned from_size = nir_alu_type_get_type_size(ins->src_types[0]);
        bool from_unsigned = from_base == nir_type_uint;

        nir_alu_type to_base = nir_alu_type_get_base_type(ins->dest_type);
        unsigned to_size = nir_alu_type_get_type_size(ins->dest_type);
        bool to_unsigned = to_base == nir_type_uint;
        bool to_float = to_base == nir_type_float;

        /* Sanity check: something must change, and sizes may at most double
         * or halve */
        assert((from_base != to_base) || (from_size != to_size));
        assert((MAX2(from_size, to_size) / MIN2(from_size, to_size)) <= 2);

        /* f32 to f16 is special */
        if (from_size == 32 && to_size == 16 && from_base == nir_type_float && to_base == from_base) {
                /* TODO: second vectorized source? */
                struct bifrost_fma_2src pfma = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = BIFROST_SRC_STAGE, /* 0 */
                        .op = BIFROST_FMA_FLOAT32_TO_16
                };

                struct bifrost_add_2src padd = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = BIFROST_SRC_STAGE, /* 0 */
                        .op = BIFROST_ADD_FLOAT32_TO_16
                };

                if (FMA) {
                        RETURN_PACKED(pfma);
                } else {
                        RETURN_PACKED(padd);
                }
        }

        /* Otherwise, figure out the mode */
        unsigned op = 0;

        if (from_size == 16 && to_size == 32) {
                /* 16 -> 32 widening reads a single 16-bit component,
                 * identified by the source swizzle */
                unsigned component = ins->swizzle[0][0];
                assert(component <= 1);

                if (from_base == nir_type_float)
                        op = BIFROST_CONVERT_5(component);
                else
                        op = BIFROST_CONVERT_4(from_unsigned, component, to_float);
        } else {
                unsigned mode = 0;
                unsigned swizzle = (from_size == 16) ? bi_swiz16(ins, 0) : 0;
                bool is_unsigned = from_unsigned;

                if (from_base == nir_type_float) {
                        /* float -> int: signedness is taken from the
                         * destination type */
                        assert(to_base != nir_type_float);
                        is_unsigned = to_unsigned;

                        if (from_size == 32 && to_size == 32)
                                mode = BIFROST_CONV_F32_TO_I32;
                        else if (from_size == 16 && to_size == 16)
                                mode = BIFROST_CONV_F16_TO_I16;
                        else
                                unreachable("Invalid float conversion");
                } else {
                        /* int -> float of the same size */
                        assert(to_base == nir_type_float);
                        assert(from_size == to_size);

                        if (to_size == 32)
                                mode = BIFROST_CONV_I32_TO_F32;
                        else if (to_size == 16)
                                mode = BIFROST_CONV_I16_TO_F16;
                        else
                                unreachable("Invalid int conversion");
                }

                /* Fixup swizzle for 32-bit only modes */

                if (mode == BIFROST_CONV_I32_TO_F32)
                        swizzle = 0b11;
                else if (mode == BIFROST_CONV_F32_TO_I32)
                        swizzle = 0b10;

                op = BIFROST_CONVERT(is_unsigned, ins->roundmode, swizzle, mode);

                /* Unclear what the top bit is for... maybe 16-bit related */
                bool mode2 = mode == BIFROST_CONV_F16_TO_I16;
                bool mode6 = mode == BIFROST_CONV_I16_TO_F16;

                if (!(mode2 || mode6))
                        op |= 0x100;
        }

        if (FMA)
                return bi_pack_fma_1src(ins, regs, BIFROST_FMA_CONVERT | op);
        else
                return bi_pack_add_1src(ins, regs, BIFROST_ADD_CONVERT | op);
}
+
+static unsigned
+bi_pack_fma_select(bi_instruction *ins, struct bi_registers *regs)
+{
+ unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);
+
+ if (size == 16) {
+ unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
+ unsigned op = BIFROST_FMA_SEL_16(swiz);
+ return bi_pack_fma_2src(ins, regs, op);
+ } else if (size == 8) {
+ unsigned swiz = 0;
+
+ for (unsigned c = 0; c < 4; ++c) {
+ if (ins->swizzle[c][0]) {
+ /* Ensure lowering restriction is met */
+ assert(ins->swizzle[c][0] == 2);
+ swiz |= (1 << c);
+ }
+ }
+
+ struct bifrost_fma_sel8 pack = {
+ .src0 = bi_get_src(ins, regs, 0),
+ .src1 = bi_get_src(ins, regs, 1),
+ .src2 = bi_get_src(ins, regs, 2),
+ .src3 = bi_get_src(ins, regs, 3),
+ .swizzle = swiz,
+ .op = BIFROST_FMA_OP_SEL8
+ };
+
+ RETURN_PACKED(pack);
+ } else {
+ unreachable("Unimplemented");
+ }
+}
+
+static enum bifrost_fcmp_cond
+bi_fcmp_cond(enum bi_cond cond)
+{
+ switch (cond) {
+ case BI_COND_LT: return BIFROST_OLT;
+ case BI_COND_LE: return BIFROST_OLE;
+ case BI_COND_GE: return BIFROST_OGE;
+ case BI_COND_GT: return BIFROST_OGT;
+ case BI_COND_EQ: return BIFROST_OEQ;
+ case BI_COND_NE: return BIFROST_UNE;
+ default: unreachable("Unknown bi_cond");
+ }
+}
+
+/* a <?> b <==> b <flip(?)> a (TODO: NaN behaviour?) */
+
+static enum bifrost_fcmp_cond
+bi_flip_fcmp(enum bifrost_fcmp_cond cond)
+{
+ switch (cond) {
+ case BIFROST_OGT:
+ return BIFROST_OLT;
+ case BIFROST_OGE:
+ return BIFROST_OLE;
+ case BIFROST_OLT:
+ return BIFROST_OGT;
+ case BIFROST_OLE:
+ return BIFROST_OGE;
+ case BIFROST_OEQ:
+ case BIFROST_UNE:
+ return cond;
+ default:
+ unreachable("Unknown fcmp cond");
+ }
+}
+
/* Packs a float comparison in the FMA slot, handling fp32 and fp16 source
 * pairs; mixed 32/16 comparisons are not yet supported. */

static unsigned
bi_pack_fma_cmp(bi_instruction *ins, struct bi_registers *regs)
{
        nir_alu_type Tl = ins->src_types[0];
        nir_alu_type Tr = ins->src_types[1];

        if (Tl == nir_type_float32 || Tr == nir_type_float32) {
                /* TODO: Mixed 32/16 cmp */
                assert(Tl == Tr);

                enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);

                /* Only src1 has neg, so we arrange:
                 * a < b --- native
                 * a < -b --- native
                 * -a < -b <===> a > b
                 * -a < b <===> a > -b
                 * TODO: Is this NaN-precise?
                 */

                /* A negate on src0 is absorbed by flipping the condition and
                 * folding both negates into the single src1 negate bit */
                bool flip = ins->src_neg[0];
                bool neg = ins->src_neg[0] ^ ins->src_neg[1];

                if (flip)
                        cond = bi_flip_fcmp(cond);

                struct bifrost_fma_fcmp pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src0_abs = ins->src_abs[0],
                        .src1_abs = ins->src_abs[1],
                        .src1_neg = neg,
                        .src_expand = 0,
                        .unk1 = 0,
                        .cond = cond,
                        .op = BIFROST_FMA_OP_FCMP_GL
                };

                RETURN_PACKED(pack);
        } else if (Tl == nir_type_float16 && Tr == nir_type_float16) {
                /* fp16 abs is encoded via operand order (bi_pack_fp16_abs),
                 * so a swap there must flip the condition too */
                bool flip = false;
                bool l = bi_pack_fp16_abs(ins, regs, &flip);
                enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);

                if (flip)
                        cond = bi_flip_fcmp(cond);

                struct bifrost_fma_fcmp16 pack = {
                        .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                        .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                        .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                        .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                        .abs1 = l,
                        .unk = 0,
                        .cond = cond,
                        .op = BIFROST_FMA_OP_FCMP_GL_16,
                };

                RETURN_PACKED(pack);
        } else {
                unreachable("Unknown cmp type");
        }
}
+
+static unsigned
+bi_fma_bitwise_op(enum bi_bitwise_op op, bool rshift)
+{
+ switch (op) {
+ case BI_BITWISE_OR:
+ /* Via De Morgan's */
+ return rshift ?
+ BIFROST_FMA_OP_RSHIFT_NAND :
+ BIFROST_FMA_OP_LSHIFT_NAND;
+ case BI_BITWISE_AND:
+ return rshift ?
+ BIFROST_FMA_OP_RSHIFT_AND :
+ BIFROST_FMA_OP_LSHIFT_AND;
+ case BI_BITWISE_XOR:
+ /* Shift direction handled out of band */
+ return BIFROST_FMA_OP_RSHIFT_XOR;
+ default:
+ unreachable("Unknown op");
+ }
+}
+
+static unsigned
+bi_pack_fma_bitwise(bi_instruction *ins, struct bi_registers *regs)
+{
+ unsigned size = nir_alu_type_get_type_size(ins->dest_type);
+ assert(size <= 32);
+
+ bool invert_0 = ins->bitwise.src_invert[0];
+ bool invert_1 = ins->bitwise.src_invert[1];
+
+ if (ins->op.bitwise == BI_BITWISE_OR) {
+ /* Becomes NAND, so via De Morgan's:
+ * f(A) | f(B) = ~(~f(A) & ~f(B))
+ * = NAND(~f(A), ~f(B))
+ */
+
+ invert_0 = !invert_0;
+ invert_1 = !invert_1;
+ } else if (ins->op.bitwise == BI_BITWISE_XOR) {
+ /* ~A ^ ~B = ~(A ^ ~B) = ~(~(A ^ B)) = A ^ B
+ * ~A ^ B = ~(A ^ B) = A ^ ~B
+ */
+
+ invert_0 ^= invert_1;
+ invert_1 = false;
+
+ /* invert_1 ends up specifying shift direction */
+ invert_1 = !ins->bitwise.rshift;
+ }
+
+ struct bifrost_shift_fma pack = {
+ .src0 = bi_get_src(ins, regs, 0),
+ .src1 = bi_get_src(ins, regs, 1),
+ .src2 = bi_get_src(ins, regs, 2),
+ .half = (size == 32) ? 0 : (size == 16) ? 0x7 : (size == 8) ? 0x4 : 0,
+ .unk = 1, /* XXX */
+ .invert_1 = invert_0,
+ .invert_2 = invert_1,
+ .op = bi_fma_bitwise_op(ins->op.bitwise, ins->bitwise.rshift)
+ };
+
+ RETURN_PACKED(pack);