.vector_channels = 4
};
- assert(blend.blend_location < BIFROST_MAX_RENDER_TARGET_COUNT);
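+ /* Bifrost supports at most 8 render targets */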
+ assert(blend.blend_location < 8);
assert(ctx->blend_types);
assert(blend.src_types[0]);
ctx->blend_types[blend.blend_location] = blend.src_types[0];
emit_load_const(bi_context *ctx, nir_load_const_instr *instr)
{
/* Make sure we've been lowered */
- assert(instr->def.num_components == 1);
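+ /* A vectorized constant must still fit in a single 32-bit word */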
+ assert(instr->def.num_components <= (32 / instr->def.bit_size));
+
+ /* Accumulate all the channels of the constant, as if we did an
+ * implicit SEL over them */
+ uint32_t acc = 0;
+
+ for (unsigned i = 0; i < instr->def.num_components; ++i) {
+ unsigned v = nir_const_value_as_uint(instr->value[i], instr->def.bit_size);
+ acc |= (v << (i * instr->def.bit_size));
+ }
bi_instruction move = {
.type = BI_MOV,
.dest = pan_ssa_index(&instr->def),
- .dest_type = instr->def.bit_size | nir_type_uint,
+ .dest_type = nir_type_uint32,
.src = {
BIR_INDEX_CONSTANT
},
.src_types = {
- instr->def.bit_size | nir_type_uint,
+ nir_type_uint32,
},
.constant = {
- .u64 = nir_const_value_as_uint(instr->value[0], instr->def.bit_size)
+ .u32 = acc
}
};
case nir_op_isub:
return BI_IMATH;
+ case nir_op_imul:
+ return BI_IMUL;
+
case nir_op_iand:
case nir_op_ior:
case nir_op_ixor:
+ case nir_op_inot:
+ case nir_op_ishl:
return BI_BITWISE;
BI_CASE_CMP(nir_op_flt)
BI_CASE_CMP(nir_op_ige)
BI_CASE_CMP(nir_op_ieq)
BI_CASE_CMP(nir_op_ine)
+ BI_CASE_CMP(nir_op_uge)
return BI_CMP;
case nir_op_b8csel:
case nir_op_frcp:
case nir_op_frsq:
+ case nir_op_iabs:
return BI_SPECIAL;
default:
BI_CASE_CMP(nir_op_fge)
BI_CASE_CMP(nir_op_ige)
+ BI_CASE_CMP(nir_op_uge)
return BI_COND_GE;
BI_CASE_CMP(nir_op_feq)
case nir_op_isub:
alu.op.imath = BI_IMATH_SUB;
break;
+ case nir_op_iabs:
+ alu.op.special = BI_SPECIAL_IABS;
+ break;
+ case nir_op_inot:
+ /* There is no dedicated bitwise NOT, but sources can be inverted, so lower it to (~a) | 0 */
+ alu.op.bitwise = BI_BITWISE_OR;
+ alu.bitwise.src_invert[0] = true;
+ alu.src[1] = BIR_INDEX_ZERO;
+ /* zero shift */
+ alu.src[2] = BIR_INDEX_ZERO;
+ alu.src_types[2] = alu.src_types[1];
+ break;
+ case nir_op_ishl:
+ alu.op.bitwise = BI_BITWISE_OR;
+ /* Move src1 to src2 and replace src1 with zero; the underlying op computes (src0 << src2) | src1 */
+ alu.src[2] = alu.src[1];
+ alu.src_types[2] = alu.src_types[1];
+ alu.src[1] = BIR_INDEX_ZERO;
+ break;
+ case nir_op_imul:
+ alu.op.imul = BI_IMUL_IMUL;
+ break;
case nir_op_fmax:
case nir_op_imax:
case nir_op_umax:
BI_CASE_CMP(nir_op_ieq)
BI_CASE_CMP(nir_op_fne)
BI_CASE_CMP(nir_op_ine)
+ BI_CASE_CMP(nir_op_uge)
alu.cond = bi_cond_for_nir(instr->op, false);
break;
case nir_op_fround_even:
break;
case nir_op_iand:
alu.op.bitwise = BI_BITWISE_AND;
+ /* zero shift */
+ alu.src[2] = BIR_INDEX_ZERO;
+ alu.src_types[2] = alu.src_types[1];
break;
case nir_op_ior:
alu.op.bitwise = BI_BITWISE_OR;
+ /* zero shift */
+ alu.src[2] = BIR_INDEX_ZERO;
+ alu.src_types[2] = alu.src_types[1];
break;
case nir_op_ixor:
alu.op.bitwise = BI_BITWISE_XOR;
+ /* zero shift */
+ alu.src[2] = BIR_INDEX_ZERO;
+ alu.src_types[2] = alu.src_types[1];
+ break;
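+
+ /* Float-to-int conversion truncates, i.e. rounds toward zero */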
+ case nir_op_f2i32:
+ alu.roundmode = BIFROST_RTZ;
+ break;
+
+ case nir_op_f2f16:
+ case nir_op_i2i16:
+ case nir_op_u2u16: {
+ if (nir_src_bit_size(instr->src[0].src) != 32)
+ break;
+
+ /* Should have been const folded */
+ assert(!nir_src_is_const(instr->src[0].src));
+
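+ /* The underlying 32->16 conversion consumes a pair of 32-bit sources
+  * and packs two 16-bit results, so duplicate the source and pick the
+  * channels via swizzles */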
+ alu.src_types[1] = alu.src_types[0];
+ alu.src[1] = alu.src[0];
+
+ unsigned last = nir_dest_num_components(instr->dest.dest) - 1;
+ assert(last <= 1);
+
+ alu.swizzle[1][0] = instr->src[0].swizzle[last];
break;
+ }
+
default:
break;
}
bi_fuse_cond(&alu, instr->src[0],
&constants_left, &constant_shift, comps, false);
#endif
- } else if (alu.type == BI_BITWISE) {
- /* Implicit shift argument... at some point we should fold */
- alu.src[2] = BIR_INDEX_ZERO;
- alu.src_types[2] = alu.src_types[1];
}
bi_emit(ctx, alu);
NIR_PASS_V(nir, nir_lower_global_vars_to_local);
NIR_PASS_V(nir, nir_lower_var_copies);
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
- NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
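+ /* Lower only shader I/O here; SSBOs are handled by nir_lower_ssbo below */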
+ NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
+ glsl_type_size, 0);
NIR_PASS_V(nir, nir_lower_ssbo);
NIR_PASS_V(nir, nir_lower_mediump_outputs);