        for (int i = 0; i < 4; i++) {
                uint8_t swiz = desc->swizzle[i];
+                struct qreg result;
-                if (swiz <= UTIL_FORMAT_SWIZZLE_W &&
-                    !format_warned &&
-                    (desc->channel[swiz].type != UTIL_FORMAT_TYPE_FLOAT ||
-                     desc->channel[swiz].size != 32)) {
-                        fprintf(stderr,
-                                "vtx element %d unsupported type: %s\n",
-                                attr, util_format_name(format));
-                        format_warned = true;
+
+                if (swiz > UTIL_FORMAT_SWIZZLE_W)
+                        result = get_swizzled_channel(c, vpm_reads, swiz);
+                else if (desc->channel[swiz].size == 32 &&
+                         desc->channel[swiz].type == UTIL_FORMAT_TYPE_FLOAT) {
+                        result = get_swizzled_channel(c, vpm_reads, swiz);
+                } else if (desc->channel[swiz].size == 8 &&
+                           (desc->channel[swiz].type == UTIL_FORMAT_TYPE_UNSIGNED ||
+                            desc->channel[swiz].type == UTIL_FORMAT_TYPE_SIGNED) &&
+                           desc->channel[swiz].normalized) {
+                        struct qreg vpm = vpm_reads[0];
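+                        /* For snorm, flip the top bit of each byte: this
+                         * maps the signed range [-128, 127] onto [0, 255],
+                         * so the unorm unpack can be reused, and the result
+                         * is expanded back to [-1, 1] after unpacking.
+                         */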
+                        if (desc->channel[swiz].type == UTIL_FORMAT_TYPE_SIGNED)
+                                vpm = qir_XOR(c, vpm, qir_uniform_ui(c, 0x80808080));
+                        result = qir_UNPACK_8(c, vpm, swiz);
+                } else {
+                        if (!format_warned) {
+                                fprintf(stderr,
+                                        "vtx element %d unsupported type: %s\n",
+                                        attr, util_format_name(format));
+                                format_warned = true;
+                        }
+                        result = qir_uniform_f(c, 0.0);
                }
-                c->inputs[attr * 4 + i] =
-                        get_swizzled_channel(c, vpm_reads, swiz);
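+                /* The snorm path unpacked (x + 128) / 255 above, so scale
+                 * and bias back out: ((x + 128) / 255) * 2 - 1 equals
+                 * (2x + 1) / 255, giving 1.0 at x = 127 and -1.0 at
+                 * x = -128.
+                 */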
+                if (swiz <= UTIL_FORMAT_SWIZZLE_W &&
+                    desc->channel[swiz].normalized &&
+                    desc->channel[swiz].type == UTIL_FORMAT_TYPE_SIGNED) {
+                        result = qir_FSUB(c,
+                                          qir_FMUL(c,
+                                                   result,
+                                                   qir_uniform_f(c, 2.0)),
+                                          qir_uniform_f(c, 1.0));
+                }
+
+                c->inputs[attr * 4 + i] = result;
        }
}
        [QOP_R4_UNPACK_B] = { "r4_unpack_b", 1, 1 },
        [QOP_R4_UNPACK_C] = { "r4_unpack_c", 1, 1 },
        [QOP_R4_UNPACK_D] = { "r4_unpack_d", 1, 1 },
+        [QOP_UNPACK_8A] = { "unpack_8a", 1, 1 },
+        [QOP_UNPACK_8B] = { "unpack_8b", 1, 1 },
+        [QOP_UNPACK_8C] = { "unpack_8c", 1, 1 },
+        [QOP_UNPACK_8D] = { "unpack_8d", 1, 1 },
};
static const char *
        QOP_FRAG_Z,
        QOP_FRAG_W,
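+
+        /** Unpacks byte 0-3 of a 32-bit value as an 8-bit unorm float. */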
+        QOP_UNPACK_8A,
+        QOP_UNPACK_8B,
+        QOP_UNPACK_8C,
+        QOP_UNPACK_8D,
+
        /** Texture x coordinate parameter write */
        QOP_TEX_S,
        /** Texture y coordinate parameter write */
        return t;
}
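+
+/* Emits a QIR unpack of byte "i" (0-3) of src as an 8-bit unorm float. */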
+static inline struct qreg
+qir_UNPACK_8(struct vc4_compile *c, struct qreg src, int i)
+{
+        struct qreg t = qir_get_temp(c);
+        qir_emit(c, qir_inst(QOP_UNPACK_8A + i, t, src, c->undef));
+        return t;
+}
+
#endif /* VC4_QIR_H */
                        break;
+                case QOP_UNPACK_8A:
+                case QOP_UNPACK_8B:
+                case QOP_UNPACK_8C:
+                case QOP_UNPACK_8D: {
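+                        /* The unpack flags only work on A-file reads, so
+                         * register allocation pins our source there (see the
+                         * QOP_UNPACK_8* cases in the register allocator).
+                         */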
+                        assert(src[0].mux == QPU_MUX_A);
+
+                        /* Since we're setting the pack/unpack bits, a
+                         * destination in the A file could get re-packed on
+                         * the write, so stage the result in an accumulator
+                         * instead.
+                         */
+                        struct qpu_reg orig_dst = dst;
+                        if (orig_dst.mux == QPU_MUX_A)
+                                dst = qpu_rn(3);
+
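+                        /* FMAX(x, x) is used as a float mov: routing the
+                         * value through a float op makes the A-file unpack
+                         * return the 8-bit unorm as a [0, 1] float rather
+                         * than raw bits.
+                         */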
+                        queue(c, qpu_a_FMAX(dst, src[0], src[0]));
+                        *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_8A +
+                                                       (qinst->op -
+                                                        QOP_UNPACK_8A),
+                                                       QPU_UNPACK);
+
+                        if (orig_dst.mux == QPU_MUX_A) {
+                                queue(c, qpu_a_MOV(orig_dst, dst));
+                        }
+                        break;
+                }
+
                default:
                        assert(qinst->op < ARRAY_SIZE(translate));
                        assert(translate[qinst->op].op != 0); /* NOPs */
                        ra_set_node_class(g, inst->dst.index, vc4->reg_class_a);
                        break;
+                case QOP_UNPACK_8A:
+                case QOP_UNPACK_8B:
+                case QOP_UNPACK_8C:
+                case QOP_UNPACK_8D:
+                        /* The unpack flags require an A-file src register. */
+                        ra_set_node_class(g, inst->src[0].index, vc4->reg_class_a);
+                        break;
+
                default:
                        break;
                }
                return FALSE;
        }
-        if (usage & PIPE_BIND_VERTEX_BUFFER &&
-            (format == PIPE_FORMAT_R32G32B32A32_FLOAT ||
-             format == PIPE_FORMAT_R32G32B32_FLOAT ||
-             format == PIPE_FORMAT_R32G32_FLOAT ||
-             format == PIPE_FORMAT_R32_FLOAT)) {
-                retval |= PIPE_BIND_VERTEX_BUFFER;
+        if (usage & PIPE_BIND_VERTEX_BUFFER) {
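+                /* 32-bit floats are fetched as-is; 8-bit unorm and snorm
+                 * attributes are unpacked to floats in the vertex shader
+                 * using the QPU's 8-bit unpack operations.
+                 */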
+                switch (format) {
+                case PIPE_FORMAT_R32G32B32A32_FLOAT:
+                case PIPE_FORMAT_R32G32B32_FLOAT:
+                case PIPE_FORMAT_R32G32_FLOAT:
+                case PIPE_FORMAT_R32_FLOAT:
+                case PIPE_FORMAT_R8G8B8A8_UNORM:
+                case PIPE_FORMAT_R8G8B8_UNORM:
+                case PIPE_FORMAT_R8G8_UNORM:
+                case PIPE_FORMAT_R8_UNORM:
+                case PIPE_FORMAT_R8G8B8A8_SNORM:
+                case PIPE_FORMAT_R8G8B8_SNORM:
+                case PIPE_FORMAT_R8G8_SNORM:
+                case PIPE_FORMAT_R8_SNORM:
+                        retval |= PIPE_BIND_VERTEX_BUFFER;
+                        break;
+                default:
+                        break;
+                }
        }
        if ((usage & PIPE_BIND_RENDER_TARGET) &&